apple/xnu: iokit/Kernel/IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/system.h>
45 #include <libkern/OSDebug.h>
46 #include <sys/proc.h>
47 #include <sys/kauth.h>
48 #include <sys/codesign.h>
49
50 #include <mach/sdt.h>
51
52 #if CONFIG_MACF
53
54 extern "C" {
55 #include <security/mac_framework.h>
56 };
57 #include <sys/kauth.h>
58
59 #define IOMACF_LOG 0
60
61 #endif /* CONFIG_MACF */
62
63 #include <IOKit/assert.h>
64
65 #include "IOServicePrivate.h"
66 #include "IOKitKernelInternal.h"
67
68 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
69 #define SCALAR32(x) ((uint32_t )x)
70 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
71 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
72 #define REF32(x) ((int)(x))
73
74 enum
75 {
76 kIOUCAsync0Flags = 3ULL,
77 kIOUCAsync64Flag = 1ULL,
78 kIOUCAsyncErrorLoggedFlag = 2ULL
79 };
80
81 #if IOKITSTATS
82
83 #define IOStatisticsRegisterCounter() \
84 do { \
85 reserved->counter = IOStatistics::registerUserClient(this); \
86 } while (0)
87
88 #define IOStatisticsUnregisterCounter() \
89 do { \
90 if (reserved) \
91 IOStatistics::unregisterUserClient(reserved->counter); \
92 } while (0)
93
94 #define IOStatisticsClientCall() \
95 do { \
96 IOStatistics::countUserClientCall(client); \
97 } while (0)
98
99 #else
100
101 #define IOStatisticsRegisterCounter()
102 #define IOStatisticsUnregisterCounter()
103 #define IOStatisticsClientCall()
104
105 #endif /* IOKITSTATS */
106
107 #if DEVELOPMENT || DEBUG
108
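// Debug-only helper: temporarily overwrite this frame's saved return address
// with the supplied pointer (the client's metaclass) so that backtraces taken
// while the call is in flight identify which user client class requested it;
// FAKE_STACK_FRAME_END() restores the real return address.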
109 #define FAKE_STACK_FRAME(a) \
110 const void ** __frameptr; \
111 const void * __retaddr; \
112 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
113 __retaddr = __frameptr[1]; \
114 __frameptr[1] = (a);
115
116 #define FAKE_STACK_FRAME_END() \
117 __frameptr[1] = __retaddr;
118
119 #else /* DEVELOPMENT || DEBUG */
120
121 #define FAKE_STACK_FRAME(a)
122 #define FAKE_STACK_FRAME_END()
123
124 #endif /* DEVELOPMENT || DEBUG */
125
126 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
127
128 extern "C" {
129
130 #include <mach/mach_traps.h>
131 #include <vm/vm_map.h>
132
133 } /* extern "C" */
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
138
139 class IOMachPort : public OSObject
140 {
141 OSDeclareDefaultStructors(IOMachPort)
142 public:
143 OSObject * object;
144 ipc_port_t port;
145 UInt32 mscount;
146 UInt8 holdDestroy;
147
148 static IOMachPort * portForObject( OSObject * obj,
149 ipc_kobject_type_t type );
150 static bool noMoreSendersForObject( OSObject * obj,
151 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
152 static void releasePortForObject( OSObject * obj,
153 ipc_kobject_type_t type );
154 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
155
156 static OSDictionary * dictForType( ipc_kobject_type_t type );
157
158 static mach_port_name_t makeSendRightForTask( task_t task,
159 io_object_t obj, ipc_kobject_type_t type );
160
161 virtual void free() APPLE_KEXT_OVERRIDE;
162 };
163
164 #define super OSObject
165 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
166
167 static IOLock * gIOObjectPortLock;
168
169 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
170
171 // not in dictForType() for debugging ease
172 static OSDictionary * gIOObjectPorts;
173 static OSDictionary * gIOConnectPorts;
174 static OSDictionary * gIOIdentifierPorts;
175
176 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
177 {
178 OSDictionary ** dict;
179
180 switch (type)
181 {
182 case IKOT_IOKIT_OBJECT:
183 dict = &gIOObjectPorts;
184 break;
185 case IKOT_IOKIT_CONNECT:
186 dict = &gIOConnectPorts;
187 break;
188 case IKOT_IOKIT_IDENT:
189 dict = &gIOIdentifierPorts;
190 break;
191 default:
192 panic("dictForType %d", type);
193 dict = NULL;
194 break;
195 }
196
197 if( 0 == *dict)
198 *dict = OSDictionary::withCapacity( 1 );
199
200 return( *dict );
201 }
202
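// Return the IOMachPort tracking 'obj' for the given port type, creating the
// kobject port on first use. Runs under gIOObjectPortLock; the entry in the
// per-type dictionary retains obj, and the returned IOMachPort is retained
// for the caller. mscount counts the make-send rights handed out.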
203 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
204 ipc_kobject_type_t type )
205 {
206 IOMachPort * inst = 0;
207 OSDictionary * dict;
208
209 IOTakeLock( gIOObjectPortLock);
210
211 do {
212
213 dict = dictForType( type );
214 if( !dict)
215 continue;
216
217 if( (inst = (IOMachPort *)
218 dict->getObject( (const OSSymbol *) obj ))) {
219 inst->mscount++;
220 inst->retain();
221 continue;
222 }
223
224 inst = new IOMachPort;
225 if( !inst || !inst->init()) {
226 OSSafeReleaseNULL(inst);
227 continue;
228 }
229
230 inst->port = iokit_alloc_object_port( obj, type );
231 if( inst->port) {
232 // retains obj
233 dict->setObject( (const OSSymbol *) obj, inst );
234 inst->mscount++;
235
236 } else {
237 inst->release();
238 inst = 0;
239 }
240
241 } while( false );
242
243 IOUnlock( gIOObjectPortLock);
244
245 return( inst );
246 }
247
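// Called when a no-senders notification arrives for obj's port. If the port's
// make-send count has caught up with ours, the mapping is removed (and a user
// client is told it has no more senders); otherwise the caller gets our newer
// mscount back so it can re-arm the notification.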
248 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
249 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
250 {
251 OSDictionary * dict;
252 IOMachPort * machPort;
253 IOUserClient * uc;
254 bool destroyed = true;
255
256 IOTakeLock( gIOObjectPortLock);
257
258 if( (dict = dictForType( type ))) {
259 obj->retain();
260
261 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
262 if( machPort) {
263 destroyed = (machPort->mscount <= *mscount);
264 if (!destroyed) *mscount = machPort->mscount;
265 else
266 {
267 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
268 {
269 uc->noMoreSenders();
270 }
271 dict->removeObject( (const OSSymbol *) obj );
272 }
273 }
274 obj->release();
275 }
276
277 IOUnlock( gIOObjectPortLock);
278
279 return( destroyed );
280 }
281
282 void IOMachPort::releasePortForObject( OSObject * obj,
283 ipc_kobject_type_t type )
284 {
285 OSDictionary * dict;
286 IOMachPort * machPort;
287
288 assert(IKOT_IOKIT_CONNECT != type);
289
290 IOTakeLock( gIOObjectPortLock);
291
292 if( (dict = dictForType( type ))) {
293 obj->retain();
294 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
295 if( machPort && !machPort->holdDestroy)
296 dict->removeObject( (const OSSymbol *) obj );
297 obj->release();
298 }
299
300 IOUnlock( gIOObjectPortLock);
301 }
302
303 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
304 {
305 OSDictionary * dict;
306 IOMachPort * machPort;
307
308 IOLockLock( gIOObjectPortLock );
309
310 if( (dict = dictForType( type ))) {
311 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
312 if( machPort)
313 machPort->holdDestroy = true;
314 }
315
316 IOLockUnlock( gIOObjectPortLock );
317 }
318
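// Drop the user-space references to obj: remove its IOKIT_OBJECT port mapping,
// and for a user client detach its connect port, re-keying the port to the
// client's mappings object so outstanding memory mappings keep a live port.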
319 void IOUserClient::destroyUserReferences( OSObject * obj )
320 {
321 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
322
323 // panther, 3160200
324 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
325
326 OSDictionary * dict;
327
328 IOTakeLock( gIOObjectPortLock);
329 obj->retain();
330
331 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
332 {
333 IOMachPort * port;
334 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
335 if (port)
336 {
337 IOUserClient * uc;
338 if ((uc = OSDynamicCast(IOUserClient, obj)))
339 {
340 uc->noMoreSenders();
341 if (uc->mappings)
342 {
343 dict->setObject((const OSSymbol *) uc->mappings, port);
344 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
345
346 uc->mappings->release();
347 uc->mappings = 0;
348 }
349 }
350 dict->removeObject( (const OSSymbol *) obj );
351 }
352 }
353 obj->release();
354 IOUnlock( gIOObjectPortLock);
355 }
356
357 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
358 io_object_t obj, ipc_kobject_type_t type )
359 {
360 return( iokit_make_send_right( task, obj, type ));
361 }
362
363 void IOMachPort::free( void )
364 {
365 if( port)
366 iokit_destroy_object_port( port );
367 super::free();
368 }
369
370 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
371
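// IOUserIterator wraps a kernel OSIterator (or other iteration state) handed
// out to user space as an io_object_t, serializing access through its own lock.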
372 class IOUserIterator : public OSIterator
373 {
374 OSDeclareDefaultStructors(IOUserIterator)
375 public:
376 OSObject * userIteratorObject;
377 IOLock * lock;
378
379 static IOUserIterator * withIterator(OSIterator * iter);
380 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
381 virtual void free() APPLE_KEXT_OVERRIDE;
382
383 virtual void reset() APPLE_KEXT_OVERRIDE;
384 virtual bool isValid() APPLE_KEXT_OVERRIDE;
385 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
386 };
387
388 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
389
390 class IOUserNotification : public IOUserIterator
391 {
392 OSDeclareDefaultStructors(IOUserNotification)
393
394 #define holdNotify userIteratorObject
395
396 public:
397
398 virtual void free() APPLE_KEXT_OVERRIDE;
399
400 virtual void setNotification( IONotifier * obj );
401
402 virtual void reset() APPLE_KEXT_OVERRIDE;
403 virtual bool isValid() APPLE_KEXT_OVERRIDE;
404 };
405
406 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
407
408 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
409
410 IOUserIterator *
411 IOUserIterator::withIterator(OSIterator * iter)
412 {
413 IOUserIterator * me;
414
415 if (!iter) return (0);
416
417 me = new IOUserIterator;
418 if (me && !me->init())
419 {
420 me->release();
421 me = 0;
422 }
423 if (!me) return me;
424 me->userIteratorObject = iter;
425
426 return (me);
427 }
428
429 bool
430 IOUserIterator::init( void )
431 {
432 if (!OSObject::init()) return (false);
433
434 lock = IOLockAlloc();
435 if( !lock)
436 return( false );
437
438 return (true);
439 }
440
441 void
442 IOUserIterator::free()
443 {
444 if (userIteratorObject) userIteratorObject->release();
445 if (lock) IOLockFree(lock);
446 OSObject::free();
447 }
448
449 void
450 IOUserIterator::reset()
451 {
452 IOLockLock(lock);
453 assert(OSDynamicCast(OSIterator, userIteratorObject));
454 ((OSIterator *)userIteratorObject)->reset();
455 IOLockUnlock(lock);
456 }
457
458 bool
459 IOUserIterator::isValid()
460 {
461 bool ret;
462
463 IOLockLock(lock);
464 assert(OSDynamicCast(OSIterator, userIteratorObject));
465 ret = ((OSIterator *)userIteratorObject)->isValid();
466 IOLockUnlock(lock);
467
468 return (ret);
469 }
470
471 OSObject *
472 IOUserIterator::getNextObject()
473 {
474 OSObject * ret;
475
476 IOLockLock(lock);
477 assert(OSDynamicCast(OSIterator, userIteratorObject));
478 ret = ((OSIterator *)userIteratorObject)->getNextObject();
479 IOLockUnlock(lock);
480
481 return (ret);
482 }
483
484 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
485 extern "C" {
486
487 // functions called from osfmk/device/iokit_rpc.c
488
489 void
490 iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
491 {
492 IOUserClient * uc;
493
494 if (!obj) return;
495
496 if ((IKOT_IOKIT_CONNECT == type)
497 && (uc = OSDynamicCast(IOUserClient, obj)))
498 {
499 OSIncrementAtomic(&uc->__ipc);
500 }
501
502 obj->retain();
503 }
504
505 void
506 iokit_remove_reference( io_object_t obj )
507 {
508 if( obj)
509 obj->release();
510 }
511
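// Drop a reference taken for an in-flight IPC right to a connect object.
// When the last such reference goes away on an inactive user client, any
// finalization deferred by finalizeUserReferences() is scheduled here.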
512 void
513 iokit_remove_connect_reference( io_object_t obj )
514 {
515 IOUserClient * uc;
516 bool finalize = false;
517
518 if (!obj) return;
519
520 if ((uc = OSDynamicCast(IOUserClient, obj)))
521 {
522 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
523 {
524 IOLockLock(gIOObjectPortLock);
525 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
526 IOLockUnlock(gIOObjectPortLock);
527 }
528 if (finalize) uc->scheduleFinalize(true);
529 }
530
531 obj->release();
532 }
533
534 bool
535 IOUserClient::finalizeUserReferences(OSObject * obj)
536 {
537 IOUserClient * uc;
538 bool ok = true;
539
540 if ((uc = OSDynamicCast(IOUserClient, obj)))
541 {
542 IOLockLock(gIOObjectPortLock);
543 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
544 IOLockUnlock(gIOObjectPortLock);
545 }
546 return (ok);
547 }
548
549 ipc_port_t
550 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
551 {
552 IOMachPort * machPort;
553 ipc_port_t port;
554
555 if( (machPort = IOMachPort::portForObject( obj, type ))) {
556
557 port = machPort->port;
558 if( port)
559 iokit_retain_port( port );
560
561 machPort->release();
562
563 } else
564 port = NULL;
565
566 return( port );
567 }
568
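// No-more-senders handling for IOKit ports: once the last send right for a
// connect port is gone the user client's clientDied() runs; for plain object
// ports, memory maps and user notifications are torn down instead.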
569 kern_return_t
570 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
571 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
572 {
573 IOUserClient * client;
574 IOMemoryMap * map;
575 IOUserNotification * notify;
576
577 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
578 return( kIOReturnNotReady );
579
580 if( IKOT_IOKIT_CONNECT == type)
581 {
582 if( (client = OSDynamicCast( IOUserClient, obj )))
583 {
584 IOStatisticsClientCall();
585 IOLockLock(client->lock);
586 client->clientDied();
587 IOLockUnlock(client->lock);
588 }
589 }
590 else if( IKOT_IOKIT_OBJECT == type)
591 {
592 if( (map = OSDynamicCast( IOMemoryMap, obj )))
593 map->taskDied();
594 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
595 notify->setNotification( 0 );
596 }
597
598 return( kIOReturnSuccess );
599 }
600
601 }; /* extern "C" */
602
603 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
604
605 class IOServiceUserNotification : public IOUserNotification
606 {
607 OSDeclareDefaultStructors(IOServiceUserNotification)
608
609 struct PingMsg {
610 mach_msg_header_t msgHdr;
611 OSNotificationHeader64 notifyHeader;
612 };
613
614 enum { kMaxOutstanding = 1024 };
615
616 PingMsg * pingMsg;
617 vm_size_t msgSize;
618 OSArray * newSet;
619 OSObject * lastEntry;
620 bool armed;
621 bool ipcLogged;
622
623 public:
624
625 virtual bool init( mach_port_t port, natural_t type,
626 void * reference, vm_size_t referenceSize,
627 bool clientIs64 );
628 virtual void free() APPLE_KEXT_OVERRIDE;
629 void invalidatePort(void);
630
631 static bool _handler( void * target,
632 void * ref, IOService * newService, IONotifier * notifier );
633 virtual bool handler( void * ref, IOService * newService );
634
635 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
636 };
637
638 class IOServiceMessageUserNotification : public IOUserNotification
639 {
640 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
641
642 struct PingMsg {
643 mach_msg_header_t msgHdr;
644 mach_msg_body_t msgBody;
645 mach_msg_port_descriptor_t ports[1];
646 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
647 };
648
649 PingMsg * pingMsg;
650 vm_size_t msgSize;
651 uint8_t clientIs64;
652 int owningPID;
653 bool ipcLogged;
654
655 public:
656
657 virtual bool init( mach_port_t port, natural_t type,
658 void * reference, vm_size_t referenceSize,
659 vm_size_t extraSize,
660 bool clientIs64 );
661
662 virtual void free() APPLE_KEXT_OVERRIDE;
663 void invalidatePort(void);
664
665 static IOReturn _handler( void * target, void * ref,
666 UInt32 messageType, IOService * provider,
667 void * messageArgument, vm_size_t argSize );
668 virtual IOReturn handler( void * ref,
669 UInt32 messageType, IOService * provider,
670 void * messageArgument, vm_size_t argSize );
671
672 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
673 };
674
675 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
676
677 #undef super
678 #define super IOUserIterator
679 OSDefineMetaClass( IOUserNotification, IOUserIterator )
680 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
681
682 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
683
684 void IOUserNotification::free( void )
685 {
686 if (holdNotify)
687 {
688 assert(OSDynamicCast(IONotifier, holdNotify));
689 ((IONotifier *)holdNotify)->remove();
690 holdNotify = 0;
691 }
692 // can't be in handler now
693
694 super::free();
695 }
696
697
698 void IOUserNotification::setNotification( IONotifier * notify )
699 {
700 OSObject * previousNotify;
701
702 IOLockLock( gIOObjectPortLock);
703
704 previousNotify = holdNotify;
705 holdNotify = notify;
706
707 IOLockUnlock( gIOObjectPortLock);
708
709 if( previousNotify)
710 {
711 assert(OSDynamicCast(IONotifier, previousNotify));
712 ((IONotifier *)previousNotify)->remove();
713 }
714 }
715
716 void IOUserNotification::reset()
717 {
718 // ?
719 }
720
721 bool IOUserNotification::isValid()
722 {
723 return( true );
724 }
725
726 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
727
728 #undef super
729 #define super IOUserNotification
730 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
731
732 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
733
734 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
735 void * reference, vm_size_t referenceSize,
736 bool clientIs64 )
737 {
738 if( !super::init())
739 return( false );
740
741 newSet = OSArray::withCapacity( 1 );
742 if( !newSet)
743 return( false );
744
745 if (referenceSize > sizeof(OSAsyncReference64))
746 return( false );
747
748 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
749 pingMsg = (PingMsg *) IOMalloc( msgSize);
750 if( !pingMsg)
751 return( false );
752
753 bzero( pingMsg, msgSize);
754
755 pingMsg->msgHdr.msgh_remote_port = port;
756 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
757 MACH_MSG_TYPE_COPY_SEND /*remote*/,
758 MACH_MSG_TYPE_MAKE_SEND /*local*/);
759 pingMsg->msgHdr.msgh_size = msgSize;
760 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
761
762 pingMsg->notifyHeader.size = 0;
763 pingMsg->notifyHeader.type = type;
764 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
765
766 return( true );
767 }
768
769 void IOServiceUserNotification::invalidatePort(void)
770 {
771 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
772 }
773
774 void IOServiceUserNotification::free( void )
775 {
776 PingMsg * _pingMsg;
777 vm_size_t _msgSize;
778 OSArray * _newSet;
779 OSObject * _lastEntry;
780
781 _pingMsg = pingMsg;
782 _msgSize = msgSize;
783 _lastEntry = lastEntry;
784 _newSet = newSet;
785
786 super::free();
787
788 if( _pingMsg && _msgSize) {
789 if (_pingMsg->msgHdr.msgh_remote_port) {
790 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
791 }
792 IOFree(_pingMsg, _msgSize);
793 }
794
795 if( _lastEntry)
796 _lastEntry->release();
797
798 if( _newSet)
799 _newSet->release();
800 }
801
802 bool IOServiceUserNotification::_handler( void * target,
803 void * ref, IOService * newService, IONotifier * notifier )
804 {
805 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
806 }
807
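// Queue a newly published (or terminated) service for the user process, up to
// kMaxOutstanding entries, and send a single ping message when the queue goes
// from empty to non-empty while armed; the client drains it via getNextObject().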
808 bool IOServiceUserNotification::handler( void * ref,
809 IOService * newService )
810 {
811 unsigned int count;
812 kern_return_t kr;
813 ipc_port_t port = NULL;
814 bool sendPing = false;
815
816 IOTakeLock( lock );
817
818 count = newSet->getCount();
819 if( count < kMaxOutstanding) {
820
821 newSet->setObject( newService );
822 if( (sendPing = (armed && (0 == count))))
823 armed = false;
824 }
825
826 IOUnlock( lock );
827
828 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
829 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
830
831 if( sendPing) {
832 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
833 pingMsg->msgHdr.msgh_local_port = port;
834 else
835 pingMsg->msgHdr.msgh_local_port = NULL;
836
837 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
838 pingMsg->msgHdr.msgh_size,
839 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
840 0);
841 if( port)
842 iokit_release_port( port );
843
844 if( (KERN_SUCCESS != kr) && !ipcLogged)
845 {
846 ipcLogged = true;
847 IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
848 }
849 }
850
851 return( true );
852 }
853
854 OSObject * IOServiceUserNotification::getNextObject()
855 {
856 unsigned int count;
857 OSObject * result;
858 OSObject * releaseEntry;
859
860 IOLockLock(lock);
861
862 releaseEntry = lastEntry;
863 count = newSet->getCount();
864 if( count ) {
865 result = newSet->getObject( count - 1 );
866 result->retain();
867 newSet->removeObject( count - 1);
868 } else {
869 result = 0;
870 armed = true;
871 }
872 lastEntry = result;
873
874 IOLockUnlock(lock);
875
876 if (releaseEntry) releaseEntry->release();
877
878 return( result );
879 }
880
881 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
882
883 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
884
885 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
886
887 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
888 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
889 bool client64 )
890 {
891 if( !super::init())
892 return( false );
893
894 if (referenceSize > sizeof(OSAsyncReference64))
895 return( false );
896
897 clientIs64 = client64;
898
899 owningPID = proc_selfpid();
900
901 extraSize += sizeof(IOServiceInterestContent64);
902 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
903 pingMsg = (PingMsg *) IOMalloc( msgSize);
904 if( !pingMsg)
905 return( false );
906
907 bzero( pingMsg, msgSize);
908
909 pingMsg->msgHdr.msgh_remote_port = port;
910 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
911 | MACH_MSGH_BITS(
912 MACH_MSG_TYPE_COPY_SEND /*remote*/,
913 MACH_MSG_TYPE_MAKE_SEND /*local*/);
914 pingMsg->msgHdr.msgh_size = msgSize;
915 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
916
917 pingMsg->msgBody.msgh_descriptor_count = 1;
918
919 pingMsg->ports[0].name = 0;
920 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
921 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
922
923 pingMsg->notifyHeader.size = extraSize;
924 pingMsg->notifyHeader.type = type;
925 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
926
927 return( true );
928 }
929
930 void IOServiceMessageUserNotification::invalidatePort(void)
931 {
932 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
933 }
934
935 void IOServiceMessageUserNotification::free( void )
936 {
937 PingMsg * _pingMsg;
938 vm_size_t _msgSize;
939
940 _pingMsg = pingMsg;
941 _msgSize = msgSize;
942
943 super::free();
944
945 if( _pingMsg && _msgSize) {
946 if (_pingMsg->msgHdr.msgh_remote_port) {
947 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
948 }
949 IOFree( _pingMsg, _msgSize);
950 }
951 }
952
953 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
954 UInt32 messageType, IOService * provider,
955 void * argument, vm_size_t argSize )
956 {
957 return( ((IOServiceMessageUserNotification *) target)->handler(
958 ref, messageType, provider, argument, argSize));
959 }
960
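// Deliver an interest notification to the user process: build a one-shot copy
// of the ping message (on the stack when it fits), append the message type and
// arguments as IOServiceInterestContent64, encode the payload size into the
// notification type bits, and send it along with ports for the provider and
// this notification object.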
961 IOReturn IOServiceMessageUserNotification::handler( void * ref,
962 UInt32 messageType, IOService * provider,
963 void * messageArgument, vm_size_t callerArgSize )
964 {
965 enum { kLocalMsgSize = 0x100 };
966 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
967 void * allocMsg;
968 kern_return_t kr;
969 vm_size_t argSize;
970 vm_size_t thisMsgSize;
971 ipc_port_t thisPort, providerPort;
972 struct PingMsg * thisMsg;
973 IOServiceInterestContent64 * data;
974
975 if (kIOMessageCopyClientID == messageType)
976 {
977 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
978 return (kIOReturnSuccess);
979 }
980
981 if (callerArgSize == 0)
982 {
983 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
984 else argSize = sizeof(uint32_t);
985 }
986 else
987 {
988 if( callerArgSize > kIOUserNotifyMaxMessageSize)
989 callerArgSize = kIOUserNotifyMaxMessageSize;
990 argSize = callerArgSize;
991 }
992
993 // adjust message size for ipc restrictions
994 natural_t type;
995 type = pingMsg->notifyHeader.type;
996 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
997 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
998 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
999
1000 thisMsgSize = msgSize
1001 + sizeof( IOServiceInterestContent64 )
1002 - sizeof( data->messageArgument)
1003 + argSize;
1004
1005 if (thisMsgSize > sizeof(stackMsg))
1006 {
1007 allocMsg = IOMalloc(thisMsgSize);
1008 if (!allocMsg) return (kIOReturnNoMemory);
1009 thisMsg = (typeof(thisMsg)) allocMsg;
1010 }
1011 else
1012 {
1013 allocMsg = 0;
1014 thisMsg = (typeof(thisMsg)) stackMsg;
1015 }
1016
1017 bcopy(pingMsg, thisMsg, msgSize);
1018 thisMsg->notifyHeader.type = type;
1019 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1020 // == pingMsg->notifyHeader.content;
1021 data->messageType = messageType;
1022
1023 if (callerArgSize == 0)
1024 {
1025 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1026 if (!clientIs64)
1027 {
1028 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1029 }
1030 }
1031 else
1032 {
1033 bcopy( messageArgument, data->messageArgument, callerArgSize );
1034 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1035 }
1036
1037 thisMsg->notifyHeader.type = type;
1038 thisMsg->msgHdr.msgh_size = thisMsgSize;
1039
1040 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1041 thisMsg->ports[0].name = providerPort;
1042 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1043 thisMsg->msgHdr.msgh_local_port = thisPort;
1044
1045 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1046 thisMsg->msgHdr.msgh_size,
1047 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1048 0);
1049 if( thisPort)
1050 iokit_release_port( thisPort );
1051 if( providerPort)
1052 iokit_release_port( providerPort );
1053
1054 if (allocMsg)
1055 IOFree(allocMsg, thisMsgSize);
1056
1057 if((KERN_SUCCESS != kr) && !ipcLogged)
1058 {
1059 ipcLogged = true;
1060 IOLog("%s: mach_msg_send_from_kernel_with_options (0x%x)\n", __PRETTY_FUNCTION__, kr );
1061 }
1062
1063 return( kIOReturnSuccess );
1064 }
1065
1066 OSObject * IOServiceMessageUserNotification::getNextObject()
1067 {
1068 return( 0 );
1069 }
1070
1071 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1072
1073 #undef super
1074 #define super IOService
1075 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1076
1077 IOLock * gIOUserClientOwnersLock;
1078
1079 void IOUserClient::initialize( void )
1080 {
1081 gIOObjectPortLock = IOLockAlloc();
1082 gIOUserClientOwnersLock = IOLockAlloc();
1083 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1084 }
1085
1086 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1087 mach_port_t wakePort,
1088 void *callback, void *refcon)
1089 {
1090 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1091 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1092 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1093 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1094 }
1095
1096 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1097 mach_port_t wakePort,
1098 mach_vm_address_t callback, io_user_reference_t refcon)
1099 {
1100 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1101 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1102 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1103 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1104 }
1105
1106 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1107 mach_port_t wakePort,
1108 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1109 {
1110 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1111 if (vm_map_is_64bit(get_task_map(task))) {
1112 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1113 }
1114 }
1115
1116 static OSDictionary * CopyConsoleUser(UInt32 uid)
1117 {
1118 OSArray * array;
1119 OSDictionary * user = 0;
1120
1121 if ((array = OSDynamicCast(OSArray,
1122 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1123 {
1124 for (unsigned int idx = 0;
1125 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1126 idx++) {
1127 OSNumber * num;
1128
1129 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1130 && (uid == num->unsigned32BitValue())) {
1131 user->retain();
1132 break;
1133 }
1134 }
1135 array->release();
1136 }
1137 return user;
1138 }
1139
1140 static OSDictionary * CopyUserOnConsole(void)
1141 {
1142 OSArray * array;
1143 OSDictionary * user = 0;
1144
1145 if ((array = OSDynamicCast(OSArray,
1146 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1147 {
1148 for (unsigned int idx = 0;
1149 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1150 idx++)
1151 {
1152 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1153 {
1154 user->retain();
1155 break;
1156 }
1157 }
1158 array->release();
1159 }
1160 return (user);
1161 }
1162
1163 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1164 IOService * service )
1165 {
1166 proc_t p;
1167
1168 p = (proc_t) get_bsdtask_info(task);
1169 if (p)
1170 {
1171 uint64_t authorizationID;
1172
1173 authorizationID = proc_uniqueid(p);
1174 if (authorizationID)
1175 {
1176 if (service->getAuthorizationID() == authorizationID)
1177 {
1178 return (kIOReturnSuccess);
1179 }
1180 }
1181 }
1182
1183 return (kIOReturnNotPermitted);
1184 }
1185
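// Check a named privilege for the calling task: foreground (not GPU-denied),
// console session (audit session matches the user on console), administrator
// (security token uid 0), local user, console user, or secure-console process.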
1186 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1187 const char * privilegeName )
1188 {
1189 kern_return_t kr;
1190 security_token_t token;
1191 mach_msg_type_number_t count;
1192 task_t task;
1193 OSDictionary * user;
1194 bool secureConsole;
1195
1196
1197 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1198 sizeof(kIOClientPrivilegeForeground)))
1199 {
1200 if (task_is_gpu_denied(current_task()))
1201 return (kIOReturnNotPrivileged);
1202 else
1203 return (kIOReturnSuccess);
1204 }
1205
1206 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1207 sizeof(kIOClientPrivilegeConsoleSession)))
1208 {
1209 kauth_cred_t cred;
1210 proc_t p;
1211
1212 task = (task_t) securityToken;
1213 if (!task)
1214 task = current_task();
1215 p = (proc_t) get_bsdtask_info(task);
1216 kr = kIOReturnNotPrivileged;
1217
1218 if (p && (cred = kauth_cred_proc_ref(p)))
1219 {
1220 user = CopyUserOnConsole();
1221 if (user)
1222 {
1223 OSNumber * num;
1224 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1225 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1226 {
1227 kr = kIOReturnSuccess;
1228 }
1229 user->release();
1230 }
1231 kauth_cred_unref(&cred);
1232 }
1233 return (kr);
1234 }
1235
1236 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1237 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1238 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1239 else
1240 task = (task_t)securityToken;
1241
1242 count = TASK_SECURITY_TOKEN_COUNT;
1243 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1244
1245 if (KERN_SUCCESS != kr)
1246 {}
1247 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1248 sizeof(kIOClientPrivilegeAdministrator))) {
1249 if (0 != token.val[0])
1250 kr = kIOReturnNotPrivileged;
1251 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1252 sizeof(kIOClientPrivilegeLocalUser))) {
1253 user = CopyConsoleUser(token.val[0]);
1254 if ( user )
1255 user->release();
1256 else
1257 kr = kIOReturnNotPrivileged;
1258 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1259 sizeof(kIOClientPrivilegeConsoleUser))) {
1260 user = CopyConsoleUser(token.val[0]);
1261 if ( user ) {
1262 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1263 kr = kIOReturnNotPrivileged;
1264 else if ( secureConsole ) {
1265 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1266 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1267 kr = kIOReturnNotPrivileged;
1268 }
1269 user->release();
1270 }
1271 else
1272 kr = kIOReturnNotPrivileged;
1273 } else
1274 kr = kIOReturnUnsupported;
1275
1276 return (kr);
1277 }
1278
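// Copy the value of a single entitlement from the task's code signature.
// The entitlements XML is size-limited before parsing; the returned object
// is retained and must be released by the caller, or NULL if absent.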
1279 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1280 const char * entitlement )
1281 {
1282 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1283
1284 proc_t p = NULL;
1285 pid_t pid = 0;
1286 char procname[MAXCOMLEN + 1] = "";
1287 size_t len = 0;
1288 void *entitlements_blob = NULL;
1289 char *entitlements_data = NULL;
1290 OSObject *entitlements_obj = NULL;
1291 OSDictionary *entitlements = NULL;
1292 OSString *errorString = NULL;
1293 OSObject *value = NULL;
1294
1295 p = (proc_t)get_bsdtask_info(task);
1296 if (p == NULL)
1297 goto fail;
1298 pid = proc_pid(p);
1299 proc_name(pid, procname, (int)sizeof(procname));
1300
1301 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1302 goto fail;
1303
1304 if (len <= offsetof(CS_GenericBlob, data))
1305 goto fail;
1306
1307 /*
1308 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1309 * we'll try to parse in the kernel.
1310 */
1311 len -= offsetof(CS_GenericBlob, data);
1312 if (len > MAX_ENTITLEMENTS_LEN) {
1313 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1314 goto fail;
1315 }
1316
1317 /*
1318 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1319 * what is stored in the entitlements blob. Copy the string and
1320 * terminate it.
1321 */
1322 entitlements_data = (char *)IOMalloc(len + 1);
1323 if (entitlements_data == NULL)
1324 goto fail;
1325 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1326 entitlements_data[len] = '\0';
1327
1328 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1329 if (errorString != NULL) {
1330 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1331 goto fail;
1332 }
1333 if (entitlements_obj == NULL)
1334 goto fail;
1335
1336 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1337 if (entitlements == NULL)
1338 goto fail;
1339
1340 /* Fetch the entitlement value from the dictionary. */
1341 value = entitlements->getObject(entitlement);
1342 if (value != NULL)
1343 value->retain();
1344
1345 fail:
1346 if (entitlements_data != NULL)
1347 IOFree(entitlements_data, len + 1);
1348 if (entitlements_obj != NULL)
1349 entitlements_obj->release();
1350 if (errorString != NULL)
1351 errorString->release();
1352 return value;
1353 }
1354
1355 bool IOUserClient::init()
1356 {
1357 if (getPropertyTable() || super::init())
1358 return reserve();
1359
1360 return false;
1361 }
1362
1363 bool IOUserClient::init(OSDictionary * dictionary)
1364 {
1365 if (getPropertyTable() || super::init(dictionary))
1366 return reserve();
1367
1368 return false;
1369 }
1370
1371 bool IOUserClient::initWithTask(task_t owningTask,
1372 void * securityID,
1373 UInt32 type )
1374 {
1375 if (getPropertyTable() || super::init())
1376 return reserve();
1377
1378 return false;
1379 }
1380
1381 bool IOUserClient::initWithTask(task_t owningTask,
1382 void * securityID,
1383 UInt32 type,
1384 OSDictionary * properties )
1385 {
1386 bool ok;
1387
1388 ok = super::init( properties );
1389 ok &= initWithTask( owningTask, securityID, type );
1390
1391 return( ok );
1392 }
1393
1394 bool IOUserClient::reserve()
1395 {
1396 if(!reserved) {
1397 reserved = IONew(ExpansionData, 1);
1398 if (!reserved) {
1399 return false;
1400 }
1401 }
1402 setTerminateDefer(NULL, true);
1403 IOStatisticsRegisterCounter();
1404
1405 return true;
1406 }
1407
1408 struct IOUserClientOwner
1409 {
1410 task_t task;
1411 queue_chain_t taskLink;
1412 IOUserClient * uc;
1413 queue_chain_t ucLink;
1414 };
1415
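// Record 'task' as an owner of this user client, linking the owner record on
// both the client's and the task's queues so iokit_task_terminate() can find
// and close connections left open when the task exits.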
1416 IOReturn
1417 IOUserClient::registerOwner(task_t task)
1418 {
1419 IOUserClientOwner * owner;
1420 IOReturn ret;
1421 bool newOwner;
1422
1423 IOLockLock(gIOUserClientOwnersLock);
1424
1425 newOwner = true;
1426 ret = kIOReturnSuccess;
1427
1428 if (!owners.next) queue_init(&owners);
1429 else
1430 {
1431 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1432 {
1433 if (task != owner->task) continue;
1434 newOwner = false;
1435 break;
1436 }
1437 }
1438 if (newOwner)
1439 {
1440 owner = IONew(IOUserClientOwner, 1);
1441 if (!owner) ret = kIOReturnNoMemory;
1442 else
1443 {
1444 owner->task = task;
1445 owner->uc = this;
1446 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1447 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1448 }
1449 }
1450
1451 IOLockUnlock(gIOUserClientOwnersLock);
1452
1453 return (ret);
1454 }
1455
1456 void
1457 IOUserClient::noMoreSenders(void)
1458 {
1459 IOUserClientOwner * owner;
1460
1461 IOLockLock(gIOUserClientOwnersLock);
1462
1463 if (owners.next)
1464 {
1465 while (!queue_empty(&owners))
1466 {
1467 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1468 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1469 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1470 IODelete(owner, IOUserClientOwner, 1);
1471 }
1472 owners.next = owners.prev = NULL;
1473 }
1474
1475 IOLockUnlock(gIOUserClientOwnersLock);
1476 }
1477
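// Called during task termination: unlink every IOUserClientOwner belonging to
// the task and deliver clientDied() to user clients that are left with no
// owning task, closing connections whose ports were never deallocated.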
1478 extern "C" kern_return_t
1479 iokit_task_terminate(task_t task)
1480 {
1481 IOUserClientOwner * owner;
1482 IOUserClient * dead;
1483 IOUserClient * uc;
1484 queue_head_t * taskque;
1485
1486 IOLockLock(gIOUserClientOwnersLock);
1487
1488 taskque = task_io_user_clients(task);
1489 dead = NULL;
1490 while (!queue_empty(taskque))
1491 {
1492 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1493 uc = owner->uc;
1494 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1495 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1496 if (queue_empty(&uc->owners))
1497 {
1498 uc->retain();
1499 IOLog("destroying out of band connect for %s\n", uc->getName());
1500 // now using the uc queue head as a singly linked queue,
1501 // leaving .next as NULL to mark it empty
1502 uc->owners.next = NULL;
1503 uc->owners.prev = (queue_entry_t) dead;
1504 dead = uc;
1505 }
1506 IODelete(owner, IOUserClientOwner, 1);
1507 }
1508
1509 IOLockUnlock(gIOUserClientOwnersLock);
1510
1511 while (dead)
1512 {
1513 uc = dead;
1514 dead = (IOUserClient *)(void *) dead->owners.prev;
1515 uc->owners.prev = NULL;
1516 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1517 uc->release();
1518 }
1519
1520 return (KERN_SUCCESS);
1521 }
1522
1523 void IOUserClient::free()
1524 {
1525 if( mappings) mappings->release();
1526 if (lock) IOLockFree(lock);
1527
1528 IOStatisticsUnregisterCounter();
1529
1530 assert(!owners.next);
1531 assert(!owners.prev);
1532
1533 if (reserved) IODelete(reserved, ExpansionData, 1);
1534
1535 super::free();
1536 }
1537
1538 IOReturn IOUserClient::clientDied( void )
1539 {
1540 IOReturn ret = kIOReturnNotReady;
1541
1542 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1543 {
1544 ret = clientClose();
1545 }
1546
1547 return (ret);
1548 }
1549
1550 IOReturn IOUserClient::clientClose( void )
1551 {
1552 return( kIOReturnUnsupported );
1553 }
1554
1555 IOService * IOUserClient::getService( void )
1556 {
1557 return( 0 );
1558 }
1559
1560 IOReturn IOUserClient::registerNotificationPort(
1561 mach_port_t /* port */,
1562 UInt32 /* type */,
1563 UInt32 /* refCon */)
1564 {
1565 return( kIOReturnUnsupported);
1566 }
1567
1568 IOReturn IOUserClient::registerNotificationPort(
1569 mach_port_t port,
1570 UInt32 type,
1571 io_user_reference_t refCon)
1572 {
1573 return (registerNotificationPort(port, type, (UInt32) refCon));
1574 }
1575
1576 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1577 semaphore_t * semaphore )
1578 {
1579 return( kIOReturnUnsupported);
1580 }
1581
1582 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1583 {
1584 return( kIOReturnUnsupported);
1585 }
1586
1587 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1588 IOOptionBits * options,
1589 IOMemoryDescriptor ** memory )
1590 {
1591 return( kIOReturnUnsupported);
1592 }
1593
1594 #if !__LP64__
1595 IOMemoryMap * IOUserClient::mapClientMemory(
1596 IOOptionBits type,
1597 task_t task,
1598 IOOptionBits mapFlags,
1599 IOVirtualAddress atAddress )
1600 {
1601 return (NULL);
1602 }
1603 #endif
1604
1605 IOMemoryMap * IOUserClient::mapClientMemory64(
1606 IOOptionBits type,
1607 task_t task,
1608 IOOptionBits mapFlags,
1609 mach_vm_address_t atAddress )
1610 {
1611 IOReturn err;
1612 IOOptionBits options = 0;
1613 IOMemoryDescriptor * memory = 0;
1614 IOMemoryMap * map = 0;
1615
1616 err = clientMemoryForType( (UInt32) type, &options, &memory );
1617
1618 if( memory && (kIOReturnSuccess == err)) {
1619
1620 FAKE_STACK_FRAME(getMetaClass());
1621
1622 options = (options & ~kIOMapUserOptionsMask)
1623 | (mapFlags & kIOMapUserOptionsMask);
1624 map = memory->createMappingInTask( task, atAddress, options );
1625 memory->release();
1626
1627 FAKE_STACK_FRAME_END();
1628 }
1629
1630 return( map );
1631 }
1632
1633 IOReturn IOUserClient::exportObjectToClient(task_t task,
1634 OSObject *obj, io_object_t *clientObj)
1635 {
1636 mach_port_name_t name;
1637
1638 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1639
1640 *(mach_port_name_t *)clientObj = name;
1641
1642 if (obj) obj->release();
1643
1644 return kIOReturnSuccess;
1645 }
1646
1647 IOReturn IOUserClient::copyPortNameForObjectInTask(task_t task,
1648 OSObject *obj, mach_port_name_t * port_name)
1649 {
1650 mach_port_name_t name;
1651
1652 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1653
1654 *(mach_port_name_t *) port_name = name;
1655
1656 return kIOReturnSuccess;
1657 }
1658
1659 IOReturn IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1660 OSObject **obj)
1661 {
1662 OSObject * object;
1663
1664 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1665
1666 *obj = object;
1667
1668 return (object ? kIOReturnSuccess : kIOReturnIPCError);
1669 }
1670
1671 IOReturn IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
1672 {
1673 return (iokit_mod_send_right(task, port_name, delta));
1674 }
1675
1676 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1677 {
1678 return( 0 );
1679 }
1680
1681 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1682 {
1683 return( 0 );
1684 }
1685
1686 IOExternalTrap * IOUserClient::
1687 getExternalTrapForIndex(UInt32 index)
1688 {
1689 return NULL;
1690 }
1691
1692 #pragma clang diagnostic push
1693 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1694
1695 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1696 // functions can break clients of kexts implementing getExternalMethodForIndex()
1697 IOExternalMethod * IOUserClient::
1698 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1699 {
1700 IOExternalMethod *method = getExternalMethodForIndex(index);
1701
1702 if (method)
1703 *targetP = (IOService *) method->object;
1704
1705 return method;
1706 }
1707
1708 IOExternalAsyncMethod * IOUserClient::
1709 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1710 {
1711 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1712
1713 if (method)
1714 *targetP = (IOService *) method->object;
1715
1716 return method;
1717 }
1718
1719 IOExternalTrap * IOUserClient::
1720 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1721 {
1722 IOExternalTrap *trap = getExternalTrapForIndex(index);
1723
1724 if (trap) {
1725 *targetP = trap->object;
1726 }
1727
1728 return trap;
1729 }
1730 #pragma clang diagnostic pop
1731
1732 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1733 {
1734 mach_port_t port;
1735 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1736
1737 if (MACH_PORT_NULL != port)
1738 iokit_release_port_send(port);
1739
1740 return (kIOReturnSuccess);
1741 }
1742
1743 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1744 {
1745 if (MACH_PORT_NULL != port)
1746 iokit_release_port_send(port);
1747
1748 return (kIOReturnSuccess);
1749 }
1750
1751 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1752 IOReturn result, void *args[], UInt32 numArgs)
1753 {
1754 OSAsyncReference64 reference64;
1755 io_user_reference_t args64[kMaxAsyncArgs];
1756 unsigned int idx;
1757
1758 if (numArgs > kMaxAsyncArgs)
1759 return kIOReturnMessageTooLarge;
1760
1761 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1762 reference64[idx] = REF64(reference[idx]);
1763
1764 for (idx = 0; idx < numArgs; idx++)
1765 args64[idx] = REF64(args[idx]);
1766
1767 return (sendAsyncResult64(reference64, result, args64, numArgs));
1768 }
1769
1770 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1771 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1772 {
1773 return _sendAsyncResult64(reference, result, args, numArgs, options);
1774 }
1775
1776 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1777 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1778 {
1779 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1780 }
1781
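// Common async-completion sender: format a 32-bit or 64-bit reply depending on
// kIOUCAsync64Flag in the reference and send it to the client's wake port.
// kIOUserNotifyOptionCanDrop sends with a zero timeout so a full message queue
// drops the reply rather than reporting an error.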
1782 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1783 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1784 {
1785 struct ReplyMsg
1786 {
1787 mach_msg_header_t msgHdr;
1788 union
1789 {
1790 struct
1791 {
1792 OSNotificationHeader notifyHdr;
1793 IOAsyncCompletionContent asyncContent;
1794 uint32_t args[kMaxAsyncArgs];
1795 } msg32;
1796 struct
1797 {
1798 OSNotificationHeader64 notifyHdr;
1799 IOAsyncCompletionContent asyncContent;
1800 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1801 } msg64;
1802 } m;
1803 };
1804 ReplyMsg replyMsg;
1805 mach_port_t replyPort;
1806 kern_return_t kr;
1807
1808 // If no reply port, do nothing.
1809 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1810 if (replyPort == MACH_PORT_NULL)
1811 return kIOReturnSuccess;
1812
1813 if (numArgs > kMaxAsyncArgs)
1814 return kIOReturnMessageTooLarge;
1815
1816 bzero(&replyMsg, sizeof(replyMsg));
1817 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1818 0 /*local*/);
1819 replyMsg.msgHdr.msgh_remote_port = replyPort;
1820 replyMsg.msgHdr.msgh_local_port = 0;
1821 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1822 if (kIOUCAsync64Flag & reference[0])
1823 {
1824 replyMsg.msgHdr.msgh_size =
1825 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1826 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1827 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1828 + numArgs * sizeof(io_user_reference_t);
1829 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1830 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1831
1832 replyMsg.m.msg64.asyncContent.result = result;
1833 if (numArgs)
1834 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1835 }
1836 else
1837 {
1838 unsigned int idx;
1839
1840 replyMsg.msgHdr.msgh_size =
1841 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1842 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1843
1844 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1845 + numArgs * sizeof(uint32_t);
1846 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1847
1848 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1849 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1850
1851 replyMsg.m.msg32.asyncContent.result = result;
1852
1853 for (idx = 0; idx < numArgs; idx++)
1854 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1855 }
1856
1857 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1858 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1859 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1860 } else {
1861 /* Fail on full queue. */
1862 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1863 replyMsg.msgHdr.msgh_size);
1864 }
1865 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1866 {
1867 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1868 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1869 }
1870 return kr;
1871 }
1872
1873
1874 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1875
1876 extern "C" {
1877
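// Argument validation used by the MIG server routines below: cast the incoming
// io_object_t to the expected class or fail with kIOReturnBadArgument.
// CHECKLOCKED additionally unwraps the IOUserIterator holding the real object.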
1878 #define CHECK(cls,obj,out) \
1879 cls * out; \
1880 if( !(out = OSDynamicCast( cls, obj))) \
1881 return( kIOReturnBadArgument )
1882
1883 #define CHECKLOCKED(cls,obj,out) \
1884 IOUserIterator * oIter; \
1885 cls * out; \
1886 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1887 return (kIOReturnBadArgument); \
1888 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1889 return (kIOReturnBadArgument)
1890
1891 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1892
1893 // Create a vm_map_copy_t or kalloc'ed data for memory
1894 // to be copied out. ipc will free after the copyout.
1895
1896 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1897 io_buf_ptr_t * buf )
1898 {
1899 kern_return_t err;
1900 vm_map_copy_t copy;
1901
1902 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1903 false /* src_destroy */, &copy);
1904
1905 assert( err == KERN_SUCCESS );
1906 if( err == KERN_SUCCESS )
1907 *buf = (char *) copy;
1908
1909 return( err );
1910 }
1911
1912 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1913
1914 /* Routine io_server_version */
1915 kern_return_t is_io_server_version(
1916 mach_port_t master_port,
1917 uint64_t *version)
1918 {
1919 *version = IOKIT_SERVER_VERSION;
1920 return (kIOReturnSuccess);
1921 }
1922
1923 /* Routine io_object_get_class */
1924 kern_return_t is_io_object_get_class(
1925 io_object_t object,
1926 io_name_t className )
1927 {
1928 const OSMetaClass* my_obj = NULL;
1929
1930 if( !object)
1931 return( kIOReturnBadArgument );
1932
1933 my_obj = object->getMetaClass();
1934 if (!my_obj) {
1935 return (kIOReturnNotFound);
1936 }
1937
1938 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1939
1940 return( kIOReturnSuccess );
1941 }
1942
1943 /* Routine io_object_get_superclass */
1944 kern_return_t is_io_object_get_superclass(
1945 mach_port_t master_port,
1946 io_name_t obj_name,
1947 io_name_t class_name)
1948 {
1949 IOReturn ret;
1950 const OSMetaClass * meta;
1951 const OSMetaClass * super;
1952 const OSSymbol * name;
1953 const char * cstr;
1954
1955 if (!obj_name || !class_name) return (kIOReturnBadArgument);
1956 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1957
1958 ret = kIOReturnNotFound;
1959 meta = 0;
1960 do
1961 {
1962 name = OSSymbol::withCString(obj_name);
1963 if (!name) break;
1964 meta = OSMetaClass::copyMetaClassWithName(name);
1965 if (!meta) break;
1966 super = meta->getSuperClass();
1967 if (!super) break;
1968 cstr = super->getClassName();
1969 if (!cstr) break;
1970 strlcpy(class_name, cstr, sizeof(io_name_t));
1971 ret = kIOReturnSuccess;
1972 }
1973 while (false);
1974
1975 OSSafeReleaseNULL(name);
1976 if (meta) meta->releaseMetaClass();
1977
1978 return (ret);
1979 }
1980
1981 /* Routine io_object_get_bundle_identifier */
1982 kern_return_t is_io_object_get_bundle_identifier(
1983 mach_port_t master_port,
1984 io_name_t obj_name,
1985 io_name_t bundle_name)
1986 {
1987 IOReturn ret;
1988 const OSMetaClass * meta;
1989 const OSSymbol * name;
1990 const OSSymbol * identifier;
1991 const char * cstr;
1992
1993 if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
1994 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1995
1996 ret = kIOReturnNotFound;
1997 meta = 0;
1998 do
1999 {
2000 name = OSSymbol::withCString(obj_name);
2001 if (!name) break;
2002 meta = OSMetaClass::copyMetaClassWithName(name);
2003 if (!meta) break;
2004 identifier = meta->getKmodName();
2005 if (!identifier) break;
2006 cstr = identifier->getCStringNoCopy();
2007 if (!cstr) break;
2008 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2009 ret = kIOReturnSuccess;
2010 }
2011 while (false);
2012
2013 OSSafeReleaseNULL(name);
2014 if (meta) meta->releaseMetaClass();
2015
2016 return (ret);
2017 }
2018
2019 /* Routine io_object_conforms_to */
2020 kern_return_t is_io_object_conforms_to(
2021 io_object_t object,
2022 io_name_t className,
2023 boolean_t *conforms )
2024 {
2025 if( !object)
2026 return( kIOReturnBadArgument );
2027
2028 *conforms = (0 != object->metaCast( className ));
2029
2030 return( kIOReturnSuccess );
2031 }
2032
2033 /* Routine io_object_get_retain_count */
2034 kern_return_t is_io_object_get_retain_count(
2035 io_object_t object,
2036 uint32_t *retainCount )
2037 {
2038 if( !object)
2039 return( kIOReturnBadArgument );
2040
2041 *retainCount = object->getRetainCount();
2042 return( kIOReturnSuccess );
2043 }
2044
2045 /* Routine io_iterator_next */
2046 kern_return_t is_io_iterator_next(
2047 io_object_t iterator,
2048 io_object_t *object )
2049 {
2050 IOReturn ret;
2051 OSObject * obj;
2052
2053 CHECK( OSIterator, iterator, iter );
2054
2055 obj = iter->getNextObject();
2056 if( obj) {
2057 obj->retain();
2058 *object = obj;
2059 ret = kIOReturnSuccess;
2060 } else
2061 ret = kIOReturnNoDevice;
2062
2063 return (ret);
2064 }
2065
2066 /* Routine io_iterator_reset */
2067 kern_return_t is_io_iterator_reset(
2068 io_object_t iterator )
2069 {
2070 CHECK( OSIterator, iterator, iter );
2071
2072 iter->reset();
2073
2074 return( kIOReturnSuccess );
2075 }
2076
2077 /* Routine io_iterator_is_valid */
2078 kern_return_t is_io_iterator_is_valid(
2079 io_object_t iterator,
2080 boolean_t *is_valid )
2081 {
2082 CHECK( OSIterator, iterator, iter );
2083
2084 *is_valid = iter->isValid();
2085
2086 return( kIOReturnSuccess );
2087 }
2088
2089
2090 static kern_return_t internal_io_service_match_property_table(
2091 io_service_t _service,
2092 const char * matching,
2093 mach_msg_type_number_t matching_size,
2094 boolean_t *matches)
2095 {
2096 CHECK( IOService, _service, service );
2097
2098 kern_return_t kr;
2099 OSObject * obj;
2100 OSDictionary * dict;
2101
2102 assert(matching_size);
2103 obj = OSUnserializeXML(matching, matching_size);
2104
2105 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2106 *matches = service->passiveMatch( dict );
2107 kr = kIOReturnSuccess;
2108 } else
2109 kr = kIOReturnBadArgument;
2110
2111 if( obj)
2112 obj->release();
2113
2114 return( kr );
2115 }
2116
2117 /* Routine io_service_match_property_table */
2118 kern_return_t is_io_service_match_property_table(
2119 io_service_t service,
2120 io_string_t matching,
2121 boolean_t *matches )
2122 {
2123 return (kIOReturnUnsupported);
2124 }
2125
2126
2127 /* Routine io_service_match_property_table_ool */
2128 kern_return_t is_io_service_match_property_table_ool(
2129 io_object_t service,
2130 io_buf_ptr_t matching,
2131 mach_msg_type_number_t matchingCnt,
2132 kern_return_t *result,
2133 boolean_t *matches )
2134 {
2135 kern_return_t kr;
2136 vm_offset_t data;
2137 vm_map_offset_t map_data;
2138
2139 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2140 data = CAST_DOWN(vm_offset_t, map_data);
2141
2142 if( KERN_SUCCESS == kr) {
2143 // must return success after vm_map_copyout() succeeds
2144 *result = internal_io_service_match_property_table(service,
2145 (const char *)data, matchingCnt, matches );
2146 vm_deallocate( kernel_map, data, matchingCnt );
2147 }
2148
2149 return( kr );
2150 }
2151
2152 /* Routine io_service_match_property_table_bin */
2153 kern_return_t is_io_service_match_property_table_bin(
2154 io_object_t service,
2155 io_struct_inband_t matching,
2156 mach_msg_type_number_t matchingCnt,
2157 boolean_t *matches)
2158 {
2159 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2160 }
2161
2162 static kern_return_t internal_io_service_get_matching_services(
2163 mach_port_t master_port,
2164 const char * matching,
2165 mach_msg_type_number_t matching_size,
2166 io_iterator_t *existing )
2167 {
2168 kern_return_t kr;
2169 OSObject * obj;
2170 OSDictionary * dict;
2171
2172 if( master_port != master_device_port)
2173 return( kIOReturnNotPrivileged);
2174
2175 assert(matching_size);
2176 obj = OSUnserializeXML(matching, matching_size);
2177
2178 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2179 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2180 kr = kIOReturnSuccess;
2181 } else
2182 kr = kIOReturnBadArgument;
2183
2184 if( obj)
2185 obj->release();
2186
2187 return( kr );
2188 }
2189
2190 /* Routine io_service_get_matching_services */
2191 kern_return_t is_io_service_get_matching_services(
2192 mach_port_t master_port,
2193 io_string_t matching,
2194 io_iterator_t *existing )
2195 {
2196 return (kIOReturnUnsupported);
2197 }
2198
2199 /* Routine io_service_get_matching_services_ool */
2200 kern_return_t is_io_service_get_matching_services_ool(
2201 mach_port_t master_port,
2202 io_buf_ptr_t matching,
2203 mach_msg_type_number_t matchingCnt,
2204 kern_return_t *result,
2205 io_object_t *existing )
2206 {
2207 kern_return_t kr;
2208 vm_offset_t data;
2209 vm_map_offset_t map_data;
2210
2211 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2212 data = CAST_DOWN(vm_offset_t, map_data);
2213
2214 if( KERN_SUCCESS == kr) {
2215 // must return success after vm_map_copyout() succeeds
2216 // and mig will copy out objects on success
2217 *existing = 0;
2218 *result = internal_io_service_get_matching_services(master_port,
2219 (const char *) data, matchingCnt, existing);
2220 vm_deallocate( kernel_map, data, matchingCnt );
2221 }
2222
2223 return( kr );
2224 }
2225
2226 /* Routine io_service_get_matching_services_bin */
2227 kern_return_t is_io_service_get_matching_services_bin(
2228 mach_port_t master_port,
2229 io_struct_inband_t matching,
2230 mach_msg_type_number_t matchingCnt,
2231 io_object_t *existing)
2232 {
2233 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2234 }
2235
2236
2237 static kern_return_t internal_io_service_get_matching_service(
2238 mach_port_t master_port,
2239 const char * matching,
2240 mach_msg_type_number_t matching_size,
2241 io_service_t *service )
2242 {
2243 kern_return_t kr;
2244 OSObject * obj;
2245 OSDictionary * dict;
2246
2247 if( master_port != master_device_port)
2248 return( kIOReturnNotPrivileged);
2249
2250 assert(matching_size);
2251 obj = OSUnserializeXML(matching, matching_size);
2252
2253 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2254 *service = IOService::copyMatchingService( dict );
2255 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2256 } else
2257 kr = kIOReturnBadArgument;
2258
2259 if( obj)
2260 obj->release();
2261
2262 return( kr );
2263 }
2264
2265 /* Routine io_service_get_matching_service */
2266 kern_return_t is_io_service_get_matching_service(
2267 mach_port_t master_port,
2268 io_string_t matching,
2269 io_service_t *service )
2270 {
2271 return (kIOReturnUnsupported);
2272 }
2273
2274 /* Routine io_service_get_matching_service_ool */
2275 kern_return_t is_io_service_get_matching_service_ool(
2276 mach_port_t master_port,
2277 io_buf_ptr_t matching,
2278 mach_msg_type_number_t matchingCnt,
2279 kern_return_t *result,
2280 io_object_t *service )
2281 {
2282 kern_return_t kr;
2283 vm_offset_t data;
2284 vm_map_offset_t map_data;
2285
2286 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2287 data = CAST_DOWN(vm_offset_t, map_data);
2288
2289 if( KERN_SUCCESS == kr) {
2290 // must return success after vm_map_copyout() succeeds
2291 // and mig will copy out objects on success
2292 *service = 0;
2293 *result = internal_io_service_get_matching_service(master_port,
2294 (const char *) data, matchingCnt, service );
2295 vm_deallocate( kernel_map, data, matchingCnt );
2296 }
2297
2298 return( kr );
2299 }
2300
2301 /* Routine io_service_get_matching_service_bin */
2302 kern_return_t is_io_service_get_matching_service_bin(
2303 mach_port_t master_port,
2304 io_struct_inband_t matching,
2305 mach_msg_type_number_t matchingCnt,
2306 io_object_t *service)
2307 {
2308 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2309 }
2310
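/*
 * internal_io_service_add_notification() wires a user-supplied wake port to an
 * IOServiceUserNotification and arms it via IOService::addMatchingNotification().
 * Roughly how a client reaches it through IOKitLib (illustrative user-space
 * sketch, not part of this kernel file; callback/refcon are placeholders):
 *
 *     IONotificationPortRef np = IONotificationPortCreate(kIOMasterPortDefault);
 *     io_iterator_t iter;
 *     IOServiceAddMatchingNotification(np, kIOMatchedNotification,
 *         IOServiceMatching("IOMedia"), callback, refcon, &iter);
 *     // drain 'iter' once to arm the notification for future matches
 *
 * The reference/referenceCnt pair rides along in the async wake message; the
 * _64 variants differ only in the width of that reference.
 */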
2311 static kern_return_t internal_io_service_add_notification(
2312 mach_port_t master_port,
2313 io_name_t notification_type,
2314 const char * matching,
2315 size_t matching_size,
2316 mach_port_t port,
2317 void * reference,
2318 vm_size_t referenceSize,
2319 bool client64,
2320 io_object_t * notification )
2321 {
2322 IOServiceUserNotification * userNotify = 0;
2323 IONotifier * notify = 0;
2324 const OSSymbol * sym;
2325 OSDictionary * dict = 0;
2326 IOReturn err;
2327 unsigned long int userMsgType;
2328
2329 if( master_port != master_device_port)
2330 return( kIOReturnNotPrivileged);
2331
2332 do {
2333 err = kIOReturnNoResources;
2334
2335 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2336
2337 if( !(sym = OSSymbol::withCString( notification_type )))
2338 { err = kIOReturnNoResources; continue; }
2339
2340 assert(matching_size);
2341 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2342 if (!dict) {
2343 err = kIOReturnBadArgument;
2344 continue;
2345 }
2346
2347 if( (sym == gIOPublishNotification)
2348 || (sym == gIOFirstPublishNotification))
2349 userMsgType = kIOServicePublishNotificationType;
2350 else if( (sym == gIOMatchedNotification)
2351 || (sym == gIOFirstMatchNotification))
2352 userMsgType = kIOServiceMatchedNotificationType;
2353 else if ((sym == gIOTerminatedNotification)
2354 || (sym == gIOWillTerminateNotification))
2355 userMsgType = kIOServiceTerminatedNotificationType;
2356 else
2357 userMsgType = kLastIOKitNotificationType;
2358
2359 userNotify = new IOServiceUserNotification;
2360
2361 if( userNotify && !userNotify->init( port, userMsgType,
2362 reference, referenceSize, client64)) {
2363 userNotify->release();
2364 userNotify = 0;
2365 }
2366 if( !userNotify)
2367 continue;
2368
2369 notify = IOService::addMatchingNotification( sym, dict,
2370 &userNotify->_handler, userNotify );
2371 if( notify) {
2372 *notification = userNotify;
2373 userNotify->setNotification( notify );
2374 err = kIOReturnSuccess;
2375 } else
2376 err = kIOReturnUnsupported;
2377
2378 } while( false );
2379
2380 if ((kIOReturnSuccess != err) && userNotify)
2381 {
2382 userNotify->invalidatePort();
2383 userNotify->release();
2384 userNotify = 0;
2385 }
2386
2387 if( sym)
2388 sym->release();
2389 if( dict)
2390 dict->release();
2391
2392 return( err );
2393 }
2394
2395
2396 /* Routine io_service_add_notification */
2397 kern_return_t is_io_service_add_notification(
2398 mach_port_t master_port,
2399 io_name_t notification_type,
2400 io_string_t matching,
2401 mach_port_t port,
2402 io_async_ref_t reference,
2403 mach_msg_type_number_t referenceCnt,
2404 io_object_t * notification )
2405 {
2406 return (kIOReturnUnsupported);
2407 }
2408
2409 /* Routine io_service_add_notification_64 */
2410 kern_return_t is_io_service_add_notification_64(
2411 mach_port_t master_port,
2412 io_name_t notification_type,
2413 io_string_t matching,
2414 mach_port_t wake_port,
2415 io_async_ref64_t reference,
2416 mach_msg_type_number_t referenceCnt,
2417 io_object_t *notification )
2418 {
2419 return (kIOReturnUnsupported);
2420 }
2421
2422 /* Routine io_service_add_notification_bin */
2423 kern_return_t is_io_service_add_notification_bin
2424 (
2425 mach_port_t master_port,
2426 io_name_t notification_type,
2427 io_struct_inband_t matching,
2428 mach_msg_type_number_t matchingCnt,
2429 mach_port_t wake_port,
2430 io_async_ref_t reference,
2431 mach_msg_type_number_t referenceCnt,
2432 io_object_t *notification)
2433 {
2434 return (internal_io_service_add_notification(master_port, notification_type,
2435 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2436 false, notification));
2437 }
2438
2439 /* Routine io_service_add_notification_bin_64 */
2440 kern_return_t is_io_service_add_notification_bin_64
2441 (
2442 mach_port_t master_port,
2443 io_name_t notification_type,
2444 io_struct_inband_t matching,
2445 mach_msg_type_number_t matchingCnt,
2446 mach_port_t wake_port,
2447 io_async_ref64_t reference,
2448 mach_msg_type_number_t referenceCnt,
2449 io_object_t *notification)
2450 {
2451 return (internal_io_service_add_notification(master_port, notification_type,
2452 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2453 true, notification));
2454 }
2455
2456 static kern_return_t internal_io_service_add_notification_ool(
2457 mach_port_t master_port,
2458 io_name_t notification_type,
2459 io_buf_ptr_t matching,
2460 mach_msg_type_number_t matchingCnt,
2461 mach_port_t wake_port,
2462 void * reference,
2463 vm_size_t referenceSize,
2464 bool client64,
2465 kern_return_t *result,
2466 io_object_t *notification )
2467 {
2468 kern_return_t kr;
2469 vm_offset_t data;
2470 vm_map_offset_t map_data;
2471
2472 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2473 data = CAST_DOWN(vm_offset_t, map_data);
2474
2475 if( KERN_SUCCESS == kr) {
2476 // must return success after vm_map_copyout() succeeds
2477 // and mig will copy out objects on success
2478 *notification = 0;
2479 *result = internal_io_service_add_notification( master_port, notification_type,
2480 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2481 vm_deallocate( kernel_map, data, matchingCnt );
2482 }
2483
2484 return( kr );
2485 }
2486
2487 /* Routine io_service_add_notification_ool */
2488 kern_return_t is_io_service_add_notification_ool(
2489 mach_port_t master_port,
2490 io_name_t notification_type,
2491 io_buf_ptr_t matching,
2492 mach_msg_type_number_t matchingCnt,
2493 mach_port_t wake_port,
2494 io_async_ref_t reference,
2495 mach_msg_type_number_t referenceCnt,
2496 kern_return_t *result,
2497 io_object_t *notification )
2498 {
2499 return (internal_io_service_add_notification_ool(master_port, notification_type,
2500 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2501 false, result, notification));
2502 }
2503
2504 /* Routine io_service_add_notification_ool_64 */
2505 kern_return_t is_io_service_add_notification_ool_64(
2506 mach_port_t master_port,
2507 io_name_t notification_type,
2508 io_buf_ptr_t matching,
2509 mach_msg_type_number_t matchingCnt,
2510 mach_port_t wake_port,
2511 io_async_ref64_t reference,
2512 mach_msg_type_number_t referenceCnt,
2513 kern_return_t *result,
2514 io_object_t *notification )
2515 {
2516 return (internal_io_service_add_notification_ool(master_port, notification_type,
2517 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2518 true, result, notification));
2519 }
2520
2521 /* Routine io_service_add_notification_old */
2522 kern_return_t is_io_service_add_notification_old(
2523 mach_port_t master_port,
2524 io_name_t notification_type,
2525 io_string_t matching,
2526 mach_port_t port,
2527 // for binary compatibility reasons, this must be natural_t for ILP32
2528 natural_t ref,
2529 io_object_t * notification )
2530 {
2531 return( is_io_service_add_notification( master_port, notification_type,
2532 matching, port, &ref, 1, notification ));
2533 }
2534
2535
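/*
 * Interest notifications deliver driver interest messages (e.g.
 * kIOGeneralInterest, kIOBusyInterest) to a user-space port via
 * IOServiceMessageUserNotification. Illustrative user-space path, assuming
 * the public IOKitLib wrapper (not part of this kernel file; interestCallback
 * and refcon are placeholders):
 *
 *     io_object_t note;
 *     IOServiceAddInterestNotification(notifyPort, service, kIOGeneralInterest,
 *         interestCallback, refcon, &note);
 *
 * which lands in is_io_service_add_interest_notification{,_64} below.
 */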
2536 static kern_return_t internal_io_service_add_interest_notification(
2537 io_object_t _service,
2538 io_name_t type_of_interest,
2539 mach_port_t port,
2540 void * reference,
2541 vm_size_t referenceSize,
2542 bool client64,
2543 io_object_t * notification )
2544 {
2545
2546 IOServiceMessageUserNotification * userNotify = 0;
2547 IONotifier * notify = 0;
2548 const OSSymbol * sym;
2549 IOReturn err;
2550
2551 CHECK( IOService, _service, service );
2552
2553 err = kIOReturnNoResources;
2554 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2555
2556 userNotify = new IOServiceMessageUserNotification;
2557
2558 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2559 reference, referenceSize,
2560 kIOUserNotifyMaxMessageSize,
2561 client64 )) {
2562 userNotify->release();
2563 userNotify = 0;
2564 }
2565 if( !userNotify)
2566 continue;
2567
2568 notify = service->registerInterest( sym,
2569 &userNotify->_handler, userNotify );
2570 if( notify) {
2571 *notification = userNotify;
2572 userNotify->setNotification( notify );
2573 err = kIOReturnSuccess;
2574 } else
2575 err = kIOReturnUnsupported;
2576
2577 sym->release();
2578
2579 } while( false );
2580
2581 if ((kIOReturnSuccess != err) && userNotify)
2582 {
2583 userNotify->invalidatePort();
2584 userNotify->release();
2585 userNotify = 0;
2586 }
2587
2588 return( err );
2589 }
2590
2591 /* Routine io_service_add_interest_notification */
2592 kern_return_t is_io_service_add_interest_notification(
2593 io_object_t service,
2594 io_name_t type_of_interest,
2595 mach_port_t port,
2596 io_async_ref_t reference,
2597 mach_msg_type_number_t referenceCnt,
2598 io_object_t * notification )
2599 {
2600 return (internal_io_service_add_interest_notification(service, type_of_interest,
2601 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2602 }
2603
2604 /* Routine io_service_add_interest_notification_64 */
2605 kern_return_t is_io_service_add_interest_notification_64(
2606 io_object_t service,
2607 io_name_t type_of_interest,
2608 mach_port_t wake_port,
2609 io_async_ref64_t reference,
2610 mach_msg_type_number_t referenceCnt,
2611 io_object_t *notification )
2612 {
2613 return (internal_io_service_add_interest_notification(service, type_of_interest,
2614 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2615 }
2616
2617
2618 /* Routine io_service_acknowledge_notification */
2619 kern_return_t is_io_service_acknowledge_notification(
2620 io_object_t _service,
2621 natural_t notify_ref,
2622 natural_t response )
2623 {
2624 CHECK( IOService, _service, service );
2625
2626 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2627 (IOOptionBits) response ));
2628
2629 }
2630
2631 /* Routine io_connect_get_notification_semaphore */
2632 kern_return_t is_io_connect_get_notification_semaphore(
2633 io_connect_t connection,
2634 natural_t notification_type,
2635 semaphore_t *semaphore )
2636 {
2637 CHECK( IOUserClient, connection, client );
2638
2639 IOStatisticsClientCall();
2640 return( client->getNotificationSemaphore( (UInt32) notification_type,
2641 semaphore ));
2642 }
2643
2644 /* Routine io_registry_get_root_entry */
2645 kern_return_t is_io_registry_get_root_entry(
2646 mach_port_t master_port,
2647 io_object_t *root )
2648 {
2649 IORegistryEntry * entry;
2650
2651 if( master_port != master_device_port)
2652 return( kIOReturnNotPrivileged);
2653
2654 entry = IORegistryEntry::getRegistryRoot();
2655 if( entry)
2656 entry->retain();
2657 *root = entry;
2658
2659 return( kIOReturnSuccess );
2660 }
2661
2662 /* Routine io_registry_create_iterator */
2663 kern_return_t is_io_registry_create_iterator(
2664 mach_port_t master_port,
2665 io_name_t plane,
2666 uint32_t options,
2667 io_object_t *iterator )
2668 {
2669 if( master_port != master_device_port)
2670 return( kIOReturnNotPrivileged);
2671
2672 *iterator = IOUserIterator::withIterator(
2673 IORegistryIterator::iterateOver(
2674 IORegistryEntry::getPlane( plane ), options ));
2675
2676 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2677 }
2678
2679 /* Routine io_registry_entry_create_iterator */
2680 kern_return_t is_io_registry_entry_create_iterator(
2681 io_object_t registry_entry,
2682 io_name_t plane,
2683 uint32_t options,
2684 io_object_t *iterator )
2685 {
2686 CHECK( IORegistryEntry, registry_entry, entry );
2687
2688 *iterator = IOUserIterator::withIterator(
2689 IORegistryIterator::iterateOver( entry,
2690 IORegistryEntry::getPlane( plane ), options ));
2691
2692 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2693 }
2694
2695 /* Routine io_registry_iterator_enter_entry */
2696 kern_return_t is_io_registry_iterator_enter_entry(
2697 io_object_t iterator )
2698 {
2699 CHECKLOCKED( IORegistryIterator, iterator, iter );
2700
2701 IOLockLock(oIter->lock);
2702 iter->enterEntry();
2703 IOLockUnlock(oIter->lock);
2704
2705 return( kIOReturnSuccess );
2706 }
2707
2708 /* Routine io_registry_iterator_exit_entry */
2709 kern_return_t is_io_registry_iterator_exit_entry(
2710 io_object_t iterator )
2711 {
2712 bool didIt;
2713
2714 CHECKLOCKED( IORegistryIterator, iterator, iter );
2715
2716 IOLockLock(oIter->lock);
2717 didIt = iter->exitEntry();
2718 IOLockUnlock(oIter->lock);
2719
2720 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2721 }
2722
2723 /* Routine io_registry_entry_from_path */
2724 kern_return_t is_io_registry_entry_from_path(
2725 mach_port_t master_port,
2726 io_string_t path,
2727 io_object_t *registry_entry )
2728 {
2729 IORegistryEntry * entry;
2730
2731 if( master_port != master_device_port)
2732 return( kIOReturnNotPrivileged);
2733
2734 entry = IORegistryEntry::fromPath( path );
2735
2736 *registry_entry = entry;
2737
2738 return( kIOReturnSuccess );
2739 }
2740
2741
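/*
 * Registry paths take the form "<plane>:/<path>", e.g. "IOService:/" or
 * "IODeviceTree:/chosen". Illustrative user-space sketch (not part of this
 * kernel file):
 *
 *     io_registry_entry_t chosen =
 *         IORegistryEntryFromPath(kIOMasterPortDefault, "IODeviceTree:/chosen");
 *
 * The _ool variant below accepts paths longer than io_string_inband_t by
 * passing them out-of-line; note its split status reporting: the MIG return
 * covers the copyout itself, while *result carries the lookup status.
 */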
2742 /* Routine io_registry_entry_from_path_ool */
2743 kern_return_t is_io_registry_entry_from_path_ool(
2744 mach_port_t master_port,
2745 io_string_inband_t path,
2746 io_buf_ptr_t path_ool,
2747 mach_msg_type_number_t path_oolCnt,
2748 kern_return_t *result,
2749 io_object_t *registry_entry)
2750 {
2751 IORegistryEntry * entry;
2752 vm_map_offset_t map_data;
2753 const char * cpath;
2754 IOReturn res;
2755 kern_return_t err;
2756
2757 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2758
2759 map_data = 0;
2760 entry = 0;
2761 res = err = KERN_SUCCESS;
2762 if (path[0]) cpath = path;
2763 else
2764 {
2765 if (!path_oolCnt) return(kIOReturnBadArgument);
2766 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2767
2768 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2769 if (KERN_SUCCESS == err)
2770 {
2771 // must return success to mig after vm_map_copyout() succeeds, so result is actual
2772 cpath = CAST_DOWN(const char *, map_data);
2773 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2774 }
2775 }
2776
2777 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2778 {
2779 entry = IORegistryEntry::fromPath(cpath);
2780 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2781 }
2782
2783 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2784
2785 if (KERN_SUCCESS != err) res = err;
2786 *registry_entry = entry;
2787 *result = res;
2788
2789 return (err);
2790 }
2791
2792
2793 /* Routine io_registry_entry_in_plane */
2794 kern_return_t is_io_registry_entry_in_plane(
2795 io_object_t registry_entry,
2796 io_name_t plane,
2797 boolean_t *inPlane )
2798 {
2799 CHECK( IORegistryEntry, registry_entry, entry );
2800
2801 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2802
2803 return( kIOReturnSuccess );
2804 }
2805
2806
2807 /* Routine io_registry_entry_get_path */
2808 kern_return_t is_io_registry_entry_get_path(
2809 io_object_t registry_entry,
2810 io_name_t plane,
2811 io_string_t path )
2812 {
2813 int length;
2814 CHECK( IORegistryEntry, registry_entry, entry );
2815
2816 length = sizeof( io_string_t);
2817 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2818 return( kIOReturnSuccess );
2819 else
2820 return( kIOReturnBadArgument );
2821 }
2822
2823 /* Routine io_registry_entry_get_path_ool */
2824 kern_return_t is_io_registry_entry_get_path_ool(
2825 io_object_t registry_entry,
2826 io_name_t plane,
2827 io_string_inband_t path,
2828 io_buf_ptr_t *path_ool,
2829 mach_msg_type_number_t *path_oolCnt)
2830 {
2831 enum { kMaxPath = 16384 };
2832 IOReturn err;
2833 int length;
2834 char * buf;
2835
2836 CHECK( IORegistryEntry, registry_entry, entry );
2837
2838 *path_ool = NULL;
2839 *path_oolCnt = 0;
2840 length = sizeof(io_string_inband_t);
2841 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2842 else
2843 {
2844 length = kMaxPath;
2845 buf = IONew(char, length);
2846 if (!buf) err = kIOReturnNoMemory;
2847 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2848 else
2849 {
2850 *path_oolCnt = length;
2851 err = copyoutkdata(buf, length, path_ool);
2852 }
2853 if (buf) IODelete(buf, char, kMaxPath);
2854 }
2855
2856 return (err);
2857 }
2858
2859
2860 /* Routine io_registry_entry_get_name */
2861 kern_return_t is_io_registry_entry_get_name(
2862 io_object_t registry_entry,
2863 io_name_t name )
2864 {
2865 CHECK( IORegistryEntry, registry_entry, entry );
2866
2867 strncpy( name, entry->getName(), sizeof( io_name_t));
2868
2869 return( kIOReturnSuccess );
2870 }
2871
2872 /* Routine io_registry_entry_get_name_in_plane */
2873 kern_return_t is_io_registry_entry_get_name_in_plane(
2874 io_object_t registry_entry,
2875 io_name_t planeName,
2876 io_name_t name )
2877 {
2878 const IORegistryPlane * plane;
2879 CHECK( IORegistryEntry, registry_entry, entry );
2880
2881 if( planeName[0])
2882 plane = IORegistryEntry::getPlane( planeName );
2883 else
2884 plane = 0;
2885
2886 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2887
2888 return( kIOReturnSuccess );
2889 }
2890
2891 /* Routine io_registry_entry_get_location_in_plane */
2892 kern_return_t is_io_registry_entry_get_location_in_plane(
2893 io_object_t registry_entry,
2894 io_name_t planeName,
2895 io_name_t location )
2896 {
2897 const IORegistryPlane * plane;
2898 CHECK( IORegistryEntry, registry_entry, entry );
2899
2900 if( planeName[0])
2901 plane = IORegistryEntry::getPlane( planeName );
2902 else
2903 plane = 0;
2904
2905 const char * cstr = entry->getLocation( plane );
2906
2907 if( cstr) {
2908 strncpy( location, cstr, sizeof( io_name_t));
2909 return( kIOReturnSuccess );
2910 } else
2911 return( kIOReturnNotFound );
2912 }
2913
2914 /* Routine io_registry_entry_get_registry_entry_id */
2915 kern_return_t is_io_registry_entry_get_registry_entry_id(
2916 io_object_t registry_entry,
2917 uint64_t *entry_id )
2918 {
2919 CHECK( IORegistryEntry, registry_entry, entry );
2920
2921 *entry_id = entry->getRegistryEntryID();
2922
2923 return (kIOReturnSuccess);
2924 }
2925
2926 /* Routine io_registry_entry_get_property_bytes */
2927 kern_return_t is_io_registry_entry_get_property_bytes(
2928 io_object_t registry_entry,
2929 io_name_t property_name,
2930 io_struct_inband_t buf,
2931 mach_msg_type_number_t *dataCnt )
2932 {
2933 OSObject * obj;
2934 OSData * data;
2935 OSString * str;
2936 OSBoolean * boo;
2937 OSNumber * off;
2938 UInt64 offsetBytes;
2939 unsigned int len = 0;
2940 const void * bytes = 0;
2941 IOReturn ret = kIOReturnSuccess;
2942
2943 CHECK( IORegistryEntry, registry_entry, entry );
2944
2945 #if CONFIG_MACF
2946 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2947 return kIOReturnNotPermitted;
2948 #endif
2949
2950 obj = entry->copyProperty(property_name);
2951 if( !obj)
2952 return( kIOReturnNoResources );
2953
2954 // One day OSData will be a common container base class
2955 // until then...
2956 if( (data = OSDynamicCast( OSData, obj ))) {
2957 len = data->getLength();
2958 bytes = data->getBytesNoCopy();
2959 if (!data->isSerializable()) len = 0;
2960
2961 } else if( (str = OSDynamicCast( OSString, obj ))) {
2962 len = str->getLength() + 1;
2963 bytes = str->getCStringNoCopy();
2964
2965 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2966 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2967 bytes = boo->isTrue() ? "Yes" : "No";
2968
2969 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2970 offsetBytes = off->unsigned64BitValue();
2971 len = off->numberOfBytes();
2972 if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
2973 bytes = &offsetBytes;
2974 #ifdef __BIG_ENDIAN__
2975 bytes = (const void *)
2976 (((uintptr_t) bytes) + (sizeof( UInt64) - len));
2977 #endif
2978
2979 } else
2980 ret = kIOReturnBadArgument;
2981
2982 if( bytes) {
2983 if( *dataCnt < len)
2984 ret = kIOReturnIPCError;
2985 else {
2986 *dataCnt = len;
2987 bcopy( bytes, buf, len );
2988 }
2989 }
2990 obj->release();
2991
2992 return( ret );
2993 }
2994
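/*
 * The routine above flattens a handful of OSObject types into the caller's
 * inband buffer: OSData as raw bytes, OSString including its terminating NUL,
 * OSBoolean as the literal "Yes"/"No", and OSNumber as up to 8 bytes of its
 * value. Typical user-space access goes through IORegistryEntryGetProperty()
 * (illustrative sketch, not part of this kernel file):
 *
 *     io_struct_inband_t buf;
 *     uint32_t size = sizeof(buf);
 *     IORegistryEntryGetProperty(entry, "IOClass", buf, &size);
 *
 * If the caller's buffer is smaller than the flattened value the routine
 * returns kIOReturnIPCError rather than truncating.
 */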
2995
2996 /* Routine io_registry_entry_get_property */
2997 kern_return_t is_io_registry_entry_get_property(
2998 io_object_t registry_entry,
2999 io_name_t property_name,
3000 io_buf_ptr_t *properties,
3001 mach_msg_type_number_t *propertiesCnt )
3002 {
3003 kern_return_t err;
3004 vm_size_t len;
3005 OSObject * obj;
3006
3007 CHECK( IORegistryEntry, registry_entry, entry );
3008
3009 #if CONFIG_MACF
3010 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3011 return kIOReturnNotPermitted;
3012 #endif
3013
3014 obj = entry->copyProperty(property_name);
3015 if( !obj)
3016 return( kIOReturnNotFound );
3017
3018 OSSerialize * s = OSSerialize::withCapacity(4096);
3019 if( !s) {
3020 obj->release();
3021 return( kIOReturnNoMemory );
3022 }
3023
3024 if( obj->serialize( s )) {
3025 len = s->getLength();
3026 *propertiesCnt = len;
3027 err = copyoutkdata( s->text(), len, properties );
3028
3029 } else
3030 err = kIOReturnUnsupported;
3031
3032 s->release();
3033 obj->release();
3034
3035 return( err );
3036 }
3037
3038 /* Routine io_registry_entry_get_property_recursively */
3039 kern_return_t is_io_registry_entry_get_property_recursively(
3040 io_object_t registry_entry,
3041 io_name_t plane,
3042 io_name_t property_name,
3043 uint32_t options,
3044 io_buf_ptr_t *properties,
3045 mach_msg_type_number_t *propertiesCnt )
3046 {
3047 kern_return_t err;
3048 vm_size_t len;
3049 OSObject * obj;
3050
3051 CHECK( IORegistryEntry, registry_entry, entry );
3052
3053 #if CONFIG_MACF
3054 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3055 return kIOReturnNotPermitted;
3056 #endif
3057
3058 obj = entry->copyProperty( property_name,
3059 IORegistryEntry::getPlane( plane ), options );
3060 if( !obj)
3061 return( kIOReturnNotFound );
3062
3063 OSSerialize * s = OSSerialize::withCapacity(4096);
3064 if( !s) {
3065 obj->release();
3066 return( kIOReturnNoMemory );
3067 }
3068
3069 if( obj->serialize( s )) {
3070 len = s->getLength();
3071 *propertiesCnt = len;
3072 err = copyoutkdata( s->text(), len, properties );
3073
3074 } else
3075 err = kIOReturnUnsupported;
3076
3077 s->release();
3078 obj->release();
3079
3080 return( err );
3081 }
3082
3083 /* Routine io_registry_entry_get_properties */
3084 kern_return_t is_io_registry_entry_get_properties(
3085 io_object_t registry_entry,
3086 io_buf_ptr_t *properties,
3087 mach_msg_type_number_t *propertiesCnt )
3088 {
3089 return (kIOReturnUnsupported);
3090 }
3091
3092 #if CONFIG_MACF
3093
3094 struct GetPropertiesEditorRef
3095 {
3096 kauth_cred_t cred;
3097 IORegistryEntry * entry;
3098 OSCollection * root;
3099 };
3100
3101 static const OSMetaClassBase *
3102 GetPropertiesEditor(void * reference,
3103 OSSerialize * s,
3104 OSCollection * container,
3105 const OSSymbol * name,
3106 const OSMetaClassBase * value)
3107 {
3108 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3109
3110 if (!ref->root) ref->root = container;
3111 if (ref->root == container)
3112 {
3113 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3114 {
3115 value = 0;
3116 }
3117 }
3118 if (value) value->retain();
3119 return (value);
3120 }
3121
3122 #endif /* CONFIG_MACF */
3123
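/*
 * GetPropertiesEditor() is installed as an OSSerialize::Editor callback by
 * is_io_registry_entry_get_properties_bin() below: for keys in the top-level
 * (root) collection it consults mac_iokit_check_get_property() and returns
 * NULL to drop any property the MAC policy denies, so a filtered dictionary
 * can be produced in a single serialization pass.
 */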
3124 /* Routine io_registry_entry_get_properties_bin */
3125 kern_return_t is_io_registry_entry_get_properties_bin(
3126 io_object_t registry_entry,
3127 io_buf_ptr_t *properties,
3128 mach_msg_type_number_t *propertiesCnt)
3129 {
3130 kern_return_t err = kIOReturnSuccess;
3131 vm_size_t len;
3132 OSSerialize * s;
3133 OSSerialize::Editor editor = 0;
3134 void * editRef = 0;
3135
3136 CHECK(IORegistryEntry, registry_entry, entry);
3137
3138 #if CONFIG_MACF
3139 GetPropertiesEditorRef ref;
3140 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3141 {
3142 editor = &GetPropertiesEditor;
3143 editRef = &ref;
3144 ref.cred = kauth_cred_get();
3145 ref.entry = entry;
3146 ref.root = 0;
3147 }
3148 #endif
3149
3150 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3151 if (!s) return (kIOReturnNoMemory);
3152
3153 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3154
3155 if (kIOReturnSuccess == err)
3156 {
3157 len = s->getLength();
3158 *propertiesCnt = len;
3159 err = copyoutkdata(s->text(), len, properties);
3160 }
3161 s->release();
3162
3163 return (err);
3164 }
3165
3166 /* Routine io_registry_entry_get_property_bin */
3167 kern_return_t is_io_registry_entry_get_property_bin(
3168 io_object_t registry_entry,
3169 io_name_t plane,
3170 io_name_t property_name,
3171 uint32_t options,
3172 io_buf_ptr_t *properties,
3173 mach_msg_type_number_t *propertiesCnt )
3174 {
3175 kern_return_t err;
3176 vm_size_t len;
3177 OSObject * obj;
3178 const OSSymbol * sym;
3179
3180 CHECK( IORegistryEntry, registry_entry, entry );
3181
3182 #if CONFIG_MACF
3183 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3184 return kIOReturnNotPermitted;
3185 #endif
3186
3187 sym = OSSymbol::withCString(property_name);
3188 if (!sym) return (kIOReturnNoMemory);
3189
3190 if (gIORegistryEntryPropertyKeysKey == sym)
3191 {
3192 obj = entry->copyPropertyKeys();
3193 }
3194 else
3195 {
3196 if ((kIORegistryIterateRecursively & options) && plane[0])
3197 {
3198 obj = entry->copyProperty(property_name,
3199 IORegistryEntry::getPlane(plane), options );
3200 }
3201 else
3202 {
3203 obj = entry->copyProperty(property_name);
3204 }
3205 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3206 }
3207
3208 sym->release();
3209 if (!obj) return (kIOReturnNotFound);
3210
3211 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3212 if( !s) {
3213 obj->release();
3214 return( kIOReturnNoMemory );
3215 }
3216
3217 if( obj->serialize( s )) {
3218 len = s->getLength();
3219 *propertiesCnt = len;
3220 err = copyoutkdata( s->text(), len, properties );
3221
3222 } else err = kIOReturnUnsupported;
3223
3224 s->release();
3225 obj->release();
3226
3227 return( err );
3228 }
3229
3230
3231 /* Routine io_registry_entry_set_properties */
3232 kern_return_t is_io_registry_entry_set_properties
3233 (
3234 io_object_t registry_entry,
3235 io_buf_ptr_t properties,
3236 mach_msg_type_number_t propertiesCnt,
3237 kern_return_t * result)
3238 {
3239 OSObject * obj;
3240 kern_return_t err;
3241 IOReturn res;
3242 vm_offset_t data;
3243 vm_map_offset_t map_data;
3244
3245 CHECK( IORegistryEntry, registry_entry, entry );
3246
3247 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3248 return( kIOReturnMessageTooLarge);
3249
3250 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3251 data = CAST_DOWN(vm_offset_t, map_data);
3252
3253 if( KERN_SUCCESS == err) {
3254
3255 FAKE_STACK_FRAME(entry->getMetaClass());
3256
3257 // must return success after vm_map_copyout() succeeds
3258 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3259 vm_deallocate( kernel_map, data, propertiesCnt );
3260
3261 if (!obj)
3262 res = kIOReturnBadArgument;
3263 #if CONFIG_MACF
3264 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3265 registry_entry, obj))
3266 {
3267 res = kIOReturnNotPermitted;
3268 }
3269 #endif
3270 else
3271 {
3272 res = entry->setProperties( obj );
3273 }
3274
3275 if (obj)
3276 obj->release();
3277
3278 FAKE_STACK_FRAME_END();
3279
3280 } else
3281 res = err;
3282
3283 *result = res;
3284 return( err );
3285 }
3286
3287 /* Routine io_registry_entry_get_child_iterator */
3288 kern_return_t is_io_registry_entry_get_child_iterator(
3289 io_object_t registry_entry,
3290 io_name_t plane,
3291 io_object_t *iterator )
3292 {
3293 CHECK( IORegistryEntry, registry_entry, entry );
3294
3295 *iterator = entry->getChildIterator(
3296 IORegistryEntry::getPlane( plane ));
3297
3298 return( kIOReturnSuccess );
3299 }
3300
3301 /* Routine io_registry_entry_get_parent_iterator */
3302 kern_return_t is_io_registry_entry_get_parent_iterator(
3303 io_object_t registry_entry,
3304 io_name_t plane,
3305 io_object_t *iterator)
3306 {
3307 CHECK( IORegistryEntry, registry_entry, entry );
3308
3309 *iterator = entry->getParentIterator(
3310 IORegistryEntry::getPlane( plane ));
3311
3312 return( kIOReturnSuccess );
3313 }
3314
3315 /* Routine io_service_get_busy_state */
3316 kern_return_t is_io_service_get_busy_state(
3317 io_object_t _service,
3318 uint32_t *busyState )
3319 {
3320 CHECK( IOService, _service, service );
3321
3322 *busyState = service->getBusyState();
3323
3324 return( kIOReturnSuccess );
3325 }
3326
3327 /* Routine io_service_get_state */
3328 kern_return_t is_io_service_get_state(
3329 io_object_t _service,
3330 uint64_t *state,
3331 uint32_t *busy_state,
3332 uint64_t *accumulated_busy_time )
3333 {
3334 CHECK( IOService, _service, service );
3335
3336 *state = service->getState();
3337 *busy_state = service->getBusyState();
3338 *accumulated_busy_time = service->getAccumulatedBusyTime();
3339
3340 return( kIOReturnSuccess );
3341 }
3342
3343 /* Routine io_service_wait_quiet */
3344 kern_return_t is_io_service_wait_quiet(
3345 io_object_t _service,
3346 mach_timespec_t wait_time )
3347 {
3348 uint64_t timeoutNS;
3349
3350 CHECK( IOService, _service, service );
3351
3352 timeoutNS = wait_time.tv_sec;
3353 timeoutNS *= kSecondScale;
3354 timeoutNS += wait_time.tv_nsec;
3355
3356 return( service->waitQuiet(timeoutNS) );
3357 }
3358
3359 /* Routine io_service_request_probe */
3360 kern_return_t is_io_service_request_probe(
3361 io_object_t _service,
3362 uint32_t options )
3363 {
3364 CHECK( IOService, _service, service );
3365
3366 return( service->requestProbe( options ));
3367 }
3368
3369 /* Routine io_service_get_authorization_id */
3370 kern_return_t is_io_service_get_authorization_id(
3371 io_object_t _service,
3372 uint64_t *authorization_id )
3373 {
3374 kern_return_t kr;
3375
3376 CHECK( IOService, _service, service );
3377
3378 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3379 kIOClientPrivilegeAdministrator );
3380 if( kIOReturnSuccess != kr)
3381 return( kr );
3382
3383 *authorization_id = service->getAuthorizationID();
3384
3385 return( kr );
3386 }
3387
3388 /* Routine io_service_set_authorization_id */
3389 kern_return_t is_io_service_set_authorization_id(
3390 io_object_t _service,
3391 uint64_t authorization_id )
3392 {
3393 CHECK( IOService, _service, service );
3394
3395 return( service->setAuthorizationID( authorization_id ) );
3396 }
3397
3398 /* Routine io_service_open_extended */
3399 kern_return_t is_io_service_open_extended(
3400 io_object_t _service,
3401 task_t owningTask,
3402 uint32_t connect_type,
3403 NDR_record_t ndr,
3404 io_buf_ptr_t properties,
3405 mach_msg_type_number_t propertiesCnt,
3406 kern_return_t * result,
3407 io_object_t *connection )
3408 {
3409 IOUserClient * client = 0;
3410 kern_return_t err = KERN_SUCCESS;
3411 IOReturn res = kIOReturnSuccess;
3412 OSDictionary * propertiesDict = 0;
3413 bool crossEndian;
3414 bool disallowAccess;
3415
3416 CHECK( IOService, _service, service );
3417
3418 if (!owningTask) return (kIOReturnBadArgument);
3419 assert(owningTask == current_task());
3420 if (owningTask != current_task()) return (kIOReturnBadArgument);
3421
3422 do
3423 {
3424 if (properties) return (kIOReturnUnsupported);
3425 #if 0
3426 {
3427 OSObject * obj;
3428 vm_offset_t data;
3429 vm_map_offset_t map_data;
3430
3431 if( propertiesCnt > sizeof(io_struct_inband_t))
3432 return( kIOReturnMessageTooLarge);
3433
3434 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3435 res = err;
3436 data = CAST_DOWN(vm_offset_t, map_data);
3437 if (KERN_SUCCESS == err)
3438 {
3439 // must return success after vm_map_copyout() succeeds
3440 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3441 vm_deallocate( kernel_map, data, propertiesCnt );
3442 propertiesDict = OSDynamicCast(OSDictionary, obj);
3443 if (!propertiesDict)
3444 {
3445 res = kIOReturnBadArgument;
3446 if (obj)
3447 obj->release();
3448 }
3449 }
3450 if (kIOReturnSuccess != res)
3451 break;
3452 }
3453 #endif
3454 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3455 if (crossEndian)
3456 {
3457 if (!propertiesDict)
3458 propertiesDict = OSDictionary::withCapacity(4);
3459 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3460 if (data)
3461 {
3462 if (propertiesDict)
3463 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3464 data->release();
3465 }
3466 }
3467
3468 res = service->newUserClient( owningTask, (void *) owningTask,
3469 connect_type, propertiesDict, &client );
3470
3471 if (propertiesDict)
3472 propertiesDict->release();
3473
3474 if (res == kIOReturnSuccess)
3475 {
3476 assert( OSDynamicCast(IOUserClient, client) );
3477
3478 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3479 client->closed = false;
3480 client->lock = IOLockAlloc();
3481
3482 disallowAccess = (crossEndian
3483 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3484 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3485 if (disallowAccess) res = kIOReturnUnsupported;
3486 #if CONFIG_MACF
3487 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3488 res = kIOReturnNotPermitted;
3489 #endif
3490
3491 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3492
3493 if (kIOReturnSuccess != res)
3494 {
3495 IOStatisticsClientCall();
3496 client->clientClose();
3497 client->release();
3498 client = 0;
3499 break;
3500 }
3501 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3502 if (creatorName)
3503 {
3504 client->setProperty(kIOUserClientCreatorKey, creatorName);
3505 creatorName->release();
3506 }
3507 client->setTerminateDefer(service, false);
3508 }
3509 }
3510 while (false);
3511
3512 *connection = client;
3513 *result = res;
3514
3515 return (err);
3516 }
3517
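/*
 * is_io_service_open_extended() above is the kernel side of IOServiceOpen():
 * it calls the provider's newUserClient(), records the creator, and applies
 * cross-endian and MAC policy checks. Illustrative user-space sketch (not
 * part of this kernel file; connect type 0 is just an example):
 *
 *     io_connect_t conn;
 *     kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &conn);
 *     if (kr == KERN_SUCCESS) {
 *         // ... IOConnectCallMethod() etc. ...
 *         IOServiceClose(conn);
 *     }
 *
 * Note the split status: the MIG return (err) stays KERN_SUCCESS so the reply
 * is delivered, while *result carries newUserClient()'s IOReturn.
 */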
3518 /* Routine io_service_close */
3519 kern_return_t is_io_service_close(
3520 io_object_t connection )
3521 {
3522 OSSet * mappings;
3523 if ((mappings = OSDynamicCast(OSSet, connection)))
3524 return( kIOReturnSuccess );
3525
3526 CHECK( IOUserClient, connection, client );
3527
3528 IOStatisticsClientCall();
3529
3530 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3531 {
3532 IOLockLock(client->lock);
3533 client->clientClose();
3534 IOLockUnlock(client->lock);
3535 }
3536 else
3537 {
3538 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3539 client->getRegistryEntryID(), client->getName());
3540 }
3541
3542 return( kIOReturnSuccess );
3543 }
3544
3545 /* Routine io_connect_get_service */
3546 kern_return_t is_io_connect_get_service(
3547 io_object_t connection,
3548 io_object_t *service )
3549 {
3550 IOService * theService;
3551
3552 CHECK( IOUserClient, connection, client );
3553
3554 theService = client->getService();
3555 if( theService)
3556 theService->retain();
3557
3558 *service = theService;
3559
3560 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3561 }
3562
3563 /* Routine io_connect_set_notification_port */
3564 kern_return_t is_io_connect_set_notification_port(
3565 io_object_t connection,
3566 uint32_t notification_type,
3567 mach_port_t port,
3568 uint32_t reference)
3569 {
3570 kern_return_t ret;
3571 CHECK( IOUserClient, connection, client );
3572
3573 IOStatisticsClientCall();
3574 IOLockLock(client->lock);
3575 ret = client->registerNotificationPort( port, notification_type,
3576 (io_user_reference_t) reference );
3577 IOLockUnlock(client->lock);
3578 return (ret);
3579 }
3580
3581 /* Routine io_connect_set_notification_port_64 */
3582 kern_return_t is_io_connect_set_notification_port_64(
3583 io_object_t connection,
3584 uint32_t notification_type,
3585 mach_port_t port,
3586 io_user_reference_t reference)
3587 {
3588 kern_return_t ret;
3589 CHECK( IOUserClient, connection, client );
3590
3591 IOStatisticsClientCall();
3592 IOLockLock(client->lock);
3593 ret = client->registerNotificationPort( port, notification_type,
3594 reference );
3595 IOLockUnlock(client->lock);
3596 return (ret);
3597 }
3598
3599 /* Routine io_connect_map_memory_into_task */
3600 kern_return_t is_io_connect_map_memory_into_task
3601 (
3602 io_connect_t connection,
3603 uint32_t memory_type,
3604 task_t into_task,
3605 mach_vm_address_t *address,
3606 mach_vm_size_t *size,
3607 uint32_t flags
3608 )
3609 {
3610 IOReturn err;
3611 IOMemoryMap * map;
3612
3613 CHECK( IOUserClient, connection, client );
3614
3615 if (!into_task) return (kIOReturnBadArgument);
3616
3617 IOStatisticsClientCall();
3618 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3619
3620 if( map) {
3621 *address = map->getAddress();
3622 if( size)
3623 *size = map->getSize();
3624
3625 if( client->sharedInstance
3626 || (into_task != current_task())) {
3627 // push a name out to the task owning the map,
3628 // so we can clean up maps
3629 mach_port_name_t name __unused =
3630 IOMachPort::makeSendRightForTask(
3631 into_task, map, IKOT_IOKIT_OBJECT );
3632 map->release();
3633
3634 } else {
3635 // keep it with the user client
3636 IOLockLock( gIOObjectPortLock);
3637 if( 0 == client->mappings)
3638 client->mappings = OSSet::withCapacity(2);
3639 if( client->mappings)
3640 client->mappings->setObject( map);
3641 IOLockUnlock( gIOObjectPortLock);
3642 map->release();
3643 }
3644 err = kIOReturnSuccess;
3645
3646 } else
3647 err = kIOReturnBadArgument;
3648
3649 return( err );
3650 }
3651
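/*
 * Mapping client memory: user space reaches the routine above via
 * IOConnectMapMemory64() (illustrative sketch, not part of this kernel file):
 *
 *     mach_vm_address_t addr = 0;
 *     mach_vm_size_t    size = 0;
 *     IOConnectMapMemory64(conn, memoryType, mach_task_self(),
 *         &addr, &size, kIOMapAnywhere);
 *
 * When the map is created for another task (or for a shared-instance client)
 * a send right is pushed to that task so the mapping can be torn down later;
 * otherwise the map is parked on the user client's 'mappings' set. The legacy
 * 32-bit is_io_connect_map_memory() below just widens its arguments with
 * SCALAR64() and forwards here.
 */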
3652 /* Routine io_connect_map_memory */
3653 kern_return_t is_io_connect_map_memory(
3654 io_object_t connect,
3655 uint32_t type,
3656 task_t task,
3657 uint32_t * mapAddr,
3658 uint32_t * mapSize,
3659 uint32_t flags )
3660 {
3661 IOReturn err;
3662 mach_vm_address_t address;
3663 mach_vm_size_t size;
3664
3665 address = SCALAR64(*mapAddr);
3666 size = SCALAR64(*mapSize);
3667
3668 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3669
3670 *mapAddr = SCALAR32(address);
3671 *mapSize = SCALAR32(size);
3672
3673 return (err);
3674 }
3675
3676 } /* extern "C" */
3677
3678 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3679 {
3680 OSIterator * iter;
3681 IOMemoryMap * map = 0;
3682
3683 IOLockLock(gIOObjectPortLock);
3684
3685 iter = OSCollectionIterator::withCollection(mappings);
3686 if(iter)
3687 {
3688 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3689 {
3690 if(mem == map->getMemoryDescriptor())
3691 {
3692 map->retain();
3693 mappings->removeObject(map);
3694 break;
3695 }
3696 }
3697 iter->release();
3698 }
3699
3700 IOLockUnlock(gIOObjectPortLock);
3701
3702 return (map);
3703 }
3704
3705 extern "C" {
3706
3707 /* Routine io_connect_unmap_memory_from_task */
3708 kern_return_t is_io_connect_unmap_memory_from_task
3709 (
3710 io_connect_t connection,
3711 uint32_t memory_type,
3712 task_t from_task,
3713 mach_vm_address_t address)
3714 {
3715 IOReturn err;
3716 IOOptionBits options = 0;
3717 IOMemoryDescriptor * memory = 0;
3718 IOMemoryMap * map;
3719
3720 CHECK( IOUserClient, connection, client );
3721
3722 if (!from_task) return (kIOReturnBadArgument);
3723
3724 IOStatisticsClientCall();
3725 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3726
3727 if( memory && (kIOReturnSuccess == err)) {
3728
3729 options = (options & ~kIOMapUserOptionsMask)
3730 | kIOMapAnywhere | kIOMapReference;
3731
3732 map = memory->createMappingInTask( from_task, address, options );
3733 memory->release();
3734 if( map)
3735 {
3736 IOLockLock( gIOObjectPortLock);
3737 if( client->mappings)
3738 client->mappings->removeObject( map);
3739 IOLockUnlock( gIOObjectPortLock);
3740
3741 mach_port_name_t name = 0;
3742 if (from_task != current_task())
3743 {
3744 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3745 map->release();
3746 }
3747
3748 if (name)
3749 {
3750 map->userClientUnmap();
3751 err = iokit_mod_send_right( from_task, name, -2 );
3752 err = kIOReturnSuccess;
3753 }
3754 else
3755 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3756 if (from_task == current_task())
3757 map->release();
3758 }
3759 else
3760 err = kIOReturnBadArgument;
3761 }
3762
3763 return( err );
3764 }
3765
3766 kern_return_t is_io_connect_unmap_memory(
3767 io_object_t connect,
3768 uint32_t type,
3769 task_t task,
3770 uint32_t mapAddr )
3771 {
3772 IOReturn err;
3773 mach_vm_address_t address;
3774
3775 address = SCALAR64(mapAddr);
3776
3777 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3778
3779 return (err);
3780 }
3781
3782
3783 /* Routine io_connect_add_client */
3784 kern_return_t is_io_connect_add_client(
3785 io_object_t connection,
3786 io_object_t connect_to)
3787 {
3788 CHECK( IOUserClient, connection, client );
3789 CHECK( IOUserClient, connect_to, to );
3790
3791 IOStatisticsClientCall();
3792 return( client->connectClient( to ) );
3793 }
3794
3795
3796 /* Routine io_connect_set_properties */
3797 kern_return_t is_io_connect_set_properties(
3798 io_object_t connection,
3799 io_buf_ptr_t properties,
3800 mach_msg_type_number_t propertiesCnt,
3801 kern_return_t * result)
3802 {
3803 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3804 }
3805
3806 /* Routine io_connect_method_var_output */
3807 kern_return_t is_io_connect_method_var_output
3808 (
3809 io_connect_t connection,
3810 uint32_t selector,
3811 io_scalar_inband64_t scalar_input,
3812 mach_msg_type_number_t scalar_inputCnt,
3813 io_struct_inband_t inband_input,
3814 mach_msg_type_number_t inband_inputCnt,
3815 mach_vm_address_t ool_input,
3816 mach_vm_size_t ool_input_size,
3817 io_struct_inband_t inband_output,
3818 mach_msg_type_number_t *inband_outputCnt,
3819 io_scalar_inband64_t scalar_output,
3820 mach_msg_type_number_t *scalar_outputCnt,
3821 io_buf_ptr_t *var_output,
3822 mach_msg_type_number_t *var_outputCnt
3823 )
3824 {
3825 CHECK( IOUserClient, connection, client );
3826
3827 IOExternalMethodArguments args;
3828 IOReturn ret;
3829 IOMemoryDescriptor * inputMD = 0;
3830 OSObject * structureVariableOutputData = 0;
3831
3832 bzero(&args.__reserved[0], sizeof(args.__reserved));
3833 args.__reservedA = 0;
3834 args.version = kIOExternalMethodArgumentsCurrentVersion;
3835
3836 args.selector = selector;
3837
3838 args.asyncWakePort = MACH_PORT_NULL;
3839 args.asyncReference = 0;
3840 args.asyncReferenceCount = 0;
3841 args.structureVariableOutputData = &structureVariableOutputData;
3842
3843 args.scalarInput = scalar_input;
3844 args.scalarInputCount = scalar_inputCnt;
3845 args.structureInput = inband_input;
3846 args.structureInputSize = inband_inputCnt;
3847
3848 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3849
3850 if (ool_input)
3851 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3852 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3853 current_task());
3854
3855 args.structureInputDescriptor = inputMD;
3856
3857 args.scalarOutput = scalar_output;
3858 args.scalarOutputCount = *scalar_outputCnt;
3859 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3860 args.structureOutput = inband_output;
3861 args.structureOutputSize = *inband_outputCnt;
3862 args.structureOutputDescriptor = NULL;
3863 args.structureOutputDescriptorSize = 0;
3864
3865 IOStatisticsClientCall();
3866 ret = client->externalMethod( selector, &args );
3867
3868 *scalar_outputCnt = args.scalarOutputCount;
3869 *inband_outputCnt = args.structureOutputSize;
3870
3871 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3872 {
3873 OSSerialize * serialize;
3874 OSData * data;
3875 vm_size_t len;
3876
3877 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3878 {
3879 len = serialize->getLength();
3880 *var_outputCnt = len;
3881 ret = copyoutkdata(serialize->text(), len, var_output);
3882 }
3883 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3884 {
3885 len = data->getLength();
3886 *var_outputCnt = len;
3887 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3888 }
3889 else
3890 {
3891 ret = kIOReturnUnderrun;
3892 }
3893 }
3894
3895 if (inputMD)
3896 inputMD->release();
3897 if (structureVariableOutputData)
3898 structureVariableOutputData->release();
3899
3900 return (ret);
3901 }
3902
3903 /* Routine io_connect_method */
3904 kern_return_t is_io_connect_method
3905 (
3906 io_connect_t connection,
3907 uint32_t selector,
3908 io_scalar_inband64_t scalar_input,
3909 mach_msg_type_number_t scalar_inputCnt,
3910 io_struct_inband_t inband_input,
3911 mach_msg_type_number_t inband_inputCnt,
3912 mach_vm_address_t ool_input,
3913 mach_vm_size_t ool_input_size,
3914 io_struct_inband_t inband_output,
3915 mach_msg_type_number_t *inband_outputCnt,
3916 io_scalar_inband64_t scalar_output,
3917 mach_msg_type_number_t *scalar_outputCnt,
3918 mach_vm_address_t ool_output,
3919 mach_vm_size_t *ool_output_size
3920 )
3921 {
3922 CHECK( IOUserClient, connection, client );
3923
3924 IOExternalMethodArguments args;
3925 IOReturn ret;
3926 IOMemoryDescriptor * inputMD = 0;
3927 IOMemoryDescriptor * outputMD = 0;
3928
3929 bzero(&args.__reserved[0], sizeof(args.__reserved));
3930 args.__reservedA = 0;
3931 args.version = kIOExternalMethodArgumentsCurrentVersion;
3932
3933 args.selector = selector;
3934
3935 args.asyncWakePort = MACH_PORT_NULL;
3936 args.asyncReference = 0;
3937 args.asyncReferenceCount = 0;
3938 args.structureVariableOutputData = 0;
3939
3940 args.scalarInput = scalar_input;
3941 args.scalarInputCount = scalar_inputCnt;
3942 args.structureInput = inband_input;
3943 args.structureInputSize = inband_inputCnt;
3944
3945 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3946 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3947
3948 if (ool_input)
3949 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3950 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3951 current_task());
3952
3953 args.structureInputDescriptor = inputMD;
3954
3955 args.scalarOutput = scalar_output;
3956 args.scalarOutputCount = *scalar_outputCnt;
3957 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3958 args.structureOutput = inband_output;
3959 args.structureOutputSize = *inband_outputCnt;
3960
3961 if (ool_output && ool_output_size)
3962 {
3963 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3964 kIODirectionIn, current_task());
3965 }
3966
3967 args.structureOutputDescriptor = outputMD;
3968 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3969
3970 IOStatisticsClientCall();
3971 ret = client->externalMethod( selector, &args );
3972
3973 *scalar_outputCnt = args.scalarOutputCount;
3974 *inband_outputCnt = args.structureOutputSize;
3975 *ool_output_size = args.structureOutputDescriptorSize;
3976
3977 if (inputMD)
3978 inputMD->release();
3979 if (outputMD)
3980 outputMD->release();
3981
3982 return (ret);
3983 }
3984
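/*
 * is_io_connect_method() above backs the modern IOConnectCallMethod() family:
 * scalar and structure arguments are packed into IOExternalMethodArguments
 * and dispatched through IOUserClient::externalMethod(); inputs or outputs
 * larger than io_struct_inband_t travel as out-of-line memory descriptors.
 * Illustrative user-space sketch (not part of this kernel file; 'conn' and
 * 'selector' are placeholders):
 *
 *     uint64_t in[2] = { 1, 2 }, out[4];
 *     uint32_t outCnt = 4;
 *     IOConnectCallScalarMethod(conn, selector, in, 2, out, &outCnt);
 */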
3985 /* Routine io_connect_async_method */
3986 kern_return_t is_io_connect_async_method
3987 (
3988 io_connect_t connection,
3989 mach_port_t wake_port,
3990 io_async_ref64_t reference,
3991 mach_msg_type_number_t referenceCnt,
3992 uint32_t selector,
3993 io_scalar_inband64_t scalar_input,
3994 mach_msg_type_number_t scalar_inputCnt,
3995 io_struct_inband_t inband_input,
3996 mach_msg_type_number_t inband_inputCnt,
3997 mach_vm_address_t ool_input,
3998 mach_vm_size_t ool_input_size,
3999 io_struct_inband_t inband_output,
4000 mach_msg_type_number_t *inband_outputCnt,
4001 io_scalar_inband64_t scalar_output,
4002 mach_msg_type_number_t *scalar_outputCnt,
4003 mach_vm_address_t ool_output,
4004 mach_vm_size_t * ool_output_size
4005 )
4006 {
4007 CHECK( IOUserClient, connection, client );
4008
4009 IOExternalMethodArguments args;
4010 IOReturn ret;
4011 IOMemoryDescriptor * inputMD = 0;
4012 IOMemoryDescriptor * outputMD = 0;
4013
4014 bzero(&args.__reserved[0], sizeof(args.__reserved));
4015 args.__reservedA = 0;
4016 args.version = kIOExternalMethodArgumentsCurrentVersion;
4017
4018 reference[0] = (io_user_reference_t) wake_port;
4019 if (vm_map_is_64bit(get_task_map(current_task())))
4020 reference[0] |= kIOUCAsync64Flag;
4021
4022 args.selector = selector;
4023
4024 args.asyncWakePort = wake_port;
4025 args.asyncReference = reference;
4026 args.asyncReferenceCount = referenceCnt;
4027
4028 args.structureVariableOutputData = 0;
4029
4030 args.scalarInput = scalar_input;
4031 args.scalarInputCount = scalar_inputCnt;
4032 args.structureInput = inband_input;
4033 args.structureInputSize = inband_inputCnt;
4034
4035 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4036 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4037
4038 if (ool_input)
4039 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4040 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4041 current_task());
4042
4043 args.structureInputDescriptor = inputMD;
4044
4045 args.scalarOutput = scalar_output;
4046 args.scalarOutputCount = *scalar_outputCnt;
4047 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4048 args.structureOutput = inband_output;
4049 args.structureOutputSize = *inband_outputCnt;
4050
4051 if (ool_output)
4052 {
4053 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4054 kIODirectionIn, current_task());
4055 }
4056
4057 args.structureOutputDescriptor = outputMD;
4058 args.structureOutputDescriptorSize = *ool_output_size;
4059
4060 IOStatisticsClientCall();
4061 ret = client->externalMethod( selector, &args );
4062
4063 *inband_outputCnt = args.structureOutputSize;
4064 *ool_output_size = args.structureOutputDescriptorSize;
4065
4066 if (inputMD)
4067 inputMD->release();
4068 if (outputMD)
4069 outputMD->release();
4070
4071 return (ret);
4072 }
4073
4074 /* Routine io_connect_method_scalarI_scalarO */
4075 kern_return_t is_io_connect_method_scalarI_scalarO(
4076 io_object_t connect,
4077 uint32_t index,
4078 io_scalar_inband_t input,
4079 mach_msg_type_number_t inputCount,
4080 io_scalar_inband_t output,
4081 mach_msg_type_number_t * outputCount )
4082 {
4083 IOReturn err;
4084 uint32_t i;
4085 io_scalar_inband64_t _input;
4086 io_scalar_inband64_t _output;
4087
4088 mach_msg_type_number_t struct_outputCnt = 0;
4089 mach_vm_size_t ool_output_size = 0;
4090
4091 bzero(&_output[0], sizeof(_output));
4092 for (i = 0; i < inputCount; i++)
4093 _input[i] = SCALAR64(input[i]);
4094
4095 err = is_io_connect_method(connect, index,
4096 _input, inputCount,
4097 NULL, 0,
4098 0, 0,
4099 NULL, &struct_outputCnt,
4100 _output, outputCount,
4101 0, &ool_output_size);
4102
4103 for (i = 0; i < *outputCount; i++)
4104 output[i] = SCALAR32(_output[i]);
4105
4106 return (err);
4107 }
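/*
 * The 32-bit routine above just widens its scalars and forwards to
 * is_io_connect_method().  From user space both generations are reached
 * through the same IOKitLib wrapper; a minimal sketch (kMySelector and the
 * argument counts are hypothetical):
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   kern_return_t call_scalar(io_connect_t connect)
 *   {
 *       uint64_t input[2]  = { 7, 9 };
 *       uint64_t output[1] = { 0 };
 *       uint32_t outputCnt = 1;
 *
 *       return IOConnectCallScalarMethod(connect, kMySelector,
 *                                        input, 2, output, &outputCnt);
 *   }
 */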
4108
4109 kern_return_t shim_io_connect_method_scalarI_scalarO(
4110 IOExternalMethod * method,
4111 IOService * object,
4112 const io_user_scalar_t * input,
4113 mach_msg_type_number_t inputCount,
4114 io_user_scalar_t * output,
4115 mach_msg_type_number_t * outputCount )
4116 {
4117 IOMethod func;
4118 io_scalar_inband_t _output;
4119 IOReturn err;
4120 err = kIOReturnBadArgument;
4121
4122 bzero(&_output[0], sizeof(_output));
4123 do {
4124
4125 if( inputCount != method->count0)
4126 {
4127 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4128 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4129 continue;
4130 }
4131 if( *outputCount != method->count1)
4132 {
4133 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4134 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4135 continue;
4136 }
4137
4138 func = method->func;
4139
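	/* A legacy IOMethod takes up to six untyped arguments, so the shim packs the
	 * 32-bit scalar inputs and output slots positionally based on inputCount. */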
4140 switch( inputCount) {
4141
4142 case 6:
4143 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4144 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4145 break;
4146 case 5:
4147 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4148 ARG32(input[3]), ARG32(input[4]),
4149 &_output[0] );
4150 break;
4151 case 4:
4152 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4153 ARG32(input[3]),
4154 &_output[0], &_output[1] );
4155 break;
4156 case 3:
4157 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4158 &_output[0], &_output[1], &_output[2] );
4159 break;
4160 case 2:
4161 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4162 &_output[0], &_output[1], &_output[2],
4163 &_output[3] );
4164 break;
4165 case 1:
4166 err = (object->*func)( ARG32(input[0]),
4167 &_output[0], &_output[1], &_output[2],
4168 &_output[3], &_output[4] );
4169 break;
4170 case 0:
4171 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4172 &_output[3], &_output[4], &_output[5] );
4173 break;
4174
4175 default:
4176 IOLog("%s: Bad method table\n", object->getName());
4177 }
4178 }
4179 while( false);
4180
4181 uint32_t i;
4182 for (i = 0; i < *outputCount; i++)
4183 output[i] = SCALAR32(_output[i]);
4184
4185 return( err);
4186 }
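/*
 * The shim above services drivers that still publish pre-Leopard
 * IOExternalMethod tables.  A hedged sketch of the driver side (MyUserClient,
 * MyDriver and addNumbers are hypothetical); count0/count1 are the scalar
 * input/output counts the shim validates:
 *
 *   static const IOExternalMethod sMethods[] = {
 *       {   // selector 0: two scalar inputs, one scalar output
 *           NULL,                               // target filled in at lookup time
 *           (IOMethod) &MyDriver::addNumbers,
 *           kIOUCScalarIScalarO,
 *           2,                                  // count0: scalar inputs
 *           1                                   // count1: scalar outputs
 *       },
 *   };
 *
 *   IOExternalMethod *
 *   MyUserClient::getTargetAndMethodForIndex( IOService ** targetP, UInt32 index )
 *   {
 *       if (index >= (sizeof(sMethods) / sizeof(sMethods[0])))
 *           return NULL;
 *       *targetP = getProvider();               // the IOService the IOMethod runs on
 *       return (IOExternalMethod *) &sMethods[index];
 *   }
 */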
4187
4188 /* Routine io_async_method_scalarI_scalarO */
4189 kern_return_t is_io_async_method_scalarI_scalarO(
4190 io_object_t connect,
4191 mach_port_t wake_port,
4192 io_async_ref_t reference,
4193 mach_msg_type_number_t referenceCnt,
4194 uint32_t index,
4195 io_scalar_inband_t input,
4196 mach_msg_type_number_t inputCount,
4197 io_scalar_inband_t output,
4198 mach_msg_type_number_t * outputCount )
4199 {
4200 IOReturn err;
4201 uint32_t i;
4202 io_scalar_inband64_t _input;
4203 io_scalar_inband64_t _output;
4204 io_async_ref64_t _reference;
4205
4206 bzero(&_output[0], sizeof(_output));
4207 for (i = 0; i < referenceCnt; i++)
4208 _reference[i] = REF64(reference[i]);
4209
4210 mach_msg_type_number_t struct_outputCnt = 0;
4211 mach_vm_size_t ool_output_size = 0;
4212
4213 for (i = 0; i < inputCount; i++)
4214 _input[i] = SCALAR64(input[i]);
4215
4216 err = is_io_connect_async_method(connect,
4217 wake_port, _reference, referenceCnt,
4218 index,
4219 _input, inputCount,
4220 NULL, 0,
4221 0, 0,
4222 NULL, &struct_outputCnt,
4223 _output, outputCount,
4224 0, &ool_output_size);
4225
4226 for (i = 0; i < *outputCount; i++)
4227 output[i] = SCALAR32(_output[i]);
4228
4229 return (err);
4230 }
4231 /* Routine io_async_method_scalarI_structureO */
4232 kern_return_t is_io_async_method_scalarI_structureO(
4233 io_object_t connect,
4234 mach_port_t wake_port,
4235 io_async_ref_t reference,
4236 mach_msg_type_number_t referenceCnt,
4237 uint32_t index,
4238 io_scalar_inband_t input,
4239 mach_msg_type_number_t inputCount,
4240 io_struct_inband_t output,
4241 mach_msg_type_number_t * outputCount )
4242 {
4243 uint32_t i;
4244 io_scalar_inband64_t _input;
4245 io_async_ref64_t _reference;
4246
4247 for (i = 0; i < referenceCnt; i++)
4248 _reference[i] = REF64(reference[i]);
4249
4250 mach_msg_type_number_t scalar_outputCnt = 0;
4251 mach_vm_size_t ool_output_size = 0;
4252
4253 for (i = 0; i < inputCount; i++)
4254 _input[i] = SCALAR64(input[i]);
4255
4256 return (is_io_connect_async_method(connect,
4257 wake_port, _reference, referenceCnt,
4258 index,
4259 _input, inputCount,
4260 NULL, 0,
4261 0, 0,
4262 output, outputCount,
4263 NULL, &scalar_outputCnt,
4264 0, &ool_output_size));
4265 }
4266
4267 /* Routine io_async_method_scalarI_structureI */
4268 kern_return_t is_io_async_method_scalarI_structureI(
4269 io_connect_t connect,
4270 mach_port_t wake_port,
4271 io_async_ref_t reference,
4272 mach_msg_type_number_t referenceCnt,
4273 uint32_t index,
4274 io_scalar_inband_t input,
4275 mach_msg_type_number_t inputCount,
4276 io_struct_inband_t inputStruct,
4277 mach_msg_type_number_t inputStructCount )
4278 {
4279 uint32_t i;
4280 io_scalar_inband64_t _input;
4281 io_async_ref64_t _reference;
4282
4283 for (i = 0; i < referenceCnt; i++)
4284 _reference[i] = REF64(reference[i]);
4285
4286 mach_msg_type_number_t scalar_outputCnt = 0;
4287 mach_msg_type_number_t inband_outputCnt = 0;
4288 mach_vm_size_t ool_output_size = 0;
4289
4290 for (i = 0; i < inputCount; i++)
4291 _input[i] = SCALAR64(input[i]);
4292
4293 return (is_io_connect_async_method(connect,
4294 wake_port, _reference, referenceCnt,
4295 index,
4296 _input, inputCount,
4297 inputStruct, inputStructCount,
4298 0, 0,
4299 NULL, &inband_outputCnt,
4300 NULL, &scalar_outputCnt,
4301 0, &ool_output_size));
4302 }
4303
4304 /* Routine io_async_method_structureI_structureO */
4305 kern_return_t is_io_async_method_structureI_structureO(
4306 io_object_t connect,
4307 mach_port_t wake_port,
4308 io_async_ref_t reference,
4309 mach_msg_type_number_t referenceCnt,
4310 uint32_t index,
4311 io_struct_inband_t input,
4312 mach_msg_type_number_t inputCount,
4313 io_struct_inband_t output,
4314 mach_msg_type_number_t * outputCount )
4315 {
4316 uint32_t i;
4317 mach_msg_type_number_t scalar_outputCnt = 0;
4318 mach_vm_size_t ool_output_size = 0;
4319 io_async_ref64_t _reference;
4320
4321 for (i = 0; i < referenceCnt; i++)
4322 _reference[i] = REF64(reference[i]);
4323
4324 return (is_io_connect_async_method(connect,
4325 wake_port, _reference, referenceCnt,
4326 index,
4327 NULL, 0,
4328 input, inputCount,
4329 0, 0,
4330 output, outputCount,
4331 NULL, &scalar_outputCnt,
4332 0, &ool_output_size));
4333 }
4334
4335
4336 kern_return_t shim_io_async_method_scalarI_scalarO(
4337 IOExternalAsyncMethod * method,
4338 IOService * object,
4339 mach_port_t asyncWakePort,
4340 io_user_reference_t * asyncReference,
4341 uint32_t asyncReferenceCount,
4342 const io_user_scalar_t * input,
4343 mach_msg_type_number_t inputCount,
4344 io_user_scalar_t * output,
4345 mach_msg_type_number_t * outputCount )
4346 {
4347 IOAsyncMethod func;
4348 uint32_t i;
4349 io_scalar_inband_t _output;
4350 IOReturn err;
4351 io_async_ref_t reference;
4352
4353 bzero(&_output[0], sizeof(_output));
4354 for (i = 0; i < asyncReferenceCount; i++)
4355 reference[i] = REF32(asyncReference[i]);
4356
4357 err = kIOReturnBadArgument;
4358
4359 do {
4360
4361 if( inputCount != method->count0)
4362 {
4363 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4364 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4365 continue;
4366 }
4367 if( *outputCount != method->count1)
4368 {
4369 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4370 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4371 continue;
4372 }
4373
4374 func = method->func;
4375
4376 switch( inputCount) {
4377
4378 case 6:
4379 err = (object->*func)( reference,
4380 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4381 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4382 break;
4383 case 5:
4384 err = (object->*func)( reference,
4385 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4386 ARG32(input[3]), ARG32(input[4]),
4387 &_output[0] );
4388 break;
4389 case 4:
4390 err = (object->*func)( reference,
4391 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4392 ARG32(input[3]),
4393 &_output[0], &_output[1] );
4394 break;
4395 case 3:
4396 err = (object->*func)( reference,
4397 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4398 &_output[0], &_output[1], &_output[2] );
4399 break;
4400 case 2:
4401 err = (object->*func)( reference,
4402 ARG32(input[0]), ARG32(input[1]),
4403 &_output[0], &_output[1], &_output[2],
4404 &_output[3] );
4405 break;
4406 case 1:
4407 err = (object->*func)( reference,
4408 ARG32(input[0]),
4409 &_output[0], &_output[1], &_output[2],
4410 &_output[3], &_output[4] );
4411 break;
4412 case 0:
4413 err = (object->*func)( reference,
4414 &_output[0], &_output[1], &_output[2],
4415 &_output[3], &_output[4], &_output[5] );
4416 break;
4417
4418 default:
4419 IOLog("%s: Bad method table\n", object->getName());
4420 }
4421 }
4422 while( false);
4423
4424 for (i = 0; i < *outputCount; i++)
4425 output[i] = SCALAR32(_output[i]);
4426
4427 return( err);
4428 }
4429
4430
4431 /* Routine io_connect_method_scalarI_structureO */
4432 kern_return_t is_io_connect_method_scalarI_structureO(
4433 io_object_t connect,
4434 uint32_t index,
4435 io_scalar_inband_t input,
4436 mach_msg_type_number_t inputCount,
4437 io_struct_inband_t output,
4438 mach_msg_type_number_t * outputCount )
4439 {
4440 uint32_t i;
4441 io_scalar_inband64_t _input;
4442
4443 mach_msg_type_number_t scalar_outputCnt = 0;
4444 mach_vm_size_t ool_output_size = 0;
4445
4446 for (i = 0; i < inputCount; i++)
4447 _input[i] = SCALAR64(input[i]);
4448
4449 return (is_io_connect_method(connect, index,
4450 _input, inputCount,
4451 NULL, 0,
4452 0, 0,
4453 output, outputCount,
4454 NULL, &scalar_outputCnt,
4455 0, &ool_output_size));
4456 }
4457
4458 kern_return_t shim_io_connect_method_scalarI_structureO(
4459
4460 IOExternalMethod * method,
4461 IOService * object,
4462 const io_user_scalar_t * input,
4463 mach_msg_type_number_t inputCount,
4464 io_struct_inband_t output,
4465 IOByteCount * outputCount )
4466 {
4467 IOMethod func;
4468 IOReturn err;
4469
4470 err = kIOReturnBadArgument;
4471
4472 do {
4473 if( inputCount != method->count0)
4474 {
4475 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4476 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4477 continue;
4478 }
4479 if( (kIOUCVariableStructureSize != method->count1)
4480 && (*outputCount != method->count1))
4481 {
4482 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4483 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4484 continue;
4485 }
4486
4487 func = method->func;
4488
4489 switch( inputCount) {
4490
4491 case 5:
4492 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4493 ARG32(input[3]), ARG32(input[4]),
4494 output );
4495 break;
4496 case 4:
4497 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4498 ARG32(input[3]),
4499 output, (void *)outputCount );
4500 break;
4501 case 3:
4502 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4503 output, (void *)outputCount, 0 );
4504 break;
4505 case 2:
4506 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4507 output, (void *)outputCount, 0, 0 );
4508 break;
4509 case 1:
4510 err = (object->*func)( ARG32(input[0]),
4511 output, (void *)outputCount, 0, 0, 0 );
4512 break;
4513 case 0:
4514 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4515 break;
4516
4517 default:
4518 IOLog("%s: Bad method table\n", object->getName());
4519 }
4520 }
4521 while( false);
4522
4523 return( err);
4524 }
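/*
 * For the structure-output shims count1 is a byte size rather than a scalar
 * count, and kIOUCVariableStructureSize opts out of the exact-size check
 * performed above.  A hedged sketch of such a legacy table entry (names are
 * hypothetical):
 *
 *   {   // selector 1: one scalar input, variable-sized structure output
 *       NULL,
 *       (IOMethod) &MyDriver::readBuffer,
 *       kIOUCScalarIStructO,
 *       1,                                  // count0: scalar inputs
 *       kIOUCVariableStructureSize          // count1: output size checked by the driver
 *   },
 */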
4525
4526
4527 kern_return_t shim_io_async_method_scalarI_structureO(
4528 IOExternalAsyncMethod * method,
4529 IOService * object,
4530 mach_port_t asyncWakePort,
4531 io_user_reference_t * asyncReference,
4532 uint32_t asyncReferenceCount,
4533 const io_user_scalar_t * input,
4534 mach_msg_type_number_t inputCount,
4535 io_struct_inband_t output,
4536 mach_msg_type_number_t * outputCount )
4537 {
4538 IOAsyncMethod func;
4539 uint32_t i;
4540 IOReturn err;
4541 io_async_ref_t reference;
4542
4543 for (i = 0; i < asyncReferenceCount; i++)
4544 reference[i] = REF32(asyncReference[i]);
4545
4546 err = kIOReturnBadArgument;
4547 do {
4548 if( inputCount != method->count0)
4549 {
4550 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4551 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4552 continue;
4553 }
4554 if( (kIOUCVariableStructureSize != method->count1)
4555 && (*outputCount != method->count1))
4556 {
4557 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4558 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4559 continue;
4560 }
4561
4562 func = method->func;
4563
4564 switch( inputCount) {
4565
4566 case 5:
4567 err = (object->*func)( reference,
4568 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4569 ARG32(input[3]), ARG32(input[4]),
4570 output );
4571 break;
4572 case 4:
4573 err = (object->*func)( reference,
4574 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4575 ARG32(input[3]),
4576 output, (void *)outputCount );
4577 break;
4578 case 3:
4579 err = (object->*func)( reference,
4580 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4581 output, (void *)outputCount, 0 );
4582 break;
4583 case 2:
4584 err = (object->*func)( reference,
4585 ARG32(input[0]), ARG32(input[1]),
4586 output, (void *)outputCount, 0, 0 );
4587 break;
4588 case 1:
4589 err = (object->*func)( reference,
4590 ARG32(input[0]),
4591 output, (void *)outputCount, 0, 0, 0 );
4592 break;
4593 case 0:
4594 err = (object->*func)( reference,
4595 output, (void *)outputCount, 0, 0, 0, 0 );
4596 break;
4597
4598 default:
4599 IOLog("%s: Bad method table\n", object->getName());
4600 }
4601 }
4602 while( false);
4603
4604 return( err);
4605 }
4606
4607 /* Routine io_connect_method_scalarI_structureI */
4608 kern_return_t is_io_connect_method_scalarI_structureI(
4609 io_connect_t connect,
4610 uint32_t index,
4611 io_scalar_inband_t input,
4612 mach_msg_type_number_t inputCount,
4613 io_struct_inband_t inputStruct,
4614 mach_msg_type_number_t inputStructCount )
4615 {
4616 uint32_t i;
4617 io_scalar_inband64_t _input;
4618
4619 mach_msg_type_number_t scalar_outputCnt = 0;
4620 mach_msg_type_number_t inband_outputCnt = 0;
4621 mach_vm_size_t ool_output_size = 0;
4622
4623 for (i = 0; i < inputCount; i++)
4624 _input[i] = SCALAR64(input[i]);
4625
4626 return (is_io_connect_method(connect, index,
4627 _input, inputCount,
4628 inputStruct, inputStructCount,
4629 0, 0,
4630 NULL, &inband_outputCnt,
4631 NULL, &scalar_outputCnt,
4632 0, &ool_output_size));
4633 }
4634
4635 kern_return_t shim_io_connect_method_scalarI_structureI(
4636 IOExternalMethod * method,
4637 IOService * object,
4638 const io_user_scalar_t * input,
4639 mach_msg_type_number_t inputCount,
4640 io_struct_inband_t inputStruct,
4641 mach_msg_type_number_t inputStructCount )
4642 {
4643 IOMethod func;
4644 IOReturn err = kIOReturnBadArgument;
4645
4646 do
4647 {
4648 if (inputCount != method->count0)
4649 {
4650 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4651 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4652 continue;
4653 }
4654 if( (kIOUCVariableStructureSize != method->count1)
4655 && (inputStructCount != method->count1))
4656 {
4657 IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4658 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4659 continue;
4660 }
4661
4662 func = method->func;
4663
4664 switch( inputCount) {
4665
4666 case 5:
4667 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4668 ARG32(input[3]), ARG32(input[4]),
4669 inputStruct );
4670 break;
4671 case 4:
4672 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4673 ARG32(input[3]),
4674 inputStruct, (void *)(uintptr_t)inputStructCount );
4675 break;
4676 case 3:
4677 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4678 inputStruct, (void *)(uintptr_t)inputStructCount,
4679 0 );
4680 break;
4681 case 2:
4682 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4683 inputStruct, (void *)(uintptr_t)inputStructCount,
4684 0, 0 );
4685 break;
4686 case 1:
4687 err = (object->*func)( ARG32(input[0]),
4688 inputStruct, (void *)(uintptr_t)inputStructCount,
4689 0, 0, 0 );
4690 break;
4691 case 0:
4692 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4693 0, 0, 0, 0 );
4694 break;
4695
4696 default:
4697 IOLog("%s: Bad method table\n", object->getName());
4698 }
4699 }
4700 while (false);
4701
4702 return( err);
4703 }
4704
4705 kern_return_t shim_io_async_method_scalarI_structureI(
4706 IOExternalAsyncMethod * method,
4707 IOService * object,
4708 mach_port_t asyncWakePort,
4709 io_user_reference_t * asyncReference,
4710 uint32_t asyncReferenceCount,
4711 const io_user_scalar_t * input,
4712 mach_msg_type_number_t inputCount,
4713 io_struct_inband_t inputStruct,
4714 mach_msg_type_number_t inputStructCount )
4715 {
4716 IOAsyncMethod func;
4717 uint32_t i;
4718 IOReturn err = kIOReturnBadArgument;
4719 io_async_ref_t reference;
4720
4721 for (i = 0; i < asyncReferenceCount; i++)
4722 reference[i] = REF32(asyncReference[i]);
4723
4724 do
4725 {
4726 if (inputCount != method->count0)
4727 {
4728 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4729 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4730 continue;
4731 }
4732 if( (kIOUCVariableStructureSize != method->count1)
4733 && (inputStructCount != method->count1))
4734 {
4735 IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4736 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4737 continue;
4738 }
4739
4740 func = method->func;
4741
4742 switch( inputCount) {
4743
4744 case 5:
4745 err = (object->*func)( reference,
4746 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4747 ARG32(input[3]), ARG32(input[4]),
4748 inputStruct );
4749 break;
4750 case 4:
4751 err = (object->*func)( reference,
4752 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4753 ARG32(input[3]),
4754 inputStruct, (void *)(uintptr_t)inputStructCount );
4755 break;
4756 case 3:
4757 err = (object->*func)( reference,
4758 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4759 inputStruct, (void *)(uintptr_t)inputStructCount,
4760 0 );
4761 break;
4762 case 2:
4763 err = (object->*func)( reference,
4764 ARG32(input[0]), ARG32(input[1]),
4765 inputStruct, (void *)(uintptr_t)inputStructCount,
4766 0, 0 );
4767 break;
4768 case 1:
4769 err = (object->*func)( reference,
4770 ARG32(input[0]),
4771 inputStruct, (void *)(uintptr_t)inputStructCount,
4772 0, 0, 0 );
4773 break;
4774 case 0:
4775 err = (object->*func)( reference,
4776 inputStruct, (void *)(uintptr_t)inputStructCount,
4777 0, 0, 0, 0 );
4778 break;
4779
4780 default:
4781 IOLog("%s: Bad method table\n", object->getName());
4782 }
4783 }
4784 while (false);
4785
4786 return( err);
4787 }
4788
4789 /* Routine io_connect_method_structureI_structureO */
4790 kern_return_t is_io_connect_method_structureI_structureO(
4791 io_object_t connect,
4792 uint32_t index,
4793 io_struct_inband_t input,
4794 mach_msg_type_number_t inputCount,
4795 io_struct_inband_t output,
4796 mach_msg_type_number_t * outputCount )
4797 {
4798 mach_msg_type_number_t scalar_outputCnt = 0;
4799 mach_vm_size_t ool_output_size = 0;
4800
4801 return (is_io_connect_method(connect, index,
4802 NULL, 0,
4803 input, inputCount,
4804 0, 0,
4805 output, outputCount,
4806 NULL, &scalar_outputCnt,
4807 0, &ool_output_size));
4808 }
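/*
 * A minimal user-space sketch of the structure/structure path above; buffers
 * too large for io_struct_inband_t are sent out-of-line by the IOKitLib
 * wrapper and arrive via the descriptor arguments of is_io_connect_method().
 * MyRequest, MyReply and kMyStructSelector are hypothetical:
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   kern_return_t call_struct(io_connect_t connect)
 *   {
 *       struct MyRequest { uint32_t channel; } request = { 3 };
 *       struct MyReply   { uint32_t status;  } reply   = { 0 };
 *       size_t replySize = sizeof(reply);
 *
 *       return IOConnectCallStructMethod(connect, kMyStructSelector,
 *                                        &request, sizeof(request),
 *                                        &reply, &replySize);
 *   }
 */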
4809
4810 kern_return_t shim_io_connect_method_structureI_structureO(
4811 IOExternalMethod * method,
4812 IOService * object,
4813 io_struct_inband_t input,
4814 mach_msg_type_number_t inputCount,
4815 io_struct_inband_t output,
4816 IOByteCount * outputCount )
4817 {
4818 IOMethod func;
4819 IOReturn err = kIOReturnBadArgument;
4820
4821 do
4822 {
4823 if( (kIOUCVariableStructureSize != method->count0)
4824 && (inputCount != method->count0))
4825 {
4826 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4827 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4828 continue;
4829 }
4830 if( (kIOUCVariableStructureSize != method->count1)
4831 && (*outputCount != method->count1))
4832 {
4833 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4834 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4835 continue;
4836 }
4837
4838 func = method->func;
4839
4840 if( method->count1) {
4841 if( method->count0) {
4842 err = (object->*func)( input, output,
4843 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4844 } else {
4845 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4846 }
4847 } else {
4848 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4849 }
4850 }
4851 while( false);
4852
4853
4854 return( err);
4855 }
4856
4857 kern_return_t shim_io_async_method_structureI_structureO(
4858 IOExternalAsyncMethod * method,
4859 IOService * object,
4860 mach_port_t asyncWakePort,
4861 io_user_reference_t * asyncReference,
4862 uint32_t asyncReferenceCount,
4863 io_struct_inband_t input,
4864 mach_msg_type_number_t inputCount,
4865 io_struct_inband_t output,
4866 mach_msg_type_number_t * outputCount )
4867 {
4868 IOAsyncMethod func;
4869 uint32_t i;
4870 IOReturn err;
4871 io_async_ref_t reference;
4872
4873 for (i = 0; i < asyncReferenceCount; i++)
4874 reference[i] = REF32(asyncReference[i]);
4875
4876 err = kIOReturnBadArgument;
4877 do
4878 {
4879 if( (kIOUCVariableStructureSize != method->count0)
4880 && (inputCount != method->count0))
4881 {
4882 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4883 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4884 continue;
4885 }
4886 if( (kIOUCVariableStructureSize != method->count1)
4887 && (*outputCount != method->count1))
4888 {
4889 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4890 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4891 continue;
4892 }
4893
4894 func = method->func;
4895
4896 if( method->count1) {
4897 if( method->count0) {
4898 err = (object->*func)( reference,
4899 input, output,
4900 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4901 } else {
4902 err = (object->*func)( reference,
4903 output, outputCount, 0, 0, 0, 0 );
4904 }
4905 } else {
4906 err = (object->*func)( reference,
4907 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4908 }
4909 }
4910 while( false);
4911
4912 return( err);
4913 }
4914
4915 #if !NO_KEXTD
4916 bool gIOKextdClearedBusy = false;
4917 #endif
4918
4919 /* Routine io_catalog_send_data */
4920 kern_return_t is_io_catalog_send_data(
4921 mach_port_t master_port,
4922 uint32_t flag,
4923 io_buf_ptr_t inData,
4924 mach_msg_type_number_t inDataCount,
4925 kern_return_t * result)
4926 {
4927 #if NO_KEXTD
4928 return kIOReturnNotPrivileged;
4929 #else /* NO_KEXTD */
4930 OSObject * obj = 0;
4931 vm_offset_t data;
4932 kern_return_t kr = kIOReturnError;
4933
4934 //printf("io_catalog_send_data called. flag: %d\n", flag);
4935
4936 if( master_port != master_device_port)
4937 return kIOReturnNotPrivileged;
4938
4939 if( (flag != kIOCatalogRemoveKernelLinker &&
4940 flag != kIOCatalogKextdActive &&
4941 flag != kIOCatalogKextdFinishedLaunching) &&
4942 ( !inData || !inDataCount) )
4943 {
4944 return kIOReturnBadArgument;
4945 }
4946
4947 if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
4948 {
4949 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4950 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4951 OSSafeReleaseNULL(taskName);
4952 // For now, fake success to not break applications relying on this function succeeding.
4953 // See <rdar://problem/32554970> for more details.
4954 return kIOReturnSuccess;
4955 }
4956
4957 if (inData) {
4958 vm_map_offset_t map_data;
4959
4960 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4961 return( kIOReturnMessageTooLarge);
4962
4963 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4964 data = CAST_DOWN(vm_offset_t, map_data);
4965
4966 if( kr != KERN_SUCCESS)
4967 return kr;
4968
4969 // once vm_map_copyout() has succeeded the copy object is consumed, so any later failure is reported via *result while this MIG routine itself returns KERN_SUCCESS
4970
4971 if( inDataCount ) {
4972 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4973 vm_deallocate( kernel_map, data, inDataCount );
4974 if( !obj) {
4975 *result = kIOReturnNoMemory;
4976 return( KERN_SUCCESS);
4977 }
4978 }
4979 }
4980
4981 switch ( flag ) {
4982 case kIOCatalogResetDrivers:
4983 case kIOCatalogResetDriversNoMatch: {
4984 OSArray * array;
4985
4986 array = OSDynamicCast(OSArray, obj);
4987 if (array) {
4988 if ( !gIOCatalogue->resetAndAddDrivers(array,
4989 flag == kIOCatalogResetDrivers) ) {
4990
4991 kr = kIOReturnError;
4992 }
4993 } else {
4994 kr = kIOReturnBadArgument;
4995 }
4996 }
4997 break;
4998
4999 case kIOCatalogAddDrivers:
5000 case kIOCatalogAddDriversNoMatch: {
5001 OSArray * array;
5002
5003 array = OSDynamicCast(OSArray, obj);
5004 if ( array ) {
5005 if ( !gIOCatalogue->addDrivers( array ,
5006 flag == kIOCatalogAddDrivers) ) {
5007 kr = kIOReturnError;
5008 }
5009 }
5010 else {
5011 kr = kIOReturnBadArgument;
5012 }
5013 }
5014 break;
5015
5016 case kIOCatalogRemoveDrivers:
5017 case kIOCatalogRemoveDriversNoMatch: {
5018 OSDictionary * dict;
5019
5020 dict = OSDynamicCast(OSDictionary, obj);
5021 if ( dict ) {
5022 if ( !gIOCatalogue->removeDrivers( dict,
5023 flag == kIOCatalogRemoveDrivers ) ) {
5024 kr = kIOReturnError;
5025 }
5026 }
5027 else {
5028 kr = kIOReturnBadArgument;
5029 }
5030 }
5031 break;
5032
5033 case kIOCatalogStartMatching: {
5034 OSDictionary * dict;
5035
5036 dict = OSDynamicCast(OSDictionary, obj);
5037 if ( dict ) {
5038 if ( !gIOCatalogue->startMatching( dict ) ) {
5039 kr = kIOReturnError;
5040 }
5041 }
5042 else {
5043 kr = kIOReturnBadArgument;
5044 }
5045 }
5046 break;
5047
5048 case kIOCatalogRemoveKernelLinker:
5049 kr = KERN_NOT_SUPPORTED;
5050 break;
5051
5052 case kIOCatalogKextdActive:
5053 #if !NO_KEXTD
5054 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
5055 OSKext::setKextdActive();
5056
5057 /* Dump all nonloaded startup extensions; kextd will now send them
5058 * down on request.
5059 */
5060 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
5061 #endif
5062 kr = kIOReturnSuccess;
5063 break;
5064
5065 case kIOCatalogKextdFinishedLaunching: {
5066 #if !NO_KEXTD
5067 if (!gIOKextdClearedBusy) {
5068 IOService * serviceRoot = IOService::getServiceRoot();
5069 if (serviceRoot) {
5070 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
5071 serviceRoot->adjustBusy(-1);
5072 gIOKextdClearedBusy = true;
5073 }
5074 }
5075 #endif
5076 kr = kIOReturnSuccess;
5077 }
5078 break;
5079
5080 default:
5081 kr = kIOReturnBadArgument;
5082 break;
5083 }
5084
5085 if (obj) obj->release();
5086
5087 *result = kr;
5088 return( KERN_SUCCESS);
5089 #endif /* NO_KEXTD */
5090 }
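/*
 * User space reaches this routine through IOKitLib's IOCatalogueSendData(),
 * normally from kextd/kextcache; note the entitlement check above, which
 * fakes success for unentitled callers.  A hedged sketch of the buffer-less
 * kextd-active handshake (the wrapper's signature and the flag constant are
 * assumed from the IOKit user headers):
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   kern_return_t announce_kextd_active(void)
 *   {
 *       return IOCatalogueSendData(kIOMasterPortDefault,
 *                                  kIOCatalogKextdActive, NULL, 0);
 *   }
 */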
5091
5092 /* Routine io_catalog_terminate */
5093 kern_return_t is_io_catalog_terminate(
5094 mach_port_t master_port,
5095 uint32_t flag,
5096 io_name_t name )
5097 {
5098 kern_return_t kr;
5099
5100 if( master_port != master_device_port )
5101 return kIOReturnNotPrivileged;
5102
5103 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5104 kIOClientPrivilegeAdministrator );
5105 if( kIOReturnSuccess != kr)
5106 return( kr );
5107
5108 switch ( flag ) {
5109 #if !defined(SECURE_KERNEL)
5110 case kIOCatalogServiceTerminate:
5111 OSIterator * iter;
5112 IOService * service;
5113
5114 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5115 kIORegistryIterateRecursively);
5116 if ( !iter )
5117 return kIOReturnNoMemory;
5118
5119 do {
5120 iter->reset();
5121 while( (service = (IOService *)iter->getNextObject()) ) {
5122 if( service->metaCast(name)) {
5123 if ( !service->terminate( kIOServiceRequired
5124 | kIOServiceSynchronous) ) {
5125 kr = kIOReturnUnsupported;
5126 break;
5127 }
5128 }
5129 }
5130 } while( !service && !iter->isValid());
5131 iter->release();
5132 break;
5133
5134 case kIOCatalogModuleUnload:
5135 case kIOCatalogModuleTerminate:
5136 kr = gIOCatalogue->terminateDriversForModule(name,
5137 flag == kIOCatalogModuleUnload);
5138 break;
5139 #endif
5140
5141 default:
5142 kr = kIOReturnBadArgument;
5143 break;
5144 }
5145
5146 return( kr );
5147 }
5148
5149 /* Routine io_catalog_get_data */
5150 kern_return_t is_io_catalog_get_data(
5151 mach_port_t master_port,
5152 uint32_t flag,
5153 io_buf_ptr_t *outData,
5154 mach_msg_type_number_t *outDataCount)
5155 {
5156 kern_return_t kr = kIOReturnSuccess;
5157 OSSerialize * s;
5158
5159 if( master_port != master_device_port)
5160 return kIOReturnNotPrivileged;
5161
5162 //printf("io_catalog_get_data called. flag: %d\n", flag);
5163
5164 s = OSSerialize::withCapacity(4096);
5165 if ( !s )
5166 return kIOReturnNoMemory;
5167
5168 kr = gIOCatalogue->serializeData(flag, s);
5169
5170 if ( kr == kIOReturnSuccess ) {
5171 vm_offset_t data;
5172 vm_map_copy_t copy;
5173 vm_size_t size;
5174
5175 size = s->getLength();
5176 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5177 if ( kr == kIOReturnSuccess ) {
5178 bcopy(s->text(), (void *)data, size);
5179 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5180 (vm_map_size_t)size, true, &copy);
5181 *outData = (char *)copy;
5182 *outDataCount = size;
5183 }
5184 }
5185
5186 s->release();
5187
5188 return kr;
5189 }
5190
5191 /* Routine io_catalog_get_gen_count */
5192 kern_return_t is_io_catalog_get_gen_count(
5193 mach_port_t master_port,
5194 uint32_t *genCount)
5195 {
5196 if( master_port != master_device_port)
5197 return kIOReturnNotPrivileged;
5198
5199 //printf("io_catalog_get_gen_count called.\n");
5200
5201 if ( !genCount )
5202 return kIOReturnBadArgument;
5203
5204 *genCount = gIOCatalogue->getGenerationCount();
5205
5206 return kIOReturnSuccess;
5207 }
5208
5209 /* Routine io_catalog_module_loaded.
5210 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5211 */
5212 kern_return_t is_io_catalog_module_loaded(
5213 mach_port_t master_port,
5214 io_name_t name)
5215 {
5216 if( master_port != master_device_port)
5217 return kIOReturnNotPrivileged;
5218
5219 //printf("io_catalog_module_loaded called. name %s\n", name);
5220
5221 if ( !name )
5222 return kIOReturnBadArgument;
5223
5224 gIOCatalogue->moduleHasLoaded(name);
5225
5226 return kIOReturnSuccess;
5227 }
5228
5229 kern_return_t is_io_catalog_reset(
5230 mach_port_t master_port,
5231 uint32_t flag)
5232 {
5233 if( master_port != master_device_port)
5234 return kIOReturnNotPrivileged;
5235
5236 switch ( flag ) {
5237 case kIOCatalogResetDefault:
5238 gIOCatalogue->reset();
5239 break;
5240
5241 default:
5242 return kIOReturnBadArgument;
5243 }
5244
5245 return kIOReturnSuccess;
5246 }
5247
5248 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5249 {
5250 kern_return_t result = kIOReturnBadArgument;
5251 IOUserClient *userClient;
5252
5253 if ((userClient = OSDynamicCast(IOUserClient,
5254 iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) {
5255 IOExternalTrap *trap;
5256 IOService *target = NULL;
5257
5258 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5259
5260 if (trap && target) {
5261 IOTrap func;
5262
5263 func = trap->func;
5264
5265 if (func) {
5266 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5267 }
5268 }
5269
5270 iokit_remove_connect_reference(userClient);
5271 }
5272
5273 return result;
5274 }
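/*
 * iokit_user_client_trap() is the Mach-trap fast path: no MIG marshalling,
 * just six register-sized arguments.  A hedged sketch of both halves (the
 * class, trap table and method are hypothetical); user space invokes the
 * trap with IOConnectTrap6() from IOKitLib:
 *
 *   // kernel side: publish a single trap at index 0
 *   IOExternalTrap *
 *   MyUserClient::getTargetAndTrapForIndex( IOService ** targetP, UInt32 index )
 *   {
 *       static const IOExternalTrap sTraps[] = {
 *           { NULL, (IOTrap) &MyUserClient::fastPathTrap },
 *       };
 *       if (index >= (sizeof(sTraps) / sizeof(sTraps[0])))
 *           return NULL;
 *       *targetP = this;
 *       return (IOExternalTrap *) &sTraps[index];
 *   }
 *
 *   // user side
 *   kern_return_t ret = IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6);
 */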
5275
5276 /* Routine io_device_tree_entry_exists_with_name */
5277 kern_return_t is_io_device_tree_entry_exists_with_name(
5278 mach_port_t master_port,
5279 io_name_t name,
5280 boolean_t *exists )
5281 {
5282 OSCollectionIterator *iter;
5283
5284 if (master_port != master_device_port)
5285 return (kIOReturnNotPrivileged);
5286
5287 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5288 *exists = iter && iter->getNextObject();
5289 OSSafeReleaseNULL(iter);
5290
5291 return kIOReturnSuccess;
5292 }
5293
5294 } /* extern "C" */
5295
5296 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5297 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5298 {
5299 IOReturn err;
5300 IOService * object;
5301 IOByteCount structureOutputSize;
5302
5303 if (dispatch)
5304 {
5305 uint32_t count;
5306 count = dispatch->checkScalarInputCount;
5307 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5308 {
5309 return (kIOReturnBadArgument);
5310 }
5311
5312 count = dispatch->checkStructureInputSize;
5313 if ((kIOUCVariableStructureSize != count)
5314 && (count != ((args->structureInputDescriptor)
5315 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5316 {
5317 return (kIOReturnBadArgument);
5318 }
5319
5320 count = dispatch->checkScalarOutputCount;
5321 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5322 {
5323 return (kIOReturnBadArgument);
5324 }
5325
5326 count = dispatch->checkStructureOutputSize;
5327 if ((kIOUCVariableStructureSize != count)
5328 && (count != ((args->structureOutputDescriptor)
5329 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5330 {
5331 return (kIOReturnBadArgument);
5332 }
5333
5334 if (dispatch->function)
5335 err = (*dispatch->function)(target, reference, args);
5336 else
5337 err = kIOReturnNoCompletion; /* implementor can dispatch */
5338
5339 return (err);
5340 }
5341
5342
5343 // pre-Leopard APIs don't do ool structs
5344 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5345 {
5346 err = kIOReturnIPCError;
5347 return (err);
5348 }
5349
5350 structureOutputSize = args->structureOutputSize;
5351
5352 if (args->asyncWakePort)
5353 {
5354 IOExternalAsyncMethod * method;
5355 object = 0;
5356 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5357 return (kIOReturnUnsupported);
5358
5359 if (kIOUCForegroundOnly & method->flags)
5360 {
5361 if (task_is_gpu_denied(current_task()))
5362 return (kIOReturnNotPermitted);
5363 }
5364
5365 switch (method->flags & kIOUCTypeMask)
5366 {
5367 case kIOUCScalarIStructI:
5368 err = shim_io_async_method_scalarI_structureI( method, object,
5369 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5370 args->scalarInput, args->scalarInputCount,
5371 (char *)args->structureInput, args->structureInputSize );
5372 break;
5373
5374 case kIOUCScalarIScalarO:
5375 err = shim_io_async_method_scalarI_scalarO( method, object,
5376 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5377 args->scalarInput, args->scalarInputCount,
5378 args->scalarOutput, &args->scalarOutputCount );
5379 break;
5380
5381 case kIOUCScalarIStructO:
5382 err = shim_io_async_method_scalarI_structureO( method, object,
5383 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5384 args->scalarInput, args->scalarInputCount,
5385 (char *) args->structureOutput, &args->structureOutputSize );
5386 break;
5387
5388
5389 case kIOUCStructIStructO:
5390 err = shim_io_async_method_structureI_structureO( method, object,
5391 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5392 (char *)args->structureInput, args->structureInputSize,
5393 (char *) args->structureOutput, &args->structureOutputSize );
5394 break;
5395
5396 default:
5397 err = kIOReturnBadArgument;
5398 break;
5399 }
5400 }
5401 else
5402 {
5403 IOExternalMethod * method;
5404 object = 0;
5405 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5406 return (kIOReturnUnsupported);
5407
5408 if (kIOUCForegroundOnly & method->flags)
5409 {
5410 if (task_is_gpu_denied(current_task()))
5411 return (kIOReturnNotPermitted);
5412 }
5413
5414 switch (method->flags & kIOUCTypeMask)
5415 {
5416 case kIOUCScalarIStructI:
5417 err = shim_io_connect_method_scalarI_structureI( method, object,
5418 args->scalarInput, args->scalarInputCount,
5419 (char *) args->structureInput, args->structureInputSize );
5420 break;
5421
5422 case kIOUCScalarIScalarO:
5423 err = shim_io_connect_method_scalarI_scalarO( method, object,
5424 args->scalarInput, args->scalarInputCount,
5425 args->scalarOutput, &args->scalarOutputCount );
5426 break;
5427
5428 case kIOUCScalarIStructO:
5429 err = shim_io_connect_method_scalarI_structureO( method, object,
5430 args->scalarInput, args->scalarInputCount,
5431 (char *) args->structureOutput, &structureOutputSize );
5432 break;
5433
5434
5435 case kIOUCStructIStructO:
5436 err = shim_io_connect_method_structureI_structureO( method, object,
5437 (char *) args->structureInput, args->structureInputSize,
5438 (char *) args->structureOutput, &structureOutputSize );
5439 break;
5440
5441 default:
5442 err = kIOReturnBadArgument;
5443 break;
5444 }
5445 }
5446
5447 args->structureOutputSize = structureOutputSize;
5448
5449 return (err);
5450 }
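/*
 * Modern user clients skip the shim paths entirely by overriding
 * externalMethod() and supplying an IOExternalMethodDispatch per selector;
 * the count checks above then enforce the declared argument shapes.  A hedged
 * sketch (MyUserClient, sMyMethods and sAddNumbers are hypothetical):
 *
 *   static const IOExternalMethodDispatch sMyMethods[] = {
 *       // selector 0: 2 scalar in, 0 struct in, 1 scalar out, 0 struct out
 *       { &MyUserClient::sAddNumbers, 2, 0, 1, 0 },
 *   };
 *
 *   IOReturn MyUserClient::externalMethod( uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference )
 *   {
 *       if (selector < (sizeof(sMyMethods) / sizeof(sMyMethods[0]))) {
 *           dispatch = (IOExternalMethodDispatch *) &sMyMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *   }
 *
 *   IOReturn MyUserClient::sAddNumbers( OSObject * target, void * reference,
 *                                       IOExternalMethodArguments * args )
 *   {
 *       args->scalarOutput[0] = args->scalarInput[0] + args->scalarInput[1];
 *       return kIOReturnSuccess;
 *   }
 */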
5451
5452 #if __LP64__
5453 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5454 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5455 #else
5456 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5457 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5458 #endif
5459 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5460 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5461 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5462 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5463 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5464 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5465 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5466 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5467 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5468 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5469 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5470 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5471 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5472 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5473