]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
12ae32416702a74a2c35c1e9403f20c5a111ba18
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/system.h>
45 #include <libkern/OSDebug.h>
46 #include <sys/proc.h>
47 #include <sys/kauth.h>
48 #include <sys/codesign.h>
49
50 #include <mach/sdt.h>
51
52 #if CONFIG_MACF
53
54 extern "C" {
55 #include <security/mac_framework.h>
56 };
57 #include <sys/kauth.h>
58
59 #define IOMACF_LOG 0
60
61 #endif /* CONFIG_MACF */
62
63 #include <IOKit/assert.h>
64
65 #include "IOServicePrivate.h"
66 #include "IOKitKernelInternal.h"
67
68 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
69 #define SCALAR32(x) ((uint32_t )x)
70 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
71 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
72 #define REF32(x) ((int)(x))
73
74 enum{
75 kIOUCAsync0Flags = 3ULL,
76 kIOUCAsync64Flag = 1ULL,
77 kIOUCAsyncErrorLoggedFlag = 2ULL
78 };
79
80 #if IOKITSTATS
81
82 #define IOStatisticsRegisterCounter() \
83 do { \
84 reserved->counter = IOStatistics::registerUserClient(this); \
85 } while (0)
86
87 #define IOStatisticsUnregisterCounter() \
88 do { \
89 if (reserved) \
90 IOStatistics::unregisterUserClient(reserved->counter); \
91 } while (0)
92
93 #define IOStatisticsClientCall() \
94 do { \
95 IOStatistics::countUserClientCall(client); \
96 } while (0)
97
98 #else
99
100 #define IOStatisticsRegisterCounter()
101 #define IOStatisticsUnregisterCounter()
102 #define IOStatisticsClientCall()
103
104 #endif /* IOKITSTATS */
105
106 #if DEVELOPMENT || DEBUG
107
108 #define FAKE_STACK_FRAME(a) \
109 const void ** __frameptr; \
110 const void * __retaddr; \
111 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
112 __retaddr = __frameptr[1]; \
113 __frameptr[1] = (a);
114
115 #define FAKE_STACK_FRAME_END() \
116 __frameptr[1] = __retaddr;
117
118 #else /* DEVELOPMENT || DEBUG */
119
120 #define FAKE_STACK_FRAME(a)
121 #define FAKE_STACK_FRAME_END()
122
123 #endif /* DEVELOPMENT || DEBUG */
124
125 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
126 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
127
128 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
129
130 extern "C" {
131 #include <mach/mach_traps.h>
132 #include <vm/vm_map.h>
133 } /* extern "C" */
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
138
// IOMachPort pairs a kernel OSObject with its user-visible mach port,
// one instance per (object, port-type). Instances live in the global
// gIO*Ports dictionaries, keyed by the object pointer itself.
class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    OSObject * object;   // NOTE(review): not assigned anywhere in this file — confirm use
    ipc_port_t port;     // backing mach port; destroyed in free()
    UInt32 mscount;      // make-send count handed out for this port
    UInt8 holdDestroy;   // when set, releasePortForObject() keeps the mapping

    // Look up (or create) the retained IOMachPort for obj.
    static IOMachPort * portForObject( OSObject * obj,
        ipc_kobject_type_t type );
    // Handle a no-more-senders notification; returns true when the
    // mapping was actually destroyed (mscount unchanged since notify).
    static bool noMoreSendersForObject( OSObject * obj,
        ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    // Drop the object->port mapping unless holdDestroy is set.
    static void releasePortForObject( OSObject * obj,
        ipc_kobject_type_t type );
    // Arrange for the mapping to survive releasePortForObject().
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    // Global dictionary for a given kobject port type.
    static OSDictionary * dictForType( ipc_kobject_type_t type );

    // Create a send right for obj's port in the given task's space.
    static mach_port_name_t makeSendRightForTask( task_t task,
        io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};
163
164 #define super OSObject
165 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
166
167 static IOLock * gIOObjectPortLock;
168
169 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
170
171 // not in dictForType() for debugging ease
172 static OSDictionary * gIOObjectPorts;
173 static OSDictionary * gIOConnectPorts;
174 static OSDictionary * gIOIdentifierPorts;
175
// Returns the global object->IOMachPort dictionary for a kobject type,
// lazily allocating it on first use. Unknown types panic.
// NOTE(review): the visible callers all hold gIOObjectPortLock around
// this call; the lazy creation is not otherwise synchronized — confirm.
OSDictionary *
IOMachPort::dictForType( ipc_kobject_type_t type )
{
    OSDictionary ** dict;

    switch (type) {
    case IKOT_IOKIT_OBJECT:
        dict = &gIOObjectPorts;
        break;
    case IKOT_IOKIT_CONNECT:
        dict = &gIOConnectPorts;
        break;
    case IKOT_IOKIT_IDENT:
        dict = &gIOIdentifierPorts;
        break;
    default:
        panic("dictForType %d", type);
        dict = NULL;
        break;
    }

    // First use of this type: create its dictionary.
    if (0 == *dict) {
        *dict = OSDictionary::withCapacity( 1 );
    }

    return *dict;
}
203
204 IOMachPort *
205 IOMachPort::portForObject( OSObject * obj,
206 ipc_kobject_type_t type )
207 {
208 IOMachPort * inst = 0;
209 OSDictionary * dict;
210
211 IOTakeLock( gIOObjectPortLock);
212
213 do {
214 dict = dictForType( type );
215 if (!dict) {
216 continue;
217 }
218
219 if ((inst = (IOMachPort *)
220 dict->getObject((const OSSymbol *) obj ))) {
221 inst->mscount++;
222 inst->retain();
223 continue;
224 }
225
226 inst = new IOMachPort;
227 if (inst && !inst->init()) {
228 inst = 0;
229 continue;
230 }
231
232 inst->port = iokit_alloc_object_port( obj, type );
233 if (inst->port) {
234 // retains obj
235 dict->setObject((const OSSymbol *) obj, inst );
236 inst->mscount++;
237 } else {
238 inst->release();
239 inst = 0;
240 }
241 } while (false);
242
243 IOUnlock( gIOObjectPortLock);
244
245 return inst;
246 }
247
// Handles a no-more-senders notification for obj's port. The mapping
// is destroyed only if no new send rights were minted since the
// notification was generated (mscount comparison); otherwise the
// current count is reported back so ipc can re-arm the notification.
// Returns true when the mapping was removed (or none existed).
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary * dict;
    IOMachPort * machPort;
    IOUserClient * uc;
    bool destroyed = true;

    IOTakeLock( gIOObjectPortLock);

    if ((dict = dictForType( type ))) {
        // Keep obj alive across the dictionary removal below.
        obj->retain();

        machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj );
        if (machPort) {
            // Stale notification if more send rights exist now.
            destroyed = (machPort->mscount <= *mscount);
            if (!destroyed) {
                *mscount = machPort->mscount;
            } else {
                // Tell user clients no senders remain before the
                // mapping (which retains obj) disappears.
                if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
                    uc->noMoreSenders();
                }
                dict->removeObject((const OSSymbol *) obj );
            }
        }
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);

    return destroyed;
}
281
282 void
283 IOMachPort::releasePortForObject( OSObject * obj,
284 ipc_kobject_type_t type )
285 {
286 OSDictionary * dict;
287 IOMachPort * machPort;
288
289 assert(IKOT_IOKIT_CONNECT != type);
290
291 IOTakeLock( gIOObjectPortLock);
292
293 if ((dict = dictForType( type ))) {
294 obj->retain();
295 machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj );
296 if (machPort && !machPort->holdDestroy) {
297 dict->removeObject((const OSSymbol *) obj );
298 }
299 obj->release();
300 }
301
302 IOUnlock( gIOObjectPortLock);
303 }
304
305 void
306 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
307 {
308 OSDictionary * dict;
309 IOMachPort * machPort;
310
311 IOLockLock( gIOObjectPortLock );
312
313 if ((dict = dictForType( type ))) {
314 machPort = (IOMachPort *) dict->getObject((const OSSymbol *) obj );
315 if (machPort) {
316 machPort->holdDestroy = true;
317 }
318 }
319
320 IOLockUnlock( gIOObjectPortLock );
321 }
322
// Severs user-space references to obj. For a user client that still
// has live memory mappings, the connect port is not destroyed:
// instead the mapping entry is re-keyed under uc->mappings and the
// kobject port is retargeted, keeping the port alive for the maps.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // panther, 3160200
    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);
    obj->retain();

    if ((dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT ))) {
        IOMachPort * port;
        port = (IOMachPort *) dict->getObject((const OSSymbol *) obj );
        if (port) {
            IOUserClient * uc;
            if ((uc = OSDynamicCast(IOUserClient, obj))) {
                uc->noMoreSenders();
                if (uc->mappings) {
                    // Hand the port entry over to the mappings object
                    // and point the mach port's kobject at it.
                    dict->setObject((const OSSymbol *) uc->mappings, port);
                    iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

                    uc->mappings->release();
                    uc->mappings = 0;
                }
            }
            dict->removeObject((const OSSymbol *) obj );
        }
    }
    obj->release();
    IOUnlock( gIOObjectPortLock);
}
357
358 mach_port_name_t
359 IOMachPort::makeSendRightForTask( task_t task,
360 io_object_t obj, ipc_kobject_type_t type )
361 {
362 return iokit_make_send_right( task, obj, type );
363 }
364
365 void
366 IOMachPort::free( void )
367 {
368 if (port) {
369 iokit_destroy_object_port( port );
370 }
371 super::free();
372 }
373
374 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
375
// Lock-protected wrapper around an OSIterator handed out to user
// space. getNextObject() is disabled; clients use the retaining
// copyNextObject() instead.
class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject * userIteratorObject; // wrapped iterator (reference consumed by withIterator)
    IOLock * lock;                 // serializes all iterator access

    static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    // Disabled (asserts); see copyNextObject().
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    // Returns the next object with a retain the caller owns.
    virtual OSObject * copyNextObject();
};
392
393 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
394
// Base class for user-space notification objects. Reuses the
// inherited userIteratorObject slot (aliased as holdNotify) to keep
// the IONotifier alive until the notification is torn down.
class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

#define holdNotify userIteratorObject

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    // Swap in a new notifier; any previous one is removed.
    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
410
411 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
412
413 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
414
415 IOUserIterator *
416 IOUserIterator::withIterator(OSIterator * iter)
417 {
418 IOUserIterator * me;
419
420 if (!iter) {
421 return 0;
422 }
423
424 me = new IOUserIterator;
425 if (me && !me->init()) {
426 me->release();
427 me = 0;
428 }
429 if (!me) {
430 return me;
431 }
432 me->userIteratorObject = iter;
433
434 return me;
435 }
436
437 bool
438 IOUserIterator::init( void )
439 {
440 if (!OSObject::init()) {
441 return false;
442 }
443
444 lock = IOLockAlloc();
445 if (!lock) {
446 return false;
447 }
448
449 return true;
450 }
451
452 void
453 IOUserIterator::free()
454 {
455 if (userIteratorObject) {
456 userIteratorObject->release();
457 }
458 if (lock) {
459 IOLockFree(lock);
460 }
461 OSObject::free();
462 }
463
464 void
465 IOUserIterator::reset()
466 {
467 IOLockLock(lock);
468 assert(OSDynamicCast(OSIterator, userIteratorObject));
469 ((OSIterator *)userIteratorObject)->reset();
470 IOLockUnlock(lock);
471 }
472
473 bool
474 IOUserIterator::isValid()
475 {
476 bool ret;
477
478 IOLockLock(lock);
479 assert(OSDynamicCast(OSIterator, userIteratorObject));
480 ret = ((OSIterator *)userIteratorObject)->isValid();
481 IOLockUnlock(lock);
482
483 return ret;
484 }
485
486 OSObject *
487 IOUserIterator::getNextObject()
488 {
489 assert(false);
490 return NULL;
491 }
492
493 OSObject *
494 IOUserIterator::copyNextObject()
495 {
496 OSObject * ret = NULL;
497
498 IOLockLock(lock);
499 if (userIteratorObject) {
500 ret = ((OSIterator *)userIteratorObject)->getNextObject();
501 if (ret) {
502 ret->retain();
503 }
504 }
505 IOLockUnlock(lock);
506
507 return ret;
508 }
509
510 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
511 extern "C" {
512 // functions called from osfmk/device/iokit_rpc.c
513
514 void
515 iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
516 {
517 IOUserClient * uc;
518
519 if (!obj) {
520 return;
521 }
522
523 if ((IKOT_IOKIT_CONNECT == type)
524 && (uc = OSDynamicCast(IOUserClient, obj))) {
525 OSIncrementAtomic(&uc->__ipc);
526 }
527
528 obj->retain();
529 }
530
531 void
532 iokit_remove_reference( io_object_t obj )
533 {
534 if (obj) {
535 obj->release();
536 }
537 }
538
// Drops one ipc reference on a connect object. If this was the last
// ipc reference on an inactive user client whose finalize was
// deferred (see IOUserClient::finalizeUserReferences), schedule the
// finalize now, outside the lock.
void
iokit_remove_connect_reference( io_object_t obj )
{
    IOUserClient * uc;
    bool finalize = false;

    if (!obj) {
        return;
    }

    if ((uc = OSDynamicCast(IOUserClient, obj))) {
        // OSDecrementAtomic returns the previous value: 1 means the
        // count just reached zero.
        if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
            IOLockLock(gIOObjectPortLock);
            // Claim the deferred-finalize flag atomically with the lock.
            if ((finalize = uc->__ipcFinal)) {
                uc->__ipcFinal = false;
            }
            IOLockUnlock(gIOObjectPortLock);
        }
        if (finalize) {
            uc->scheduleFinalize(true);
        }
    }

    obj->release();
}
564
// Called during client termination. If ipc references are still
// outstanding, records that a finalize is pending (__ipcFinal) and
// returns false; iokit_remove_connect_reference completes it when the
// count drains. Returns true when finalize may proceed immediately.
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
    IOUserClient * uc;
    bool ok = true;

    if ((uc = OSDynamicCast(IOUserClient, obj))) {
        IOLockLock(gIOObjectPortLock);
        if ((uc->__ipcFinal = (0 != uc->__ipc))) {
            ok = false;
        }
        IOLockUnlock(gIOObjectPortLock);
    }
    return ok;
}
580
581 ipc_port_t
582 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
583 {
584 IOMachPort * machPort;
585 ipc_port_t port;
586
587 if ((machPort = IOMachPort::portForObject( obj, type ))) {
588 port = machPort->port;
589 if (port) {
590 iokit_retain_port( port );
591 }
592
593 machPort->release();
594 } else {
595 port = NULL;
596 }
597
598 return port;
599 }
600
// No-senders handler: all user-space send rights for obj's port are
// gone. Destroys the port mapping (if not stale) and notifies the
// object: user clients get clientDied(), memory maps taskDied(), and
// user notifications are disarmed.
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient * client;
    IOMemoryMap * map;
    IOUserNotification * notify;

    // Stale notification (new send rights were minted since it was
    // generated): keep the mapping and tell ipc to re-arm.
    if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
        return kIOReturnNotReady;
    }

    if (IKOT_IOKIT_CONNECT == type) {
        if ((client = OSDynamicCast( IOUserClient, obj ))) {
            IOStatisticsClientCall();
            IOLockLock(client->lock);
            client->clientDied();
            IOLockUnlock(client->lock);
        }
    } else if (IKOT_IOKIT_OBJECT == type) {
        if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
            map->taskDied();
        } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
            notify->setNotification( 0 );
        }
    }

    return kIOReturnSuccess;
}
630 }; /* extern "C" */
631
632 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
633
// Delivers service matched/terminated notifications to a user process
// via a one-way mach "ping"; discovered services queue in newSet until
// the client drains them with copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    // Preallocated message template; the caller's async reference is
    // carried in the tail of notifyHeader (see msgSize in init()).
    struct PingMsg {
        mach_msg_header_t msgHdr;
        OSNotificationHeader64 notifyHeader;
    };

    // Cap on services queued awaiting pickup.
    enum { kMaxOutstanding = 1024 };

    PingMsg * pingMsg;   // owned; freed in free()
    vm_size_t msgSize;   // allocated size of pingMsg
    OSArray * newSet;    // queued services
    bool armed;          // true when the next arrival should send a ping
    bool ipcLogged;      // limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
        void * reference, vm_size_t referenceSize,
        bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;
    // Clears the destination so no further pings are sent.
    void invalidatePort(void);

    // Trampoline from the C notification callback to handler().
    static bool _handler( void * target,
        void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
666
// Delivers IOService interest messages to a user process. Each message
// is a complex mach message carrying one port descriptor (the
// provider) plus the notify header and the message arguments.
class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
        mach_msg_header_t msgHdr;
        mach_msg_body_t msgBody;
        mach_msg_port_descriptor_t ports[1];
        OSNotificationHeader64 notifyHeader __attribute__ ((packed));
    };

    PingMsg * pingMsg;   // owned template; freed in free()
    vm_size_t msgSize;   // allocated size of pingMsg
    uint8_t clientIs64;  // client address-space width (affects arg packing)
    int owningPID;       // pid recorded at init; reported for kIOMessageCopyClientID
    bool ipcLogged;      // limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
        void * reference, vm_size_t referenceSize,
        vm_size_t extraSize,
        bool clientIs64 );

    virtual void free() APPLE_KEXT_OVERRIDE;
    // Clears the destination so no further messages are sent.
    void invalidatePort(void);

    // Trampoline from the C interest callback to handler().
    static IOReturn _handler( void * target, void * ref,
        UInt32 messageType, IOService * provider,
        void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
        UInt32 messageType, IOService * provider,
        void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
704
705 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
706
707 #undef super
708 #define super IOUserIterator
709 OSDefineMetaClass( IOUserNotification, IOUserIterator )
710 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
711
712 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
713
// Tears down the notification: detach from the notifier first so the
// handler can no longer fire, then release via the superclass.
void
IOUserNotification::free( void )
{
    if (holdNotify) {
        assert(OSDynamicCast(IONotifier, holdNotify));
        ((IONotifier *)holdNotify)->remove();
        holdNotify = 0;
    }
    // can't be in handler now

    super::free();
}
726
727
728 void
729 IOUserNotification::setNotification( IONotifier * notify )
730 {
731 OSObject * previousNotify;
732
733 IOLockLock( gIOObjectPortLock);
734
735 previousNotify = holdNotify;
736 holdNotify = notify;
737
738 IOLockUnlock( gIOObjectPortLock);
739
740 if (previousNotify) {
741 assert(OSDynamicCast(IONotifier, previousNotify));
742 ((IONotifier *)previousNotify)->remove();
743 }
744 }
745
746 void
747 IOUserNotification::reset()
748 {
749 // ?
750 }
751
752 bool
753 IOUserNotification::isValid()
754 {
755 return true;
756 }
757
758 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
759
760 #undef super
761 #define super IOUserNotification
762 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
763
764 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
765
// Builds the preallocated ping message delivered when a matching
// service arrives. referenceSize is caller-supplied async-reference
// data, bounds-checked against OSAsyncReference64 before being copied
// into the tail of the message. Returns false on any failure (cleanup
// is left to free()).
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
    if (!super::init()) {
        return false;
    }

    newSet = OSArray::withCapacity( 1 );
    if (!newSet) {
        return false;
    }

    // Reject oversized references so the bcopy below cannot overflow.
    if (referenceSize > sizeof(OSAsyncReference64)) {
        return false;
    }

    // Only referenceSize bytes of the reference field are sent.
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if (!pingMsg) {
        return false;
    }

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
        MACH_MSG_TYPE_COPY_SEND /*remote*/,
        MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size = msgSize;
    pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return true;
}
805
806 void
807 IOServiceUserNotification::invalidatePort(void)
808 {
809 if (pingMsg) {
810 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
811 }
812 }
813
// Destructor hook. The ivars are captured into locals before
// super::free() runs, then the send right and message buffer are
// released afterwards — presumably because super::free() tears down
// the object itself (NOTE(review): confirm ordering rationale).
void
IOServiceUserNotification::free( void )
{
    PingMsg * _pingMsg;
    vm_size_t _msgSize;
    OSArray * _newSet;

    _pingMsg = pingMsg;
    _msgSize = msgSize;
    _newSet = newSet;

    super::free();

    if (_pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree(_pingMsg, _msgSize);
    }

    if (_newSet) {
        _newSet->release();
    }
}
838
839 bool
840 IOServiceUserNotification::_handler( void * target,
841 void * ref, IOService * newService, IONotifier * notifier )
842 {
843 return ((IOServiceUserNotification *) target)->handler( ref, newService );
844 }
845
// Queues newService for pickup by the client. A ping message is sent
// only when the queue transitions from empty while "armed" (the
// client re-arms by draining the queue); the queue is capped at
// kMaxOutstanding entries.
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
    unsigned int count;
    kern_return_t kr;
    ipc_port_t port = NULL;
    bool sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if (count < kMaxOutstanding) {
        newSet->setObject( newService );
        // Ping only on the empty->non-empty transition while armed.
        if ((sendPing = (armed && (0 == count)))) {
            armed = false;
        }
    }

    IOUnlock( lock );

    // Keep the terminated service's port alive until the client has
    // had a chance to observe the termination.
    if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
    }

    if (sendPing) {
        if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
            pingMsg->msgHdr.msgh_local_port = port;
        } else {
            pingMsg->msgHdr.msgh_local_port = NULL;
        }

        kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
            pingMsg->msgHdr.msgh_size,
            (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
            0);
        if (port) {
            iokit_release_port( port );
        }

        // Log a send failure only once per notification object.
        if ((KERN_SUCCESS != kr) && !ipcLogged) {
            ipcLogged = true;
            IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
        }
    }

    return true;
}
894 OSObject *
895 IOServiceUserNotification::getNextObject()
896 {
897 assert(false);
898 return NULL;
899 }
900
901 OSObject *
902 IOServiceUserNotification::copyNextObject()
903 {
904 unsigned int count;
905 OSObject * result;
906
907 IOLockLock(lock);
908
909 count = newSet->getCount();
910 if (count) {
911 result = newSet->getObject( count - 1 );
912 result->retain();
913 newSet->removeObject( count - 1);
914 } else {
915 result = 0;
916 armed = true;
917 }
918
919 IOLockUnlock(lock);
920
921 return result;
922 }
923
924 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
925
926 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
927
928 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
929
// Builds the preallocated interest-message template. referenceSize is
// bounds-checked against OSAsyncReference64; extraSize is grown by
// the fixed interest-content header and recorded in the notify header
// for the benefit of the receiver. Returns false on failure (cleanup
// left to free()).
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, vm_size_t extraSize,
    bool client64 )
{
    if (!super::init()) {
        return false;
    }

    // Reject oversized references so the bcopy below cannot overflow.
    if (referenceSize > sizeof(OSAsyncReference64)) {
        return false;
    }

    clientIs64 = client64;

    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    // Only referenceSize bytes of the reference field are sent.
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if (!pingMsg) {
        return false;
    }

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    // Complex message: carries the provider port descriptor below.
    pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
        | MACH_MSGH_BITS(
        MACH_MSG_TYPE_COPY_SEND /*remote*/,
        MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size = msgSize;
    pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name = 0;
    pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size = extraSize;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return true;
}
976
977 void
978 IOServiceMessageUserNotification::invalidatePort(void)
979 {
980 if (pingMsg) {
981 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
982 }
983 }
984
// Destructor hook. Ivars are captured into locals before super::free()
// runs, then the send right and the message buffer are released —
// presumably because super::free() tears down the object itself
// (NOTE(review): confirm ordering rationale).
void
IOServiceMessageUserNotification::free( void )
{
    PingMsg * _pingMsg;
    vm_size_t _msgSize;

    _pingMsg = pingMsg;
    _msgSize = msgSize;

    super::free();

    if (_pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree( _pingMsg, _msgSize);
    }
}
1003
1004 IOReturn
1005 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1006 UInt32 messageType, IOService * provider,
1007 void * argument, vm_size_t argSize )
1008 {
1009 return ((IOServiceMessageUserNotification *) target)->handler(
1010 ref, messageType, provider, argument, argSize);
1011 }
1012
// Delivers one interest message to the client. Builds the outgoing
// message from the template plus the caller's argument bytes (clamped
// to kIOUserNotifyMaxMessageSize), encodes the un-padded argument size
// into the notify type field, and sends with a provider port
// descriptor attached. Small messages use a stack buffer; larger ones
// are heap-allocated for the duration of the send.
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
    enum { kLocalMsgSize = 0x100 };
    uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
    void * allocMsg;
    kern_return_t kr;
    vm_size_t argSize;
    vm_size_t thisMsgSize;
    ipc_port_t thisPort, providerPort;
    struct PingMsg * thisMsg;
    IOServiceInterestContent64 * data;

    // Special query: report the owning pid instead of messaging.
    if (kIOMessageCopyClientID == messageType) {
        *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
        return kIOReturnSuccess;
    }

    if (callerArgSize == 0) {
        // No payload: the argument pointer itself is sent, sized for
        // the client's address-space width.
        if (clientIs64) {
            argSize = sizeof(data->messageArgument[0]);
        } else {
            argSize = sizeof(uint32_t);
        }
    } else {
        // Clamp oversized payloads.
        if (callerArgSize > kIOUserNotifyMaxMessageSize) {
            callerArgSize = kIOUserNotifyMaxMessageSize;
        }
        argSize = callerArgSize;
    }

    // adjust message size for ipc restrictions
    // The true (pre-padding) argument size is packed into the type
    // field so the receiver can recover it; argSize is then rounded
    // up to the ipc alignment granule.
    natural_t type;
    type = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    thisMsgSize = msgSize
        + sizeof(IOServiceInterestContent64)
        - sizeof(data->messageArgument)
        + argSize;

    if (thisMsgSize > sizeof(stackMsg)) {
        allocMsg = IOMalloc(thisMsgSize);
        if (!allocMsg) {
            return kIOReturnNoMemory;
        }
        thisMsg = (typeof(thisMsg))allocMsg;
    } else {
        allocMsg = 0;
        thisMsg = (typeof(thisMsg))stackMsg;
    }

    bcopy(pingMsg, thisMsg, msgSize);
    thisMsg->notifyHeader.type = type;
    // Interest content begins right after the (variable-size) template.
    data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
    // == pingMsg->notifyHeader.content;
    data->messageType = messageType;

    if (callerArgSize == 0) {
        data->messageArgument[0] = (io_user_reference_t) messageArgument;
        if (!clientIs64) {
            // 32-bit clients read the value from either half.
            data->messageArgument[0] |= (data->messageArgument[0] << 32);
        }
    } else {
        bcopy( messageArgument, data->messageArgument, callerArgSize );
        // Zero the alignment padding so no kernel bytes leak out.
        bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
    }

    thisMsg->notifyHeader.type = type;
    thisMsg->msgHdr.msgh_size = thisMsgSize;

    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    thisMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    thisMsg->msgHdr.msgh_local_port = thisPort;

    kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
        thisMsg->msgHdr.msgh_size,
        (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
        0);
    if (thisPort) {
        iokit_release_port( thisPort );
    }
    if (providerPort) {
        iokit_release_port( providerPort );
    }

    if (allocMsg) {
        IOFree(allocMsg, thisMsgSize);
    }

    // Log a send failure only once per notification object.
    if ((KERN_SUCCESS != kr) && !ipcLogged) {
        ipcLogged = true;
        IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return kIOReturnSuccess;
}
1115
1116 OSObject *
1117 IOServiceMessageUserNotification::getNextObject()
1118 {
1119 return 0;
1120 }
1121
1122 OSObject *
1123 IOServiceMessageUserNotification::copyNextObject()
1124 {
1125 return NULL;
1126 }
1127
1128 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1129
1130 #undef super
1131 #define super IOService
1132 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1133
1134 IOLock * gIOUserClientOwnersLock;
1135
1136 void
1137 IOUserClient::initialize( void )
1138 {
1139 gIOObjectPortLock = IOLockAlloc();
1140 gIOUserClientOwnersLock = IOLockAlloc();
1141 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1142 }
1143
// Fills a 32-bit async reference: the reserved slot carries the wake
// port with the two low flag bits (kIOUCAsync0Flags) preserved; the
// remaining slots carry the user callback and refcon.
void
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
        | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}
1154
// 64-bit variant: same layout as setAsyncReference but with
// io_user_reference_t slots, so callback/refcon are not truncated.
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
        | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1165
1166 void
1167 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1168 mach_port_t wakePort,
1169 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1170 {
1171 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1172 if (vm_map_is_64bit(get_task_map(task))) {
1173 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1174 }
1175 }
1176
1177 static OSDictionary *
1178 CopyConsoleUser(UInt32 uid)
1179 {
1180 OSArray * array;
1181 OSDictionary * user = 0;
1182
1183 if ((array = OSDynamicCast(OSArray,
1184 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1185 for (unsigned int idx = 0;
1186 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1187 idx++) {
1188 OSNumber * num;
1189
1190 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1191 && (uid == num->unsigned32BitValue())) {
1192 user->retain();
1193 break;
1194 }
1195 }
1196 array->release();
1197 }
1198 return user;
1199 }
1200
1201 static OSDictionary *
1202 CopyUserOnConsole(void)
1203 {
1204 OSArray * array;
1205 OSDictionary * user = 0;
1206
1207 if ((array = OSDynamicCast(OSArray,
1208 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1209 for (unsigned int idx = 0;
1210 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1211 idx++) {
1212 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1213 user->retain();
1214 break;
1215 }
1216 }
1217 array->release();
1218 }
1219 return user;
1220 }
1221
1222 IOReturn
1223 IOUserClient::clientHasAuthorization( task_t task,
1224 IOService * service )
1225 {
1226 proc_t p;
1227
1228 p = (proc_t) get_bsdtask_info(task);
1229 if (p) {
1230 uint64_t authorizationID;
1231
1232 authorizationID = proc_uniqueid(p);
1233 if (authorizationID) {
1234 if (service->getAuthorizationID() == authorizationID) {
1235 return kIOReturnSuccess;
1236 }
1237 }
1238 }
1239
1240 return kIOReturnNotPermitted;
1241 }
1242
// Check whether the task identified by 'securityToken' holds the named
// privilege.  Returns kIOReturnSuccess when it does; otherwise
// kIOReturnNotPrivileged (or kIOReturnUnsupported for unknown names).
// 'securityToken' is a task_t for most privileges, but an IOUCProcessToken*
// for kIOClientPrivilegeSecureConsoleProcess.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: depends only on the current task's GPU-denied
	// state, not on 'securityToken'.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the caller's audit session id must match
	// the session currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console check the token wraps the task; otherwise the
	// token IS the task.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// token.val[0] is consulted below as the task's uid.
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
		// Fall through and return the task_info() error unchanged.
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator == uid 0.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: uid appears in the console users list (any session).
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: uid's session must be on the console; for the
		// secure-console check the secure-input pid must also match.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1337
// Copy (retained) the value for 'entitlement' out of 'task's code-signing
// entitlements dictionary.  Returns NULL when the task has no proc, no
// entitlements blob, the blob is oversized or unparseable, or the key is
// absent.  Caller releases the returned object.
OSObject *
IOUserClient::copyClientEntitlement( task_t task,
    const char * entitlement )
{
#define MAX_ENTITLEMENTS_LEN    (128 * 1024)

	proc_t p = NULL;
	pid_t pid = 0;
	char procname[MAXCOMLEN + 1] = "";
	size_t len = 0;
	void *entitlements_blob = NULL;
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSDictionary *entitlements = NULL;
	OSString *errorString = NULL;
	OSObject *value = NULL;

	p = (proc_t)get_bsdtask_info(task);
	if (p == NULL) {
		goto fail;
	}
	// pid/procname are only used for the diagnostic IOLog()s below.
	pid = proc_pid(p);
	proc_name(pid, procname, (int)sizeof(procname));

	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
		goto fail;
	}

	// A blob no larger than its header carries no entitlement payload.
	if (len <= offsetof(CS_GenericBlob, data)) {
		goto fail;
	}

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob.  Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL) {
		goto fail;
	}
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL) {
		goto fail;
	}

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL) {
		goto fail;
	}

	/* Fetch the entitlement value from the dictionary. */
	value = entitlements->getObject(entitlement);
	if (value != NULL) {
		// Retained for the caller; otherwise it would die with
		// 'entitlements_obj' below.
		value->retain();
	}

fail:
	// 'len' still matches the IOMalloc(len + 1) size here — it is only
	// adjusted before the allocation.
	if (entitlements_data != NULL) {
		IOFree(entitlements_data, len + 1);
	}
	if (entitlements_obj != NULL) {
		entitlements_obj->release();
	}
	if (errorString != NULL) {
		errorString->release();
	}
	return value;
}
1424
1425 bool
1426 IOUserClient::init()
1427 {
1428 if (getPropertyTable() || super::init()) {
1429 return reserve();
1430 }
1431
1432 return false;
1433 }
1434
1435 bool
1436 IOUserClient::init(OSDictionary * dictionary)
1437 {
1438 if (getPropertyTable() || super::init(dictionary)) {
1439 return reserve();
1440 }
1441
1442 return false;
1443 }
1444
1445 bool
1446 IOUserClient::initWithTask(task_t owningTask,
1447 void * securityID,
1448 UInt32 type )
1449 {
1450 if (getPropertyTable() || super::init()) {
1451 return reserve();
1452 }
1453
1454 return false;
1455 }
1456
1457 bool
1458 IOUserClient::initWithTask(task_t owningTask,
1459 void * securityID,
1460 UInt32 type,
1461 OSDictionary * properties )
1462 {
1463 bool ok;
1464
1465 ok = super::init( properties );
1466 ok &= initWithTask( owningTask, securityID, type );
1467
1468 return ok;
1469 }
1470
1471 bool
1472 IOUserClient::reserve()
1473 {
1474 if (!reserved) {
1475 reserved = IONew(ExpansionData, 1);
1476 if (!reserved) {
1477 return false;
1478 }
1479 }
1480 setTerminateDefer(NULL, true);
1481 IOStatisticsRegisterCounter();
1482
1483 return true;
1484 }
1485
// Links one (task, user client) ownership pair onto two queues: the
// task's list of owned clients and the client's list of owning tasks.
struct IOUserClientOwner {
	task_t task;            // owning task
	queue_chain_t taskLink; // link on task_io_user_clients(task)
	IOUserClient * uc;      // owned user client
	queue_chain_t ucLink;   // link on uc->owners
};
1492
// Record 'task' as an owner of this user client.  Idempotent per task.
// Returns kIOReturnNoMemory if the owner record cannot be allocated.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// 'owners' is lazily initialized; a NULL .next marks it unused
	// (see noMoreSenders() / free()).
	if (!owners.next) {
		queue_init(&owners);
	} else {
		// Don't link the same task twice.
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IONew(IOUserClientOwner, 1);
		if (!owner) {
			ret = kIOReturnNoMemory;
		} else {
			// Link the record onto both the client's owner list and
			// the task's client list (walked by iokit_task_terminate()).
			owner->task = task;
			owner->uc = this;
			queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
			queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1533
// Called when the last send right to this client goes away: unlink and
// free every owner record (from both the task-side and client-side
// queues), then reset the queue head to its uninitialized (NULL) state.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IODelete(owner, IOUserClientOwner, 1);
		}
		// Back to the "never initialized" marker checked by registerOwner().
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1553
// Task-termination hook: detach the dying task from every user client it
// owns.  Clients left with no owners are retained and chained onto a
// local 'dead' list so clientDied() can be delivered after the lock is
// dropped.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IODelete(owner, IOUserClientOwner, 1);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Deliver clientDied() outside the owners lock; each entry was
	// retained above and is released here.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1597
// Destructor-equivalent: release mappings, free the lock, unregister
// statistics, and free the expansion data before superclass teardown.
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IOLockFree(lock);
	}

	IOStatisticsUnregisterCounter();

	// The owner list must already be empty and reset (noMoreSenders() /
	// iokit_task_terminate()) by the time the client is freed.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IODelete(reserved, ExpansionData, 1);
	}

	super::free();
}
1619
// Deliver close-on-death.  Shared instances always forward to
// clientClose(); exclusive instances do so only on the first 0->1
// transition of 'closed' (atomic CAS), so the close runs at most once.
IOReturn
IOUserClient::clientDied( void )
{
	IOReturn ret = kIOReturnNotReady;

	if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
		ret = clientClose();
	}

	return ret;
}
1631
1632 IOReturn
1633 IOUserClient::clientClose( void )
1634 {
1635 return kIOReturnUnsupported;
1636 }
1637
1638 IOService *
1639 IOUserClient::getService( void )
1640 {
1641 return 0;
1642 }
1643
1644 IOReturn
1645 IOUserClient::registerNotificationPort(
1646 mach_port_t /* port */,
1647 UInt32 /* type */,
1648 UInt32 /* refCon */)
1649 {
1650 return kIOReturnUnsupported;
1651 }
1652
1653 IOReturn
1654 IOUserClient::registerNotificationPort(
1655 mach_port_t port,
1656 UInt32 type,
1657 io_user_reference_t refCon)
1658 {
1659 return registerNotificationPort(port, type, (UInt32) refCon);
1660 }
1661
1662 IOReturn
1663 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1664 semaphore_t * semaphore )
1665 {
1666 return kIOReturnUnsupported;
1667 }
1668
1669 IOReturn
1670 IOUserClient::connectClient( IOUserClient * /* client */ )
1671 {
1672 return kIOReturnUnsupported;
1673 }
1674
1675 IOReturn
1676 IOUserClient::clientMemoryForType( UInt32 type,
1677 IOOptionBits * options,
1678 IOMemoryDescriptor ** memory )
1679 {
1680 return kIOReturnUnsupported;
1681 }
1682
#if !__LP64__
// Legacy 32-bit entry point; retained for ABI only and never maps
// anything — callers use mapClientMemory64().
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1694
// Map the memory the subclass exports for 'type' into 'task'.  Returns
// the new mapping, or NULL when clientMemoryForType() fails or yields no
// descriptor.
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = 0;
	IOMemoryMap * map = 0;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller-supplied flags may override only the user-controllable
		// bits; the subclass keeps the rest via 'options'.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		// Balance the reference handed back by clientMemoryForType();
		// the mapping keeps the descriptor alive.
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1722
1723 IOReturn
1724 IOUserClient::exportObjectToClient(task_t task,
1725 OSObject *obj, io_object_t *clientObj)
1726 {
1727 mach_port_name_t name;
1728
1729 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1730
1731 *clientObj = (io_object_t)(uintptr_t) name;
1732
1733 if (obj) {
1734 obj->release();
1735 }
1736
1737 return kIOReturnSuccess;
1738 }
1739
1740 IOReturn
1741 IOUserClient::copyPortNameForObjectInTask(task_t task,
1742 OSObject *obj, mach_port_name_t * port_name)
1743 {
1744 mach_port_name_t name;
1745
1746 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1747
1748 *(mach_port_name_t *) port_name = name;
1749
1750 return kIOReturnSuccess;
1751 }
1752
1753 IOReturn
1754 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1755 OSObject **obj)
1756 {
1757 OSObject * object;
1758
1759 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1760
1761 *obj = object;
1762
1763 return object ? kIOReturnSuccess : kIOReturnIPCError;
1764 }
1765
1766 IOReturn
1767 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
1768 {
1769 return iokit_mod_send_right(task, port_name, delta);
1770 }
1771
1772 IOExternalMethod *
1773 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1774 {
1775 return 0;
1776 }
1777
1778 IOExternalAsyncMethod *
1779 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1780 {
1781 return 0;
1782 }
1783
1784 IOExternalTrap *
1785 IOUserClient::
1786 getExternalTrapForIndex(UInt32 index)
1787 {
1788 return NULL;
1789 }
1790
1791 #pragma clang diagnostic push
1792 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1793
1794 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1795 // functions can break clients of kexts implementing getExternalMethodForIndex()
1796 IOExternalMethod *
1797 IOUserClient::
1798 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1799 {
1800 IOExternalMethod *method = getExternalMethodForIndex(index);
1801
1802 if (method) {
1803 *targetP = (IOService *) method->object;
1804 }
1805
1806 return method;
1807 }
1808
1809 IOExternalAsyncMethod *
1810 IOUserClient::
1811 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1812 {
1813 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1814
1815 if (method) {
1816 *targetP = (IOService *) method->object;
1817 }
1818
1819 return method;
1820 }
1821
1822 IOExternalTrap *
1823 IOUserClient::
1824 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1825 {
1826 IOExternalTrap *trap = getExternalTrapForIndex(index);
1827
1828 if (trap) {
1829 *targetP = trap->object;
1830 }
1831
1832 return trap;
1833 }
1834 #pragma clang diagnostic pop
1835
1836 IOReturn
1837 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1838 {
1839 mach_port_t port;
1840 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1841
1842 if (MACH_PORT_NULL != port) {
1843 iokit_release_port_send(port);
1844 }
1845
1846 return kIOReturnSuccess;
1847 }
1848
1849 IOReturn
1850 IOUserClient::releaseNotificationPort(mach_port_t port)
1851 {
1852 if (MACH_PORT_NULL != port) {
1853 iokit_release_port_send(port);
1854 }
1855
1856 return kIOReturnSuccess;
1857 }
1858
1859 IOReturn
1860 IOUserClient::sendAsyncResult(OSAsyncReference reference,
1861 IOReturn result, void *args[], UInt32 numArgs)
1862 {
1863 OSAsyncReference64 reference64;
1864 io_user_reference_t args64[kMaxAsyncArgs];
1865 unsigned int idx;
1866
1867 if (numArgs > kMaxAsyncArgs) {
1868 return kIOReturnMessageTooLarge;
1869 }
1870
1871 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
1872 reference64[idx] = REF64(reference[idx]);
1873 }
1874
1875 for (idx = 0; idx < numArgs; idx++) {
1876 args64[idx] = REF64(args[idx]);
1877 }
1878
1879 return sendAsyncResult64(reference64, result, args64, numArgs);
1880 }
1881
1882 IOReturn
1883 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1884 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1885 {
1886 return _sendAsyncResult64(reference, result, args, numArgs, options);
1887 }
1888
1889 IOReturn
1890 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1891 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1892 {
1893 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1894 }
1895
// Build and send the async-completion Mach message described by
// 'reference' to its reply port.  The wire format — 32- vs 64-bit
// reference and argument widths — is selected by kIOUCAsync64Flag in
// reference[0].
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = 0;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: wide reference and arguments; the message is
		// shortened to carry only 'numArgs' argument slots.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		unsigned int idx;

		// 32-bit client: reference and arguments are narrowed via REF32.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		for (idx = 0; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Caller tolerates drops: time out immediately on a full queue.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		// Log only the first failure; the flag is sticky in reference[0].
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
1984
1985
1986 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1987
1988 extern "C" {
// CHECK: declare 'out' as a down-cast of the MIG object argument 'obj';
// returns kIOReturnBadArgument from the enclosing routine on type mismatch.
#define CHECK(cls, obj, out) \
    cls * out; \
    if( !(out = OSDynamicCast( cls, obj))) \
        return( kIOReturnBadArgument )

// CHECKLOCKED: as CHECK, but for objects wrapped in an IOUserIterator;
// declares both 'oIter' (the wrapper) and 'out' (the wrapped object).
#define CHECKLOCKED(cls, obj, out) \
    IOUserIterator * oIter; \
    cls * out; \
    if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
        return (kIOReturnBadArgument); \
    if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
        return (kIOReturnBadArgument)
2001
2002 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2003
2004 // Create a vm_map_copy_t or kalloc'ed data for memory
2005 // to be copied out. ipc will free after the copyout.
2006
2007 static kern_return_t
2008 copyoutkdata( const void * data, vm_size_t len,
2009 io_buf_ptr_t * buf )
2010 {
2011 kern_return_t err;
2012 vm_map_copy_t copy;
2013
2014 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2015 false /* src_destroy */, &copy);
2016
2017 assert( err == KERN_SUCCESS );
2018 if (err == KERN_SUCCESS) {
2019 *buf = (char *) copy;
2020 }
2021
2022 return err;
2023 }
2024
2025 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2026
2027 /* Routine io_server_version */
2028 kern_return_t
2029 is_io_server_version(
2030 mach_port_t master_port,
2031 uint64_t *version)
2032 {
2033 *version = IOKIT_SERVER_VERSION;
2034 return kIOReturnSuccess;
2035 }
2036
2037 /* Routine io_object_get_class */
2038 kern_return_t
2039 is_io_object_get_class(
2040 io_object_t object,
2041 io_name_t className )
2042 {
2043 const OSMetaClass* my_obj = NULL;
2044
2045 if (!object) {
2046 return kIOReturnBadArgument;
2047 }
2048
2049 my_obj = object->getMetaClass();
2050 if (!my_obj) {
2051 return kIOReturnNotFound;
2052 }
2053
2054 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2055
2056 return kIOReturnSuccess;
2057 }
2058
2059 /* Routine io_object_get_superclass */
// Look up the metaclass named 'obj_name' and copy its superclass name
// into 'class_name'.  Restricted to the master device port.
kern_return_t
is_io_object_get_superclass(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t class_name)
{
	IOReturn ret;
	const OSMetaClass * meta;
	const OSMetaClass * super;
	const OSSymbol * name;
	const char * cstr;

	if (!obj_name || !class_name) {
		return kIOReturnBadArgument;
	}
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	// do/break ladder: any missing link leaves ret at kIOReturnNotFound
	// and falls through to the shared cleanup below.
	ret = kIOReturnNotFound;
	meta = 0;
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;
		}
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		super = meta->getSuperClass();
		if (!super) {
			break;
		}
		cstr = super->getClassName();
		if (!cstr) {
			break;
		}
		strlcpy(class_name, cstr, sizeof(io_name_t));
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);
	if (meta) {
		meta->releaseMetaClass();
	}

	return ret;
}
2109
2110 /* Routine io_object_get_bundle_identifier */
2111 kern_return_t
2112 is_io_object_get_bundle_identifier(
2113 mach_port_t master_port,
2114 io_name_t obj_name,
2115 io_name_t bundle_name)
2116 {
2117 IOReturn ret;
2118 const OSMetaClass * meta;
2119 const OSSymbol * name;
2120 const OSSymbol * identifier;
2121 const char * cstr;
2122
2123 if (!obj_name || !bundle_name) {
2124 return kIOReturnBadArgument;
2125 }
2126 if (master_port != master_device_port) {
2127 return kIOReturnNotPrivileged;
2128 }
2129
2130 ret = kIOReturnNotFound;
2131 meta = 0;
2132 do{
2133 name = OSSymbol::withCString(obj_name);
2134 if (!name) {
2135 break;
2136 }
2137 meta = OSMetaClass::copyMetaClassWithName(name);
2138 if (!meta) {
2139 break;
2140 }
2141 identifier = meta->getKmodName();
2142 if (!identifier) {
2143 break;
2144 }
2145 cstr = identifier->getCStringNoCopy();
2146 if (!cstr) {
2147 break;
2148 }
2149 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2150 ret = kIOReturnSuccess;
2151 }while (false);
2152
2153 OSSafeReleaseNULL(name);
2154 if (meta) {
2155 meta->releaseMetaClass();
2156 }
2157
2158 return ret;
2159 }
2160
2161 /* Routine io_object_conforms_to */
2162 kern_return_t
2163 is_io_object_conforms_to(
2164 io_object_t object,
2165 io_name_t className,
2166 boolean_t *conforms )
2167 {
2168 if (!object) {
2169 return kIOReturnBadArgument;
2170 }
2171
2172 *conforms = (0 != object->metaCast( className ));
2173
2174 return kIOReturnSuccess;
2175 }
2176
2177 /* Routine io_object_get_retain_count */
2178 kern_return_t
2179 is_io_object_get_retain_count(
2180 io_object_t object,
2181 uint32_t *retainCount )
2182 {
2183 if (!object) {
2184 return kIOReturnBadArgument;
2185 }
2186
2187 *retainCount = object->getRetainCount();
2188 return kIOReturnSuccess;
2189 }
2190
2191 /* Routine io_iterator_next */
2192 kern_return_t
2193 is_io_iterator_next(
2194 io_object_t iterator,
2195 io_object_t *object )
2196 {
2197 IOReturn ret;
2198 OSObject * obj;
2199 OSIterator * iter;
2200 IOUserIterator * uiter;
2201
2202 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2203 obj = uiter->copyNextObject();
2204 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2205 obj = iter->getNextObject();
2206 if (obj) {
2207 obj->retain();
2208 }
2209 } else {
2210 return kIOReturnBadArgument;
2211 }
2212
2213 if (obj) {
2214 *object = obj;
2215 ret = kIOReturnSuccess;
2216 } else {
2217 ret = kIOReturnNoDevice;
2218 }
2219
2220 return ret;
2221 }
2222
2223 /* Routine io_iterator_reset */
// Rewind the iterator to its first element.
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// CHECK declares 'iter' and returns kIOReturnBadArgument when
	// 'iterator' is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2234
2235 /* Routine io_iterator_is_valid */
// Report whether the iterator is still valid (i.e. the underlying
// collection has not changed since iteration started).
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// CHECK declares 'iter' and returns kIOReturnBadArgument when
	// 'iterator' is not an OSIterator.
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2247
2248
// Unserialize the XML matching dictionary and test it against '_service'
// via passiveMatch().  Shared backend for the _ool and _bin MIG routines.
static kern_return_t
internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
	// CHECK declares 'service' and returns kIOReturnBadArgument when
	// '_service' is not an IOService.
	CHECK( IOService, _service, service );

	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		*matches = service->passiveMatch( dict );
		kr = kIOReturnSuccess;
	} else {
		// Parse failure or a non-dictionary top-level object.
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2278
2279 /* Routine io_service_match_property_table */
2280 kern_return_t
2281 is_io_service_match_property_table(
2282 io_service_t service,
2283 io_string_t matching,
2284 boolean_t *matches )
2285 {
2286 return kIOReturnUnsupported;
2287 }
2288
2289
2290 /* Routine io_service_match_property_table_ool */
// Out-of-line variant: materialize the caller's matching data into the
// kernel map, run the match, then deallocate the copy.
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed); the real status goes in *result.
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2315
2316 /* Routine io_service_match_property_table_bin */
2317 kern_return_t
2318 is_io_service_match_property_table_bin(
2319 io_object_t service,
2320 io_struct_inband_t matching,
2321 mach_msg_type_number_t matchingCnt,
2322 boolean_t *matches)
2323 {
2324 return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2325 }
2326
2327 static kern_return_t
2328 internal_io_service_get_matching_services(
2329 mach_port_t master_port,
2330 const char * matching,
2331 mach_msg_type_number_t matching_size,
2332 io_iterator_t *existing )
2333 {
2334 kern_return_t kr;
2335 OSObject * obj;
2336 OSDictionary * dict;
2337
2338 if (master_port != master_device_port) {
2339 return kIOReturnNotPrivileged;
2340 }
2341
2342 assert(matching_size);
2343 obj = OSUnserializeXML(matching, matching_size);
2344
2345 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2346 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2347 kr = kIOReturnSuccess;
2348 } else {
2349 kr = kIOReturnBadArgument;
2350 }
2351
2352 if (obj) {
2353 obj->release();
2354 }
2355
2356 return kr;
2357 }
2358
2359 /* Routine io_service_get_matching_services */
2360 kern_return_t
2361 is_io_service_get_matching_services(
2362 mach_port_t master_port,
2363 io_string_t matching,
2364 io_iterator_t *existing )
2365 {
2366 return kIOReturnUnsupported;
2367 }
2368
2369 /* Routine io_service_get_matching_services_ool */
// Out-of-line variant: materialize the caller's matching data into the
// kernel map, look up the services, then deallocate the copy.
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = 0;
		*result = internal_io_service_get_matching_services(master_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2396
2397 /* Routine io_service_get_matching_services_bin */
2398 kern_return_t
2399 is_io_service_get_matching_services_bin(
2400 mach_port_t master_port,
2401 io_struct_inband_t matching,
2402 mach_msg_type_number_t matchingCnt,
2403 io_object_t *existing)
2404 {
2405 return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
2406 }
2407
2408
// Unserialize the matching dictionary and return the single best-matching
// (retained) service.  Shared backend for the _ool and _bin MIG routines;
// restricted to the master device port.
static kern_return_t
internal_io_service_get_matching_service(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		*service = IOService::copyMatchingService( dict );
		// NULL result means nothing matched, not a malformed request.
		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
	} else {
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2440
2441 /* Routine io_service_get_matching_service */
2442 kern_return_t
2443 is_io_service_get_matching_service(
2444 mach_port_t master_port,
2445 io_string_t matching,
2446 io_service_t *service )
2447 {
2448 return kIOReturnUnsupported;
2449 }
2450
/* Routine io_service_get_matching_service_ool */
// Out-of-line variant: materialize the caller's matching data into the
// kernel map, look up the single matching service, then deallocate.
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = 0;
		*result = internal_io_service_get_matching_service(master_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2478
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: the matching data arrives inline in the MIG message,
	// so no vm mapping is needed; forward to the shared implementation.
	return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
}
2489
/*
 * Shared implementation for the io_service_add_notification* MIG routines.
 * Unserializes `matching` into a dictionary, maps `notification_type` to a
 * user message type, and arms an IOServiceUserNotification that delivers
 * matching services to `port`.  On success *notification holds the retained
 * user-notification object; `reference`/`referenceSize` are echoed back in
 * the async message and `client64` selects the 64-bit reference layout.
 */
static kern_return_t
internal_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
	IOServiceUserNotification * userNotify = 0;
	IONotifier * notify = 0;
	const OSSymbol * sym;
	OSDictionary * dict;
	IOReturn err;
	unsigned long int userMsgType;

	// Only the master device port may install matching notifications.
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	do {
		err = kIOReturnNoResources;

		// Bound the matching buffer; this early return happens before
		// sym/dict are assigned, so no cleanup is needed.
		if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		// NOTE(review): on failure err is set but there is no
		// `continue`, so execution falls through with sym == NULL;
		// the comparisons below all miss and the cleanup guards on
		// sym/dict keep this safe — confirm this is intentional.
		if (!(sym = OSSymbol::withCString( notification_type ))) {
			err = kIOReturnNoResources;
		}

		assert(matching_size);
		// NOTE(review): if OSUnserializeXML yields a non-dictionary
		// object the cast returns NULL and that object is never
		// released — TODO confirm whether this leak matters here.
		dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
		if (!dict) {
			err = kIOReturnBadArgument;
			continue;
		}

		// Translate the notification-type symbol into the message type
		// user space will receive.
		if ((sym == gIOPublishNotification)
		    || (sym == gIOFirstPublishNotification)) {
			userMsgType = kIOServicePublishNotificationType;
		} else if ((sym == gIOMatchedNotification)
		    || (sym == gIOFirstMatchNotification)) {
			userMsgType = kIOServiceMatchedNotificationType;
		} else if ((sym == gIOTerminatedNotification)
		    || (sym == gIOWillTerminateNotification)) {
			userMsgType = kIOServiceTerminatedNotificationType;
		} else {
			userMsgType = kLastIOKitNotificationType;
		}

		userNotify = new IOServiceUserNotification;

		if (userNotify && !userNotify->init( port, userMsgType,
		    reference, referenceSize, client64)) {
			userNotify->release();
			userNotify = 0;
		}
		if (!userNotify) {
			continue;
		}

		// Arm the kernel-side notification; the user notification object
		// is the target of the handler callback.
		notify = IOService::addMatchingNotification( sym, dict,
		    &userNotify->_handler, userNotify );
		if (notify) {
			*notification = userNotify;
			userNotify->setNotification( notify );
			err = kIOReturnSuccess;
		} else {
			err = kIOReturnUnsupported;
		}
	} while (false);

	// On any failure, tear down the partially constructed user notification.
	if ((kIOReturnSuccess != err) && userNotify) {
		userNotify->invalidatePort();
		userNotify->release();
		userNotify = 0;
	}

	if (sym) {
		sym->release();
	}
	if (dict) {
		dict->release();
	}

	return err;
}
2581
2582
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Legacy in-band-string variant; deliberately disabled in favor of the
	// _bin/_ool entry points.
	return kIOReturnUnsupported;
}
2596
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Legacy in-band-string variant (64-bit references); deliberately
	// disabled in favor of the _bin/_ool entry points.
	return kIOReturnUnsupported;
}
2610
2611 /* Routine io_service_add_notification_bin */
2612 kern_return_t
2613 is_io_service_add_notification_bin
2614 (
2615 mach_port_t master_port,
2616 io_name_t notification_type,
2617 io_struct_inband_t matching,
2618 mach_msg_type_number_t matchingCnt,
2619 mach_port_t wake_port,
2620 io_async_ref_t reference,
2621 mach_msg_type_number_t referenceCnt,
2622 io_object_t *notification)
2623 {
2624 io_async_ref_t zreference;
2625
2626 if (referenceCnt > ASYNC_REF_COUNT) {
2627 return kIOReturnBadArgument;
2628 }
2629 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2630 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2631
2632 return internal_io_service_add_notification(master_port, notification_type,
2633 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2634 false, notification);
2635 }
2636
2637 /* Routine io_service_add_notification_bin_64 */
2638 kern_return_t
2639 is_io_service_add_notification_bin_64
2640 (
2641 mach_port_t master_port,
2642 io_name_t notification_type,
2643 io_struct_inband_t matching,
2644 mach_msg_type_number_t matchingCnt,
2645 mach_port_t wake_port,
2646 io_async_ref64_t reference,
2647 mach_msg_type_number_t referenceCnt,
2648 io_object_t *notification)
2649 {
2650 io_async_ref64_t zreference;
2651
2652 if (referenceCnt > ASYNC_REF64_COUNT) {
2653 return kIOReturnBadArgument;
2654 }
2655 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2656 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2657
2658 return internal_io_service_add_notification(master_port, notification_type,
2659 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2660 true, notification);
2661 }
2662
/*
 * Out-of-line helper: maps the caller's vm copy of the matching data into
 * the kernel map, forwards to internal_io_service_add_notification(), and
 * reports that routine's outcome through *result.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Map the out-of-line matching buffer; consumes the copy on success.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		// (the real outcome is reported through *result)
		*notification = 0;
		*result = internal_io_service_add_notification( master_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2694
2695 /* Routine io_service_add_notification_ool */
2696 kern_return_t
2697 is_io_service_add_notification_ool(
2698 mach_port_t master_port,
2699 io_name_t notification_type,
2700 io_buf_ptr_t matching,
2701 mach_msg_type_number_t matchingCnt,
2702 mach_port_t wake_port,
2703 io_async_ref_t reference,
2704 mach_msg_type_number_t referenceCnt,
2705 kern_return_t *result,
2706 io_object_t *notification )
2707 {
2708 io_async_ref_t zreference;
2709
2710 if (referenceCnt > ASYNC_REF_COUNT) {
2711 return kIOReturnBadArgument;
2712 }
2713 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2714 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2715
2716 return internal_io_service_add_notification_ool(master_port, notification_type,
2717 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2718 false, result, notification);
2719 }
2720
2721 /* Routine io_service_add_notification_ool_64 */
2722 kern_return_t
2723 is_io_service_add_notification_ool_64(
2724 mach_port_t master_port,
2725 io_name_t notification_type,
2726 io_buf_ptr_t matching,
2727 mach_msg_type_number_t matchingCnt,
2728 mach_port_t wake_port,
2729 io_async_ref64_t reference,
2730 mach_msg_type_number_t referenceCnt,
2731 kern_return_t *result,
2732 io_object_t *notification )
2733 {
2734 io_async_ref64_t zreference;
2735
2736 if (referenceCnt > ASYNC_REF64_COUNT) {
2737 return kIOReturnBadArgument;
2738 }
2739 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2740 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2741
2742 return internal_io_service_add_notification_ool(master_port, notification_type,
2743 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2744 true, result, notification);
2745 }
2746
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Legacy single-reference wrapper: forwards to the string-matching
	// entry point above (which currently returns kIOReturnUnsupported).
	return is_io_service_add_notification( master_port, notification_type,
	    matching, port, &ref, 1, notification );
}
2761
2762
2763 static kern_return_t
2764 internal_io_service_add_interest_notification(
2765 io_object_t _service,
2766 io_name_t type_of_interest,
2767 mach_port_t port,
2768 void * reference,
2769 vm_size_t referenceSize,
2770 bool client64,
2771 io_object_t * notification )
2772 {
2773 IOServiceMessageUserNotification * userNotify = 0;
2774 IONotifier * notify = 0;
2775 const OSSymbol * sym;
2776 IOReturn err;
2777
2778 CHECK( IOService, _service, service );
2779
2780 err = kIOReturnNoResources;
2781 if ((sym = OSSymbol::withCString( type_of_interest ))) {
2782 do {
2783 userNotify = new IOServiceMessageUserNotification;
2784
2785 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2786 reference, referenceSize,
2787 kIOUserNotifyMaxMessageSize,
2788 client64 )) {
2789 userNotify->release();
2790 userNotify = 0;
2791 }
2792 if (!userNotify) {
2793 continue;
2794 }
2795
2796 notify = service->registerInterest( sym,
2797 &userNotify->_handler, userNotify );
2798 if (notify) {
2799 *notification = userNotify;
2800 userNotify->setNotification( notify );
2801 err = kIOReturnSuccess;
2802 } else {
2803 err = kIOReturnUnsupported;
2804 }
2805
2806 sym->release();
2807 } while (false);
2808 }
2809
2810 if ((kIOReturnSuccess != err) && userNotify) {
2811 userNotify->invalidatePort();
2812 userNotify->release();
2813 userNotify = 0;
2814 }
2815
2816 return err;
2817 }
2818
2819 /* Routine io_service_add_message_notification */
2820 kern_return_t
2821 is_io_service_add_interest_notification(
2822 io_object_t service,
2823 io_name_t type_of_interest,
2824 mach_port_t port,
2825 io_async_ref_t reference,
2826 mach_msg_type_number_t referenceCnt,
2827 io_object_t * notification )
2828 {
2829 io_async_ref_t zreference;
2830
2831 if (referenceCnt > ASYNC_REF_COUNT) {
2832 return kIOReturnBadArgument;
2833 }
2834 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2835 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2836
2837 return internal_io_service_add_interest_notification(service, type_of_interest,
2838 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
2839 }
2840
2841 /* Routine io_service_add_interest_notification_64 */
2842 kern_return_t
2843 is_io_service_add_interest_notification_64(
2844 io_object_t service,
2845 io_name_t type_of_interest,
2846 mach_port_t wake_port,
2847 io_async_ref64_t reference,
2848 mach_msg_type_number_t referenceCnt,
2849 io_object_t *notification )
2850 {
2851 io_async_ref64_t zreference;
2852
2853 if (referenceCnt > ASYNC_REF64_COUNT) {
2854 return kIOReturnBadArgument;
2855 }
2856 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2857 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2858
2859 return internal_io_service_add_interest_notification(service, type_of_interest,
2860 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
2861 }
2862
2863
/* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	CHECK( IOService, _service, service );

	// notify_ref round-trips the kernel notification handle through user
	// space as a natural_t; widen via uintptr_t before casting back.
	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	    (IOOptionBits) response );
}
2876
/* Routine io_connect_get_notification_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	CHECK( IOUserClient, connection, client );

	// Delegates to the user client; the semaphore is returned to user
	// space as a Mach semaphore right.
	IOStatisticsClientCall();
	return client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
}
2890
2891 /* Routine io_registry_get_root_entry */
2892 kern_return_t
2893 is_io_registry_get_root_entry(
2894 mach_port_t master_port,
2895 io_object_t *root )
2896 {
2897 IORegistryEntry * entry;
2898
2899 if (master_port != master_device_port) {
2900 return kIOReturnNotPrivileged;
2901 }
2902
2903 entry = IORegistryEntry::getRegistryRoot();
2904 if (entry) {
2905 entry->retain();
2906 }
2907 *root = entry;
2908
2909 return kIOReturnSuccess;
2910 }
2911
2912 /* Routine io_registry_create_iterator */
2913 kern_return_t
2914 is_io_registry_create_iterator(
2915 mach_port_t master_port,
2916 io_name_t plane,
2917 uint32_t options,
2918 io_object_t *iterator )
2919 {
2920 if (master_port != master_device_port) {
2921 return kIOReturnNotPrivileged;
2922 }
2923
2924 *iterator = IOUserIterator::withIterator(
2925 IORegistryIterator::iterateOver(
2926 IORegistryEntry::getPlane( plane ), options ));
2927
2928 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
2929 }
2930
2931 /* Routine io_registry_entry_create_iterator */
2932 kern_return_t
2933 is_io_registry_entry_create_iterator(
2934 io_object_t registry_entry,
2935 io_name_t plane,
2936 uint32_t options,
2937 io_object_t *iterator )
2938 {
2939 CHECK( IORegistryEntry, registry_entry, entry );
2940
2941 *iterator = IOUserIterator::withIterator(
2942 IORegistryIterator::iterateOver( entry,
2943 IORegistryEntry::getPlane( plane ), options ));
2944
2945 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
2946 }
2947
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// CHECKLOCKED presumably also exposes the wrapping IOUserIterator as
	// oIter (confirm against the macro); its lock serializes user-space
	// access to the underlying registry iterator.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
2961
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Pop one nesting level under the user iterator's lock; exitEntry()
	// reports whether there was a level to pop.
	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
2977
2978 /* Routine io_registry_entry_from_path */
2979 kern_return_t
2980 is_io_registry_entry_from_path(
2981 mach_port_t master_port,
2982 io_string_t path,
2983 io_object_t *registry_entry )
2984 {
2985 IORegistryEntry * entry;
2986
2987 if (master_port != master_device_port) {
2988 return kIOReturnNotPrivileged;
2989 }
2990
2991 entry = IORegistryEntry::fromPath( path );
2992
2993 *registry_entry = entry;
2994
2995 return kIOReturnSuccess;
2996 }
2997
2998
/* Routine io_registry_entry_from_path_ool */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t master_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = 0;
	res = err = KERN_SUCCESS;
	// Short paths arrive in-band; only fall back to the out-of-line
	// buffer when the in-band string is empty.
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		// fromPath() returns a retained entry or NULL.
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	// err reports the MIG/vm outcome; res reports the lookup outcome.
	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3059
3060
3061 /* Routine io_registry_entry_in_plane */
3062 kern_return_t
3063 is_io_registry_entry_in_plane(
3064 io_object_t registry_entry,
3065 io_name_t plane,
3066 boolean_t *inPlane )
3067 {
3068 CHECK( IORegistryEntry, registry_entry, entry );
3069
3070 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3071
3072 return kIOReturnSuccess;
3073 }
3074
3075
3076 /* Routine io_registry_entry_get_path */
3077 kern_return_t
3078 is_io_registry_entry_get_path(
3079 io_object_t registry_entry,
3080 io_name_t plane,
3081 io_string_t path )
3082 {
3083 int length;
3084 CHECK( IORegistryEntry, registry_entry, entry );
3085
3086 length = sizeof(io_string_t);
3087 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3088 return kIOReturnSuccess;
3089 } else {
3090 return kIOReturnBadArgument;
3091 }
3092 }
3093
/* Routine io_registry_entry_get_path_ool */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	// Fast path: try to produce the path in the in-band buffer first.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Slow path: retry with a kMaxPath-byte kernel buffer and hand
		// the result back out-of-line via copyoutkdata().
		length = kMaxPath;
		buf = IONew(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		// Free kMaxPath bytes — the allocation size — even though
		// getPath() may have shrunk `length`.
		if (buf) {
			IODelete(buf, char, kMaxPath);
		}
	}

	return err;
}
3133
3134
3135 /* Routine io_registry_entry_get_name */
3136 kern_return_t
3137 is_io_registry_entry_get_name(
3138 io_object_t registry_entry,
3139 io_name_t name )
3140 {
3141 CHECK( IORegistryEntry, registry_entry, entry );
3142
3143 strncpy( name, entry->getName(), sizeof(io_name_t));
3144
3145 return kIOReturnSuccess;
3146 }
3147
3148 /* Routine io_registry_entry_get_name_in_plane */
3149 kern_return_t
3150 is_io_registry_entry_get_name_in_plane(
3151 io_object_t registry_entry,
3152 io_name_t planeName,
3153 io_name_t name )
3154 {
3155 const IORegistryPlane * plane;
3156 CHECK( IORegistryEntry, registry_entry, entry );
3157
3158 if (planeName[0]) {
3159 plane = IORegistryEntry::getPlane( planeName );
3160 } else {
3161 plane = 0;
3162 }
3163
3164 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3165
3166 return kIOReturnSuccess;
3167 }
3168
3169 /* Routine io_registry_entry_get_location_in_plane */
3170 kern_return_t
3171 is_io_registry_entry_get_location_in_plane(
3172 io_object_t registry_entry,
3173 io_name_t planeName,
3174 io_name_t location )
3175 {
3176 const IORegistryPlane * plane;
3177 CHECK( IORegistryEntry, registry_entry, entry );
3178
3179 if (planeName[0]) {
3180 plane = IORegistryEntry::getPlane( planeName );
3181 } else {
3182 plane = 0;
3183 }
3184
3185 const char * cstr = entry->getLocation( plane );
3186
3187 if (cstr) {
3188 strncpy( location, cstr, sizeof(io_name_t));
3189 return kIOReturnSuccess;
3190 } else {
3191 return kIOReturnNotFound;
3192 }
3193 }
3194
/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
	io_object_t registry_entry,
	uint64_t *entry_id )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	// Return the entry's 64-bit registry ID.
	*entry_id = entry->getRegistryEntryID();

	return kIOReturnSuccess;
}
3207
/* Routine io_registry_entry_get_property_bytes */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = 0;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC hook may veto access to this property for the calling credential.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is reported as zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the terminating NUL in the returned length.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are returned as the literal strings "Yes"/"No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as their raw in-memory bytes, clamped
		// to at most 8.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian the significant bytes sit at the tail of the
		// 64-bit word.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Other property types cannot be flattened to raw bytes.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate when the caller's buffer is small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3280
3281
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC hook may veto access to this property for the calling credential.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// copyoutkdata hands the serialized text back to the caller as
		// out-of-line vm memory.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3326
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC hook may veto access to this property for the calling credential.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// Plane-aware lookup; `options` controls recursion (see
	// IORegistryEntry::copyProperty).
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// copyoutkdata hands the serialized text back as out-of-line
		// vm memory.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3374
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	// Legacy XML variant; disabled in favor of the _bin entry point below.
	return kIOReturnUnsupported;
}
3384
3385 #if CONFIG_MACF
3386
// Context threaded through GetPropertiesEditor() while an entry's property
// table is being serialized for user space.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;       // credential the MAC property check runs against
	IORegistryEntry * entry; // entry whose properties are being serialized
	OSCollection * root;     // top-level collection; recorded on first callback
};
3392
3393 static const OSMetaClassBase *
3394 GetPropertiesEditor(void * reference,
3395 OSSerialize * s,
3396 OSCollection * container,
3397 const OSSymbol * name,
3398 const OSMetaClassBase * value)
3399 {
3400 GetPropertiesEditorRef * ref = (typeof(ref))reference;
3401
3402 if (!ref->root) {
3403 ref->root = container;
3404 }
3405 if (ref->root == container) {
3406 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3407 value = 0;
3408 }
3409 }
3410 if (value) {
3411 value->retain();
3412 }
3413 return value;
3414 }
3415
3416 #endif /* CONFIG_MACF */
3417
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	vm_size_t len;
	OSSerialize * s;
	OSSerialize::Editor editor = 0;
	void * editRef = 0;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When the MAC policy wants to filter properties, install the editor
	// callback.  `ref` lives on this stack frame, which is safe because
	// the editor only runs inside serializeProperties() below.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = 0;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}

	if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		*propertiesCnt = len;
		// copyoutkdata hands the serialized data back as out-of-line
		// vm memory.
		err = copyoutkdata(s->text(), len, properties);
	}
	s->release();

	return err;
}
3462
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC hook may veto access to this property for the calling credential.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	// The pseudo-property key returns the list of property keys instead
	// of a property value.
	if (gIORegistryEntryPropertyKeysKey == sym) {
		obj = entry->copyPropertyKeys();
	} else {
		// Plane-aware recursive lookup only when requested and a plane
		// name was supplied.
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = entry->copyProperty(property_name,
			    IORegistryEntry::getPlane(plane), options );
		} else {
			obj = entry->copyProperty(property_name);
		}
		// Some properties are single-shot: reading them removes them
		// from the entry.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// copyoutkdata hands the serialized data back as out-of-line
		// vm memory.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3529
3530
/* Routine io_registry_entry_set_properties */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the serialized properties buffer.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	// Map the caller's out-of-line buffer; consumes the copy on success.
	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// (the real outcome is reported through *result)
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		// MAC hook may veto property updates for this credential.
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			res = entry->setProperties( obj );
		}

		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
3587
3588 /* Routine io_registry_entry_get_child_iterator */
3589 kern_return_t
3590 is_io_registry_entry_get_child_iterator(
3591 io_object_t registry_entry,
3592 io_name_t plane,
3593 io_object_t *iterator )
3594 {
3595 CHECK( IORegistryEntry, registry_entry, entry );
3596
3597 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
3598 IORegistryEntry::getPlane( plane )));
3599
3600 return kIOReturnSuccess;
3601 }
3602
3603 /* Routine io_registry_entry_get_parent_iterator */
3604 kern_return_t
3605 is_io_registry_entry_get_parent_iterator(
3606 io_object_t registry_entry,
3607 io_name_t plane,
3608 io_object_t *iterator)
3609 {
3610 CHECK( IORegistryEntry, registry_entry, entry );
3611
3612 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
3613 IORegistryEntry::getPlane( plane )));
3614
3615 return kIOReturnSuccess;
3616 }
3617
/* Routine io_service_get_busy_state */
kern_return_t
is_io_service_get_busy_state(
	io_object_t _service,
	uint32_t *busyState )
{
	CHECK( IOService, _service, service );

	// Report the service's current busy count.
	*busyState = service->getBusyState();

	return kIOReturnSuccess;
}
3630
/* Routine io_service_get_state */
kern_return_t
is_io_service_get_state(
	io_object_t _service,
	uint64_t *state,
	uint32_t *busy_state,
	uint64_t *accumulated_busy_time )
{
	CHECK( IOService, _service, service );

	// Snapshot the service's state bits, busy count, and accumulated
	// busy time in one call.
	*state = service->getState();
	*busy_state = service->getBusyState();
	*accumulated_busy_time = service->getAccumulatedBusyTime();

	return kIOReturnSuccess;
}
3647
3648 /* Routine io_service_wait_quiet */
3649 kern_return_t
3650 is_io_service_wait_quiet(
3651 io_object_t _service,
3652 mach_timespec_t wait_time )
3653 {
3654 uint64_t timeoutNS;
3655
3656 CHECK( IOService, _service, service );
3657
3658 timeoutNS = wait_time.tv_sec;
3659 timeoutNS *= kSecondScale;
3660 timeoutNS += wait_time.tv_nsec;
3661
3662 return service->waitQuiet(timeoutNS);
3663 }
3664
3665 /* Routine io_service_request_probe */
3666 kern_return_t
3667 is_io_service_request_probe(
3668 io_object_t _service,
3669 uint32_t options )
3670 {
3671 CHECK( IOService, _service, service );
3672
3673 return service->requestProbe( options );
3674 }
3675
3676 /* Routine io_service_get_authorization_id */
3677 kern_return_t
3678 is_io_service_get_authorization_id(
3679 io_object_t _service,
3680 uint64_t *authorization_id )
3681 {
3682 kern_return_t kr;
3683
3684 CHECK( IOService, _service, service );
3685
3686 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
3687 kIOClientPrivilegeAdministrator );
3688 if (kIOReturnSuccess != kr) {
3689 return kr;
3690 }
3691
3692 *authorization_id = service->getAuthorizationID();
3693
3694 return kr;
3695 }
3696
3697 /* Routine io_service_set_authorization_id */
3698 kern_return_t
3699 is_io_service_set_authorization_id(
3700 io_object_t _service,
3701 uint64_t authorization_id )
3702 {
3703 CHECK( IOService, _service, service );
3704
3705 return service->setAuthorizationID( authorization_id );
3706 }
3707
/* Routine io_service_open_ndr */
//
// Open a new user-client connection on a service for the calling task.
// On success *connection holds the new IOUserClient and *result is
// kIOReturnSuccess; the function's own return value is the MIG/IPC status.
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = 0;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = 0;
	bool crossEndian;
	bool disallowAccess;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// Only the calling task may open a connection on its own behalf.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

	do{
		// Caller-supplied properties are not accepted on this path.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		// Disabled: would unserialize caller-supplied properties from the
		// out-of-line buffer into propertiesDict.
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// If the caller's integer representation differs from the kernel's,
		// pass the NDR record down in the creation properties so the user
		// client can detect the cross-endian caller.
		crossEndian = (ndr.int_rep != NDR_record.int_rep);
		if (crossEndian) {
			if (!propertiesDict) {
				propertiesDict = OSDictionary::withCapacity(4);
			}
			OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
			if (data) {
				if (propertiesDict) {
					propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
				}
				data->release();
			}
		}

		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess) {
			assert( OSDynamicCast(IOUserClient, client));

			// Initialize per-connection state before any checks that may
			// route through clientClose() below.
			client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
			client->closed = false;
			client->lock = IOLockAlloc();

			// Cross-endian access requires explicit opt-in by either the
			// service or the client via the compatibility property.
			disallowAccess = (crossEndian
			    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
			    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
			if (disallowAccess) {
				res = kIOReturnUnsupported;
			}
#if CONFIG_MACF
			// MAC policy gets a veto on the open.
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				res = kIOReturnNotPermitted;
			}
#endif

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}

			// Any failure after creation: close and drop the client so the
			// caller receives no object.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->release();
				client = 0;
				break;
			}
			// Record the creating process for diagnostics.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
				creatorName->release();
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	// res carries the IOKit status out-of-band; err is the IPC status.
	*connection = client;
	*result = res;

	return err;
}
3836
/* Routine io_service_close */
//
// Close a user-client connection. clientClose() is guaranteed to run at
// most once per non-shared client via the atomic swap on 'closed'.
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// A port that actually names a mapping set (see
	// is_io_connect_map_memory_into_task) is accepted as a no-op.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared-instance clients may be closed repeatedly; otherwise only the
	// first caller to flip 'closed' from 0 to 1 performs the close.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IOLockLock(client->lock);
		client->clientClose();
		IOLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
3862
3863 /* Routine io_connect_get_service */
3864 kern_return_t
3865 is_io_connect_get_service(
3866 io_object_t connection,
3867 io_object_t *service )
3868 {
3869 IOService * theService;
3870
3871 CHECK( IOUserClient, connection, client );
3872
3873 theService = client->getService();
3874 if (theService) {
3875 theService->retain();
3876 }
3877
3878 *service = theService;
3879
3880 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
3881 }
3882
3883 /* Routine io_connect_set_notification_port */
3884 kern_return_t
3885 is_io_connect_set_notification_port(
3886 io_object_t connection,
3887 uint32_t notification_type,
3888 mach_port_t port,
3889 uint32_t reference)
3890 {
3891 kern_return_t ret;
3892 CHECK( IOUserClient, connection, client );
3893
3894 IOStatisticsClientCall();
3895 IOLockLock(client->lock);
3896 ret = client->registerNotificationPort( port, notification_type,
3897 (io_user_reference_t) reference );
3898 IOLockUnlock(client->lock);
3899 return ret;
3900 }
3901
3902 /* Routine io_connect_set_notification_port */
3903 kern_return_t
3904 is_io_connect_set_notification_port_64(
3905 io_object_t connection,
3906 uint32_t notification_type,
3907 mach_port_t port,
3908 io_user_reference_t reference)
3909 {
3910 kern_return_t ret;
3911 CHECK( IOUserClient, connection, client );
3912
3913 IOStatisticsClientCall();
3914 IOLockLock(client->lock);
3915 ret = client->registerNotificationPort( port, notification_type,
3916 reference );
3917 IOLockUnlock(client->lock);
3918 return ret;
3919 }
3920
/* Routine io_connect_map_memory_into_task */
//
// Map client memory of the given type into 'into_task'. On success the
// actual address/size are returned through *address/*size. The resulting
// IOMemoryMap is either pushed out as a port name to the target task or
// tracked on the client's 'mappings' set, depending on ownership.
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			// gIOObjectPortLock guards the lazily created 'mappings' set.
			IOLockLock( gIOObjectPortLock);
			if (0 == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			// The set (if any) now holds the reference; drop ours.
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
3978
3979 /* Routine is_io_connect_map_memory */
3980 kern_return_t
3981 is_io_connect_map_memory(
3982 io_object_t connect,
3983 uint32_t type,
3984 task_t task,
3985 uint32_t * mapAddr,
3986 uint32_t * mapSize,
3987 uint32_t flags )
3988 {
3989 IOReturn err;
3990 mach_vm_address_t address;
3991 mach_vm_size_t size;
3992
3993 address = SCALAR64(*mapAddr);
3994 size = SCALAR64(*mapSize);
3995
3996 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3997
3998 *mapAddr = SCALAR32(address);
3999 *mapSize = SCALAR32(size);
4000
4001 return err;
4002 }
4003 } /* extern "C" */
4004
// Find, detach, and return the tracked mapping whose backing descriptor is
// 'mem'; returns 0 when no such mapping is in the client's 'mappings' set.
// The returned map carries a reference the caller must release.
IOMemoryMap *
IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
	OSIterator * iter;
	IOMemoryMap * map = 0;

	// gIOObjectPortLock guards the 'mappings' set.
	IOLockLock(gIOObjectPortLock);

	iter = OSCollectionIterator::withCollection(mappings);
	if (iter) {
		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
			if (mem == map->getMemoryDescriptor()) {
				// Retain before removal so the caller gets a live reference
				// even though the set drops its own.
				map->retain();
				mappings->removeObject(map);
				break;
			}
		}
		iter->release();
	}

	IOLockUnlock(gIOObjectPortLock);

	return map;
}
4029
4030 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
//
// Remove a mapping of client memory of the given type at 'address' in
// 'from_task', undoing is_io_connect_map_memory_into_task.
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = 0;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Ask the client which descriptor backs this memory type.
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask look up the existing
		// mapping at 'address' rather than establish a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Stop tracking the map on this client.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			if (from_task != current_task()) {
				// Obtain the map's port name in the target task so its
				// user references can be dropped below.
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// Drop two user references on the name in the target task.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (from_task == current_task()) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4090
4091 kern_return_t
4092 is_io_connect_unmap_memory(
4093 io_object_t connect,
4094 uint32_t type,
4095 task_t task,
4096 uint32_t mapAddr )
4097 {
4098 IOReturn err;
4099 mach_vm_address_t address;
4100
4101 address = SCALAR64(mapAddr);
4102
4103 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4104
4105 return err;
4106 }
4107
4108
4109 /* Routine io_connect_add_client */
4110 kern_return_t
4111 is_io_connect_add_client(
4112 io_object_t connection,
4113 io_object_t connect_to)
4114 {
4115 CHECK( IOUserClient, connection, client );
4116 CHECK( IOUserClient, connect_to, to );
4117
4118 IOStatisticsClientCall();
4119 return client->connectClient( to );
4120 }
4121
4122
4123 /* Routine io_connect_set_properties */
4124 kern_return_t
4125 is_io_connect_set_properties(
4126 io_object_t connection,
4127 io_buf_ptr_t properties,
4128 mach_msg_type_number_t propertiesCnt,
4129 kern_return_t * result)
4130 {
4131 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4132 }
4133
/* Routine io_user_client_method */
//
// Dispatch an external method that returns a variable-sized structure
// output. The method deposits its result (an OSSerialize or OSData) via
// args.structureVariableOutputData; the bytes are then copied out to the
// caller as an out-of-line buffer in *var_output.
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = 0;
	OSObject * structureVariableOutputData = 0;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// No async completion on this path.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = 0;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input no larger than the inband limit should have been
	// sent inband; treat it as an IPC error.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write wrap of the caller's buffer; kIODirectionOut is
		// relative to the caller (data flows into the kernel).
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar outputs so unset slots do not leak stack contents.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		vm_size_t len;

		// Copy whichever container the method produced out to user space.
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			// Method claimed success but produced no variable output.
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4230
/* Routine io_user_client_method */
//
// Main 64-bit external-method dispatch: marshals scalar/inband/out-of-line
// inputs and outputs into IOExternalMethodArguments and invokes the
// client's externalMethod().
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = 0;
	IOMemoryDescriptor * outputMD = 0;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = 0;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = 0;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers at or below the inband size should have been
	// sent inband; reject as an IPC error.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write wrap of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero scalar outputs so unset slots do not leak stack contents.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		// kIODirectionIn is relative to the caller (data flows out).
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	// Report back the sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4319
/* Routine io_async_user_client_method */
//
// Asynchronous external-method dispatch. Like is_io_connect_method, but
// carries a wake port and async reference so the method can complete later.
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = 0;
	IOMemoryDescriptor * outputMD = 0;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Slot 0 of the async reference encodes the wake port, tagged with
	// kIOUCAsync64Flag for 64-bit address-space callers.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = 0;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers at or below the inband size should have been
	// sent inband; reject as an IPC error.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write wrap of the caller's input buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero scalar outputs so unset slots do not leak stack contents.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = *ool_output_size;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4416
4417 /* Routine io_connect_method_scalarI_scalarO */
4418 kern_return_t
4419 is_io_connect_method_scalarI_scalarO(
4420 io_object_t connect,
4421 uint32_t index,
4422 io_scalar_inband_t input,
4423 mach_msg_type_number_t inputCount,
4424 io_scalar_inband_t output,
4425 mach_msg_type_number_t * outputCount )
4426 {
4427 IOReturn err;
4428 uint32_t i;
4429 io_scalar_inband64_t _input;
4430 io_scalar_inband64_t _output;
4431
4432 mach_msg_type_number_t struct_outputCnt = 0;
4433 mach_vm_size_t ool_output_size = 0;
4434
4435 bzero(&_output[0], sizeof(_output));
4436 for (i = 0; i < inputCount; i++) {
4437 _input[i] = SCALAR64(input[i]);
4438 }
4439
4440 err = is_io_connect_method(connect, index,
4441 _input, inputCount,
4442 NULL, 0,
4443 0, 0,
4444 NULL, &struct_outputCnt,
4445 _output, outputCount,
4446 0, &ool_output_size);
4447
4448 for (i = 0; i < *outputCount; i++) {
4449 output[i] = SCALAR32(_output[i]);
4450 }
4451
4452 return err;
4453 }
4454
// Compatibility shim: invoke a legacy IOExternalMethod table entry with
// scalar inputs and scalar outputs. The entry's IOMethod takes exactly six
// arguments: the first 'inputCount' are the inputs, the remainder receive
// the outputs. Counts are validated against the table's count0/count1.
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	// Zero outputs so unused slots are deterministic.
	bzero(&_output[0], sizeof(_output));
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on arity: inputs first, then output pointers fill the
		// remaining of the six argument slots.
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy the scalar outputs back to the caller's array.
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4530
4531 /* Routine io_async_method_scalarI_scalarO */
4532 kern_return_t
4533 is_io_async_method_scalarI_scalarO(
4534 io_object_t connect,
4535 mach_port_t wake_port,
4536 io_async_ref_t reference,
4537 mach_msg_type_number_t referenceCnt,
4538 uint32_t index,
4539 io_scalar_inband_t input,
4540 mach_msg_type_number_t inputCount,
4541 io_scalar_inband_t output,
4542 mach_msg_type_number_t * outputCount )
4543 {
4544 IOReturn err;
4545 uint32_t i;
4546 io_scalar_inband64_t _input;
4547 io_scalar_inband64_t _output;
4548 io_async_ref64_t _reference;
4549
4550 bzero(&_output[0], sizeof(_output));
4551 for (i = 0; i < referenceCnt; i++) {
4552 _reference[i] = REF64(reference[i]);
4553 }
4554
4555 mach_msg_type_number_t struct_outputCnt = 0;
4556 mach_vm_size_t ool_output_size = 0;
4557
4558 for (i = 0; i < inputCount; i++) {
4559 _input[i] = SCALAR64(input[i]);
4560 }
4561
4562 err = is_io_connect_async_method(connect,
4563 wake_port, _reference, referenceCnt,
4564 index,
4565 _input, inputCount,
4566 NULL, 0,
4567 0, 0,
4568 NULL, &struct_outputCnt,
4569 _output, outputCount,
4570 0, &ool_output_size);
4571
4572 for (i = 0; i < *outputCount; i++) {
4573 output[i] = SCALAR32(_output[i]);
4574 }
4575
4576 return err;
4577 }
4578 /* Routine io_async_method_scalarI_structureO */
4579 kern_return_t
4580 is_io_async_method_scalarI_structureO(
4581 io_object_t connect,
4582 mach_port_t wake_port,
4583 io_async_ref_t reference,
4584 mach_msg_type_number_t referenceCnt,
4585 uint32_t index,
4586 io_scalar_inband_t input,
4587 mach_msg_type_number_t inputCount,
4588 io_struct_inband_t output,
4589 mach_msg_type_number_t * outputCount )
4590 {
4591 uint32_t i;
4592 io_scalar_inband64_t _input;
4593 io_async_ref64_t _reference;
4594
4595 for (i = 0; i < referenceCnt; i++) {
4596 _reference[i] = REF64(reference[i]);
4597 }
4598
4599 mach_msg_type_number_t scalar_outputCnt = 0;
4600 mach_vm_size_t ool_output_size = 0;
4601
4602 for (i = 0; i < inputCount; i++) {
4603 _input[i] = SCALAR64(input[i]);
4604 }
4605
4606 return is_io_connect_async_method(connect,
4607 wake_port, _reference, referenceCnt,
4608 index,
4609 _input, inputCount,
4610 NULL, 0,
4611 0, 0,
4612 output, outputCount,
4613 NULL, &scalar_outputCnt,
4614 0, &ool_output_size);
4615 }
4616
4617 /* Routine io_async_method_scalarI_structureI */
4618 kern_return_t
4619 is_io_async_method_scalarI_structureI(
4620 io_connect_t connect,
4621 mach_port_t wake_port,
4622 io_async_ref_t reference,
4623 mach_msg_type_number_t referenceCnt,
4624 uint32_t index,
4625 io_scalar_inband_t input,
4626 mach_msg_type_number_t inputCount,
4627 io_struct_inband_t inputStruct,
4628 mach_msg_type_number_t inputStructCount )
4629 {
4630 uint32_t i;
4631 io_scalar_inband64_t _input;
4632 io_async_ref64_t _reference;
4633
4634 for (i = 0; i < referenceCnt; i++) {
4635 _reference[i] = REF64(reference[i]);
4636 }
4637
4638 mach_msg_type_number_t scalar_outputCnt = 0;
4639 mach_msg_type_number_t inband_outputCnt = 0;
4640 mach_vm_size_t ool_output_size = 0;
4641
4642 for (i = 0; i < inputCount; i++) {
4643 _input[i] = SCALAR64(input[i]);
4644 }
4645
4646 return is_io_connect_async_method(connect,
4647 wake_port, _reference, referenceCnt,
4648 index,
4649 _input, inputCount,
4650 inputStruct, inputStructCount,
4651 0, 0,
4652 NULL, &inband_outputCnt,
4653 NULL, &scalar_outputCnt,
4654 0, &ool_output_size);
4655 }
4656
4657 /* Routine io_async_method_structureI_structureO */
4658 kern_return_t
4659 is_io_async_method_structureI_structureO(
4660 io_object_t connect,
4661 mach_port_t wake_port,
4662 io_async_ref_t reference,
4663 mach_msg_type_number_t referenceCnt,
4664 uint32_t index,
4665 io_struct_inband_t input,
4666 mach_msg_type_number_t inputCount,
4667 io_struct_inband_t output,
4668 mach_msg_type_number_t * outputCount )
4669 {
4670 uint32_t i;
4671 mach_msg_type_number_t scalar_outputCnt = 0;
4672 mach_vm_size_t ool_output_size = 0;
4673 io_async_ref64_t _reference;
4674
4675 for (i = 0; i < referenceCnt; i++) {
4676 _reference[i] = REF64(reference[i]);
4677 }
4678
4679 return is_io_connect_async_method(connect,
4680 wake_port, _reference, referenceCnt,
4681 index,
4682 NULL, 0,
4683 input, inputCount,
4684 0, 0,
4685 output, outputCount,
4686 NULL, &scalar_outputCnt,
4687 0, &ool_output_size);
4688 }
4689
4690
// Compatibility shim: invoke a legacy IOExternalAsyncMethod table entry
// with scalar inputs and scalar outputs. The entry's IOAsyncMethod takes
// the (narrowed) async reference plus six slots: the first 'inputCount'
// are inputs, the remainder receive outputs. Counts are validated against
// the table's count0/count1.
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	// Zero outputs so unused slots are deterministic, and narrow the
	// 64-bit async reference for the legacy method signature.
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on arity: reference first, then inputs, then output
		// pointers fill the remaining of the six argument slots.
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy the scalar outputs back to the caller's array.
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4782
4783
4784 /* Routine io_connect_method_scalarI_structureO */
4785 kern_return_t
4786 is_io_connect_method_scalarI_structureO(
4787 io_object_t connect,
4788 uint32_t index,
4789 io_scalar_inband_t input,
4790 mach_msg_type_number_t inputCount,
4791 io_struct_inband_t output,
4792 mach_msg_type_number_t * outputCount )
4793 {
4794 uint32_t i;
4795 io_scalar_inband64_t _input;
4796
4797 mach_msg_type_number_t scalar_outputCnt = 0;
4798 mach_vm_size_t ool_output_size = 0;
4799
4800 for (i = 0; i < inputCount; i++) {
4801 _input[i] = SCALAR64(input[i]);
4802 }
4803
4804 return is_io_connect_method(connect, index,
4805 _input, inputCount,
4806 NULL, 0,
4807 0, 0,
4808 output, outputCount,
4809 NULL, &scalar_outputCnt,
4810 0, &ool_output_size);
4811 }
4812
/*
 * Pre-Leopard shim: dispatch a kIOUCScalarIStructO external method.
 * Validates the scalar input count against method->count0 and the
 * output struct size against method->count1 (unless the table entry
 * declares kIOUCVariableStructureSize), then invokes the registered
 * IOMethod member function on 'object' with the scalars narrowed via
 * ARG32() and the output buffer/size pointer as trailing arguments.
 *
 * Returns kIOReturnBadArgument on any count/size mismatch or bad
 * table entry, otherwise whatever the driver method returns.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	/* do { ... continue; } while (false) is used as a structured
	 * goto: any validation failure falls out with err unchanged. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Legacy IOMethods take exactly six untyped arguments; the
		 * scalar inputs fill the leading slots and the output buffer
		 * (plus its size pointer, when a slot is free) fills the
		 * trailing ones, zero-padded. */
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, 0 );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, 0, 0 );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, 0, 0, 0 );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
4877
4878
/*
 * Async variant of the kIOUCScalarIStructO shim: same validation and
 * six-argument packing as shim_io_connect_method_scalarI_structureO,
 * but the IOAsyncMethod additionally receives the caller's async
 * reference array (narrowed to natural_t via REF32) as its first
 * argument.  asyncWakePort is accepted for signature parity but not
 * used here; the reference array already encodes the completion
 * information the driver needs.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Repack the 64-bit user references into the legacy 32-bit
	 * io_async_ref_t the old method signature expects. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	/* do/continue acts as a structured goto on validation failure. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, 0 );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, 0, 0 );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, 0, 0, 0 );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, 0, 0, 0, 0 );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
4956
4957 /* Routine io_connect_method_scalarI_structureI */
4958 kern_return_t
4959 is_io_connect_method_scalarI_structureI(
4960 io_connect_t connect,
4961 uint32_t index,
4962 io_scalar_inband_t input,
4963 mach_msg_type_number_t inputCount,
4964 io_struct_inband_t inputStruct,
4965 mach_msg_type_number_t inputStructCount )
4966 {
4967 uint32_t i;
4968 io_scalar_inband64_t _input;
4969
4970 mach_msg_type_number_t scalar_outputCnt = 0;
4971 mach_msg_type_number_t inband_outputCnt = 0;
4972 mach_vm_size_t ool_output_size = 0;
4973
4974 for (i = 0; i < inputCount; i++) {
4975 _input[i] = SCALAR64(input[i]);
4976 }
4977
4978 return is_io_connect_method(connect, index,
4979 _input, inputCount,
4980 inputStruct, inputStructCount,
4981 0, 0,
4982 NULL, &inband_outputCnt,
4983 NULL, &scalar_outputCnt,
4984 0, &ool_output_size);
4985 }
4986
4987 kern_return_t
4988 shim_io_connect_method_scalarI_structureI(
4989 IOExternalMethod * method,
4990 IOService * object,
4991 const io_user_scalar_t * input,
4992 mach_msg_type_number_t inputCount,
4993 io_struct_inband_t inputStruct,
4994 mach_msg_type_number_t inputStructCount )
4995 {
4996 IOMethod func;
4997 IOReturn err = kIOReturnBadArgument;
4998
4999 do{
5000 if (inputCount != method->count0) {
5001 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5002 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5003 continue;
5004 }
5005 if ((kIOUCVariableStructureSize != method->count1)
5006 && (inputStructCount != method->count1)) {
5007 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5008 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5009 continue;
5010 }
5011
5012 func = method->func;
5013
5014 switch (inputCount) {
5015 case 5:
5016 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5017 ARG32(input[3]), ARG32(input[4]),
5018 inputStruct );
5019 break;
5020 case 4:
5021 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5022 ARG32(input[3]),
5023 inputStruct, (void *)(uintptr_t)inputStructCount );
5024 break;
5025 case 3:
5026 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5027 inputStruct, (void *)(uintptr_t)inputStructCount,
5028 0 );
5029 break;
5030 case 2:
5031 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5032 inputStruct, (void *)(uintptr_t)inputStructCount,
5033 0, 0 );
5034 break;
5035 case 1:
5036 err = (object->*func)( ARG32(input[0]),
5037 inputStruct, (void *)(uintptr_t)inputStructCount,
5038 0, 0, 0 );
5039 break;
5040 case 0:
5041 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5042 0, 0, 0, 0 );
5043 break;
5044
5045 default:
5046 IOLog("%s: Bad method table\n", object->getName());
5047 }
5048 }while (false);
5049
5050 return err;
5051 }
5052
/*
 * Async variant of the kIOUCScalarIStructI shim: same validation and
 * argument packing as shim_io_connect_method_scalarI_structureI, but
 * the IOAsyncMethod additionally receives the caller's async
 * reference array (narrowed via REF32) as its first argument.
 * asyncWakePort is accepted for signature parity but not used here.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Repack the 64-bit user references into the legacy 32-bit
	 * io_async_ref_t the old method signature expects. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	/* do/continue acts as a structured goto on validation failure. */
	do{
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    0 );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    0, 0 );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    0, 0, 0 );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    0, 0, 0, 0 );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5133
5134 /* Routine io_connect_method_structureI_structureO */
5135 kern_return_t
5136 is_io_connect_method_structureI_structureO(
5137 io_object_t connect,
5138 uint32_t index,
5139 io_struct_inband_t input,
5140 mach_msg_type_number_t inputCount,
5141 io_struct_inband_t output,
5142 mach_msg_type_number_t * outputCount )
5143 {
5144 mach_msg_type_number_t scalar_outputCnt = 0;
5145 mach_vm_size_t ool_output_size = 0;
5146
5147 return is_io_connect_method(connect, index,
5148 NULL, 0,
5149 input, inputCount,
5150 0, 0,
5151 output, outputCount,
5152 NULL, &scalar_outputCnt,
5153 0, &ool_output_size);
5154 }
5155
/*
 * Pre-Leopard shim: dispatch a kIOUCStructIStructO external method.
 * Both the input and output struct sizes are validated against the
 * table entry (count0/count1) unless declared variable-size; the
 * IOMethod is then called with one of three argument layouts chosen
 * by which of count0/count1 are non-zero, matching the legacy
 * convention drivers were written against.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod * method,
	IOService * object,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	/* do/continue acts as a structured goto on validation failure. */
	do{
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Layout depends on which directions carry data:
		 * in+out, out-only, or in-only. */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
			} else {
				err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
			}
		} else {
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
		}
	}while (false);


	return err;
}
5199
/*
 * Async variant of the kIOUCStructIStructO shim: same validation and
 * three-way argument layout as the synchronous version, with the
 * repacked async reference array (REF32) prepended as the first
 * argument.  asyncWakePort is accepted for signature parity but not
 * used here.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Repack the 64-bit user references into the legacy 32-bit
	 * io_async_ref_t the old method signature expects. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	/* do/continue acts as a structured goto on validation failure. */
	do{
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Layout depends on which directions carry data:
		 * in+out, out-only, or in-only. */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
			} else {
				err = (object->*func)( reference,
				    output, outputCount, 0, 0, 0, 0 );
			}
		} else {
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
		}
	}while (false);

	return err;
}
5255
#if !NO_KEXTD
/* Latched once kextd reports kIOCatalogKextdFinishedLaunching so the
 * busy count on the service root is only dropped a single time (see
 * is_io_catalog_send_data). */
bool gIOKextdClearedBusy = false;
#endif
5259
/* Routine io_catalog_send_data */
/*
 * MIG handler: receive a serialized XML blob (OSArray/OSDictionary of
 * driver personalities) plus a command flag from user space and apply
 * it to the global IOCatalogue.  The caller must hold the master
 * device port and the kext-secure-management entitlement.
 *
 * Note the two-level return convention: the function's own return
 * value is the MIG/Mach status, while the IOKit status is written to
 * *result.  Once vm_map_copyout() has consumed the in-transit copy
 * object, KERN_SUCCESS must be returned so the caller does not try to
 * dispose of it again.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
#if NO_KEXTD
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
	OSObject * obj = 0;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	/* Only the three kextd-signalling flags may legally arrive with
	 * no payload; everything else must carry data. */
	if ((flag != kIOCatalogRemoveKernelLinker &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management")) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		/* Cap the unserialized payload size before mapping it. */
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		if (inDataCount) {
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	/* Apply the requested catalogue operation; each case type-checks
	 * the unserialized object (OSArray or OSDictionary) first. */
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogStartMatching: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->startMatching( dict )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveKernelLinker:
		kr = KERN_NOT_SUPPORTED;
		break;

	case kIOCatalogKextdActive:
#if !NO_KEXTD
		IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
		OSKext::setKextdActive();

		/* Dump all nonloaded startup extensions; kextd will now send them
		 * down on request.
		 */
		OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
		kr = kIOReturnSuccess;
		break;

	case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
		/* Drop the busy count held for kextd startup exactly once;
		 * gIOKextdClearedBusy guards against repeated messages. */
		if (!gIOKextdClearedBusy) {
			IOService * serviceRoot = IOService::getServiceRoot();
			if (serviceRoot) {
				IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
				serviceRoot->adjustBusy(-1);
				gIOKextdClearedBusy = true;
			}
		}
#endif
		kr = kIOReturnSuccess;
	}
	break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD */
}
5432
5433 /* Routine io_catalog_terminate */
/*
 * MIG handler: terminate services or driver modules by name.  Requires
 * the master device port and administrator privilege.  For
 * kIOCatalogServiceTerminate every registry entry whose class matches
 * 'name' is synchronously terminated; the outer do/while retries the
 * full iteration whenever the registry iterator was invalidated by
 * concurrent registry changes mid-walk.
 */
kern_return_t
is_io_catalog_terminate(
	mach_port_t master_port,
	uint32_t flag,
	io_name_t name )
{
	kern_return_t kr;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

	switch (flag) {
#if !defined(SECURE_KERNEL)
	case kIOCatalogServiceTerminate:
		OSIterator * iter;
		IOService * service;

		iter = IORegistryIterator::iterateOver(gIOServicePlane,
		    kIORegistryIterateRecursively);
		if (!iter) {
			return kIOReturnNoMemory;
		}

		do {
			iter->reset();
			while ((service = (IOService *)iter->getNextObject())) {
				if (service->metaCast(name)) {
					if (!service->terminate( kIOServiceRequired
					    | kIOServiceSynchronous)) {
						kr = kIOReturnUnsupported;
						break;
					}
				}
			}
			/* Retry from scratch if the walk both completed
			 * (service == NULL) and the iterator went stale. */
		} while (!service && !iter->isValid());
		iter->release();
		break;

	case kIOCatalogModuleUnload:
	case kIOCatalogModuleTerminate:
		kr = gIOCatalogue->terminateDriversForModule(name,
		    flag == kIOCatalogModuleUnload);
		break;
#endif

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	return kr;
}
5493
/* Routine io_catalog_get_data */
/*
 * MIG handler: serialize catalogue state selected by 'flag' and hand
 * it back to user space as an out-of-line vm_map_copy_t.  The data is
 * staged in a kernel_map allocation, wrapped by vm_map_copyin(), and
 * ownership of the copy object passes to the MIG reply.
 */
kern_return_t
is_io_catalog_get_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t *outData,
	mach_msg_type_number_t *outDataCount)
{
	kern_return_t kr = kIOReturnSuccess;
	OSSerialize * s;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	//printf("io_catalog_get_data called. flag: %d\n", flag);

	s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}

	kr = gIOCatalogue->serializeData(flag, s);

	if (kr == kIOReturnSuccess) {
		vm_offset_t data;
		vm_map_copy_t copy;
		vm_size_t size;

		size = s->getLength();
		kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
		if (kr == kIOReturnSuccess) {
			bcopy(s->text(), (void *)data, size);
			/* vm_map_copyin with src_destroy=true consumes the
			 * staging allocation on success.
			 * NOTE(review): if copyin fails, the staging pages
			 * appear to be leaked and *outData is still set from
			 * 'copy' — confirm against upstream before changing. */
			kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
			    (vm_map_size_t)size, true, &copy);
			*outData = (char *)copy;
			*outDataCount = size;
		}
	}

	s->release();

	return kr;
}
5538
5539 /* Routine io_catalog_get_gen_count */
5540 kern_return_t
5541 is_io_catalog_get_gen_count(
5542 mach_port_t master_port,
5543 uint32_t *genCount)
5544 {
5545 if (master_port != master_device_port) {
5546 return kIOReturnNotPrivileged;
5547 }
5548
5549 //printf("io_catalog_get_gen_count called.\n");
5550
5551 if (!genCount) {
5552 return kIOReturnBadArgument;
5553 }
5554
5555 *genCount = gIOCatalogue->getGenerationCount();
5556
5557 return kIOReturnSuccess;
5558 }
5559
5560 /* Routine io_catalog_module_loaded.
5561 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5562 */
5563 kern_return_t
5564 is_io_catalog_module_loaded(
5565 mach_port_t master_port,
5566 io_name_t name)
5567 {
5568 if (master_port != master_device_port) {
5569 return kIOReturnNotPrivileged;
5570 }
5571
5572 //printf("io_catalog_module_loaded called. name %s\n", name);
5573
5574 if (!name) {
5575 return kIOReturnBadArgument;
5576 }
5577
5578 gIOCatalogue->moduleHasLoaded(name);
5579
5580 return kIOReturnSuccess;
5581 }
5582
5583 kern_return_t
5584 is_io_catalog_reset(
5585 mach_port_t master_port,
5586 uint32_t flag)
5587 {
5588 if (master_port != master_device_port) {
5589 return kIOReturnNotPrivileged;
5590 }
5591
5592 switch (flag) {
5593 case kIOCatalogResetDefault:
5594 gIOCatalogue->reset();
5595 break;
5596
5597 default:
5598 return kIOReturnBadArgument;
5599 }
5600
5601 return kIOReturnSuccess;
5602 }
5603
/*
 * Fast-path Mach trap: look up the caller's IOUserClient from the
 * port name in args->userClientRef, resolve the trap table entry for
 * args->index via getTargetAndTrapForIndex(), and invoke it with the
 * six user-supplied words.  Returns kIOReturnBadArgument if the ref
 * does not resolve to an IOUserClient or the index/target/function is
 * missing; otherwise the trap function's own result.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient *userClient;

	/* The lookup takes a reference on the user client; it is paired
	 * with iokit_remove_connect_reference() below. */
	if ((userClient = OSDynamicCast(IOUserClient,
	    iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) {
		IOExternalTrap *trap;
		IOService *target = NULL;

		trap = userClient->getTargetAndTrapForIndex(&target, args->index);

		if (trap && target) {
			IOTrap func;

			func = trap->func;

			if (func) {
				result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
			}
		}

		iokit_remove_connect_reference(userClient);
	}

	return result;
}
5632
5633 /* Routine io_device_tree_entry_exists_with_name */
5634 kern_return_t
5635 is_io_device_tree_entry_exists_with_name(
5636 mach_port_t master_port,
5637 io_name_t name,
5638 boolean_t *exists )
5639 {
5640 OSCollectionIterator *iter;
5641
5642 if (master_port != master_device_port) {
5643 return kIOReturnNotPrivileged;
5644 }
5645
5646 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5647 *exists = iter && iter->getNextObject();
5648 OSSafeReleaseNULL(iter);
5649
5650 return kIOReturnSuccess;
5651 }
5652 } /* extern "C" */
5653
/*
 * Central dispatch for user-client external methods.
 *
 * Two paths:
 *  - If a dispatch table entry is supplied, validate the four
 *    scalar/struct input/output counts against it (entries set to
 *    kIOUCVariableStructureSize skip their check) and call
 *    dispatch->function, or return kIOReturnNoCompletion so the
 *    subclass can dispatch itself.
 *  - Otherwise fall back to the pre-Leopard IOExternalMethod /
 *    IOExternalAsyncMethod tables, routing through the matching shim
 *    by (method->flags & kIOUCTypeMask).  These legacy tables cannot
 *    describe out-of-line structs, hence the kIOReturnIPCError guard.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		/* Struct sizes may come inband or via a memory descriptor;
		 * check whichever form the caller used. */
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementator can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		/* Async legacy path: resolve the IOExternalAsyncMethod and
		 * route by its declared argument style. */
		IOExternalAsyncMethod * method;
		object = 0;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		/* Synchronous legacy path: resolve the IOExternalMethod and
		 * route by its declared argument style. */
		IOExternalMethod * method;
		object = 0;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	/* Propagate the (possibly shim-updated) output struct size back
	 * to the caller's argument block. */
	args->structureOutputSize = structureOutputSize;

	return err;
}
5801
/* Reserved virtual-method padding slots for IOUserClient binary
 * compatibility.  Slots 0 and 1 are consumed on 32-bit builds (marked
 * Used) but remain unused under __LP64__. */
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);