]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
6d1edda91f769008692375811c3bc3b100da0243
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <libkern/c++/OSSharedPtr.h>
32 #include <IOKit/IOKitServer.h>
33 #include <IOKit/IOKitKeysPrivate.h>
34 #include <IOKit/IOUserClient.h>
35 #include <IOKit/IOService.h>
36 #include <IOKit/IORegistryEntry.h>
37 #include <IOKit/IOCatalogue.h>
38 #include <IOKit/IOMemoryDescriptor.h>
39 #include <IOKit/IOBufferMemoryDescriptor.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOBSD.h>
42 #include <IOKit/IOStatisticsPrivate.h>
43 #include <IOKit/IOTimeStamp.h>
44 #include <IOKit/IODeviceTreeSupport.h>
45 #include <IOKit/IOUserServer.h>
46 #include <IOKit/system.h>
47 #include <libkern/OSDebug.h>
48 #include <DriverKit/OSAction.h>
49 #include <sys/proc.h>
50 #include <sys/kauth.h>
51 #include <sys/codesign.h>
52
53 #include <mach/sdt.h>
54 #include <os/hash.h>
55
56 #if CONFIG_MACF
57
58 extern "C" {
59 #include <security/mac_framework.h>
60 };
61 #include <sys/kauth.h>
62
63 #define IOMACF_LOG 0
64
65 #endif /* CONFIG_MACF */
66
67 #include <IOKit/assert.h>
68
69 #include "IOServicePrivate.h"
70 #include "IOKitKernelInternal.h"
71
/* Helpers to marshal user scalars between 32- and 64-bit representations. */
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x) ((io_user_reference_t)((UInt64)(x)))
#define REF32(x) ((int)(x))

/* Flag bits carried in the low bits of an async reference's first entry. */
enum{
	kIOUCAsync0Flags = 3ULL,          /* mask covering both flag bits below */
	kIOUCAsync64Flag = 1ULL,          /* reference came from a 64-bit client */
	kIOUCAsyncErrorLoggedFlag = 2ULL  /* an IPC send failure was already logged */
};
83
#if IOKITSTATS

/* Register this user client instance with the IOStatistics subsystem. */
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

/* Unregister; guarded because 'reserved' may never have been allocated. */
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

/* Count one externalMethod/trap style call against 'client'. */
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

/* Statistics disabled: all three compile to nothing. */
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
109
#if DEVELOPMENT || DEBUG

/*
 * Debug-only: temporarily overwrite this frame's saved return address with
 * 'a' so backtraces taken while inside driver code attribute the call site
 * usefully.  Must be paired with FAKE_STACK_FRAME_END() in the same scope.
 */
#define FAKE_STACK_FRAME(a)	\
    const void ** __frameptr; \
    const void * __retaddr; \
    __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
    __retaddr = __frameptr[1]; \
    __frameptr[1] = (a);

#define FAKE_STACK_FRAME_END() \
    __frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

/* Element counts of the 32- and 64-bit async reference arrays. */
#define ASYNC_REF_COUNT         (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT       (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
131
132 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
133
134 extern "C" {
135 #include <mach/mach_traps.h>
136 #include <vm/vm_map.h>
137 } /* extern "C" */
138
struct IOMachPortHashList;

/* IOMachPort::type narrows ipc_kobject_type_t into a UInt8 — ensure it fits. */
static_assert(IKOT_MAX_TYPE <= 255);
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// One instance exists per (object, port type) pair for which userspace holds
// a send right; instances live in the gIOMachPortHash buckets, all mutation
// guarded by gIOObjectPortLock.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // hash-bucket linkage (gIOMachPortHash)
	ipc_port_t port;                // kobject port representing 'object'
	OSObject* object;               // object the port names (tagged-retained)
	UInt32 mscount;                 // make-send count mirrored from the port
	UInt8 holdDestroy;              // set => releasePortForObject() is a no-op
	UInt8 type;                     // ipc_kobject_type_t narrowed to 8 bits

	// Allocate a port record for 'obj'; takes a tagged retain on the object.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash 'obj' to its bucket in gIOMachPortHash.
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Find the record for (obj, type) in 'bucket', or NULL. Caller holds lock.
	static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// No-senders notification: tear down the record if mscount is current.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
175
#define super OSObject
OSDefineMetaClassAndStructorsWithZone(IOMachPort, OSObject, ZC_ZFREE_CLEARMEM)

// Guards gIOMachPortHash and all IOMachPort list manipulation.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

// Optional message-filter callbacks, installed once early in boot.
SECURITY_READ_ONLY_LATE(const struct io_filter_callbacks *) gIOUCFilterCallbacks;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

// Object->port hash table; larger on macOS where many more services exist.
#if defined(XNU_TARGET_OS_OSX)
#define PORT_HASH_SIZE 4096
#else /* defined(!XNU_TARGET_OS_OSX) */
#define PORT_HASH_SIZE 256
#endif /* !defined(!XNU_TARGET_OS_OSX) */

IOMachPortHashList gIOMachPortHash[PORT_HASH_SIZE];
195
196 void
197 IOMachPortInitialize(void)
198 {
199 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
200 SLIST_INIT(&gIOMachPortHash[i]);
201 }
202 }
203
204 IOMachPortHashList*
205 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
206 {
207 return &gIOMachPortHash[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
208 }
209
210 IOMachPort*
211 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
212 {
213 IOMachPort *machPort;
214
215 SLIST_FOREACH(machPort, bucket, link) {
216 if (machPort->object == obj && machPort->type == type) {
217 return machPort;
218 }
219 }
220 return NULL;
221 }
222
223 IOMachPort*
224 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
225 {
226 IOMachPort *machPort = NULL;
227
228 machPort = new IOMachPort;
229 if (__improbable(machPort && !machPort->init())) {
230 return NULL;
231 }
232
233 machPort->object = obj;
234 machPort->type = (typeof(machPort->type))type;
235 machPort->port = iokit_alloc_object_port(obj, type);
236
237 obj->taggedRetain(OSTypeID(OSCollection));
238 machPort->mscount++;
239
240 return machPort;
241 }
242
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	// Handle a no-senders notification for the port naming (obj, type).
	// Returns true and removes the record when the notification's mscount
	// is up to date; otherwise writes the current mscount to *mscount and
	// returns false so the caller re-arms the notification.
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep the object alive across the teardown and the Aborted() call.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// A send right was re-made after the notification fired if the
		// recorded make-send count is newer than the notification's.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			// Drop the record and the tagged reference it held (lock
			// released first: both may trigger further teardown).
			machPort->release();
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit actions get an Aborted() upcall when their port dies.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	obj->release();

	return destroyed;
}
288
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	// Drop the port record for (obj, type) unless holdDestroy was set via
	// setHoldDestroy(). Never used for connect ports (see assert).
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Extra retain keeps 'obj' valid while we release outside the lock.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		machPort->release();
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
315
316 void
317 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
318 {
319 IOMachPort * machPort;
320
321 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
322 lck_mtx_lock(gIOObjectPortLock);
323
324 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
325
326 if (machPort) {
327 machPort->holdDestroy = true;
328 }
329
330 lck_mtx_unlock(gIOObjectPortLock);
331 }
332
// C-callable wrapper: drop the user-visible port for (obj, type).
void
IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
{
	IOMachPort::releasePortForObject(obj, type);
}
338
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	// Sever userspace's handles to 'obj'. The plain object port is simply
	// released; a connect port is either destroyed or, when the user client
	// still has live memory mappings, handed over to the mappings object so
	// existing map ports remain valid.
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Detach the record from 'obj'; the tagged reference moves (or dies) below.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Re-point the existing port record (and its kobject port)
			// at the mappings object, re-inserting under the new bucket.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			uc->mappings->release();
			uc->mappings = NULL;
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			machPort->release();
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		machPort->release();
	}


end:

	obj->release();
}
396
// Insert a send right for (obj, type) into 'task' and return its name there.
mach_port_name_t
IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
	return iokit_make_send_right( task, obj, type );
}
403
void
IOMachPort::free( void )
{
	// Destroy the kobject port, if one was successfully allocated.
	if (port) {
		iokit_destroy_object_port( port );
	}
	super::free();
}
412
413 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
414
// Stub: registry compatibility mode is disabled in this build; always false.
static bool
IOTaskRegistryCompatibility(task_t task)
{
	return false;
}
420
421 static void
422 IOTaskRegistryCompatibilityMatching(task_t task, OSDictionary * matching)
423 {
424 if (!IOTaskRegistryCompatibility(task)) {
425 return;
426 }
427 matching->setObject(gIOCompatibilityMatchKey, kOSBooleanTrue);
428 }
429
430 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
431
// Thread-safe wrapper around an OSIterator handed out to userspace; all
// operations take 'lock' so concurrent MIG calls cannot corrupt the iterator.
class IOUserIterator : public OSIterator
{
	OSDeclareDefaultStructors(IOUserIterator);
public:
	OSObject * userIteratorObject;  // wrapped OSIterator (or IONotifier in subclass)
	IOLock * lock;                  // guards userIteratorObject access

	// Takes ownership of 'iter'; returns NULL on failure.
	static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
	virtual bool init( void ) APPLE_KEXT_OVERRIDE;
	virtual void free() APPLE_KEXT_OVERRIDE;

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
	// getNextObject() is deliberately unusable; callers use copyNextObject(),
	// which returns a retained reference taken under the lock.
	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject();
};
448
449 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
450
// Base for userspace notification objects; reuses the superclass's
// userIteratorObject slot (aliased as holdNotify) to hold the IONotifier.
class IOUserNotification : public IOUserIterator
{
	OSDeclareDefaultStructors(IOUserNotification);

#define holdNotify      userIteratorObject

public:

	virtual void free() APPLE_KEXT_OVERRIDE;

	// Swap in a new IONotifier, removing any previous one.
	virtual void setNotification( IONotifier * obj );

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
466
467 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
468
469 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
470
471 IOUserIterator *
472 IOUserIterator::withIterator(OSIterator * iter)
473 {
474 IOUserIterator * me;
475
476 if (!iter) {
477 return NULL;
478 }
479
480 me = new IOUserIterator;
481 if (me && !me->init()) {
482 me->release();
483 me = NULL;
484 }
485 if (!me) {
486 return me;
487 }
488 me->userIteratorObject = iter;
489
490 return me;
491 }
492
493 bool
494 IOUserIterator::init( void )
495 {
496 if (!OSObject::init()) {
497 return false;
498 }
499
500 lock = IOLockAlloc();
501 if (!lock) {
502 return false;
503 }
504
505 return true;
506 }
507
void
IOUserIterator::free()
{
	// Drop the wrapped iterator/notifier and the lock; either may be NULL
	// if init() failed part-way.
	if (userIteratorObject) {
		userIteratorObject->release();
	}
	if (lock) {
		IOLockFree(lock);
	}
	OSObject::free();
}
519
520 void
521 IOUserIterator::reset()
522 {
523 IOLockLock(lock);
524 assert(OSDynamicCast(OSIterator, userIteratorObject));
525 ((OSIterator *)userIteratorObject)->reset();
526 IOLockUnlock(lock);
527 }
528
529 bool
530 IOUserIterator::isValid()
531 {
532 bool ret;
533
534 IOLockLock(lock);
535 assert(OSDynamicCast(OSIterator, userIteratorObject));
536 ret = ((OSIterator *)userIteratorObject)->isValid();
537 IOLockUnlock(lock);
538
539 return ret;
540 }
541
OSObject *
IOUserIterator::getNextObject()
{
	// Unretained access is unsafe across the user boundary; callers must
	// use copyNextObject() instead.
	assert(false);
	return NULL;
}
548
549 OSObject *
550 IOUserIterator::copyNextObject()
551 {
552 OSObject * ret = NULL;
553
554 IOLockLock(lock);
555 if (userIteratorObject) {
556 ret = ((OSIterator *)userIteratorObject)->getNextObject();
557 if (ret) {
558 ret->retain();
559 }
560 }
561 IOLockUnlock(lock);
562
563 return ret;
564 }
565
566 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
567 extern "C" {
568 // functions called from osfmk/device/iokit_rpc.c
569
// Fill 'desc' with a short human-readable description of the kobject behind
// an IOKit port, for debugging tools (lsmp and friends).
void
iokit_port_object_description(io_object_t obj, kobject_description_t desc)
{
	IORegistryEntry * regEntry;
	IOUserNotification * __unused noti;
	_IOServiceNotifier * __unused serviceNoti;
	OSSerialize * __unused s;

	if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
		// Registry entries: class name plus registry entry ID.
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
#if DEVELOPMENT || DEBUG
	} else if ((noti = OSDynamicCast(IOUserNotification, obj))
	    && ((serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->holdNotify)))) {
		// Debug builds: include the serialized matching dictionary.
		s = OSSerialize::withCapacity((unsigned int) page_size);
		if (s && serviceNoti->matching->serialize(s)) {
			snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
		}
		OSSafeReleaseNULL(s);
#endif /* DEVELOPMENT || DEBUG */
	} else {
		snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
	}
}
593
594 // FIXME: Implementation of these functions are hidden from the static analyzer.
595 // As for now, the analyzer doesn't consistently support wrapper functions
596 // for retain and release.
597 #ifndef __clang_analyzer__
598 void
599 iokit_add_reference( io_object_t obj, natural_t type )
600 {
601 IOUserClient * uc;
602
603 if (!obj) {
604 return;
605 }
606
607 if ((IKOT_IOKIT_CONNECT == type)
608 && (uc = OSDynamicCast(IOUserClient, obj))) {
609 OSIncrementAtomic(&uc->__ipc);
610 }
611
612 obj->retain();
613 }
614
615 void
616 iokit_remove_reference( io_object_t obj )
617 {
618 if (obj) {
619 obj->release();
620 }
621 }
622 #endif // __clang_analyzer__
623
void
iokit_remove_connect_reference( io_object_t obj )
{
	// Drop an IPC reference on a connect port's user client. When the last
	// in-flight IPC completes on an inactive client whose finalization was
	// deferred (see finalizeUserReferences), schedule it now.
	IOUserClient * uc;
	bool finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// OSDecrementAtomic returns the previous value; 1 means this was
		// the last outstanding IPC.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
649
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	// Returns true when the client can be finalized immediately. If IPCs
	// are still in flight (__ipc != 0), mark __ipcFinal so the last
	// iokit_remove_connect_reference() schedules the finalize instead.
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
665
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	// Return a retained port naming (obj, type), creating the IOMachPort
	// record on first use; bumps the make-send count on reuse.
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			// Allocation failed; return NULL port.
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		// Mirror the make-send count so no-senders handling stays accurate.
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
696
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	// No-senders handler: the last userspace send right for 'obj' is gone.
	// Dispatch the type-specific death action once the port record has
	// actually been torn down.
	IOUserClient * client;
	IOMemoryMap * map;
	IOUserNotification * notify;
	IOUserServerCheckInToken * token;

	// Not torn down yet (stale mscount): caller re-arms the notification.
	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	switch (type) {
	case IKOT_IOKIT_CONNECT:
		// Connect port: notify the user client its client process died.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IORWLockWrite(client->lock);
			client->clientDied();
			IORWLockUnlock(client->lock);
		}
		break;
	case IKOT_IOKIT_OBJECT:
		// Object port: tear down memory maps / disarm notifications.
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
		break;
	case IKOT_IOKIT_IDENT:
		if ((token = OSDynamicCast( IOUserServerCheckInToken, obj ))) {
			IOUserServerCheckInToken::notifyNoSenders( token );
		}
		break;
	}

	return kIOReturnSuccess;
}
735 }; /* extern "C" */
736
737 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
738
// Delivers service-matched notifications to userspace: matched services are
// queued in 'newSet' and a single "ping" Mach message wakes the client,
// which then drains the queue via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	struct PingMsg {
		mach_msg_header_t       msgHdr;
		OSNotificationHeader64  notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };  // cap on queued, undelivered services

	PingMsg     *       pingMsg;      // preformatted wakeup message
	mach_msg_size_t     msgSize;
	OSArray     *       newSet;       // services queued for delivery (LIFO)
	bool                armed;        // true => next match sends a ping
	bool                ipcLogged;    // log IPC send failure only once

	public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
771
// Delivers IOService interest messages to userspace; each message is sent
// individually (nothing is queued) with the provider's port attached as a
// descriptor.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	struct PingMsg {
		mach_msg_header_t             msgHdr;
		mach_msg_body_t               msgBody;
		mach_msg_port_descriptor_t    ports[1];   // provider's object port
		OSNotificationHeader64        notifyHeader __attribute__ ((packed));
	};

	PingMsg *           pingMsg;      // preformatted message template
	mach_msg_size_t     msgSize;
	uint8_t             clientIs64;   // client word size (affects arg layout)
	int                 owningPID;    // creator PID, for kIOMessageCopyClientID
	bool                ipcLogged;    // log IPC send failure only once

	public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    mach_msg_size_t extraSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
809
810 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
811
812 #undef super
813 #define super IOUserIterator
814 OSDefineMetaClass( IOUserNotification, IOUserIterator );
815 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
816
817 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
818
void
IOUserNotification::free( void )
{
	// Remove the IONotifier first and clear the slot so the superclass's
	// free() does not also release it as a plain object.
	if (holdNotify) {
		assert(OSDynamicCast(IONotifier, holdNotify));
		((IONotifier *)holdNotify)->remove();
		holdNotify = NULL;
	}
	// can't be in handler now

	super::free();
}
831
832
void
IOUserNotification::setNotification( IONotifier * notify )
{
	// Atomically swap in the new notifier under the global port lock, then
	// remove the old one outside the lock (remove() may re-enter IOKit).
	OSObject * previousNotify;

	IOLockLock( gIOObjectPortLock);

	previousNotify = holdNotify;
	holdNotify = notify;

	IOLockUnlock( gIOObjectPortLock);

	if (previousNotify) {
		assert(OSDynamicCast(IONotifier, previousNotify));
		((IONotifier *)previousNotify)->remove();
	}
}
850
void
IOUserNotification::reset()
{
	// Intentionally a no-op: notifications cannot be rewound.
}

bool
IOUserNotification::isValid()
{
	// Notification objects are always considered valid.
	return true;
}
862
863 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
864
865 #undef super
866 #define super IOUserNotification
867 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
868
869 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
870
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	// Build the preformatted ping message targeting the client's 'port'.
	// On any failure free() cleans up whatever was allocated.
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// The caller-supplied async reference must fit the 64-bit layout.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Message carries only 'referenceSize' bytes of the reference field.
	msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize);

	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port    = port;
	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size           = msgSize;
	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

	pingMsg->notifyHeader.size = 0;
	pingMsg->notifyHeader.type = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
911
912 void
913 IOServiceUserNotification::invalidatePort(void)
914 {
915 if (pingMsg) {
916 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
917 }
918 }
919
void
IOServiceUserNotification::free( void )
{
	// Snapshot ivars before super::free() (which tears down the object),
	// then release the send right, the message buffer, and the queue.
	PingMsg   *	_pingMsg;
	vm_size_t	_msgSize;
	OSArray   *	_newSet;

	_pingMsg   = pingMsg;
	_msgSize   = msgSize;
	_newSet    = newSet;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree(_pingMsg, _msgSize);
	}

	if (_newSet) {
		_newSet->release();
	}
}
944
// C-style trampoline registered with the notification machinery; 'target'
// is the IOServiceUserNotification instance.
bool
IOServiceUserNotification::_handler( void * target,
    void * ref, IOService * newService, IONotifier * notifier )
{
	return ((IOServiceUserNotification *) target)->handler( ref, newService );
}
951
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	// Queue the matched service and, if this is the first entry since the
	// client last drained the queue (armed), send one wakeup ping.
	unsigned int    count;
	kern_return_t   kr;
	ipc_port_t      port = NULL;
	bool            sendPing = false;

	IOTakeLock( lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		// Beyond kMaxOutstanding the service is silently dropped.
		newSet->setObject( newService );
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( lock );

	// Keep terminated services' ports alive until the client sees them.
	if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
			pingMsg->msgHdr.msgh_local_port = port;
		} else {
			pingMsg->msgHdr.msgh_local_port = NULL;
		}

		kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
		    pingMsg->msgHdr.msgh_size,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    0);
		if (port) {
			iokit_release_port( port );
		}

		// Only log the first send failure to avoid flooding the log.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
OSObject *
IOServiceUserNotification::getNextObject()
{
	// Unretained iteration is disallowed; use copyNextObject().
	assert(false);
	return NULL;
}
1006
OSObject *
IOServiceUserNotification::copyNextObject()
{
	// Pop the most recently queued service (retained). When the queue is
	// empty, re-arm so the next match sends another ping.
	unsigned int    count;
	OSObject *      result;

	IOLockLock(lock);

	count = newSet->getCount();
	if (count) {
		// LIFO: take the last element to avoid shifting the array.
		result = newSet->getObject( count - 1 );
		result->retain();
		newSet->removeObject( count - 1);
	} else {
		result = NULL;
		armed = true;
	}

	IOLockUnlock(lock);

	return result;
}
1029
1030 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1031
1032 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
1033
1034 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1035
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, mach_msg_size_t extraSize,
    bool client64 )
{
	// Build the complex-message template (one port descriptor for the
	// provider) used by handler() for every delivered interest message.
	if (!super::init()) {
		return false;
	}

	// The caller-supplied async reference must fit the 64-bit layout.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	// notifyHeader.size records the payload room beyond the header.
	extraSize += sizeof(IOServiceInterestContent64);
	msgSize = (mach_msg_size_t) (sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize);
	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port    = port;
	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS_COMPLEX
	    | MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size           = msgSize;
	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

	pingMsg->msgBody.msgh_descriptor_count = 1;

	// Descriptor slot for the provider's port, filled in per message.
	pingMsg->ports[0].name              = NULL;
	pingMsg->ports[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
	pingMsg->ports[0].type              = MACH_MSG_PORT_DESCRIPTOR;

	pingMsg->notifyHeader.size          = extraSize;
	pingMsg->notifyHeader.type          = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
1082
1083 void
1084 IOServiceMessageUserNotification::invalidatePort(void)
1085 {
1086 if (pingMsg) {
1087 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
1088 }
1089 }
1090
void
IOServiceMessageUserNotification::free( void )
{
	// Snapshot ivars before super::free(), then release the send right
	// and the message template buffer.
	PingMsg   *	_pingMsg;
	vm_size_t	_msgSize;

	_pingMsg   = pingMsg;
	_msgSize   = msgSize;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree( _pingMsg, _msgSize);
	}
}
1109
// C-style trampoline registered with the interest-notification machinery;
// 'target' is the IOServiceMessageUserNotification instance.
IOReturn
IOServiceMessageUserNotification::_handler( void * target, void * ref,
    UInt32 messageType, IOService * provider,
    void * argument, vm_size_t argSize )
{
	return ((IOServiceMessageUserNotification *) target)->handler(
		ref, messageType, provider, argument, argSize);
}
1118
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	// Deliver one interest message to the client: copy the preformatted
	// template, append the (size-clamped) argument payload, attach the
	// provider's port, and send. Small messages use a stack buffer.
	enum { kLocalMsgSize = 0x100 };
	uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
	void * allocMsg;
	kern_return_t kr;
	vm_size_t argSize;
	mach_msg_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;
	struct PingMsg * thisMsg;
	IOServiceInterestContent64 * data;

	// Special in-kernel query: hand back the owner's PID, no IPC.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// Argument is a scalar passed by value; size depends on client ABI.
		if (clientIs64) {
			argSize = sizeof(data->messageArgument[0]);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Argument is a buffer; clamp to the allowed maximum.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The true payload size is smuggled into the notification type field;
	// argSize itself is rounded up for alignment.
	natural_t type;
	type  = pingMsg->notifyHeader.type;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	// Overflow-checked total: template + content header + payload.
	if (os_add3_overflow(msgSize, sizeof(IOServiceInterestContent64) - sizeof(data->messageArgument), argSize, &thisMsgSize)) {
		return kIOReturnBadArgument;
	}

	if (thisMsgSize > sizeof(stackMsg)) {
		allocMsg = IOMalloc(thisMsgSize);
		if (!allocMsg) {
			return kIOReturnNoMemory;
		}
		thisMsg = (typeof(thisMsg))allocMsg;
	} else {
		allocMsg = NULL;
		thisMsg = (typeof(thisMsg))stackMsg;
	}

	bcopy(pingMsg, thisMsg, msgSize);
	thisMsg->notifyHeader.type = type;
	data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
	// == pingMsg->notifyHeader.content;
	data->messageType = messageType;

	if (callerArgSize == 0) {
		data->messageArgument[0] = (io_user_reference_t) messageArgument;
		if (!clientIs64) {
			// 32-bit clients read the high word; duplicate the value.
			data->messageArgument[0] |= (data->messageArgument[0] << 32);
		}
	} else {
		bcopy( messageArgument, data->messageArgument, callerArgSize );
		// Zero the alignment padding so no kernel stack/heap data leaks.
		bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
	}

	thisMsg->notifyHeader.type = type;
	thisMsg->msgHdr.msgh_size = thisMsgSize;

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisMsg->ports[0].name = providerPort;
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
	thisMsg->msgHdr.msgh_local_port = thisPort;

	kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
	    thisMsg->msgHdr.msgh_size,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    0);
	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (allocMsg) {
		IOFree(allocMsg, thisMsgSize);
	}

	// Only log the first send failure to avoid flooding the log.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1220
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	// Message-style notifications deliver via mach messages and never
	// queue objects, so there is nothing to iterate.
	return NULL;
}
1226
OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	// See getNextObject(): this notification type has no object queue.
	return NULL;
}
1232
1233 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1234
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Protects the per-client 'owners' queues and each task's io_user_clients
// queue; allocated once in IOUserClient::initialize().
IOLock * gIOUserClientOwnersLock;
1240
1241 void
1242 IOUserClient::initialize( void )
1243 {
1244 gIOObjectPortLock = IOLockAlloc();
1245 gIOUserClientOwnersLock = IOLockAlloc();
1246 gIOUserServerLock = IOLockAlloc();
1247 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1248
1249 #if IOTRACKING
1250 IOTrackingQueueCollectUser(IOUserIterator::gMetaClass.getTracking());
1251 IOTrackingQueueCollectUser(IOServiceMessageUserNotification::gMetaClass.getTracking());
1252 IOTrackingQueueCollectUser(IOServiceUserNotification::gMetaClass.getTracking());
1253 IOTrackingQueueCollectUser(IOUserClient::gMetaClass.getTracking());
1254 IOTrackingQueueCollectUser(IOMachPort::gMetaClass.getTracking());
1255 #endif /* IOTRACKING */
1256 }
1257
/*
 * Legacy 32-bit variant: packs the wake port, callback and refcon into an
 * OSAsyncReference. Not available on 64-bit kernels — panics there.
 */
void
#if __LP64__
__attribute__((__noreturn__))
#endif
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
#if __LP64__
	panic("setAsyncReference not valid for 64b");
#else
	// Preserve the flag bits already stored alongside the port in slot 0.
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
#endif
}
1275
1276 void
1277 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1278 mach_port_t wakePort,
1279 mach_vm_address_t callback, io_user_reference_t refcon)
1280 {
1281 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1282 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1283 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1284 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1285 }
1286
/*
 * Task-aware variant: fills the reference as above, then tags it with
 * kIOUCAsync64Flag when the task has a 64-bit address map so replies are
 * formatted for the client's word size.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1297
1298 static OSDictionary *
1299 CopyConsoleUser(UInt32 uid)
1300 {
1301 OSArray * array;
1302 OSDictionary * user = NULL;
1303
1304 if ((array = OSDynamicCast(OSArray,
1305 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1306 for (unsigned int idx = 0;
1307 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1308 idx++) {
1309 OSNumber * num;
1310
1311 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1312 && (uid == num->unsigned32BitValue())) {
1313 user->retain();
1314 break;
1315 }
1316 }
1317 array->release();
1318 }
1319 return user;
1320 }
1321
1322 static OSDictionary *
1323 CopyUserOnConsole(void)
1324 {
1325 OSArray * array;
1326 OSDictionary * user = NULL;
1327
1328 if ((array = OSDynamicCast(OSArray,
1329 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1330 for (unsigned int idx = 0;
1331 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1332 idx++) {
1333 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1334 user->retain();
1335 break;
1336 }
1337 }
1338 array->release();
1339 }
1340 return user;
1341 }
1342
1343 IOReturn
1344 IOUserClient::clientHasAuthorization( task_t task,
1345 IOService * service )
1346 {
1347 proc_t p;
1348
1349 p = (proc_t) get_bsdtask_info(task);
1350 if (p) {
1351 uint64_t authorizationID;
1352
1353 authorizationID = proc_uniqueid(p);
1354 if (authorizationID) {
1355 if (service->getAuthorizationID() == authorizationID) {
1356 return kIOReturnSuccess;
1357 }
1358 }
1359 }
1360
1361 return kIOReturnNotPermitted;
1362 }
1363
/*
 * Checks whether the caller identified by 'securityToken' holds the named
 * privilege. Depending on the privilege, the token is interpreted as a
 * task_t or as an IOUCProcessToken (for the secure-console-process check).
 * Returns kIOReturnSuccess, kIOReturnNotPrivileged, or kIOReturnUnsupported
 * for an unknown privilege name.
 */
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: based on the *current* task's GPU-denial state,
	// not on securityToken.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the caller's audit session must match the
	// session of the user currently on console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console-process check the token is an IOUCProcessToken
	// wrapping the task; otherwise the token is the task itself.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	// Remaining privileges are decided from the task's security token
	// (token.val[0] is the effective uid).
	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator == euid 0.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the euid owns some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the euid's session must be on console; for the
		// secure variant the secure-input PID must also match.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1458 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1459
1460 OSDictionary *
1461 IOUserClient::copyClientEntitlements(task_t task)
1462 {
1463 proc_t p = NULL;
1464 pid_t pid = 0;
1465 size_t len = 0;
1466 void *entitlements_blob = NULL;
1467 OSDictionary *entitlements = NULL;
1468
1469 p = (proc_t)get_bsdtask_info(task);
1470 if (p == NULL) {
1471 return NULL;
1472 }
1473 pid = proc_pid(p);
1474
1475 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1476 if (entitlements) {
1477 return entitlements;
1478 }
1479 }
1480
1481 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
1482 return NULL;
1483 }
1484 return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len);
1485 }
1486
/*
 * Parses a CS_GenericBlob of entitlements XML into an OSDictionary,
 * retained for the caller. Returns NULL on malformed, oversized, or
 * non-dictionary content. 'len' is the total blob length including the
 * CS_GenericBlob header.
 */
OSDictionary *
IOUserClient::copyEntitlementsFromBlob(void *entitlements_blob, size_t len)
{
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSString *errorString = NULL;
	OSDictionary *entitlements = NULL;

	// Blob must contain at least the header plus some payload.
	if (len <= offsetof(CS_GenericBlob, data)) {
		goto fail;
	}

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	// From here on 'len' is the payload length (header subtracted); the
	// cleanup path below relies on this when sizing IOFree.
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements: %lu bytes of entitlements exceeds maximum of %u\n",
		    len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob. Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL) {
		goto fail;
	}
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements: %s\n", errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL) {
		goto fail;
	}

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL) {
		goto fail;
	}
	// Ownership transferred to 'entitlements'; clear so the cleanup path
	// below does not release the object we are returning.
	entitlements_obj = NULL;

fail:
	if (entitlements_data != NULL) {
		IOFree(entitlements_data, len + 1);
	}
	if (entitlements_obj != NULL) {
		entitlements_obj->release();
	}
	if (errorString != NULL) {
		errorString->release();
	}
	return entitlements;
}
1549
1550 OSDictionary *
1551 IOUserClient::copyClientEntitlementsVnode(vnode_t vnode, off_t offset)
1552 {
1553 size_t len = 0;
1554 void *entitlements_blob = NULL;
1555
1556 if (cs_entitlements_blob_get_vnode(vnode, offset, &entitlements_blob, &len) != 0) {
1557 return NULL;
1558 }
1559 return IOUserClient::copyEntitlementsFromBlob(entitlements_blob, len);
1560 }
1561
1562 OSObject *
1563 IOUserClient::copyClientEntitlement( task_t task,
1564 const char * entitlement )
1565 {
1566 OSDictionary *entitlements;
1567 OSObject *value;
1568
1569 entitlements = copyClientEntitlements(task);
1570 if (entitlements == NULL) {
1571 return NULL;
1572 }
1573
1574 /* Fetch the entitlement value from the dictionary. */
1575 value = entitlements->getObject(entitlement);
1576 if (value != NULL) {
1577 value->retain();
1578 }
1579
1580 entitlements->release();
1581 return value;
1582 }
1583
1584 OSObject *
1585 IOUserClient::copyClientEntitlementVnode(
1586 struct vnode *vnode,
1587 off_t offset,
1588 const char *entitlement)
1589 {
1590 OSDictionary *entitlements;
1591 OSObject *value;
1592
1593 entitlements = copyClientEntitlementsVnode(vnode, offset);
1594 if (entitlements == NULL) {
1595 return NULL;
1596 }
1597
1598 /* Fetch the entitlement value from the dictionary. */
1599 value = entitlements->getObject(entitlement);
1600 if (value != NULL) {
1601 value->retain();
1602 }
1603
1604 entitlements->release();
1605 return value;
1606 }
1607
1608 bool
1609 IOUserClient::init()
1610 {
1611 if (getPropertyTable() || super::init()) {
1612 return reserve();
1613 }
1614
1615 return false;
1616 }
1617
1618 bool
1619 IOUserClient::init(OSDictionary * dictionary)
1620 {
1621 if (getPropertyTable() || super::init(dictionary)) {
1622 return reserve();
1623 }
1624
1625 return false;
1626 }
1627
1628 bool
1629 IOUserClient::initWithTask(task_t owningTask,
1630 void * securityID,
1631 UInt32 type )
1632 {
1633 if (getPropertyTable() || super::init()) {
1634 return reserve();
1635 }
1636
1637 return false;
1638 }
1639
1640 bool
1641 IOUserClient::initWithTask(task_t owningTask,
1642 void * securityID,
1643 UInt32 type,
1644 OSDictionary * properties )
1645 {
1646 bool ok;
1647
1648 ok = super::init( properties );
1649 ok &= initWithTask( owningTask, securityID, type );
1650
1651 return ok;
1652 }
1653
1654 bool
1655 IOUserClient::reserve()
1656 {
1657 if (!reserved) {
1658 reserved = IONewZero(ExpansionData, 1);
1659 if (!reserved) {
1660 return false;
1661 }
1662 }
1663 setTerminateDefer(NULL, true);
1664 IOStatisticsRegisterCounter();
1665
1666 return true;
1667 }
1668
// Links one owning task to one user client. Each owner record sits on two
// queues: the task's io_user_clients queue (taskLink) and the client's
// 'owners' queue (ucLink). Guarded by gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t task;
	queue_chain_t taskLink;
	IOUserClient * uc;
	queue_chain_t ucLink;
};
1675
/*
 * Records 'task' as an owner of this client (idempotent: an already
 * registered task is not added twice). Lazily initializes the owners
 * queue on first use. Returns kIOReturnNoMemory if the owner record
 * cannot be allocated.
 */
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL means the queue was never initialized.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IONew(IOUserClientOwner, 1);
		if (!owner) {
			ret = kIOReturnNoMemory;
		} else {
			owner->task = task;
			owner->uc = this;
			// Link the record on both the client's and the task's queues.
			queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
			queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
			// Propagate this client's interest in app-suspension messages.
			if (messageAppSuspended) {
				task_set_message_app_suspended(task, true);
			}
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1719
/*
 * Called when the last send right to this client is gone: unlinks every
 * owner record from both queues and frees it. For each detached task,
 * recomputes whether any of its remaining clients still want
 * app-suspension messages. Leaves 'owners' marked uninitialized.
 */
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// Does any *other* client of this task still want
			// app-suspension messages?
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IODelete(owner, IOUserClientOwner, 1);
		}
		// Mark the queue uninitialized again (see registerOwner).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1751
1752
/*
 * Notifies every user client owned by 'task' that opted into
 * app-suspension messages (messageAppSuspended). Clients are collected
 * into a set under the lock, then messaged outside the lock to avoid
 * calling client code with gIOUserClientOwnersLock held.
 */
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				// Allocation failed; deliver to no one rather than block.
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				    uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1800
/*
 * Task-termination hook: detaches every user client owned by 'task'.
 * Clients left with no owners are chained into a singly linked 'dead'
 * list (reusing their now-empty owners queue head as the link, with
 * .next == NULL marking it empty), then clientDied() is delivered to
 * each outside the lock.
 */
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			// Retained here; released after clientDied() below.
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IODelete(owner, IOUserClientOwner, 1);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Deliver clientDied() outside the lock; shared instances are always
	// notified, others only if not already closed.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1844
// One node of the per-client singly linked list mapping an owning task to
// its sandbox filter policy. The list head lives in reserved->filterPolicies
// and is guarded by filterLock.
struct IOUCFilterPolicy {
	task_t task;
	io_filter_policy_t filterPolicy;
	IOUCFilterPolicy * next;
};
1850
/*
 * Returns the filter policy associated with 'task', optionally installing
 * 'addFilterPolicy' when none exists yet. Returns 0 when no policy is
 * recorded (or the record could not be allocated). An existing entry is
 * asserted to match any policy being added.
 */
io_filter_policy_t
IOUserClient::filterForTask(task_t task, io_filter_policy_t addFilterPolicy)
{
	IOUCFilterPolicy * elem;
	io_filter_policy_t filterPolicy;

	filterPolicy = 0;
	IOLockLock(filterLock);

	// Linear scan; the list is expected to be short (one entry per task).
	for (elem = reserved->filterPolicies; elem && (elem->task != task); elem = elem->next) {
	}

	if (elem) {
		if (addFilterPolicy) {
			assert(addFilterPolicy == elem->filterPolicy);
		}
		filterPolicy = elem->filterPolicy;
	} else if (addFilterPolicy) {
		elem = IONewZero(IOUCFilterPolicy, 1);
		if (elem) {
			elem->task = task;
			elem->filterPolicy = addFilterPolicy;
			// Push onto the head of the list.
			elem->next = reserved->filterPolicies;
			reserved->filterPolicies = elem;
			filterPolicy = addFilterPolicy;
		}
	}

	IOLockUnlock(filterLock);
	return filterPolicy;
}
1882
/*
 * Destructor: releases the mappings set and locks, unregisters from
 * IOStatistics, and tears down the filter-policy list in the expansion
 * data. The owners queue must already be empty/uninitialized (asserted) —
 * noMoreSenders()/iokit_task_terminate() handle that before free.
 */
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IORWLockFree(lock);
	}
	if (filterLock) {
		IOLockFree(filterLock);
	}

	IOStatisticsUnregisterCounter();

	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IOUCFilterPolicy * elem;
		IOUCFilterPolicy * nextElem;
		for (elem = reserved->filterPolicies; elem; elem = nextElem) {
			nextElem = elem->next;
			if (elem->filterPolicy && gIOUCFilterCallbacks->io_filter_release) {
				gIOUCFilterCallbacks->io_filter_release(elem->filterPolicy);
			}
			IODelete(elem, IOUCFilterPolicy, 1);
		}
		IODelete(reserved, ExpansionData, 1);
	}

	super::free();
}
1916
1917 IOReturn
1918 IOUserClient::clientDied( void )
1919 {
1920 IOReturn ret = kIOReturnNotReady;
1921
1922 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1923 ret = clientClose();
1924 }
1925
1926 return ret;
1927 }
1928
// Default close hook; subclasses override to release client resources.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1934
// Default: no provider service is exposed; subclasses override.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1940
// Default notification-port registration; subclasses override to accept
// a wake port for async events.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	return kIOReturnUnsupported;
}
1949
// 64-bit refCon variant: forwards to the legacy overload, truncating the
// refCon to 32 bits for subclasses that only implement the old interface.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32 type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1958
// Default: no notification semaphore support; subclasses override.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1965
// Default: client-to-client connection is not supported; subclasses override.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1971
// Default: no shared memory regions; subclasses override to supply a
// memory descriptor (returned with a reference for the caller).
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1979
// OSSharedPtr wrapper: adopts (without an extra retain) the reference
// produced by the raw-pointer overload.
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    OSSharedPtr<IOMemoryDescriptor>& memory )
{
	IOMemoryDescriptor* memoryRaw = nullptr;
	IOReturn result = clientMemoryForType(type, options, &memoryRaw);
	memory.reset(memoryRaw, OSNoRetain);
	return result;
}
1990
#if !__LP64__
// Legacy 32-bit mapping entry point; superseded by mapClientMemory64.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
2002
/*
 * Maps the memory the subclass exposes for 'type' into 'task'. Returns the
 * new mapping (caller owns the returned IOMemoryMap reference) or NULL if
 * the subclass provides no memory or the mapping fails.
 */
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	// Ask the subclass for the descriptor backing this type.
	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Caller flags override only the user-option bits of the
		// subclass-supplied options.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		// Drop the reference returned by clientMemoryForType; the map
		// (if created) keeps its own.
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
2030
/*
 * Exposes 'obj' to 'task' as a send right and stores the port name in
 * *clientObj. NOTE: consumes a reference on 'obj' (makeSendRightForTask
 * takes its own reference; the caller's one is released here).
 */
IOReturn
IOUserClient::exportObjectToClient(task_t task,
    OSObject *obj, io_object_t *clientObj)
{
	mach_port_name_t name;

	name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

	*clientObj = (io_object_t)(uintptr_t) name;

	if (obj) {
		obj->release();
	}

	return kIOReturnSuccess;
}
2047
2048 IOReturn
2049 IOUserClient::copyPortNameForObjectInTask(task_t task,
2050 OSObject *obj, mach_port_name_t * port_name)
2051 {
2052 mach_port_name_t name;
2053
2054 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
2055
2056 *(mach_port_name_t *) port_name = name;
2057
2058 return kIOReturnSuccess;
2059 }
2060
2061 IOReturn
2062 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2063 OSObject **obj)
2064 {
2065 OSObject * object;
2066
2067 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
2068
2069 *obj = object;
2070
2071 return object ? kIOReturnSuccess : kIOReturnIPCError;
2072 }
2073
2074 IOReturn
2075 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
2076 OSSharedPtr<OSObject>& obj)
2077 {
2078 OSObject* objRaw = NULL;
2079 IOReturn result = copyObjectForPortNameInTask(task, port_name, &objRaw);
2080 obj.reset(objRaw, OSNoRetain);
2081 return result;
2082 }
2083
// Adjusts the user-reference count of a send right in 'task' by 'delta'.
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
2089
// Legacy dispatch: subclasses override to return their method table entry.
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2095
// Legacy dispatch: subclasses override to return their async method entry.
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
2101
// Legacy dispatch: subclasses override to return their trap table entry.
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
2108
2109 #pragma clang diagnostic push
2110 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2111
2112 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
2113 // functions can break clients of kexts implementing getExternalMethodForIndex()
// Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
// functions can break clients of kexts implementing getExternalMethodForIndex()
// Resolves the method entry for 'index' and, if found, reports the target
// service stored in the entry via *targetP.
IOExternalMethod *
IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
	IOExternalMethod *method = getExternalMethodForIndex(index);

	if (method) {
		*targetP = (IOService *) method->object;
	}

	return method;
}
2126
// OSSharedPtr wrapper: takes an additional retain on the target because
// the raw overload returns a borrowed pointer.
IOExternalMethod *
IOUserClient::
getTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
{
	IOService* targetPRaw = NULL;
	IOExternalMethod* result = getTargetAndMethodForIndex(&targetPRaw, index);
	targetP.reset(targetPRaw, OSRetain);
	return result;
}
2136
// Resolves the async method entry for 'index' and, if found, reports the
// target service stored in the entry via *targetP.
IOExternalAsyncMethod *
IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
	IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

	if (method) {
		*targetP = (IOService *) method->object;
	}

	return method;
}
2149
// OSSharedPtr wrapper: takes an additional retain on the target because
// the raw overload returns a borrowed pointer.
IOExternalAsyncMethod *
IOUserClient::
getAsyncTargetAndMethodForIndex(OSSharedPtr<IOService>& targetP, UInt32 index)
{
	IOService* targetPRaw = NULL;
	IOExternalAsyncMethod* result = getAsyncTargetAndMethodForIndex(&targetPRaw, index);
	targetP.reset(targetPRaw, OSRetain);
	return result;
}
2159
// Resolves the trap entry for 'index' and, if found, reports the target
// object stored in the entry via *targetP.
IOExternalTrap *
IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
	IOExternalTrap *trap = getExternalTrapForIndex(index);

	if (trap) {
		*targetP = trap->object;
	}

	return trap;
}
2172 #pragma clang diagnostic pop
2173
2174 IOReturn
2175 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
2176 {
2177 mach_port_t port;
2178 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2179
2180 if (MACH_PORT_NULL != port) {
2181 iokit_release_port_send(port);
2182 }
2183
2184 return kIOReturnSuccess;
2185 }
2186
2187 IOReturn
2188 IOUserClient::releaseNotificationPort(mach_port_t port)
2189 {
2190 if (MACH_PORT_NULL != port) {
2191 iokit_release_port_send(port);
2192 }
2193
2194 return kIOReturnSuccess;
2195 }
2196
/*
 * Legacy 32-bit entry point: widens the reference and argument array to
 * 64-bit form and forwards to sendAsyncResult64.
 */
IOReturn
IOUserClient::sendAsyncResult(OSAsyncReference reference,
    IOReturn result, void *args[], UInt32 numArgs)
{
	OSAsyncReference64 reference64;
	io_user_reference_t args64[kMaxAsyncArgs];
	unsigned int idx;

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
		reference64[idx] = REF64(reference[idx]);
	}

	for (idx = 0; idx < numArgs; idx++) {
		args64[idx] = REF64(args[idx]);
	}

	return sendAsyncResult64(reference64, result, args64, numArgs);
}
2219
// Variant allowing options (e.g. kIOUserNotifyOptionCanDrop to permit
// dropping the reply when the client's queue is full).
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2226
// Default-options variant: never drops the reply on a full queue.
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2233
/*
 * Builds and sends the async-completion mach message to the reply port
 * packed into reference[0]. The message layout (32- vs 64-bit) is chosen
 * by the kIOUCAsync64Flag bit that setAsyncReference64(task) recorded.
 * With kIOUserNotifyOptionCanDrop set, a full queue drops the message
 * (MACH_SEND_TIMED_OUT); otherwise the send fails and the failure is
 * logged once per reference (kIOUCAsyncErrorLoggedFlag).
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: message sized to the actual argument count.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// 32-bit client: narrow the reference and each argument.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2324
2325
2326 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2327
extern "C" {
/*
 * CHECK(cls, obj, out): declares a local 'out' as 'obj' downcast to 'cls';
 * makes the enclosing function return kIOReturnBadArgument on a type
 * mismatch.
 */
#define CHECK(cls, obj, out) \
    cls * out; \
    if( !(out = OSDynamicCast( cls, obj))) \
	return( kIOReturnBadArgument )

/*
 * CHECKLOCKED(cls, obj, out): like CHECK, but 'obj' must be an
 * IOUserIterator wrapping a 'cls'; declares both 'oIter' and 'out'.
 */
#define CHECKLOCKED(cls, obj, out) \
    IOUserIterator * oIter; \
    cls * out; \
    if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
	return (kIOReturnBadArgument); \
    if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	return (kIOReturnBadArgument)
2341
2342 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2343
// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.

/*
 * Wraps 'data' (kernel memory, length 'len') in a vm_map_copy_t stored in
 * *buf for MIG copy-out; the IPC layer frees the copy after delivery.
 * The source is not destroyed.
 */
static kern_return_t
copyoutkdata( const void * data, vm_size_t len,
    io_buf_ptr_t * buf )
{
	kern_return_t err;
	vm_map_copy_t copy;

	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
	    false /* src_destroy */, &copy);

	assert( err == KERN_SUCCESS );
	if (err == KERN_SUCCESS) {
		*buf = (char *) copy;
	}

	return err;
}
2364
2365 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2366
2367 /* Routine io_server_version */
2368 kern_return_t
2369 is_io_server_version(
2370 mach_port_t master_port,
2371 uint64_t *version)
2372 {
2373 *version = IOKIT_SERVER_VERSION;
2374 return kIOReturnSuccess;
2375 }
2376
2377 /* Routine io_object_get_class */
2378 kern_return_t
2379 is_io_object_get_class(
2380 io_object_t object,
2381 io_name_t className )
2382 {
2383 const OSMetaClass* my_obj = NULL;
2384
2385 if (!object) {
2386 return kIOReturnBadArgument;
2387 }
2388
2389 my_obj = object->getMetaClass();
2390 if (!my_obj) {
2391 return kIOReturnNotFound;
2392 }
2393
2394 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2395
2396 return kIOReturnSuccess;
2397 }
2398
2399 /* Routine io_object_get_superclass */
2400 kern_return_t
2401 is_io_object_get_superclass(
2402 mach_port_t master_port,
2403 io_name_t obj_name,
2404 io_name_t class_name)
2405 {
2406 IOReturn ret;
2407 const OSMetaClass * meta;
2408 const OSMetaClass * super;
2409 const OSSymbol * name;
2410 const char * cstr;
2411
2412 if (!obj_name || !class_name) {
2413 return kIOReturnBadArgument;
2414 }
2415 if (master_port != master_device_port) {
2416 return kIOReturnNotPrivileged;
2417 }
2418
2419 ret = kIOReturnNotFound;
2420 meta = NULL;
2421 do{
2422 name = OSSymbol::withCString(obj_name);
2423 if (!name) {
2424 break;
2425 }
2426 meta = OSMetaClass::copyMetaClassWithName(name);
2427 if (!meta) {
2428 break;
2429 }
2430 super = meta->getSuperClass();
2431 if (!super) {
2432 break;
2433 }
2434 cstr = super->getClassName();
2435 if (!cstr) {
2436 break;
2437 }
2438 strlcpy(class_name, cstr, sizeof(io_name_t));
2439 ret = kIOReturnSuccess;
2440 }while (false);
2441
2442 OSSafeReleaseNULL(name);
2443 if (meta) {
2444 meta->releaseMetaClass();
2445 }
2446
2447 return ret;
2448 }
2449
2450 /* Routine io_object_get_bundle_identifier */
2451 kern_return_t
2452 is_io_object_get_bundle_identifier(
2453 mach_port_t master_port,
2454 io_name_t obj_name,
2455 io_name_t bundle_name)
2456 {
2457 IOReturn ret;
2458 const OSMetaClass * meta;
2459 const OSSymbol * name;
2460 const OSSymbol * identifier;
2461 const char * cstr;
2462
2463 if (!obj_name || !bundle_name) {
2464 return kIOReturnBadArgument;
2465 }
2466 if (master_port != master_device_port) {
2467 return kIOReturnNotPrivileged;
2468 }
2469
2470 ret = kIOReturnNotFound;
2471 meta = NULL;
2472 do{
2473 name = OSSymbol::withCString(obj_name);
2474 if (!name) {
2475 break;
2476 }
2477 meta = OSMetaClass::copyMetaClassWithName(name);
2478 if (!meta) {
2479 break;
2480 }
2481 identifier = meta->getKmodName();
2482 if (!identifier) {
2483 break;
2484 }
2485 cstr = identifier->getCStringNoCopy();
2486 if (!cstr) {
2487 break;
2488 }
2489 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2490 ret = kIOReturnSuccess;
2491 }while (false);
2492
2493 OSSafeReleaseNULL(name);
2494 if (meta) {
2495 meta->releaseMetaClass();
2496 }
2497
2498 return ret;
2499 }
2500
2501 /* Routine io_object_conforms_to */
2502 kern_return_t
2503 is_io_object_conforms_to(
2504 io_object_t object,
2505 io_name_t className,
2506 boolean_t *conforms )
2507 {
2508 if (!object) {
2509 return kIOReturnBadArgument;
2510 }
2511
2512 *conforms = (NULL != object->metaCast( className ));
2513
2514 return kIOReturnSuccess;
2515 }
2516
2517 /* Routine io_object_get_retain_count */
2518 kern_return_t
2519 is_io_object_get_retain_count(
2520 io_object_t object,
2521 uint32_t *retainCount )
2522 {
2523 if (!object) {
2524 return kIOReturnBadArgument;
2525 }
2526
2527 *retainCount = object->getRetainCount();
2528 return kIOReturnSuccess;
2529 }
2530
2531 /* Routine io_iterator_next */
2532 kern_return_t
2533 is_io_iterator_next(
2534 io_object_t iterator,
2535 io_object_t *object )
2536 {
2537 IOReturn ret;
2538 OSObject * obj;
2539 OSIterator * iter;
2540 IOUserIterator * uiter;
2541
2542 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2543 obj = uiter->copyNextObject();
2544 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2545 obj = iter->getNextObject();
2546 if (obj) {
2547 obj->retain();
2548 }
2549 } else {
2550 return kIOReturnBadArgument;
2551 }
2552
2553 if (obj) {
2554 *object = obj;
2555 ret = kIOReturnSuccess;
2556 } else {
2557 ret = kIOReturnNoDevice;
2558 }
2559
2560 return ret;
2561 }
2562
2563 /* Routine io_iterator_reset */
2564 kern_return_t
2565 is_io_iterator_reset(
2566 io_object_t iterator )
2567 {
2568 CHECK( OSIterator, iterator, iter );
2569
2570 iter->reset();
2571
2572 return kIOReturnSuccess;
2573 }
2574
2575 /* Routine io_iterator_is_valid */
2576 kern_return_t
2577 is_io_iterator_is_valid(
2578 io_object_t iterator,
2579 boolean_t *is_valid )
2580 {
2581 CHECK( OSIterator, iterator, iter );
2582
2583 *is_valid = iter->isValid();
2584
2585 return kIOReturnSuccess;
2586 }
2587
2588 static kern_return_t
2589 internal_io_service_match_property_table(
2590 io_service_t _service,
2591 const char * matching,
2592 mach_msg_type_number_t matching_size,
2593 boolean_t *matches)
2594 {
2595 CHECK( IOService, _service, service );
2596
2597 kern_return_t kr;
2598 OSObject * obj;
2599 OSDictionary * dict;
2600
2601 assert(matching_size);
2602
2603
2604 obj = OSUnserializeXML(matching, matching_size);
2605
2606 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2607 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2608 *matches = service->passiveMatch( dict );
2609 kr = kIOReturnSuccess;
2610 } else {
2611 kr = kIOReturnBadArgument;
2612 }
2613
2614 if (obj) {
2615 obj->release();
2616 }
2617
2618 return kr;
2619 }
2620
/* Routine io_service_match_property_table */
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	// Legacy in-band-string variant; superseded by the _ool/_bin forms below.
	return kIOReturnUnsupported;
}
2630
2631
/* Routine io_service_match_property_table_ool */
// Out-of-line variant: 'matching' arrives as a vm_map_copy_t. The match
// outcome is reported through *result; the mig return value only reflects
// whether the copyout of the descriptor itself succeeded.
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		// The copied-out buffer is only needed for the match; free it now.
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2657
/* Routine io_service_match_property_table_bin */
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	// In-band variant: the matching dictionary fits in the mig message.
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2668
2669 static kern_return_t
2670 internal_io_service_get_matching_services(
2671 mach_port_t master_port,
2672 const char * matching,
2673 mach_msg_type_number_t matching_size,
2674 io_iterator_t *existing )
2675 {
2676 kern_return_t kr;
2677 OSObject * obj;
2678 OSDictionary * dict;
2679
2680 if (master_port != master_device_port) {
2681 return kIOReturnNotPrivileged;
2682 }
2683
2684 assert(matching_size);
2685 obj = OSUnserializeXML(matching, matching_size);
2686
2687 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2688 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2689 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2690 kr = kIOReturnSuccess;
2691 } else {
2692 kr = kIOReturnBadArgument;
2693 }
2694
2695 if (obj) {
2696 obj->release();
2697 }
2698
2699 return kr;
2700 }
2701
/* Routine io_service_get_matching_services */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t master_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	// Legacy in-band-string variant; superseded by the _ool/_bin forms below.
	return kIOReturnUnsupported;
}
2711
/* Routine io_service_get_matching_services_ool */
// Out-of-line variant: 'matching' arrives as a vm_map_copy_t. The lookup
// outcome is reported through *result; the mig return value only reflects
// whether the copyout of the descriptor itself succeeded.
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(master_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2739
/* Routine io_service_get_matching_services_bin */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	// In-band variant: the matching dictionary fits in the mig message.
	return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
}
2750
2751
2752 static kern_return_t
2753 internal_io_service_get_matching_service(
2754 mach_port_t master_port,
2755 const char * matching,
2756 mach_msg_type_number_t matching_size,
2757 io_service_t *service )
2758 {
2759 kern_return_t kr;
2760 OSObject * obj;
2761 OSDictionary * dict;
2762
2763 if (master_port != master_device_port) {
2764 return kIOReturnNotPrivileged;
2765 }
2766
2767 assert(matching_size);
2768 obj = OSUnserializeXML(matching, matching_size);
2769
2770 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2771 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2772 *service = IOService::copyMatchingService( dict );
2773 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2774 } else {
2775 kr = kIOReturnBadArgument;
2776 }
2777
2778 if (obj) {
2779 obj->release();
2780 }
2781
2782 return kr;
2783 }
2784
/* Routine io_service_get_matching_service */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t master_port,
	io_string_t matching,
	io_service_t *service )
{
	// Legacy in-band-string variant; superseded by the _ool/_bin forms below.
	return kIOReturnUnsupported;
}
2794
/* Routine io_service_get_matching_services_ool */
// Out-of-line variant: 'matching' arrives as a vm_map_copy_t. The lookup
// outcome is reported through *result; the mig return value only reflects
// whether the copyout of the descriptor itself succeeded.
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(master_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2822
/* Routine io_service_get_matching_service_bin */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	// In-band variant: the matching dictionary fits in the mig message.
	return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
}
2833
2834 static kern_return_t
2835 internal_io_service_add_notification(
2836 mach_port_t master_port,
2837 io_name_t notification_type,
2838 const char * matching,
2839 size_t matching_size,
2840 mach_port_t port,
2841 void * reference,
2842 vm_size_t referenceSize,
2843 bool client64,
2844 io_object_t * notification )
2845 {
2846 IOServiceUserNotification * userNotify = NULL;
2847 IONotifier * notify = NULL;
2848 const OSSymbol * sym;
2849 OSObject * obj;
2850 OSDictionary * dict;
2851 IOReturn err;
2852 natural_t userMsgType;
2853
2854 if (master_port != master_device_port) {
2855 return kIOReturnNotPrivileged;
2856 }
2857
2858 do {
2859 err = kIOReturnNoResources;
2860
2861 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2862 return kIOReturnMessageTooLarge;
2863 }
2864
2865 if (!(sym = OSSymbol::withCString( notification_type ))) {
2866 err = kIOReturnNoResources;
2867 }
2868
2869 assert(matching_size);
2870 obj = OSUnserializeXML(matching, matching_size);
2871 dict = OSDynamicCast(OSDictionary, obj);
2872 if (!dict) {
2873 err = kIOReturnBadArgument;
2874 continue;
2875 }
2876 IOTaskRegistryCompatibilityMatching(current_task(), dict);
2877
2878 if ((sym == gIOPublishNotification)
2879 || (sym == gIOFirstPublishNotification)) {
2880 userMsgType = kIOServicePublishNotificationType;
2881 } else if ((sym == gIOMatchedNotification)
2882 || (sym == gIOFirstMatchNotification)) {
2883 userMsgType = kIOServiceMatchedNotificationType;
2884 } else if ((sym == gIOTerminatedNotification)
2885 || (sym == gIOWillTerminateNotification)) {
2886 userMsgType = kIOServiceTerminatedNotificationType;
2887 } else {
2888 userMsgType = kLastIOKitNotificationType;
2889 }
2890
2891 userNotify = new IOServiceUserNotification;
2892
2893 if (userNotify && !userNotify->init( port, userMsgType,
2894 reference, referenceSize, client64)) {
2895 userNotify->release();
2896 userNotify = NULL;
2897 }
2898 if (!userNotify) {
2899 continue;
2900 }
2901
2902 notify = IOService::addMatchingNotification( sym, dict,
2903 &userNotify->_handler, userNotify );
2904 if (notify) {
2905 *notification = userNotify;
2906 userNotify->setNotification( notify );
2907 err = kIOReturnSuccess;
2908 } else {
2909 err = kIOReturnUnsupported;
2910 }
2911 } while (false);
2912
2913 if ((kIOReturnSuccess != err) && userNotify) {
2914 userNotify->invalidatePort();
2915 userNotify->release();
2916 userNotify = NULL;
2917 }
2918
2919 if (sym) {
2920 sym->release();
2921 }
2922 if (obj) {
2923 obj->release();
2924 }
2925
2926 return err;
2927 }
2928
2929
/* Routine io_service_add_notification */
kern_return_t
is_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	// Legacy in-band-string variant; superseded by the _bin/_ool forms below.
	return kIOReturnUnsupported;
}
2943
/* Routine io_service_add_notification_64 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	// Legacy in-band-string variant; superseded by the _bin/_ool forms below.
	return kIOReturnUnsupported;
}
2957
2958 /* Routine io_service_add_notification_bin */
2959 kern_return_t
2960 is_io_service_add_notification_bin
2961 (
2962 mach_port_t master_port,
2963 io_name_t notification_type,
2964 io_struct_inband_t matching,
2965 mach_msg_type_number_t matchingCnt,
2966 mach_port_t wake_port,
2967 io_async_ref_t reference,
2968 mach_msg_type_number_t referenceCnt,
2969 io_object_t *notification)
2970 {
2971 io_async_ref_t zreference;
2972
2973 if (referenceCnt > ASYNC_REF_COUNT) {
2974 return kIOReturnBadArgument;
2975 }
2976 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2977 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2978
2979 return internal_io_service_add_notification(master_port, notification_type,
2980 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2981 false, notification);
2982 }
2983
2984 /* Routine io_service_add_notification_bin_64 */
2985 kern_return_t
2986 is_io_service_add_notification_bin_64
2987 (
2988 mach_port_t master_port,
2989 io_name_t notification_type,
2990 io_struct_inband_t matching,
2991 mach_msg_type_number_t matchingCnt,
2992 mach_port_t wake_port,
2993 io_async_ref64_t reference,
2994 mach_msg_type_number_t referenceCnt,
2995 io_object_t *notification)
2996 {
2997 io_async_ref64_t zreference;
2998
2999 if (referenceCnt > ASYNC_REF64_COUNT) {
3000 return kIOReturnBadArgument;
3001 }
3002 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3003 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3004
3005 return internal_io_service_add_notification(master_port, notification_type,
3006 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3007 true, notification);
3008 }
3009
// Out-of-line backend: copies out the vm_map_copy_t matching buffer, then
// forwards to internal_io_service_add_notification(). The real outcome is
// reported through *result; the mig return value only reflects whether the
// copyout of the descriptor itself succeeded.
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( master_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
3041
3042 /* Routine io_service_add_notification_ool */
3043 kern_return_t
3044 is_io_service_add_notification_ool(
3045 mach_port_t master_port,
3046 io_name_t notification_type,
3047 io_buf_ptr_t matching,
3048 mach_msg_type_number_t matchingCnt,
3049 mach_port_t wake_port,
3050 io_async_ref_t reference,
3051 mach_msg_type_number_t referenceCnt,
3052 kern_return_t *result,
3053 io_object_t *notification )
3054 {
3055 io_async_ref_t zreference;
3056
3057 if (referenceCnt > ASYNC_REF_COUNT) {
3058 return kIOReturnBadArgument;
3059 }
3060 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3061 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3062
3063 return internal_io_service_add_notification_ool(master_port, notification_type,
3064 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
3065 false, result, notification);
3066 }
3067
3068 /* Routine io_service_add_notification_ool_64 */
3069 kern_return_t
3070 is_io_service_add_notification_ool_64(
3071 mach_port_t master_port,
3072 io_name_t notification_type,
3073 io_buf_ptr_t matching,
3074 mach_msg_type_number_t matchingCnt,
3075 mach_port_t wake_port,
3076 io_async_ref64_t reference,
3077 mach_msg_type_number_t referenceCnt,
3078 kern_return_t *result,
3079 io_object_t *notification )
3080 {
3081 io_async_ref64_t zreference;
3082
3083 if (referenceCnt > ASYNC_REF64_COUNT) {
3084 return kIOReturnBadArgument;
3085 }
3086 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3087 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3088
3089 return internal_io_service_add_notification_ool(master_port, notification_type,
3090 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
3091 true, result, notification);
3092 }
3093
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Forwards to the string-matching entry point above, which always
	// returns kIOReturnUnsupported; kept for binary compatibility only.
	return is_io_service_add_notification( master_port, notification_type,
	    matching, port, &ref, 1, notification );
}
3108
3109
3110 static kern_return_t
3111 internal_io_service_add_interest_notification(
3112 io_object_t _service,
3113 io_name_t type_of_interest,
3114 mach_port_t port,
3115 void * reference,
3116 vm_size_t referenceSize,
3117 bool client64,
3118 io_object_t * notification )
3119 {
3120 IOServiceMessageUserNotification * userNotify = NULL;
3121 IONotifier * notify = NULL;
3122 const OSSymbol * sym;
3123 IOReturn err;
3124
3125 CHECK( IOService, _service, service );
3126
3127 err = kIOReturnNoResources;
3128 if ((sym = OSSymbol::withCString( type_of_interest ))) {
3129 do {
3130 userNotify = new IOServiceMessageUserNotification;
3131
3132 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
3133 reference, referenceSize,
3134 kIOUserNotifyMaxMessageSize,
3135 client64 )) {
3136 userNotify->release();
3137 userNotify = NULL;
3138 }
3139 if (!userNotify) {
3140 continue;
3141 }
3142
3143 notify = service->registerInterest( sym,
3144 &userNotify->_handler, userNotify );
3145 if (notify) {
3146 *notification = userNotify;
3147 userNotify->setNotification( notify );
3148 err = kIOReturnSuccess;
3149 } else {
3150 err = kIOReturnUnsupported;
3151 }
3152
3153 sym->release();
3154 } while (false);
3155 }
3156
3157 if ((kIOReturnSuccess != err) && userNotify) {
3158 userNotify->invalidatePort();
3159 userNotify->release();
3160 userNotify = NULL;
3161 }
3162
3163 return err;
3164 }
3165
3166 /* Routine io_service_add_message_notification */
3167 kern_return_t
3168 is_io_service_add_interest_notification(
3169 io_object_t service,
3170 io_name_t type_of_interest,
3171 mach_port_t port,
3172 io_async_ref_t reference,
3173 mach_msg_type_number_t referenceCnt,
3174 io_object_t * notification )
3175 {
3176 io_async_ref_t zreference;
3177
3178 if (referenceCnt > ASYNC_REF_COUNT) {
3179 return kIOReturnBadArgument;
3180 }
3181 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3182 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
3183
3184 return internal_io_service_add_interest_notification(service, type_of_interest,
3185 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
3186 }
3187
3188 /* Routine io_service_add_interest_notification_64 */
3189 kern_return_t
3190 is_io_service_add_interest_notification_64(
3191 io_object_t service,
3192 io_name_t type_of_interest,
3193 mach_port_t wake_port,
3194 io_async_ref64_t reference,
3195 mach_msg_type_number_t referenceCnt,
3196 io_object_t *notification )
3197 {
3198 io_async_ref64_t zreference;
3199
3200 if (referenceCnt > ASYNC_REF64_COUNT) {
3201 return kIOReturnBadArgument;
3202 }
3203 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3204 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3205
3206 return internal_io_service_add_interest_notification(service, type_of_interest,
3207 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3208 }
3209
3210
3211 /* Routine io_service_acknowledge_notification */
3212 kern_return_t
3213 is_io_service_acknowledge_notification(
3214 io_object_t _service,
3215 natural_t notify_ref,
3216 natural_t response )
3217 {
3218 CHECK( IOService, _service, service );
3219
3220 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3221 (IOOptionBits) response );
3222 }
3223
/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	IOReturn ret;
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Hold the client lock exclusively so this call is serialized against
	// other operations on the same user client.
	IORWLockWrite(client->lock);
	ret = client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
	IORWLockUnlock(client->lock);

	return ret;
}
3242
3243 /* Routine io_registry_get_root_entry */
3244 kern_return_t
3245 is_io_registry_get_root_entry(
3246 mach_port_t master_port,
3247 io_object_t *root )
3248 {
3249 IORegistryEntry * entry;
3250
3251 if (master_port != master_device_port) {
3252 return kIOReturnNotPrivileged;
3253 }
3254
3255 entry = IORegistryEntry::getRegistryRoot();
3256 if (entry) {
3257 entry->retain();
3258 }
3259 *root = entry;
3260
3261 return kIOReturnSuccess;
3262 }
3263
3264 /* Routine io_registry_create_iterator */
3265 kern_return_t
3266 is_io_registry_create_iterator(
3267 mach_port_t master_port,
3268 io_name_t plane,
3269 uint32_t options,
3270 io_object_t *iterator )
3271 {
3272 if (master_port != master_device_port) {
3273 return kIOReturnNotPrivileged;
3274 }
3275
3276 *iterator = IOUserIterator::withIterator(
3277 IORegistryIterator::iterateOver(
3278 IORegistryEntry::getPlane( plane ), options ));
3279
3280 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3281 }
3282
3283 /* Routine io_registry_entry_create_iterator */
3284 kern_return_t
3285 is_io_registry_entry_create_iterator(
3286 io_object_t registry_entry,
3287 io_name_t plane,
3288 uint32_t options,
3289 io_object_t *iterator )
3290 {
3291 CHECK( IORegistryEntry, registry_entry, entry );
3292
3293 *iterator = IOUserIterator::withIterator(
3294 IORegistryIterator::iterateOver( entry,
3295 IORegistryEntry::getPlane( plane ), options ));
3296
3297 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3298 }
3299
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED declares 'oIter' (the wrapping IOUserIterator) as well
	// as 'iter' (the underlying IORegistryIterator).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against other threads using the same user iterator.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
3313
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED declares 'oIter' (the wrapping IOUserIterator) as well
	// as 'iter' (the underlying IORegistryIterator).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against other threads using the same user iterator.
	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	// exitEntry() returns false when there is no entered entry to leave.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3329
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
	mach_port_t master_port,
	io_string_t path,
	io_object_t *registry_entry )
{
	IORegistryEntry * entry;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	entry = IORegistryEntry::fromPath( path );

	// For tasks running in registry-compatibility mode, fall back to a
	// compatibility match on the path when the direct lookup fails.
	if (!entry && IOTaskRegistryCompatibility(current_task())) {
		OSDictionary * matching;
		const OSObject * objects[2] = { kOSBooleanTrue, NULL };
		const OSSymbol * keys[2] = { gIOCompatibilityMatchKey, gIOPathMatchKey };

		objects[1] = OSString::withCStringNoCopy(path);
		matching = OSDictionary::withObjects(objects, keys, 2, 2);
		if (matching) {
			entry = IOService::copyMatchingService(matching);
		}
		OSSafeReleaseNULL(matching);
		OSSafeReleaseNULL(objects[1]);
	}

	// NOTE: returns success with *registry_entry == NULL when no entry
	// matched; callers check the returned object.
	*registry_entry = entry;

	return kIOReturnSuccess;
}
3363
3364
/* Routine io_registry_entry_from_path */
// OOL variant: the path arrives either in-band ('path', when non-empty) or
// as a vm_map_copy_t ('path_ool'). 'err' tracks the Mach copyout status,
// 'res' the IOKit lookup status; the real outcome is reported via *result.
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t master_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	if (path[0]) {
		cpath = path;
	} else {
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3425
3426
3427 /* Routine io_registry_entry_in_plane */
3428 kern_return_t
3429 is_io_registry_entry_in_plane(
3430 io_object_t registry_entry,
3431 io_name_t plane,
3432 boolean_t *inPlane )
3433 {
3434 CHECK( IORegistryEntry, registry_entry, entry );
3435
3436 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3437
3438 return kIOReturnSuccess;
3439 }
3440
3441
3442 /* Routine io_registry_entry_get_path */
3443 kern_return_t
3444 is_io_registry_entry_get_path(
3445 io_object_t registry_entry,
3446 io_name_t plane,
3447 io_string_t path )
3448 {
3449 int length;
3450 CHECK( IORegistryEntry, registry_entry, entry );
3451
3452 length = sizeof(io_string_t);
3453 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3454 return kIOReturnSuccess;
3455 } else {
3456 return kIOReturnBadArgument;
3457 }
3458 }
3459
/* Routine io_registry_entry_get_path */
// First tries to return the path in-band; if it does not fit, builds it in
// a kMaxPath temporary and returns it out-of-line via copyoutkdata().
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// In-band buffer too small; retry with the large temporary.
		length = kMaxPath;
		buf = IONew(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			// copyoutkdata() copies the buffer; ipc frees the copy object,
			// so 'buf' can be deleted below in every case.
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// Allocation size was kMaxPath even though getPath() shrank
			// 'length'; free with the matching size.
			IODelete(buf, char, kMaxPath);
		}
	}

	return err;
}
3499
3500
3501 /* Routine io_registry_entry_get_name */
3502 kern_return_t
3503 is_io_registry_entry_get_name(
3504 io_object_t registry_entry,
3505 io_name_t name )
3506 {
3507 CHECK( IORegistryEntry, registry_entry, entry );
3508
3509 strncpy( name, entry->getName(), sizeof(io_name_t));
3510
3511 return kIOReturnSuccess;
3512 }
3513
3514 /* Routine io_registry_entry_get_name_in_plane */
3515 kern_return_t
3516 is_io_registry_entry_get_name_in_plane(
3517 io_object_t registry_entry,
3518 io_name_t planeName,
3519 io_name_t name )
3520 {
3521 const IORegistryPlane * plane;
3522 CHECK( IORegistryEntry, registry_entry, entry );
3523
3524 if (planeName[0]) {
3525 plane = IORegistryEntry::getPlane( planeName );
3526 } else {
3527 plane = NULL;
3528 }
3529
3530 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3531
3532 return kIOReturnSuccess;
3533 }
3534
3535 /* Routine io_registry_entry_get_location_in_plane */
3536 kern_return_t
3537 is_io_registry_entry_get_location_in_plane(
3538 io_object_t registry_entry,
3539 io_name_t planeName,
3540 io_name_t location )
3541 {
3542 const IORegistryPlane * plane;
3543 CHECK( IORegistryEntry, registry_entry, entry );
3544
3545 if (planeName[0]) {
3546 plane = IORegistryEntry::getPlane( planeName );
3547 } else {
3548 plane = NULL;
3549 }
3550
3551 const char * cstr = entry->getLocation( plane );
3552
3553 if (cstr) {
3554 strncpy( location, cstr, sizeof(io_name_t));
3555 return kIOReturnSuccess;
3556 } else {
3557 return kIOReturnNotFound;
3558 }
3559 }
3560
3561 /* Routine io_registry_entry_get_registry_entry_id */
3562 kern_return_t
3563 is_io_registry_entry_get_registry_entry_id(
3564 io_object_t registry_entry,
3565 uint64_t *entry_id )
3566 {
3567 CHECK( IORegistryEntry, registry_entry, entry );
3568
3569 *entry_id = entry->getRegistryEntryID();
3570
3571 return kIOReturnSuccess;
3572 }
3573
3574
// Copy a named property from a registry entry, with a fallback for tasks
// running in registry-compatibility mode: if the direct lookup misses, the
// entry's gIOCompatibilityPropertiesKey dictionary is consulted instead.
// Returns a retained object (caller releases) or NULL if not found.
static OSObject *
IOCopyPropertyCompatible(IORegistryEntry * regEntry, const char * name)
{
	OSObject * obj;
	OSObject * compatProps;
	OSDictionary * props;

	obj = regEntry->copyProperty(name);
	if (!obj
	    && IOTaskRegistryCompatibility(current_task())
	    && (compatProps = regEntry->copyProperty(gIOCompatibilityPropertiesKey))) {
		// The compat overlay should be a dictionary; anything else is ignored.
		props = OSDynamicCast(OSDictionary, compatProps);
		if (props) {
			obj = props->getObject(name);
			if (obj) {
				// getObject() does not retain; take a reference so the
				// caller owns the result uniformly on both paths.
				obj->retain();
			}
		}
		compatProps->release();
	}

	return obj;
}
3598
/* Routine io_registry_entry_get_property */
// Return a property's value as raw inband bytes. Only OSData, OSString,
// OSBoolean and OSNumber are representable; other types fail with
// kIOReturnBadArgument. *dataCnt is in/out: caller's capacity in, actual
// length out.
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto access to this property by name.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = IOCopyPropertyCompatible(entry, property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable OSData is not exposed; report zero length.
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		// Include the trailing NUL in the returned byte count.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are rendered as the literal strings "Yes"/"No"
		// (sizeof includes the NUL).
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned as the low-order bytes of the 64-bit
		// value, clamped to 8 bytes.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, the significant low-order bytes sit at the end
		// of the 8-byte word; skip the leading bytes.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Dictionaries, arrays, etc. cannot be flattened to inband bytes.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		// Fail rather than truncate if the caller's buffer is too small.
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3671
3672
3673 /* Routine io_registry_entry_get_property */
3674 kern_return_t
3675 is_io_registry_entry_get_property(
3676 io_object_t registry_entry,
3677 io_name_t property_name,
3678 io_buf_ptr_t *properties,
3679 mach_msg_type_number_t *propertiesCnt )
3680 {
3681 kern_return_t err;
3682 unsigned int len;
3683 OSObject * obj;
3684
3685 CHECK( IORegistryEntry, registry_entry, entry );
3686
3687 #if CONFIG_MACF
3688 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3689 return kIOReturnNotPermitted;
3690 }
3691 #endif
3692
3693 obj = IOCopyPropertyCompatible(entry, property_name);
3694 if (!obj) {
3695 return kIOReturnNotFound;
3696 }
3697
3698 OSSerialize * s = OSSerialize::withCapacity(4096);
3699 if (!s) {
3700 obj->release();
3701 return kIOReturnNoMemory;
3702 }
3703
3704 if (obj->serialize( s )) {
3705 len = s->getLength();
3706 *propertiesCnt = len;
3707 err = copyoutkdata( s->text(), len, properties );
3708 } else {
3709 err = kIOReturnUnsupported;
3710 }
3711
3712 s->release();
3713 obj->release();
3714
3715 return err;
3716 }
3717
3718 /* Routine io_registry_entry_get_property_recursively */
3719 kern_return_t
3720 is_io_registry_entry_get_property_recursively(
3721 io_object_t registry_entry,
3722 io_name_t plane,
3723 io_name_t property_name,
3724 uint32_t options,
3725 io_buf_ptr_t *properties,
3726 mach_msg_type_number_t *propertiesCnt )
3727 {
3728 kern_return_t err;
3729 unsigned int len;
3730 OSObject * obj;
3731
3732 CHECK( IORegistryEntry, registry_entry, entry );
3733
3734 #if CONFIG_MACF
3735 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3736 return kIOReturnNotPermitted;
3737 }
3738 #endif
3739
3740 obj = entry->copyProperty( property_name,
3741 IORegistryEntry::getPlane( plane ), options );
3742 if (!obj) {
3743 return kIOReturnNotFound;
3744 }
3745
3746 OSSerialize * s = OSSerialize::withCapacity(4096);
3747 if (!s) {
3748 obj->release();
3749 return kIOReturnNoMemory;
3750 }
3751
3752 if (obj->serialize( s )) {
3753 len = s->getLength();
3754 *propertiesCnt = len;
3755 err = copyoutkdata( s->text(), len, properties );
3756 } else {
3757 err = kIOReturnUnsupported;
3758 }
3759
3760 s->release();
3761 obj->release();
3762
3763 return err;
3764 }
3765
/* Routine io_registry_entry_get_properties */
// Unconditionally unsupported; the binary variants below
// (io_registry_entry_get_properties_bin*) provide this functionality.
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	return kIOReturnUnsupported;
}
3775
3776 #if CONFIG_MACF
3777
// State shared with the GetPropertiesEditor callback while one entry's
// property table is serialized.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;              // credential the MAC check is evaluated against
	IORegistryEntry * entry;        // entry whose properties are being serialized
	OSCollection * root;            // first (top-level) container seen, set lazily
};

// OSSerialize editor callback: filters individual top-level properties
// through the MAC policy during serialization. Returning NULL omits the
// property from the output.
static const OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container encountered is the property table itself; only
	// its direct members are policy-checked, not nested collections.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	// Return a retained reference (or NULL to omit the property).
	if (value) {
		value->retain();
	}
	return value;
}
3806
3807 #endif /* CONFIG_MACF */
3808
/* Routine io_registry_entry_get_properties_bin_buf */
// Serialize all of an entry's properties in binary form. If the caller
// supplied a buffer (buf/bufsize) big enough, copy straight into it;
// otherwise return the data out-of-line via properties/propertiesCnt.
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	unsigned int len;
	OSObject * compatProperties;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// If the MAC policy requests per-property filtering, install the
	// editor callback. 'ref' lives on this stack frame and is only
	// dereferenced while 's' serializes below, within this function.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}

	// Tasks in registry-compatibility mode see the normal properties
	// overlaid with the gIOCompatibilityPropertiesKey dictionary.
	if (IOTaskRegistryCompatibility(current_task())
	    && (compatProperties = entry->copyProperty(gIOCompatibilityPropertiesKey))) {
		OSDictionary * dict;

		dict = entry->dictionaryWithProperties();
		if (!dict) {
			err = kIOReturnNoMemory;
		} else {
			// Don't expose the overlay itself; merge its contents instead.
			dict->removeObject(gIOCompatibilityPropertiesKey);
			dict->merge(OSDynamicCast(OSDictionary, compatProperties));
			if (!dict->serialize(s)) {
				err = kIOReturnUnsupported;
			}
			dict->release();
		}
		compatProperties->release();
	} else if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		// Fast path: caller-supplied buffer large enough for the result.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Slow path: out-of-line vm data; bufsize==0 tells the caller
			// to look at properties/propertiesCnt.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3886
3887 /* Routine io_registry_entry_get_properties_bin */
3888 kern_return_t
3889 is_io_registry_entry_get_properties_bin(
3890 io_object_t registry_entry,
3891 io_buf_ptr_t *properties,
3892 mach_msg_type_number_t *propertiesCnt)
3893 {
3894 return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3895 0, NULL, properties, propertiesCnt);
3896 }
3897
/* Routine io_registry_entry_get_property_bin_buf */
// Look up one property and return it binary-serialized, either into the
// caller's buffer (buf/bufsize) when it fits, or out-of-line otherwise.
// Supports recursive plane search and the remove-on-read property set.
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	unsigned int len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may veto access to this property by name.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	// Magic key: return the list of property keys instead of a value.
	if (gIORegistryEntryPropertyKeysKey == sym) {
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			if (!IOTaskRegistryCompatibility(current_task())) {
				obj = entry->copyProperty(property_name,
				    IORegistryEntry::getPlane(plane), options);
			} else {
				// Compatibility mode: do the recursive search by hand so
				// every visited entry's compat overlay is consulted too.
				obj = IOCopyPropertyCompatible(entry, property_name);
				if ((NULL == obj) && plane && (options & kIORegistryIterateRecursively)) {
					IORegistryIterator * iter;
					iter = IORegistryIterator::iterateOver(entry, IORegistryEntry::getPlane(plane), options);
					if (iter) {
						// NOTE(review): 'entry' is re-pointed at each visited
						// child here, so the removeProperty below acts on the
						// entry where the property was found (or the last one
						// visited) — presumably intended; verify.
						while ((NULL == obj) && (entry = iter->getNextObject())) {
							obj = IOCopyPropertyCompatible(entry, property_name);
						}
						iter->release();
					}
				}
			}
		} else {
			obj = IOCopyPropertyCompatible(entry, property_name);
		}
		// Remove-on-read properties are consumed by a successful read.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		// Copy into the caller's buffer when possible, else out-of-line.
		if (buf && bufsize && len <= *bufsize) {
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3994
3995 /* Routine io_registry_entry_get_property_bin */
3996 kern_return_t
3997 is_io_registry_entry_get_property_bin(
3998 io_object_t registry_entry,
3999 io_name_t plane,
4000 io_name_t property_name,
4001 uint32_t options,
4002 io_buf_ptr_t *properties,
4003 mach_msg_type_number_t *propertiesCnt )
4004 {
4005 return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
4006 property_name, options, 0, NULL, properties, propertiesCnt);
4007 }
4008
4009
/* Routine io_registry_entry_set_properties */
// Unserialize caller-provided XML property data and apply it to the entry.
// The IPC status goes in the return value; the IOKit status goes in *result.
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Reject oversized serialized data before mapping it into the kernel.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// — subsequent failures are reported out-of-band via *result.
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		// MAC policy may veto the property set as a whole.
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			res = entry->setProperties( obj );
		}

		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
4066
4067 /* Routine io_registry_entry_get_child_iterator */
4068 kern_return_t
4069 is_io_registry_entry_get_child_iterator(
4070 io_object_t registry_entry,
4071 io_name_t plane,
4072 io_object_t *iterator )
4073 {
4074 CHECK( IORegistryEntry, registry_entry, entry );
4075
4076 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
4077 IORegistryEntry::getPlane( plane )));
4078
4079 return kIOReturnSuccess;
4080 }
4081
4082 /* Routine io_registry_entry_get_parent_iterator */
4083 kern_return_t
4084 is_io_registry_entry_get_parent_iterator(
4085 io_object_t registry_entry,
4086 io_name_t plane,
4087 io_object_t *iterator)
4088 {
4089 CHECK( IORegistryEntry, registry_entry, entry );
4090
4091 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
4092 IORegistryEntry::getPlane( plane )));
4093
4094 return kIOReturnSuccess;
4095 }
4096
4097 /* Routine io_service_get_busy_state */
4098 kern_return_t
4099 is_io_service_get_busy_state(
4100 io_object_t _service,
4101 uint32_t *busyState )
4102 {
4103 CHECK( IOService, _service, service );
4104
4105 *busyState = service->getBusyState();
4106
4107 return kIOReturnSuccess;
4108 }
4109
4110 /* Routine io_service_get_state */
4111 kern_return_t
4112 is_io_service_get_state(
4113 io_object_t _service,
4114 uint64_t *state,
4115 uint32_t *busy_state,
4116 uint64_t *accumulated_busy_time )
4117 {
4118 CHECK( IOService, _service, service );
4119
4120 *state = service->getState();
4121 *busy_state = service->getBusyState();
4122 *accumulated_busy_time = service->getAccumulatedBusyTime();
4123
4124 return kIOReturnSuccess;
4125 }
4126
4127 /* Routine io_service_wait_quiet */
4128 kern_return_t
4129 is_io_service_wait_quiet(
4130 io_object_t _service,
4131 mach_timespec_t wait_time )
4132 {
4133 uint64_t timeoutNS;
4134
4135 CHECK( IOService, _service, service );
4136
4137 timeoutNS = wait_time.tv_sec;
4138 timeoutNS *= kSecondScale;
4139 timeoutNS += wait_time.tv_nsec;
4140
4141 return service->waitQuiet(timeoutNS);
4142 }
4143
4144 /* Routine io_service_request_probe */
4145 kern_return_t
4146 is_io_service_request_probe(
4147 io_object_t _service,
4148 uint32_t options )
4149 {
4150 CHECK( IOService, _service, service );
4151
4152 return service->requestProbe( options );
4153 }
4154
4155 /* Routine io_service_get_authorization_id */
4156 kern_return_t
4157 is_io_service_get_authorization_id(
4158 io_object_t _service,
4159 uint64_t *authorization_id )
4160 {
4161 kern_return_t kr;
4162
4163 CHECK( IOService, _service, service );
4164
4165 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
4166 kIOClientPrivilegeAdministrator );
4167 if (kIOReturnSuccess != kr) {
4168 return kr;
4169 }
4170
4171 *authorization_id = service->getAuthorizationID();
4172
4173 return kr;
4174 }
4175
4176 /* Routine io_service_set_authorization_id */
4177 kern_return_t
4178 is_io_service_set_authorization_id(
4179 io_object_t _service,
4180 uint64_t authorization_id )
4181 {
4182 CHECK( IOService, _service, service );
4183
4184 return service->setAuthorizationID( authorization_id );
4185 }
4186
/* Routine io_service_open_ndr */
// Open a user-client connection to a service. Creates the IOUserClient via
// newUserClient(), performs one-time initialization (locks, locking mode),
// enforces cross-endian and MAC policy, resolves a filter policy, and
// registers the owning task. IPC status is the return value; the IOKit
// status is reported via *result, the new connection via *connection.
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool crossEndian;
	bool disallowAccess;

	CHECK( IOService, _service, service );

	// The connection must be opened by the task that will own it.
	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

#if CONFIG_MACF
	if (mac_iokit_check_open_service(kauth_cred_get(), service, connect_type) != 0) {
		return kIOReturnNotPermitted;
	}
#endif
	do{
		// Serialized open-time properties are currently not accepted.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// Record the caller's NDR when its integer representation differs
		// from the kernel's, so the client can be flagged cross-endian.
		crossEndian = (ndr.int_rep != NDR_record.int_rep);
		if (crossEndian) {
			if (!propertiesDict) {
				propertiesDict = OSDictionary::withCapacity(4);
			}
			OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
			if (data) {
				if (propertiesDict) {
					propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
				}
				data->release();
			}
		}

		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess) {
			assert( OSDynamicCast(IOUserClient, client));
			// Make sure the expansion data exists before any use below.
			if (!client->reserved) {
				if (!client->reserve()) {
					client->clientClose();
					OSSafeReleaseNULL(client);
					res = kIOReturnNoMemory;
				}
			}
		}

		if (res == kIOReturnSuccess) {
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			// Shared instances can be opened concurrently; serialize their
			// one-time lock allocation under gIOUserClientOwnersLock.
			if (client->sharedInstance) {
				IOLockLock(gIOUserClientOwnersLock);
			}
			if (!client->lock) {
				client->lock = IORWLockAlloc();
				client->filterLock = IOLockAlloc();

				client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
				{
					OSObject * obj;
					extern const OSSymbol * gIOSurfaceIdentifier;
					obj = client->getProperty(kIOUserClientDefaultLockingKey);
					if (obj) {
						client->defaultLocking = (kOSBooleanFalse != client->getProperty(kIOUserClientDefaultLockingKey));
					} else {
						// NOTE(review): default locking is enabled unless the
						// client's kext depends on IOSurface — presumably a
						// compatibility carve-out; verify.
						const OSMetaClass * meta;
						OSKext * kext;
						meta = client->getMetaClass();
						kext = meta->getKext();
						if (!kext || !kext->hasDependency(gIOSurfaceIdentifier)) {
							client->defaultLocking = true;
							client->setProperty(kIOUserClientDefaultLockingKey, kOSBooleanTrue);
						}
					}
				}
			}
			if (client->sharedInstance) {
				IOLockUnlock(gIOUserClientOwnersLock);
			}

			// Cross-endian callers are refused unless the service or the
			// client opts in via kIOUserClientCrossEndianCompatibleKey.
			disallowAccess = (crossEndian
			    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
			    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
			if (disallowAccess) {
				res = kIOReturnUnsupported;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				res = kIOReturnNotPermitted;
			}
#endif

			// Resolve and attach a filter policy for this task if a filter
			// resolver is registered and none is attached yet.
			if ((kIOReturnSuccess == res)
			    && gIOUCFilterCallbacks
			    && gIOUCFilterCallbacks->io_filter_resolver) {
				io_filter_policy_t filterPolicy;
				filterPolicy = client->filterForTask(owningTask, 0);
				if (!filterPolicy) {
					res = gIOUCFilterCallbacks->io_filter_resolver(owningTask, client, connect_type, &filterPolicy);
					// kIOReturnUnsupported from the resolver means "no
					// policy for this client" — not a failure.
					if (kIOReturnUnsupported == res) {
						res = kIOReturnSuccess;
					} else if (kIOReturnSuccess == res) {
						client->filterForTask(owningTask, filterPolicy);
					}
				}
			}

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}

			// Any failure after creation: close and drop the client.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->release();
				client = NULL;
				break;
			}
			// Tag the connection with the creator's log name for debugging.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
				creatorName->release();
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4371
/* Routine io_service_close */
// Close a user-client connection. Non-shared clients are closed at most
// once; repeated closes are logged and ignored.
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// NOTE(review): a connection port resolving to an OSSet is accepted as
	// a successful no-op — presumably the mapping set pushed out by
	// is_io_connect_map_memory_into_task; verify.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// The atomic 0->1 swap of 'closed' guarantees clientClose() runs only
	// once for non-shared clients; shared instances skip the guard.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IORWLockWrite(client->lock);
		client->clientClose();
		IORWLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4397
4398 /* Routine io_connect_get_service */
4399 kern_return_t
4400 is_io_connect_get_service(
4401 io_object_t connection,
4402 io_object_t *service )
4403 {
4404 IOService * theService;
4405
4406 CHECK( IOUserClient, connection, client );
4407
4408 theService = client->getService();
4409 if (theService) {
4410 theService->retain();
4411 }
4412
4413 *service = theService;
4414
4415 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4416 }
4417
4418 /* Routine io_connect_set_notification_port */
4419 kern_return_t
4420 is_io_connect_set_notification_port(
4421 io_object_t connection,
4422 uint32_t notification_type,
4423 mach_port_t port,
4424 uint32_t reference)
4425 {
4426 kern_return_t ret;
4427 CHECK( IOUserClient, connection, client );
4428
4429 IOStatisticsClientCall();
4430 IORWLockWrite(client->lock);
4431 ret = client->registerNotificationPort( port, notification_type,
4432 (io_user_reference_t) reference );
4433 IORWLockUnlock(client->lock);
4434 return ret;
4435 }
4436
4437 /* Routine io_connect_set_notification_port */
4438 kern_return_t
4439 is_io_connect_set_notification_port_64(
4440 io_object_t connection,
4441 uint32_t notification_type,
4442 mach_port_t port,
4443 io_user_reference_t reference)
4444 {
4445 kern_return_t ret;
4446 CHECK( IOUserClient, connection, client );
4447
4448 IOStatisticsClientCall();
4449 IORWLockWrite(client->lock);
4450 ret = client->registerNotificationPort( port, notification_type,
4451 reference );
4452 IORWLockUnlock(client->lock);
4453 return ret;
4454 }
4455
/* Routine io_connect_map_memory_into_task */
// Map the user client's memory of 'memory_type' into 'into_task'.
// On success *address/*size describe the new mapping. Ownership of the
// IOMemoryMap either moves to a send right pushed to the target task, or
// is parked on client->mappings for same-task, non-shared clients.
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	// Creating the mapping mutates client state: exclusive lock when the
	// client uses default locking.
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4519
4520 /* Routine is_io_connect_map_memory */
4521 kern_return_t
4522 is_io_connect_map_memory(
4523 io_object_t connect,
4524 uint32_t type,
4525 task_t task,
4526 uint32_t * mapAddr,
4527 uint32_t * mapSize,
4528 uint32_t flags )
4529 {
4530 IOReturn err;
4531 mach_vm_address_t address;
4532 mach_vm_size_t size;
4533
4534 address = SCALAR64(*mapAddr);
4535 size = SCALAR64(*mapSize);
4536
4537 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4538
4539 *mapAddr = SCALAR32(address);
4540 *mapSize = SCALAR32(size);
4541
4542 return err;
4543 }
4544 } /* extern "C" */
4545
4546 IOMemoryMap *
4547 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4548 {
4549 OSIterator * iter;
4550 IOMemoryMap * map = NULL;
4551
4552 IOLockLock(gIOObjectPortLock);
4553
4554 iter = OSCollectionIterator::withCollection(mappings);
4555 if (iter) {
4556 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4557 if (mem == map->getMemoryDescriptor()) {
4558 map->retain();
4559 mappings->removeObject(map);
4560 break;
4561 }
4562 }
4563 iter->release();
4564 }
4565
4566 IOLockUnlock(gIOObjectPortLock);
4567
4568 return map;
4569 }
4570
4571 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
// Undo a mapping made by is_io_connect_map_memory_into_task: re-find the
// existing mapping at 'address' via kIOMapReference, then tear down either
// the send right held by the target task or the client-held reference.
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	if (client->defaultLocking) {
		IORWLockWrite(client->lock);
	}
	// Ask the user client for the descriptor backing this memory type.
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
	if (client->defaultLocking) {
		IORWLockUnlock(client->lock);
	}

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask look up the existing
		// mapping at 'address' instead of creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			bool is_shared_instance_or_from_current_task = from_task != current_task() || client->sharedInstance;
			if (is_shared_instance_or_from_current_task) {
				// A send right was pushed to the owning task at map time;
				// recover its name so both rights can be dropped below.
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// NOTE(review): -2 drops two user refs on the right —
				// assumed to cover the one just made plus the original
				// map-time right; verify against IOMachPort accounting.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (!is_shared_instance_or_from_current_task) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4638
4639 kern_return_t
4640 is_io_connect_unmap_memory(
4641 io_object_t connect,
4642 uint32_t type,
4643 task_t task,
4644 uint32_t mapAddr )
4645 {
4646 IOReturn err;
4647 mach_vm_address_t address;
4648
4649 address = SCALAR64(mapAddr);
4650
4651 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4652
4653 return err;
4654 }
4655
4656
4657 /* Routine io_connect_add_client */
4658 kern_return_t
4659 is_io_connect_add_client(
4660 io_object_t connection,
4661 io_object_t connect_to)
4662 {
4663 CHECK( IOUserClient, connection, client );
4664 CHECK( IOUserClient, connect_to, to );
4665
4666 IOReturn ret;
4667
4668 IOStatisticsClientCall();
4669 if (client->defaultLocking) {
4670 IORWLockWrite(client->lock);
4671 }
4672 ret = client->connectClient( to );
4673 if (client->defaultLocking) {
4674 IORWLockUnlock(client->lock);
4675 }
4676 return ret;
4677 }
4678
4679
4680 /* Routine io_connect_set_properties */
4681 kern_return_t
4682 is_io_connect_set_properties(
4683 io_object_t connection,
4684 io_buf_ptr_t properties,
4685 mach_msg_type_number_t propertiesCnt,
4686 kern_return_t * result)
4687 {
4688 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4689 }
4690
/* Routine io_user_client_method */
// Dispatch an external method that returns variable-length structure
// output. Builds IOExternalMethodArguments from the MIG parameters, runs
// the task's filter policy, calls externalMethod() under the client lock,
// and copies scalar/inband/variable results back to the caller.
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// This entry point carries no async completion port.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	// The method deposits its variable-length output object here
	// (an OSSerialize or OSData, handled below).
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be strictly larger than the inband limit;
	// smaller payloads are expected to arrive inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		// Copy-on-write descriptor over the caller's buffer.
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the output scalars so no stale data can leak back.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;

	// Apply the task's filter policy (if any) before dispatching.
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		if (client->defaultLocking) {
			IORWLockRead(client->lock);
		}
		ret = client->externalMethod( selector, &args );
		if (client->defaultLocking) {
			IORWLockUnlock(client->lock);
		}
	}

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		unsigned int len;

		// Variable-length output is copied back as out-of-line vm data.
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			// Method succeeded but produced no variable output object.
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4801
/* Routine io_user_client_method */
/*
 * Primary MIG server routine for synchronous external-method calls.
 * Marshals scalar/inband/out-of-line inputs and outputs into an
 * IOExternalMethodArguments, applies any installed filter policy, and
 * invokes the client's externalMethod() under the optional per-client
 * read lock. Fixed-size OOL output is described by a caller-supplied
 * buffer mapped via an IOMemoryDescriptor.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	/* Validates `connection` is an IOUserClient and binds it to `client`. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	/* Synchronous call: no async wake port, reference, or variable output. */
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/*
	 * OOL buffers small enough to be sent inband, or output sizes that
	 * would not fit the 32-bit descriptor-size field, are IPC errors.
	 */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write map of the caller's input buffer. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-zero scalar outputs so unwritten slots never leak stack data. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		/* The method writes its OOL result directly into caller memory. */
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size
	    ? ((typeof(args.structureOutputDescriptorSize)) * ool_output_size)
	    : 0;

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Optional per-task filter policy may veto this selector outright. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		if (client->defaultLocking) {
			IORWLockRead(client->lock);
		}
		ret = client->externalMethod( selector, &args );
		if (client->defaultLocking) {
			IORWLockUnlock(client->lock);
		}
	}

	/* Report back how much the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4910
/* Routine io_async_user_client_method */
/*
 * MIG server routine for asynchronous external-method calls. Identical in
 * marshalling to is_io_connect_method, but additionally carries the async
 * wake port and reference array. reference[0] is overwritten with the wake
 * port (tagged with kIOUCAsync64Flag for 64-bit tasks) before dispatch, so
 * at least one reference slot is required.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	/* Validates `connection` is an IOUserClient and binds it to `client`. */
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	/* reference[0] is rewritten below, so an empty reference is invalid. */
	if (referenceCnt < 1) {
		return kIOReturnBadArgument;
	}

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	/* Stash the wake port in slot 0; flag 64-bit tasks for delivery format. */
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	/*
	 * OOL buffers small enough to be sent inband, or output sizes that
	 * would not fit the 32-bit descriptor-size field, are IPC errors.
	 */
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output) {
		if (*ool_output_size <= sizeof(io_struct_inband_t)) {
			return kIOReturnIPCError;
		}
		if (*ool_output_size > UINT_MAX) {
			return kIOReturnIPCError;
		}
	}

	if (ool_input) {
		/* Copy-on-write map of the caller's input buffer. */
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	/* Pre-zero scalar outputs so unwritten slots never leak stack data. */
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		/* The method writes its OOL result directly into caller memory. */
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ((typeof(args.structureOutputDescriptorSize)) * ool_output_size);

	IOStatisticsClientCall();
	ret = kIOReturnSuccess;
	/* Optional per-task filter policy may veto this async selector. */
	io_filter_policy_t filterPolicy = client->filterForTask(current_task(), 0);
	if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
		ret = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_external_async_method, selector);
	}
	if (kIOReturnSuccess == ret) {
		if (client->defaultLocking) {
			IORWLockRead(client->lock);
		}
		ret = client->externalMethod( selector, &args );
		if (client->defaultLocking) {
			IORWLockUnlock(client->lock);
		}
	}

	/* Report back how much the method actually produced. */
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
5030
5031 /* Routine io_connect_method_scalarI_scalarO */
5032 kern_return_t
5033 is_io_connect_method_scalarI_scalarO(
5034 io_object_t connect,
5035 uint32_t index,
5036 io_scalar_inband_t input,
5037 mach_msg_type_number_t inputCount,
5038 io_scalar_inband_t output,
5039 mach_msg_type_number_t * outputCount )
5040 {
5041 IOReturn err;
5042 uint32_t i;
5043 io_scalar_inband64_t _input;
5044 io_scalar_inband64_t _output;
5045
5046 mach_msg_type_number_t struct_outputCnt = 0;
5047 mach_vm_size_t ool_output_size = 0;
5048
5049 bzero(&_output[0], sizeof(_output));
5050 for (i = 0; i < inputCount; i++) {
5051 _input[i] = SCALAR64(input[i]);
5052 }
5053
5054 err = is_io_connect_method(connect, index,
5055 _input, inputCount,
5056 NULL, 0,
5057 0, 0,
5058 NULL, &struct_outputCnt,
5059 _output, outputCount,
5060 0, &ool_output_size);
5061
5062 for (i = 0; i < *outputCount; i++) {
5063 output[i] = SCALAR32(_output[i]);
5064 }
5065
5066 return err;
5067 }
5068
/*
 * Shim that adapts a legacy IOExternalMethod table entry (scalar-in /
 * scalar-out) to the modern dispatch path. Validates the input and output
 * counts against the method table, then calls the member function with
 * the argument arity encoded by inputCount. Returns kIOReturnBadArgument
 * on any count mismatch.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	/* Pre-zero outputs: the copy-back below runs even on early bailout. */
	bzero(&_output[0], sizeof(_output));
	/* do/while(false) + continue acts as a structured goto-out. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Legacy IOMethod takes exactly six void* slots; inputs fill
		 * the leading slots and output pointers fill the remainder.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy back whatever the method produced (zeros on failure paths). */
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5144
5145 /* Routine io_async_method_scalarI_scalarO */
5146 kern_return_t
5147 is_io_async_method_scalarI_scalarO(
5148 io_object_t connect,
5149 mach_port_t wake_port,
5150 io_async_ref_t reference,
5151 mach_msg_type_number_t referenceCnt,
5152 uint32_t index,
5153 io_scalar_inband_t input,
5154 mach_msg_type_number_t inputCount,
5155 io_scalar_inband_t output,
5156 mach_msg_type_number_t * outputCount )
5157 {
5158 IOReturn err;
5159 uint32_t i;
5160 io_scalar_inband64_t _input;
5161 io_scalar_inband64_t _output;
5162 io_async_ref64_t _reference;
5163
5164 if (referenceCnt > ASYNC_REF64_COUNT) {
5165 return kIOReturnBadArgument;
5166 }
5167 bzero(&_output[0], sizeof(_output));
5168 for (i = 0; i < referenceCnt; i++) {
5169 _reference[i] = REF64(reference[i]);
5170 }
5171 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5172
5173 mach_msg_type_number_t struct_outputCnt = 0;
5174 mach_vm_size_t ool_output_size = 0;
5175
5176 for (i = 0; i < inputCount; i++) {
5177 _input[i] = SCALAR64(input[i]);
5178 }
5179
5180 err = is_io_connect_async_method(connect,
5181 wake_port, _reference, referenceCnt,
5182 index,
5183 _input, inputCount,
5184 NULL, 0,
5185 0, 0,
5186 NULL, &struct_outputCnt,
5187 _output, outputCount,
5188 0, &ool_output_size);
5189
5190 for (i = 0; i < *outputCount; i++) {
5191 output[i] = SCALAR32(_output[i]);
5192 }
5193
5194 return err;
5195 }
5196 /* Routine io_async_method_scalarI_structureO */
5197 kern_return_t
5198 is_io_async_method_scalarI_structureO(
5199 io_object_t connect,
5200 mach_port_t wake_port,
5201 io_async_ref_t reference,
5202 mach_msg_type_number_t referenceCnt,
5203 uint32_t index,
5204 io_scalar_inband_t input,
5205 mach_msg_type_number_t inputCount,
5206 io_struct_inband_t output,
5207 mach_msg_type_number_t * outputCount )
5208 {
5209 uint32_t i;
5210 io_scalar_inband64_t _input;
5211 io_async_ref64_t _reference;
5212
5213 if (referenceCnt > ASYNC_REF64_COUNT) {
5214 return kIOReturnBadArgument;
5215 }
5216 for (i = 0; i < referenceCnt; i++) {
5217 _reference[i] = REF64(reference[i]);
5218 }
5219 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5220
5221 mach_msg_type_number_t scalar_outputCnt = 0;
5222 mach_vm_size_t ool_output_size = 0;
5223
5224 for (i = 0; i < inputCount; i++) {
5225 _input[i] = SCALAR64(input[i]);
5226 }
5227
5228 return is_io_connect_async_method(connect,
5229 wake_port, _reference, referenceCnt,
5230 index,
5231 _input, inputCount,
5232 NULL, 0,
5233 0, 0,
5234 output, outputCount,
5235 NULL, &scalar_outputCnt,
5236 0, &ool_output_size);
5237 }
5238
5239 /* Routine io_async_method_scalarI_structureI */
5240 kern_return_t
5241 is_io_async_method_scalarI_structureI(
5242 io_connect_t connect,
5243 mach_port_t wake_port,
5244 io_async_ref_t reference,
5245 mach_msg_type_number_t referenceCnt,
5246 uint32_t index,
5247 io_scalar_inband_t input,
5248 mach_msg_type_number_t inputCount,
5249 io_struct_inband_t inputStruct,
5250 mach_msg_type_number_t inputStructCount )
5251 {
5252 uint32_t i;
5253 io_scalar_inband64_t _input;
5254 io_async_ref64_t _reference;
5255
5256 if (referenceCnt > ASYNC_REF64_COUNT) {
5257 return kIOReturnBadArgument;
5258 }
5259 for (i = 0; i < referenceCnt; i++) {
5260 _reference[i] = REF64(reference[i]);
5261 }
5262 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5263
5264 mach_msg_type_number_t scalar_outputCnt = 0;
5265 mach_msg_type_number_t inband_outputCnt = 0;
5266 mach_vm_size_t ool_output_size = 0;
5267
5268 for (i = 0; i < inputCount; i++) {
5269 _input[i] = SCALAR64(input[i]);
5270 }
5271
5272 return is_io_connect_async_method(connect,
5273 wake_port, _reference, referenceCnt,
5274 index,
5275 _input, inputCount,
5276 inputStruct, inputStructCount,
5277 0, 0,
5278 NULL, &inband_outputCnt,
5279 NULL, &scalar_outputCnt,
5280 0, &ool_output_size);
5281 }
5282
5283 /* Routine io_async_method_structureI_structureO */
5284 kern_return_t
5285 is_io_async_method_structureI_structureO(
5286 io_object_t connect,
5287 mach_port_t wake_port,
5288 io_async_ref_t reference,
5289 mach_msg_type_number_t referenceCnt,
5290 uint32_t index,
5291 io_struct_inband_t input,
5292 mach_msg_type_number_t inputCount,
5293 io_struct_inband_t output,
5294 mach_msg_type_number_t * outputCount )
5295 {
5296 uint32_t i;
5297 mach_msg_type_number_t scalar_outputCnt = 0;
5298 mach_vm_size_t ool_output_size = 0;
5299 io_async_ref64_t _reference;
5300
5301 if (referenceCnt > ASYNC_REF64_COUNT) {
5302 return kIOReturnBadArgument;
5303 }
5304 for (i = 0; i < referenceCnt; i++) {
5305 _reference[i] = REF64(reference[i]);
5306 }
5307 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
5308
5309 return is_io_connect_async_method(connect,
5310 wake_port, _reference, referenceCnt,
5311 index,
5312 NULL, 0,
5313 input, inputCount,
5314 0, 0,
5315 output, outputCount,
5316 NULL, &scalar_outputCnt,
5317 0, &ool_output_size);
5318 }
5319
5320
/*
 * Shim that adapts a legacy IOExternalAsyncMethod table entry (scalar-in /
 * scalar-out) to the modern dispatch path. Narrows the 64-bit async
 * references to the legacy 32-bit io_async_ref_t, validates counts against
 * the method table, then calls the member function with the arity encoded
 * by inputCount (the reference array is always the first argument).
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	/* Pre-zero outputs: the copy-back below runs even on early bailout. */
	bzero(&_output[0], sizeof(_output));
	/* Narrow 64-bit references to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	/* do/while(false) + continue acts as a structured goto-out. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Legacy IOAsyncMethod takes the reference plus six void*
		 * slots; inputs fill the leading slots and output pointers
		 * fill the remainder.
		 */
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Copy back whatever the method produced (zeros on failure paths). */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
5412
5413
5414 /* Routine io_connect_method_scalarI_structureO */
5415 kern_return_t
5416 is_io_connect_method_scalarI_structureO(
5417 io_object_t connect,
5418 uint32_t index,
5419 io_scalar_inband_t input,
5420 mach_msg_type_number_t inputCount,
5421 io_struct_inband_t output,
5422 mach_msg_type_number_t * outputCount )
5423 {
5424 uint32_t i;
5425 io_scalar_inband64_t _input;
5426
5427 mach_msg_type_number_t scalar_outputCnt = 0;
5428 mach_vm_size_t ool_output_size = 0;
5429
5430 for (i = 0; i < inputCount; i++) {
5431 _input[i] = SCALAR64(input[i]);
5432 }
5433
5434 return is_io_connect_method(connect, index,
5435 _input, inputCount,
5436 NULL, 0,
5437 0, 0,
5438 output, outputCount,
5439 NULL, &scalar_outputCnt,
5440 0, &ool_output_size);
5441 }
5442
/*
 * Shim that adapts a legacy IOExternalMethod table entry (scalar-in /
 * structure-out) to the modern dispatch path. Validates inputCount against
 * count0, and *outputCount against count1 unless the table declares a
 * variable-size structure (kIOUCVariableStructureSize); then calls the
 * member function with the arity encoded by inputCount.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	/* do/while(false) + continue acts as a structured goto-out. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Inputs fill the leading slots; the structure buffer and,
		 * for arity < 5, a pointer to its size fill the remainder.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5507
5508
/*
 * Shim that adapts a legacy IOExternalAsyncMethod table entry (scalar-in /
 * structure-out) to the modern dispatch path. Narrows the 64-bit async
 * references to the legacy 32-bit io_async_ref_t, validates counts
 * (count1 may be kIOUCVariableStructureSize for variable-size output),
 * then calls the member function with the arity encoded by inputCount.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow 64-bit references to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	/* do/while(false) + continue acts as a structured goto-out. */
	do {
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Reference first, then inputs, then the structure buffer
		 * and (for arity < 5) a pointer to its size.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5586
5587 /* Routine io_connect_method_scalarI_structureI */
5588 kern_return_t
5589 is_io_connect_method_scalarI_structureI(
5590 io_connect_t connect,
5591 uint32_t index,
5592 io_scalar_inband_t input,
5593 mach_msg_type_number_t inputCount,
5594 io_struct_inband_t inputStruct,
5595 mach_msg_type_number_t inputStructCount )
5596 {
5597 uint32_t i;
5598 io_scalar_inband64_t _input;
5599
5600 mach_msg_type_number_t scalar_outputCnt = 0;
5601 mach_msg_type_number_t inband_outputCnt = 0;
5602 mach_vm_size_t ool_output_size = 0;
5603
5604 for (i = 0; i < inputCount; i++) {
5605 _input[i] = SCALAR64(input[i]);
5606 }
5607
5608 return is_io_connect_method(connect, index,
5609 _input, inputCount,
5610 inputStruct, inputStructCount,
5611 0, 0,
5612 NULL, &inband_outputCnt,
5613 NULL, &scalar_outputCnt,
5614 0, &ool_output_size);
5615 }
5616
/*
 * Shim that adapts a legacy IOExternalMethod table entry (scalar-in /
 * structure-in) to the modern dispatch path. Validates inputCount against
 * count0 and inputStructCount against count1 (unless variable-size), then
 * calls the member function with the arity encoded by inputCount.
 */
kern_return_t
shim_io_connect_method_scalarI_structureI(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	/* do/while(false) + continue acts as a structured goto-out. */
	do{
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			/*
			 * NOTE(review): input[2] is passed as (void *) input[2]
			 * rather than ARG32(input[2]) as every sibling case
			 * does - looks inconsistent; confirm against method
			 * tables relying on the full 64-bit value before
			 * changing.
			 */
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5682
/*
 * Shim that adapts a legacy IOExternalAsyncMethod table entry (scalar-in /
 * structure-in) to the modern dispatch path. Narrows the 64-bit async
 * references to the legacy 32-bit io_async_ref_t, validates counts
 * (count1 may be kIOUCVariableStructureSize), then calls the member
 * function with the arity encoded by inputCount.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Narrow 64-bit references to the legacy 32-bit form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	/* do/while(false) + continue acts as a structured goto-out. */
	do{
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/*
		 * Reference first, then inputs, then the structure buffer
		 * and its count.
		 */
		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5763
5764 /* Routine io_connect_method_structureI_structureO */
5765 kern_return_t
5766 is_io_connect_method_structureI_structureO(
5767 io_object_t connect,
5768 uint32_t index,
5769 io_struct_inband_t input,
5770 mach_msg_type_number_t inputCount,
5771 io_struct_inband_t output,
5772 mach_msg_type_number_t * outputCount )
5773 {
5774 mach_msg_type_number_t scalar_outputCnt = 0;
5775 mach_vm_size_t ool_output_size = 0;
5776
5777 return is_io_connect_method(connect, index,
5778 NULL, 0,
5779 input, inputCount,
5780 0, 0,
5781 output, outputCount,
5782 NULL, &scalar_outputCnt,
5783 0, &ool_output_size);
5784 }
5785
5786 kern_return_t
5787 shim_io_connect_method_structureI_structureO(
5788 IOExternalMethod * method,
5789 IOService * object,
5790 io_struct_inband_t input,
5791 mach_msg_type_number_t inputCount,
5792 io_struct_inband_t output,
5793 IOByteCount * outputCount )
5794 {
5795 IOMethod func;
5796 IOReturn err = kIOReturnBadArgument;
5797
5798 do{
5799 if ((kIOUCVariableStructureSize != method->count0)
5800 && (inputCount != method->count0)) {
5801 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5802 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5803 continue;
5804 }
5805 if ((kIOUCVariableStructureSize != method->count1)
5806 && (*outputCount != method->count1)) {
5807 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5808 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5809 continue;
5810 }
5811
5812 func = method->func;
5813
5814 if (method->count1) {
5815 if (method->count0) {
5816 err = (object->*func)( input, output,
5817 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5818 } else {
5819 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5820 }
5821 } else {
5822 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5823 }
5824 }while (false);
5825
5826
5827 return err;
5828 }
5829
/*
 * Legacy shim: dispatch a structure-in/structure-out ASYNC external method
 * through an IOExternalAsyncMethod table entry.  The 64-bit user async
 * references are narrowed (REF32) into a kernel io_async_ref_t before the
 * handler is invoked.  Returns kIOReturnBadArgument when the caller's
 * struct sizes do not match the method table's expected counts.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Repack the user-space async reference words into the kernel-side
	// (32-bit element) reference array passed to the handler.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	// do/while(false) + continue == structured early-exit on bad sizes.
	do{
		// kIOUCVariableStructureSize in the table accepts any size.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Argument layout depends on which counts the entry declares:
		// input+output, output-only, or input-only.
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5885
/* Routine io_catalog_send_data */
/*
 * MIG server routine: receive a serialized driver-personality payload from
 * user space (kextd and friends) and apply it to the global IOCatalogue.
 * The MIG-level return is KERN_SUCCESS once ownership of the copied-in
 * payload has been taken; the operation's actual status is reported through
 * *result.  Requires the master device port and the catalogue-management
 * entitlement.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
#if NO_KEXTD
	// Platforms built without kextd refuse catalogue updates outright.
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Every flag except the kextd lifecycle notifications must carry a
	// non-empty payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOTaskHasEntitlement(current_task(), kIOCatalogManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Bound the payload size before mapping it into the kernel.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds
		// (the copy object has been consumed; MIG must not retry/free it)

		if (inDataCount) {
			// Unserialize the XML personality data, then release the
			// mapping regardless of the parse outcome.
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	// Dispatch on the operation; each case type-checks the unserialized
	// object (OSArray or OSDictionary) before touching the catalogue.
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	// Retired / notification-only operations.
	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
	case kIOCatalogKextdActive:
	case kIOCatalogKextdFinishedLaunching:
		kr = KERN_NOT_SUPPORTED;
		break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD */
}
6019
6020 /* Routine io_catalog_terminate */
6021 kern_return_t
6022 is_io_catalog_terminate(
6023 mach_port_t master_port,
6024 uint32_t flag,
6025 io_name_t name )
6026 {
6027 kern_return_t kr;
6028
6029 if (master_port != master_device_port) {
6030 return kIOReturnNotPrivileged;
6031 }
6032
6033 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
6034 kIOClientPrivilegeAdministrator );
6035 if (kIOReturnSuccess != kr) {
6036 return kr;
6037 }
6038
6039 switch (flag) {
6040 #if !defined(SECURE_KERNEL)
6041 case kIOCatalogServiceTerminate:
6042 kr = gIOCatalogue->terminateDrivers(NULL, name);
6043 break;
6044
6045 case kIOCatalogModuleUnload:
6046 case kIOCatalogModuleTerminate:
6047 kr = gIOCatalogue->terminateDriversForModule(name,
6048 flag == kIOCatalogModuleUnload);
6049 break;
6050 #endif
6051
6052 default:
6053 kr = kIOReturnBadArgument;
6054 break;
6055 }
6056
6057 return kr;
6058 }
6059
6060 /* Routine io_catalog_get_data */
6061 kern_return_t
6062 is_io_catalog_get_data(
6063 mach_port_t master_port,
6064 uint32_t flag,
6065 io_buf_ptr_t *outData,
6066 mach_msg_type_number_t *outDataCount)
6067 {
6068 kern_return_t kr = kIOReturnSuccess;
6069 OSSerialize * s;
6070
6071 if (master_port != master_device_port) {
6072 return kIOReturnNotPrivileged;
6073 }
6074
6075 //printf("io_catalog_get_data called. flag: %d\n", flag);
6076
6077 s = OSSerialize::withCapacity(4096);
6078 if (!s) {
6079 return kIOReturnNoMemory;
6080 }
6081
6082 kr = gIOCatalogue->serializeData(flag, s);
6083
6084 if (kr == kIOReturnSuccess) {
6085 vm_offset_t data;
6086 vm_map_copy_t copy;
6087 unsigned int size;
6088
6089 size = s->getLength();
6090 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
6091 if (kr == kIOReturnSuccess) {
6092 bcopy(s->text(), (void *)data, size);
6093 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
6094 size, true, &copy);
6095 *outData = (char *)copy;
6096 *outDataCount = size;
6097 }
6098 }
6099
6100 s->release();
6101
6102 return kr;
6103 }
6104
6105 /* Routine io_catalog_get_gen_count */
6106 kern_return_t
6107 is_io_catalog_get_gen_count(
6108 mach_port_t master_port,
6109 uint32_t *genCount)
6110 {
6111 if (master_port != master_device_port) {
6112 return kIOReturnNotPrivileged;
6113 }
6114
6115 //printf("io_catalog_get_gen_count called.\n");
6116
6117 if (!genCount) {
6118 return kIOReturnBadArgument;
6119 }
6120
6121 *genCount = gIOCatalogue->getGenerationCount();
6122
6123 return kIOReturnSuccess;
6124 }
6125
6126 /* Routine io_catalog_module_loaded.
6127 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
6128 */
6129 kern_return_t
6130 is_io_catalog_module_loaded(
6131 mach_port_t master_port,
6132 io_name_t name)
6133 {
6134 if (master_port != master_device_port) {
6135 return kIOReturnNotPrivileged;
6136 }
6137
6138 //printf("io_catalog_module_loaded called. name %s\n", name);
6139
6140 if (!name) {
6141 return kIOReturnBadArgument;
6142 }
6143
6144 gIOCatalogue->moduleHasLoaded(name);
6145
6146 return kIOReturnSuccess;
6147 }
6148
6149 kern_return_t
6150 is_io_catalog_reset(
6151 mach_port_t master_port,
6152 uint32_t flag)
6153 {
6154 if (master_port != master_device_port) {
6155 return kIOReturnNotPrivileged;
6156 }
6157
6158 switch (flag) {
6159 case kIOCatalogResetDefault:
6160 gIOCatalogue->reset();
6161 break;
6162
6163 default:
6164 return kIOReturnBadArgument;
6165 }
6166
6167 return kIOReturnSuccess;
6168 }
6169
/*
 * Fast-path Mach trap for user-client method invocation (bypasses MIG).
 * The userClientRef encodes which path to take: bit 32 set routes to the
 * user-extension (DriverKit/IOUserServer) trap handler, otherwise the ref
 * is a port name for a classic IOUserClient whose trap table is consulted.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;

	ref = (uintptr_t) args->userClientRef;
	if ((1ULL << 32) & ref) {
		// User-extension (uext) reference: forward all six trap
		// arguments to the IOUserServer trap dispatcher.
		object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
		IOExternalTrap *trap = NULL;
		IOService *target = NULL;

		result = kIOReturnSuccess;
		// Give any registered sandbox-style filter a chance to deny
		// this trap index before looking it up.
		io_filter_policy_t filterPolicy = userClient->filterForTask(current_task(), 0);
		if (filterPolicy && gIOUCFilterCallbacks->io_filter_applier) {
			result = gIOUCFilterCallbacks->io_filter_applier(filterPolicy, io_filter_type_trap, args->index);
		}
		if (kIOReturnSuccess == result) {
			trap = userClient->getTargetAndTrapForIndex(&target, args->index);
		}
		if (trap && target) {
			IOTrap func;

			func = trap->func;

			if (func) {
				// Invoke the trap handler with the raw
				// user-supplied arguments.
				result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
			}
		}

		// Drop the reference taken by the connect-ref lookup.
		iokit_remove_connect_reference(userClient);
	}

	return result;
}
6212
6213 /* Routine io_device_tree_entry_exists_with_name */
6214 kern_return_t
6215 is_io_device_tree_entry_exists_with_name(
6216 mach_port_t master_port,
6217 io_name_t name,
6218 boolean_t *exists )
6219 {
6220 OSCollectionIterator *iter;
6221
6222 if (master_port != master_device_port) {
6223 return kIOReturnNotPrivileged;
6224 }
6225
6226 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
6227 *exists = iter && iter->getNextObject();
6228 OSSafeReleaseNULL(iter);
6229
6230 return kIOReturnSuccess;
6231 }
6232 } /* extern "C" */
6233
/*
 * Central external-method dispatcher for IOUserClient.
 *
 * Two paths:
 *  - If a dispatch table entry is supplied, validate the scalar/structure
 *    input and output counts against it (kIOUCVariableStructureSize means
 *    "any"), then call its function; kIOReturnNoCompletion lets a subclass
 *    carry on dispatching itself.
 *  - Otherwise fall back to the legacy IOExternalMethod /
 *    IOExternalAsyncMethod tables and route to the matching shim by the
 *    method's type flags.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		// Structure sizes may arrive inline or via a memory descriptor;
		// check whichever form the caller used.
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementator can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	// Local copy: the sync structure-output shims write through an
	// IOByteCount*, which may be wider than args->structureOutputSize.
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		// Async path: look up the legacy async method table entry.
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		// Methods flagged foreground-only are refused for tasks that
		// have been denied GPU access.
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		// Synchronous path: legacy IOExternalMethod table.
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	// Reject an output size that cannot be represented in the 32-bit
	// args->structureOutputSize field before narrowing it back.
	if (structureOutputSize > UINT_MAX) {
		structureOutputSize = 0;
		err = kIOReturnBadArgument;
	}

	args->structureOutputSize = ((typeof(args->structureOutputSize))structureOutputSize);

	return err;
}
6386
6387 IOReturn
6388 IOUserClient::registerFilterCallbacks(const struct io_filter_callbacks *callbacks, size_t size)
6389 {
6390 if (size < sizeof(*callbacks)) {
6391 return kIOReturnBadArgument;
6392 }
6393 if (!OSCompareAndSwapPtr(NULL, __DECONST(void *, callbacks), &gIOUCFilterCallbacks)) {
6394 return kIOReturnBusy;
6395 }
6396 return kIOReturnSuccess;
6397 }
6398
/*
 * Reserved vtable padding slots for binary compatibility.  Slots 0 and 1
 * are marked in use on 32-bit (non-LP64) builds and unused on LP64; the
 * remaining slots are spare for future expansion.
 */
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);