apple/xnu xnu-6153.61.1: iokit/Kernel/IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54
55 #if CONFIG_MACF
56
57 extern "C" {
58 #include <security/mac_framework.h>
59 };
60 #include <sys/kauth.h>
61
62 #define IOMACF_LOG 0
63
64 #endif /* CONFIG_MACF */
65
66 #include <IOKit/assert.h>
67
68 #include "IOServicePrivate.h"
69 #include "IOKitKernelInternal.h"
70
71 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
72 #define SCALAR32(x) ((uint32_t )x)
73 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
74 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
75 #define REF32(x) ((int)(x))
76
77 enum{
78 kIOUCAsync0Flags = 3ULL,
79 kIOUCAsync64Flag = 1ULL,
80 kIOUCAsyncErrorLoggedFlag = 2ULL
81 };
82
83 #if IOKITSTATS
84
85 #define IOStatisticsRegisterCounter() \
86 do { \
87 reserved->counter = IOStatistics::registerUserClient(this); \
88 } while (0)
89
90 #define IOStatisticsUnregisterCounter() \
91 do { \
92 if (reserved) \
93 IOStatistics::unregisterUserClient(reserved->counter); \
94 } while (0)
95
96 #define IOStatisticsClientCall() \
97 do { \
98 IOStatistics::countUserClientCall(client); \
99 } while (0)
100
101 #else
102
103 #define IOStatisticsRegisterCounter()
104 #define IOStatisticsUnregisterCounter()
105 #define IOStatisticsClientCall()
106
107 #endif /* IOKITSTATS */
108
109 #if DEVELOPMENT || DEBUG
110
111 #define FAKE_STACK_FRAME(a) \
112 const void ** __frameptr; \
113 const void * __retaddr; \
114 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
115 __retaddr = __frameptr[1]; \
116 __frameptr[1] = (a);
117
118 #define FAKE_STACK_FRAME_END() \
119 __frameptr[1] = __retaddr;
120
121 #else /* DEVELOPMENT || DEBUG */
122
123 #define FAKE_STACK_FRAME(a)
124 #define FAKE_STACK_FRAME_END()
125
126 #endif /* DEVELOPMENT || DEBUG */
127
128 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
129 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
130
131 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
132
133 extern "C" {
134 #include <mach/mach_traps.h>
135 #include <vm/vm_map.h>
136 } /* extern "C" */
137
138 struct IOMachPortHashList;
139
140 static_assert(IKOT_MAX_TYPE <= 255);
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
145 class IOMachPort : public OSObject
146 {
147 OSDeclareDefaultStructors(IOMachPort);
148 public:
149 SLIST_ENTRY(IOMachPort) link;
150 ipc_port_t port;
151 OSObject* object;
152 UInt32 mscount;
153 UInt8 holdDestroy;
154 UInt8 type;
155
156 static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);
157
158 static IOMachPortHashList* bucketForObject(OSObject *obj,
159 ipc_kobject_type_t type);
160
161 static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);
162
163 static bool noMoreSendersForObject( OSObject * obj,
164 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
165 static void releasePortForObject( OSObject * obj,
166 ipc_kobject_type_t type );
167 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
168
169 static mach_port_name_t makeSendRightForTask( task_t task,
170 io_object_t obj, ipc_kobject_type_t type );
171
172 virtual void free() APPLE_KEXT_OVERRIDE;
173 };
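/*
 * Summary of the machinery below: IOMachPort objects live in a fixed-size hash
 * table ('ports') keyed by the kernel pointer of the wrapped OSObject.
 * bucketForObject() selects the bucket, portForObjectInBucket() walks that
 * bucket's SLIST for a matching object/type pair, and all list manipulation is
 * serialized by gIOObjectPortLock. A port is created lazily the first time
 * iokit_port_for_object() is asked for an object, and torn down in
 * noMoreSendersForObject() once the last user-space send right goes away.
 */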
174
175 #define super OSObject
176 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
177
178 static IOLock * gIOObjectPortLock;
179 IOLock * gIOUserServerLock;
180
181 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
182
183 SLIST_HEAD(IOMachPortHashList, IOMachPort);
184
185 #if CONFIG_EMBEDDED
186 #define PORT_HASH_SIZE 256
187 #else
188 #define PORT_HASH_SIZE 4096
189 #endif /* CONFIG_EMBEDDED */
190
191 IOMachPortHashList ports[PORT_HASH_SIZE];
192
193 void
194 IOMachPortInitialize(void)
195 {
196 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
197 SLIST_INIT(&ports[i]);
198 }
199 }
200
201 IOMachPortHashList*
202 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
203 {
204 return &ports[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
205 }
206
207 IOMachPort*
208 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
209 {
210 IOMachPort *machPort;
211
212 SLIST_FOREACH(machPort, bucket, link) {
213 if (machPort->object == obj && machPort->type == type) {
214 return machPort;
215 }
216 }
217 return NULL;
218 }
219
220 IOMachPort*
221 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
222 {
223 IOMachPort *machPort = NULL;
224
225 machPort = new IOMachPort;
226 if (__improbable(machPort && !machPort->init())) {
227 return NULL;
228 }
229
230 machPort->object = obj;
231 machPort->type = (typeof(machPort->type))type;
232 machPort->port = iokit_alloc_object_port(obj, type);
233
234 obj->taggedRetain(OSTypeID(OSCollection));
235 machPort->mscount++;
236
237 return machPort;
238 }
239
240 bool
241 IOMachPort::noMoreSendersForObject( OSObject * obj,
242 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
243 {
244 IOMachPort *machPort = NULL;
245 IOUserClient *uc;
246 OSAction *action;
247 bool destroyed = true;
248
249 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
250
251 obj->retain();
252
253 lck_mtx_lock(gIOObjectPortLock);
254
255 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
256
257 if (machPort) {
258 destroyed = (machPort->mscount <= *mscount);
259 if (!destroyed) {
260 *mscount = machPort->mscount;
261 lck_mtx_unlock(gIOObjectPortLock);
262 } else {
263 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
264 uc->noMoreSenders();
265 }
266 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
267
268 lck_mtx_unlock(gIOObjectPortLock);
269
270 machPort->release();
271 obj->taggedRelease(OSTypeID(OSCollection));
272 }
273 } else {
274 lck_mtx_unlock(gIOObjectPortLock);
275 }
276
277 if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
278 action->Aborted();
279 }
280
281 obj->release();
282
283 return destroyed;
284 }
285
286 void
287 IOMachPort::releasePortForObject( OSObject * obj,
288 ipc_kobject_type_t type )
289 {
290 IOMachPort *machPort;
291 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
292
293 assert(IKOT_IOKIT_CONNECT != type);
294
295 lck_mtx_lock(gIOObjectPortLock);
296
297 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
298
299 if (machPort && !machPort->holdDestroy) {
300 obj->retain();
301 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
302
303 lck_mtx_unlock(gIOObjectPortLock);
304
305 machPort->release();
306 obj->taggedRelease(OSTypeID(OSCollection));
307 obj->release();
308 } else {
309 lck_mtx_unlock(gIOObjectPortLock);
310 }
311 }
312
313 void
314 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
315 {
316 IOMachPort * machPort;
317
318 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
319 lck_mtx_lock(gIOObjectPortLock);
320
321 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
322
323 if (machPort) {
324 machPort->holdDestroy = true;
325 }
326
327 lck_mtx_unlock(gIOObjectPortLock);
328 }
329
330 void
331 IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
332 {
333 IOMachPort::releasePortForObject(obj, type);
334 }
335
336 void
337 IOUserClient::destroyUserReferences( OSObject * obj )
338 {
339 IOMachPort *machPort;
340
341 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
342
343 // panther, 3160200
344 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
345
346 obj->retain();
347 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
348 IOMachPortHashList *mappingBucket = NULL;
349
350 lck_mtx_lock(gIOObjectPortLock);
351
352 IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
353 if (uc && uc->mappings) {
354 mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
355 }
356
357 machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);
358
359 if (machPort == NULL) {
360 lck_mtx_unlock(gIOObjectPortLock);
361 goto end;
362 }
363
364 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
365 obj->taggedRelease(OSTypeID(OSCollection));
366
367 if (uc) {
368 uc->noMoreSenders();
369 if (uc->mappings) {
370 uc->mappings->taggedRetain(OSTypeID(OSCollection));
371 machPort->object = uc->mappings;
372 SLIST_INSERT_HEAD(mappingBucket, machPort, link);
373 iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);
374
375 lck_mtx_unlock(gIOObjectPortLock);
376
377 uc->mappings->release();
378 uc->mappings = NULL;
379 } else {
380 lck_mtx_unlock(gIOObjectPortLock);
381 machPort->release();
382 }
383 } else {
384 lck_mtx_unlock(gIOObjectPortLock);
385 machPort->release();
386 }
387
388
389 end:
390
391 obj->release();
392 }
393
394 mach_port_name_t
395 IOMachPort::makeSendRightForTask( task_t task,
396 io_object_t obj, ipc_kobject_type_t type )
397 {
398 return iokit_make_send_right( task, obj, type );
399 }
400
401 void
402 IOMachPort::free( void )
403 {
404 if (port) {
405 iokit_destroy_object_port( port );
406 }
407 super::free();
408 }
409
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411
412 class IOUserIterator : public OSIterator
413 {
414 OSDeclareDefaultStructors(IOUserIterator);
415 public:
416 OSObject * userIteratorObject;
417 IOLock * lock;
418
419 static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
420 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
421 virtual void free() APPLE_KEXT_OVERRIDE;
422
423 virtual void reset() APPLE_KEXT_OVERRIDE;
424 virtual bool isValid() APPLE_KEXT_OVERRIDE;
425 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
426 virtual OSObject * copyNextObject();
427 };
428
429 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
430
431 class IOUserNotification : public IOUserIterator
432 {
433 OSDeclareDefaultStructors(IOUserNotification);
434
435 #define holdNotify userIteratorObject
436
437 public:
438
439 virtual void free() APPLE_KEXT_OVERRIDE;
440
441 virtual void setNotification( IONotifier * obj );
442
443 virtual void reset() APPLE_KEXT_OVERRIDE;
444 virtual bool isValid() APPLE_KEXT_OVERRIDE;
445 };
446
447 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
448
449 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
450
451 IOUserIterator *
452 IOUserIterator::withIterator(OSIterator * iter)
453 {
454 IOUserIterator * me;
455
456 if (!iter) {
457 return NULL;
458 }
459
460 me = new IOUserIterator;
461 if (me && !me->init()) {
462 me->release();
463 me = NULL;
464 }
465 if (!me) {
466 return me;
467 }
468 me->userIteratorObject = iter;
469
470 return me;
471 }
472
473 bool
474 IOUserIterator::init( void )
475 {
476 if (!OSObject::init()) {
477 return false;
478 }
479
480 lock = IOLockAlloc();
481 if (!lock) {
482 return false;
483 }
484
485 return true;
486 }
487
488 void
489 IOUserIterator::free()
490 {
491 if (userIteratorObject) {
492 userIteratorObject->release();
493 }
494 if (lock) {
495 IOLockFree(lock);
496 }
497 OSObject::free();
498 }
499
500 void
501 IOUserIterator::reset()
502 {
503 IOLockLock(lock);
504 assert(OSDynamicCast(OSIterator, userIteratorObject));
505 ((OSIterator *)userIteratorObject)->reset();
506 IOLockUnlock(lock);
507 }
508
509 bool
510 IOUserIterator::isValid()
511 {
512 bool ret;
513
514 IOLockLock(lock);
515 assert(OSDynamicCast(OSIterator, userIteratorObject));
516 ret = ((OSIterator *)userIteratorObject)->isValid();
517 IOLockUnlock(lock);
518
519 return ret;
520 }
521
522 OSObject *
523 IOUserIterator::getNextObject()
524 {
525 assert(false);
526 return NULL;
527 }
528
529 OSObject *
530 IOUserIterator::copyNextObject()
531 {
532 OSObject * ret = NULL;
533
534 IOLockLock(lock);
535 if (userIteratorObject) {
536 ret = ((OSIterator *)userIteratorObject)->getNextObject();
537 if (ret) {
538 ret->retain();
539 }
540 }
541 IOLockUnlock(lock);
542
543 return ret;
544 }
545
546 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
547 extern "C" {
548 // functions called from osfmk/device/iokit_rpc.c
549
550 void
551 iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
552 {
553 IOUserClient * uc;
554
555 if (!obj) {
556 return;
557 }
558
559 if ((IKOT_IOKIT_CONNECT == type)
560 && (uc = OSDynamicCast(IOUserClient, obj))) {
561 OSIncrementAtomic(&uc->__ipc);
562 }
563
564 obj->retain();
565 }
566
567 void
568 iokit_remove_reference( io_object_t obj )
569 {
570 if (obj) {
571 obj->release();
572 }
573 }
574
575 void
576 iokit_remove_connect_reference( io_object_t obj )
577 {
578 IOUserClient * uc;
579 bool finalize = false;
580
581 if (!obj) {
582 return;
583 }
584
585 if ((uc = OSDynamicCast(IOUserClient, obj))) {
586 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
587 IOLockLock(gIOObjectPortLock);
588 if ((finalize = uc->__ipcFinal)) {
589 uc->__ipcFinal = false;
590 }
591 IOLockUnlock(gIOObjectPortLock);
592 }
593 if (finalize) {
594 uc->scheduleFinalize(true);
595 }
596 }
597
598 obj->release();
599 }
600
601 bool
602 IOUserClient::finalizeUserReferences(OSObject * obj)
603 {
604 IOUserClient * uc;
605 bool ok = true;
606
607 if ((uc = OSDynamicCast(IOUserClient, obj))) {
608 IOLockLock(gIOObjectPortLock);
609 if ((uc->__ipcFinal = (0 != uc->__ipc))) {
610 ok = false;
611 }
612 IOLockUnlock(gIOObjectPortLock);
613 }
614 return ok;
615 }
616
617 ipc_port_t
618 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
619 {
620 IOMachPort *machPort = NULL;
621 ipc_port_t port = NULL;
622
623 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
624
625 lck_mtx_lock(gIOObjectPortLock);
626
627 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
628
629 if (__improbable(machPort == NULL)) {
630 machPort = IOMachPort::withObjectAndType(obj, type);
631 if (__improbable(machPort == NULL)) {
632 goto end;
633 }
634 SLIST_INSERT_HEAD(bucket, machPort, link);
635 } else {
636 machPort->mscount++;
637 }
638
639 iokit_retain_port(machPort->port);
640 port = machPort->port;
641
642 end:
643 lck_mtx_unlock(gIOObjectPortLock);
644
645 return port;
646 }
647
648 kern_return_t
649 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
650 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
651 {
652 IOUserClient * client;
653 IOMemoryMap * map;
654 IOUserNotification * notify;
655
656 if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
657 return kIOReturnNotReady;
658 }
659
660 if (IKOT_IOKIT_CONNECT == type) {
661 if ((client = OSDynamicCast( IOUserClient, obj ))) {
662 IOStatisticsClientCall();
663 IOLockLock(client->lock);
664 client->clientDied();
665 IOLockUnlock(client->lock);
666 }
667 } else if (IKOT_IOKIT_OBJECT == type) {
668 if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
669 map->taskDied();
670 } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
671 notify->setNotification( NULL );
672 }
673 }
674
675 return kIOReturnSuccess;
676 }
677 }; /* extern "C" */
678
679 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
680
681 class IOServiceUserNotification : public IOUserNotification
682 {
683 OSDeclareDefaultStructors(IOServiceUserNotification);
684
685 struct PingMsg {
686 mach_msg_header_t msgHdr;
687 OSNotificationHeader64 notifyHeader;
688 };
689
690 enum { kMaxOutstanding = 1024 };
691
692 PingMsg * pingMsg;
693 vm_size_t msgSize;
694 OSArray * newSet;
695 bool armed;
696 bool ipcLogged;
697
698 public:
699
700 virtual bool init( mach_port_t port, natural_t type,
701 void * reference, vm_size_t referenceSize,
702 bool clientIs64 );
703 virtual void free() APPLE_KEXT_OVERRIDE;
704 void invalidatePort(void);
705
706 static bool _handler( void * target,
707 void * ref, IOService * newService, IONotifier * notifier );
708 virtual bool handler( void * ref, IOService * newService );
709
710 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
711 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
712 };
713
714 class IOServiceMessageUserNotification : public IOUserNotification
715 {
716 OSDeclareDefaultStructors(IOServiceMessageUserNotification);
717
718 struct PingMsg {
719 mach_msg_header_t msgHdr;
720 mach_msg_body_t msgBody;
721 mach_msg_port_descriptor_t ports[1];
722 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
723 };
724
725 PingMsg * pingMsg;
726 vm_size_t msgSize;
727 uint8_t clientIs64;
728 int owningPID;
729 bool ipcLogged;
730
731 public:
732
733 virtual bool init( mach_port_t port, natural_t type,
734 void * reference, vm_size_t referenceSize,
735 vm_size_t extraSize,
736 bool clientIs64 );
737
738 virtual void free() APPLE_KEXT_OVERRIDE;
739 void invalidatePort(void);
740
741 static IOReturn _handler( void * target, void * ref,
742 UInt32 messageType, IOService * provider,
743 void * messageArgument, vm_size_t argSize );
744 virtual IOReturn handler( void * ref,
745 UInt32 messageType, IOService * provider,
746 void * messageArgument, vm_size_t argSize );
747
748 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
749 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
750 };
751
752 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
753
754 #undef super
755 #define super IOUserIterator
756 OSDefineMetaClass( IOUserNotification, IOUserIterator );
757 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
758
759 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
760
761 void
762 IOUserNotification::free( void )
763 {
764 if (holdNotify) {
765 assert(OSDynamicCast(IONotifier, holdNotify));
766 ((IONotifier *)holdNotify)->remove();
767 holdNotify = NULL;
768 }
769 // can't be in handler now
770
771 super::free();
772 }
773
774
775 void
776 IOUserNotification::setNotification( IONotifier * notify )
777 {
778 OSObject * previousNotify;
779
780 IOLockLock( gIOObjectPortLock);
781
782 previousNotify = holdNotify;
783 holdNotify = notify;
784
785 IOLockUnlock( gIOObjectPortLock);
786
787 if (previousNotify) {
788 assert(OSDynamicCast(IONotifier, previousNotify));
789 ((IONotifier *)previousNotify)->remove();
790 }
791 }
792
793 void
794 IOUserNotification::reset()
795 {
796 // ?
797 }
798
799 bool
800 IOUserNotification::isValid()
801 {
802 return true;
803 }
804
805 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
806
807 #undef super
808 #define super IOUserNotification
809 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
810
811 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
812
813 bool
814 IOServiceUserNotification::init( mach_port_t port, natural_t type,
815 void * reference, vm_size_t referenceSize,
816 bool clientIs64 )
817 {
818 if (!super::init()) {
819 return false;
820 }
821
822 newSet = OSArray::withCapacity( 1 );
823 if (!newSet) {
824 return false;
825 }
826
827 if (referenceSize > sizeof(OSAsyncReference64)) {
828 return false;
829 }
830
831 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
832 pingMsg = (PingMsg *) IOMalloc( msgSize);
833 if (!pingMsg) {
834 return false;
835 }
836
837 bzero( pingMsg, msgSize);
838
839 pingMsg->msgHdr.msgh_remote_port = port;
840 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
841 MACH_MSG_TYPE_COPY_SEND /*remote*/,
842 MACH_MSG_TYPE_MAKE_SEND /*local*/);
843 pingMsg->msgHdr.msgh_size = msgSize;
844 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
845
846 pingMsg->notifyHeader.size = 0;
847 pingMsg->notifyHeader.type = type;
848 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
849
850 return true;
851 }
852
853 void
854 IOServiceUserNotification::invalidatePort(void)
855 {
856 if (pingMsg) {
857 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
858 }
859 }
860
861 void
862 IOServiceUserNotification::free( void )
863 {
864 PingMsg * _pingMsg;
865 vm_size_t _msgSize;
866 OSArray * _newSet;
867
868 _pingMsg = pingMsg;
869 _msgSize = msgSize;
870 _newSet = newSet;
871
872 super::free();
873
874 if (_pingMsg && _msgSize) {
875 if (_pingMsg->msgHdr.msgh_remote_port) {
876 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
877 }
878 IOFree(_pingMsg, _msgSize);
879 }
880
881 if (_newSet) {
882 _newSet->release();
883 }
884 }
885
886 bool
887 IOServiceUserNotification::_handler( void * target,
888 void * ref, IOService * newService, IONotifier * notifier )
889 {
890 return ((IOServiceUserNotification *) target)->handler( ref, newService );
891 }
892
893 bool
894 IOServiceUserNotification::handler( void * ref,
895 IOService * newService )
896 {
897 unsigned int count;
898 kern_return_t kr;
899 ipc_port_t port = NULL;
900 bool sendPing = false;
901
902 IOTakeLock( lock );
903
904 count = newSet->getCount();
905 if (count < kMaxOutstanding) {
906 newSet->setObject( newService );
907 if ((sendPing = (armed && (0 == count)))) {
908 armed = false;
909 }
910 }
911
912 IOUnlock( lock );
913
914 if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
915 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
916 }
917
918 if (sendPing) {
919 if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
920 pingMsg->msgHdr.msgh_local_port = port;
921 } else {
922 pingMsg->msgHdr.msgh_local_port = NULL;
923 }
924
925 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
926 pingMsg->msgHdr.msgh_size,
927 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
928 0);
929 if (port) {
930 iokit_release_port( port );
931 }
932
933 if ((KERN_SUCCESS != kr) && !ipcLogged) {
934 ipcLogged = true;
935 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
936 }
937 }
938
939 return true;
940 }
941 OSObject *
942 IOServiceUserNotification::getNextObject()
943 {
944 assert(false);
945 return NULL;
946 }
947
948 OSObject *
949 IOServiceUserNotification::copyNextObject()
950 {
951 unsigned int count;
952 OSObject * result;
953
954 IOLockLock(lock);
955
956 count = newSet->getCount();
957 if (count) {
958 result = newSet->getObject( count - 1 );
959 result->retain();
960 newSet->removeObject( count - 1);
961 } else {
962 result = NULL;
963 armed = true;
964 }
965
966 IOLockUnlock(lock);
967
968 return result;
969 }
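/*
 * For context: IOServiceUserNotification is the kernel-side endpoint of the
 * user-space matching-notification API. handler() queues newly matched services
 * in newSet and pings the client's notification port once; the client re-arms it
 * by draining the iterator. A hedged, illustrative user-space sketch (IOKitLib;
 * DeviceArrived and the matching class are placeholders):
 *
 *   IONotificationPortRef notifyPort = IONotificationPortCreate(kIOMasterPortDefault);
 *   CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *       IONotificationPortGetRunLoopSource(notifyPort), kCFRunLoopDefaultMode);
 *
 *   io_iterator_t iter;
 *   IOServiceAddMatchingNotification(notifyPort, kIOFirstMatchNotification,
 *       IOServiceMatching("IOUSBHostDevice"), DeviceArrived, NULL, &iter);
 *   DeviceArrived(NULL, iter);   // drain once to arm the notification
 */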
970
971 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
972
973 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
974
975 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
976
977 bool
978 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
979 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
980 bool client64 )
981 {
982 if (!super::init()) {
983 return false;
984 }
985
986 if (referenceSize > sizeof(OSAsyncReference64)) {
987 return false;
988 }
989
990 clientIs64 = client64;
991
992 owningPID = proc_selfpid();
993
994 extraSize += sizeof(IOServiceInterestContent64);
995 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
996 pingMsg = (PingMsg *) IOMalloc( msgSize);
997 if (!pingMsg) {
998 return false;
999 }
1000
1001 bzero( pingMsg, msgSize);
1002
1003 pingMsg->msgHdr.msgh_remote_port = port;
1004 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
1005 | MACH_MSGH_BITS(
1006 MACH_MSG_TYPE_COPY_SEND /*remote*/,
1007 MACH_MSG_TYPE_MAKE_SEND /*local*/);
1008 pingMsg->msgHdr.msgh_size = msgSize;
1009 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
1010
1011 pingMsg->msgBody.msgh_descriptor_count = 1;
1012
1013 pingMsg->ports[0].name = NULL;
1014 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
1015 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
1016
1017 pingMsg->notifyHeader.size = extraSize;
1018 pingMsg->notifyHeader.type = type;
1019 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
1020
1021 return true;
1022 }
1023
1024 void
1025 IOServiceMessageUserNotification::invalidatePort(void)
1026 {
1027 if (pingMsg) {
1028 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
1029 }
1030 }
1031
1032 void
1033 IOServiceMessageUserNotification::free( void )
1034 {
1035 PingMsg * _pingMsg;
1036 vm_size_t _msgSize;
1037
1038 _pingMsg = pingMsg;
1039 _msgSize = msgSize;
1040
1041 super::free();
1042
1043 if (_pingMsg && _msgSize) {
1044 if (_pingMsg->msgHdr.msgh_remote_port) {
1045 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
1046 }
1047 IOFree( _pingMsg, _msgSize);
1048 }
1049 }
1050
1051 IOReturn
1052 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1053 UInt32 messageType, IOService * provider,
1054 void * argument, vm_size_t argSize )
1055 {
1056 return ((IOServiceMessageUserNotification *) target)->handler(
1057 ref, messageType, provider, argument, argSize);
1058 }
1059
1060 IOReturn
1061 IOServiceMessageUserNotification::handler( void * ref,
1062 UInt32 messageType, IOService * provider,
1063 void * messageArgument, vm_size_t callerArgSize )
1064 {
1065 enum { kLocalMsgSize = 0x100 };
1066 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
1067 void * allocMsg;
1068 kern_return_t kr;
1069 vm_size_t argSize;
1070 vm_size_t thisMsgSize;
1071 ipc_port_t thisPort, providerPort;
1072 struct PingMsg * thisMsg;
1073 IOServiceInterestContent64 * data;
1074
1075 if (kIOMessageCopyClientID == messageType) {
1076 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1077 return kIOReturnSuccess;
1078 }
1079
1080 if (callerArgSize == 0) {
1081 if (clientIs64) {
1082 argSize = sizeof(data->messageArgument[0]);
1083 } else {
1084 argSize = sizeof(uint32_t);
1085 }
1086 } else {
1087 if (callerArgSize > kIOUserNotifyMaxMessageSize) {
1088 callerArgSize = kIOUserNotifyMaxMessageSize;
1089 }
1090 argSize = callerArgSize;
1091 }
1092
1093 // adjust message size for ipc restrictions
1094 natural_t type;
1095 type = pingMsg->notifyHeader.type;
1096 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1097 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1098 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1099
1100 thisMsgSize = msgSize
1101 + sizeof(IOServiceInterestContent64)
1102 - sizeof(data->messageArgument)
1103 + argSize;
1104
1105 if (thisMsgSize > sizeof(stackMsg)) {
1106 allocMsg = IOMalloc(thisMsgSize);
1107 if (!allocMsg) {
1108 return kIOReturnNoMemory;
1109 }
1110 thisMsg = (typeof(thisMsg))allocMsg;
1111 } else {
1112 allocMsg = NULL;
1113 thisMsg = (typeof(thisMsg))stackMsg;
1114 }
1115
1116 bcopy(pingMsg, thisMsg, msgSize);
1117 thisMsg->notifyHeader.type = type;
1118 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1119 // == pingMsg->notifyHeader.content;
1120 data->messageType = messageType;
1121
1122 if (callerArgSize == 0) {
1123 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1124 if (!clientIs64) {
1125 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1126 }
1127 } else {
1128 bcopy( messageArgument, data->messageArgument, callerArgSize );
1129 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1130 }
1131
1132 thisMsg->notifyHeader.type = type;
1133 thisMsg->msgHdr.msgh_size = thisMsgSize;
1134
1135 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1136 thisMsg->ports[0].name = providerPort;
1137 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1138 thisMsg->msgHdr.msgh_local_port = thisPort;
1139
1140 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1141 thisMsg->msgHdr.msgh_size,
1142 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1143 0);
1144 if (thisPort) {
1145 iokit_release_port( thisPort );
1146 }
1147 if (providerPort) {
1148 iokit_release_port( providerPort );
1149 }
1150
1151 if (allocMsg) {
1152 IOFree(allocMsg, thisMsgSize);
1153 }
1154
1155 if ((KERN_SUCCESS != kr) && !ipcLogged) {
1156 ipcLogged = true;
1157 IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
1158 }
1159
1160 return kIOReturnSuccess;
1161 }
1162
1163 OSObject *
1164 IOServiceMessageUserNotification::getNextObject()
1165 {
1166 return NULL;
1167 }
1168
1169 OSObject *
1170 IOServiceMessageUserNotification::copyNextObject()
1171 {
1172 return NULL;
1173 }
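/*
 * For context: IOServiceMessageUserNotification forwards IOService interest
 * messages (messageClients()/registerInterest) to user space, bundling the
 * provider's port and the message arguments into a single complex Mach message.
 * A hedged, illustrative user-space sketch (IOKitLib; ServiceInterest is a
 * placeholder callback):
 *
 *   io_object_t note;
 *   IOServiceAddInterestNotification(notifyPort, service, kIOGeneralInterest,
 *       ServiceInterest, NULL, &note);
 *
 *   // void ServiceInterest(void * refCon, io_service_t service,
 *   //     natural_t messageType, void * messageArgument);
 */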
1174
1175 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1176
1177 #undef super
1178 #define super IOService
1179 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1180
1181 IOLock * gIOUserClientOwnersLock;
1182
1183 void
1184 IOUserClient::initialize( void )
1185 {
1186 gIOObjectPortLock = IOLockAlloc();
1187 gIOUserClientOwnersLock = IOLockAlloc();
1188 gIOUserServerLock = IOLockAlloc();
1189 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1190 }
1191
1192 void
1193 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1194 mach_port_t wakePort,
1195 void *callback, void *refcon)
1196 {
1197 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1198 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1199 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1200 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1201 }
1202
1203 void
1204 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1205 mach_port_t wakePort,
1206 mach_vm_address_t callback, io_user_reference_t refcon)
1207 {
1208 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1209 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1210 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1211 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1212 }
1213
1214 void
1215 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1216 mach_port_t wakePort,
1217 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1218 {
1219 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1220 if (vm_map_is_64bit(get_task_map(task))) {
1221 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1222 }
1223 }
1224
1225 static OSDictionary *
1226 CopyConsoleUser(UInt32 uid)
1227 {
1228 OSArray * array;
1229 OSDictionary * user = NULL;
1230
1231 if ((array = OSDynamicCast(OSArray,
1232 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1233 for (unsigned int idx = 0;
1234 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1235 idx++) {
1236 OSNumber * num;
1237
1238 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1239 && (uid == num->unsigned32BitValue())) {
1240 user->retain();
1241 break;
1242 }
1243 }
1244 array->release();
1245 }
1246 return user;
1247 }
1248
1249 static OSDictionary *
1250 CopyUserOnConsole(void)
1251 {
1252 OSArray * array;
1253 OSDictionary * user = NULL;
1254
1255 if ((array = OSDynamicCast(OSArray,
1256 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1257 for (unsigned int idx = 0;
1258 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1259 idx++) {
1260 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1261 user->retain();
1262 break;
1263 }
1264 }
1265 array->release();
1266 }
1267 return user;
1268 }
1269
1270 IOReturn
1271 IOUserClient::clientHasAuthorization( task_t task,
1272 IOService * service )
1273 {
1274 proc_t p;
1275
1276 p = (proc_t) get_bsdtask_info(task);
1277 if (p) {
1278 uint64_t authorizationID;
1279
1280 authorizationID = proc_uniqueid(p);
1281 if (authorizationID) {
1282 if (service->getAuthorizationID() == authorizationID) {
1283 return kIOReturnSuccess;
1284 }
1285 }
1286 }
1287
1288 return kIOReturnNotPermitted;
1289 }
1290
1291 IOReturn
1292 IOUserClient::clientHasPrivilege( void * securityToken,
1293 const char * privilegeName )
1294 {
1295 kern_return_t kr;
1296 security_token_t token;
1297 mach_msg_type_number_t count;
1298 task_t task;
1299 OSDictionary * user;
1300 bool secureConsole;
1301
1302
1303 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1304 sizeof(kIOClientPrivilegeForeground))) {
1305 if (task_is_gpu_denied(current_task())) {
1306 return kIOReturnNotPrivileged;
1307 } else {
1308 return kIOReturnSuccess;
1309 }
1310 }
1311
1312 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1313 sizeof(kIOClientPrivilegeConsoleSession))) {
1314 kauth_cred_t cred;
1315 proc_t p;
1316
1317 task = (task_t) securityToken;
1318 if (!task) {
1319 task = current_task();
1320 }
1321 p = (proc_t) get_bsdtask_info(task);
1322 kr = kIOReturnNotPrivileged;
1323
1324 if (p && (cred = kauth_cred_proc_ref(p))) {
1325 user = CopyUserOnConsole();
1326 if (user) {
1327 OSNumber * num;
1328 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1329 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
1330 kr = kIOReturnSuccess;
1331 }
1332 user->release();
1333 }
1334 kauth_cred_unref(&cred);
1335 }
1336 return kr;
1337 }
1338
1339 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1340 sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
1341 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1342 } else {
1343 task = (task_t)securityToken;
1344 }
1345
1346 count = TASK_SECURITY_TOKEN_COUNT;
1347 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1348
1349 if (KERN_SUCCESS != kr) {
1350 } else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1351 sizeof(kIOClientPrivilegeAdministrator))) {
1352 if (0 != token.val[0]) {
1353 kr = kIOReturnNotPrivileged;
1354 }
1355 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1356 sizeof(kIOClientPrivilegeLocalUser))) {
1357 user = CopyConsoleUser(token.val[0]);
1358 if (user) {
1359 user->release();
1360 } else {
1361 kr = kIOReturnNotPrivileged;
1362 }
1363 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1364 sizeof(kIOClientPrivilegeConsoleUser))) {
1365 user = CopyConsoleUser(token.val[0]);
1366 if (user) {
1367 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
1368 kr = kIOReturnNotPrivileged;
1369 } else if (secureConsole) {
1370 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1371 if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
1372 kr = kIOReturnNotPrivileged;
1373 }
1374 }
1375 user->release();
1376 } else {
1377 kr = kIOReturnNotPrivileged;
1378 }
1379 } else {
1380 kr = kIOReturnUnsupported;
1381 }
1382
1383 return kr;
1384 }
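/*
 * A minimal, illustrative use of clientHasPrivilege() from a user client's
 * initWithTask() (hypothetical subclass; only the privilege constant is real):
 *
 *   if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(securityID,
 *           kIOClientPrivilegeAdministrator)) {
 *       return false;     // only root may open this connection
 *   }
 */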
1385
1386 OSDictionary *
1387 IOUserClient::copyClientEntitlements(task_t task)
1388 {
1389 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1390
1391 proc_t p = NULL;
1392 pid_t pid = 0;
1393 size_t len = 0;
1394 void *entitlements_blob = NULL;
1395 char *entitlements_data = NULL;
1396 OSObject *entitlements_obj = NULL;
1397 OSDictionary *entitlements = NULL;
1398 OSString *errorString = NULL;
1399
1400 p = (proc_t)get_bsdtask_info(task);
1401 if (p == NULL) {
1402 goto fail;
1403 }
1404 pid = proc_pid(p);
1405
1406 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1407 if (entitlements) {
1408 return entitlements;
1409 }
1410 }
1411
1412 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
1413 goto fail;
1414 }
1415
1416 if (len <= offsetof(CS_GenericBlob, data)) {
1417 goto fail;
1418 }
1419
1420 /*
1421 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1422 * we'll try to parse in the kernel.
1423 */
1424 len -= offsetof(CS_GenericBlob, data);
1425 if (len > MAX_ENTITLEMENTS_LEN) {
1426 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n",
1427 proc_best_name(p), pid, len, MAX_ENTITLEMENTS_LEN);
1428 goto fail;
1429 }
1430
1431 /*
1432 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1433 * what is stored in the entitlements blob. Copy the string and
1434 * terminate it.
1435 */
1436 entitlements_data = (char *)IOMalloc(len + 1);
1437 if (entitlements_data == NULL) {
1438 goto fail;
1439 }
1440 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1441 entitlements_data[len] = '\0';
1442
1443 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1444 if (errorString != NULL) {
1445 IOLog("failed to parse entitlements for %s[%u]: %s\n",
1446 proc_best_name(p), pid, errorString->getCStringNoCopy());
1447 goto fail;
1448 }
1449 if (entitlements_obj == NULL) {
1450 goto fail;
1451 }
1452
1453 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1454 if (entitlements == NULL) {
1455 goto fail;
1456 }
1457 entitlements_obj = NULL;
1458
1459 fail:
1460 if (entitlements_data != NULL) {
1461 IOFree(entitlements_data, len + 1);
1462 }
1463 if (entitlements_obj != NULL) {
1464 entitlements_obj->release();
1465 }
1466 if (errorString != NULL) {
1467 errorString->release();
1468 }
1469 return entitlements;
1470 }
1471
1472 OSObject *
1473 IOUserClient::copyClientEntitlement( task_t task,
1474 const char * entitlement )
1475 {
1476 OSDictionary *entitlements;
1477 OSObject *value;
1478
1479 entitlements = copyClientEntitlements(task);
1480 if (entitlements == NULL) {
1481 return NULL;
1482 }
1483
1484 /* Fetch the entitlement value from the dictionary. */
1485 value = entitlements->getObject(entitlement);
1486 if (value != NULL) {
1487 value->retain();
1488 }
1489
1490 entitlements->release();
1491 return value;
1492 }
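/*
 * A minimal, illustrative entitlement check (the entitlement string is a made-up
 * example, not a real Apple entitlement):
 *
 *   OSObject * ent = IOUserClient::copyClientEntitlement(owningTask,
 *       "com.example.allow-driver-access");
 *   bool allowed = (ent == kOSBooleanTrue);   // boolean entitlements unserialize to kOSBooleanTrue
 *   OSSafeReleaseNULL(ent);
 */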
1493
1494 bool
1495 IOUserClient::init()
1496 {
1497 if (getPropertyTable() || super::init()) {
1498 return reserve();
1499 }
1500
1501 return false;
1502 }
1503
1504 bool
1505 IOUserClient::init(OSDictionary * dictionary)
1506 {
1507 if (getPropertyTable() || super::init(dictionary)) {
1508 return reserve();
1509 }
1510
1511 return false;
1512 }
1513
1514 bool
1515 IOUserClient::initWithTask(task_t owningTask,
1516 void * securityID,
1517 UInt32 type )
1518 {
1519 if (getPropertyTable() || super::init()) {
1520 return reserve();
1521 }
1522
1523 return false;
1524 }
1525
1526 bool
1527 IOUserClient::initWithTask(task_t owningTask,
1528 void * securityID,
1529 UInt32 type,
1530 OSDictionary * properties )
1531 {
1532 bool ok;
1533
1534 ok = super::init( properties );
1535 ok &= initWithTask( owningTask, securityID, type );
1536
1537 return ok;
1538 }
1539
1540 bool
1541 IOUserClient::reserve()
1542 {
1543 if (!reserved) {
1544 reserved = IONew(ExpansionData, 1);
1545 if (!reserved) {
1546 return false;
1547 }
1548 }
1549 setTerminateDefer(NULL, true);
1550 IOStatisticsRegisterCounter();
1551
1552 return true;
1553 }
1554
1555 struct IOUserClientOwner {
1556 task_t task;
1557 queue_chain_t taskLink;
1558 IOUserClient * uc;
1559 queue_chain_t ucLink;
1560 };
1561
1562 IOReturn
1563 IOUserClient::registerOwner(task_t task)
1564 {
1565 IOUserClientOwner * owner;
1566 IOReturn ret;
1567 bool newOwner;
1568
1569 IOLockLock(gIOUserClientOwnersLock);
1570
1571 newOwner = true;
1572 ret = kIOReturnSuccess;
1573
1574 if (!owners.next) {
1575 queue_init(&owners);
1576 } else {
1577 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1578 {
1579 if (task != owner->task) {
1580 continue;
1581 }
1582 newOwner = false;
1583 break;
1584 }
1585 }
1586 if (newOwner) {
1587 owner = IONew(IOUserClientOwner, 1);
1588 if (!owner) {
1589 ret = kIOReturnNoMemory;
1590 } else {
1591 owner->task = task;
1592 owner->uc = this;
1593 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1594 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1595 if (messageAppSuspended) {
1596 task_set_message_app_suspended(task, true);
1597 }
1598 }
1599 }
1600
1601 IOLockUnlock(gIOUserClientOwnersLock);
1602
1603 return ret;
1604 }
1605
1606 void
1607 IOUserClient::noMoreSenders(void)
1608 {
1609 IOUserClientOwner * owner;
1610 IOUserClientOwner * iter;
1611 queue_head_t * taskque;
1612 bool hasMessageAppSuspended;
1613
1614 IOLockLock(gIOUserClientOwnersLock);
1615
1616 if (owners.next) {
1617 while (!queue_empty(&owners)) {
1618 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1619 taskque = task_io_user_clients(owner->task);
1620 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1621 hasMessageAppSuspended = false;
1622 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1623 hasMessageAppSuspended = iter->uc->messageAppSuspended;
1624 if (hasMessageAppSuspended) {
1625 break;
1626 }
1627 }
1628 task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
1629 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1630 IODelete(owner, IOUserClientOwner, 1);
1631 }
1632 owners.next = owners.prev = NULL;
1633 }
1634
1635 IOLockUnlock(gIOUserClientOwnersLock);
1636 }
1637
1638
1639 extern "C" void
1640 iokit_task_app_suspended_changed(task_t task)
1641 {
1642 queue_head_t * taskque;
1643 IOUserClientOwner * owner;
1644 OSSet * set;
1645
1646 IOLockLock(gIOUserClientOwnersLock);
1647
1648 taskque = task_io_user_clients(task);
1649 set = NULL;
1650 queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
1651 if (!owner->uc->messageAppSuspended) {
1652 continue;
1653 }
1654 if (!set) {
1655 set = OSSet::withCapacity(4);
1656 if (!set) {
1657 break;
1658 }
1659 }
1660 set->setObject(owner->uc);
1661 }
1662
1663 IOLockUnlock(gIOUserClientOwnersLock);
1664
1665 if (set) {
1666 set->iterateObjects(^bool (OSObject * obj) {
1667 IOUserClient * uc;
1668
1669 uc = (typeof(uc))obj;
1670 #if 0
1671 {
1672 OSString * str;
1673 str = IOCopyLogNameForPID(task_pid(task));
1674 IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
1675 uc->getName(), task_is_app_suspended(task));
1676 OSSafeReleaseNULL(str);
1677 }
1678 #endif
1679 uc->message(kIOMessageTaskAppSuspendedChange, NULL);
1680
1681 return false;
1682 });
1683 set->release();
1684 }
1685 }
1686
1687 extern "C" kern_return_t
1688 iokit_task_terminate(task_t task)
1689 {
1690 IOUserClientOwner * owner;
1691 IOUserClient * dead;
1692 IOUserClient * uc;
1693 queue_head_t * taskque;
1694
1695 IOLockLock(gIOUserClientOwnersLock);
1696
1697 taskque = task_io_user_clients(task);
1698 dead = NULL;
1699 while (!queue_empty(taskque)) {
1700 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1701 uc = owner->uc;
1702 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1703 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1704 if (queue_empty(&uc->owners)) {
1705 uc->retain();
1706 IOLog("destroying out of band connect for %s\n", uc->getName());
1707 // now using the uc queue head as a singly linked queue,
1708 // leaving .next as NULL to mark it empty
1709 uc->owners.next = NULL;
1710 uc->owners.prev = (queue_entry_t) dead;
1711 dead = uc;
1712 }
1713 IODelete(owner, IOUserClientOwner, 1);
1714 }
1715
1716 IOLockUnlock(gIOUserClientOwnersLock);
1717
1718 while (dead) {
1719 uc = dead;
1720 dead = (IOUserClient *)(void *) dead->owners.prev;
1721 uc->owners.prev = NULL;
1722 if (uc->sharedInstance || !uc->closed) {
1723 uc->clientDied();
1724 }
1725 uc->release();
1726 }
1727
1728 return KERN_SUCCESS;
1729 }
1730
1731 void
1732 IOUserClient::free()
1733 {
1734 if (mappings) {
1735 mappings->release();
1736 }
1737 if (lock) {
1738 IOLockFree(lock);
1739 }
1740
1741 IOStatisticsUnregisterCounter();
1742
1743 assert(!owners.next);
1744 assert(!owners.prev);
1745
1746 if (reserved) {
1747 IODelete(reserved, ExpansionData, 1);
1748 }
1749
1750 super::free();
1751 }
1752
1753 IOReturn
1754 IOUserClient::clientDied( void )
1755 {
1756 IOReturn ret = kIOReturnNotReady;
1757
1758 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1759 ret = clientClose();
1760 }
1761
1762 return ret;
1763 }
1764
1765 IOReturn
1766 IOUserClient::clientClose( void )
1767 {
1768 return kIOReturnUnsupported;
1769 }
1770
1771 IOService *
1772 IOUserClient::getService( void )
1773 {
1774 return NULL;
1775 }
1776
1777 IOReturn
1778 IOUserClient::registerNotificationPort(
1779 mach_port_t /* port */,
1780 UInt32 /* type */,
1781 UInt32 /* refCon */)
1782 {
1783 return kIOReturnUnsupported;
1784 }
1785
1786 IOReturn
1787 IOUserClient::registerNotificationPort(
1788 mach_port_t port,
1789 UInt32 type,
1790 io_user_reference_t refCon)
1791 {
1792 return registerNotificationPort(port, type, (UInt32) refCon);
1793 }
1794
1795 IOReturn
1796 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1797 semaphore_t * semaphore )
1798 {
1799 return kIOReturnUnsupported;
1800 }
1801
1802 IOReturn
1803 IOUserClient::connectClient( IOUserClient * /* client */ )
1804 {
1805 return kIOReturnUnsupported;
1806 }
1807
1808 IOReturn
1809 IOUserClient::clientMemoryForType( UInt32 type,
1810 IOOptionBits * options,
1811 IOMemoryDescriptor ** memory )
1812 {
1813 return kIOReturnUnsupported;
1814 }
1815
1816 #if !__LP64__
1817 IOMemoryMap *
1818 IOUserClient::mapClientMemory(
1819 IOOptionBits type,
1820 task_t task,
1821 IOOptionBits mapFlags,
1822 IOVirtualAddress atAddress )
1823 {
1824 return NULL;
1825 }
1826 #endif
1827
1828 IOMemoryMap *
1829 IOUserClient::mapClientMemory64(
1830 IOOptionBits type,
1831 task_t task,
1832 IOOptionBits mapFlags,
1833 mach_vm_address_t atAddress )
1834 {
1835 IOReturn err;
1836 IOOptionBits options = 0;
1837 IOMemoryDescriptor * memory = NULL;
1838 IOMemoryMap * map = NULL;
1839
1840 err = clientMemoryForType((UInt32) type, &options, &memory );
1841
1842 if (memory && (kIOReturnSuccess == err)) {
1843 FAKE_STACK_FRAME(getMetaClass());
1844
1845 options = (options & ~kIOMapUserOptionsMask)
1846 | (mapFlags & kIOMapUserOptionsMask);
1847 map = memory->createMappingInTask( task, atAddress, options );
1848 memory->release();
1849
1850 FAKE_STACK_FRAME_END();
1851 }
1852
1853 return map;
1854 }
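/*
 * A minimal, illustrative clientMemoryForType() override (MyUserClient and
 * fSharedBuffer are hypothetical; the contract that the returned descriptor is
 * consumed comes from mapClientMemory64() above, which releases it after mapping):
 *
 *   IOReturn MyUserClient::clientMemoryForType(UInt32 type,
 *       IOOptionBits * options, IOMemoryDescriptor ** memory)
 *   {
 *       if (type != 0 || !fSharedBuffer) {
 *           return kIOReturnBadArgument;
 *       }
 *       fSharedBuffer->retain();        // consumed by mapClientMemory64()
 *       *options = 0;
 *       *memory  = fSharedBuffer;       // e.g. an IOBufferMemoryDescriptor
 *       return kIOReturnSuccess;
 *   }
 *
 * User space then maps it with IOConnectMapMemory64(connect, 0, mach_task_self(),
 * &address, &size, kIOMapAnywhere).
 */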
1855
1856 IOReturn
1857 IOUserClient::exportObjectToClient(task_t task,
1858 OSObject *obj, io_object_t *clientObj)
1859 {
1860 mach_port_name_t name;
1861
1862 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1863
1864 *clientObj = (io_object_t)(uintptr_t) name;
1865
1866 if (obj) {
1867 obj->release();
1868 }
1869
1870 return kIOReturnSuccess;
1871 }
1872
1873 IOReturn
1874 IOUserClient::copyPortNameForObjectInTask(task_t task,
1875 OSObject *obj, mach_port_name_t * port_name)
1876 {
1877 mach_port_name_t name;
1878
1879 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1880
1881 *(mach_port_name_t *) port_name = name;
1882
1883 return kIOReturnSuccess;
1884 }
1885
1886 IOReturn
1887 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1888 OSObject **obj)
1889 {
1890 OSObject * object;
1891
1892 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1893
1894 *obj = object;
1895
1896 return object ? kIOReturnSuccess : kIOReturnIPCError;
1897 }
1898
1899 IOReturn
1900 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
1901 {
1902 return iokit_mod_send_right(task, port_name, delta);
1903 }
1904
1905 IOExternalMethod *
1906 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1907 {
1908 return NULL;
1909 }
1910
1911 IOExternalAsyncMethod *
1912 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1913 {
1914 return NULL;
1915 }
1916
1917 IOExternalTrap *
1918 IOUserClient::
1919 getExternalTrapForIndex(UInt32 index)
1920 {
1921 return NULL;
1922 }
1923
1924 #pragma clang diagnostic push
1925 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1926
1927 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1928 // functions can break clients of kexts implementing getExternalMethodForIndex()
1929 IOExternalMethod *
1930 IOUserClient::
1931 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1932 {
1933 IOExternalMethod *method = getExternalMethodForIndex(index);
1934
1935 if (method) {
1936 *targetP = (IOService *) method->object;
1937 }
1938
1939 return method;
1940 }
1941
1942 IOExternalAsyncMethod *
1943 IOUserClient::
1944 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1945 {
1946 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1947
1948 if (method) {
1949 *targetP = (IOService *) method->object;
1950 }
1951
1952 return method;
1953 }
1954
1955 IOExternalTrap *
1956 IOUserClient::
1957 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1958 {
1959 IOExternalTrap *trap = getExternalTrapForIndex(index);
1960
1961 if (trap) {
1962 *targetP = trap->object;
1963 }
1964
1965 return trap;
1966 }
1967 #pragma clang diagnostic pop
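/*
 * A minimal, illustrative legacy dispatch table (MyUserClient and doSomething are
 * hypothetical). Kexts that predate externalMethod() typically back
 * getTargetAndMethodForIndex() with a static IOExternalMethod array:
 *
 *   IOExternalMethod * MyUserClient::getExternalMethodForIndex(UInt32 index)
 *   {
 *       static IOExternalMethod sMethods[] = {
 *           // object, func, flags, count0 (inputs), count1 (outputs)
 *           { NULL, (IOMethod) &MyUserClient::doSomething, kIOUCScalarIScalarO, 1, 1 },
 *       };
 *       if (index >= (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *           return NULL;
 *       }
 *       sMethods[index].object = this;  // becomes *targetP in getTargetAndMethodForIndex()
 *       return &sMethods[index];
 *   }
 */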
1968
1969 IOReturn
1970 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1971 {
1972 mach_port_t port;
1973 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1974
1975 if (MACH_PORT_NULL != port) {
1976 iokit_release_port_send(port);
1977 }
1978
1979 return kIOReturnSuccess;
1980 }
1981
1982 IOReturn
1983 IOUserClient::releaseNotificationPort(mach_port_t port)
1984 {
1985 if (MACH_PORT_NULL != port) {
1986 iokit_release_port_send(port);
1987 }
1988
1989 return kIOReturnSuccess;
1990 }
1991
1992 IOReturn
1993 IOUserClient::sendAsyncResult(OSAsyncReference reference,
1994 IOReturn result, void *args[], UInt32 numArgs)
1995 {
1996 OSAsyncReference64 reference64;
1997 io_user_reference_t args64[kMaxAsyncArgs];
1998 unsigned int idx;
1999
2000 if (numArgs > kMaxAsyncArgs) {
2001 return kIOReturnMessageTooLarge;
2002 }
2003
2004 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2005 reference64[idx] = REF64(reference[idx]);
2006 }
2007
2008 for (idx = 0; idx < numArgs; idx++) {
2009 args64[idx] = REF64(args[idx]);
2010 }
2011
2012 return sendAsyncResult64(reference64, result, args64, numArgs);
2013 }
2014
2015 IOReturn
2016 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
2017 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2018 {
2019 return _sendAsyncResult64(reference, result, args, numArgs, options);
2020 }
2021
2022 IOReturn
2023 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
2024 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
2025 {
2026 return _sendAsyncResult64(reference, result, args, numArgs, 0);
2027 }
2028
2029 IOReturn
2030 IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
2031 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2032 {
2033 struct ReplyMsg {
2034 mach_msg_header_t msgHdr;
2035 union{
2036 struct{
2037 OSNotificationHeader notifyHdr;
2038 IOAsyncCompletionContent asyncContent;
2039 uint32_t args[kMaxAsyncArgs];
2040 } msg32;
2041 struct{
2042 OSNotificationHeader64 notifyHdr;
2043 IOAsyncCompletionContent asyncContent;
2044 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
2045 } msg64;
2046 } m;
2047 };
2048 ReplyMsg replyMsg;
2049 mach_port_t replyPort;
2050 kern_return_t kr;
2051
2052 // If no reply port, do nothing.
2053 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2054 if (replyPort == MACH_PORT_NULL) {
2055 return kIOReturnSuccess;
2056 }
2057
2058 if (numArgs > kMaxAsyncArgs) {
2059 return kIOReturnMessageTooLarge;
2060 }
2061
2062 bzero(&replyMsg, sizeof(replyMsg));
2063 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
2064 0 /*local*/);
2065 replyMsg.msgHdr.msgh_remote_port = replyPort;
2066 replyMsg.msgHdr.msgh_local_port = NULL;
2067 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
2068 if (kIOUCAsync64Flag & reference[0]) {
2069 replyMsg.msgHdr.msgh_size =
2070 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
2071 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
2072 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2073 + numArgs * sizeof(io_user_reference_t);
2074 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
2075 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
2076
2077 replyMsg.m.msg64.asyncContent.result = result;
2078 if (numArgs) {
2079 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
2080 }
2081 } else {
2082 unsigned int idx;
2083
2084 replyMsg.msgHdr.msgh_size =
2085 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
2086 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
2087
2088 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2089 + numArgs * sizeof(uint32_t);
2090 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
2091
2092 for (idx = 0; idx < kOSAsyncRefCount; idx++) {
2093 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
2094 }
2095
2096 replyMsg.m.msg32.asyncContent.result = result;
2097
2098 for (idx = 0; idx < numArgs; idx++) {
2099 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
2100 }
2101 }
2102
2103 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
2104 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
2105 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
2106 } else {
2107 /* Fail on full queue. */
2108 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
2109 replyMsg.msgHdr.msgh_size);
2110 }
2111 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
2112 reference[0] |= kIOUCAsyncErrorLoggedFlag;
2113 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
2114 }
2115 return kr;
2116 }
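/*
 * A minimal, illustrative async completion flow (fCompletionRef is a hypothetical
 * OSAsyncReference64 member; the argument fields come from IOExternalMethodArguments).
 * The driver stashes the async reference handed to its external method, then
 * completes it later with sendAsyncResult64():
 *
 *   // inside an async external method:
 *   if (arguments->asyncWakePort == MACH_PORT_NULL) {
 *       return kIOReturnBadArgument;
 *   }
 *   bcopy(arguments->asyncReference, fCompletionRef, sizeof(OSAsyncReference64));
 *
 *   // later, when the operation finishes:
 *   io_user_reference_t results[1] = { bytesTransferred };   // bytesTransferred: hypothetical
 *   sendAsyncResult64(fCompletionRef, kIOReturnSuccess, results, 1);
 */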
2117
2118
2119 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2120
2121 extern "C" {
2122 #define CHECK(cls, obj, out) \
2123 cls * out; \
2124 if( !(out = OSDynamicCast( cls, obj))) \
2125 return( kIOReturnBadArgument )
2126
2127 #define CHECKLOCKED(cls, obj, out) \
2128 IOUserIterator * oIter; \
2129 cls * out; \
2130 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
2131 return (kIOReturnBadArgument); \
2132 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
2133 return (kIOReturnBadArgument)
2134
2135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2136
2137 // Create a vm_map_copy_t or kalloc'ed data for memory
2138 // to be copied out. ipc will free after the copyout.
2139
2140 static kern_return_t
2141 copyoutkdata( const void * data, vm_size_t len,
2142 io_buf_ptr_t * buf )
2143 {
2144 kern_return_t err;
2145 vm_map_copy_t copy;
2146
2147 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2148 false /* src_destroy */, &copy);
2149
2150 assert( err == KERN_SUCCESS );
2151 if (err == KERN_SUCCESS) {
2152 *buf = (char *) copy;
2153 }
2154
2155 return err;
2156 }
2157
2158 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2159
2160 /* Routine io_server_version */
2161 kern_return_t
2162 is_io_server_version(
2163 mach_port_t master_port,
2164 uint64_t *version)
2165 {
2166 *version = IOKIT_SERVER_VERSION;
2167 return kIOReturnSuccess;
2168 }
2169
2170 /* Routine io_object_get_class */
2171 kern_return_t
2172 is_io_object_get_class(
2173 io_object_t object,
2174 io_name_t className )
2175 {
2176 const OSMetaClass* my_obj = NULL;
2177
2178 if (!object) {
2179 return kIOReturnBadArgument;
2180 }
2181
2182 my_obj = object->getMetaClass();
2183 if (!my_obj) {
2184 return kIOReturnNotFound;
2185 }
2186
2187 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2188
2189 return kIOReturnSuccess;
2190 }
2191
2192 /* Routine io_object_get_superclass */
2193 kern_return_t
2194 is_io_object_get_superclass(
2195 mach_port_t master_port,
2196 io_name_t obj_name,
2197 io_name_t class_name)
2198 {
2199 IOReturn ret;
2200 const OSMetaClass * meta;
2201 const OSMetaClass * super;
2202 const OSSymbol * name;
2203 const char * cstr;
2204
2205 if (!obj_name || !class_name) {
2206 return kIOReturnBadArgument;
2207 }
2208 if (master_port != master_device_port) {
2209 return kIOReturnNotPrivileged;
2210 }
2211
2212 ret = kIOReturnNotFound;
2213 meta = NULL;
2214 do{
2215 name = OSSymbol::withCString(obj_name);
2216 if (!name) {
2217 break;
2218 }
2219 meta = OSMetaClass::copyMetaClassWithName(name);
2220 if (!meta) {
2221 break;
2222 }
2223 super = meta->getSuperClass();
2224 if (!super) {
2225 break;
2226 }
2227 cstr = super->getClassName();
2228 if (!cstr) {
2229 break;
2230 }
2231 strlcpy(class_name, cstr, sizeof(io_name_t));
2232 ret = kIOReturnSuccess;
2233 }while (false);
2234
2235 OSSafeReleaseNULL(name);
2236 if (meta) {
2237 meta->releaseMetaClass();
2238 }
2239
2240 return ret;
2241 }
2242
2243 /* Routine io_object_get_bundle_identifier */
2244 kern_return_t
2245 is_io_object_get_bundle_identifier(
2246 mach_port_t master_port,
2247 io_name_t obj_name,
2248 io_name_t bundle_name)
2249 {
2250 IOReturn ret;
2251 const OSMetaClass * meta;
2252 const OSSymbol * name;
2253 const OSSymbol * identifier;
2254 const char * cstr;
2255
2256 if (!obj_name || !bundle_name) {
2257 return kIOReturnBadArgument;
2258 }
2259 if (master_port != master_device_port) {
2260 return kIOReturnNotPrivileged;
2261 }
2262
2263 ret = kIOReturnNotFound;
2264 meta = NULL;
2265 do{
2266 name = OSSymbol::withCString(obj_name);
2267 if (!name) {
2268 break;
2269 }
2270 meta = OSMetaClass::copyMetaClassWithName(name);
2271 if (!meta) {
2272 break;
2273 }
2274 identifier = meta->getKmodName();
2275 if (!identifier) {
2276 break;
2277 }
2278 cstr = identifier->getCStringNoCopy();
2279 if (!cstr) {
2280 break;
2281 }
2282 strlcpy(bundle_name, cstr, sizeof(io_name_t));
2283 ret = kIOReturnSuccess;
2284 }while (false);
2285
2286 OSSafeReleaseNULL(name);
2287 if (meta) {
2288 meta->releaseMetaClass();
2289 }
2290
2291 return ret;
2292 }
2293
2294 /* Routine io_object_conforms_to */
2295 kern_return_t
2296 is_io_object_conforms_to(
2297 io_object_t object,
2298 io_name_t className,
2299 boolean_t *conforms )
2300 {
2301 if (!object) {
2302 return kIOReturnBadArgument;
2303 }
2304
2305 *conforms = (NULL != object->metaCast( className ));
2306
2307 return kIOReturnSuccess;
2308 }
2309
2310 /* Routine io_object_get_retain_count */
2311 kern_return_t
2312 is_io_object_get_retain_count(
2313 io_object_t object,
2314 uint32_t *retainCount )
2315 {
2316 if (!object) {
2317 return kIOReturnBadArgument;
2318 }
2319
2320 *retainCount = object->getRetainCount();
2321 return kIOReturnSuccess;
2322 }
2323
2324 /* Routine io_iterator_next */
2325 kern_return_t
2326 is_io_iterator_next(
2327 io_object_t iterator,
2328 io_object_t *object )
2329 {
2330 IOReturn ret;
2331 OSObject * obj;
2332 OSIterator * iter;
2333 IOUserIterator * uiter;
2334
2335 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2336 obj = uiter->copyNextObject();
2337 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2338 obj = iter->getNextObject();
2339 if (obj) {
2340 obj->retain();
2341 }
2342 } else {
2343 return kIOReturnBadArgument;
2344 }
2345
2346 if (obj) {
2347 *object = obj;
2348 ret = kIOReturnSuccess;
2349 } else {
2350 ret = kIOReturnNoDevice;
2351 }
2352
2353 return ret;
2354 }
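
/* Editor's note (illustrative sketch, not part of the original file): draining an
 * iterator returned by the matching/registry calls below. Each IOIteratorNext()
 * call lands in is_io_iterator_next() above, which retains the object before
 * handing it out, so the caller releases it. */
#if 0
#include <stdio.h>
#include <IOKit/IOKitLib.h>

static void
DrainIterator(io_iterator_t iter)
{
	io_object_t obj;

	while ((obj = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
		io_name_t className;
		if (KERN_SUCCESS == IOObjectGetClass(obj, className)) {
			printf("%s\n", className);
		}
		IOObjectRelease(obj);
	}
}
#endif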
2355
2356 /* Routine io_iterator_reset */
2357 kern_return_t
2358 is_io_iterator_reset(
2359 io_object_t iterator )
2360 {
2361 CHECK( OSIterator, iterator, iter );
2362
2363 iter->reset();
2364
2365 return kIOReturnSuccess;
2366 }
2367
2368 /* Routine io_iterator_is_valid */
2369 kern_return_t
2370 is_io_iterator_is_valid(
2371 io_object_t iterator,
2372 boolean_t *is_valid )
2373 {
2374 CHECK( OSIterator, iterator, iter );
2375
2376 *is_valid = iter->isValid();
2377
2378 return kIOReturnSuccess;
2379 }
2380
2381
2382 static kern_return_t
2383 internal_io_service_match_property_table(
2384 io_service_t _service,
2385 const char * matching,
2386 mach_msg_type_number_t matching_size,
2387 boolean_t *matches)
2388 {
2389 CHECK( IOService, _service, service );
2390
2391 kern_return_t kr;
2392 OSObject * obj;
2393 OSDictionary * dict;
2394
2395 assert(matching_size);
2396 obj = OSUnserializeXML(matching, matching_size);
2397
2398 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2399 *matches = service->passiveMatch( dict );
2400 kr = kIOReturnSuccess;
2401 } else {
2402 kr = kIOReturnBadArgument;
2403 }
2404
2405 if (obj) {
2406 obj->release();
2407 }
2408
2409 return kr;
2410 }
2411
2412 /* Routine io_service_match_property_table */
2413 kern_return_t
2414 is_io_service_match_property_table(
2415 io_service_t service,
2416 io_string_t matching,
2417 boolean_t *matches )
2418 {
2419 return kIOReturnUnsupported;
2420 }
2421
2422
2423 /* Routine io_service_match_property_table_ool */
2424 kern_return_t
2425 is_io_service_match_property_table_ool(
2426 io_object_t service,
2427 io_buf_ptr_t matching,
2428 mach_msg_type_number_t matchingCnt,
2429 kern_return_t *result,
2430 boolean_t *matches )
2431 {
2432 kern_return_t kr;
2433 vm_offset_t data;
2434 vm_map_offset_t map_data;
2435
2436 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2437 data = CAST_DOWN(vm_offset_t, map_data);
2438
2439 if (KERN_SUCCESS == kr) {
2440 // must return success after vm_map_copyout() succeeds
2441 *result = internal_io_service_match_property_table(service,
2442 (const char *)data, matchingCnt, matches );
2443 vm_deallocate( kernel_map, data, matchingCnt );
2444 }
2445
2446 return kr;
2447 }
2448
2449 /* Routine io_service_match_property_table_bin */
2450 kern_return_t
2451 is_io_service_match_property_table_bin(
2452 io_object_t service,
2453 io_struct_inband_t matching,
2454 mach_msg_type_number_t matchingCnt,
2455 boolean_t *matches)
2456 {
2457 return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2458 }
2459
2460 static kern_return_t
2461 internal_io_service_get_matching_services(
2462 mach_port_t master_port,
2463 const char * matching,
2464 mach_msg_type_number_t matching_size,
2465 io_iterator_t *existing )
2466 {
2467 kern_return_t kr;
2468 OSObject * obj;
2469 OSDictionary * dict;
2470
2471 if (master_port != master_device_port) {
2472 return kIOReturnNotPrivileged;
2473 }
2474
2475 assert(matching_size);
2476 obj = OSUnserializeXML(matching, matching_size);
2477
2478 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2479 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2480 kr = kIOReturnSuccess;
2481 } else {
2482 kr = kIOReturnBadArgument;
2483 }
2484
2485 if (obj) {
2486 obj->release();
2487 }
2488
2489 return kr;
2490 }
2491
2492 /* Routine io_service_get_matching_services */
2493 kern_return_t
2494 is_io_service_get_matching_services(
2495 mach_port_t master_port,
2496 io_string_t matching,
2497 io_iterator_t *existing )
2498 {
2499 return kIOReturnUnsupported;
2500 }
2501
2502 /* Routine io_service_get_matching_services_ool */
2503 kern_return_t
2504 is_io_service_get_matching_services_ool(
2505 mach_port_t master_port,
2506 io_buf_ptr_t matching,
2507 mach_msg_type_number_t matchingCnt,
2508 kern_return_t *result,
2509 io_object_t *existing )
2510 {
2511 kern_return_t kr;
2512 vm_offset_t data;
2513 vm_map_offset_t map_data;
2514
2515 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2516 data = CAST_DOWN(vm_offset_t, map_data);
2517
2518 if (KERN_SUCCESS == kr) {
2519 // must return success after vm_map_copyout() succeeds
2520 // and mig will copy out objects on success
2521 *existing = NULL;
2522 *result = internal_io_service_get_matching_services(master_port,
2523 (const char *) data, matchingCnt, existing);
2524 vm_deallocate( kernel_map, data, matchingCnt );
2525 }
2526
2527 return kr;
2528 }
2529
2530 /* Routine io_service_get_matching_services_bin */
2531 kern_return_t
2532 is_io_service_get_matching_services_bin(
2533 mach_port_t master_port,
2534 io_struct_inband_t matching,
2535 mach_msg_type_number_t matchingCnt,
2536 io_object_t *existing)
2537 {
2538 return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
2539 }
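
/* Editor's note (illustrative sketch, not part of the original file): in user space,
 * IOServiceGetMatchingServices() drives the matching routines above; IOKitLib
 * serializes the matching CFDictionary that the kernel unserializes here. The class
 * name "IOUSBHostDevice" is only an example. */
#if 0
#include <IOKit/IOKitLib.h>

static kern_return_t
ListMatchingServices(void)
{
	io_iterator_t iter = IO_OBJECT_NULL;
	kern_return_t kr;

	kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
	    IOServiceMatching("IOUSBHostDevice"), &iter);
	if (KERN_SUCCESS == kr) {
		// walk with IOIteratorNext()/IOObjectRelease(), then drop the iterator
		IOObjectRelease(iter);
	}
	return kr;
}
#endif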
2540
2541
2542 static kern_return_t
2543 internal_io_service_get_matching_service(
2544 mach_port_t master_port,
2545 const char * matching,
2546 mach_msg_type_number_t matching_size,
2547 io_service_t *service )
2548 {
2549 kern_return_t kr;
2550 OSObject * obj;
2551 OSDictionary * dict;
2552
2553 if (master_port != master_device_port) {
2554 return kIOReturnNotPrivileged;
2555 }
2556
2557 assert(matching_size);
2558 obj = OSUnserializeXML(matching, matching_size);
2559
2560 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2561 *service = IOService::copyMatchingService( dict );
2562 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2563 } else {
2564 kr = kIOReturnBadArgument;
2565 }
2566
2567 if (obj) {
2568 obj->release();
2569 }
2570
2571 return kr;
2572 }
2573
2574 /* Routine io_service_get_matching_service */
2575 kern_return_t
2576 is_io_service_get_matching_service(
2577 mach_port_t master_port,
2578 io_string_t matching,
2579 io_service_t *service )
2580 {
2581 return kIOReturnUnsupported;
2582 }
2583
2584 /* Routine io_service_get_matching_service_ool */
2585 kern_return_t
2586 is_io_service_get_matching_service_ool(
2587 mach_port_t master_port,
2588 io_buf_ptr_t matching,
2589 mach_msg_type_number_t matchingCnt,
2590 kern_return_t *result,
2591 io_object_t *service )
2592 {
2593 kern_return_t kr;
2594 vm_offset_t data;
2595 vm_map_offset_t map_data;
2596
2597 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2598 data = CAST_DOWN(vm_offset_t, map_data);
2599
2600 if (KERN_SUCCESS == kr) {
2601 // must return success after vm_map_copyout() succeeds
2602 // and mig will copy out objects on success
2603 *service = NULL;
2604 *result = internal_io_service_get_matching_service(master_port,
2605 (const char *) data, matchingCnt, service );
2606 vm_deallocate( kernel_map, data, matchingCnt );
2607 }
2608
2609 return kr;
2610 }
2611
2612 /* Routine io_service_get_matching_service_bin */
2613 kern_return_t
2614 is_io_service_get_matching_service_bin(
2615 mach_port_t master_port,
2616 io_struct_inband_t matching,
2617 mach_msg_type_number_t matchingCnt,
2618 io_object_t *service)
2619 {
2620 return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
2621 }
2622
2623 static kern_return_t
2624 internal_io_service_add_notification(
2625 mach_port_t master_port,
2626 io_name_t notification_type,
2627 const char * matching,
2628 size_t matching_size,
2629 mach_port_t port,
2630 void * reference,
2631 vm_size_t referenceSize,
2632 bool client64,
2633 io_object_t * notification )
2634 {
2635 IOServiceUserNotification * userNotify = NULL;
2636 IONotifier * notify = NULL;
2637 const OSSymbol * sym;
2638 OSDictionary * dict;
2639 IOReturn err;
2640 unsigned long int userMsgType;
2641
2642 if (master_port != master_device_port) {
2643 return kIOReturnNotPrivileged;
2644 }
2645
2646 do {
2647 err = kIOReturnNoResources;
2648
2649 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2650 return kIOReturnMessageTooLarge;
2651 }
2652
2653 if (!(sym = OSSymbol::withCString( notification_type ))) {
2654 err = kIOReturnNoResources;
2655 }
2656
2657 assert(matching_size);
2658 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2659 if (!dict) {
2660 err = kIOReturnBadArgument;
2661 continue;
2662 }
2663
2664 if ((sym == gIOPublishNotification)
2665 || (sym == gIOFirstPublishNotification)) {
2666 userMsgType = kIOServicePublishNotificationType;
2667 } else if ((sym == gIOMatchedNotification)
2668 || (sym == gIOFirstMatchNotification)) {
2669 userMsgType = kIOServiceMatchedNotificationType;
2670 } else if ((sym == gIOTerminatedNotification)
2671 || (sym == gIOWillTerminateNotification)) {
2672 userMsgType = kIOServiceTerminatedNotificationType;
2673 } else {
2674 userMsgType = kLastIOKitNotificationType;
2675 }
2676
2677 userNotify = new IOServiceUserNotification;
2678
2679 if (userNotify && !userNotify->init( port, userMsgType,
2680 reference, referenceSize, client64)) {
2681 userNotify->release();
2682 userNotify = NULL;
2683 }
2684 if (!userNotify) {
2685 continue;
2686 }
2687
2688 notify = IOService::addMatchingNotification( sym, dict,
2689 &userNotify->_handler, userNotify );
2690 if (notify) {
2691 *notification = userNotify;
2692 userNotify->setNotification( notify );
2693 err = kIOReturnSuccess;
2694 } else {
2695 err = kIOReturnUnsupported;
2696 }
2697 } while (false);
2698
2699 if ((kIOReturnSuccess != err) && userNotify) {
2700 userNotify->invalidatePort();
2701 userNotify->release();
2702 userNotify = NULL;
2703 }
2704
2705 if (sym) {
2706 sym->release();
2707 }
2708 if (dict) {
2709 dict->release();
2710 }
2711
2712 return err;
2713 }
2714
2715
2716 /* Routine io_service_add_notification */
2717 kern_return_t
2718 is_io_service_add_notification(
2719 mach_port_t master_port,
2720 io_name_t notification_type,
2721 io_string_t matching,
2722 mach_port_t port,
2723 io_async_ref_t reference,
2724 mach_msg_type_number_t referenceCnt,
2725 io_object_t * notification )
2726 {
2727 return kIOReturnUnsupported;
2728 }
2729
2730 /* Routine io_service_add_notification_64 */
2731 kern_return_t
2732 is_io_service_add_notification_64(
2733 mach_port_t master_port,
2734 io_name_t notification_type,
2735 io_string_t matching,
2736 mach_port_t wake_port,
2737 io_async_ref64_t reference,
2738 mach_msg_type_number_t referenceCnt,
2739 io_object_t *notification )
2740 {
2741 return kIOReturnUnsupported;
2742 }
2743
2744 /* Routine io_service_add_notification_bin */
2745 kern_return_t
2746 is_io_service_add_notification_bin
2747 (
2748 mach_port_t master_port,
2749 io_name_t notification_type,
2750 io_struct_inband_t matching,
2751 mach_msg_type_number_t matchingCnt,
2752 mach_port_t wake_port,
2753 io_async_ref_t reference,
2754 mach_msg_type_number_t referenceCnt,
2755 io_object_t *notification)
2756 {
2757 io_async_ref_t zreference;
2758
2759 if (referenceCnt > ASYNC_REF_COUNT) {
2760 return kIOReturnBadArgument;
2761 }
2762 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2763 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2764
2765 return internal_io_service_add_notification(master_port, notification_type,
2766 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2767 false, notification);
2768 }
2769
2770 /* Routine io_service_add_notification_bin_64 */
2771 kern_return_t
2772 is_io_service_add_notification_bin_64
2773 (
2774 mach_port_t master_port,
2775 io_name_t notification_type,
2776 io_struct_inband_t matching,
2777 mach_msg_type_number_t matchingCnt,
2778 mach_port_t wake_port,
2779 io_async_ref64_t reference,
2780 mach_msg_type_number_t referenceCnt,
2781 io_object_t *notification)
2782 {
2783 io_async_ref64_t zreference;
2784
2785 if (referenceCnt > ASYNC_REF64_COUNT) {
2786 return kIOReturnBadArgument;
2787 }
2788 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2789 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2790
2791 return internal_io_service_add_notification(master_port, notification_type,
2792 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2793 true, notification);
2794 }
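
/* Editor's note (illustrative sketch, not part of the original file): a hedged
 * user-space example of IOServiceAddMatchingNotification(), the usual caller of
 * the add_notification routines above. The matched class and callback are
 * assumptions. */
#if 0
#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>

static void
DeviceAppeared(void *refcon, io_iterator_t iter)
{
	io_object_t obj;
	while ((obj = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
		IOObjectRelease(obj);
	}
}

static void
WatchForDevices(void)
{
	IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
	io_iterator_t iter = IO_OBJECT_NULL;

	CFRunLoopAddSource(CFRunLoopGetCurrent(),
	    IONotificationPortGetRunLoopSource(port), kCFRunLoopDefaultMode);

	// kIOFirstMatchNotification corresponds to gIOFirstMatchNotification above.
	(void) IOServiceAddMatchingNotification(port, kIOFirstMatchNotification,
	    IOServiceMatching("IOUSBHostDevice"), DeviceAppeared, NULL, &iter);

	DeviceAppeared(NULL, iter); // drain once to arm the notification
}
#endif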
2795
2796 static kern_return_t
2797 internal_io_service_add_notification_ool(
2798 mach_port_t master_port,
2799 io_name_t notification_type,
2800 io_buf_ptr_t matching,
2801 mach_msg_type_number_t matchingCnt,
2802 mach_port_t wake_port,
2803 void * reference,
2804 vm_size_t referenceSize,
2805 bool client64,
2806 kern_return_t *result,
2807 io_object_t *notification )
2808 {
2809 kern_return_t kr;
2810 vm_offset_t data;
2811 vm_map_offset_t map_data;
2812
2813 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2814 data = CAST_DOWN(vm_offset_t, map_data);
2815
2816 if (KERN_SUCCESS == kr) {
2817 // must return success after vm_map_copyout() succeeds
2818 // and mig will copy out objects on success
2819 *notification = NULL;
2820 *result = internal_io_service_add_notification( master_port, notification_type,
2821 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2822 vm_deallocate( kernel_map, data, matchingCnt );
2823 }
2824
2825 return kr;
2826 }
2827
2828 /* Routine io_service_add_notification_ool */
2829 kern_return_t
2830 is_io_service_add_notification_ool(
2831 mach_port_t master_port,
2832 io_name_t notification_type,
2833 io_buf_ptr_t matching,
2834 mach_msg_type_number_t matchingCnt,
2835 mach_port_t wake_port,
2836 io_async_ref_t reference,
2837 mach_msg_type_number_t referenceCnt,
2838 kern_return_t *result,
2839 io_object_t *notification )
2840 {
2841 io_async_ref_t zreference;
2842
2843 if (referenceCnt > ASYNC_REF_COUNT) {
2844 return kIOReturnBadArgument;
2845 }
2846 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2847 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2848
2849 return internal_io_service_add_notification_ool(master_port, notification_type,
2850 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2851 false, result, notification);
2852 }
2853
2854 /* Routine io_service_add_notification_ool_64 */
2855 kern_return_t
2856 is_io_service_add_notification_ool_64(
2857 mach_port_t master_port,
2858 io_name_t notification_type,
2859 io_buf_ptr_t matching,
2860 mach_msg_type_number_t matchingCnt,
2861 mach_port_t wake_port,
2862 io_async_ref64_t reference,
2863 mach_msg_type_number_t referenceCnt,
2864 kern_return_t *result,
2865 io_object_t *notification )
2866 {
2867 io_async_ref64_t zreference;
2868
2869 if (referenceCnt > ASYNC_REF64_COUNT) {
2870 return kIOReturnBadArgument;
2871 }
2872 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2873 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2874
2875 return internal_io_service_add_notification_ool(master_port, notification_type,
2876 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2877 true, result, notification);
2878 }
2879
2880 /* Routine io_service_add_notification_old */
2881 kern_return_t
2882 is_io_service_add_notification_old(
2883 mach_port_t master_port,
2884 io_name_t notification_type,
2885 io_string_t matching,
2886 mach_port_t port,
2887 // for binary compatibility reasons, this must be natural_t for ILP32
2888 natural_t ref,
2889 io_object_t * notification )
2890 {
2891 return is_io_service_add_notification( master_port, notification_type,
2892 matching, port, &ref, 1, notification );
2893 }
2894
2895
2896 static kern_return_t
2897 internal_io_service_add_interest_notification(
2898 io_object_t _service,
2899 io_name_t type_of_interest,
2900 mach_port_t port,
2901 void * reference,
2902 vm_size_t referenceSize,
2903 bool client64,
2904 io_object_t * notification )
2905 {
2906 IOServiceMessageUserNotification * userNotify = NULL;
2907 IONotifier * notify = NULL;
2908 const OSSymbol * sym;
2909 IOReturn err;
2910
2911 CHECK( IOService, _service, service );
2912
2913 err = kIOReturnNoResources;
2914 if ((sym = OSSymbol::withCString( type_of_interest ))) {
2915 do {
2916 userNotify = new IOServiceMessageUserNotification;
2917
2918 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2919 reference, referenceSize,
2920 kIOUserNotifyMaxMessageSize,
2921 client64 )) {
2922 userNotify->release();
2923 userNotify = NULL;
2924 }
2925 if (!userNotify) {
2926 continue;
2927 }
2928
2929 notify = service->registerInterest( sym,
2930 &userNotify->_handler, userNotify );
2931 if (notify) {
2932 *notification = userNotify;
2933 userNotify->setNotification( notify );
2934 err = kIOReturnSuccess;
2935 } else {
2936 err = kIOReturnUnsupported;
2937 }
2938
2939 sym->release();
2940 } while (false);
2941 }
2942
2943 if ((kIOReturnSuccess != err) && userNotify) {
2944 userNotify->invalidatePort();
2945 userNotify->release();
2946 userNotify = NULL;
2947 }
2948
2949 return err;
2950 }
2951
2952 /* Routine io_service_add_interest_notification */
2953 kern_return_t
2954 is_io_service_add_interest_notification(
2955 io_object_t service,
2956 io_name_t type_of_interest,
2957 mach_port_t port,
2958 io_async_ref_t reference,
2959 mach_msg_type_number_t referenceCnt,
2960 io_object_t * notification )
2961 {
2962 io_async_ref_t zreference;
2963
2964 if (referenceCnt > ASYNC_REF_COUNT) {
2965 return kIOReturnBadArgument;
2966 }
2967 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2968 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2969
2970 return internal_io_service_add_interest_notification(service, type_of_interest,
2971 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
2972 }
2973
2974 /* Routine io_service_add_interest_notification_64 */
2975 kern_return_t
2976 is_io_service_add_interest_notification_64(
2977 io_object_t service,
2978 io_name_t type_of_interest,
2979 mach_port_t wake_port,
2980 io_async_ref64_t reference,
2981 mach_msg_type_number_t referenceCnt,
2982 io_object_t *notification )
2983 {
2984 io_async_ref64_t zreference;
2985
2986 if (referenceCnt > ASYNC_REF64_COUNT) {
2987 return kIOReturnBadArgument;
2988 }
2989 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2990 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2991
2992 return internal_io_service_add_interest_notification(service, type_of_interest,
2993 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
2994 }
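
/* Editor's note (illustrative sketch, not part of the original file): general-interest
 * notifications are registered from user space with IOServiceAddInterestNotification(),
 * the counterpart of internal_io_service_add_interest_notification() above. How the
 * io_service_t is obtained is elided. */
#if 0
#include <IOKit/IOKitLib.h>
#include <IOKit/IOMessage.h>

static void
InterestCallback(void *refcon, io_service_t service,
    natural_t messageType, void *messageArgument)
{
	if (kIOMessageServiceIsTerminated == messageType) {
		/* the service is going away */
	}
}

static io_object_t
RegisterInterest(IONotificationPortRef port, io_service_t service)
{
	io_object_t note = IO_OBJECT_NULL;

	// kIOGeneralInterest becomes the type_of_interest OSSymbol above.
	(void) IOServiceAddInterestNotification(port, service, kIOGeneralInterest,
	    InterestCallback, NULL, &note);
	return note;
}
#endif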
2995
2996
2997 /* Routine io_service_acknowledge_notification */
2998 kern_return_t
2999 is_io_service_acknowledge_notification(
3000 io_object_t _service,
3001 natural_t notify_ref,
3002 natural_t response )
3003 {
3004 CHECK( IOService, _service, service );
3005
3006 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3007 (IOOptionBits) response );
3008 }
3009
3010 /* Routine io_connect_get_notification_semaphore */
3011 kern_return_t
3012 is_io_connect_get_notification_semaphore(
3013 io_connect_t connection,
3014 natural_t notification_type,
3015 semaphore_t *semaphore )
3016 {
3017 CHECK( IOUserClient, connection, client );
3018
3019 IOStatisticsClientCall();
3020 return client->getNotificationSemaphore((UInt32) notification_type,
3021 semaphore );
3022 }
3023
3024 /* Routine io_registry_get_root_entry */
3025 kern_return_t
3026 is_io_registry_get_root_entry(
3027 mach_port_t master_port,
3028 io_object_t *root )
3029 {
3030 IORegistryEntry * entry;
3031
3032 if (master_port != master_device_port) {
3033 return kIOReturnNotPrivileged;
3034 }
3035
3036 entry = IORegistryEntry::getRegistryRoot();
3037 if (entry) {
3038 entry->retain();
3039 }
3040 *root = entry;
3041
3042 return kIOReturnSuccess;
3043 }
3044
3045 /* Routine io_registry_create_iterator */
3046 kern_return_t
3047 is_io_registry_create_iterator(
3048 mach_port_t master_port,
3049 io_name_t plane,
3050 uint32_t options,
3051 io_object_t *iterator )
3052 {
3053 if (master_port != master_device_port) {
3054 return kIOReturnNotPrivileged;
3055 }
3056
3057 *iterator = IOUserIterator::withIterator(
3058 IORegistryIterator::iterateOver(
3059 IORegistryEntry::getPlane( plane ), options ));
3060
3061 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3062 }
3063
3064 /* Routine io_registry_entry_create_iterator */
3065 kern_return_t
3066 is_io_registry_entry_create_iterator(
3067 io_object_t registry_entry,
3068 io_name_t plane,
3069 uint32_t options,
3070 io_object_t *iterator )
3071 {
3072 CHECK( IORegistryEntry, registry_entry, entry );
3073
3074 *iterator = IOUserIterator::withIterator(
3075 IORegistryIterator::iterateOver( entry,
3076 IORegistryEntry::getPlane( plane ), options ));
3077
3078 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3079 }
3080
3081 /* Routine io_registry_iterator_enter_entry */
3082 kern_return_t
3083 is_io_registry_iterator_enter_entry(
3084 io_object_t iterator )
3085 {
3086 CHECKLOCKED( IORegistryIterator, iterator, iter );
3087
3088 IOLockLock(oIter->lock);
3089 iter->enterEntry();
3090 IOLockUnlock(oIter->lock);
3091
3092 return kIOReturnSuccess;
3093 }
3094
3095 /* Routine io_registry_iterator_exit_entry */
3096 kern_return_t
3097 is_io_registry_iterator_exit_entry(
3098 io_object_t iterator )
3099 {
3100 bool didIt;
3101
3102 CHECKLOCKED( IORegistryIterator, iterator, iter );
3103
3104 IOLockLock(oIter->lock);
3105 didIt = iter->exitEntry();
3106 IOLockUnlock(oIter->lock);
3107
3108 return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3109 }
3110
3111 /* Routine io_registry_entry_from_path */
3112 kern_return_t
3113 is_io_registry_entry_from_path(
3114 mach_port_t master_port,
3115 io_string_t path,
3116 io_object_t *registry_entry )
3117 {
3118 IORegistryEntry * entry;
3119
3120 if (master_port != master_device_port) {
3121 return kIOReturnNotPrivileged;
3122 }
3123
3124 entry = IORegistryEntry::fromPath( path );
3125
3126 *registry_entry = entry;
3127
3128 return kIOReturnSuccess;
3129 }
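
/* Editor's note (illustrative sketch, not part of the original file): path lookups
 * arrive here via IORegistryEntryFromPath(); the inverse direction is served by
 * is_io_registry_entry_get_path() further below. The path shown is only an example. */
#if 0
#include <IOKit/IOKitLib.h>

static io_registry_entry_t
LookUpServiceRoot(void)
{
	// "IOService:/" names the root of the service plane.
	return IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
}

static kern_return_t
PathOf(io_registry_entry_t entry, io_string_t path)
{
	return IORegistryEntryGetPath(entry, kIOServicePlane, path);
}
#endif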
3130
3131
3132 /* Routine io_registry_entry_from_path_ool */
3133 kern_return_t
3134 is_io_registry_entry_from_path_ool(
3135 mach_port_t master_port,
3136 io_string_inband_t path,
3137 io_buf_ptr_t path_ool,
3138 mach_msg_type_number_t path_oolCnt,
3139 kern_return_t *result,
3140 io_object_t *registry_entry)
3141 {
3142 IORegistryEntry * entry;
3143 vm_map_offset_t map_data;
3144 const char * cpath;
3145 IOReturn res;
3146 kern_return_t err;
3147
3148 if (master_port != master_device_port) {
3149 return kIOReturnNotPrivileged;
3150 }
3151
3152 map_data = 0;
3153 entry = NULL;
3154 res = err = KERN_SUCCESS;
3155 if (path[0]) {
3156 cpath = path;
3157 } else {
3158 if (!path_oolCnt) {
3159 return kIOReturnBadArgument;
3160 }
3161 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
3162 return kIOReturnMessageTooLarge;
3163 }
3164
3165 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
3166 if (KERN_SUCCESS == err) {
3167 // must return success to mig after vm_map_copyout() succeeds, so result is actual
3168 cpath = CAST_DOWN(const char *, map_data);
3169 if (cpath[path_oolCnt - 1]) {
3170 res = kIOReturnBadArgument;
3171 }
3172 }
3173 }
3174
3175 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
3176 entry = IORegistryEntry::fromPath(cpath);
3177 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
3178 }
3179
3180 if (map_data) {
3181 vm_deallocate(kernel_map, map_data, path_oolCnt);
3182 }
3183
3184 if (KERN_SUCCESS != err) {
3185 res = err;
3186 }
3187 *registry_entry = entry;
3188 *result = res;
3189
3190 return err;
3191 }
3192
3193
3194 /* Routine io_registry_entry_in_plane */
3195 kern_return_t
3196 is_io_registry_entry_in_plane(
3197 io_object_t registry_entry,
3198 io_name_t plane,
3199 boolean_t *inPlane )
3200 {
3201 CHECK( IORegistryEntry, registry_entry, entry );
3202
3203 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3204
3205 return kIOReturnSuccess;
3206 }
3207
3208
3209 /* Routine io_registry_entry_get_path */
3210 kern_return_t
3211 is_io_registry_entry_get_path(
3212 io_object_t registry_entry,
3213 io_name_t plane,
3214 io_string_t path )
3215 {
3216 int length;
3217 CHECK( IORegistryEntry, registry_entry, entry );
3218
3219 length = sizeof(io_string_t);
3220 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3221 return kIOReturnSuccess;
3222 } else {
3223 return kIOReturnBadArgument;
3224 }
3225 }
3226
3227 /* Routine io_registry_entry_get_path_ool */
3228 kern_return_t
3229 is_io_registry_entry_get_path_ool(
3230 io_object_t registry_entry,
3231 io_name_t plane,
3232 io_string_inband_t path,
3233 io_buf_ptr_t *path_ool,
3234 mach_msg_type_number_t *path_oolCnt)
3235 {
3236 enum { kMaxPath = 16384 };
3237 IOReturn err;
3238 int length;
3239 char * buf;
3240
3241 CHECK( IORegistryEntry, registry_entry, entry );
3242
3243 *path_ool = NULL;
3244 *path_oolCnt = 0;
3245 length = sizeof(io_string_inband_t);
3246 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3247 err = kIOReturnSuccess;
3248 } else {
3249 length = kMaxPath;
3250 buf = IONew(char, length);
3251 if (!buf) {
3252 err = kIOReturnNoMemory;
3253 } else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3254 err = kIOReturnError;
3255 } else {
3256 *path_oolCnt = length;
3257 err = copyoutkdata(buf, length, path_ool);
3258 }
3259 if (buf) {
3260 IODelete(buf, char, kMaxPath);
3261 }
3262 }
3263
3264 return err;
3265 }
3266
3267
3268 /* Routine io_registry_entry_get_name */
3269 kern_return_t
3270 is_io_registry_entry_get_name(
3271 io_object_t registry_entry,
3272 io_name_t name )
3273 {
3274 CHECK( IORegistryEntry, registry_entry, entry );
3275
3276 strncpy( name, entry->getName(), sizeof(io_name_t));
3277
3278 return kIOReturnSuccess;
3279 }
3280
3281 /* Routine io_registry_entry_get_name_in_plane */
3282 kern_return_t
3283 is_io_registry_entry_get_name_in_plane(
3284 io_object_t registry_entry,
3285 io_name_t planeName,
3286 io_name_t name )
3287 {
3288 const IORegistryPlane * plane;
3289 CHECK( IORegistryEntry, registry_entry, entry );
3290
3291 if (planeName[0]) {
3292 plane = IORegistryEntry::getPlane( planeName );
3293 } else {
3294 plane = NULL;
3295 }
3296
3297 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3298
3299 return kIOReturnSuccess;
3300 }
3301
3302 /* Routine io_registry_entry_get_location_in_plane */
3303 kern_return_t
3304 is_io_registry_entry_get_location_in_plane(
3305 io_object_t registry_entry,
3306 io_name_t planeName,
3307 io_name_t location )
3308 {
3309 const IORegistryPlane * plane;
3310 CHECK( IORegistryEntry, registry_entry, entry );
3311
3312 if (planeName[0]) {
3313 plane = IORegistryEntry::getPlane( planeName );
3314 } else {
3315 plane = NULL;
3316 }
3317
3318 const char * cstr = entry->getLocation( plane );
3319
3320 if (cstr) {
3321 strncpy( location, cstr, sizeof(io_name_t));
3322 return kIOReturnSuccess;
3323 } else {
3324 return kIOReturnNotFound;
3325 }
3326 }
3327
3328 /* Routine io_registry_entry_get_registry_entry_id */
3329 kern_return_t
3330 is_io_registry_entry_get_registry_entry_id(
3331 io_object_t registry_entry,
3332 uint64_t *entry_id )
3333 {
3334 CHECK( IORegistryEntry, registry_entry, entry );
3335
3336 *entry_id = entry->getRegistryEntryID();
3337
3338 return kIOReturnSuccess;
3339 }
3340
3341 /* Routine io_registry_entry_get_property_bytes */
3342 kern_return_t
3343 is_io_registry_entry_get_property_bytes(
3344 io_object_t registry_entry,
3345 io_name_t property_name,
3346 io_struct_inband_t buf,
3347 mach_msg_type_number_t *dataCnt )
3348 {
3349 OSObject * obj;
3350 OSData * data;
3351 OSString * str;
3352 OSBoolean * boo;
3353 OSNumber * off;
3354 UInt64 offsetBytes;
3355 unsigned int len = 0;
3356 const void * bytes = NULL;
3357 IOReturn ret = kIOReturnSuccess;
3358
3359 CHECK( IORegistryEntry, registry_entry, entry );
3360
3361 #if CONFIG_MACF
3362 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3363 return kIOReturnNotPermitted;
3364 }
3365 #endif
3366
3367 obj = entry->copyProperty(property_name);
3368 if (!obj) {
3369 return kIOReturnNoResources;
3370 }
3371
3372 // One day OSData will be a common container base class
3373 // until then...
3374 if ((data = OSDynamicCast( OSData, obj ))) {
3375 len = data->getLength();
3376 bytes = data->getBytesNoCopy();
3377 if (!data->isSerializable()) {
3378 len = 0;
3379 }
3380 } else if ((str = OSDynamicCast( OSString, obj ))) {
3381 len = str->getLength() + 1;
3382 bytes = str->getCStringNoCopy();
3383 } else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3384 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3385 bytes = boo->isTrue() ? "Yes" : "No";
3386 } else if ((off = OSDynamicCast( OSNumber, obj ))) {
3387 offsetBytes = off->unsigned64BitValue();
3388 len = off->numberOfBytes();
3389 if (len > sizeof(offsetBytes)) {
3390 len = sizeof(offsetBytes);
3391 }
3392 bytes = &offsetBytes;
3393 #ifdef __BIG_ENDIAN__
3394 bytes = (const void *)
3395 (((UInt32) bytes) + (sizeof(UInt64) - len));
3396 #endif
3397 } else {
3398 ret = kIOReturnBadArgument;
3399 }
3400
3401 if (bytes) {
3402 if (*dataCnt < len) {
3403 ret = kIOReturnIPCError;
3404 } else {
3405 *dataCnt = len;
3406 bcopy( bytes, buf, len );
3407 }
3408 }
3409 obj->release();
3410
3411 return ret;
3412 }
3413
3414
3415 /* Routine io_registry_entry_get_property */
3416 kern_return_t
3417 is_io_registry_entry_get_property(
3418 io_object_t registry_entry,
3419 io_name_t property_name,
3420 io_buf_ptr_t *properties,
3421 mach_msg_type_number_t *propertiesCnt )
3422 {
3423 kern_return_t err;
3424 vm_size_t len;
3425 OSObject * obj;
3426
3427 CHECK( IORegistryEntry, registry_entry, entry );
3428
3429 #if CONFIG_MACF
3430 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3431 return kIOReturnNotPermitted;
3432 }
3433 #endif
3434
3435 obj = entry->copyProperty(property_name);
3436 if (!obj) {
3437 return kIOReturnNotFound;
3438 }
3439
3440 OSSerialize * s = OSSerialize::withCapacity(4096);
3441 if (!s) {
3442 obj->release();
3443 return kIOReturnNoMemory;
3444 }
3445
3446 if (obj->serialize( s )) {
3447 len = s->getLength();
3448 *propertiesCnt = len;
3449 err = copyoutkdata( s->text(), len, properties );
3450 } else {
3451 err = kIOReturnUnsupported;
3452 }
3453
3454 s->release();
3455 obj->release();
3456
3457 return err;
3458 }
3459
3460 /* Routine io_registry_entry_get_property_recursively */
3461 kern_return_t
3462 is_io_registry_entry_get_property_recursively(
3463 io_object_t registry_entry,
3464 io_name_t plane,
3465 io_name_t property_name,
3466 uint32_t options,
3467 io_buf_ptr_t *properties,
3468 mach_msg_type_number_t *propertiesCnt )
3469 {
3470 kern_return_t err;
3471 vm_size_t len;
3472 OSObject * obj;
3473
3474 CHECK( IORegistryEntry, registry_entry, entry );
3475
3476 #if CONFIG_MACF
3477 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3478 return kIOReturnNotPermitted;
3479 }
3480 #endif
3481
3482 obj = entry->copyProperty( property_name,
3483 IORegistryEntry::getPlane( plane ), options );
3484 if (!obj) {
3485 return kIOReturnNotFound;
3486 }
3487
3488 OSSerialize * s = OSSerialize::withCapacity(4096);
3489 if (!s) {
3490 obj->release();
3491 return kIOReturnNoMemory;
3492 }
3493
3494 if (obj->serialize( s )) {
3495 len = s->getLength();
3496 *propertiesCnt = len;
3497 err = copyoutkdata( s->text(), len, properties );
3498 } else {
3499 err = kIOReturnUnsupported;
3500 }
3501
3502 s->release();
3503 obj->release();
3504
3505 return err;
3506 }
3507
3508 /* Routine io_registry_entry_get_properties */
3509 kern_return_t
3510 is_io_registry_entry_get_properties(
3511 io_object_t registry_entry,
3512 io_buf_ptr_t *properties,
3513 mach_msg_type_number_t *propertiesCnt )
3514 {
3515 return kIOReturnUnsupported;
3516 }
3517
3518 #if CONFIG_MACF
3519
3520 struct GetPropertiesEditorRef {
3521 kauth_cred_t cred;
3522 IORegistryEntry * entry;
3523 OSCollection * root;
3524 };
3525
3526 static const OSMetaClassBase *
3527 GetPropertiesEditor(void * reference,
3528 OSSerialize * s,
3529 OSCollection * container,
3530 const OSSymbol * name,
3531 const OSMetaClassBase * value)
3532 {
3533 GetPropertiesEditorRef * ref = (typeof(ref))reference;
3534
3535 if (!ref->root) {
3536 ref->root = container;
3537 }
3538 if (ref->root == container) {
3539 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3540 value = NULL;
3541 }
3542 }
3543 if (value) {
3544 value->retain();
3545 }
3546 return value;
3547 }
3548
3549 #endif /* CONFIG_MACF */
3550
3551 /* Routine io_registry_entry_get_properties_bin_buf */
3552 kern_return_t
3553 is_io_registry_entry_get_properties_bin_buf(
3554 io_object_t registry_entry,
3555 mach_vm_address_t buf,
3556 mach_vm_size_t *bufsize,
3557 io_buf_ptr_t *properties,
3558 mach_msg_type_number_t *propertiesCnt)
3559 {
3560 kern_return_t err = kIOReturnSuccess;
3561 vm_size_t len;
3562 OSSerialize * s;
3563 OSSerialize::Editor editor = NULL;
3564 void * editRef = NULL;
3565
3566 CHECK(IORegistryEntry, registry_entry, entry);
3567
3568 #if CONFIG_MACF
3569 GetPropertiesEditorRef ref;
3570 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3571 editor = &GetPropertiesEditor;
3572 editRef = &ref;
3573 ref.cred = kauth_cred_get();
3574 ref.entry = entry;
3575 ref.root = NULL;
3576 }
3577 #endif
3578
3579 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3580 if (!s) {
3581 return kIOReturnNoMemory;
3582 }
3583
3584 if (!entry->serializeProperties(s)) {
3585 err = kIOReturnUnsupported;
3586 }
3587
3588 if (kIOReturnSuccess == err) {
3589 len = s->getLength();
3590 if (buf && bufsize && len <= *bufsize) {
3591 *bufsize = len;
3592 *propertiesCnt = 0;
3593 *properties = nullptr;
3594 if (copyout(s->text(), buf, len)) {
3595 err = kIOReturnVMError;
3596 } else {
3597 err = kIOReturnSuccess;
3598 }
3599 } else {
3600 if (bufsize) {
3601 *bufsize = 0;
3602 }
3603 *propertiesCnt = len;
3604 err = copyoutkdata( s->text(), len, properties );
3605 }
3606 }
3607 s->release();
3608
3609 return err;
3610 }
3611
3612 /* Routine io_registry_entry_get_properties_bin */
3613 kern_return_t
3614 is_io_registry_entry_get_properties_bin(
3615 io_object_t registry_entry,
3616 io_buf_ptr_t *properties,
3617 mach_msg_type_number_t *propertiesCnt)
3618 {
3619 return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3620 0, NULL, properties, propertiesCnt);
3621 }
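
/* Editor's note (illustrative sketch, not part of the original file): in user space,
 * IORegistryEntryCreateCFProperties() consumes the binary serialization produced by
 * the *_get_properties_bin* routines above and rebuilds CF objects from it. */
#if 0
#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>

static CFDictionaryRef
CopyAllProperties(io_registry_entry_t entry)
{
	CFMutableDictionaryRef props = NULL;

	if (KERN_SUCCESS != IORegistryEntryCreateCFProperties(entry, &props,
	    kCFAllocatorDefault, 0)) {
		return NULL;
	}
	return props; // caller releases with CFRelease()
}
#endif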
3622
3623 /* Routine io_registry_entry_get_property_bin_buf */
3624 kern_return_t
3625 is_io_registry_entry_get_property_bin_buf(
3626 io_object_t registry_entry,
3627 io_name_t plane,
3628 io_name_t property_name,
3629 uint32_t options,
3630 mach_vm_address_t buf,
3631 mach_vm_size_t *bufsize,
3632 io_buf_ptr_t *properties,
3633 mach_msg_type_number_t *propertiesCnt )
3634 {
3635 kern_return_t err;
3636 vm_size_t len;
3637 OSObject * obj;
3638 const OSSymbol * sym;
3639
3640 CHECK( IORegistryEntry, registry_entry, entry );
3641
3642 #if CONFIG_MACF
3643 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3644 return kIOReturnNotPermitted;
3645 }
3646 #endif
3647
3648 sym = OSSymbol::withCString(property_name);
3649 if (!sym) {
3650 return kIOReturnNoMemory;
3651 }
3652
3653 if (gIORegistryEntryPropertyKeysKey == sym) {
3654 obj = entry->copyPropertyKeys();
3655 } else {
3656 if ((kIORegistryIterateRecursively & options) && plane[0]) {
3657 obj = entry->copyProperty(property_name,
3658 IORegistryEntry::getPlane(plane), options );
3659 } else {
3660 obj = entry->copyProperty(property_name);
3661 }
3662 if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
3663 entry->removeProperty(sym);
3664 }
3665 }
3666
3667 sym->release();
3668 if (!obj) {
3669 return kIOReturnNotFound;
3670 }
3671
3672 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3673 if (!s) {
3674 obj->release();
3675 return kIOReturnNoMemory;
3676 }
3677
3678 if (obj->serialize( s )) {
3679 len = s->getLength();
3680 if (buf && bufsize && len <= *bufsize) {
3681 *bufsize = len;
3682 *propertiesCnt = 0;
3683 *properties = nullptr;
3684 if (copyout(s->text(), buf, len)) {
3685 err = kIOReturnVMError;
3686 } else {
3687 err = kIOReturnSuccess;
3688 }
3689 } else {
3690 if (bufsize) {
3691 *bufsize = 0;
3692 }
3693 *propertiesCnt = len;
3694 err = copyoutkdata( s->text(), len, properties );
3695 }
3696 } else {
3697 err = kIOReturnUnsupported;
3698 }
3699
3700 s->release();
3701 obj->release();
3702
3703 return err;
3704 }
3705
3706 /* Routine io_registry_entry_get_property_bin */
3707 kern_return_t
3708 is_io_registry_entry_get_property_bin(
3709 io_object_t registry_entry,
3710 io_name_t plane,
3711 io_name_t property_name,
3712 uint32_t options,
3713 io_buf_ptr_t *properties,
3714 mach_msg_type_number_t *propertiesCnt )
3715 {
3716 return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3717 property_name, options, 0, NULL, properties, propertiesCnt);
3718 }
3719
3720
3721 /* Routine io_registry_entry_set_properties */
3722 kern_return_t
3723 is_io_registry_entry_set_properties
3724 (
3725 io_object_t registry_entry,
3726 io_buf_ptr_t properties,
3727 mach_msg_type_number_t propertiesCnt,
3728 kern_return_t * result)
3729 {
3730 OSObject * obj;
3731 kern_return_t err;
3732 IOReturn res;
3733 vm_offset_t data;
3734 vm_map_offset_t map_data;
3735
3736 CHECK( IORegistryEntry, registry_entry, entry );
3737
3738 if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
3739 return kIOReturnMessageTooLarge;
3740 }
3741
3742 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3743 data = CAST_DOWN(vm_offset_t, map_data);
3744
3745 if (KERN_SUCCESS == err) {
3746 FAKE_STACK_FRAME(entry->getMetaClass());
3747
3748 // must return success after vm_map_copyout() succeeds
3749 obj = OSUnserializeXML((const char *) data, propertiesCnt );
3750 vm_deallocate( kernel_map, data, propertiesCnt );
3751
3752 if (!obj) {
3753 res = kIOReturnBadArgument;
3754 }
3755 #if CONFIG_MACF
3756 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3757 registry_entry, obj)) {
3758 res = kIOReturnNotPermitted;
3759 }
3760 #endif
3761 else {
3762 res = entry->setProperties( obj );
3763 }
3764
3765 if (obj) {
3766 obj->release();
3767 }
3768
3769 FAKE_STACK_FRAME_END();
3770 } else {
3771 res = err;
3772 }
3773
3774 *result = res;
3775 return err;
3776 }
3777
3778 /* Routine io_registry_entry_get_child_iterator */
3779 kern_return_t
3780 is_io_registry_entry_get_child_iterator(
3781 io_object_t registry_entry,
3782 io_name_t plane,
3783 io_object_t *iterator )
3784 {
3785 CHECK( IORegistryEntry, registry_entry, entry );
3786
3787 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
3788 IORegistryEntry::getPlane( plane )));
3789
3790 return kIOReturnSuccess;
3791 }
3792
3793 /* Routine io_registry_entry_get_parent_iterator */
3794 kern_return_t
3795 is_io_registry_entry_get_parent_iterator(
3796 io_object_t registry_entry,
3797 io_name_t plane,
3798 io_object_t *iterator)
3799 {
3800 CHECK( IORegistryEntry, registry_entry, entry );
3801
3802 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
3803 IORegistryEntry::getPlane( plane )));
3804
3805 return kIOReturnSuccess;
3806 }
3807
3808 /* Routine io_service_get_busy_state */
3809 kern_return_t
3810 is_io_service_get_busy_state(
3811 io_object_t _service,
3812 uint32_t *busyState )
3813 {
3814 CHECK( IOService, _service, service );
3815
3816 *busyState = service->getBusyState();
3817
3818 return kIOReturnSuccess;
3819 }
3820
3821 /* Routine io_service_get_state */
3822 kern_return_t
3823 is_io_service_get_state(
3824 io_object_t _service,
3825 uint64_t *state,
3826 uint32_t *busy_state,
3827 uint64_t *accumulated_busy_time )
3828 {
3829 CHECK( IOService, _service, service );
3830
3831 *state = service->getState();
3832 *busy_state = service->getBusyState();
3833 *accumulated_busy_time = service->getAccumulatedBusyTime();
3834
3835 return kIOReturnSuccess;
3836 }
3837
3838 /* Routine io_service_wait_quiet */
3839 kern_return_t
3840 is_io_service_wait_quiet(
3841 io_object_t _service,
3842 mach_timespec_t wait_time )
3843 {
3844 uint64_t timeoutNS;
3845
3846 CHECK( IOService, _service, service );
3847
3848 timeoutNS = wait_time.tv_sec;
3849 timeoutNS *= kSecondScale;
3850 timeoutNS += wait_time.tv_nsec;
3851
3852 return service->waitQuiet(timeoutNS);
3853 }
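
/* Editor's note (illustrative sketch, not part of the original file): a minimal
 * example of the user-space call whose mach_timespec_t is converted to nanoseconds
 * above; the five-second timeout is an arbitrary choice. */
#if 0
#include <IOKit/IOKitLib.h>

static kern_return_t
WaitUpToFiveSeconds(io_service_t service)
{
	mach_timespec_t timeout;

	timeout.tv_sec  = 5;
	timeout.tv_nsec = 0;
	return IOServiceWaitQuiet(service, &timeout);
}
#endif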
3854
3855 /* Routine io_service_request_probe */
3856 kern_return_t
3857 is_io_service_request_probe(
3858 io_object_t _service,
3859 uint32_t options )
3860 {
3861 CHECK( IOService, _service, service );
3862
3863 return service->requestProbe( options );
3864 }
3865
3866 /* Routine io_service_get_authorization_id */
3867 kern_return_t
3868 is_io_service_get_authorization_id(
3869 io_object_t _service,
3870 uint64_t *authorization_id )
3871 {
3872 kern_return_t kr;
3873
3874 CHECK( IOService, _service, service );
3875
3876 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
3877 kIOClientPrivilegeAdministrator );
3878 if (kIOReturnSuccess != kr) {
3879 return kr;
3880 }
3881
3882 *authorization_id = service->getAuthorizationID();
3883
3884 return kr;
3885 }
3886
3887 /* Routine io_service_set_authorization_id */
3888 kern_return_t
3889 is_io_service_set_authorization_id(
3890 io_object_t _service,
3891 uint64_t authorization_id )
3892 {
3893 CHECK( IOService, _service, service );
3894
3895 return service->setAuthorizationID( authorization_id );
3896 }
3897
3898 /* Routine io_service_open_extended */
3899 kern_return_t
3900 is_io_service_open_extended(
3901 io_object_t _service,
3902 task_t owningTask,
3903 uint32_t connect_type,
3904 NDR_record_t ndr,
3905 io_buf_ptr_t properties,
3906 mach_msg_type_number_t propertiesCnt,
3907 kern_return_t * result,
3908 io_object_t *connection )
3909 {
3910 IOUserClient * client = NULL;
3911 kern_return_t err = KERN_SUCCESS;
3912 IOReturn res = kIOReturnSuccess;
3913 OSDictionary * propertiesDict = NULL;
3914 bool crossEndian;
3915 bool disallowAccess;
3916
3917 CHECK( IOService, _service, service );
3918
3919 if (!owningTask) {
3920 return kIOReturnBadArgument;
3921 }
3922 assert(owningTask == current_task());
3923 if (owningTask != current_task()) {
3924 return kIOReturnBadArgument;
3925 }
3926
3927 do{
3928 if (properties) {
3929 return kIOReturnUnsupported;
3930 }
3931 #if 0
3932 {
3933 OSObject * obj;
3934 vm_offset_t data;
3935 vm_map_offset_t map_data;
3936
3937 if (propertiesCnt > sizeof(io_struct_inband_t)) {
3938 return kIOReturnMessageTooLarge;
3939 }
3940
3941 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3942 res = err;
3943 data = CAST_DOWN(vm_offset_t, map_data);
3944 if (KERN_SUCCESS == err) {
3945 // must return success after vm_map_copyout() succeeds
3946 obj = OSUnserializeXML((const char *) data, propertiesCnt );
3947 vm_deallocate( kernel_map, data, propertiesCnt );
3948 propertiesDict = OSDynamicCast(OSDictionary, obj);
3949 if (!propertiesDict) {
3950 res = kIOReturnBadArgument;
3951 if (obj) {
3952 obj->release();
3953 }
3954 }
3955 }
3956 if (kIOReturnSuccess != res) {
3957 break;
3958 }
3959 }
3960 #endif
3961 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3962 if (crossEndian) {
3963 if (!propertiesDict) {
3964 propertiesDict = OSDictionary::withCapacity(4);
3965 }
3966 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3967 if (data) {
3968 if (propertiesDict) {
3969 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3970 }
3971 data->release();
3972 }
3973 }
3974
3975 res = service->newUserClient( owningTask, (void *) owningTask,
3976 connect_type, propertiesDict, &client );
3977
3978 if (propertiesDict) {
3979 propertiesDict->release();
3980 }
3981
3982 if (res == kIOReturnSuccess) {
3983 assert( OSDynamicCast(IOUserClient, client));
3984
3985 client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
3986 client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
3987 client->closed = false;
3988 client->lock = IOLockAlloc();
3989
3990 disallowAccess = (crossEndian
3991 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3992 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3993 if (disallowAccess) {
3994 res = kIOReturnUnsupported;
3995 }
3996 #if CONFIG_MACF
3997 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
3998 res = kIOReturnNotPermitted;
3999 }
4000 #endif
4001
4002 if (kIOReturnSuccess == res) {
4003 res = client->registerOwner(owningTask);
4004 }
4005
4006 if (kIOReturnSuccess != res) {
4007 IOStatisticsClientCall();
4008 client->clientClose();
4009 client->release();
4010 client = NULL;
4011 break;
4012 }
4013 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4014 if (creatorName) {
4015 client->setProperty(kIOUserClientCreatorKey, creatorName);
4016 creatorName->release();
4017 }
4018 client->setTerminateDefer(service, false);
4019 }
4020 }while (false);
4021
4022 *connection = client;
4023 *result = res;
4024
4025 return err;
4026 }
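
/* Editor's note (illustrative sketch, not part of the original file): IOServiceOpen()
 * reaches is_io_service_open_extended() above, which calls the provider's
 * newUserClient(). The connect type 0 and the class name are assumptions. */
#if 0
#include <mach/mach.h>
#include <IOKit/IOKitLib.h>

static io_connect_t
OpenFirstMatch(const char *className)
{
	io_connect_t connect = IO_OBJECT_NULL;
	io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
	    IOServiceMatching(className));

	if (service != IO_OBJECT_NULL) {
		// 'type' is passed through as connect_type above.
		(void) IOServiceOpen(service, mach_task_self(), 0 /* type */, &connect);
		IOObjectRelease(service);
	}
	return connect; // balance with IOServiceClose() -> is_io_service_close()
}
#endif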
4027
4028 /* Routine io_service_close */
4029 kern_return_t
4030 is_io_service_close(
4031 io_object_t connection )
4032 {
4033 OSSet * mappings;
4034 if ((mappings = OSDynamicCast(OSSet, connection))) {
4035 return kIOReturnSuccess;
4036 }
4037
4038 CHECK( IOUserClient, connection, client );
4039
4040 IOStatisticsClientCall();
4041
4042 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4043 IOLockLock(client->lock);
4044 client->clientClose();
4045 IOLockUnlock(client->lock);
4046 } else {
4047 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4048 client->getRegistryEntryID(), client->getName());
4049 }
4050
4051 return kIOReturnSuccess;
4052 }
4053
4054 /* Routine io_connect_get_service */
4055 kern_return_t
4056 is_io_connect_get_service(
4057 io_object_t connection,
4058 io_object_t *service )
4059 {
4060 IOService * theService;
4061
4062 CHECK( IOUserClient, connection, client );
4063
4064 theService = client->getService();
4065 if (theService) {
4066 theService->retain();
4067 }
4068
4069 *service = theService;
4070
4071 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4072 }
4073
4074 /* Routine io_connect_set_notification_port */
4075 kern_return_t
4076 is_io_connect_set_notification_port(
4077 io_object_t connection,
4078 uint32_t notification_type,
4079 mach_port_t port,
4080 uint32_t reference)
4081 {
4082 kern_return_t ret;
4083 CHECK( IOUserClient, connection, client );
4084
4085 IOStatisticsClientCall();
4086 IOLockLock(client->lock);
4087 ret = client->registerNotificationPort( port, notification_type,
4088 (io_user_reference_t) reference );
4089 IOLockUnlock(client->lock);
4090 return ret;
4091 }
4092
4093 /* Routine io_connect_set_notification_port_64 */
4094 kern_return_t
4095 is_io_connect_set_notification_port_64(
4096 io_object_t connection,
4097 uint32_t notification_type,
4098 mach_port_t port,
4099 io_user_reference_t reference)
4100 {
4101 kern_return_t ret;
4102 CHECK( IOUserClient, connection, client );
4103
4104 IOStatisticsClientCall();
4105 IOLockLock(client->lock);
4106 ret = client->registerNotificationPort( port, notification_type,
4107 reference );
4108 IOLockUnlock(client->lock);
4109 return ret;
4110 }
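
/* Editor's note (illustrative sketch, not part of the original file): the user-space
 * entry point for the two routines above is IOConnectSetNotificationPort(); the type
 * and reference values are assumptions. */
#if 0
#include <IOKit/IOKitLib.h>

static kern_return_t
InstallNotificationPort(io_connect_t connect, mach_port_t port)
{
	return IOConnectSetNotificationPort(connect, 0 /* type */, port, 0 /* reference */);
}
#endif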
4111
4112 /* Routine io_connect_map_memory_into_task */
4113 kern_return_t
4114 is_io_connect_map_memory_into_task
4115 (
4116 io_connect_t connection,
4117 uint32_t memory_type,
4118 task_t into_task,
4119 mach_vm_address_t *address,
4120 mach_vm_size_t *size,
4121 uint32_t flags
4122 )
4123 {
4124 IOReturn err;
4125 IOMemoryMap * map;
4126
4127 CHECK( IOUserClient, connection, client );
4128
4129 if (!into_task) {
4130 return kIOReturnBadArgument;
4131 }
4132
4133 IOStatisticsClientCall();
4134 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
4135
4136 if (map) {
4137 *address = map->getAddress();
4138 if (size) {
4139 *size = map->getSize();
4140 }
4141
4142 if (client->sharedInstance
4143 || (into_task != current_task())) {
4144 // push a name out to the task owning the map,
4145 // so we can clean up maps
4146 mach_port_name_t name __unused =
4147 IOMachPort::makeSendRightForTask(
4148 into_task, map, IKOT_IOKIT_OBJECT );
4149 map->release();
4150 } else {
4151 // keep it with the user client
4152 IOLockLock( gIOObjectPortLock);
4153 if (NULL == client->mappings) {
4154 client->mappings = OSSet::withCapacity(2);
4155 }
4156 if (client->mappings) {
4157 client->mappings->setObject( map);
4158 }
4159 IOLockUnlock( gIOObjectPortLock);
4160 map->release();
4161 }
4162 err = kIOReturnSuccess;
4163 } else {
4164 err = kIOReturnBadArgument;
4165 }
4166
4167 return err;
4168 }
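
/* Editor's note (illustrative sketch, not part of the original file):
 * IOConnectMapMemory64() is the user-space side of
 * is_io_connect_map_memory_into_task(); memory type 0 is an assumption and depends
 * on what the user client's clientMemoryForType() supports. */
#if 0
#include <mach/mach.h>
#include <IOKit/IOKitLib.h>

static kern_return_t
MapSharedMemory(io_connect_t connect, mach_vm_address_t *addr, mach_vm_size_t *size)
{
	*addr = 0; // with kIOMapAnywhere, the kernel chooses the address
	*size = 0;
	return IOConnectMapMemory64(connect, 0 /* memory_type */, mach_task_self(),
	           addr, size, kIOMapAnywhere);
}
#endif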
4169
4170 /* Routine io_connect_map_memory */
4171 kern_return_t
4172 is_io_connect_map_memory(
4173 io_object_t connect,
4174 uint32_t type,
4175 task_t task,
4176 uint32_t * mapAddr,
4177 uint32_t * mapSize,
4178 uint32_t flags )
4179 {
4180 IOReturn err;
4181 mach_vm_address_t address;
4182 mach_vm_size_t size;
4183
4184 address = SCALAR64(*mapAddr);
4185 size = SCALAR64(*mapSize);
4186
4187 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4188
4189 *mapAddr = SCALAR32(address);
4190 *mapSize = SCALAR32(size);
4191
4192 return err;
4193 }
4194 } /* extern "C" */
4195
4196 IOMemoryMap *
4197 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4198 {
4199 OSIterator * iter;
4200 IOMemoryMap * map = NULL;
4201
4202 IOLockLock(gIOObjectPortLock);
4203
4204 iter = OSCollectionIterator::withCollection(mappings);
4205 if (iter) {
4206 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4207 if (mem == map->getMemoryDescriptor()) {
4208 map->retain();
4209 mappings->removeObject(map);
4210 break;
4211 }
4212 }
4213 iter->release();
4214 }
4215
4216 IOLockUnlock(gIOObjectPortLock);
4217
4218 return map;
4219 }
4220
4221 extern "C" {
4222 /* Routine io_connect_unmap_memory_from_task */
4223 kern_return_t
4224 is_io_connect_unmap_memory_from_task
4225 (
4226 io_connect_t connection,
4227 uint32_t memory_type,
4228 task_t from_task,
4229 mach_vm_address_t address)
4230 {
4231 IOReturn err;
4232 IOOptionBits options = 0;
4233 IOMemoryDescriptor * memory = NULL;
4234 IOMemoryMap * map;
4235
4236 CHECK( IOUserClient, connection, client );
4237
4238 if (!from_task) {
4239 return kIOReturnBadArgument;
4240 }
4241
4242 IOStatisticsClientCall();
4243 err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
4244
4245 if (memory && (kIOReturnSuccess == err)) {
4246 options = (options & ~kIOMapUserOptionsMask)
4247 | kIOMapAnywhere | kIOMapReference;
4248
4249 map = memory->createMappingInTask( from_task, address, options );
4250 memory->release();
4251 if (map) {
4252 IOLockLock( gIOObjectPortLock);
4253 if (client->mappings) {
4254 client->mappings->removeObject( map);
4255 }
4256 IOLockUnlock( gIOObjectPortLock);
4257
4258 mach_port_name_t name = 0;
4259 if (from_task != current_task()) {
4260 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
4261 map->release();
4262 }
4263
4264 if (name) {
4265 map->userClientUnmap();
4266 err = iokit_mod_send_right( from_task, name, -2 );
4267 err = kIOReturnSuccess;
4268 } else {
4269 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
4270 }
4271 if (from_task == current_task()) {
4272 map->release();
4273 }
4274 } else {
4275 err = kIOReturnBadArgument;
4276 }
4277 }
4278
4279 return err;
4280 }
4281
4282 kern_return_t
4283 is_io_connect_unmap_memory(
4284 io_object_t connect,
4285 uint32_t type,
4286 task_t task,
4287 uint32_t mapAddr )
4288 {
4289 IOReturn err;
4290 mach_vm_address_t address;
4291
4292 address = SCALAR64(mapAddr);
4293
4294 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
4295
4296 return err;
4297 }
4298
4299
4300 /* Routine io_connect_add_client */
4301 kern_return_t
4302 is_io_connect_add_client(
4303 io_object_t connection,
4304 io_object_t connect_to)
4305 {
4306 CHECK( IOUserClient, connection, client );
4307 CHECK( IOUserClient, connect_to, to );
4308
4309 IOStatisticsClientCall();
4310 return client->connectClient( to );
4311 }
4312
4313
4314 /* Routine io_connect_set_properties */
4315 kern_return_t
4316 is_io_connect_set_properties(
4317 io_object_t connection,
4318 io_buf_ptr_t properties,
4319 mach_msg_type_number_t propertiesCnt,
4320 kern_return_t * result)
4321 {
4322 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4323 }
4324
4325 /* Routine io_connect_method_var_output */
4326 kern_return_t
4327 is_io_connect_method_var_output
4328 (
4329 io_connect_t connection,
4330 uint32_t selector,
4331 io_scalar_inband64_t scalar_input,
4332 mach_msg_type_number_t scalar_inputCnt,
4333 io_struct_inband_t inband_input,
4334 mach_msg_type_number_t inband_inputCnt,
4335 mach_vm_address_t ool_input,
4336 mach_vm_size_t ool_input_size,
4337 io_struct_inband_t inband_output,
4338 mach_msg_type_number_t *inband_outputCnt,
4339 io_scalar_inband64_t scalar_output,
4340 mach_msg_type_number_t *scalar_outputCnt,
4341 io_buf_ptr_t *var_output,
4342 mach_msg_type_number_t *var_outputCnt
4343 )
4344 {
4345 CHECK( IOUserClient, connection, client );
4346
4347 IOExternalMethodArguments args;
4348 IOReturn ret;
4349 IOMemoryDescriptor * inputMD = NULL;
4350 OSObject * structureVariableOutputData = NULL;
4351
4352 bzero(&args.__reserved[0], sizeof(args.__reserved));
4353 args.__reservedA = 0;
4354 args.version = kIOExternalMethodArgumentsCurrentVersion;
4355
4356 args.selector = selector;
4357
4358 args.asyncWakePort = MACH_PORT_NULL;
4359 args.asyncReference = NULL;
4360 args.asyncReferenceCount = 0;
4361 args.structureVariableOutputData = &structureVariableOutputData;
4362
4363 args.scalarInput = scalar_input;
4364 args.scalarInputCount = scalar_inputCnt;
4365 args.structureInput = inband_input;
4366 args.structureInputSize = inband_inputCnt;
4367
4368 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4369 return kIOReturnIPCError;
4370 }
4371
4372 if (ool_input) {
4373 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4374 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4375 current_task());
4376 }
4377
4378 args.structureInputDescriptor = inputMD;
4379
4380 args.scalarOutput = scalar_output;
4381 args.scalarOutputCount = *scalar_outputCnt;
4382 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4383 args.structureOutput = inband_output;
4384 args.structureOutputSize = *inband_outputCnt;
4385 args.structureOutputDescriptor = NULL;
4386 args.structureOutputDescriptorSize = 0;
4387
4388 IOStatisticsClientCall();
4389 ret = client->externalMethod( selector, &args );
4390
4391 *scalar_outputCnt = args.scalarOutputCount;
4392 *inband_outputCnt = args.structureOutputSize;
4393
4394 if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
4395 OSSerialize * serialize;
4396 OSData * data;
4397 vm_size_t len;
4398
4399 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
4400 len = serialize->getLength();
4401 *var_outputCnt = len;
4402 ret = copyoutkdata(serialize->text(), len, var_output);
4403 } else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
4404 len = data->getLength();
4405 *var_outputCnt = len;
4406 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
4407 } else {
4408 ret = kIOReturnUnderrun;
4409 }
4410 }
4411
4412 if (inputMD) {
4413 inputMD->release();
4414 }
4415 if (structureVariableOutputData) {
4416 structureVariableOutputData->release();
4417 }
4418
4419 return ret;
4420 }
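/*
 * Illustrative sketch (not part of this file): a user client method invoked
 * through the variable-output path above can return arbitrarily sized data by
 * storing a retained OSData (or OSSerialize) through
 * args->structureVariableOutputData; the MIG glue above copies it out to the
 * caller and releases it. MyUserClient and the buffer fields are hypothetical.
 *
 *   IOReturn
 *   MyUserClient::sReturnBlob(OSObject * target, void * reference,
 *       IOExternalMethodArguments * args)
 *   {
 *       MyUserClient * me = (MyUserClient *) target;
 *       OSData * blob = OSData::withBytes(me->fBuffer, me->fBufferLen);
 *       if (!blob) {
 *           return kIOReturnNoMemory;
 *       }
 *       *args->structureVariableOutputData = blob;   // consumed and released by the shim above
 *       return kIOReturnSuccess;
 *   }
 */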
4421
4422 /* Routine io_connect_method */
4423 kern_return_t
4424 is_io_connect_method
4425 (
4426 io_connect_t connection,
4427 uint32_t selector,
4428 io_scalar_inband64_t scalar_input,
4429 mach_msg_type_number_t scalar_inputCnt,
4430 io_struct_inband_t inband_input,
4431 mach_msg_type_number_t inband_inputCnt,
4432 mach_vm_address_t ool_input,
4433 mach_vm_size_t ool_input_size,
4434 io_struct_inband_t inband_output,
4435 mach_msg_type_number_t *inband_outputCnt,
4436 io_scalar_inband64_t scalar_output,
4437 mach_msg_type_number_t *scalar_outputCnt,
4438 mach_vm_address_t ool_output,
4439 mach_vm_size_t *ool_output_size
4440 )
4441 {
4442 CHECK( IOUserClient, connection, client );
4443
4444 IOExternalMethodArguments args;
4445 IOReturn ret;
4446 IOMemoryDescriptor * inputMD = NULL;
4447 IOMemoryDescriptor * outputMD = NULL;
4448
4449 bzero(&args.__reserved[0], sizeof(args.__reserved));
4450 args.__reservedA = 0;
4451 args.version = kIOExternalMethodArgumentsCurrentVersion;
4452
4453 args.selector = selector;
4454
4455 args.asyncWakePort = MACH_PORT_NULL;
4456 args.asyncReference = NULL;
4457 args.asyncReferenceCount = 0;
4458 args.structureVariableOutputData = NULL;
4459
4460 args.scalarInput = scalar_input;
4461 args.scalarInputCount = scalar_inputCnt;
4462 args.structureInput = inband_input;
4463 args.structureInputSize = inband_inputCnt;
4464
4465 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4466 return kIOReturnIPCError;
4467 }
4468 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
4469 return kIOReturnIPCError;
4470 }
4471
4472 if (ool_input) {
4473 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4474 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4475 current_task());
4476 }
4477
4478 args.structureInputDescriptor = inputMD;
4479
4480 args.scalarOutput = scalar_output;
4481 args.scalarOutputCount = *scalar_outputCnt;
4482 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4483 args.structureOutput = inband_output;
4484 args.structureOutputSize = *inband_outputCnt;
4485
4486 if (ool_output && ool_output_size) {
4487 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4488 kIODirectionIn, current_task());
4489 }
4490
4491 args.structureOutputDescriptor = outputMD;
4492 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
4493
4494 IOStatisticsClientCall();
4495 ret = client->externalMethod( selector, &args );
4496
4497 *scalar_outputCnt = args.scalarOutputCount;
4498 *inband_outputCnt = args.structureOutputSize;
4499 *ool_output_size = args.structureOutputDescriptorSize;
4500
4501 if (inputMD) {
4502 inputMD->release();
4503 }
4504 if (outputMD) {
4505 outputMD->release();
4506 }
4507
4508 return ret;
4509 }
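/*
 * For reference, the user-space side of this MIG routine is IOKitLib's
 * IOConnectCallMethod() and its scalar/struct convenience wrappers, roughly:
 *
 *   uint64_t in[2]  = { 1, 2 };
 *   uint64_t out[1];
 *   uint32_t outCnt = 1;
 *   kern_return_t kr = IOConnectCallMethod(connect, kMySelector,
 *       in, 2,                      // scalar input
 *       NULL, 0,                    // struct input
 *       out, &outCnt,               // scalar output
 *       NULL, NULL);                // struct output
 *
 * kMySelector and the argument values are hypothetical; struct buffers larger
 * than io_struct_inband_t are sent out-of-line and arrive here through the
 * ool_input / ool_output descriptors.
 */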
4510
4511 /* Routine io_connect_async_method */
4512 kern_return_t
4513 is_io_connect_async_method
4514 (
4515 io_connect_t connection,
4516 mach_port_t wake_port,
4517 io_async_ref64_t reference,
4518 mach_msg_type_number_t referenceCnt,
4519 uint32_t selector,
4520 io_scalar_inband64_t scalar_input,
4521 mach_msg_type_number_t scalar_inputCnt,
4522 io_struct_inband_t inband_input,
4523 mach_msg_type_number_t inband_inputCnt,
4524 mach_vm_address_t ool_input,
4525 mach_vm_size_t ool_input_size,
4526 io_struct_inband_t inband_output,
4527 mach_msg_type_number_t *inband_outputCnt,
4528 io_scalar_inband64_t scalar_output,
4529 mach_msg_type_number_t *scalar_outputCnt,
4530 mach_vm_address_t ool_output,
4531 mach_vm_size_t * ool_output_size
4532 )
4533 {
4534 CHECK( IOUserClient, connection, client );
4535
4536 IOExternalMethodArguments args;
4537 IOReturn ret;
4538 IOMemoryDescriptor * inputMD = NULL;
4539 IOMemoryDescriptor * outputMD = NULL;
4540
4541 bzero(&args.__reserved[0], sizeof(args.__reserved));
4542 args.__reservedA = 0;
4543 args.version = kIOExternalMethodArgumentsCurrentVersion;
4544
4545 reference[0] = (io_user_reference_t) wake_port;
4546 if (vm_map_is_64bit(get_task_map(current_task()))) {
4547 reference[0] |= kIOUCAsync64Flag;
4548 }
4549
4550 args.selector = selector;
4551
4552 args.asyncWakePort = wake_port;
4553 args.asyncReference = reference;
4554 args.asyncReferenceCount = referenceCnt;
4555
4556 args.structureVariableOutputData = NULL;
4557
4558 args.scalarInput = scalar_input;
4559 args.scalarInputCount = scalar_inputCnt;
4560 args.structureInput = inband_input;
4561 args.structureInputSize = inband_inputCnt;
4562
4563 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4564 return kIOReturnIPCError;
4565 }
4566 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
4567 return kIOReturnIPCError;
4568 }
4569
4570 if (ool_input) {
4571 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4572 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4573 current_task());
4574 }
4575
4576 args.structureInputDescriptor = inputMD;
4577
4578 args.scalarOutput = scalar_output;
4579 args.scalarOutputCount = *scalar_outputCnt;
4580 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4581 args.structureOutput = inband_output;
4582 args.structureOutputSize = *inband_outputCnt;
4583
4584 if (ool_output) {
4585 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4586 kIODirectionIn, current_task());
4587 }
4588
4589 args.structureOutputDescriptor = outputMD;
4590 args.structureOutputDescriptorSize = *ool_output_size;
4591
4592 IOStatisticsClientCall();
4593 ret = client->externalMethod( selector, &args );
4594
4595 *scalar_outputCnt = args.scalarOutputCount;
4596 *inband_outputCnt = args.structureOutputSize;
4597 *ool_output_size = args.structureOutputDescriptorSize;
4598
4599 if (inputMD) {
4600 inputMD->release();
4601 }
4602 if (outputMD) {
4603 outputMD->release();
4604 }
4605
4606 return ret;
4607 }
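/*
 * Async variant: reference[0] is overwritten above with the wake port name
 * (plus kIOUCAsync64Flag when the calling task has a 64-bit map), so the driver
 * can later post a completion against that port, e.g. via
 * IOUserClient::sendAsyncResult64(). User space typically reaches this routine
 * through IOConnectCallAsyncMethod(), passing a port obtained from
 * IONotificationPortGetMachPort() or mach_port_allocate().
 */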
4608
4609 /* Routine io_connect_method_scalarI_scalarO */
4610 kern_return_t
4611 is_io_connect_method_scalarI_scalarO(
4612 io_object_t connect,
4613 uint32_t index,
4614 io_scalar_inband_t input,
4615 mach_msg_type_number_t inputCount,
4616 io_scalar_inband_t output,
4617 mach_msg_type_number_t * outputCount )
4618 {
4619 IOReturn err;
4620 uint32_t i;
4621 io_scalar_inband64_t _input;
4622 io_scalar_inband64_t _output;
4623
4624 mach_msg_type_number_t struct_outputCnt = 0;
4625 mach_vm_size_t ool_output_size = 0;
4626
4627 bzero(&_output[0], sizeof(_output));
4628 for (i = 0; i < inputCount; i++) {
4629 _input[i] = SCALAR64(input[i]);
4630 }
4631
4632 err = is_io_connect_method(connect, index,
4633 _input, inputCount,
4634 NULL, 0,
4635 0, 0,
4636 NULL, &struct_outputCnt,
4637 _output, outputCount,
4638 0, &ool_output_size);
4639
4640 for (i = 0; i < *outputCount; i++) {
4641 output[i] = SCALAR32(_output[i]);
4642 }
4643
4644 return err;
4645 }
4646
4647 kern_return_t
4648 shim_io_connect_method_scalarI_scalarO(
4649 IOExternalMethod * method,
4650 IOService * object,
4651 const io_user_scalar_t * input,
4652 mach_msg_type_number_t inputCount,
4653 io_user_scalar_t * output,
4654 mach_msg_type_number_t * outputCount )
4655 {
4656 IOMethod func;
4657 io_scalar_inband_t _output;
4658 IOReturn err;
4659 err = kIOReturnBadArgument;
4660
4661 bzero(&_output[0], sizeof(_output));
4662 do {
4663 if (inputCount != method->count0) {
4664 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4665 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4666 continue;
4667 }
4668 if (*outputCount != method->count1) {
4669 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4670 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4671 continue;
4672 }
4673
4674 func = method->func;
4675
4676 switch (inputCount) {
4677 case 6:
4678 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4679 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
4680 break;
4681 case 5:
4682 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4683 ARG32(input[3]), ARG32(input[4]),
4684 &_output[0] );
4685 break;
4686 case 4:
4687 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4688 ARG32(input[3]),
4689 &_output[0], &_output[1] );
4690 break;
4691 case 3:
4692 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4693 &_output[0], &_output[1], &_output[2] );
4694 break;
4695 case 2:
4696 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4697 &_output[0], &_output[1], &_output[2],
4698 &_output[3] );
4699 break;
4700 case 1:
4701 err = (object->*func)( ARG32(input[0]),
4702 &_output[0], &_output[1], &_output[2],
4703 &_output[3], &_output[4] );
4704 break;
4705 case 0:
4706 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4707 &_output[3], &_output[4], &_output[5] );
4708 break;
4709
4710 default:
4711 IOLog("%s: Bad method table\n", object->getName());
4712 }
4713 }while (false);
4714
4715 uint32_t i;
4716 for (i = 0; i < *outputCount; i++) {
4717 output[i] = SCALAR32(_output[i]);
4718 }
4719
4720 return err;
4721 }
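/*
 * Shim dispatch pattern used by the legacy scalar/struct entry points in this
 * file: IOExternalMethod::count0/count1 describe the expected scalar input and
 * output counts, and the switch spreads the checked arguments across the fixed
 * six-parameter IOMethod signature, with the unused trailing slots carrying
 * pointers to the 32-bit output scalars.
 */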
4722
4723 /* Routine io_async_method_scalarI_scalarO */
4724 kern_return_t
4725 is_io_async_method_scalarI_scalarO(
4726 io_object_t connect,
4727 mach_port_t wake_port,
4728 io_async_ref_t reference,
4729 mach_msg_type_number_t referenceCnt,
4730 uint32_t index,
4731 io_scalar_inband_t input,
4732 mach_msg_type_number_t inputCount,
4733 io_scalar_inband_t output,
4734 mach_msg_type_number_t * outputCount )
4735 {
4736 IOReturn err;
4737 uint32_t i;
4738 io_scalar_inband64_t _input;
4739 io_scalar_inband64_t _output;
4740 io_async_ref64_t _reference;
4741
4742 if (referenceCnt > ASYNC_REF64_COUNT) {
4743 return kIOReturnBadArgument;
4744 }
4745 bzero(&_output[0], sizeof(_output));
4746 for (i = 0; i < referenceCnt; i++) {
4747 _reference[i] = REF64(reference[i]);
4748 }
4749 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4750
4751 mach_msg_type_number_t struct_outputCnt = 0;
4752 mach_vm_size_t ool_output_size = 0;
4753
4754 for (i = 0; i < inputCount; i++) {
4755 _input[i] = SCALAR64(input[i]);
4756 }
4757
4758 err = is_io_connect_async_method(connect,
4759 wake_port, _reference, referenceCnt,
4760 index,
4761 _input, inputCount,
4762 NULL, 0,
4763 0, 0,
4764 NULL, &struct_outputCnt,
4765 _output, outputCount,
4766 0, &ool_output_size);
4767
4768 for (i = 0; i < *outputCount; i++) {
4769 output[i] = SCALAR32(_output[i]);
4770 }
4771
4772 return err;
4773 }
4774 /* Routine io_async_method_scalarI_structureO */
4775 kern_return_t
4776 is_io_async_method_scalarI_structureO(
4777 io_object_t connect,
4778 mach_port_t wake_port,
4779 io_async_ref_t reference,
4780 mach_msg_type_number_t referenceCnt,
4781 uint32_t index,
4782 io_scalar_inband_t input,
4783 mach_msg_type_number_t inputCount,
4784 io_struct_inband_t output,
4785 mach_msg_type_number_t * outputCount )
4786 {
4787 uint32_t i;
4788 io_scalar_inband64_t _input;
4789 io_async_ref64_t _reference;
4790
4791 if (referenceCnt > ASYNC_REF64_COUNT) {
4792 return kIOReturnBadArgument;
4793 }
4794 for (i = 0; i < referenceCnt; i++) {
4795 _reference[i] = REF64(reference[i]);
4796 }
4797 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4798
4799 mach_msg_type_number_t scalar_outputCnt = 0;
4800 mach_vm_size_t ool_output_size = 0;
4801
4802 for (i = 0; i < inputCount; i++) {
4803 _input[i] = SCALAR64(input[i]);
4804 }
4805
4806 return is_io_connect_async_method(connect,
4807 wake_port, _reference, referenceCnt,
4808 index,
4809 _input, inputCount,
4810 NULL, 0,
4811 0, 0,
4812 output, outputCount,
4813 NULL, &scalar_outputCnt,
4814 0, &ool_output_size);
4815 }
4816
4817 /* Routine io_async_method_scalarI_structureI */
4818 kern_return_t
4819 is_io_async_method_scalarI_structureI(
4820 io_connect_t connect,
4821 mach_port_t wake_port,
4822 io_async_ref_t reference,
4823 mach_msg_type_number_t referenceCnt,
4824 uint32_t index,
4825 io_scalar_inband_t input,
4826 mach_msg_type_number_t inputCount,
4827 io_struct_inband_t inputStruct,
4828 mach_msg_type_number_t inputStructCount )
4829 {
4830 uint32_t i;
4831 io_scalar_inband64_t _input;
4832 io_async_ref64_t _reference;
4833
4834 if (referenceCnt > ASYNC_REF64_COUNT) {
4835 return kIOReturnBadArgument;
4836 }
4837 for (i = 0; i < referenceCnt; i++) {
4838 _reference[i] = REF64(reference[i]);
4839 }
4840 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4841
4842 mach_msg_type_number_t scalar_outputCnt = 0;
4843 mach_msg_type_number_t inband_outputCnt = 0;
4844 mach_vm_size_t ool_output_size = 0;
4845
4846 for (i = 0; i < inputCount; i++) {
4847 _input[i] = SCALAR64(input[i]);
4848 }
4849
4850 return is_io_connect_async_method(connect,
4851 wake_port, _reference, referenceCnt,
4852 index,
4853 _input, inputCount,
4854 inputStruct, inputStructCount,
4855 0, 0,
4856 NULL, &inband_outputCnt,
4857 NULL, &scalar_outputCnt,
4858 0, &ool_output_size);
4859 }
4860
4861 /* Routine io_async_method_structureI_structureO */
4862 kern_return_t
4863 is_io_async_method_structureI_structureO(
4864 io_object_t connect,
4865 mach_port_t wake_port,
4866 io_async_ref_t reference,
4867 mach_msg_type_number_t referenceCnt,
4868 uint32_t index,
4869 io_struct_inband_t input,
4870 mach_msg_type_number_t inputCount,
4871 io_struct_inband_t output,
4872 mach_msg_type_number_t * outputCount )
4873 {
4874 uint32_t i;
4875 mach_msg_type_number_t scalar_outputCnt = 0;
4876 mach_vm_size_t ool_output_size = 0;
4877 io_async_ref64_t _reference;
4878
4879 if (referenceCnt > ASYNC_REF64_COUNT) {
4880 return kIOReturnBadArgument;
4881 }
4882 for (i = 0; i < referenceCnt; i++) {
4883 _reference[i] = REF64(reference[i]);
4884 }
4885 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4886
4887 return is_io_connect_async_method(connect,
4888 wake_port, _reference, referenceCnt,
4889 index,
4890 NULL, 0,
4891 input, inputCount,
4892 0, 0,
4893 output, outputCount,
4894 NULL, &scalar_outputCnt,
4895 0, &ool_output_size);
4896 }
4897
4898
4899 kern_return_t
4900 shim_io_async_method_scalarI_scalarO(
4901 IOExternalAsyncMethod * method,
4902 IOService * object,
4903 mach_port_t asyncWakePort,
4904 io_user_reference_t * asyncReference,
4905 uint32_t asyncReferenceCount,
4906 const io_user_scalar_t * input,
4907 mach_msg_type_number_t inputCount,
4908 io_user_scalar_t * output,
4909 mach_msg_type_number_t * outputCount )
4910 {
4911 IOAsyncMethod func;
4912 uint32_t i;
4913 io_scalar_inband_t _output;
4914 IOReturn err;
4915 io_async_ref_t reference;
4916
4917 bzero(&_output[0], sizeof(_output));
4918 for (i = 0; i < asyncReferenceCount; i++) {
4919 reference[i] = REF32(asyncReference[i]);
4920 }
4921
4922 err = kIOReturnBadArgument;
4923
4924 do {
4925 if (inputCount != method->count0) {
4926 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4927 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4928 continue;
4929 }
4930 if (*outputCount != method->count1) {
4931 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4932 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4933 continue;
4934 }
4935
4936 func = method->func;
4937
4938 switch (inputCount) {
4939 case 6:
4940 err = (object->*func)( reference,
4941 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4942 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
4943 break;
4944 case 5:
4945 err = (object->*func)( reference,
4946 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4947 ARG32(input[3]), ARG32(input[4]),
4948 &_output[0] );
4949 break;
4950 case 4:
4951 err = (object->*func)( reference,
4952 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4953 ARG32(input[3]),
4954 &_output[0], &_output[1] );
4955 break;
4956 case 3:
4957 err = (object->*func)( reference,
4958 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4959 &_output[0], &_output[1], &_output[2] );
4960 break;
4961 case 2:
4962 err = (object->*func)( reference,
4963 ARG32(input[0]), ARG32(input[1]),
4964 &_output[0], &_output[1], &_output[2],
4965 &_output[3] );
4966 break;
4967 case 1:
4968 err = (object->*func)( reference,
4969 ARG32(input[0]),
4970 &_output[0], &_output[1], &_output[2],
4971 &_output[3], &_output[4] );
4972 break;
4973 case 0:
4974 err = (object->*func)( reference,
4975 &_output[0], &_output[1], &_output[2],
4976 &_output[3], &_output[4], &_output[5] );
4977 break;
4978
4979 default:
4980 IOLog("%s: Bad method table\n", object->getName());
4981 }
4982 }while (false);
4983
4984 for (i = 0; i < *outputCount; i++) {
4985 output[i] = SCALAR32(_output[i]);
4986 }
4987
4988 return err;
4989 }
4990
4991
4992 /* Routine io_connect_method_scalarI_structureO */
4993 kern_return_t
4994 is_io_connect_method_scalarI_structureO(
4995 io_object_t connect,
4996 uint32_t index,
4997 io_scalar_inband_t input,
4998 mach_msg_type_number_t inputCount,
4999 io_struct_inband_t output,
5000 mach_msg_type_number_t * outputCount )
5001 {
5002 uint32_t i;
5003 io_scalar_inband64_t _input;
5004
5005 mach_msg_type_number_t scalar_outputCnt = 0;
5006 mach_vm_size_t ool_output_size = 0;
5007
5008 for (i = 0; i < inputCount; i++) {
5009 _input[i] = SCALAR64(input[i]);
5010 }
5011
5012 return is_io_connect_method(connect, index,
5013 _input, inputCount,
5014 NULL, 0,
5015 0, 0,
5016 output, outputCount,
5017 NULL, &scalar_outputCnt,
5018 0, &ool_output_size);
5019 }
5020
5021 kern_return_t
5022 shim_io_connect_method_scalarI_structureO(
5023
5024 IOExternalMethod * method,
5025 IOService * object,
5026 const io_user_scalar_t * input,
5027 mach_msg_type_number_t inputCount,
5028 io_struct_inband_t output,
5029 IOByteCount * outputCount )
5030 {
5031 IOMethod func;
5032 IOReturn err;
5033
5034 err = kIOReturnBadArgument;
5035
5036 do {
5037 if (inputCount != method->count0) {
5038 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5039 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5040 continue;
5041 }
5042 if ((kIOUCVariableStructureSize != method->count1)
5043 && (*outputCount != method->count1)) {
5044 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5045 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5046 continue;
5047 }
5048
5049 func = method->func;
5050
5051 switch (inputCount) {
5052 case 5:
5053 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5054 ARG32(input[3]), ARG32(input[4]),
5055 output );
5056 break;
5057 case 4:
5058 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5059 ARG32(input[3]),
5060 output, (void *)outputCount );
5061 break;
5062 case 3:
5063 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5064 output, (void *)outputCount, NULL );
5065 break;
5066 case 2:
5067 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5068 output, (void *)outputCount, NULL, NULL );
5069 break;
5070 case 1:
5071 err = (object->*func)( ARG32(input[0]),
5072 output, (void *)outputCount, NULL, NULL, NULL );
5073 break;
5074 case 0:
5075 err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
5076 break;
5077
5078 default:
5079 IOLog("%s: Bad method table\n", object->getName());
5080 }
5081 }while (false);
5082
5083 return err;
5084 }
5085
5086
5087 kern_return_t
5088 shim_io_async_method_scalarI_structureO(
5089 IOExternalAsyncMethod * method,
5090 IOService * object,
5091 mach_port_t asyncWakePort,
5092 io_user_reference_t * asyncReference,
5093 uint32_t asyncReferenceCount,
5094 const io_user_scalar_t * input,
5095 mach_msg_type_number_t inputCount,
5096 io_struct_inband_t output,
5097 mach_msg_type_number_t * outputCount )
5098 {
5099 IOAsyncMethod func;
5100 uint32_t i;
5101 IOReturn err;
5102 io_async_ref_t reference;
5103
5104 for (i = 0; i < asyncReferenceCount; i++) {
5105 reference[i] = REF32(asyncReference[i]);
5106 }
5107
5108 err = kIOReturnBadArgument;
5109 do {
5110 if (inputCount != method->count0) {
5111 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5112 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5113 continue;
5114 }
5115 if ((kIOUCVariableStructureSize != method->count1)
5116 && (*outputCount != method->count1)) {
5117 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5118 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5119 continue;
5120 }
5121
5122 func = method->func;
5123
5124 switch (inputCount) {
5125 case 5:
5126 err = (object->*func)( reference,
5127 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5128 ARG32(input[3]), ARG32(input[4]),
5129 output );
5130 break;
5131 case 4:
5132 err = (object->*func)( reference,
5133 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5134 ARG32(input[3]),
5135 output, (void *)outputCount );
5136 break;
5137 case 3:
5138 err = (object->*func)( reference,
5139 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5140 output, (void *)outputCount, NULL );
5141 break;
5142 case 2:
5143 err = (object->*func)( reference,
5144 ARG32(input[0]), ARG32(input[1]),
5145 output, (void *)outputCount, NULL, NULL );
5146 break;
5147 case 1:
5148 err = (object->*func)( reference,
5149 ARG32(input[0]),
5150 output, (void *)outputCount, NULL, NULL, NULL );
5151 break;
5152 case 0:
5153 err = (object->*func)( reference,
5154 output, (void *)outputCount, NULL, NULL, NULL, NULL );
5155 break;
5156
5157 default:
5158 IOLog("%s: Bad method table\n", object->getName());
5159 }
5160 }while (false);
5161
5162 return err;
5163 }
5164
5165 /* Routine io_connect_method_scalarI_structureI */
5166 kern_return_t
5167 is_io_connect_method_scalarI_structureI(
5168 io_connect_t connect,
5169 uint32_t index,
5170 io_scalar_inband_t input,
5171 mach_msg_type_number_t inputCount,
5172 io_struct_inband_t inputStruct,
5173 mach_msg_type_number_t inputStructCount )
5174 {
5175 uint32_t i;
5176 io_scalar_inband64_t _input;
5177
5178 mach_msg_type_number_t scalar_outputCnt = 0;
5179 mach_msg_type_number_t inband_outputCnt = 0;
5180 mach_vm_size_t ool_output_size = 0;
5181
5182 for (i = 0; i < inputCount; i++) {
5183 _input[i] = SCALAR64(input[i]);
5184 }
5185
5186 return is_io_connect_method(connect, index,
5187 _input, inputCount,
5188 inputStruct, inputStructCount,
5189 0, 0,
5190 NULL, &inband_outputCnt,
5191 NULL, &scalar_outputCnt,
5192 0, &ool_output_size);
5193 }
5194
5195 kern_return_t
5196 shim_io_connect_method_scalarI_structureI(
5197 IOExternalMethod * method,
5198 IOService * object,
5199 const io_user_scalar_t * input,
5200 mach_msg_type_number_t inputCount,
5201 io_struct_inband_t inputStruct,
5202 mach_msg_type_number_t inputStructCount )
5203 {
5204 IOMethod func;
5205 IOReturn err = kIOReturnBadArgument;
5206
5207 do{
5208 if (inputCount != method->count0) {
5209 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5210 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5211 continue;
5212 }
5213 if ((kIOUCVariableStructureSize != method->count1)
5214 && (inputStructCount != method->count1)) {
5215 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5216 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5217 continue;
5218 }
5219
5220 func = method->func;
5221
5222 switch (inputCount) {
5223 case 5:
5224 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5225 ARG32(input[3]), ARG32(input[4]),
5226 inputStruct );
5227 break;
5228 case 4:
5229 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5230 ARG32(input[3]),
5231 inputStruct, (void *)(uintptr_t)inputStructCount );
5232 break;
5233 case 3:
5234 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5235 inputStruct, (void *)(uintptr_t)inputStructCount,
5236 NULL );
5237 break;
5238 case 2:
5239 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5240 inputStruct, (void *)(uintptr_t)inputStructCount,
5241 NULL, NULL );
5242 break;
5243 case 1:
5244 err = (object->*func)( ARG32(input[0]),
5245 inputStruct, (void *)(uintptr_t)inputStructCount,
5246 NULL, NULL, NULL );
5247 break;
5248 case 0:
5249 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5250 NULL, NULL, NULL, NULL );
5251 break;
5252
5253 default:
5254 IOLog("%s: Bad method table\n", object->getName());
5255 }
5256 }while (false);
5257
5258 return err;
5259 }
5260
5261 kern_return_t
5262 shim_io_async_method_scalarI_structureI(
5263 IOExternalAsyncMethod * method,
5264 IOService * object,
5265 mach_port_t asyncWakePort,
5266 io_user_reference_t * asyncReference,
5267 uint32_t asyncReferenceCount,
5268 const io_user_scalar_t * input,
5269 mach_msg_type_number_t inputCount,
5270 io_struct_inband_t inputStruct,
5271 mach_msg_type_number_t inputStructCount )
5272 {
5273 IOAsyncMethod func;
5274 uint32_t i;
5275 IOReturn err = kIOReturnBadArgument;
5276 io_async_ref_t reference;
5277
5278 for (i = 0; i < asyncReferenceCount; i++) {
5279 reference[i] = REF32(asyncReference[i]);
5280 }
5281
5282 do{
5283 if (inputCount != method->count0) {
5284 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5285 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5286 continue;
5287 }
5288 if ((kIOUCVariableStructureSize != method->count1)
5289 && (inputStructCount != method->count1)) {
5290 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5291 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5292 continue;
5293 }
5294
5295 func = method->func;
5296
5297 switch (inputCount) {
5298 case 5:
5299 err = (object->*func)( reference,
5300 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5301 ARG32(input[3]), ARG32(input[4]),
5302 inputStruct );
5303 break;
5304 case 4:
5305 err = (object->*func)( reference,
5306 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5307 ARG32(input[3]),
5308 inputStruct, (void *)(uintptr_t)inputStructCount );
5309 break;
5310 case 3:
5311 err = (object->*func)( reference,
5312 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5313 inputStruct, (void *)(uintptr_t)inputStructCount,
5314 NULL );
5315 break;
5316 case 2:
5317 err = (object->*func)( reference,
5318 ARG32(input[0]), ARG32(input[1]),
5319 inputStruct, (void *)(uintptr_t)inputStructCount,
5320 NULL, NULL );
5321 break;
5322 case 1:
5323 err = (object->*func)( reference,
5324 ARG32(input[0]),
5325 inputStruct, (void *)(uintptr_t)inputStructCount,
5326 NULL, NULL, NULL );
5327 break;
5328 case 0:
5329 err = (object->*func)( reference,
5330 inputStruct, (void *)(uintptr_t)inputStructCount,
5331 NULL, NULL, NULL, NULL );
5332 break;
5333
5334 default:
5335 IOLog("%s: Bad method table\n", object->getName());
5336 }
5337 }while (false);
5338
5339 return err;
5340 }
5341
5342 /* Routine io_connect_method_structureI_structureO */
5343 kern_return_t
5344 is_io_connect_method_structureI_structureO(
5345 io_object_t connect,
5346 uint32_t index,
5347 io_struct_inband_t input,
5348 mach_msg_type_number_t inputCount,
5349 io_struct_inband_t output,
5350 mach_msg_type_number_t * outputCount )
5351 {
5352 mach_msg_type_number_t scalar_outputCnt = 0;
5353 mach_vm_size_t ool_output_size = 0;
5354
5355 return is_io_connect_method(connect, index,
5356 NULL, 0,
5357 input, inputCount,
5358 0, 0,
5359 output, outputCount,
5360 NULL, &scalar_outputCnt,
5361 0, &ool_output_size);
5362 }
5363
5364 kern_return_t
5365 shim_io_connect_method_structureI_structureO(
5366 IOExternalMethod * method,
5367 IOService * object,
5368 io_struct_inband_t input,
5369 mach_msg_type_number_t inputCount,
5370 io_struct_inband_t output,
5371 IOByteCount * outputCount )
5372 {
5373 IOMethod func;
5374 IOReturn err = kIOReturnBadArgument;
5375
5376 do{
5377 if ((kIOUCVariableStructureSize != method->count0)
5378 && (inputCount != method->count0)) {
5379 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5380 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5381 continue;
5382 }
5383 if ((kIOUCVariableStructureSize != method->count1)
5384 && (*outputCount != method->count1)) {
5385 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5386 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5387 continue;
5388 }
5389
5390 func = method->func;
5391
5392 if (method->count1) {
5393 if (method->count0) {
5394 err = (object->*func)( input, output,
5395 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5396 } else {
5397 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5398 }
5399 } else {
5400 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5401 }
5402 }while (false);
5403
5404
5405 return err;
5406 }
5407
5408 kern_return_t
5409 shim_io_async_method_structureI_structureO(
5410 IOExternalAsyncMethod * method,
5411 IOService * object,
5412 mach_port_t asyncWakePort,
5413 io_user_reference_t * asyncReference,
5414 uint32_t asyncReferenceCount,
5415 io_struct_inband_t input,
5416 mach_msg_type_number_t inputCount,
5417 io_struct_inband_t output,
5418 mach_msg_type_number_t * outputCount )
5419 {
5420 IOAsyncMethod func;
5421 uint32_t i;
5422 IOReturn err;
5423 io_async_ref_t reference;
5424
5425 for (i = 0; i < asyncReferenceCount; i++) {
5426 reference[i] = REF32(asyncReference[i]);
5427 }
5428
5429 err = kIOReturnBadArgument;
5430 do{
5431 if ((kIOUCVariableStructureSize != method->count0)
5432 && (inputCount != method->count0)) {
5433 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5434 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5435 continue;
5436 }
5437 if ((kIOUCVariableStructureSize != method->count1)
5438 && (*outputCount != method->count1)) {
5439 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5440 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5441 continue;
5442 }
5443
5444 func = method->func;
5445
5446 if (method->count1) {
5447 if (method->count0) {
5448 err = (object->*func)( reference,
5449 input, output,
5450 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5451 } else {
5452 err = (object->*func)( reference,
5453 output, outputCount, NULL, NULL, NULL, NULL );
5454 }
5455 } else {
5456 err = (object->*func)( reference,
5457 input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5458 }
5459 }while (false);
5460
5461 return err;
5462 }
5463
5464 #if !NO_KEXTD
5465 bool gIOKextdClearedBusy = false;
5466 #endif
5467
5468 /* Routine io_catalog_send_data */
5469 kern_return_t
5470 is_io_catalog_send_data(
5471 mach_port_t master_port,
5472 uint32_t flag,
5473 io_buf_ptr_t inData,
5474 mach_msg_type_number_t inDataCount,
5475 kern_return_t * result)
5476 {
5477 #if NO_KEXTD
5478 return kIOReturnNotPrivileged;
5479 #else /* NO_KEXTD */
5480 OSObject * obj = NULL;
5481 vm_offset_t data;
5482 kern_return_t kr = kIOReturnError;
5483
5484 //printf("io_catalog_send_data called. flag: %d\n", flag);
5485
5486 if (master_port != master_device_port) {
5487 return kIOReturnNotPrivileged;
5488 }
5489
5490 if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
5491 flag != kIOCatalogKextdActive &&
5492 flag != kIOCatalogKextdFinishedLaunching) &&
5493 (!inData || !inDataCount)) {
5494 return kIOReturnBadArgument;
5495 }
5496
5497 if (!IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) {
5498 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
5499 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
5500 OSSafeReleaseNULL(taskName);
5501 // For now, fake success to not break applications relying on this function succeeding.
5502 // See <rdar://problem/32554970> for more details.
5503 return kIOReturnSuccess;
5504 }
5505
5506 if (inData) {
5507 vm_map_offset_t map_data;
5508
5509 if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
5510 return kIOReturnMessageTooLarge;
5511 }
5512
5513 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
5514 data = CAST_DOWN(vm_offset_t, map_data);
5515
5516 if (kr != KERN_SUCCESS) {
5517 return kr;
5518 }
5519
5520 // must return success after vm_map_copyout() succeeds
5521
5522 if (inDataCount) {
5523 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
5524 vm_deallocate( kernel_map, data, inDataCount );
5525 if (!obj) {
5526 *result = kIOReturnNoMemory;
5527 return KERN_SUCCESS;
5528 }
5529 }
5530 }
5531
5532 switch (flag) {
5533 case kIOCatalogResetDrivers:
5534 case kIOCatalogResetDriversNoMatch: {
5535 OSArray * array;
5536
5537 array = OSDynamicCast(OSArray, obj);
5538 if (array) {
5539 if (!gIOCatalogue->resetAndAddDrivers(array,
5540 flag == kIOCatalogResetDrivers)) {
5541 kr = kIOReturnError;
5542 }
5543 } else {
5544 kr = kIOReturnBadArgument;
5545 }
5546 }
5547 break;
5548
5549 case kIOCatalogAddDrivers:
5550 case kIOCatalogAddDriversNoMatch: {
5551 OSArray * array;
5552
5553 array = OSDynamicCast(OSArray, obj);
5554 if (array) {
5555 if (!gIOCatalogue->addDrivers( array,
5556 flag == kIOCatalogAddDrivers)) {
5557 kr = kIOReturnError;
5558 }
5559 } else {
5560 kr = kIOReturnBadArgument;
5561 }
5562 }
5563 break;
5564
5565 case kIOCatalogRemoveDrivers:
5566 case kIOCatalogRemoveDriversNoMatch: {
5567 OSDictionary * dict;
5568
5569 dict = OSDynamicCast(OSDictionary, obj);
5570 if (dict) {
5571 if (!gIOCatalogue->removeDrivers( dict,
5572 flag == kIOCatalogRemoveDrivers )) {
5573 kr = kIOReturnError;
5574 }
5575 } else {
5576 kr = kIOReturnBadArgument;
5577 }
5578 }
5579 break;
5580
5581 case kIOCatalogStartMatching__Removed:
5582 case kIOCatalogRemoveKernelLinker__Removed:
5583 kr = KERN_NOT_SUPPORTED;
5584 break;
5585
5586 case kIOCatalogKextdActive:
5587 #if !NO_KEXTD
5588 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
5589 OSKext::setKextdActive();
5590
5591 /* Dump all nonloaded startup extensions; kextd will now send them
5592 * down on request.
5593 */
5594 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
5595 #endif
5596 kr = kIOReturnSuccess;
5597 break;
5598
5599 case kIOCatalogKextdFinishedLaunching: {
5600 #if !NO_KEXTD
5601 if (!gIOKextdClearedBusy) {
5602 IOService::kextdLaunched();
5603 gIOKextdClearedBusy = true;
5604 }
5605 #endif
5606 kr = kIOReturnSuccess;
5607 }
5608 break;
5609
5610 default:
5611 kr = kIOReturnBadArgument;
5612 break;
5613 }
5614
5615 if (obj) {
5616 obj->release();
5617 }
5618
5619 *result = kr;
5620 return KERN_SUCCESS;
5621 #endif /* NO_KEXTD */
5622 }
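/*
 * Summary of the path above: the payload arrives as a vm_map_copy_t, is copied
 * out into the kernel map, unserialized from XML into an OSArray (add/reset
 * flags) or OSDictionary (remove flags), and handed to the matching
 * gIOCatalogue operation; the MIG return value reports only IPC success, with
 * the catalogue status delivered through *result.
 */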
5623
5624 /* Routine io_catalog_terminate */
5625 kern_return_t
5626 is_io_catalog_terminate(
5627 mach_port_t master_port,
5628 uint32_t flag,
5629 io_name_t name )
5630 {
5631 kern_return_t kr;
5632
5633 if (master_port != master_device_port) {
5634 return kIOReturnNotPrivileged;
5635 }
5636
5637 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
5638 kIOClientPrivilegeAdministrator );
5639 if (kIOReturnSuccess != kr) {
5640 return kr;
5641 }
5642
5643 switch (flag) {
5644 #if !defined(SECURE_KERNEL)
5645 case kIOCatalogServiceTerminate:
5646 OSIterator * iter;
5647 IOService * service;
5648
5649 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5650 kIORegistryIterateRecursively);
5651 if (!iter) {
5652 return kIOReturnNoMemory;
5653 }
5654
5655 do {
5656 iter->reset();
5657 while ((service = (IOService *)iter->getNextObject())) {
5658 if (service->metaCast(name)) {
5659 if (!service->terminate( kIOServiceRequired
5660 | kIOServiceSynchronous)) {
5661 kr = kIOReturnUnsupported;
5662 break;
5663 }
5664 }
5665 }
5666 } while (!service && !iter->isValid());
5667 iter->release();
5668 break;
5669
5670 case kIOCatalogModuleUnload:
5671 case kIOCatalogModuleTerminate:
5672 kr = gIOCatalogue->terminateDriversForModule(name,
5673 flag == kIOCatalogModuleUnload);
5674 break;
5675 #endif
5676
5677 default:
5678 kr = kIOReturnBadArgument;
5679 break;
5680 }
5681
5682 return kr;
5683 }
5684
5685 /* Routine io_catalog_get_data */
5686 kern_return_t
5687 is_io_catalog_get_data(
5688 mach_port_t master_port,
5689 uint32_t flag,
5690 io_buf_ptr_t *outData,
5691 mach_msg_type_number_t *outDataCount)
5692 {
5693 kern_return_t kr = kIOReturnSuccess;
5694 OSSerialize * s;
5695
5696 if (master_port != master_device_port) {
5697 return kIOReturnNotPrivileged;
5698 }
5699
5700 //printf("io_catalog_get_data called. flag: %d\n", flag);
5701
5702 s = OSSerialize::withCapacity(4096);
5703 if (!s) {
5704 return kIOReturnNoMemory;
5705 }
5706
5707 kr = gIOCatalogue->serializeData(flag, s);
5708
5709 if (kr == kIOReturnSuccess) {
5710 vm_offset_t data;
5711 vm_map_copy_t copy;
5712 vm_size_t size;
5713
5714 size = s->getLength();
5715 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5716 if (kr == kIOReturnSuccess) {
5717 bcopy(s->text(), (void *)data, size);
5718 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5719 (vm_map_size_t)size, true, &copy);
5720 *outData = (char *)copy;
5721 *outDataCount = size;
5722 }
5723 }
5724
5725 s->release();
5726
5727 return kr;
5728 }
5729
5730 /* Routine io_catalog_get_gen_count */
5731 kern_return_t
5732 is_io_catalog_get_gen_count(
5733 mach_port_t master_port,
5734 uint32_t *genCount)
5735 {
5736 if (master_port != master_device_port) {
5737 return kIOReturnNotPrivileged;
5738 }
5739
5740 //printf("io_catalog_get_gen_count called.\n");
5741
5742 if (!genCount) {
5743 return kIOReturnBadArgument;
5744 }
5745
5746 *genCount = gIOCatalogue->getGenerationCount();
5747
5748 return kIOReturnSuccess;
5749 }
5750
5751 /* Routine io_catalog_module_loaded.
5752 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); appears to be unused.
5753 */
5754 kern_return_t
5755 is_io_catalog_module_loaded(
5756 mach_port_t master_port,
5757 io_name_t name)
5758 {
5759 if (master_port != master_device_port) {
5760 return kIOReturnNotPrivileged;
5761 }
5762
5763 //printf("io_catalog_module_loaded called. name %s\n", name);
5764
5765 if (!name) {
5766 return kIOReturnBadArgument;
5767 }
5768
5769 gIOCatalogue->moduleHasLoaded(name);
5770
5771 return kIOReturnSuccess;
5772 }
5773
5774 kern_return_t
5775 is_io_catalog_reset(
5776 mach_port_t master_port,
5777 uint32_t flag)
5778 {
5779 if (master_port != master_device_port) {
5780 return kIOReturnNotPrivileged;
5781 }
5782
5783 switch (flag) {
5784 case kIOCatalogResetDefault:
5785 gIOCatalogue->reset();
5786 break;
5787
5788 default:
5789 return kIOReturnBadArgument;
5790 }
5791
5792 return kIOReturnSuccess;
5793 }
5794
5795 kern_return_t
5796 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5797 {
5798 kern_return_t result = kIOReturnBadArgument;
5799 IOUserClient * userClient;
5800 OSObject * object;
5801 uintptr_t ref;
5802
5803 ref = (uintptr_t) args->userClientRef;
5804 if ((1ULL << 32) & ref) {
5805 object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
5806 if (object) {
5807 result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5808 }
5809 OSSafeReleaseNULL(object);
5810 } else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
5811 IOExternalTrap *trap;
5812 IOService *target = NULL;
5813
5814 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5815
5816 if (trap && target) {
5817 IOTrap func;
5818
5819 func = trap->func;
5820
5821 if (func) {
5822 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5823 }
5824 }
5825
5826 iokit_remove_connect_reference(userClient);
5827 }
5828
5829 return result;
5830 }
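/*
 * Bit 32 of userClientRef selects between the two trap namespaces: DriverKit
 * user-extension references are dispatched through IOUserServerUEXTTrap(),
 * while classic connections resolve an IOExternalTrap via
 * getTargetAndTrapForIndex() and call it directly. User space reaches this
 * entry point through the IOConnectTrap0()..IOConnectTrap6() wrappers in
 * IOKitLib.
 */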
5831
5832 /* Routine io_device_tree_entry_exists_with_name */
5833 kern_return_t
5834 is_io_device_tree_entry_exists_with_name(
5835 mach_port_t master_port,
5836 io_name_t name,
5837 boolean_t *exists )
5838 {
5839 OSCollectionIterator *iter;
5840
5841 if (master_port != master_device_port) {
5842 return kIOReturnNotPrivileged;
5843 }
5844
5845 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5846 *exists = iter && iter->getNextObject();
5847 OSSafeReleaseNULL(iter);
5848
5849 return kIOReturnSuccess;
5850 }
5851 } /* extern "C" */
5852
5853 IOReturn
5854 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5855 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5856 {
5857 IOReturn err;
5858 IOService * object;
5859 IOByteCount structureOutputSize;
5860
5861 if (dispatch) {
5862 uint32_t count;
5863 count = dispatch->checkScalarInputCount;
5864 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
5865 return kIOReturnBadArgument;
5866 }
5867
5868 count = dispatch->checkStructureInputSize;
5869 if ((kIOUCVariableStructureSize != count)
5870 && (count != ((args->structureInputDescriptor)
5871 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
5872 return kIOReturnBadArgument;
5873 }
5874
5875 count = dispatch->checkScalarOutputCount;
5876 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
5877 return kIOReturnBadArgument;
5878 }
5879
5880 count = dispatch->checkStructureOutputSize;
5881 if ((kIOUCVariableStructureSize != count)
5882 && (count != ((args->structureOutputDescriptor)
5883 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
5884 return kIOReturnBadArgument;
5885 }
5886
5887 if (dispatch->function) {
5888 err = (*dispatch->function)(target, reference, args);
5889 } else {
5890 err = kIOReturnNoCompletion; /* implementor can dispatch */
5891 }
5892 return err;
5893 }
5894
5895
5896 // pre-Leopard APIs don't do out-of-line (OOL) structs
5897 if (args->structureInputDescriptor || args->structureOutputDescriptor) {
5898 err = kIOReturnIPCError;
5899 return err;
5900 }
5901
5902 structureOutputSize = args->structureOutputSize;
5903
5904 if (args->asyncWakePort) {
5905 IOExternalAsyncMethod * method;
5906 object = NULL;
5907 if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
5908 return kIOReturnUnsupported;
5909 }
5910
5911 if (kIOUCForegroundOnly & method->flags) {
5912 if (task_is_gpu_denied(current_task())) {
5913 return kIOReturnNotPermitted;
5914 }
5915 }
5916
5917 switch (method->flags & kIOUCTypeMask) {
5918 case kIOUCScalarIStructI:
5919 err = shim_io_async_method_scalarI_structureI( method, object,
5920 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5921 args->scalarInput, args->scalarInputCount,
5922 (char *)args->structureInput, args->structureInputSize );
5923 break;
5924
5925 case kIOUCScalarIScalarO:
5926 err = shim_io_async_method_scalarI_scalarO( method, object,
5927 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5928 args->scalarInput, args->scalarInputCount,
5929 args->scalarOutput, &args->scalarOutputCount );
5930 break;
5931
5932 case kIOUCScalarIStructO:
5933 err = shim_io_async_method_scalarI_structureO( method, object,
5934 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5935 args->scalarInput, args->scalarInputCount,
5936 (char *) args->structureOutput, &args->structureOutputSize );
5937 break;
5938
5939
5940 case kIOUCStructIStructO:
5941 err = shim_io_async_method_structureI_structureO( method, object,
5942 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5943 (char *)args->structureInput, args->structureInputSize,
5944 (char *) args->structureOutput, &args->structureOutputSize );
5945 break;
5946
5947 default:
5948 err = kIOReturnBadArgument;
5949 break;
5950 }
5951 } else {
5952 IOExternalMethod * method;
5953 object = NULL;
5954 if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
5955 return kIOReturnUnsupported;
5956 }
5957
5958 if (kIOUCForegroundOnly & method->flags) {
5959 if (task_is_gpu_denied(current_task())) {
5960 return kIOReturnNotPermitted;
5961 }
5962 }
5963
5964 switch (method->flags & kIOUCTypeMask) {
5965 case kIOUCScalarIStructI:
5966 err = shim_io_connect_method_scalarI_structureI( method, object,
5967 args->scalarInput, args->scalarInputCount,
5968 (char *) args->structureInput, args->structureInputSize );
5969 break;
5970
5971 case kIOUCScalarIScalarO:
5972 err = shim_io_connect_method_scalarI_scalarO( method, object,
5973 args->scalarInput, args->scalarInputCount,
5974 args->scalarOutput, &args->scalarOutputCount );
5975 break;
5976
5977 case kIOUCScalarIStructO:
5978 err = shim_io_connect_method_scalarI_structureO( method, object,
5979 args->scalarInput, args->scalarInputCount,
5980 (char *) args->structureOutput, &structureOutputSize );
5981 break;
5982
5983
5984 case kIOUCStructIStructO:
5985 err = shim_io_connect_method_structureI_structureO( method, object,
5986 (char *) args->structureInput, args->structureInputSize,
5987 (char *) args->structureOutput, &structureOutputSize );
5988 break;
5989
5990 default:
5991 err = kIOReturnBadArgument;
5992 break;
5993 }
5994 }
5995
5996 args->structureOutputSize = structureOutputSize;
5997
5998 return err;
5999 }
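/*
 * Illustrative sketch (not part of this file): the usual way a subclass uses
 * the dispatch path above is to override externalMethod(), point `dispatch` at
 * a static IOExternalMethodDispatch table indexed by selector, and let the
 * count/size checks in IOUserClient::externalMethod() validate the arguments
 * before the action runs. MyUserClient, kMyMethodCount and sMyMethods are
 * hypothetical names.
 *
 *   IOReturn
 *   MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
 *       IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
 *   {
 *       static const IOExternalMethodDispatch sMyMethods[kMyMethodCount] = {
 *           // function, checkScalarInputCount, checkStructureInputSize,
 *           // checkScalarOutputCount, checkStructureOutputSize
 *           { (IOExternalMethodAction) &MyUserClient::sDoSomething, 1, 0, 1, 0 },
 *       };
 *
 *       if (selector < kMyMethodCount) {
 *           dispatch = (IOExternalMethodDispatch *) &sMyMethods[selector];
 *           if (!target) {
 *               target = this;
 *           }
 *       }
 *       return super::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */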
6000
6001 #if __LP64__
6002 OSMetaClassDefineReservedUnused(IOUserClient, 0);
6003 OSMetaClassDefineReservedUnused(IOUserClient, 1);
6004 #else
6005 OSMetaClassDefineReservedUsed(IOUserClient, 0);
6006 OSMetaClassDefineReservedUsed(IOUserClient, 1);
6007 #endif
6008 OSMetaClassDefineReservedUnused(IOUserClient, 2);
6009 OSMetaClassDefineReservedUnused(IOUserClient, 3);
6010 OSMetaClassDefineReservedUnused(IOUserClient, 4);
6011 OSMetaClassDefineReservedUnused(IOUserClient, 5);
6012 OSMetaClassDefineReservedUnused(IOUserClient, 6);
6013 OSMetaClassDefineReservedUnused(IOUserClient, 7);
6014 OSMetaClassDefineReservedUnused(IOUserClient, 8);
6015 OSMetaClassDefineReservedUnused(IOUserClient, 9);
6016 OSMetaClassDefineReservedUnused(IOUserClient, 10);
6017 OSMetaClassDefineReservedUnused(IOUserClient, 11);
6018 OSMetaClassDefineReservedUnused(IOUserClient, 12);
6019 OSMetaClassDefineReservedUnused(IOUserClient, 13);
6020 OSMetaClassDefineReservedUnused(IOUserClient, 14);
6021 OSMetaClassDefineReservedUnused(IOUserClient, 15);