]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
bbe9448fd3b1eacaf7a155dc077aa7aef8ae83bf
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54
55 #if CONFIG_MACF
56
57 extern "C" {
58 #include <security/mac_framework.h>
59 };
60 #include <sys/kauth.h>
61
62 #define IOMACF_LOG 0
63
64 #endif /* CONFIG_MACF */
65
66 #include <IOKit/assert.h>
67
68 #include "IOServicePrivate.h"
69 #include "IOKitKernelInternal.h"
70
/* Helpers for moving scalar arguments across the user/kernel boundary. */
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x) ((io_user_reference_t)((UInt64)(x)))
#define REF32(x) ((int)(x))

/* Flag bits stored in the low bits of an async reference's port slot. */
enum{
	kIOUCAsync0Flags = 3ULL,          // mask covering both flag bits below
	kIOUCAsync64Flag = 1ULL,          // client task uses a 64-bit address map
	kIOUCAsyncErrorLoggedFlag = 2ULL  // an error for this ref was already logged
};
82
#if IOKITSTATS

/* IOStatistics instrumentation hooks (compiled in only with IOKITSTATS). */
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
		IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

/* No-op stubs when statistics gathering is disabled. */
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */

#if DEVELOPMENT || DEBUG

/*
 * Debug-only: overwrite this frame's saved return address with 'a' so
 * backtraces attribute the call appropriately; FAKE_STACK_FRAME_END()
 * restores the original value.  Relies on the saved return address being
 * at __builtin_frame_address(0)[1].
 */
#define FAKE_STACK_FRAME(a) \
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */

/* Number of slots in the 32-bit / 64-bit async reference arrays. */
#define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
#define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
130
131 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
132
133 extern "C" {
134 #include <mach/mach_traps.h>
135 #include <vm/vm_map.h>
136 } /* extern "C" */
137
// Forward declaration; the type is defined below via SLIST_HEAD().
struct IOMachPortHashList;

// IOMachPort::type is a UInt8, so every kobject type must fit in one byte.
static_assert(IKOT_MAX_TYPE <= 255);
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// One entry lives in the global `ports` hash table per (object, type) pair
// currently represented by a kobject port.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // hash-bucket linkage
	ipc_port_t port;                // kobject port standing for `object`
	OSObject* object;               // object the port represents
	UInt32 mscount;                 // make-send count handed out for `port`
	UInt8 holdDestroy;              // if set, releasePortForObject keeps the entry
	UInt8 type;                     // ipc_kobject_type_t, narrowed to one byte

	// Allocate an entry and its kobject port for (obj, type).
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash-table bucket for obj (type is currently unused in the hash).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of a bucket for the (obj, type) entry; NULL if absent.
	static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
174
#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)

// Protects the global object->port hash table and IOMachPort entries.
static IOLock * gIOObjectPortLock;
// NOTE(review): allocated in IOUserClient::initialize(); guards DriverKit
// user-server state — confirm at use sites further down the file.
IOLock * gIOUserServerLock;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

#if CONFIG_EMBEDDED
#define PORT_HASH_SIZE 256
#else
#define PORT_HASH_SIZE 4096
#endif /* CONFIG_EMBEDDED */

// Global object -> port hash table, indexed by hashed object pointer.
IOMachPortHashList ports[PORT_HASH_SIZE];
192
193 void
194 IOMachPortInitialize(void)
195 {
196 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
197 SLIST_INIT(&ports[i]);
198 }
199 }
200
201 IOMachPortHashList*
202 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
203 {
204 return &ports[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
205 }
206
207 IOMachPort*
208 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
209 {
210 IOMachPort *machPort;
211
212 SLIST_FOREACH(machPort, bucket, link) {
213 if (machPort->object == obj && machPort->type == type) {
214 return machPort;
215 }
216 }
217 return NULL;
218 }
219
220 IOMachPort*
221 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
222 {
223 IOMachPort *machPort = NULL;
224
225 machPort = new IOMachPort;
226 if (__improbable(machPort && !machPort->init())) {
227 return NULL;
228 }
229
230 machPort->object = obj;
231 machPort->type = (typeof(machPort->type))type;
232 machPort->port = iokit_alloc_object_port(obj, type);
233
234 obj->taggedRetain(OSTypeID(OSCollection));
235 machPort->mscount++;
236
237 return machPort;
238 }
239
/*
 * Handle a no-more-senders notification for obj's port.  Returns true if
 * the port entry was destroyed; returns false — passing back the current
 * make-send count in *mscount — when new send rights were created after
 * the notification was armed (i.e. the notification is stale).
 */
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the locked region and the callouts below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Stale if sends were handed out since the notification armed.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			machPort->release();
			// Balances the taggedRetain taken in withObjectAndType().
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit OSAction objects learn their client is gone (unlocked).
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	obj->release();

	return destroyed;
}
285
/*
 * Remove the port entry for (obj, type) unless holdDestroy is set.
 * Must not be used for connect ports (asserted); those are torn down via
 * IOUserClient::destroyUserReferences().
 */
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Transient ref so obj survives past the unlock below.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		machPort->release();
		// Balances the taggedRetain taken in withObjectAndType().
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
312
313 void
314 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
315 {
316 IOMachPort * machPort;
317
318 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
319 lck_mtx_lock(gIOObjectPortLock);
320
321 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
322
323 if (machPort) {
324 machPort->holdDestroy = true;
325 }
326
327 lck_mtx_unlock(gIOObjectPortLock);
328 }
329
// C-callable entry point: drop the user port (if any) for obj of the
// given kobject type.
void
IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
{
	IOMachPort::releasePortForObject(obj, type);
}
335
/*
 * Tear down user-space references to obj: drop its IKOT_IOKIT_OBJECT
 * port, then detach its IKOT_IOKIT_CONNECT port.  For a user client with
 * live memory mappings, the connect port is re-pointed at the mappings
 * object instead of being destroyed (see the panther/3160200 note).
 */
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhook the connect-port entry; drop the collection-tagged ref on obj.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Transfer the entry (and the live kobject port) to the
			// mappings object so user-held handles remain usable.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			uc->mappings->release();
			uc->mappings = NULL;
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			machPort->release();
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		machPort->release();
	}


end:

	obj->release();
}
393
/*
 * Insert a send right for obj's kobject port into 'task' and return the
 * resulting port name in that task's IPC space.
 */
mach_port_name_t
IOMachPort::makeSendRightForTask( task_t task,
    io_object_t obj, ipc_kobject_type_t type )
{
	return iokit_make_send_right( task, obj, type );
}
400
401 void
402 IOMachPort::free( void )
403 {
404 if (port) {
405 iokit_destroy_object_port( port );
406 }
407 super::free();
408 }
409
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411
/*
 * Wrapper exporting a kernel OSIterator to user space.  Serializes all
 * access with its own lock; borrowed-reference iteration is forbidden
 * (getNextObject asserts) — callers must use copyNextObject().
 */
class IOUserIterator : public OSIterator
{
	OSDeclareDefaultStructors(IOUserIterator);
public:
	OSObject * userIteratorObject;  // wrapped OSIterator (subclasses reuse the slot)
	IOLock * lock;                  // guards userIteratorObject access

	static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
	virtual bool init( void ) APPLE_KEXT_OVERRIDE;
	virtual void free() APPLE_KEXT_OVERRIDE;

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject();
};
428
429 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
430
/*
 * Base class for user-space notifications.  The armed IONotifier is
 * stored in the inherited userIteratorObject slot, aliased as holdNotify.
 */
class IOUserNotification : public IOUserIterator
{
	OSDeclareDefaultStructors(IOUserNotification);

#define holdNotify      userIteratorObject

public:

	virtual void free() APPLE_KEXT_OVERRIDE;

	// Swap in a new notifier; the previous one (if any) is removed.
	virtual void setNotification( IONotifier * obj );

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
446
447 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
448
449 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
450
451 IOUserIterator *
452 IOUserIterator::withIterator(OSIterator * iter)
453 {
454 IOUserIterator * me;
455
456 if (!iter) {
457 return NULL;
458 }
459
460 me = new IOUserIterator;
461 if (me && !me->init()) {
462 me->release();
463 me = NULL;
464 }
465 if (!me) {
466 return me;
467 }
468 me->userIteratorObject = iter;
469
470 return me;
471 }
472
473 bool
474 IOUserIterator::init( void )
475 {
476 if (!OSObject::init()) {
477 return false;
478 }
479
480 lock = IOLockAlloc();
481 if (!lock) {
482 return false;
483 }
484
485 return true;
486 }
487
488 void
489 IOUserIterator::free()
490 {
491 if (userIteratorObject) {
492 userIteratorObject->release();
493 }
494 if (lock) {
495 IOLockFree(lock);
496 }
497 OSObject::free();
498 }
499
500 void
501 IOUserIterator::reset()
502 {
503 IOLockLock(lock);
504 assert(OSDynamicCast(OSIterator, userIteratorObject));
505 ((OSIterator *)userIteratorObject)->reset();
506 IOLockUnlock(lock);
507 }
508
509 bool
510 IOUserIterator::isValid()
511 {
512 bool ret;
513
514 IOLockLock(lock);
515 assert(OSDynamicCast(OSIterator, userIteratorObject));
516 ret = ((OSIterator *)userIteratorObject)->isValid();
517 IOLockUnlock(lock);
518
519 return ret;
520 }
521
// Borrowed-reference iteration is not supported across the user
// boundary; callers must use copyNextObject().
OSObject *
IOUserIterator::getNextObject()
{
	assert(false);
	return NULL;
}
528
529 OSObject *
530 IOUserIterator::copyNextObject()
531 {
532 OSObject * ret = NULL;
533
534 IOLockLock(lock);
535 if (userIteratorObject) {
536 ret = ((OSIterator *)userIteratorObject)->getNextObject();
537 if (ret) {
538 ret->retain();
539 }
540 }
541 IOLockUnlock(lock);
542
543 return ret;
544 }
545
546 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
547 extern "C" {
548 // functions called from osfmk/device/iokit_rpc.c
549
550 void
551 iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
552 {
553 IOUserClient * uc;
554
555 if (!obj) {
556 return;
557 }
558
559 if ((IKOT_IOKIT_CONNECT == type)
560 && (uc = OSDynamicCast(IOUserClient, obj))) {
561 OSIncrementAtomic(&uc->__ipc);
562 }
563
564 obj->retain();
565 }
566
567 void
568 iokit_remove_reference( io_object_t obj )
569 {
570 if (obj) {
571 obj->release();
572 }
573 }
574
/*
 * Drop one IPC reference on obj.  For user clients this balances
 * iokit_add_reference(): when the in-flight IPC count reaches zero on an
 * inactive client whose finalize was deferred (__ipcFinal set by
 * finalizeUserReferences), schedule the finalize now.
 */
void
iokit_remove_connect_reference( io_object_t obj )
{
	IOUserClient * uc;
	bool finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// OSDecrementAtomic returns the pre-decrement value, so 1 means
		// this was the last in-flight IPC.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
600
/*
 * Decide whether a user client may finalize immediately.  If IPC calls
 * are still in flight (__ipc != 0), set __ipcFinal so the last
 * iokit_remove_connect_reference() schedules the finalize, and return
 * false; otherwise return true.
 */
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
616
/*
 * Look up — or lazily create — the kobject port for (obj, type) and
 * return it with an extra port reference; NULL on allocation failure.
 * Each successful lookup bumps the entry's make-send count.
 */
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First send right for this (obj, type): create the entry.
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		machPort->mscount++;
	}

	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
647
/*
 * No-senders handling for an IOKit port.  If the entry really is gone
 * (per noMoreSendersForObject) run the type-specific death action:
 * user clients get clientDied(), memory maps get taskDied(), and user
 * notifications are disarmed.  Returns kIOReturnNotReady when the
 * notification was stale.
 */
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient * client;
	IOMemoryMap * map;
	IOUserNotification * notify;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	if (IKOT_IOKIT_CONNECT == type) {
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			// Serialize clientDied against other client entry points.
			IOLockLock(client->lock);
			client->clientDied();
			IOLockUnlock(client->lock);
		}
	} else if (IKOT_IOKIT_OBJECT == type) {
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
	}

	return kIOReturnSuccess;
}
677 }; /* extern "C" */
678
679 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
680
/*
 * Notification that queues matched IOService objects and pings the
 * client's wake port when the first one arrives after the client last
 * drained the queue.
 */
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Wire format of the ping sent to the client's wake port.
	struct PingMsg {
		mach_msg_header_t msgHdr;
		OSNotificationHeader64 notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };  // cap on queued services

	PingMsg * pingMsg;    // preallocated message, built in init()
	vm_size_t msgSize;    // allocated size of pingMsg
	OSArray * newSet;     // services queued for the client
	bool armed;           // send a ping on next arrival?
	bool ipcLogged;       // one-shot send-failure log flag

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline used as the IOService notification handler.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
713
/*
 * Interest notification that forwards IOService messages (messageType
 * plus an argument payload) to the client's wake port, including a port
 * descriptor for the provider.
 */
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Wire format: complex message carrying one provider port.
	struct PingMsg {
		mach_msg_header_t msgHdr;
		mach_msg_body_t msgBody;
		mach_msg_port_descriptor_t ports[1];
		OSNotificationHeader64 notifyHeader __attribute__ ((packed));
	};

	PingMsg * pingMsg;    // preallocated template, built in init()
	vm_size_t msgSize;    // allocated size of pingMsg
	uint8_t clientIs64;   // client task's pointer width (affects arg packing)
	int owningPID;        // pid captured at registration time
	bool ipcLogged;       // one-shot send-failure log flag

public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    vm_size_t extraSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	void invalidatePort(void);

	// Static trampoline used as the IOService interest handler.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
751
752 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
753
#undef super
#define super IOUserIterator
OSDefineMetaClass( IOUserNotification, IOUserIterator );
OSDefineAbstractStructors( IOUserNotification, IOUserIterator );

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Detach the armed IONotifier (if any) before tearing down, so its
 * handler can no longer fire into a dying object.
 */
void
IOUserNotification::free( void )
{
	if (holdNotify) {
		assert(OSDynamicCast(IONotifier, holdNotify));
		((IONotifier *)holdNotify)->remove();
		holdNotify = NULL;
	}
	// can't be in handler now

	super::free();
}
773
774
775 void
776 IOUserNotification::setNotification( IONotifier * notify )
777 {
778 OSObject * previousNotify;
779
780 IOLockLock( gIOObjectPortLock);
781
782 previousNotify = holdNotify;
783 holdNotify = notify;
784
785 IOLockUnlock( gIOObjectPortLock);
786
787 if (previousNotify) {
788 assert(OSDynamicCast(IONotifier, previousNotify));
789 ((IONotifier *)previousNotify)->remove();
790 }
791 }
792
// Notifications have no rewindable position; reset is a no-op.
void
IOUserNotification::reset()
{
	// nothing to rewind
}

// A notification iterator is always considered valid.
bool
IOUserNotification::isValid()
{
	return true;
}
804
805 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
806
#undef super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Build the preallocated ping message sent to the client's wake port.
 * referenceSize is caller-controlled and must fit OSAsyncReference64;
 * the message is sized to carry exactly that reference.  On failure the
 * caller's release triggers free(), which cleans up partial state.
 */
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// Reject oversized references: bounds the bcopy below.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port = port;
	pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size = msgSize;
	pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;

	pingMsg->notifyHeader.size = 0;
	pingMsg->notifyHeader.type = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
852
// Called when the client's wake port goes away: stop addressing pings to it.
void
IOServiceUserNotification::invalidatePort(void)
{
	if (pingMsg) {
		pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
	}
}
860
/*
 * Snapshot the resources, run super::free() first (which removes the
 * notifier so handler() can no longer run against this object), then
 * release the send right, the message buffer, and the queued set.
 */
void
IOServiceUserNotification::free( void )
{
	PingMsg * _pingMsg;
	vm_size_t _msgSize;
	OSArray * _newSet;

	_pingMsg = pingMsg;
	_msgSize = msgSize;
	_newSet = newSet;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree(_pingMsg, _msgSize);
	}

	if (_newSet) {
		_newSet->release();
	}
}
885
// Static trampoline: forward the matching callout to the instance handler.
bool
IOServiceUserNotification::_handler( void * target,
    void * ref, IOService * newService, IONotifier * notifier )
{
	return ((IOServiceUserNotification *) target)->handler( ref, newService );
}
892
/*
 * Matching callout: queue newService (up to kMaxOutstanding) and, if
 * this is the first arrival since the client last drained the queue,
 * send a ping to the client's wake port so it comes back to collect.
 */
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int count;
	kern_return_t kr;
	ipc_port_t port = NULL;
	bool sendPing = false;

	IOTakeLock( lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		newSet->setObject( newService );
		// Ping only when armed and the queue was previously empty.
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( lock );

	if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
		// Keep the terminated service's port entry until the client sees it.
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
			pingMsg->msgHdr.msgh_local_port = port;
		} else {
			pingMsg->msgHdr.msgh_local_port = NULL;
		}

		kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
		    pingMsg->msgHdr.msgh_size,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    0);
		if (port) {
			iokit_release_port( port );
		}

		// Log send failures only once per notification object.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
// Unsupported: clients drain queued services via copyNextObject().
OSObject *
IOServiceUserNotification::getNextObject()
{
	assert(false);
	return NULL;
}
947
948 OSObject *
949 IOServiceUserNotification::copyNextObject()
950 {
951 unsigned int count;
952 OSObject * result;
953
954 IOLockLock(lock);
955
956 count = newSet->getCount();
957 if (count) {
958 result = newSet->getObject( count - 1 );
959 result->retain();
960 newSet->removeObject( count - 1);
961 } else {
962 result = NULL;
963 armed = true;
964 }
965
966 IOLockUnlock(lock);
967
968 return result;
969 }
970
971 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
972
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Build the preallocated complex message template used to forward
 * interest messages.  referenceSize must fit OSAsyncReference64; the
 * port descriptor for the provider is filled in by handler().
 */
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, vm_size_t extraSize,
    bool client64 )
{
	if (!super::init()) {
		return false;
	}

	// Reject oversized references: bounds the bcopy below.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	// Capture the registering process for kIOMessageCopyClientID replies.
	owningPID = proc_selfpid();

	extraSize += sizeof(IOServiceInterestContent64);
	msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port = port;
	pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
	    | MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size = msgSize;
	pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;

	pingMsg->msgBody.msgh_descriptor_count = 1;

	// Provider port slot; name is set per-message in handler().
	pingMsg->ports[0].name = NULL;
	pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
	pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;

	pingMsg->notifyHeader.size = extraSize;
	pingMsg->notifyHeader.type = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
1023
// Called when the client's wake port goes away: stop addressing pings to it.
void
IOServiceMessageUserNotification::invalidatePort(void)
{
	if (pingMsg) {
		pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
	}
}
1031
/*
 * Snapshot the message state, run super::free() first (removes the
 * notifier so handler() can no longer run), then release the send right
 * and the message buffer.
 */
void
IOServiceMessageUserNotification::free( void )
{
	PingMsg * _pingMsg;
	vm_size_t _msgSize;

	_pingMsg = pingMsg;
	_msgSize = msgSize;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree( _pingMsg, _msgSize);
	}
}
1050
// Static trampoline: forward the interest callout to the instance handler.
IOReturn
IOServiceMessageUserNotification::_handler( void * target, void * ref,
    UInt32 messageType, IOService * provider,
    void * argument, vm_size_t argSize )
{
	return ((IOServiceMessageUserNotification *) target)->handler(
		ref, messageType, provider, argument, argSize);
}
1059
/*
 * Interest callout: copy the preallocated template, append messageType
 * plus the argument payload, attach a port for the provider, and send
 * the message to the client's wake port.
 */
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	enum { kLocalMsgSize = 0x100 };
	uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
	void * allocMsg;
	kern_return_t kr;
	vm_size_t argSize;
	vm_size_t thisMsgSize;
	ipc_port_t thisPort, providerPort;
	struct PingMsg * thisMsg;
	IOServiceInterestContent64 * data;

	// Special query: hand back the owning pid instead of messaging.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload: the argument travels by value, sized per client ABI.
		if (clientIs64) {
			argSize = sizeof(data->messageArgument[0]);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp caller-supplied payload to the permitted maximum.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	natural_t type;
	type = pingMsg->notifyHeader.type;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	thisMsgSize = msgSize
	    + sizeof(IOServiceInterestContent64)
	    - sizeof(data->messageArgument)
	    + argSize;

	// Use the on-stack buffer when the message fits, else heap-allocate.
	if (thisMsgSize > sizeof(stackMsg)) {
		allocMsg = IOMalloc(thisMsgSize);
		if (!allocMsg) {
			return kIOReturnNoMemory;
		}
		thisMsg = (typeof(thisMsg))allocMsg;
	} else {
		allocMsg = NULL;
		thisMsg = (typeof(thisMsg))stackMsg;
	}

	bcopy(pingMsg, thisMsg, msgSize);
	thisMsg->notifyHeader.type = type;
	data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
	// == pingMsg->notifyHeader.content;
	data->messageType = messageType;

	if (callerArgSize == 0) {
		data->messageArgument[0] = (io_user_reference_t) messageArgument;
		if (!clientIs64) {
			// Replicate the 32-bit value into the upper word as well.
			data->messageArgument[0] |= (data->messageArgument[0] << 32);
		}
	} else {
		bcopy( messageArgument, data->messageArgument, callerArgSize );
		// Zero the round-up padding so no kernel data leaks to user space.
		bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
	}

	thisMsg->notifyHeader.type = type;
	thisMsg->msgHdr.msgh_size = thisMsgSize;

	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisMsg->ports[0].name = providerPort;
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
	thisMsg->msgHdr.msgh_local_port = thisPort;

	kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
	    thisMsg->msgHdr.msgh_size,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    0);
	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (allocMsg) {
		IOFree(allocMsg, thisMsgSize);
	}

	// Log send failures only once per notification object.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1162
// Message notifications do not queue objects; iteration yields nothing.
OSObject *
IOServiceMessageUserNotification::getNextObject()
{
	return NULL;
}

OSObject *
IOServiceMessageUserNotification::copyNextObject()
{
	return NULL;
}
1174
1175 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1176
1177 #undef super
1178 #define super IOService
1179 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1180
1181 IOLock * gIOUserClientOwnersLock;
1182
1183 void
1184 IOUserClient::initialize( void )
1185 {
1186 gIOObjectPortLock = IOLockAlloc();
1187 gIOUserClientOwnersLock = IOLockAlloc();
1188 gIOUserServerLock = IOLockAlloc();
1189 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1190 }
1191
/*
 * Pack (wakePort, callback, refcon) into a 32-bit async reference.
 * The low flag bits already stored in the port slot are preserved.
 */
void
IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
    mach_port_t wakePort,
    void *callback, void *refcon)
{
	asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}
1202
/*
 * 64-bit variant: pack (wakePort, callback, refcon) into an
 * OSAsyncReference64, preserving the existing low flag bits.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
	asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1213
1214 void
1215 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1216 mach_port_t wakePort,
1217 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1218 {
1219 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1220 if (vm_map_is_64bit(get_task_map(task))) {
1221 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1222 }
1223 }
1224
1225 static OSDictionary *
1226 CopyConsoleUser(UInt32 uid)
1227 {
1228 OSArray * array;
1229 OSDictionary * user = NULL;
1230
1231 if ((array = OSDynamicCast(OSArray,
1232 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1233 for (unsigned int idx = 0;
1234 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1235 idx++) {
1236 OSNumber * num;
1237
1238 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1239 && (uid == num->unsigned32BitValue())) {
1240 user->retain();
1241 break;
1242 }
1243 }
1244 array->release();
1245 }
1246 return user;
1247 }
1248
1249 static OSDictionary *
1250 CopyUserOnConsole(void)
1251 {
1252 OSArray * array;
1253 OSDictionary * user = NULL;
1254
1255 if ((array = OSDynamicCast(OSArray,
1256 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1257 for (unsigned int idx = 0;
1258 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1259 idx++) {
1260 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1261 user->retain();
1262 break;
1263 }
1264 }
1265 array->release();
1266 }
1267 return user;
1268 }
1269
1270 IOReturn
1271 IOUserClient::clientHasAuthorization( task_t task,
1272 IOService * service )
1273 {
1274 proc_t p;
1275
1276 p = (proc_t) get_bsdtask_info(task);
1277 if (p) {
1278 uint64_t authorizationID;
1279
1280 authorizationID = proc_uniqueid(p);
1281 if (authorizationID) {
1282 if (service->getAuthorizationID() == authorizationID) {
1283 return kIOReturnSuccess;
1284 }
1285 }
1286 }
1287
1288 return kIOReturnNotPermitted;
1289 }
1290
// Check whether the task identified by 'securityToken' holds the named
// privilege. 'securityToken' is either a task_t or, for the secure-console
// check, an IOUCProcessToken. Returns kIOReturnSuccess when privileged,
// kIOReturnNotPrivileged otherwise, kIOReturnUnsupported for an unknown
// privilege name.
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: granted unless the current task is GPU-denied.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console-session privilege: the task's audit session id must match
	// the session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console check the token wraps both a task and a pid.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: effective uid (token.val[0]) must be root (0).
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the task's uid must own some console session.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the uid's session must be on-console; for the
		// secure-console variant the secure-input pid must also match.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1385
// Copy the code-signing entitlements of 'task' as an OSDictionary,
// retained for the caller, or NULL on any failure. Tries the pre-parsed
// dictionary first, then falls back to unserializing the raw XML blob
// (bounded to MAX_ENTITLEMENTS_LEN bytes).
OSDictionary *
IOUserClient::copyClientEntitlements(task_t task)
{
#define MAX_ENTITLEMENTS_LEN    (128 * 1024)

	proc_t p = NULL;
	pid_t pid = 0;
	size_t len = 0;
	void *entitlements_blob = NULL;
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSDictionary *entitlements = NULL;
	OSString *errorString = NULL;

	p = (proc_t)get_bsdtask_info(task);
	if (p == NULL) {
		goto fail;
	}
	pid = proc_pid(p);

	// Fast path: an already-parsed entitlements dictionary.
	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
		if (entitlements) {
			return entitlements;
		}
	}

	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
		goto fail;
	}

	// The blob must be larger than its generic header.
	if (len <= offsetof(CS_GenericBlob, data)) {
		goto fail;
	}

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n",
		    proc_best_name(p), pid, len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob.  Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL) {
		goto fail;
	}
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements for %s[%u]: %s\n",
		    proc_best_name(p), pid, errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL) {
		goto fail;
	}

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL) {
		goto fail;
	}
	// Ownership moved into 'entitlements'; clear so cleanup won't release it.
	entitlements_obj = NULL;

fail:
	// Common cleanup; on success only the scratch buffer is freed.
	if (entitlements_data != NULL) {
		IOFree(entitlements_data, len + 1);
	}
	if (entitlements_obj != NULL) {
		entitlements_obj->release();
	}
	if (errorString != NULL) {
		errorString->release();
	}
	return entitlements;
}
1471
1472 OSObject *
1473 IOUserClient::copyClientEntitlement( task_t task,
1474 const char * entitlement )
1475 {
1476 OSDictionary *entitlements;
1477 OSObject *value;
1478
1479 entitlements = copyClientEntitlements(task);
1480 if (entitlements == NULL) {
1481 return NULL;
1482 }
1483
1484 /* Fetch the entitlement value from the dictionary. */
1485 value = entitlements->getObject(entitlement);
1486 if (value != NULL) {
1487 value->retain();
1488 }
1489
1490 entitlements->release();
1491 return value;
1492 }
1493
1494 bool
1495 IOUserClient::init()
1496 {
1497 if (getPropertyTable() || super::init()) {
1498 return reserve();
1499 }
1500
1501 return false;
1502 }
1503
1504 bool
1505 IOUserClient::init(OSDictionary * dictionary)
1506 {
1507 if (getPropertyTable() || super::init(dictionary)) {
1508 return reserve();
1509 }
1510
1511 return false;
1512 }
1513
1514 bool
1515 IOUserClient::initWithTask(task_t owningTask,
1516 void * securityID,
1517 UInt32 type )
1518 {
1519 if (getPropertyTable() || super::init()) {
1520 return reserve();
1521 }
1522
1523 return false;
1524 }
1525
1526 bool
1527 IOUserClient::initWithTask(task_t owningTask,
1528 void * securityID,
1529 UInt32 type,
1530 OSDictionary * properties )
1531 {
1532 bool ok;
1533
1534 ok = super::init( properties );
1535 ok &= initWithTask( owningTask, securityID, type );
1536
1537 return ok;
1538 }
1539
1540 bool
1541 IOUserClient::reserve()
1542 {
1543 if (!reserved) {
1544 reserved = IONew(ExpansionData, 1);
1545 if (!reserved) {
1546 return false;
1547 }
1548 }
1549 setTerminateDefer(NULL, true);
1550 IOStatisticsRegisterCounter();
1551
1552 return true;
1553 }
1554
// One (task, user client) ownership record. Each record is linked on two
// queues: the task's list (taskLink, headed by task_io_user_clients())
// and the user client's 'owners' list (ucLink).
struct IOUserClientOwner {
	task_t task;
	queue_chain_t taskLink;
	IOUserClient * uc;
	queue_chain_t ucLink;
};
1561
// Record 'task' as an owner of this user client, so that app-suspend
// state changes and task termination can be propagated. Idempotent per
// task; returns kIOReturnNoMemory if the tracking record can't be made.
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// 'owners' is lazily initialized; a NULL next marks "never used".
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			// Already registered for this task.
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IONew(IOUserClientOwner, 1);
		if (!owner) {
			ret = kIOReturnNoMemory;
		} else {
			owner->task = task;
			owner->uc = this;
			// Link the record on both the client's and the task's lists.
			queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
			queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
			if (messageAppSuspended) {
				task_set_message_app_suspended(task, true);
			}
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1605
// Tear down all ownership records for this user client (called when the
// last send right goes away). For each owning task, recompute whether any
// of its remaining user clients still wants app-suspend messages.
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			// Unlink from the task's list first, then scan what remains.
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IODelete(owner, IOUserClientOwner, 1);
		}
		// Return 'owners' to the uninitialized state (see registerOwner()).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1637
1638
// Called when a task's app-suspended state changes. Collects (under the
// owners lock) every user client of the task that opted in via
// messageAppSuspended, then delivers kIOMessageTaskAppSuspendedChange to
// each outside the lock.
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Deliver messages after dropping the lock.
	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				    uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1686
// Called at task termination: unlink every ownership record of the task.
// User clients that lose their last owner are chained into a private
// singly-linked list (reusing owners.prev) and, after the lock is
// dropped, get clientDied() unless already closed.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IODelete(owner, IOUserClientOwner, 1);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the dead list outside the lock and notify each client.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1730
// Release resources held by the user client. Ownership tracking must
// already be torn down (see noMoreSenders()/iokit_task_terminate()).
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IOLockFree(lock);
	}

	IOStatisticsUnregisterCounter();

	// Both NULL is the "never used / fully torn down" owners state.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IODelete(reserved, ExpansionData, 1);
	}

	super::free();
}
1752
1753 IOReturn
1754 IOUserClient::clientDied( void )
1755 {
1756 IOReturn ret = kIOReturnNotReady;
1757
1758 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1759 ret = clientClose();
1760 }
1761
1762 return ret;
1763 }
1764
// Default clientClose(): subclasses override to release the connection.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1770
// Default getService(): subclasses override to return their provider.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1776
// Default notification-port registration (32-bit refCon); override point.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32      /* type */,
	UInt32      /* refCon */)
{
	return kIOReturnUnsupported;
}
1785
// 64-bit refCon variant: forwards to the legacy overload, truncating the
// refCon to 32 bits (explicit cast below).
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32      type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1794
// Default notification-semaphore hook; subclasses may override.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1801
// Default client-to-client connection hook; subclasses may override.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1807
// Default shared-memory hook; subclasses override to supply a memory
// descriptor for the given type (consumed by mapClientMemory64()).
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1815
#if !__LP64__
// Legacy 32-bit mapping entry point; always returns NULL here.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1827
1828 IOMemoryMap *
1829 IOUserClient::mapClientMemory64(
1830 IOOptionBits type,
1831 task_t task,
1832 IOOptionBits mapFlags,
1833 mach_vm_address_t atAddress )
1834 {
1835 IOReturn err;
1836 IOOptionBits options = 0;
1837 IOMemoryDescriptor * memory = NULL;
1838 IOMemoryMap * map = NULL;
1839
1840 err = clientMemoryForType((UInt32) type, &options, &memory );
1841
1842 if (memory && (kIOReturnSuccess == err)) {
1843 FAKE_STACK_FRAME(getMetaClass());
1844
1845 options = (options & ~kIOMapUserOptionsMask)
1846 | (mapFlags & kIOMapUserOptionsMask);
1847 map = memory->createMappingInTask( task, atAddress, options );
1848 memory->release();
1849
1850 FAKE_STACK_FRAME_END();
1851 }
1852
1853 return map;
1854 }
1855
1856 IOReturn
1857 IOUserClient::exportObjectToClient(task_t task,
1858 OSObject *obj, io_object_t *clientObj)
1859 {
1860 mach_port_name_t name;
1861
1862 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1863
1864 *clientObj = (io_object_t)(uintptr_t) name;
1865
1866 if (obj) {
1867 obj->release();
1868 }
1869
1870 return kIOReturnSuccess;
1871 }
1872
1873 IOReturn
1874 IOUserClient::copyPortNameForObjectInTask(task_t task,
1875 OSObject *obj, mach_port_name_t * port_name)
1876 {
1877 mach_port_name_t name;
1878
1879 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1880
1881 *(mach_port_name_t *) port_name = name;
1882
1883 return kIOReturnSuccess;
1884 }
1885
1886 IOReturn
1887 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1888 OSObject **obj)
1889 {
1890 OSObject * object;
1891
1892 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1893
1894 *obj = object;
1895
1896 return object ? kIOReturnSuccess : kIOReturnIPCError;
1897 }
1898
// Adjust the user-reference count on a send right held by 'task'.
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
1904
// Legacy external-method lookup; default has no methods.
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
1910
// Legacy async external-method lookup; default has no methods.
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
1916
// Legacy trap lookup; default has no traps.
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
1923
1924 #pragma clang diagnostic push
1925 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1926
1927 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1928 // functions can break clients of kexts implementing getExternalMethodForIndex()
1929 IOExternalMethod *
1930 IOUserClient::
1931 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1932 {
1933 IOExternalMethod *method = getExternalMethodForIndex(index);
1934
1935 if (method) {
1936 *targetP = (IOService *) method->object;
1937 }
1938
1939 return method;
1940 }
1941
1942 IOExternalAsyncMethod *
1943 IOUserClient::
1944 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1945 {
1946 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1947
1948 if (method) {
1949 *targetP = (IOService *) method->object;
1950 }
1951
1952 return method;
1953 }
1954
1955 IOExternalTrap *
1956 IOUserClient::
1957 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1958 {
1959 IOExternalTrap *trap = getExternalTrapForIndex(index);
1960
1961 if (trap) {
1962 *targetP = trap->object;
1963 }
1964
1965 return trap;
1966 }
1967 #pragma clang diagnostic pop
1968
1969 IOReturn
1970 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1971 {
1972 mach_port_t port;
1973 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1974
1975 if (MACH_PORT_NULL != port) {
1976 iokit_release_port_send(port);
1977 }
1978
1979 return kIOReturnSuccess;
1980 }
1981
1982 IOReturn
1983 IOUserClient::releaseNotificationPort(mach_port_t port)
1984 {
1985 if (MACH_PORT_NULL != port) {
1986 iokit_release_port_send(port);
1987 }
1988
1989 return kIOReturnSuccess;
1990 }
1991
1992 IOReturn
1993 IOUserClient::sendAsyncResult(OSAsyncReference reference,
1994 IOReturn result, void *args[], UInt32 numArgs)
1995 {
1996 OSAsyncReference64 reference64;
1997 io_user_reference_t args64[kMaxAsyncArgs];
1998 unsigned int idx;
1999
2000 if (numArgs > kMaxAsyncArgs) {
2001 return kIOReturnMessageTooLarge;
2002 }
2003
2004 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2005 reference64[idx] = REF64(reference[idx]);
2006 }
2007
2008 for (idx = 0; idx < numArgs; idx++) {
2009 args64[idx] = REF64(args[idx]);
2010 }
2011
2012 return sendAsyncResult64(reference64, result, args64, numArgs);
2013 }
2014
// Send an async completion with explicit options (e.g. the droppable
// kIOUserNotifyOptionCanDrop behavior in _sendAsyncResult64()).
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2021
// Send an async completion with default options (non-droppable).
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2028
// Build and send the async completion Mach message to the reply port
// stored in the async reference. The message layout (32- vs 64-bit) is
// selected by kIOUCAsync64Flag in reference[0]. With
// kIOUserNotifyOptionCanDrop the send may time out on a full queue;
// otherwise a full queue is an error.
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader     notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t                 args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64   notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg      replyMsg;
	mach_port_t   replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port  = NULL;
	replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit client: io_user_reference_t args; size trimmed to the
		// number of args actually sent.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		unsigned int idx;

		// 32-bit client: reference and args are narrowed via REF32.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		for (idx = 0; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log a send failure at most once per reference.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2117
2118
2119 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2120
2121 extern "C" {
// Cast 'obj' to 'cls' via OSDynamicCast into a new local 'out'; returns
// kIOReturnBadArgument from the enclosing function on type mismatch.
#define CHECK(cls, obj, out)                      \
	cls * out;                                \
	if( !(out = OSDynamicCast( cls, obj)))    \
	    return( kIOReturnBadArgument )

// As CHECK, but 'obj' must be an IOUserIterator whose wrapped
// userIteratorObject is a 'cls'; declares locals 'oIter' and 'out'.
#define CHECKLOCKED(cls, obj, out)                              \
	IOUserIterator * oIter;                                 \
	cls * out;                                              \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))      \
	    return (kIOReturnBadArgument);                      \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	    return (kIOReturnBadArgument)
2134
2135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2136
2137 // Create a vm_map_copy_t or kalloc'ed data for memory
2138 // to be copied out. ipc will free after the copyout.
2139
2140 static kern_return_t
2141 copyoutkdata( const void * data, vm_size_t len,
2142 io_buf_ptr_t * buf )
2143 {
2144 kern_return_t err;
2145 vm_map_copy_t copy;
2146
2147 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2148 false /* src_destroy */, &copy);
2149
2150 assert( err == KERN_SUCCESS );
2151 if (err == KERN_SUCCESS) {
2152 *buf = (char *) copy;
2153 }
2154
2155 return err;
2156 }
2157
2158 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2159
/* Routine io_server_version */
// Report the IOKit server interface version; master_port is unused.
kern_return_t
is_io_server_version(
	mach_port_t master_port,
	uint64_t *version)
{
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2169
2170 /* Routine io_object_get_class */
2171 kern_return_t
2172 is_io_object_get_class(
2173 io_object_t object,
2174 io_name_t className )
2175 {
2176 const OSMetaClass* my_obj = NULL;
2177
2178 if (!object) {
2179 return kIOReturnBadArgument;
2180 }
2181
2182 my_obj = object->getMetaClass();
2183 if (!my_obj) {
2184 return kIOReturnNotFound;
2185 }
2186
2187 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2188
2189 return kIOReturnSuccess;
2190 }
2191
/* Routine io_object_get_superclass */
// Look up the metaclass named 'obj_name' and copy its superclass's name
// into 'class_name'. Requires the master device port.
kern_return_t
is_io_object_get_superclass(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t class_name)
{
	IOReturn ret;
	const OSMetaClass * meta;
	const OSMetaClass * super;
	const OSSymbol * name;
	const char * cstr;

	if (!obj_name || !class_name) {
		return kIOReturnBadArgument;
	}
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	ret = kIOReturnNotFound;
	meta = NULL;
	// Single-pass lookup with break-on-failure; cleanup below runs for
	// every exit path.
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;
		}
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		super = meta->getSuperClass();
		if (!super) {
			break;
		}
		cstr = super->getClassName();
		if (!cstr) {
			break;
		}
		strlcpy(class_name, cstr, sizeof(io_name_t));
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);
	if (meta) {
		meta->releaseMetaClass();
	}

	return ret;
}
2242
2243 /* Routine io_object_get_bundle_identifier */
2244 kern_return_t
2245 is_io_object_get_bundle_identifier(
2246 mach_port_t master_port,
2247 io_name_t obj_name,
2248 io_name_t bundle_name)
2249 {
2250 IOReturn ret;
2251 const OSMetaClass * meta;
2252 const OSSymbol * name;
2253 const OSSymbol * identifier;
2254 const char * cstr;
2255
2256 if (!obj_name || !bundle_name) {
2257 return kIOReturnBadArgument;
2258 }
2259 if (master_port != master_device_port) {
2260 return kIOReturnNotPrivileged;
2261 }
2262
2263 ret = kIOReturnNotFound;
2264 meta = NULL;
2265 do{
2266 name = OSSymbol::withCString(obj_name);
2267 if (!name) {
2268 break;
2269 }
2270 meta = OSMetaClass::copyMetaClassWithName(name);
2271 if (!meta) {
2272 break;
2273 }
2274 identifier = meta->getKmodName();
2275 if (!identifier) {
2276 break;
2277 }
2278 cstr = identifier->getCStringNoCopy();
2279 if (!cstr) {
2280 break;
2281 }
2282 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2283 ret = kIOReturnSuccess;
2284 }while (false);
2285
2286 OSSafeReleaseNULL(name);
2287 if (meta) {
2288 meta->releaseMetaClass();
2289 }
2290
2291 return ret;
2292 }
2293
2294 /* Routine io_object_conforms_to */
2295 kern_return_t
2296 is_io_object_conforms_to(
2297 io_object_t object,
2298 io_name_t className,
2299 boolean_t *conforms )
2300 {
2301 if (!object) {
2302 return kIOReturnBadArgument;
2303 }
2304
2305 *conforms = (NULL != object->metaCast( className ));
2306
2307 return kIOReturnSuccess;
2308 }
2309
2310 /* Routine io_object_get_retain_count */
2311 kern_return_t
2312 is_io_object_get_retain_count(
2313 io_object_t object,
2314 uint32_t *retainCount )
2315 {
2316 if (!object) {
2317 return kIOReturnBadArgument;
2318 }
2319
2320 *retainCount = object->getRetainCount();
2321 return kIOReturnSuccess;
2322 }
2323
2324 /* Routine io_iterator_next */
2325 kern_return_t
2326 is_io_iterator_next(
2327 io_object_t iterator,
2328 io_object_t *object )
2329 {
2330 IOReturn ret;
2331 OSObject * obj;
2332 OSIterator * iter;
2333 IOUserIterator * uiter;
2334
2335 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2336 obj = uiter->copyNextObject();
2337 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2338 obj = iter->getNextObject();
2339 if (obj) {
2340 obj->retain();
2341 }
2342 } else {
2343 return kIOReturnBadArgument;
2344 }
2345
2346 if (obj) {
2347 *object = obj;
2348 ret = kIOReturnSuccess;
2349 } else {
2350 ret = kIOReturnNoDevice;
2351 }
2352
2353 return ret;
2354 }
2355
/* Routine io_iterator_reset */
// Rewind the iterator to its first element.
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	// CHECK declares 'iter' and rejects non-OSIterator arguments.
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2367
/* Routine io_iterator_is_valid */
// Report whether the iterator is still valid (its backing collection
// unchanged since creation/reset).
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	// CHECK declares 'iter' and rejects non-OSIterator arguments.
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2380
2381
// Unserialize a matching dictionary from 'matching' and test it against
// the service via passiveMatch(). Shared by the inband/ool/bin variants.
static kern_return_t
internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
	// CHECK declares 'service' and rejects non-IOService arguments.
	CHECK( IOService, _service, service );

	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		*matches = service->passiveMatch( dict );
		kr = kIOReturnSuccess;
	} else {
		// Not parseable as a dictionary.
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2411
/* Routine io_service_match_property_table */
// Legacy in-band string variant; no longer supported (use the ool/bin
// variants below).
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	return kIOReturnUnsupported;
}
2421
2422
/* Routine io_service_match_property_table_ool */
// Out-of-line variant: the matching dictionary arrives as a vm_map_copy_t.
// Note the split error reporting: the function result covers the copyout,
// *result covers the actual match.
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2448
/* Routine io_service_match_property_table_bin */
// In-band (inband struct) variant; delegates directly to the shared
// matcher.
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2459
// Unserialize an XML matching dictionary and return a user-exportable
// iterator over all currently matching services.  'matching' need not be
// NUL-terminated; matching_size must be non-zero (asserted below).
static kern_return_t
internal_io_service_get_matching_services(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_iterator_t *existing )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	// Only the master device port may enumerate services.
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		// Wrap the kernel iterator for user-space object accounting.
		*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
		kr = kIOReturnSuccess;
	} else {
		// Unserialization failed or produced a non-dictionary.
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();  // also drops dict when the cast succeeded
	}

	return kr;
}
2491
2492 /* Routine io_service_get_matching_services */
2493 kern_return_t
2494 is_io_service_get_matching_services(
2495 mach_port_t master_port,
2496 io_string_t matching,
2497 io_iterator_t *existing )
2498 {
2499 return kIOReturnUnsupported;
2500 }
2501
/* Routine io_service_get_matching_services_ool */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the caller's out-of-line matching data; on success the
	// vm_map_copy object is consumed.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;  // make the out object well-defined on failure
		*result = internal_io_service_get_matching_services(master_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2529
2530 /* Routine io_service_get_matching_services_bin */
2531 kern_return_t
2532 is_io_service_get_matching_services_bin(
2533 mach_port_t master_port,
2534 io_struct_inband_t matching,
2535 mach_msg_type_number_t matchingCnt,
2536 io_object_t *existing)
2537 {
2538 return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
2539 }
2540
2541
// Unserialize an XML matching dictionary and return the first matching
// service (retained), or kIOReturnNotFound when nothing matches.
static kern_return_t
internal_io_service_get_matching_service(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	// Only the master device port may look up services.
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		// copyMatchingService() returns a retained entry or NULL.
		*service = IOService::copyMatchingService( dict );
		kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
	} else {
		// Unserialization failed or produced a non-dictionary.
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();  // also drops dict when the cast succeeded
	}

	return kr;
}
2573
2574 /* Routine io_service_get_matching_service */
2575 kern_return_t
2576 is_io_service_get_matching_service(
2577 mach_port_t master_port,
2578 io_string_t matching,
2579 io_service_t *service )
2580 {
2581 return kIOReturnUnsupported;
2582 }
2583
/* Routine io_service_get_matching_service_ool */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Materialize the caller's out-of-line matching data; on success the
	// vm_map_copy object is consumed.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;  // make the out object well-defined on failure
		*result = internal_io_service_get_matching_service(master_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2611
2612 /* Routine io_service_get_matching_service_bin */
2613 kern_return_t
2614 is_io_service_get_matching_service_bin(
2615 mach_port_t master_port,
2616 io_struct_inband_t matching,
2617 mach_msg_type_number_t matchingCnt,
2618 io_object_t *service)
2619 {
2620 return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
2621 }
2622
2623 static kern_return_t
2624 internal_io_service_add_notification(
2625 mach_port_t master_port,
2626 io_name_t notification_type,
2627 const char * matching,
2628 size_t matching_size,
2629 mach_port_t port,
2630 void * reference,
2631 vm_size_t referenceSize,
2632 bool client64,
2633 io_object_t * notification )
2634 {
2635 IOServiceUserNotification * userNotify = NULL;
2636 IONotifier * notify = NULL;
2637 const OSSymbol * sym;
2638 OSDictionary * dict;
2639 IOReturn err;
2640 unsigned long int userMsgType;
2641
2642 if (master_port != master_device_port) {
2643 return kIOReturnNotPrivileged;
2644 }
2645
2646 do {
2647 err = kIOReturnNoResources;
2648
2649 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2650 return kIOReturnMessageTooLarge;
2651 }
2652
2653 if (!(sym = OSSymbol::withCString( notification_type ))) {
2654 err = kIOReturnNoResources;
2655 }
2656
2657 assert(matching_size);
2658 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2659 if (!dict) {
2660 err = kIOReturnBadArgument;
2661 continue;
2662 }
2663
2664 if ((sym == gIOPublishNotification)
2665 || (sym == gIOFirstPublishNotification)) {
2666 userMsgType = kIOServicePublishNotificationType;
2667 } else if ((sym == gIOMatchedNotification)
2668 || (sym == gIOFirstMatchNotification)) {
2669 userMsgType = kIOServiceMatchedNotificationType;
2670 } else if ((sym == gIOTerminatedNotification)
2671 || (sym == gIOWillTerminateNotification)) {
2672 userMsgType = kIOServiceTerminatedNotificationType;
2673 } else {
2674 userMsgType = kLastIOKitNotificationType;
2675 }
2676
2677 userNotify = new IOServiceUserNotification;
2678
2679 if (userNotify && !userNotify->init( port, userMsgType,
2680 reference, referenceSize, client64)) {
2681 userNotify->release();
2682 userNotify = NULL;
2683 }
2684 if (!userNotify) {
2685 continue;
2686 }
2687
2688 notify = IOService::addMatchingNotification( sym, dict,
2689 &userNotify->_handler, userNotify );
2690 if (notify) {
2691 *notification = userNotify;
2692 userNotify->setNotification( notify );
2693 err = kIOReturnSuccess;
2694 } else {
2695 err = kIOReturnUnsupported;
2696 }
2697 } while (false);
2698
2699 if ((kIOReturnSuccess != err) && userNotify) {
2700 userNotify->invalidatePort();
2701 userNotify->release();
2702 userNotify = NULL;
2703 }
2704
2705 if (sym) {
2706 sym->release();
2707 }
2708 if (dict) {
2709 dict->release();
2710 }
2711
2712 return err;
2713 }
2714
2715
2716 /* Routine io_service_add_notification */
2717 kern_return_t
2718 is_io_service_add_notification(
2719 mach_port_t master_port,
2720 io_name_t notification_type,
2721 io_string_t matching,
2722 mach_port_t port,
2723 io_async_ref_t reference,
2724 mach_msg_type_number_t referenceCnt,
2725 io_object_t * notification )
2726 {
2727 return kIOReturnUnsupported;
2728 }
2729
2730 /* Routine io_service_add_notification_64 */
2731 kern_return_t
2732 is_io_service_add_notification_64(
2733 mach_port_t master_port,
2734 io_name_t notification_type,
2735 io_string_t matching,
2736 mach_port_t wake_port,
2737 io_async_ref64_t reference,
2738 mach_msg_type_number_t referenceCnt,
2739 io_object_t *notification )
2740 {
2741 return kIOReturnUnsupported;
2742 }
2743
2744 /* Routine io_service_add_notification_bin */
2745 kern_return_t
2746 is_io_service_add_notification_bin
2747 (
2748 mach_port_t master_port,
2749 io_name_t notification_type,
2750 io_struct_inband_t matching,
2751 mach_msg_type_number_t matchingCnt,
2752 mach_port_t wake_port,
2753 io_async_ref_t reference,
2754 mach_msg_type_number_t referenceCnt,
2755 io_object_t *notification)
2756 {
2757 io_async_ref_t zreference;
2758
2759 if (referenceCnt > ASYNC_REF_COUNT) {
2760 return kIOReturnBadArgument;
2761 }
2762 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2763 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2764
2765 return internal_io_service_add_notification(master_port, notification_type,
2766 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2767 false, notification);
2768 }
2769
2770 /* Routine io_service_add_notification_bin_64 */
2771 kern_return_t
2772 is_io_service_add_notification_bin_64
2773 (
2774 mach_port_t master_port,
2775 io_name_t notification_type,
2776 io_struct_inband_t matching,
2777 mach_msg_type_number_t matchingCnt,
2778 mach_port_t wake_port,
2779 io_async_ref64_t reference,
2780 mach_msg_type_number_t referenceCnt,
2781 io_object_t *notification)
2782 {
2783 io_async_ref64_t zreference;
2784
2785 if (referenceCnt > ASYNC_REF64_COUNT) {
2786 return kIOReturnBadArgument;
2787 }
2788 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2789 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2790
2791 return internal_io_service_add_notification(master_port, notification_type,
2792 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2793 true, notification);
2794 }
2795
// Common worker for the _ool notification routines: copies the out-of-line
// matching data into the kernel map, forwards to
// internal_io_service_add_notification(), and reports that routine's status
// via *result (MIG requires KERN_SUCCESS once the copyout consumed the
// caller's vm_map_copy).
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( master_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2827
/* Routine io_service_add_notification_ool */
kern_return_t
is_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	io_async_ref_t zreference;

	if (referenceCnt > ASYNC_REF_COUNT) {
		return kIOReturnBadArgument;
	}
	// Copy the caller's async reference prefix and zero the unused tail
	// so no stale stack bytes leak into the notification state.
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(master_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
	    false, result, notification);
}
2853
/* Routine io_service_add_notification_ool_64 */
kern_return_t
is_io_service_add_notification_ool_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
	io_async_ref64_t zreference;

	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;
	}
	// Copy the caller's async reference prefix and zero the unused tail
	// so no stale stack bytes leak into the notification state.
	bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
	bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));

	return internal_io_service_add_notification_ool(master_port, notification_type,
	    matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
	    true, result, notification);
}
2879
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	// Forwards to the string-based routine (which now returns
	// kIOReturnUnsupported); kept only for binary compatibility.
	return is_io_service_add_notification( master_port, notification_type,
	    matching, port, &ref, 1, notification );
}
2894
2895
2896 static kern_return_t
2897 internal_io_service_add_interest_notification(
2898 io_object_t _service,
2899 io_name_t type_of_interest,
2900 mach_port_t port,
2901 void * reference,
2902 vm_size_t referenceSize,
2903 bool client64,
2904 io_object_t * notification )
2905 {
2906 IOServiceMessageUserNotification * userNotify = NULL;
2907 IONotifier * notify = NULL;
2908 const OSSymbol * sym;
2909 IOReturn err;
2910
2911 CHECK( IOService, _service, service );
2912
2913 err = kIOReturnNoResources;
2914 if ((sym = OSSymbol::withCString( type_of_interest ))) {
2915 do {
2916 userNotify = new IOServiceMessageUserNotification;
2917
2918 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2919 reference, referenceSize,
2920 kIOUserNotifyMaxMessageSize,
2921 client64 )) {
2922 userNotify->release();
2923 userNotify = NULL;
2924 }
2925 if (!userNotify) {
2926 continue;
2927 }
2928
2929 notify = service->registerInterest( sym,
2930 &userNotify->_handler, userNotify );
2931 if (notify) {
2932 *notification = userNotify;
2933 userNotify->setNotification( notify );
2934 err = kIOReturnSuccess;
2935 } else {
2936 err = kIOReturnUnsupported;
2937 }
2938
2939 sym->release();
2940 } while (false);
2941 }
2942
2943 if ((kIOReturnSuccess != err) && userNotify) {
2944 userNotify->invalidatePort();
2945 userNotify->release();
2946 userNotify = NULL;
2947 }
2948
2949 return err;
2950 }
2951
2952 /* Routine io_service_add_message_notification */
2953 kern_return_t
2954 is_io_service_add_interest_notification(
2955 io_object_t service,
2956 io_name_t type_of_interest,
2957 mach_port_t port,
2958 io_async_ref_t reference,
2959 mach_msg_type_number_t referenceCnt,
2960 io_object_t * notification )
2961 {
2962 io_async_ref_t zreference;
2963
2964 if (referenceCnt > ASYNC_REF_COUNT) {
2965 return kIOReturnBadArgument;
2966 }
2967 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2968 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2969
2970 return internal_io_service_add_interest_notification(service, type_of_interest,
2971 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
2972 }
2973
2974 /* Routine io_service_add_interest_notification_64 */
2975 kern_return_t
2976 is_io_service_add_interest_notification_64(
2977 io_object_t service,
2978 io_name_t type_of_interest,
2979 mach_port_t wake_port,
2980 io_async_ref64_t reference,
2981 mach_msg_type_number_t referenceCnt,
2982 io_object_t *notification )
2983 {
2984 io_async_ref64_t zreference;
2985
2986 if (referenceCnt > ASYNC_REF64_COUNT) {
2987 return kIOReturnBadArgument;
2988 }
2989 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2990 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2991
2992 return internal_io_service_add_interest_notification(service, type_of_interest,
2993 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
2994 }
2995
2996
2997 /* Routine io_service_acknowledge_notification */
2998 kern_return_t
2999 is_io_service_acknowledge_notification(
3000 io_object_t _service,
3001 natural_t notify_ref,
3002 natural_t response )
3003 {
3004 CHECK( IOService, _service, service );
3005
3006 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3007 (IOOptionBits) response );
3008 }
3009
/* Routine io_connect_get_notification_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	// Delegates to the user client subclass; the semaphore (if provided)
	// is returned for user space to wait on.
	return client->getNotificationSemaphore((UInt32) notification_type,
	    semaphore );
}
3023
3024 /* Routine io_registry_get_root_entry */
3025 kern_return_t
3026 is_io_registry_get_root_entry(
3027 mach_port_t master_port,
3028 io_object_t *root )
3029 {
3030 IORegistryEntry * entry;
3031
3032 if (master_port != master_device_port) {
3033 return kIOReturnNotPrivileged;
3034 }
3035
3036 entry = IORegistryEntry::getRegistryRoot();
3037 if (entry) {
3038 entry->retain();
3039 }
3040 *root = entry;
3041
3042 return kIOReturnSuccess;
3043 }
3044
3045 /* Routine io_registry_create_iterator */
3046 kern_return_t
3047 is_io_registry_create_iterator(
3048 mach_port_t master_port,
3049 io_name_t plane,
3050 uint32_t options,
3051 io_object_t *iterator )
3052 {
3053 if (master_port != master_device_port) {
3054 return kIOReturnNotPrivileged;
3055 }
3056
3057 *iterator = IOUserIterator::withIterator(
3058 IORegistryIterator::iterateOver(
3059 IORegistryEntry::getPlane( plane ), options ));
3060
3061 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3062 }
3063
/* Routine io_registry_entry_create_iterator */
kern_return_t
is_io_registry_entry_create_iterator(
	io_object_t registry_entry,
	io_name_t plane,
	uint32_t options,
	io_object_t *iterator )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	// Iterate the named plane rooted at this entry; 'options' is passed
	// through to IORegistryIterator.  Wrapped for export to user space.
	*iterator = IOUserIterator::withIterator(
		IORegistryIterator::iterateOver( entry,
		IORegistryEntry::getPlane( plane ), options ));

	return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
}
3080
/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED yields both 'iter' (the IORegistryIterator) and 'oIter'
	// (the user-object wrapper carrying the serializing lock).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against concurrent calls on the same user iterator.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
3094
/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED yields both 'iter' (the IORegistryIterator) and 'oIter'
	// (the user-object wrapper carrying the serializing lock).
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Serialize against concurrent calls on the same user iterator.
	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	// A false return from exitEntry() is reported as kIOReturnNoDevice.
	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3110
3111 /* Routine io_registry_entry_from_path */
3112 kern_return_t
3113 is_io_registry_entry_from_path(
3114 mach_port_t master_port,
3115 io_string_t path,
3116 io_object_t *registry_entry )
3117 {
3118 IORegistryEntry * entry;
3119
3120 if (master_port != master_device_port) {
3121 return kIOReturnNotPrivileged;
3122 }
3123
3124 entry = IORegistryEntry::fromPath( path );
3125
3126 *registry_entry = entry;
3127
3128 return kIOReturnSuccess;
3129 }
3130
3131
/* Routine io_registry_entry_from_path_ool */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t master_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;    // operation status, reported via *result
	kern_return_t err;  // MIG/VM status, returned directly

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	if (path[0]) {
		// Short paths arrive in-band; use them directly.
		cpath = path;
	} else {
		// Longer paths arrive out-of-line; validate the size, then map
		// the caller's vm_map_copy into the kernel map.
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// The out-of-line path must be NUL-terminated.
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3192
3193
3194 /* Routine io_registry_entry_in_plane */
3195 kern_return_t
3196 is_io_registry_entry_in_plane(
3197 io_object_t registry_entry,
3198 io_name_t plane,
3199 boolean_t *inPlane )
3200 {
3201 CHECK( IORegistryEntry, registry_entry, entry );
3202
3203 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3204
3205 return kIOReturnSuccess;
3206 }
3207
3208
3209 /* Routine io_registry_entry_get_path */
3210 kern_return_t
3211 is_io_registry_entry_get_path(
3212 io_object_t registry_entry,
3213 io_name_t plane,
3214 io_string_t path )
3215 {
3216 int length;
3217 CHECK( IORegistryEntry, registry_entry, entry );
3218
3219 length = sizeof(io_string_t);
3220 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3221 return kIOReturnSuccess;
3222 } else {
3223 return kIOReturnBadArgument;
3224 }
3225 }
3226
/* Routine io_registry_entry_get_path_ool */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	// Fast path: try to fit the path into the fixed inband buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		err = kIOReturnSuccess;
	} else {
		// Inband buffer too small: retry with a kMaxPath scratch buffer
		// and return the result out-of-line via copyoutkdata().
		length = kMaxPath;
		buf = IONew(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		if (buf) {
			// getPath() shrank 'length'; free with the original
			// allocation size (kMaxPath), not the shrunken value.
			IODelete(buf, char, kMaxPath);
		}
	}

	return err;
}
3266
3267
/* Routine io_registry_entry_get_name */
kern_return_t
is_io_registry_entry_get_name(
	io_object_t registry_entry,
	io_name_t name )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	// NOTE(review): strncpy does not NUL-terminate if getName() ever
	// returns a string >= sizeof(io_name_t); presumably registry names
	// are bounded by io_name_t — verify before relying on termination.
	strncpy( name, entry->getName(), sizeof(io_name_t));

	return kIOReturnSuccess;
}
3280
3281 /* Routine io_registry_entry_get_name_in_plane */
3282 kern_return_t
3283 is_io_registry_entry_get_name_in_plane(
3284 io_object_t registry_entry,
3285 io_name_t planeName,
3286 io_name_t name )
3287 {
3288 const IORegistryPlane * plane;
3289 CHECK( IORegistryEntry, registry_entry, entry );
3290
3291 if (planeName[0]) {
3292 plane = IORegistryEntry::getPlane( planeName );
3293 } else {
3294 plane = NULL;
3295 }
3296
3297 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3298
3299 return kIOReturnSuccess;
3300 }
3301
3302 /* Routine io_registry_entry_get_location_in_plane */
3303 kern_return_t
3304 is_io_registry_entry_get_location_in_plane(
3305 io_object_t registry_entry,
3306 io_name_t planeName,
3307 io_name_t location )
3308 {
3309 const IORegistryPlane * plane;
3310 CHECK( IORegistryEntry, registry_entry, entry );
3311
3312 if (planeName[0]) {
3313 plane = IORegistryEntry::getPlane( planeName );
3314 } else {
3315 plane = NULL;
3316 }
3317
3318 const char * cstr = entry->getLocation( plane );
3319
3320 if (cstr) {
3321 strncpy( location, cstr, sizeof(io_name_t));
3322 return kIOReturnSuccess;
3323 } else {
3324 return kIOReturnNotFound;
3325 }
3326 }
3327
3328 /* Routine io_registry_entry_get_registry_entry_id */
3329 kern_return_t
3330 is_io_registry_entry_get_registry_entry_id(
3331 io_object_t registry_entry,
3332 uint64_t *entry_id )
3333 {
3334 CHECK( IORegistryEntry, registry_entry, entry );
3335
3336 *entry_id = entry->getRegistryEntryID();
3337
3338 return kIOReturnSuccess;
3339 }
3340
/* Routine io_registry_entry_get_property_bytes */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny read access to individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then... pick out the raw bytes per concrete type.
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		if (!data->isSerializable()) {
			// Non-serializable data is not exposed; report zero bytes.
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		len = str->getLength() + 1;  // include the NUL terminator
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are rendered as the literal strings "Yes" / "No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian the significant bytes sit at the end of the
		// 64-bit value; advance past the unused leading bytes.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		// Unsupported property type for byte extraction.
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		if (*dataCnt < len) {
			// Caller's inband buffer is too small.
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3413
3414
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny read access to individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the XML-serialized value out-of-line to the caller.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		// Property type does not support serialization.
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3459
/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny read access to individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	// Search for the property within the named plane, honoring the
	// caller's iteration options (e.g. recursive lookup).
	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the XML-serialized value out-of-line to the caller.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		// Property type does not support serialization.
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3507
3508 /* Routine io_registry_entry_get_properties */
3509 kern_return_t
3510 is_io_registry_entry_get_properties(
3511 io_object_t registry_entry,
3512 io_buf_ptr_t *properties,
3513 mach_msg_type_number_t *propertiesCnt )
3514 {
3515 return kIOReturnUnsupported;
3516 }
3517
3518 #if CONFIG_MACF
3519
// State shared with the GetPropertiesEditor serialization callback.
struct GetPropertiesEditorRef {
	kauth_cred_t cred;       // credential MAC policy is evaluated against
	IORegistryEntry * entry; // entry whose properties are being serialized
	OSCollection * root;     // top-level collection, captured on first call
};

// OSSerialize editor callback: filters out top-level properties the
// caller's credential may not read (per MAC policy); nested values pass
// through unchecked.
static const OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	// The first container seen is treated as the property table itself;
	// only its direct members are access-checked.
	if (!ref->root) {
		ref->root = container;
	}
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;  // filtered: omit this property entirely
		}
	}
	if (value) {
		value->retain();  // the serializer consumes a reference
	}
	return value;
}
3548
3549 #endif /* CONFIG_MACF */
3550
/* Routine io_registry_entry_get_properties_bin */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	vm_size_t len;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// When MAC property filtering applies to this entry, install the
	// GetPropertiesEditor callback.  'ref' lives on this stack frame and
	// is only dereferenced during serializeProperties() below.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}

	if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the binary-serialized property table out-of-line.
		err = copyoutkdata(s->text(), len, properties);
	}
	s->release();

	return err;
}
3595
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny read access to individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Special key: return the list of property keys, not a value.
		obj = entry->copyPropertyKeys();
	} else {
		// Recursive lookup only when requested and a plane is named.
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = entry->copyProperty(property_name,
			    IORegistryEntry::getPlane(plane), options );
		} else {
			obj = entry->copyProperty(property_name);
		}
		// Some properties are consume-on-read; drop them once fetched.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the binary-serialized value out-of-line to the caller.
		err = copyoutkdata( s->text(), len, properties );
	} else {
		// Property type does not support serialization.
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3662
3663
/* Routine io_registry_entry_set_properties */
/*
 * Unserialize a caller-supplied XML property blob and apply it with
 * setProperties(). The MIG return value (err) reports only IPC/VM status;
 * the operation's own status is delivered through *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound kernel_map consumption for the inbound blob.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds: the copy
		// object is consumed here, so later failures are reported only
		// via *result, never via the MIG return value.
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			res = entry->setProperties( obj );
		}

		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
3720
3721 /* Routine io_registry_entry_get_child_iterator */
3722 kern_return_t
3723 is_io_registry_entry_get_child_iterator(
3724 io_object_t registry_entry,
3725 io_name_t plane,
3726 io_object_t *iterator )
3727 {
3728 CHECK( IORegistryEntry, registry_entry, entry );
3729
3730 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
3731 IORegistryEntry::getPlane( plane )));
3732
3733 return kIOReturnSuccess;
3734 }
3735
3736 /* Routine io_registry_entry_get_parent_iterator */
3737 kern_return_t
3738 is_io_registry_entry_get_parent_iterator(
3739 io_object_t registry_entry,
3740 io_name_t plane,
3741 io_object_t *iterator)
3742 {
3743 CHECK( IORegistryEntry, registry_entry, entry );
3744
3745 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
3746 IORegistryEntry::getPlane( plane )));
3747
3748 return kIOReturnSuccess;
3749 }
3750
3751 /* Routine io_service_get_busy_state */
3752 kern_return_t
3753 is_io_service_get_busy_state(
3754 io_object_t _service,
3755 uint32_t *busyState )
3756 {
3757 CHECK( IOService, _service, service );
3758
3759 *busyState = service->getBusyState();
3760
3761 return kIOReturnSuccess;
3762 }
3763
3764 /* Routine io_service_get_state */
3765 kern_return_t
3766 is_io_service_get_state(
3767 io_object_t _service,
3768 uint64_t *state,
3769 uint32_t *busy_state,
3770 uint64_t *accumulated_busy_time )
3771 {
3772 CHECK( IOService, _service, service );
3773
3774 *state = service->getState();
3775 *busy_state = service->getBusyState();
3776 *accumulated_busy_time = service->getAccumulatedBusyTime();
3777
3778 return kIOReturnSuccess;
3779 }
3780
3781 /* Routine io_service_wait_quiet */
3782 kern_return_t
3783 is_io_service_wait_quiet(
3784 io_object_t _service,
3785 mach_timespec_t wait_time )
3786 {
3787 uint64_t timeoutNS;
3788
3789 CHECK( IOService, _service, service );
3790
3791 timeoutNS = wait_time.tv_sec;
3792 timeoutNS *= kSecondScale;
3793 timeoutNS += wait_time.tv_nsec;
3794
3795 return service->waitQuiet(timeoutNS);
3796 }
3797
/* Routine io_service_request_probe */
// Ask the service to re-probe/re-match. No privilege check is made here;
// any policy is left to the service's requestProbe() implementation.
kern_return_t
is_io_service_request_probe(
	io_object_t _service,
	uint32_t options )
{
	CHECK( IOService, _service, service );

	return service->requestProbe( options );
}
3808
3809 /* Routine io_service_get_authorization_id */
3810 kern_return_t
3811 is_io_service_get_authorization_id(
3812 io_object_t _service,
3813 uint64_t *authorization_id )
3814 {
3815 kern_return_t kr;
3816
3817 CHECK( IOService, _service, service );
3818
3819 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
3820 kIOClientPrivilegeAdministrator );
3821 if (kIOReturnSuccess != kr) {
3822 return kr;
3823 }
3824
3825 *authorization_id = service->getAuthorizationID();
3826
3827 return kr;
3828 }
3829
/* Routine io_service_set_authorization_id */
// Set the service's authorization ID. NOTE(review): unlike the getter, no
// privilege check is performed here — presumably setAuthorizationID()
// enforces its own policy; verify at that implementation.
kern_return_t
is_io_service_set_authorization_id(
	io_object_t _service,
	uint64_t authorization_id )
{
	CHECK( IOService, _service, service );

	return service->setAuthorizationID( authorization_id );
}
3840
/* Routine io_service_open_ndr */
/*
 * Open a new user-client connection on a service.
 *
 * The NDR record describes the caller's data representation; when it
 * differs from the kernel's (crossEndian), the open is refused unless the
 * service or client publishes kIOUserClientCrossEndianCompatibleKey.
 *
 * The MIG return value (err) reports transport status only; the open's own
 * status goes back through *result and the new connection through
 * *connection (NULL on failure).
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool crossEndian;
	bool disallowAccess;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// Only the calling task may open a connection it will own.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

	do{
		// Out-of-line properties at open time are not supported; the
		// disabled block below is the retired implementation.
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		crossEndian = (ndr.int_rep != NDR_record.int_rep);
		if (crossEndian) {
			// Pass the caller's NDR record through the properties
			// dictionary so newUserClient() can see the byte order.
			if (!propertiesDict) {
				propertiesDict = OSDictionary::withCapacity(4);
			}
			OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
			if (data) {
				if (propertiesDict) {
					propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
				}
				data->release();
			}
		}

		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess) {
			assert( OSDynamicCast(IOUserClient, client));

			// Cache per-client flags and create the lock used by the
			// close and notification-port paths.
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
			client->closed = false;
			client->lock = IOLockAlloc();

			// Cross-endian callers require an explicit compatibility
			// claim from either the service or the client.
			disallowAccess = (crossEndian
			    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
			    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
			if (disallowAccess) {
				res = kIOReturnUnsupported;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				res = kIOReturnNotPermitted;
			}
#endif

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}

			if (kIOReturnSuccess != res) {
				// Tear the half-opened client back down.
				IOStatisticsClientCall();
				client->clientClose();
				client->release();
				client = NULL;
				break;
			}
			// Record the creating process for diagnostics, and defer
			// provider termination while the open completes.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
				creatorName->release();
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
3970
/* Routine io_service_close */
/*
 * Close a user-client connection. clientClose() runs at most once per
 * non-shared client: the `closed` byte is flipped 0 -> 1 atomically, and a
 * duplicate close is logged and ignored. Shared-instance clients may be
 * closed repeatedly.
 */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// The port may actually name an OSSet of mappings (pushed out by
	// is_io_connect_map_memory_into_task); there is no client to close
	// in that case, so just succeed.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IOLockLock(client->lock);
		client->clientClose();
		IOLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
3996
3997 /* Routine io_connect_get_service */
3998 kern_return_t
3999 is_io_connect_get_service(
4000 io_object_t connection,
4001 io_object_t *service )
4002 {
4003 IOService * theService;
4004
4005 CHECK( IOUserClient, connection, client );
4006
4007 theService = client->getService();
4008 if (theService) {
4009 theService->retain();
4010 }
4011
4012 *service = theService;
4013
4014 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4015 }
4016
4017 /* Routine io_connect_set_notification_port */
4018 kern_return_t
4019 is_io_connect_set_notification_port(
4020 io_object_t connection,
4021 uint32_t notification_type,
4022 mach_port_t port,
4023 uint32_t reference)
4024 {
4025 kern_return_t ret;
4026 CHECK( IOUserClient, connection, client );
4027
4028 IOStatisticsClientCall();
4029 IOLockLock(client->lock);
4030 ret = client->registerNotificationPort( port, notification_type,
4031 (io_user_reference_t) reference );
4032 IOLockUnlock(client->lock);
4033 return ret;
4034 }
4035
4036 /* Routine io_connect_set_notification_port */
4037 kern_return_t
4038 is_io_connect_set_notification_port_64(
4039 io_object_t connection,
4040 uint32_t notification_type,
4041 mach_port_t port,
4042 io_user_reference_t reference)
4043 {
4044 kern_return_t ret;
4045 CHECK( IOUserClient, connection, client );
4046
4047 IOStatisticsClientCall();
4048 IOLockLock(client->lock);
4049 ret = client->registerNotificationPort( port, notification_type,
4050 reference );
4051 IOLockUnlock(client->lock);
4052 return ret;
4053 }
4054
/* Routine io_connect_map_memory_into_task */
/*
 * Map client memory of `memory_type` into `into_task`; on success *address
 * and *size describe the new mapping.
 *
 * Ownership of the resulting IOMemoryMap depends on the target: for
 * shared-instance clients, or when mapping into a foreign task, a send
 * right naming the map is pushed into the target task so cleanup can
 * happen there; otherwise the map is parked in client->mappings under
 * gIOObjectPortLock.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4112
4113 /* Routine is_io_connect_map_memory */
4114 kern_return_t
4115 is_io_connect_map_memory(
4116 io_object_t connect,
4117 uint32_t type,
4118 task_t task,
4119 uint32_t * mapAddr,
4120 uint32_t * mapSize,
4121 uint32_t flags )
4122 {
4123 IOReturn err;
4124 mach_vm_address_t address;
4125 mach_vm_size_t size;
4126
4127 address = SCALAR64(*mapAddr);
4128 size = SCALAR64(*mapSize);
4129
4130 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4131
4132 *mapAddr = SCALAR32(address);
4133 *mapSize = SCALAR32(size);
4134
4135 return err;
4136 }
4137 } /* extern "C" */
4138
/*
 * Find and remove this client's mapping of `mem` from the mappings set,
 * under gIOObjectPortLock. Returns the map with an extra retain taken for
 * the caller (who must release it), or NULL when no map of `mem` exists.
 */
IOMemoryMap *
IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
	OSIterator * iter;
	IOMemoryMap * map = NULL;

	IOLockLock(gIOObjectPortLock);

	iter = OSCollectionIterator::withCollection(mappings);
	if (iter) {
		while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
			if (mem == map->getMemoryDescriptor()) {
				// Retain before removal so the set's reference
				// being dropped cannot free the map.
				map->retain();
				mappings->removeObject(map);
				break;
			}
		}
		iter->release();
	}

	IOLockUnlock(gIOObjectPortLock);

	return map;
}
4163
4164 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Undo a mapping made by is_io_connect_map_memory_into_task(). The memory
 * descriptor for `memory_type` is looked up again and a kIOMapReference
 * mapping at `address` is created only to locate the existing map object.
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference makes createMappingInTask() find the existing
		// mapping at `address` rather than create a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Drop any bookkeeping reference kept in client->mappings.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			if (from_task != current_task()) {
				// Foreign task: recover the port name the map was
				// published under at map time.
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// -2: presumably removes the right made here plus the
				// one pushed at map time — TODO confirm against
				// iokit_mod_send_right(); its status is deliberately
				// overwritten with success.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (from_task == current_task()) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4224
4225 kern_return_t
4226 is_io_connect_unmap_memory(
4227 io_object_t connect,
4228 uint32_t type,
4229 task_t task,
4230 uint32_t mapAddr )
4231 {
4232 IOReturn err;
4233 mach_vm_address_t address;
4234
4235 address = SCALAR64(mapAddr);
4236
4237 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4238
4239 return err;
4240 }
4241
4242
/* Routine io_connect_add_client */
// Connect one user client to another via IOUserClient::connectClient();
// both ports must name IOUserClient objects (enforced by CHECK).
kern_return_t
is_io_connect_add_client(
	io_object_t connection,
	io_object_t connect_to)
{
	CHECK( IOUserClient, connection, client );
	CHECK( IOUserClient, connect_to, to );

	IOStatisticsClientCall();
	return client->connectClient( to );
}
4255
4256
/* Routine io_connect_set_properties */
// A connection is itself a registry entry, so setting its properties is
// the same operation as io_registry_entry_set_properties.
kern_return_t
is_io_connect_set_properties(
	io_object_t connection,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
}
4267
/* Routine io_user_client_method */
/*
 * Dispatch an external method whose structure output is variable-sized:
 * the method hands back an OSSerialize or OSData object through
 * structureVariableOutputData, which is then copied out-of-line to the
 * caller via copyoutkdata().
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input must be strictly larger than the inband limit;
	// anything smaller is required to arrive inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output slots so entries the method does not set
	// come back as zero.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		vm_size_t len;

		// The method may hand back either an OSSerialize or an OSData;
		// anything else (including NULL) is an underrun.
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4364
/* Routine io_user_client_method */
/*
 * Dispatch a synchronous external method with fixed-size inband and
 * optional out-of-line (descriptor-backed) structure input and output.
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port, reference, or variable output.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers must be strictly larger than the inband limit;
	// smaller payloads are required to travel inband.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output slots so entries the method does not set
	// come back as zero.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4453
/* Routine io_async_user_client_method */
/*
 * Dispatch an asynchronous external method. reference[0] is overwritten
 * with the wake port (tagged with kIOUCAsync64Flag for 64-bit address
 * spaces) so completion delivery can find the port later.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Pack the wake port into reference[0]; flag 64-bit caller address
	// spaces so completion knows how to interpret the reference.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line buffers must be strictly larger than the inband limit.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero the scalar output slots so entries the method does not set
	// come back as zero.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = *ool_output_size;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	// Report back the counts/sizes the method actually produced.
	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4551
4552 /* Routine io_connect_method_scalarI_scalarO */
4553 kern_return_t
4554 is_io_connect_method_scalarI_scalarO(
4555 io_object_t connect,
4556 uint32_t index,
4557 io_scalar_inband_t input,
4558 mach_msg_type_number_t inputCount,
4559 io_scalar_inband_t output,
4560 mach_msg_type_number_t * outputCount )
4561 {
4562 IOReturn err;
4563 uint32_t i;
4564 io_scalar_inband64_t _input;
4565 io_scalar_inband64_t _output;
4566
4567 mach_msg_type_number_t struct_outputCnt = 0;
4568 mach_vm_size_t ool_output_size = 0;
4569
4570 bzero(&_output[0], sizeof(_output));
4571 for (i = 0; i < inputCount; i++) {
4572 _input[i] = SCALAR64(input[i]);
4573 }
4574
4575 err = is_io_connect_method(connect, index,
4576 _input, inputCount,
4577 NULL, 0,
4578 0, 0,
4579 NULL, &struct_outputCnt,
4580 _output, outputCount,
4581 0, &ool_output_size);
4582
4583 for (i = 0; i < *outputCount; i++) {
4584 output[i] = SCALAR32(_output[i]);
4585 }
4586
4587 return err;
4588 }
4589
/*
 * Shim a legacy IOExternalMethod (fixed IOMethod member-function pointer)
 * onto the scalar-in / scalar-out calling convention. The method table
 * entry's count0/count1 fix the exact scalar counts; the switch packs the
 * inputs into the 6-argument IOMethod signature, filling the remaining
 * trailing slots with output pointers.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	bzero(&_output[0], sizeof(_output));
	do {
		// Exact-match the declared input count; a mismatch is logged
		// and traced, and the call fails with kIOReturnBadArgument.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Dispatch on input count: inputs (via ARG32) first, then
		// pointers into _output for the remaining argument slots.
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Narrow the outputs for the caller.
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4665
4666 /* Routine io_async_method_scalarI_scalarO */
4667 kern_return_t
4668 is_io_async_method_scalarI_scalarO(
4669 io_object_t connect,
4670 mach_port_t wake_port,
4671 io_async_ref_t reference,
4672 mach_msg_type_number_t referenceCnt,
4673 uint32_t index,
4674 io_scalar_inband_t input,
4675 mach_msg_type_number_t inputCount,
4676 io_scalar_inband_t output,
4677 mach_msg_type_number_t * outputCount )
4678 {
4679 IOReturn err;
4680 uint32_t i;
4681 io_scalar_inband64_t _input;
4682 io_scalar_inband64_t _output;
4683 io_async_ref64_t _reference;
4684
4685 if (referenceCnt > ASYNC_REF64_COUNT) {
4686 return kIOReturnBadArgument;
4687 }
4688 bzero(&_output[0], sizeof(_output));
4689 for (i = 0; i < referenceCnt; i++) {
4690 _reference[i] = REF64(reference[i]);
4691 }
4692 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4693
4694 mach_msg_type_number_t struct_outputCnt = 0;
4695 mach_vm_size_t ool_output_size = 0;
4696
4697 for (i = 0; i < inputCount; i++) {
4698 _input[i] = SCALAR64(input[i]);
4699 }
4700
4701 err = is_io_connect_async_method(connect,
4702 wake_port, _reference, referenceCnt,
4703 index,
4704 _input, inputCount,
4705 NULL, 0,
4706 0, 0,
4707 NULL, &struct_outputCnt,
4708 _output, outputCount,
4709 0, &ool_output_size);
4710
4711 for (i = 0; i < *outputCount; i++) {
4712 output[i] = SCALAR32(_output[i]);
4713 }
4714
4715 return err;
4716 }
4717 /* Routine io_async_method_scalarI_structureO */
4718 kern_return_t
4719 is_io_async_method_scalarI_structureO(
4720 io_object_t connect,
4721 mach_port_t wake_port,
4722 io_async_ref_t reference,
4723 mach_msg_type_number_t referenceCnt,
4724 uint32_t index,
4725 io_scalar_inband_t input,
4726 mach_msg_type_number_t inputCount,
4727 io_struct_inband_t output,
4728 mach_msg_type_number_t * outputCount )
4729 {
4730 uint32_t i;
4731 io_scalar_inband64_t _input;
4732 io_async_ref64_t _reference;
4733
4734 if (referenceCnt > ASYNC_REF64_COUNT) {
4735 return kIOReturnBadArgument;
4736 }
4737 for (i = 0; i < referenceCnt; i++) {
4738 _reference[i] = REF64(reference[i]);
4739 }
4740 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4741
4742 mach_msg_type_number_t scalar_outputCnt = 0;
4743 mach_vm_size_t ool_output_size = 0;
4744
4745 for (i = 0; i < inputCount; i++) {
4746 _input[i] = SCALAR64(input[i]);
4747 }
4748
4749 return is_io_connect_async_method(connect,
4750 wake_port, _reference, referenceCnt,
4751 index,
4752 _input, inputCount,
4753 NULL, 0,
4754 0, 0,
4755 output, outputCount,
4756 NULL, &scalar_outputCnt,
4757 0, &ool_output_size);
4758 }
4759
4760 /* Routine io_async_method_scalarI_structureI */
4761 kern_return_t
4762 is_io_async_method_scalarI_structureI(
4763 io_connect_t connect,
4764 mach_port_t wake_port,
4765 io_async_ref_t reference,
4766 mach_msg_type_number_t referenceCnt,
4767 uint32_t index,
4768 io_scalar_inband_t input,
4769 mach_msg_type_number_t inputCount,
4770 io_struct_inband_t inputStruct,
4771 mach_msg_type_number_t inputStructCount )
4772 {
4773 uint32_t i;
4774 io_scalar_inband64_t _input;
4775 io_async_ref64_t _reference;
4776
4777 if (referenceCnt > ASYNC_REF64_COUNT) {
4778 return kIOReturnBadArgument;
4779 }
4780 for (i = 0; i < referenceCnt; i++) {
4781 _reference[i] = REF64(reference[i]);
4782 }
4783 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4784
4785 mach_msg_type_number_t scalar_outputCnt = 0;
4786 mach_msg_type_number_t inband_outputCnt = 0;
4787 mach_vm_size_t ool_output_size = 0;
4788
4789 for (i = 0; i < inputCount; i++) {
4790 _input[i] = SCALAR64(input[i]);
4791 }
4792
4793 return is_io_connect_async_method(connect,
4794 wake_port, _reference, referenceCnt,
4795 index,
4796 _input, inputCount,
4797 inputStruct, inputStructCount,
4798 0, 0,
4799 NULL, &inband_outputCnt,
4800 NULL, &scalar_outputCnt,
4801 0, &ool_output_size);
4802 }
4803
4804 /* Routine io_async_method_structureI_structureO */
4805 kern_return_t
4806 is_io_async_method_structureI_structureO(
4807 io_object_t connect,
4808 mach_port_t wake_port,
4809 io_async_ref_t reference,
4810 mach_msg_type_number_t referenceCnt,
4811 uint32_t index,
4812 io_struct_inband_t input,
4813 mach_msg_type_number_t inputCount,
4814 io_struct_inband_t output,
4815 mach_msg_type_number_t * outputCount )
4816 {
4817 uint32_t i;
4818 mach_msg_type_number_t scalar_outputCnt = 0;
4819 mach_vm_size_t ool_output_size = 0;
4820 io_async_ref64_t _reference;
4821
4822 if (referenceCnt > ASYNC_REF64_COUNT) {
4823 return kIOReturnBadArgument;
4824 }
4825 for (i = 0; i < referenceCnt; i++) {
4826 _reference[i] = REF64(reference[i]);
4827 }
4828 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4829
4830 return is_io_connect_async_method(connect,
4831 wake_port, _reference, referenceCnt,
4832 index,
4833 NULL, 0,
4834 input, inputCount,
4835 0, 0,
4836 output, outputCount,
4837 NULL, &scalar_outputCnt,
4838 0, &ool_output_size);
4839 }
4840
4841
/*
 * Pre-Leopard shim: dispatch a legacy IOExternalAsyncMethod that takes and
 * returns inband scalars.  The method table entry supplies the handler (an
 * IOAsyncMethod member-function pointer) and the expected input (count0)
 * and output (count1) scalar counts.  The switch below packs the inputs
 * first, then pointers into the staging output array, across the six
 * void* argument slots of the legacy signature.
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	// Zero the staging outputs so slots the handler never writes are
	// copied back to the caller as 0.
	bzero(&_output[0], sizeof(_output));
	// Narrow the 64-bit user async references to the legacy 32-bit form.
	// NOTE(review): asyncReferenceCount is not bounds-checked here —
	// assumes the caller validated it against io_async_ref_t capacity.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		// Legacy methods require exact scalar counts; mismatches are
		// logged and rejected.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Inputs are truncated to 32 bits (ARG32) to match the legacy
		// method signatures; remaining slots receive output pointers.
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy the staging outputs back, truncating to the 32-bit inband form.
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4933
4934
4935 /* Routine io_connect_method_scalarI_structureO */
4936 kern_return_t
4937 is_io_connect_method_scalarI_structureO(
4938 io_object_t connect,
4939 uint32_t index,
4940 io_scalar_inband_t input,
4941 mach_msg_type_number_t inputCount,
4942 io_struct_inband_t output,
4943 mach_msg_type_number_t * outputCount )
4944 {
4945 uint32_t i;
4946 io_scalar_inband64_t _input;
4947
4948 mach_msg_type_number_t scalar_outputCnt = 0;
4949 mach_vm_size_t ool_output_size = 0;
4950
4951 for (i = 0; i < inputCount; i++) {
4952 _input[i] = SCALAR64(input[i]);
4953 }
4954
4955 return is_io_connect_method(connect, index,
4956 _input, inputCount,
4957 NULL, 0,
4958 0, 0,
4959 output, outputCount,
4960 NULL, &scalar_outputCnt,
4961 0, &ool_output_size);
4962 }
4963
/*
 * Pre-Leopard shim: dispatch a legacy IOExternalMethod taking up to five
 * scalar inputs and returning an inband structure.  The output buffer and
 * a pointer to its length (IOByteCount*) are smuggled through the legacy
 * void* argument slots; the handler updates *outputCount with the actual
 * size where the table allows it.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		// Scalar count must match the method table entry exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless the entry declares a
		// variable-size structure.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Inputs are truncated to 32 bits (ARG32); the trailing slots
		// carry the output buffer and (when inputCount < 5) the
		// IOByteCount* length pointer cast through void*.
		switch (inputCount) {
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5028
5029
/*
 * Pre-Leopard shim: async variant of scalarI_structureO.  Narrows the
 * caller's 64-bit async references to the legacy 32-bit io_async_ref_t,
 * then dispatches the IOExternalAsyncMethod with scalar inputs (ARG32-
 * truncated) followed by the output buffer and a pointer to its length.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Narrow the 64-bit user async references to the legacy 32-bit form.
	// NOTE(review): asyncReferenceCount is not bounds-checked here —
	// assumes the caller validated it against io_async_ref_t capacity.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		// Scalar count must match the method table entry exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Output size must match unless the entry declares a
		// variable-size structure.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5107
5108 /* Routine io_connect_method_scalarI_structureI */
5109 kern_return_t
5110 is_io_connect_method_scalarI_structureI(
5111 io_connect_t connect,
5112 uint32_t index,
5113 io_scalar_inband_t input,
5114 mach_msg_type_number_t inputCount,
5115 io_struct_inband_t inputStruct,
5116 mach_msg_type_number_t inputStructCount )
5117 {
5118 uint32_t i;
5119 io_scalar_inband64_t _input;
5120
5121 mach_msg_type_number_t scalar_outputCnt = 0;
5122 mach_msg_type_number_t inband_outputCnt = 0;
5123 mach_vm_size_t ool_output_size = 0;
5124
5125 for (i = 0; i < inputCount; i++) {
5126 _input[i] = SCALAR64(input[i]);
5127 }
5128
5129 return is_io_connect_method(connect, index,
5130 _input, inputCount,
5131 inputStruct, inputStructCount,
5132 0, 0,
5133 NULL, &inband_outputCnt,
5134 NULL, &scalar_outputCnt,
5135 0, &ool_output_size);
5136 }
5137
5138 kern_return_t
5139 shim_io_connect_method_scalarI_structureI(
5140 IOExternalMethod * method,
5141 IOService * object,
5142 const io_user_scalar_t * input,
5143 mach_msg_type_number_t inputCount,
5144 io_struct_inband_t inputStruct,
5145 mach_msg_type_number_t inputStructCount )
5146 {
5147 IOMethod func;
5148 IOReturn err = kIOReturnBadArgument;
5149
5150 do{
5151 if (inputCount != method->count0) {
5152 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5153 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5154 continue;
5155 }
5156 if ((kIOUCVariableStructureSize != method->count1)
5157 && (inputStructCount != method->count1)) {
5158 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5159 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5160 continue;
5161 }
5162
5163 func = method->func;
5164
5165 switch (inputCount) {
5166 case 5:
5167 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5168 ARG32(input[3]), ARG32(input[4]),
5169 inputStruct );
5170 break;
5171 case 4:
5172 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5173 ARG32(input[3]),
5174 inputStruct, (void *)(uintptr_t)inputStructCount );
5175 break;
5176 case 3:
5177 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5178 inputStruct, (void *)(uintptr_t)inputStructCount,
5179 NULL );
5180 break;
5181 case 2:
5182 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5183 inputStruct, (void *)(uintptr_t)inputStructCount,
5184 NULL, NULL );
5185 break;
5186 case 1:
5187 err = (object->*func)( ARG32(input[0]),
5188 inputStruct, (void *)(uintptr_t)inputStructCount,
5189 NULL, NULL, NULL );
5190 break;
5191 case 0:
5192 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5193 NULL, NULL, NULL, NULL );
5194 break;
5195
5196 default:
5197 IOLog("%s: Bad method table\n", object->getName());
5198 }
5199 }while (false);
5200
5201 return err;
5202 }
5203
/*
 * Pre-Leopard shim: async variant of scalarI_structureI.  Narrows the
 * caller's 64-bit async references to the legacy 32-bit io_async_ref_t,
 * then dispatches the IOExternalAsyncMethod with ARG32-truncated scalars
 * followed by the input structure pointer and its length.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	// Narrow the 64-bit user async references to the legacy 32-bit form.
	// NOTE(review): asyncReferenceCount is not bounds-checked here —
	// assumes the caller validated it against io_async_ref_t capacity.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		// Scalar count must match the method table entry exactly.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		// Structure size must match unless the entry declares a
		// variable-size structure.
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5284
5285 /* Routine io_connect_method_structureI_structureO */
5286 kern_return_t
5287 is_io_connect_method_structureI_structureO(
5288 io_object_t connect,
5289 uint32_t index,
5290 io_struct_inband_t input,
5291 mach_msg_type_number_t inputCount,
5292 io_struct_inband_t output,
5293 mach_msg_type_number_t * outputCount )
5294 {
5295 mach_msg_type_number_t scalar_outputCnt = 0;
5296 mach_vm_size_t ool_output_size = 0;
5297
5298 return is_io_connect_method(connect, index,
5299 NULL, 0,
5300 input, inputCount,
5301 0, 0,
5302 output, outputCount,
5303 NULL, &scalar_outputCnt,
5304 0, &ool_output_size);
5305 }
5306
5307 kern_return_t
5308 shim_io_connect_method_structureI_structureO(
5309 IOExternalMethod * method,
5310 IOService * object,
5311 io_struct_inband_t input,
5312 mach_msg_type_number_t inputCount,
5313 io_struct_inband_t output,
5314 IOByteCount * outputCount )
5315 {
5316 IOMethod func;
5317 IOReturn err = kIOReturnBadArgument;
5318
5319 do{
5320 if ((kIOUCVariableStructureSize != method->count0)
5321 && (inputCount != method->count0)) {
5322 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5323 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5324 continue;
5325 }
5326 if ((kIOUCVariableStructureSize != method->count1)
5327 && (*outputCount != method->count1)) {
5328 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5329 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5330 continue;
5331 }
5332
5333 func = method->func;
5334
5335 if (method->count1) {
5336 if (method->count0) {
5337 err = (object->*func)( input, output,
5338 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5339 } else {
5340 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5341 }
5342 } else {
5343 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5344 }
5345 }while (false);
5346
5347
5348 return err;
5349 }
5350
/*
 * Pre-Leopard shim: async variant of structureI_structureO.  Narrows the
 * caller's 64-bit async references to the legacy 32-bit io_async_ref_t,
 * then dispatches the IOExternalAsyncMethod; which slots are populated
 * depends on whether the table declares input (count0) and/or output
 * (count1) structures.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	// Narrow the 64-bit user async references to the legacy 32-bit form.
	// NOTE(review): asyncReferenceCount is not bounds-checked here —
	// assumes the caller validated it against io_async_ref_t capacity.
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		// Sizes must match the table unless an entry declares a
		// variable-size structure.
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		if (method->count1) {
			if (method->count0) {
				// Both input and output structures.
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				// Output structure only.
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			// Input structure only.
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5406
#if !NO_KEXTD
/* Set once kextd reports kIOCatalogKextdFinishedLaunching, so the registry
 * busy state is only cleared a single time (see is_io_catalog_send_data). */
bool gIOKextdClearedBusy = false;
#endif
5410
/* Routine io_catalog_send_data */
/*
 * Handle catalogue control requests from user space (primarily kextd).
 * 'inData', when present, is an out-of-line vm_map_copy_t containing
 * serialized XML that is unserialized into an OSArray/OSDictionary and
 * applied to gIOCatalogue according to 'flag'.  Requires the master
 * device port and the kext-management entitlement.  The MIG-level return
 * is KERN_SUCCESS once the copyout has been consumed; the operation
 * status is reported through *result.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
#if NO_KEXTD
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	// Only the kextd-notification flags may arrive without a payload.
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		// Cap the payload to bound the kernel_map allocation below.
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds
		// (the copy object has been consumed; MIG must not destroy it
		// again, so failures from here on go through *result).

		if (inDataCount) {
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	// On the payload paths kr is KERN_SUCCESS from vm_map_copyout; the
	// cases below only overwrite it on failure.
	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
		kr = KERN_NOT_SUPPORTED;
		break;

	case kIOCatalogKextdActive:
#if !NO_KEXTD
		IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
		OSKext::setKextdActive();

		/* Dump all nonloaded startup extensions; kextd will now send them
		 * down on request.
		 */
		OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
		kr = kIOReturnSuccess;
		break;

	case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
		// Clear the registry busy state exactly once per boot.
		if (!gIOKextdClearedBusy) {
			IOService::kextdLaunched();
			gIOKextdClearedBusy = true;
		}
#endif
		kr = kIOReturnSuccess;
	}
	break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD */
}
5566
/* Routine io_catalog_terminate */
/*
 * Terminate services or drivers by name on behalf of a privileged caller.
 * Requires the master device port and administrator privilege.
 */
kern_return_t
is_io_catalog_terminate(
	mach_port_t master_port,
	uint32_t flag,
	io_name_t name )
{
	kern_return_t kr;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

	// kr is kIOReturnSuccess here; cases only overwrite it on failure.
	switch (flag) {
#if !defined(SECURE_KERNEL)
	case kIOCatalogServiceTerminate:
		OSIterator * iter;
		IOService * service;

		iter = IORegistryIterator::iterateOver(gIOServicePlane,
		    kIORegistryIterateRecursively);
		if (!iter) {
			return kIOReturnNoMemory;
		}

		// Synchronously terminate every service matching 'name'.
		// Termination mutates the registry and can invalidate the
		// iterator, so restart the scan until a full pass completes
		// with the iterator still valid.  'service' is non-NULL on
		// exit only if a terminate() call failed (break above).
		do {
			iter->reset();
			while ((service = (IOService *)iter->getNextObject())) {
				if (service->metaCast(name)) {
					if (!service->terminate( kIOServiceRequired
					    | kIOServiceSynchronous)) {
						kr = kIOReturnUnsupported;
						break;
					}
				}
			}
		} while (!service && !iter->isValid());
		iter->release();
		break;

	case kIOCatalogModuleUnload:
	case kIOCatalogModuleTerminate:
		// Terminate the named module's drivers; optionally unload it.
		kr = gIOCatalogue->terminateDriversForModule(name,
		    flag == kIOCatalogModuleUnload);
		break;
#endif

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	return kr;
}
5627
5628 /* Routine io_catalog_get_data */
5629 kern_return_t
5630 is_io_catalog_get_data(
5631 mach_port_t master_port,
5632 uint32_t flag,
5633 io_buf_ptr_t *outData,
5634 mach_msg_type_number_t *outDataCount)
5635 {
5636 kern_return_t kr = kIOReturnSuccess;
5637 OSSerialize * s;
5638
5639 if (master_port != master_device_port) {
5640 return kIOReturnNotPrivileged;
5641 }
5642
5643 //printf("io_catalog_get_data called. flag: %d\n", flag);
5644
5645 s = OSSerialize::withCapacity(4096);
5646 if (!s) {
5647 return kIOReturnNoMemory;
5648 }
5649
5650 kr = gIOCatalogue->serializeData(flag, s);
5651
5652 if (kr == kIOReturnSuccess) {
5653 vm_offset_t data;
5654 vm_map_copy_t copy;
5655 vm_size_t size;
5656
5657 size = s->getLength();
5658 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5659 if (kr == kIOReturnSuccess) {
5660 bcopy(s->text(), (void *)data, size);
5661 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5662 (vm_map_size_t)size, true, &copy);
5663 *outData = (char *)copy;
5664 *outDataCount = size;
5665 }
5666 }
5667
5668 s->release();
5669
5670 return kr;
5671 }
5672
5673 /* Routine io_catalog_get_gen_count */
5674 kern_return_t
5675 is_io_catalog_get_gen_count(
5676 mach_port_t master_port,
5677 uint32_t *genCount)
5678 {
5679 if (master_port != master_device_port) {
5680 return kIOReturnNotPrivileged;
5681 }
5682
5683 //printf("io_catalog_get_gen_count called.\n");
5684
5685 if (!genCount) {
5686 return kIOReturnBadArgument;
5687 }
5688
5689 *genCount = gIOCatalogue->getGenerationCount();
5690
5691 return kIOReturnSuccess;
5692 }
5693
5694 /* Routine io_catalog_module_loaded.
5695 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5696 */
5697 kern_return_t
5698 is_io_catalog_module_loaded(
5699 mach_port_t master_port,
5700 io_name_t name)
5701 {
5702 if (master_port != master_device_port) {
5703 return kIOReturnNotPrivileged;
5704 }
5705
5706 //printf("io_catalog_module_loaded called. name %s\n", name);
5707
5708 if (!name) {
5709 return kIOReturnBadArgument;
5710 }
5711
5712 gIOCatalogue->moduleHasLoaded(name);
5713
5714 return kIOReturnSuccess;
5715 }
5716
5717 kern_return_t
5718 is_io_catalog_reset(
5719 mach_port_t master_port,
5720 uint32_t flag)
5721 {
5722 if (master_port != master_device_port) {
5723 return kIOReturnNotPrivileged;
5724 }
5725
5726 switch (flag) {
5727 case kIOCatalogResetDefault:
5728 gIOCatalogue->reset();
5729 break;
5730
5731 default:
5732 return kIOReturnBadArgument;
5733 }
5734
5735 return kIOReturnSuccess;
5736 }
5737
/*
 * Fast-path Mach trap for user client calls.  The user-supplied reference
 * selects the dispatch style: if bit 32 is set it names a DriverKit/UEXT
 * server object and the call is routed through IOUserServerUEXTTrap;
 * otherwise it is a classic IOUserClient connection and the indexed
 * IOExternalTrap is invoked directly with the six trap arguments.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;

	ref = (uintptr_t) args->userClientRef;
	if ((1ULL << 32) & ref) {
		// UEXT path: lookup takes a reference, dropped below.
		object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
		IOExternalTrap *trap;
		IOService *target = NULL;

		trap = userClient->getTargetAndTrapForIndex(&target, args->index);

		if (trap && target) {
			IOTrap func;

			func = trap->func;

			if (func) {
				result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
			}
		}

		// Balance the reference taken by the connect-ref lookup.
		iokit_remove_connect_reference(userClient);
	}

	return result;
}
5774
5775 /* Routine io_device_tree_entry_exists_with_name */
5776 kern_return_t
5777 is_io_device_tree_entry_exists_with_name(
5778 mach_port_t master_port,
5779 io_name_t name,
5780 boolean_t *exists )
5781 {
5782 OSCollectionIterator *iter;
5783
5784 if (master_port != master_device_port) {
5785 return kIOReturnNotPrivileged;
5786 }
5787
5788 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5789 *exists = iter && iter->getNextObject();
5790 OSSafeReleaseNULL(iter);
5791
5792 return kIOReturnSuccess;
5793 }
5794 } /* extern "C" */
5795
/*
 * Central dispatch point for user-client external methods (Leopard-and-later
 * IPC path).
 *
 * Two modes:
 *  1. If 'dispatch' is non-NULL, the subclass supplied a method table entry:
 *     validate the caller-provided scalar/structure input/output counts and
 *     sizes against the entry's check* fields (kIOUCVariableStructureSize
 *     acts as a wildcard that skips the check), then invoke
 *     dispatch->function(target, reference, args).  kIOReturnNoCompletion is
 *     returned when the entry has no function pointer, so the subclass's
 *     override can dispatch by other means.
 *  2. Otherwise fall back to the legacy (pre-Leopard) shim path, looking the
 *     method up via getAsyncTargetAndMethodForIndex() (when an async wake
 *     port was supplied) or getTargetAndMethodForIndex(), and routing to the
 *     matching shim_io_* helper based on the method's kIOUCTypeMask flags.
 *
 * selector  - method index chosen by the user process.
 * args      - marshalled in/out arguments from the MIG trap.
 * dispatch/target/reference - optional method-table entry from the subclass.
 *
 * Returns kIOReturnSuccess or an IOKit error; kIOReturnBadArgument on
 * count/size mismatch, kIOReturnUnsupported for an unknown selector,
 * kIOReturnIPCError when a legacy method is passed OOL descriptors it
 * cannot handle, kIOReturnNotPermitted for foreground-only methods called
 * from a GPU-denied task.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		/* Modern path: enforce the method-table entry's declared argument
		 * shape before touching the handler. */
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		/* Structure input may arrive inline (structureInputSize) or as an
		 * OOL memory descriptor; check whichever form was supplied. */
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementator can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	/* Snapshot the caller's output-buffer capacity; the sync shims below
	 * update this local in place with the actual bytes produced. */
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		/* Legacy asynchronous method: completion is delivered later via
		 * the wake port using asyncReference. */
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			/* Method is restricted to foreground/GPU-eligible tasks. */
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			/* NOTE(review): the async struct-output shims write
			 * args->structureOutputSize directly, but the unconditional
			 * write-back at the end of this function restores the value
			 * saved above — so the shim's size update appears to be
			 * discarded on the sync reply. Presumably the size reaches
			 * the client via the async completion instead; confirm
			 * before relying on the reply's size field. */
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		/* Legacy synchronous method. */
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			/* Sync shims report the produced size through the local,
			 * copied back to args below. */
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	/* Publish the (possibly shim-updated) structure output size back to
	 * the marshalled arguments for the reply. */
	args->structureOutputSize = structureOutputSize;

	return err;
}
5943
/*
 * Reserved vtable padding slots for binary compatibility.  Slots 0 and 1
 * were consumed on 32-bit (non-LP64) builds — presumably by later-added
 * virtual method overloads; confirm against IOUserClient.h — so they are
 * marked Used there and remain Unused on LP64.  Slots 2-15 are free for
 * future expansion on all architectures.
 */
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);