]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
9b3cef8cec56465beeb3491eddb91c646153c9b6
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54
55 #if CONFIG_MACF
56
57 extern "C" {
58 #include <security/mac_framework.h>
59 };
60 #include <sys/kauth.h>
61
62 #define IOMACF_LOG 0
63
64 #endif /* CONFIG_MACF */
65
66 #include <IOKit/assert.h>
67
68 #include "IOServicePrivate.h"
69 #include "IOKitKernelInternal.h"
70
71 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
72 #define SCALAR32(x) ((uint32_t )x)
73 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
74 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
75 #define REF32(x) ((int)(x))
76
// Flag bits carried in the low bits of the async reference's reserved slot
// (see setAsyncReference / setAsyncReference64 below, which preserve them
// when storing the wake port).
enum{
	kIOUCAsync0Flags = 3ULL,          // mask covering both flag bits below
	kIOUCAsync64Flag = 1ULL,          // set when the client task has a 64-bit map
	kIOUCAsyncErrorLoggedFlag = 2ULL  // NOTE(review): presumably "error already logged once" — usage not in this chunk
};
82
#if IOKITSTATS

// Register this user client with IOStatistics and stash the returned
// counter handle in the reserved (ExpansionData) area.
#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

// Unregister the counter; guarded because reserved may never have been
// allocated if initialization failed early.
#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
	        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

// Count one externalized call on the client ('client' must be in scope).
#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

// IOKITSTATS disabled: all statistics hooks compile away to nothing.
#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
108
#if DEVELOPMENT || DEBUG

// Temporarily replace this frame's saved return address with (a), so that
// backtraces captured inside the callout attribute the work to (a).
// Must be paired with FAKE_STACK_FRAME_END() before returning.
// DEVELOPMENT/DEBUG builds only.
#define FAKE_STACK_FRAME(a)	\
	const void ** __frameptr; \
	const void * __retaddr; \
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
	__retaddr = __frameptr[1]; \
	__frameptr[1] = (a);

// Restore the genuine return address saved by FAKE_STACK_FRAME().
#define FAKE_STACK_FRAME_END() \
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */
127
128 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
129 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
130
131 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
132
133 extern "C" {
134 #include <mach/mach_traps.h>
135 #include <vm/vm_map.h>
136 } /* extern "C" */
137
138 struct IOMachPortHashList;
139
140 static_assert(IKOT_MAX_TYPE <= 255);
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// Instances live in the global 'ports' hash table (keyed by object pointer)
// and are protected by gIOObjectPortLock.
class IOMachPort : public OSObject
{
	OSDeclareDefaultStructors(IOMachPort);
public:
	SLIST_ENTRY(IOMachPort) link;   // hash-bucket chain linkage
	ipc_port_t      port;           // the kobject port representing 'object'
	OSObject*       object;         // object the port stands for (tagged-retained)
	UInt32          mscount;        // make-send count handed out for this port
	UInt8           holdDestroy;    // when set, releasePortForObject() is a no-op
	UInt8           type;           // ipc_kobject_type_t, narrowed (IKOT_MAX_TYPE <= 255)

	// Allocate a wrapper and its kobject port for obj.
	static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);

	// Hash bucket for obj (pointer hash; type is matched during lookup).
	static IOMachPortHashList* bucketForObject(OSObject *obj,
	    ipc_kobject_type_t type);

	// Linear search of one bucket for the (obj, type) pair; NULL if absent.
	static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);

	// No-senders processing; returns true when the port was torn down.
	static bool noMoreSendersForObject( OSObject * obj,
	    ipc_kobject_type_t type, mach_port_mscount_t * mscount );
	// Unhash and release the wrapper for obj (non-connect types).
	static void releasePortForObject( OSObject * obj,
	    ipc_kobject_type_t type );
	// Pin the port so releasePortForObject() will not destroy it.
	static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

	// Make a send right for obj's port in 'task'; returns the port name.
	static mach_port_name_t makeSendRightForTask( task_t task,
	    io_object_t obj, ipc_kobject_type_t type );

	virtual void free() APPLE_KEXT_OVERRIDE;
};
174
#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)

// Guards the 'ports' hash table and all IOMachPort state transitions.
static IOLock * gIOObjectPortLock;
IOLock * gIOUserServerLock;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

SLIST_HEAD(IOMachPortHashList, IOMachPort);

// Smaller table on embedded targets to save memory.
#if CONFIG_EMBEDDED
#define PORT_HASH_SIZE 256
#else
#define PORT_HASH_SIZE 4096
#endif /* CONFIG_EMBEDDED */

// Object-pointer-hashed buckets of IOMachPort entries.
IOMachPortHashList ports[PORT_HASH_SIZE];
192
193 void
194 IOMachPortInitialize(void)
195 {
196 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
197 SLIST_INIT(&ports[i]);
198 }
199 }
200
201 IOMachPortHashList*
202 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
203 {
204 return &ports[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
205 }
206
207 IOMachPort*
208 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
209 {
210 IOMachPort *machPort;
211
212 SLIST_FOREACH(machPort, bucket, link) {
213 if (machPort->object == obj && machPort->type == type) {
214 return machPort;
215 }
216 }
217 return NULL;
218 }
219
220 IOMachPort*
221 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
222 {
223 IOMachPort *machPort = NULL;
224
225 machPort = new IOMachPort;
226 if (__improbable(machPort && !machPort->init())) {
227 return NULL;
228 }
229
230 machPort->object = obj;
231 machPort->type = (typeof(machPort->type))type;
232 machPort->port = iokit_alloc_object_port(obj, type);
233
234 obj->taggedRetain(OSTypeID(OSCollection));
235 machPort->mscount++;
236
237 return machPort;
238 }
239
// No-more-senders notification processing for obj's port of 'type'.
// Returns true when the port wrapper was actually destroyed. If new send
// rights were minted since the notification was armed (our mscount exceeds
// the notification's), the port survives: the current mscount is reported
// back through *mscount so the caller can re-arm, and false is returned.
bool
IOMachPort::noMoreSendersForObject( OSObject * obj,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOMachPort *machPort = NULL;
	IOUserClient *uc;
	OSAction *action;
	bool destroyed = true;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	// Keep obj alive across the drop of its tagged reference below.
	obj->retain();

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort) {
		// Destroy only if no sends raced in after the notification.
		destroyed = (machPort->mscount <= *mscount);
		if (!destroyed) {
			*mscount = machPort->mscount;
			lck_mtx_unlock(gIOObjectPortLock);
		} else {
			// Connect ports: tell the user client its last sender is gone
			// while still under the lock.
			if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
				uc->noMoreSenders();
			}
			SLIST_REMOVE(bucket, machPort, IOMachPort, link);

			lck_mtx_unlock(gIOObjectPortLock);

			// Drop the wrapper and the tagged retain taken at creation.
			machPort->release();
			obj->taggedRelease(OSTypeID(OSCollection));
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}

	// DriverKit action objects get an abort callback outside the lock.
	if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
		action->Aborted();
	}

	obj->release();

	return destroyed;
}
285
// Unhash and destroy the port wrapper for obj, unless setHoldDestroy() was
// called for it. Connect ports are explicitly excluded (see assert); their
// teardown goes through destroyUserReferences()/noMoreSendersForObject().
void
IOMachPort::releasePortForObject( OSObject * obj,
    ipc_kobject_type_t type )
{
	IOMachPort *machPort;
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	assert(IKOT_IOKIT_CONNECT != type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (machPort && !machPort->holdDestroy) {
		// Extra retain keeps obj alive while the tagged reference drops.
		obj->retain();
		SLIST_REMOVE(bucket, machPort, IOMachPort, link);

		lck_mtx_unlock(gIOObjectPortLock);

		machPort->release();
		obj->taggedRelease(OSTypeID(OSCollection));
		obj->release();
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
	}
}
312
313 void
314 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
315 {
316 IOMachPort * machPort;
317
318 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
319 lck_mtx_lock(gIOObjectPortLock);
320
321 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
322
323 if (machPort) {
324 machPort->holdDestroy = true;
325 }
326
327 lck_mtx_unlock(gIOObjectPortLock);
328 }
329
330 void
331 IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
332 {
333 IOMachPort::releasePortForObject(obj, type);
334 }
335
// Tear down user-visible references to obj. The plain object port is simply
// released; the connect port is either destroyed or — when the user client
// still has live memory mappings — re-targeted at the mappings object so
// outstanding map teardown can complete through the same port.
void
IOUserClient::destroyUserReferences( OSObject * obj )
{
	IOMachPort *machPort;

	IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

	// panther, 3160200
	// IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

	obj->retain();
	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
	IOMachPortHashList *mappingBucket = NULL;

	lck_mtx_lock(gIOObjectPortLock);

	// Compute the destination bucket for the mappings object up front,
	// while obj's identity is still stable under the lock.
	IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
	if (uc && uc->mappings) {
		mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
	}

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);

	if (machPort == NULL) {
		lck_mtx_unlock(gIOObjectPortLock);
		goto end;
	}

	// Unhash from obj's bucket and drop the tagged retain taken at creation.
	SLIST_REMOVE(bucket, machPort, IOMachPort, link);
	obj->taggedRelease(OSTypeID(OSCollection));

	if (uc) {
		uc->noMoreSenders();
		if (uc->mappings) {
			// Re-point the existing wrapper and kobject port at the
			// mappings object instead of destroying it.
			uc->mappings->taggedRetain(OSTypeID(OSCollection));
			machPort->object = uc->mappings;
			SLIST_INSERT_HEAD(mappingBucket, machPort, link);
			iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);

			lck_mtx_unlock(gIOObjectPortLock);

			// Ownership of mappings moved to the port wrapper.
			uc->mappings->release();
			uc->mappings = NULL;
		} else {
			lck_mtx_unlock(gIOObjectPortLock);
			machPort->release();
		}
	} else {
		lck_mtx_unlock(gIOObjectPortLock);
		machPort->release();
	}


end:

	obj->release();
}
393
394 mach_port_name_t
395 IOMachPort::makeSendRightForTask( task_t task,
396 io_object_t obj, ipc_kobject_type_t type )
397 {
398 return iokit_make_send_right( task, obj, type );
399 }
400
401 void
402 IOMachPort::free( void )
403 {
404 if (port) {
405 iokit_destroy_object_port( port );
406 }
407 super::free();
408 }
409
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411
// Wraps a kernel OSIterator for export to user space, serializing access
// through a private lock. getNextObject() is deliberately disabled in favor
// of copyNextObject(), which returns a retained reference.
class IOUserIterator : public OSIterator
{
	OSDeclareDefaultStructors(IOUserIterator);
public:
	OSObject    *   userIteratorObject;   // the wrapped iterator (owned)
	IOLock      *   lock;                 // serializes all iterator calls

	// Takes ownership of iter (LIBKERN_CONSUMED).
	static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
	virtual bool init( void ) APPLE_KEXT_OVERRIDE;
	virtual void free() APPLE_KEXT_OVERRIDE;

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	// Like getNextObject() but returns a retained object.
	virtual OSObject * copyNextObject();
};
428
429 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
430
// Base for user-space notification objects. Reuses the inherited
// userIteratorObject slot to hold the IONotifier (aliased as holdNotify).
class IOUserNotification : public IOUserIterator
{
	OSDeclareDefaultStructors(IOUserNotification);

// Alias: this subclass stores an IONotifier where the base class keeps
// its iterator.
#define holdNotify      userIteratorObject

public:

	virtual void free() APPLE_KEXT_OVERRIDE;

	// Replace the held notifier; the previous one (if any) is removed.
	virtual void setNotification( IONotifier * obj );

	virtual void reset() APPLE_KEXT_OVERRIDE;
	virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
446
447 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
448
449 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
450
451 IOUserIterator *
452 IOUserIterator::withIterator(OSIterator * iter)
453 {
454 IOUserIterator * me;
455
456 if (!iter) {
457 return NULL;
458 }
459
460 me = new IOUserIterator;
461 if (me && !me->init()) {
462 me->release();
463 me = NULL;
464 }
465 if (!me) {
466 return me;
467 }
468 me->userIteratorObject = iter;
469
470 return me;
471 }
472
473 bool
474 IOUserIterator::init( void )
475 {
476 if (!OSObject::init()) {
477 return false;
478 }
479
480 lock = IOLockAlloc();
481 if (!lock) {
482 return false;
483 }
484
485 return true;
486 }
487
488 void
489 IOUserIterator::free()
490 {
491 if (userIteratorObject) {
492 userIteratorObject->release();
493 }
494 if (lock) {
495 IOLockFree(lock);
496 }
497 OSObject::free();
498 }
499
500 void
501 IOUserIterator::reset()
502 {
503 IOLockLock(lock);
504 assert(OSDynamicCast(OSIterator, userIteratorObject));
505 ((OSIterator *)userIteratorObject)->reset();
506 IOLockUnlock(lock);
507 }
508
509 bool
510 IOUserIterator::isValid()
511 {
512 bool ret;
513
514 IOLockLock(lock);
515 assert(OSDynamicCast(OSIterator, userIteratorObject));
516 ret = ((OSIterator *)userIteratorObject)->isValid();
517 IOLockUnlock(lock);
518
519 return ret;
520 }
521
522 OSObject *
523 IOUserIterator::getNextObject()
524 {
525 assert(false);
526 return NULL;
527 }
528
529 OSObject *
530 IOUserIterator::copyNextObject()
531 {
532 OSObject * ret = NULL;
533
534 IOLockLock(lock);
535 if (userIteratorObject) {
536 ret = ((OSIterator *)userIteratorObject)->getNextObject();
537 if (ret) {
538 ret->retain();
539 }
540 }
541 IOLockUnlock(lock);
542
543 return ret;
544 }
545
546 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
547 extern "C" {
548 // functions called from osfmk/device/iokit_rpc.c
549
550 void
551 iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
552 {
553 IOUserClient * uc;
554
555 if (!obj) {
556 return;
557 }
558
559 if ((IKOT_IOKIT_CONNECT == type)
560 && (uc = OSDynamicCast(IOUserClient, obj))) {
561 OSIncrementAtomic(&uc->__ipc);
562 }
563
564 obj->retain();
565 }
566
567 void
568 iokit_remove_reference( io_object_t obj )
569 {
570 if (obj) {
571 obj->release();
572 }
573 }
574
// Drop one IPC reference on a connect object. When the last outstanding
// __ipc reference on an inactive user client goes away, pick up a deferred
// finalize (flagged by finalizeUserReferences()) and schedule it.
void
iokit_remove_connect_reference( io_object_t obj )
{
	IOUserClient * uc;
	bool finalize = false;

	if (!obj) {
		return;
	}

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		// OSDecrementAtomic returns the pre-decrement value, so 1 means
		// this was the final IPC reference.
		if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
			IOLockLock(gIOObjectPortLock);
			// Claim the deferred-finalize flag exactly once under the lock.
			if ((finalize = uc->__ipcFinal)) {
				uc->__ipcFinal = false;
			}
			IOLockUnlock(gIOObjectPortLock);
		}
		if (finalize) {
			uc->scheduleFinalize(true);
		}
	}

	obj->release();
}
600
// Decide whether a user client can be finalized now. If IPC references are
// still outstanding (__ipc != 0), record __ipcFinal and return false —
// finalize is deferred until iokit_remove_connect_reference() drops the
// last reference. Returns true when finalize may proceed immediately.
bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
	IOUserClient * uc;
	bool ok = true;

	if ((uc = OSDynamicCast(IOUserClient, obj))) {
		IOLockLock(gIOObjectPortLock);
		// Assignment inside the condition is intentional: flag and test
		// in one step under the lock.
		if ((uc->__ipcFinal = (0 != uc->__ipc))) {
			ok = false;
		}
		IOLockUnlock(gIOObjectPortLock);
	}
	return ok;
}
616
// Return a retained ipc_port_t for obj, creating the IOMachPort wrapper
// lazily on first use. Each call accounts one make-send right via mscount
// (a new wrapper starts at mscount == 1). Returns NULL on allocation
// failure.
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
	IOMachPort *machPort = NULL;
	ipc_port_t  port = NULL;

	IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);

	lck_mtx_lock(gIOObjectPortLock);

	machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);

	if (__improbable(machPort == NULL)) {
		// First request for this (obj, type): create and hash the wrapper.
		machPort = IOMachPort::withObjectAndType(obj, type);
		if (__improbable(machPort == NULL)) {
			goto end;
		}
		SLIST_INSERT_HEAD(bucket, machPort, link);
	} else {
		machPort->mscount++;
	}

	// Caller receives its own port reference.
	iokit_retain_port(machPort->port);
	port = machPort->port;

end:
	lck_mtx_unlock(gIOObjectPortLock);

	return port;
}
647
// Handle a dead-port / no-senders event for obj. Returns kIOReturnNotReady
// when the port survived (send rights raced in — caller should re-arm with
// the updated *mscount). Otherwise dispatches the type-specific death
// action and returns kIOReturnSuccess.
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
    ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
	IOUserClient *      client;
	IOMemoryMap *       map;
	IOUserNotification * notify;

	if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
		return kIOReturnNotReady;
	}

	if (IKOT_IOKIT_CONNECT == type) {
		// Connect port: notify the user client under its own lock.
		if ((client = OSDynamicCast( IOUserClient, obj ))) {
			IOStatisticsClientCall();
			IOLockLock(client->lock);
			client->clientDied();
			IOLockUnlock(client->lock);
		}
	} else if (IKOT_IOKIT_OBJECT == type) {
		// Object port: memory maps tear down; notifications detach.
		if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
			map->taskDied();
		} else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
			notify->setNotification( NULL );
		}
	}

	return kIOReturnSuccess;
}
677 }; /* extern "C" */
678
679 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
680
// Delivers service-matching notifications to user space: matched services
// queue up in newSet, and a single "ping" Mach message wakes the client,
// which then drains the queue via copyNextObject().
class IOServiceUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceUserNotification);

	// Template for the wakeup message sent to the client's port.
	struct PingMsg {
		mach_msg_header_t               msgHdr;
		OSNotificationHeader64          notifyHeader;
	};

	enum { kMaxOutstanding = 1024 };  // cap on queued, undelivered services

	PingMsg     *       pingMsg;      // preformatted wakeup message
	vm_size_t           msgSize;      // allocated size of pingMsg
	OSArray     *       newSet;       // services awaiting pickup (LIFO drain)
	bool                armed;        // true when the next arrival should ping
	bool                ipcLogged;    // rate-limits send-failure logging to once

	public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    bool clientIs64 );
	virtual void free() APPLE_KEXT_OVERRIDE;
	// Detach from the (possibly dead) client port.
	void invalidatePort(void);

	// Static trampoline registered with the notification machinery.
	static bool _handler( void * target,
	    void * ref, IOService * newService, IONotifier * notifier );
	virtual bool handler( void * ref, IOService * newService );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
713
// Delivers IOService interest (message) notifications to user space. Unlike
// IOServiceUserNotification, each event is sent as a complete message
// carrying the provider's port and the message payload inline.
class IOServiceMessageUserNotification : public IOUserNotification
{
	OSDeclareDefaultStructors(IOServiceMessageUserNotification);

	// Message template; a port descriptor carries the provider's port.
	struct PingMsg {
		mach_msg_header_t               msgHdr;
		mach_msg_body_t                 msgBody;
		mach_msg_port_descriptor_t      ports[1];
		OSNotificationHeader64          notifyHeader __attribute__ ((packed));
	};

	PingMsg *           pingMsg;      // preformatted message header/descriptor
	vm_size_t           msgSize;      // size of the template (without payload)
	uint8_t             clientIs64;   // client task uses 64-bit user references
	int                 owningPID;    // pid captured at registration time
	bool                ipcLogged;    // rate-limits send-failure logging to once

	public:

	virtual bool init( mach_port_t port, natural_t type,
	    void * reference, vm_size_t referenceSize,
	    vm_size_t extraSize,
	    bool clientIs64 );

	virtual void free() APPLE_KEXT_OVERRIDE;
	// Detach from the (possibly dead) client port.
	void invalidatePort(void);

	// Static trampoline registered as the interest handler.
	static IOReturn _handler( void * target, void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );
	virtual IOReturn handler( void * ref,
	    UInt32 messageType, IOService * provider,
	    void * messageArgument, vm_size_t argSize );

	virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
	virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};
751
752 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
753
754 #undef super
755 #define super IOUserIterator
756 OSDefineMetaClass( IOUserNotification, IOUserIterator );
757 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
758
759 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
760
761 void
762 IOUserNotification::free( void )
763 {
764 if (holdNotify) {
765 assert(OSDynamicCast(IONotifier, holdNotify));
766 ((IONotifier *)holdNotify)->remove();
767 holdNotify = NULL;
768 }
769 // can't be in handler now
770
771 super::free();
772 }
773
774
775 void
776 IOUserNotification::setNotification( IONotifier * notify )
777 {
778 OSObject * previousNotify;
779
780 IOLockLock( gIOObjectPortLock);
781
782 previousNotify = holdNotify;
783 holdNotify = notify;
784
785 IOLockUnlock( gIOObjectPortLock);
786
787 if (previousNotify) {
788 assert(OSDynamicCast(IONotifier, previousNotify));
789 ((IONotifier *)previousNotify)->remove();
790 }
791 }
792
793 void
794 IOUserNotification::reset()
795 {
796 // ?
797 }
798
799 bool
800 IOUserNotification::isValid()
801 {
802 return true;
803 }
804
805 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
806
807 #undef super
808 #define super IOUserNotification
809 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
810
811 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
812
// Build the notification: allocate the pending-service array and the ping
// message template aimed at the client's port. referenceSize bytes of
// client-supplied reference data are embedded in the notify header.
// Returns false on bad arguments or allocation failure (free() cleans up
// whatever was allocated).
bool
IOServiceUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize,
    bool clientIs64 )
{
	if (!super::init()) {
		return false;
	}

	newSet = OSArray::withCapacity( 1 );
	if (!newSet) {
		return false;
	}

	// Reference data must fit in the fixed-size async reference slot.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	// Shrink the template to the actual reference size used.
	msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port    = port;
	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size           = msgSize;
	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

	pingMsg->notifyHeader.size = 0;
	pingMsg->notifyHeader.type = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
852
853 void
854 IOServiceUserNotification::invalidatePort(void)
855 {
856 if (pingMsg) {
857 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
858 }
859 }
860
// Teardown. Instance fields are captured into locals first because
// super::free() deallocates the object; the captured resources are then
// released afterwards.
void
IOServiceUserNotification::free( void )
{
	PingMsg   *     _pingMsg;
	vm_size_t       _msgSize;
	OSArray   *     _newSet;

	_pingMsg   = pingMsg;
	_msgSize   = msgSize;
	_newSet    = newSet;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			// Drop the send right held on the client's port.
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree(_pingMsg, _msgSize);
	}

	if (_newSet) {
		_newSet->release();
	}
}
885
886 bool
887 IOServiceUserNotification::_handler( void * target,
888 void * ref, IOService * newService, IONotifier * notifier )
889 {
890 return ((IOServiceUserNotification *) target)->handler( ref, newService );
891 }
892
// Matching callback: queue newService for pickup and, when the queue goes
// from empty to non-empty while armed, send one ping message to wake the
// client. The client re-arms by draining the queue (see copyNextObject()).
bool
IOServiceUserNotification::handler( void * ref,
    IOService * newService )
{
	unsigned int        count;
	kern_return_t       kr;
	ipc_port_t          port = NULL;
	bool                sendPing = false;

	IOTakeLock( lock );

	count = newSet->getCount();
	if (count < kMaxOutstanding) {
		// Services beyond kMaxOutstanding are dropped.
		newSet->setObject( newService );
		// Ping only on the empty->non-empty transition, and only once
		// per arming cycle (assignment in condition is intentional).
		if ((sendPing = (armed && (0 == count)))) {
			armed = false;
		}
	}

	IOUnlock( lock );

	if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
		// Keep the terminated service's object port alive until the
		// client has had a chance to see it.
		IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
	}

	if (sendPing) {
		if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
			pingMsg->msgHdr.msgh_local_port = port;
		} else {
			pingMsg->msgHdr.msgh_local_port = NULL;
		}

		kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
		    pingMsg->msgHdr.msgh_size,
		    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
		    0);
		if (port) {
			iokit_release_port( port );
		}

		// Log send failures once per notification object.
		if ((KERN_SUCCESS != kr) && !ipcLogged) {
			ipcLogged = true;
			IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
		}
	}

	return true;
}
941 OSObject *
942 IOServiceUserNotification::getNextObject()
943 {
944 assert(false);
945 return NULL;
946 }
947
948 OSObject *
949 IOServiceUserNotification::copyNextObject()
950 {
951 unsigned int count;
952 OSObject * result;
953
954 IOLockLock(lock);
955
956 count = newSet->getCount();
957 if (count) {
958 result = newSet->getObject( count - 1 );
959 result->retain();
960 newSet->removeObject( count - 1);
961 } else {
962 result = NULL;
963 armed = true;
964 }
965
966 IOLockUnlock(lock);
967
968 return result;
969 }
970
971 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
972
973 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
974
975 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
976
// Build the interest-notification template aimed at the client's port.
// The template carries one port descriptor (filled with the provider's
// port at send time) and referenceSize bytes of client reference data.
// extraSize is recorded in the notify header as the payload size; note the
// payload itself is appended per-message in handler(), not allocated here.
bool
IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
    void * reference, vm_size_t referenceSize, vm_size_t extraSize,
    bool client64 )
{
	if (!super::init()) {
		return false;
	}

	// Reference data must fit in the fixed-size async reference slot.
	if (referenceSize > sizeof(OSAsyncReference64)) {
		return false;
	}

	clientIs64 = client64;

	owningPID = proc_selfpid();

	extraSize += sizeof(IOServiceInterestContent64);
	// Shrink the template to the actual reference size used.
	msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
	pingMsg = (PingMsg *) IOMalloc( msgSize);
	if (!pingMsg) {
		return false;
	}

	bzero( pingMsg, msgSize);

	pingMsg->msgHdr.msgh_remote_port    = port;
	pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS_COMPLEX
	    |  MACH_MSGH_BITS(
		MACH_MSG_TYPE_COPY_SEND /*remote*/,
		MACH_MSG_TYPE_MAKE_SEND /*local*/);
	pingMsg->msgHdr.msgh_size           = msgSize;
	pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

	pingMsg->msgBody.msgh_descriptor_count = 1;

	// The provider port is filled in per message by handler().
	pingMsg->ports[0].name              = NULL;
	pingMsg->ports[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
	pingMsg->ports[0].type              = MACH_MSG_PORT_DESCRIPTOR;

	pingMsg->notifyHeader.size          = extraSize;
	pingMsg->notifyHeader.type          = type;
	bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

	return true;
}
1023
1024 void
1025 IOServiceMessageUserNotification::invalidatePort(void)
1026 {
1027 if (pingMsg) {
1028 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
1029 }
1030 }
1031
// Teardown. Fields are captured into locals before super::free()
// deallocates the object; the captured resources are released afterwards.
void
IOServiceMessageUserNotification::free( void )
{
	PingMsg *       _pingMsg;
	vm_size_t       _msgSize;

	_pingMsg   = pingMsg;
	_msgSize   = msgSize;

	super::free();

	if (_pingMsg && _msgSize) {
		if (_pingMsg->msgHdr.msgh_remote_port) {
			// Drop the send right held on the client's port.
			iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
		}
		IOFree( _pingMsg, _msgSize);
	}
}
1050
1051 IOReturn
1052 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1053 UInt32 messageType, IOService * provider,
1054 void * argument, vm_size_t argSize )
1055 {
1056 return ((IOServiceMessageUserNotification *) target)->handler(
1057 ref, messageType, provider, argument, argSize);
1058 }
1059
// Interest callback: build and send one complete notification message to
// the client, containing the message type, the (possibly truncated and
// padded) argument payload, and a send right to the provider. Small
// messages are assembled on the stack; larger ones are heap-allocated.
IOReturn
IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
	enum { kLocalMsgSize = 0x100 };
	uint64_t                      stackMsg[kLocalMsgSize / sizeof(uint64_t)];
	void *                        allocMsg;
	kern_return_t                 kr;
	vm_size_t                     argSize;
	vm_size_t                     thisMsgSize;
	ipc_port_t                    thisPort, providerPort;
	struct PingMsg *              thisMsg;
	IOServiceInterestContent64 *  data;

	// Special query: hand back the owning pid instead of sending anything.
	if (kIOMessageCopyClientID == messageType) {
		*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
		return kIOReturnSuccess;
	}

	if (callerArgSize == 0) {
		// No payload: the argument pointer itself is sent, sized per the
		// client's word width.
		if (clientIs64) {
			argSize = sizeof(data->messageArgument[0]);
		} else {
			argSize = sizeof(uint32_t);
		}
	} else {
		// Clamp oversized payloads rather than failing.
		if (callerArgSize > kIOUserNotifyMaxMessageSize) {
			callerArgSize = kIOUserNotifyMaxMessageSize;
		}
		argSize = callerArgSize;
	}

	// adjust message size for ipc restrictions
	// The original (unpadded) payload size is encoded into spare bits of
	// the notify type; argSize is then rounded up for the wire format.
	natural_t type;
	type = pingMsg->notifyHeader.type;
	type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
	type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
	argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

	thisMsgSize = msgSize
	    + sizeof(IOServiceInterestContent64)
	    - sizeof(data->messageArgument)
	    + argSize;

	if (thisMsgSize > sizeof(stackMsg)) {
		allocMsg = IOMalloc(thisMsgSize);
		if (!allocMsg) {
			return kIOReturnNoMemory;
		}
		thisMsg = (typeof(thisMsg))allocMsg;
	} else {
		allocMsg = NULL;
		thisMsg = (typeof(thisMsg))stackMsg;
	}

	// Start from the preformatted template, then append the content.
	bcopy(pingMsg, thisMsg, msgSize);
	thisMsg->notifyHeader.type = type;
	data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
	// == pingMsg->notifyHeader.content;
	data->messageType = messageType;

	if (callerArgSize == 0) {
		data->messageArgument[0] = (io_user_reference_t) messageArgument;
		if (!clientIs64) {
			// Duplicate the 32-bit value into the high word for 32-bit clients.
			data->messageArgument[0] |= (data->messageArgument[0] << 32);
		}
	} else {
		bcopy( messageArgument, data->messageArgument, callerArgSize );
		// Zero the padding added by the round-up above.
		bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
	}

	thisMsg->notifyHeader.type = type;
	thisMsg->msgHdr.msgh_size = thisMsgSize;

	// The port descriptor carries a send right to the provider; the local
	// port identifies this notification object.
	providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
	thisMsg->ports[0].name = providerPort;
	thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
	thisMsg->msgHdr.msgh_local_port = thisPort;

	kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
	    thisMsg->msgHdr.msgh_size,
	    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
	    0);
	if (thisPort) {
		iokit_release_port( thisPort );
	}
	if (providerPort) {
		iokit_release_port( providerPort );
	}

	if (allocMsg) {
		IOFree(allocMsg, thisMsgSize);
	}

	// Log send failures once per notification object.
	if ((KERN_SUCCESS != kr) && !ipcLogged) {
		ipcLogged = true;
		IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
	}

	return kIOReturnSuccess;
}
1162
1163 OSObject *
1164 IOServiceMessageUserNotification::getNextObject()
1165 {
1166 return NULL;
1167 }
1168
1169 OSObject *
1170 IOServiceMessageUserNotification::copyNextObject()
1171 {
1172 return NULL;
1173 }
1174
1175 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1176
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

// Guards per-client owner bookkeeping.
IOLock       * gIOUserClientOwnersLock;
1182
1183 void
1184 IOUserClient::initialize( void )
1185 {
1186 gIOObjectPortLock = IOLockAlloc();
1187 gIOUserClientOwnersLock = IOLockAlloc();
1188 gIOUserServerLock = IOLockAlloc();
1189 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1190 }
1191
1192 void
1193 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1194 mach_port_t wakePort,
1195 void *callback, void *refcon)
1196 {
1197 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1198 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1199 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1200 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1201 }
1202
/*
 * 64-bit variant of setAsyncReference(): same layout, but callback and
 * refcon are io_user_reference_t sized.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon)
{
	// Keep any flag bits already stored in slot 0.
	asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
	    | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
	asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
	asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1213
/*
 * As setAsyncReference64(), additionally tagging the reference with
 * kIOUCAsync64Flag when the owning task runs with a 64-bit address
 * map, so replies are later formatted in the matching message layout.
 */
void
IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
    mach_port_t wakePort,
    mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
	setAsyncReference64(asyncRef, wakePort, callback, refcon);
	if (vm_map_is_64bit(get_task_map(task))) {
		asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
	}
}
1224
1225 static OSDictionary *
1226 CopyConsoleUser(UInt32 uid)
1227 {
1228 OSArray * array;
1229 OSDictionary * user = NULL;
1230
1231 if ((array = OSDynamicCast(OSArray,
1232 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1233 for (unsigned int idx = 0;
1234 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1235 idx++) {
1236 OSNumber * num;
1237
1238 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1239 && (uid == num->unsigned32BitValue())) {
1240 user->retain();
1241 break;
1242 }
1243 }
1244 array->release();
1245 }
1246 return user;
1247 }
1248
1249 static OSDictionary *
1250 CopyUserOnConsole(void)
1251 {
1252 OSArray * array;
1253 OSDictionary * user = NULL;
1254
1255 if ((array = OSDynamicCast(OSArray,
1256 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1257 for (unsigned int idx = 0;
1258 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1259 idx++) {
1260 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1261 user->retain();
1262 break;
1263 }
1264 }
1265 array->release();
1266 }
1267 return user;
1268 }
1269
1270 IOReturn
1271 IOUserClient::clientHasAuthorization( task_t task,
1272 IOService * service )
1273 {
1274 proc_t p;
1275
1276 p = (proc_t) get_bsdtask_info(task);
1277 if (p) {
1278 uint64_t authorizationID;
1279
1280 authorizationID = proc_uniqueid(p);
1281 if (authorizationID) {
1282 if (service->getAuthorizationID() == authorizationID) {
1283 return kIOReturnSuccess;
1284 }
1285 }
1286 }
1287
1288 return kIOReturnNotPermitted;
1289 }
1290
/*
 * Check whether the caller identified by 'securityToken' holds the
 * named privilege.  The token's meaning depends on the privilege: for
 * kIOClientPrivilegeSecureConsoleProcess it is an IOUCProcessToken,
 * otherwise a task_t (NULL meaning the current task for the
 * console-session check).
 */
IOReturn
IOUserClient::clientHasPrivilege( void * securityToken,
    const char * privilegeName )
{
	kern_return_t kr;
	security_token_t token;
	mach_msg_type_number_t count;
	task_t task;
	OSDictionary * user;
	bool secureConsole;


	// Foreground privilege: denied only when the GPU is denied to the
	// current task.
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
	    sizeof(kIOClientPrivilegeForeground))) {
		if (task_is_gpu_denied(current_task())) {
			return kIOReturnNotPrivileged;
		} else {
			return kIOReturnSuccess;
		}
	}

	// Console session: the caller's audit session id must match the
	// session of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
	    sizeof(kIOClientPrivilegeConsoleSession))) {
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task) {
			task = current_task();
		}
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p))) {
			user = CopyUserOnConsole();
			if (user) {
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return kr;
	}

	// For the secure-console check the token wraps the task plus the
	// caller's pid; otherwise the token is the task itself.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	} else {
		task = (task_t)securityToken;
	}

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr) {
	} else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// Administrator: token.val[0] (the uid) must be 0.
		if (0 != token.val[0]) {
			kr = kIOReturnNotPrivileged;
		}
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: the uid must appear in the console-users list.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the uid's session must be on-console; the
		// secure-console variant additionally requires the session's
		// secure-input pid (when set) to match the caller's pid.
		user = CopyConsoleUser(token.val[0]);
		if (user) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
				kr = kIOReturnNotPrivileged;
			} else if (secureConsole) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
					kr = kIOReturnNotPrivileged;
				}
			}
			user->release();
		} else {
			kr = kIOReturnNotPrivileged;
		}
	} else {
		kr = kIOReturnUnsupported;
	}

	return kr;
}
1385
/*
 * Return the entitlements dictionary for the process behind 'task', or
 * NULL.  Tries the code-signing layer's cached dictionary first, then
 * falls back to copying and parsing the raw entitlements blob.  The
 * caller releases the returned dictionary.
 */
OSDictionary *
IOUserClient::copyClientEntitlements(task_t task)
{
#define MAX_ENTITLEMENTS_LEN (128 * 1024)

	proc_t p = NULL;
	pid_t pid = 0;
	size_t len = 0;
	void *entitlements_blob = NULL;
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSDictionary *entitlements = NULL;
	OSString *errorString = NULL;

	p = (proc_t)get_bsdtask_info(task);
	if (p == NULL) {
		goto fail;
	}
	pid = proc_pid(p);

	// Fast path: the code-signing layer may already hold the parsed
	// dictionary; return it directly (already retained for us).
	if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
		if (entitlements) {
			return entitlements;
		}
	}

	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
		goto fail;
	}

	// The blob must at least contain the CS_GenericBlob header.
	if (len <= offsetof(CS_GenericBlob, data)) {
		goto fail;
	}

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n",
		    proc_best_name(p), pid, len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob.  Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL) {
		goto fail;
	}
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements for %s[%u]: %s\n",
		    proc_best_name(p), pid, errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL) {
		goto fail;
	}

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL) {
		goto fail;
	}
	// Ownership transferred to 'entitlements'; prevent the release below.
	entitlements_obj = NULL;

fail:
	// Shared cleanup for both success and failure paths.
	if (entitlements_data != NULL) {
		IOFree(entitlements_data, len + 1);
	}
	if (entitlements_obj != NULL) {
		entitlements_obj->release();
	}
	if (errorString != NULL) {
		errorString->release();
	}
	return entitlements;
}
1471
1472 OSObject *
1473 IOUserClient::copyClientEntitlement( task_t task,
1474 const char * entitlement )
1475 {
1476 OSDictionary *entitlements;
1477 OSObject *value;
1478
1479 entitlements = copyClientEntitlements(task);
1480 if (entitlements == NULL) {
1481 return NULL;
1482 }
1483
1484 /* Fetch the entitlement value from the dictionary. */
1485 value = entitlements->getObject(entitlement);
1486 if (value != NULL) {
1487 value->retain();
1488 }
1489
1490 entitlements->release();
1491 return value;
1492 }
1493
// Basic init: set up expansion data via reserve().
// NOTE(review): super::init() is skipped when a property table already
// exists — presumably the entry was initialized earlier; confirm before
// relying on this.
bool
IOUserClient::init()
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1503
// Init with a property dictionary; same short-circuit as init():
// super::init(dictionary) is skipped when a property table already
// exists (NOTE(review): confirm intent).
bool
IOUserClient::init(OSDictionary * dictionary)
{
	if (getPropertyTable() || super::init(dictionary)) {
		return reserve();
	}

	return false;
}
1513
// Designated user-client init.  The owning task, security id and
// connection type are unused by the base class; subclasses override to
// record them.  Same property-table short-circuit as init().
bool
IOUserClient::initWithTask(task_t owningTask,
    void * securityID,
    UInt32 type )
{
	if (getPropertyTable() || super::init()) {
		return reserve();
	}

	return false;
}
1525
1526 bool
1527 IOUserClient::initWithTask(task_t owningTask,
1528 void * securityID,
1529 UInt32 type,
1530 OSDictionary * properties )
1531 {
1532 bool ok;
1533
1534 ok = super::init( properties );
1535 ok &= initWithTask( owningTask, securityID, type );
1536
1537 return ok;
1538 }
1539
1540 bool
1541 IOUserClient::reserve()
1542 {
1543 if (!reserved) {
1544 reserved = IONew(ExpansionData, 1);
1545 if (!reserved) {
1546 return false;
1547 }
1548 }
1549 setTerminateDefer(NULL, true);
1550 IOStatisticsRegisterCounter();
1551
1552 return true;
1553 }
1554
// Link node tying one task to one user client.  Each node sits on two
// queues: the task's list of user clients (taskLink) and the client's
// list of owning tasks (ucLink).  Guarded by gIOUserClientOwnersLock.
struct IOUserClientOwner {
	task_t task;
	queue_chain_t taskLink;
	IOUserClient * uc;
	queue_chain_t ucLink;
};
1561
/*
 * Record 'task' as an owner of this user client.  A link node is added
 * to both the client's owner list and the task's user-client list,
 * unless the task is already an owner.  Serialized by
 * gIOUserClientOwnersLock.
 */
IOReturn
IOUserClient::registerOwner(task_t task)
{
	IOUserClientOwner * owner;
	IOReturn ret;
	bool newOwner;

	IOLockLock(gIOUserClientOwnersLock);

	newOwner = true;
	ret = kIOReturnSuccess;

	// owners.next == NULL marks a never-initialized queue head.
	if (!owners.next) {
		queue_init(&owners);
	} else {
		queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
		{
			if (task != owner->task) {
				continue;
			}
			newOwner = false;
			break;
		}
	}
	if (newOwner) {
		owner = IONew(IOUserClientOwner, 1);
		if (!owner) {
			ret = kIOReturnNoMemory;
		} else {
			owner->task = task;
			owner->uc = this;
			// Link the node onto both lists.
			queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
			queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
			// Propagate this client's interest in app-suspend messages.
			if (messageAppSuspended) {
				task_set_message_app_suspended(task, true);
			}
		}
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	return ret;
}
1605
/*
 * Detach this client from every owning task.  For each task, the
 * message-app-suspended interest is recomputed from that task's
 * remaining user clients before the link node is freed.  Serialized by
 * gIOUserClientOwnersLock.
 */
void
IOUserClient::noMoreSenders(void)
{
	IOUserClientOwner * owner;
	IOUserClientOwner * iter;
	queue_head_t * taskque;
	bool hasMessageAppSuspended;

	IOLockLock(gIOUserClientOwnersLock);

	if (owners.next) {
		while (!queue_empty(&owners)) {
			owner = (IOUserClientOwner *)(void *) queue_first(&owners);
			taskque = task_io_user_clients(owner->task);
			queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
			// Does any remaining client of this task still want
			// app-suspended messages?
			hasMessageAppSuspended = false;
			queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
				hasMessageAppSuspended = iter->uc->messageAppSuspended;
				if (hasMessageAppSuspended) {
					break;
				}
			}
			task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
			queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
			IODelete(owner, IOUserClientOwner, 1);
		}
		// Mark the queue head uninitialized again (see registerOwner()).
		owners.next = owners.prev = NULL;
	}

	IOLockUnlock(gIOUserClientOwnersLock);
}
1637
1638
/*
 * Called when a task's app-suspended state changes.  Collects (under
 * the owners lock) the task's user clients that opted into
 * messageAppSuspended, then delivers kIOMessageTaskAppSuspendedChange
 * to each outside the lock.
 */
extern "C" void
iokit_task_app_suspended_changed(task_t task)
{
	queue_head_t * taskque;
	IOUserClientOwner * owner;
	OSSet * set;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	set = NULL;
	queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
		if (!owner->uc->messageAppSuspended) {
			continue;
		}
		if (!set) {
			set = OSSet::withCapacity(4);
			if (!set) {
				break;
			}
		}
		set->setObject(owner->uc);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Deliver messages without holding the owners lock.
	if (set) {
		set->iterateObjects(^bool (OSObject * obj) {
			IOUserClient * uc;

			uc = (typeof(uc))obj;
#if 0
			{
				OSString * str;
				str = IOCopyLogNameForPID(task_pid(task));
				IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
				    uc->getName(), task_is_app_suspended(task));
				OSSafeReleaseNULL(str);
			}
#endif
			uc->message(kIOMessageTaskAppSuspendedChange, NULL);

			return false;
		});
		set->release();
	}
}
1686
/*
 * Task-termination hook: unlink every user client owned by 'task'.
 * Clients whose owner list becomes empty are chained (retained) onto a
 * 'dead' list and get clientDied() delivered after the lock is
 * dropped.
 */
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
	IOUserClientOwner * owner;
	IOUserClient * dead;
	IOUserClient * uc;
	queue_head_t * taskque;

	IOLockLock(gIOUserClientOwnersLock);

	taskque = task_io_user_clients(task);
	dead = NULL;
	while (!queue_empty(taskque)) {
		owner = (IOUserClientOwner *)(void *) queue_first(taskque);
		uc = owner->uc;
		queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
		queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
		if (queue_empty(&uc->owners)) {
			uc->retain();
			IOLog("destroying out of band connect for %s\n", uc->getName());
			// now using the uc queue head as a singly linked queue,
			// leaving .next as NULL to mark it empty
			uc->owners.next = NULL;
			uc->owners.prev = (queue_entry_t) dead;
			dead = uc;
		}
		IODelete(owner, IOUserClientOwner, 1);
	}

	IOLockUnlock(gIOUserClientOwnersLock);

	// Walk the dead list outside the lock; balance the retain above.
	while (dead) {
		uc = dead;
		dead = (IOUserClient *)(void *) dead->owners.prev;
		uc->owners.prev = NULL;
		if (uc->sharedInstance || !uc->closed) {
			uc->clientDied();
		}
		uc->release();
	}

	return KERN_SUCCESS;
}
1730
/*
 * Release resources held by the user client.  By this point all owner
 * links must already have been removed (asserted below); mappings, the
 * lock and the expansion data are freed here.
 */
void
IOUserClient::free()
{
	if (mappings) {
		mappings->release();
	}
	if (lock) {
		IOLockFree(lock);
	}

	IOStatisticsUnregisterCounter();

	// noMoreSenders()/iokit_task_terminate() must have cleared these.
	assert(!owners.next);
	assert(!owners.prev);

	if (reserved) {
		IODelete(reserved, ExpansionData, 1);
	}

	super::free();
}
1752
1753 IOReturn
1754 IOUserClient::clientDied( void )
1755 {
1756 IOReturn ret = kIOReturnNotReady;
1757
1758 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1759 ret = clientClose();
1760 }
1761
1762 return ret;
1763 }
1764
// Base implementation: subclasses override to tear down the
// connection; the base class has nothing to close.
IOReturn
IOUserClient::clientClose( void )
{
	return kIOReturnUnsupported;
}
1770
// Base implementation: subclasses override to return their provider
// service; the base class has none.
IOService *
IOUserClient::getService( void )
{
	return NULL;
}
1776
// Base implementation of the 32-bit refCon variant: subclasses
// override to accept a notification port.
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t /* port */,
	UInt32 /* type */,
	UInt32 /* refCon */)
{
	return kIOReturnUnsupported;
}
1785
// 64-bit refCon variant: forwards to the 32-bit overload, truncating
// the refCon (legacy subclasses only implement the UInt32 form).
IOReturn
IOUserClient::registerNotificationPort(
	mach_port_t port,
	UInt32 type,
	io_user_reference_t refCon)
{
	return registerNotificationPort(port, type, (UInt32) refCon);
}
1794
// Base implementation: subclasses override to export a semaphore for
// the given notification type.
IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type,
    semaphore_t * semaphore )
{
	return kIOReturnUnsupported;
}
1801
// Base implementation: subclasses override to link two user clients.
IOReturn
IOUserClient::connectClient( IOUserClient * /* client */ )
{
	return kIOReturnUnsupported;
}
1807
// Base implementation: subclasses override to hand back a memory
// descriptor for the given type (see mapClientMemory64()).
IOReturn
IOUserClient::clientMemoryForType( UInt32 type,
    IOOptionBits * options,
    IOMemoryDescriptor ** memory )
{
	return kIOReturnUnsupported;
}
1815
#if !__LP64__
// Legacy ILP32-only entry point; base implementation maps nothing.
IOMemoryMap *
IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1827
/*
 * Map the memory a subclass exposes for 'type' into 'task'.  The
 * subclass supplies the descriptor and base options via
 * clientMemoryForType(); caller-controlled bits (kIOMapUserOptionsMask)
 * are merged in.  Returns NULL on failure; the returned map retains
 * what it needs, so the descriptor is released here.
 */
IOMemoryMap *
IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map = NULL;

	err = clientMemoryForType((UInt32) type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		FAKE_STACK_FRAME(getMetaClass());

		// Only user-selectable option bits come from mapFlags.
		options = (options & ~kIOMapUserOptionsMask)
		    | (mapFlags & kIOMapUserOptionsMask);
		map = memory->createMappingInTask( task, atAddress, options );
		memory->release();

		FAKE_STACK_FRAME_END();
	}

	return map;
}
1855
1856 IOReturn
1857 IOUserClient::exportObjectToClient(task_t task,
1858 OSObject *obj, io_object_t *clientObj)
1859 {
1860 mach_port_name_t name;
1861
1862 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1863
1864 *clientObj = (io_object_t)(uintptr_t) name;
1865
1866 if (obj) {
1867 obj->release();
1868 }
1869
1870 return kIOReturnSuccess;
1871 }
1872
1873 IOReturn
1874 IOUserClient::copyPortNameForObjectInTask(task_t task,
1875 OSObject *obj, mach_port_name_t * port_name)
1876 {
1877 mach_port_name_t name;
1878
1879 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1880
1881 *(mach_port_name_t *) port_name = name;
1882
1883 return kIOReturnSuccess;
1884 }
1885
1886 IOReturn
1887 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1888 OSObject **obj)
1889 {
1890 OSObject * object;
1891
1892 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1893
1894 *obj = object;
1895
1896 return object ? kIOReturnSuccess : kIOReturnIPCError;
1897 }
1898
// Adjust the user-reference count on a port name in 'task' by 'delta'
// (thin wrapper over iokit_mod_send_right()).
IOReturn
IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
	return iokit_mod_send_right(task, port_name, delta);
}
1904
// Base implementation of the legacy external-method lookup: no
// methods; subclasses override.
IOExternalMethod *
IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
1910
// Base implementation of the legacy async external-method lookup: no
// methods; subclasses override.
IOExternalAsyncMethod *
IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
	return NULL;
}
1916
// Base implementation of the trap lookup: no traps; subclasses
// override.
IOExternalTrap *
IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
	return NULL;
}
1923
1924 #pragma clang diagnostic push
1925 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1926
1927 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1928 // functions can break clients of kexts implementing getExternalMethodForIndex()
1929 IOExternalMethod *
1930 IOUserClient::
1931 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1932 {
1933 IOExternalMethod *method = getExternalMethodForIndex(index);
1934
1935 if (method) {
1936 *targetP = (IOService *) method->object;
1937 }
1938
1939 return method;
1940 }
1941
1942 IOExternalAsyncMethod *
1943 IOUserClient::
1944 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1945 {
1946 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1947
1948 if (method) {
1949 *targetP = (IOService *) method->object;
1950 }
1951
1952 return method;
1953 }
1954
1955 IOExternalTrap *
1956 IOUserClient::
1957 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1958 {
1959 IOExternalTrap *trap = getExternalTrapForIndex(index);
1960
1961 if (trap) {
1962 *targetP = trap->object;
1963 }
1964
1965 return trap;
1966 }
1967 #pragma clang diagnostic pop
1968
1969 IOReturn
1970 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1971 {
1972 mach_port_t port;
1973 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1974
1975 if (MACH_PORT_NULL != port) {
1976 iokit_release_port_send(port);
1977 }
1978
1979 return kIOReturnSuccess;
1980 }
1981
1982 IOReturn
1983 IOUserClient::releaseNotificationPort(mach_port_t port)
1984 {
1985 if (MACH_PORT_NULL != port) {
1986 iokit_release_port_send(port);
1987 }
1988
1989 return kIOReturnSuccess;
1990 }
1991
/*
 * 32-bit compatibility wrapper: widens the reference and argument list
 * with REF64() and forwards to sendAsyncResult64().
 */
IOReturn
IOUserClient::sendAsyncResult(OSAsyncReference reference,
    IOReturn result, void *args[], UInt32 numArgs)
{
	OSAsyncReference64 reference64;
	io_user_reference_t args64[kMaxAsyncArgs];
	unsigned int idx;

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	// Widen the reference words, then the argument words.
	for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
		reference64[idx] = REF64(reference[idx]);
	}

	for (idx = 0; idx < numArgs; idx++) {
		args64[idx] = REF64(args[idx]);
	}

	return sendAsyncResult64(reference64, result, args64, numArgs);
}
2014
// As sendAsyncResult64(), with caller-supplied options (e.g.
// kIOUserNotifyOptionCanDrop to allow dropping on a full queue).
IOReturn
IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	return _sendAsyncResult64(reference, result, args, numArgs, options);
}
2021
// Deliver an async completion with default options (blocking-queue
// semantics; see _sendAsyncResult64()).
IOReturn
IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
	return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
2028
/*
 * Build and send the async-completion Mach message to the reply port
 * held in reference[0].  The message is laid out in 64-bit form when
 * the reference carries kIOUCAsync64Flag, otherwise in the legacy
 * 32-bit form.  With kIOUserNotifyOptionCanDrop the send uses a zero
 * timeout and may drop the message when the queue is full; otherwise a
 * full queue is a send failure.
 */
IOReturn
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
    IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
	// Both 32- and 64-bit message layouts share one buffer.
	struct ReplyMsg {
		mach_msg_header_t msgHdr;
		union{
			struct{
				OSNotificationHeader notifyHdr;
				IOAsyncCompletionContent asyncContent;
				uint32_t args[kMaxAsyncArgs];
			} msg32;
			struct{
				OSNotificationHeader64 notifyHdr;
				IOAsyncCompletionContent asyncContent;
				io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
			} msg64;
		} m;
	};
	ReplyMsg replyMsg;
	mach_port_t replyPort;
	kern_return_t kr;

	// If no reply port, do nothing.
	replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
	if (replyPort == MACH_PORT_NULL) {
		return kIOReturnSuccess;
	}

	if (numArgs > kMaxAsyncArgs) {
		return kIOReturnMessageTooLarge;
	}

	bzero(&replyMsg, sizeof(replyMsg));
	replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
	    0 /*local*/);
	replyMsg.msgHdr.msgh_remote_port = replyPort;
	replyMsg.msgHdr.msgh_local_port = NULL;
	replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
	if (kIOUCAsync64Flag & reference[0]) {
		// 64-bit layout; size shrinks by the unused argument slots.
		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
		    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(io_user_reference_t);
		replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
		/* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
		bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));

		replyMsg.m.msg64.asyncContent.result = result;
		if (numArgs) {
			bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
		}
	} else {
		// Legacy 32-bit layout; reference and args are truncated via REF32.
		unsigned int idx;

		replyMsg.msgHdr.msgh_size =
		    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
		    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

		replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
		    + numArgs * sizeof(uint32_t);
		replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

		/* Skip reference[0] which is left as 0 from the earlier bzero */
		for (idx = 1; idx < kOSAsyncRefCount; idx++) {
			replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
		}

		replyMsg.m.msg32.asyncContent.result = result;

		for (idx = 0; idx < numArgs; idx++) {
			replyMsg.m.msg32.args[idx] = REF32(args[idx]);
		}
	}

	if ((options & kIOUserNotifyOptionCanDrop) != 0) {
		// Zero timeout: drop the message instead of blocking.
		kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
	} else {
		/* Fail on full queue. */
		kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		    replyMsg.msgHdr.msgh_size);
	}
	// Log the first failure only, recording that fact in the reference.
	if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
		reference[0] |= kIOUCAsyncErrorLoggedFlag;
		IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
	return kr;
}
2119
2120
2121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2122
2123 extern "C" {
// Declare 'out' as a 'cls *' downcast of 'obj'; bail out of the
// enclosing MIG routine with kIOReturnBadArgument on type mismatch.
#define CHECK(cls, obj, out) \
	cls * out; \
	if( !(out = OSDynamicCast( cls, obj))) \
	    return( kIOReturnBadArgument )

// As CHECK, but 'obj' must be an IOUserIterator whose wrapped
// userIteratorObject is of class 'cls'; both are declared locally.
#define CHECKLOCKED(cls, obj, out) \
	IOUserIterator * oIter; \
	cls * out; \
	if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
	    return (kIOReturnBadArgument); \
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	    return (kIOReturnBadArgument)
2136
2137 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2138
// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.

/*
 * Wrap 'len' bytes at 'data' in a vm_map_copy_t for copyout through
 * MIG.  The source is not destroyed; the copy is consumed by IPC.
 */
static kern_return_t
copyoutkdata( const void * data, vm_size_t len,
    io_buf_ptr_t * buf )
{
	kern_return_t err;
	vm_map_copy_t copy;

	err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
	    false /* src_destroy */, &copy);

	// copyin of kernel memory is expected to succeed; still checked.
	assert( err == KERN_SUCCESS );
	if (err == KERN_SUCCESS) {
		*buf = (char *) copy;
	}

	return err;
}
2159
2160 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2161
2162 /* Routine io_server_version */
// Report the IOKit server interface version to user space.
kern_return_t
is_io_server_version(
	mach_port_t master_port,
	uint64_t *version)
{
	*version = IOKIT_SERVER_VERSION;
	return kIOReturnSuccess;
}
2171
2172 /* Routine io_object_get_class */
2173 kern_return_t
2174 is_io_object_get_class(
2175 io_object_t object,
2176 io_name_t className )
2177 {
2178 const OSMetaClass* my_obj = NULL;
2179
2180 if (!object) {
2181 return kIOReturnBadArgument;
2182 }
2183
2184 my_obj = object->getMetaClass();
2185 if (!my_obj) {
2186 return kIOReturnNotFound;
2187 }
2188
2189 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2190
2191 return kIOReturnSuccess;
2192 }
2193
2194 /* Routine io_object_get_superclass */
/*
 * Look up the metaclass named 'obj_name' and copy its superclass name
 * into class_name.  Privileged: requires the master device port.
 * kIOReturnNotFound when the class, its superclass, or the name string
 * is unavailable.
 */
kern_return_t
is_io_object_get_superclass(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t class_name)
{
	IOReturn ret;
	const OSMetaClass * meta;
	const OSMetaClass * super;
	const OSSymbol * name;
	const char * cstr;

	if (!obj_name || !class_name) {
		return kIOReturnBadArgument;
	}
	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	// do{}while(false) gives a single cleanup exit below.
	ret = kIOReturnNotFound;
	meta = NULL;
	do{
		name = OSSymbol::withCString(obj_name);
		if (!name) {
			break;
		}
		meta = OSMetaClass::copyMetaClassWithName(name);
		if (!meta) {
			break;
		}
		super = meta->getSuperClass();
		if (!super) {
			break;
		}
		cstr = super->getClassName();
		if (!cstr) {
			break;
		}
		strlcpy(class_name, cstr, sizeof(io_name_t));
		ret = kIOReturnSuccess;
	}while (false);

	OSSafeReleaseNULL(name);
	if (meta) {
		meta->releaseMetaClass();
	}

	return ret;
}
2244
2245 /* Routine io_object_get_bundle_identifier */
2246 kern_return_t
2247 is_io_object_get_bundle_identifier(
2248 mach_port_t master_port,
2249 io_name_t obj_name,
2250 io_name_t bundle_name)
2251 {
2252 IOReturn ret;
2253 const OSMetaClass * meta;
2254 const OSSymbol * name;
2255 const OSSymbol * identifier;
2256 const char * cstr;
2257
2258 if (!obj_name || !bundle_name) {
2259 return kIOReturnBadArgument;
2260 }
2261 if (master_port != master_device_port) {
2262 return kIOReturnNotPrivileged;
2263 }
2264
2265 ret = kIOReturnNotFound;
2266 meta = NULL;
2267 do{
2268 name = OSSymbol::withCString(obj_name);
2269 if (!name) {
2270 break;
2271 }
2272 meta = OSMetaClass::copyMetaClassWithName(name);
2273 if (!meta) {
2274 break;
2275 }
2276 identifier = meta->getKmodName();
2277 if (!identifier) {
2278 break;
2279 }
2280 cstr = identifier->getCStringNoCopy();
2281 if (!cstr) {
2282 break;
2283 }
2284 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
2285 ret = kIOReturnSuccess;
2286 }while (false);
2287
2288 OSSafeReleaseNULL(name);
2289 if (meta) {
2290 meta->releaseMetaClass();
2291 }
2292
2293 return ret;
2294 }
2295
2296 /* Routine io_object_conforms_to */
2297 kern_return_t
2298 is_io_object_conforms_to(
2299 io_object_t object,
2300 io_name_t className,
2301 boolean_t *conforms )
2302 {
2303 if (!object) {
2304 return kIOReturnBadArgument;
2305 }
2306
2307 *conforms = (NULL != object->metaCast( className ));
2308
2309 return kIOReturnSuccess;
2310 }
2311
2312 /* Routine io_object_get_retain_count */
2313 kern_return_t
2314 is_io_object_get_retain_count(
2315 io_object_t object,
2316 uint32_t *retainCount )
2317 {
2318 if (!object) {
2319 return kIOReturnBadArgument;
2320 }
2321
2322 *retainCount = object->getRetainCount();
2323 return kIOReturnSuccess;
2324 }
2325
2326 /* Routine io_iterator_next */
/*
 * Advance the iterator and return the next object with a reference the
 * MIG layer consumes.  IOUserIterator::copyNextObject() already
 * returns retained; a plain OSIterator's getNextObject() does not, so
 * an explicit retain balances the two paths.  kIOReturnNoDevice at end
 * of iteration.
 */
kern_return_t
is_io_iterator_next(
	io_object_t iterator,
	io_object_t *object )
{
	IOReturn ret;
	OSObject * obj;
	OSIterator * iter;
	IOUserIterator * uiter;

	if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
		obj = uiter->copyNextObject();
	} else if ((iter = OSDynamicCast(OSIterator, iterator))) {
		obj = iter->getNextObject();
		if (obj) {
			obj->retain();
		}
	} else {
		return kIOReturnBadArgument;
	}

	if (obj) {
		*object = obj;
		ret = kIOReturnSuccess;
	} else {
		ret = kIOReturnNoDevice;
	}

	return ret;
}
2357
2358 /* Routine io_iterator_reset */
// Rewind the iterator to its start.  CHECK declares 'iter' and fails
// with kIOReturnBadArgument if 'iterator' is not an OSIterator.
kern_return_t
is_io_iterator_reset(
	io_object_t iterator )
{
	CHECK( OSIterator, iterator, iter );

	iter->reset();

	return kIOReturnSuccess;
}
2369
2370 /* Routine io_iterator_is_valid */
// Report whether the iterator is still valid (i.e. the underlying
// collection has not changed since iteration began).
kern_return_t
is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
	CHECK( OSIterator, iterator, iter );

	*is_valid = iter->isValid();

	return kIOReturnSuccess;
}
2382
2383
/*
 * Unserialize a matching dictionary from 'matching' and test it
 * against the service with passiveMatch().  Shared implementation for
 * the ool and inband MIG routines.
 */
static kern_return_t
internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
	CHECK( IOService, _service, service );

	kern_return_t kr;
	OSObject * obj;
	OSDictionary * dict;

	assert(matching_size);
	obj = OSUnserializeXML(matching, matching_size);

	if ((dict = OSDynamicCast( OSDictionary, obj))) {
		*matches = service->passiveMatch( dict );
		kr = kIOReturnSuccess;
	} else {
		// Not a dictionary (or failed to parse).
		kr = kIOReturnBadArgument;
	}

	if (obj) {
		obj->release();
	}

	return kr;
}
2413
2414 /* Routine io_service_match_property_table */
// Legacy in-line-string variant: no longer supported; callers use the
// ool or bin routines below.
kern_return_t
is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
	return kIOReturnUnsupported;
}
2423
2424
2425 /* Routine io_service_match_property_table_ool */
/*
 * Out-of-line variant: copy the matching data out of the vm copy
 * object, run the shared matcher, and report its status via *result.
 * Once vm_map_copyout() succeeds the routine itself must return
 * KERN_SUCCESS so MIG does not try to free the consumed copy object.
 */
kern_return_t
is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		*result = internal_io_service_match_property_table(service,
		    (const char *)data, matchingCnt, matches );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2450
2451 /* Routine io_service_match_property_table_bin */
// In-band (inline) variant: the matching data arrives in the message,
// so it can be passed straight through to the shared matcher.
kern_return_t
is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
	return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
}
2461
2462 static kern_return_t
2463 internal_io_service_get_matching_services(
2464 mach_port_t master_port,
2465 const char * matching,
2466 mach_msg_type_number_t matching_size,
2467 io_iterator_t *existing )
2468 {
2469 kern_return_t kr;
2470 OSObject * obj;
2471 OSDictionary * dict;
2472
2473 if (master_port != master_device_port) {
2474 return kIOReturnNotPrivileged;
2475 }
2476
2477 assert(matching_size);
2478 obj = OSUnserializeXML(matching, matching_size);
2479
2480 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2481 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2482 kr = kIOReturnSuccess;
2483 } else {
2484 kr = kIOReturnBadArgument;
2485 }
2486
2487 if (obj) {
2488 obj->release();
2489 }
2490
2491 return kr;
2492 }
2493
/* Routine io_service_get_matching_services */
/*
 * Legacy fixed-size-string variant; no longer implemented. Callers use
 * the inband (_bin) or out-of-line (_ool) variants instead.
 */
kern_return_t
is_io_service_get_matching_services(
	mach_port_t master_port,
	io_string_t matching,
	io_iterator_t *existing )
{
	return kIOReturnUnsupported;
}
2503
/* Routine io_service_get_matching_services_ool */
/*
 * Out-of-line variant: matching dictionary arrives as a vm_map_copy_t.
 * The lookup status travels in *result; the routine's return value is
 * only the vm_map_copyout() status, since MIG must see success once the
 * copy object has been consumed (and will copy out *existing then).
 */
kern_return_t
is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Map the OOL payload into kernel_map; consumes the copy object on success. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*existing = NULL;
		*result = internal_io_service_get_matching_services(master_port,
		    (const char *) data, matchingCnt, existing);
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2531
/* Routine io_service_get_matching_services_bin */
/*
 * Inband variant: the matching dictionary fits in the MIG message, so it
 * is passed directly to the shared helper.
 */
kern_return_t
is_io_service_get_matching_services_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
	return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
}
2542
2543
2544 static kern_return_t
2545 internal_io_service_get_matching_service(
2546 mach_port_t master_port,
2547 const char * matching,
2548 mach_msg_type_number_t matching_size,
2549 io_service_t *service )
2550 {
2551 kern_return_t kr;
2552 OSObject * obj;
2553 OSDictionary * dict;
2554
2555 if (master_port != master_device_port) {
2556 return kIOReturnNotPrivileged;
2557 }
2558
2559 assert(matching_size);
2560 obj = OSUnserializeXML(matching, matching_size);
2561
2562 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2563 *service = IOService::copyMatchingService( dict );
2564 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2565 } else {
2566 kr = kIOReturnBadArgument;
2567 }
2568
2569 if (obj) {
2570 obj->release();
2571 }
2572
2573 return kr;
2574 }
2575
/* Routine io_service_get_matching_service */
/*
 * Legacy fixed-size-string variant; no longer implemented. Callers use
 * the inband (_bin) or out-of-line (_ool) variants instead.
 */
kern_return_t
is_io_service_get_matching_service(
	mach_port_t master_port,
	io_string_t matching,
	io_service_t *service )
{
	return kIOReturnUnsupported;
}
2585
/* Routine io_service_get_matching_services_ool */
/*
 * Out-of-line variant of get_matching_service. The lookup status travels
 * in *result; the routine's return value is only the vm_map_copyout()
 * status, since MIG must see success once the copy object has been
 * consumed (and will copy out *service then).
 */
kern_return_t
is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	/* Map the OOL payload into kernel_map; consumes the copy object on success. */
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*service = NULL;
		*result = internal_io_service_get_matching_service(master_port,
		    (const char *) data, matchingCnt, service );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2613
/* Routine io_service_get_matching_service_bin */
/*
 * Inband variant: the matching dictionary fits in the MIG message, so it
 * is passed directly to the shared helper.
 */
kern_return_t
is_io_service_get_matching_service_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
	return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
}
2624
2625 static kern_return_t
2626 internal_io_service_add_notification(
2627 mach_port_t master_port,
2628 io_name_t notification_type,
2629 const char * matching,
2630 size_t matching_size,
2631 mach_port_t port,
2632 void * reference,
2633 vm_size_t referenceSize,
2634 bool client64,
2635 io_object_t * notification )
2636 {
2637 IOServiceUserNotification * userNotify = NULL;
2638 IONotifier * notify = NULL;
2639 const OSSymbol * sym;
2640 OSDictionary * dict;
2641 IOReturn err;
2642 unsigned long int userMsgType;
2643
2644 if (master_port != master_device_port) {
2645 return kIOReturnNotPrivileged;
2646 }
2647
2648 do {
2649 err = kIOReturnNoResources;
2650
2651 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2652 return kIOReturnMessageTooLarge;
2653 }
2654
2655 if (!(sym = OSSymbol::withCString( notification_type ))) {
2656 err = kIOReturnNoResources;
2657 }
2658
2659 assert(matching_size);
2660 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2661 if (!dict) {
2662 err = kIOReturnBadArgument;
2663 continue;
2664 }
2665
2666 if ((sym == gIOPublishNotification)
2667 || (sym == gIOFirstPublishNotification)) {
2668 userMsgType = kIOServicePublishNotificationType;
2669 } else if ((sym == gIOMatchedNotification)
2670 || (sym == gIOFirstMatchNotification)) {
2671 userMsgType = kIOServiceMatchedNotificationType;
2672 } else if ((sym == gIOTerminatedNotification)
2673 || (sym == gIOWillTerminateNotification)) {
2674 userMsgType = kIOServiceTerminatedNotificationType;
2675 } else {
2676 userMsgType = kLastIOKitNotificationType;
2677 }
2678
2679 userNotify = new IOServiceUserNotification;
2680
2681 if (userNotify && !userNotify->init( port, userMsgType,
2682 reference, referenceSize, client64)) {
2683 userNotify->release();
2684 userNotify = NULL;
2685 }
2686 if (!userNotify) {
2687 continue;
2688 }
2689
2690 notify = IOService::addMatchingNotification( sym, dict,
2691 &userNotify->_handler, userNotify );
2692 if (notify) {
2693 *notification = userNotify;
2694 userNotify->setNotification( notify );
2695 err = kIOReturnSuccess;
2696 } else {
2697 err = kIOReturnUnsupported;
2698 }
2699 } while (false);
2700
2701 if ((kIOReturnSuccess != err) && userNotify) {
2702 userNotify->invalidatePort();
2703 userNotify->release();
2704 userNotify = NULL;
2705 }
2706
2707 if (sym) {
2708 sym->release();
2709 }
2710 if (dict) {
2711 dict->release();
2712 }
2713
2714 return err;
2715 }
2716
2717
/* Routine io_service_add_notification */
/*
 * Legacy fixed-size-string variant; no longer implemented. Callers use
 * the inband (_bin) or out-of-line (_ool) variants instead.
 */
kern_return_t
is_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
	return kIOReturnUnsupported;
}
2731
/* Routine io_service_add_notification_64 */
/*
 * Legacy fixed-size-string variant (64-bit async refs); no longer
 * implemented. Callers use the _bin_64 / _ool_64 variants instead.
 */
kern_return_t
is_io_service_add_notification_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
	return kIOReturnUnsupported;
}
2745
2746 /* Routine io_service_add_notification_bin */
2747 kern_return_t
2748 is_io_service_add_notification_bin
2749 (
2750 mach_port_t master_port,
2751 io_name_t notification_type,
2752 io_struct_inband_t matching,
2753 mach_msg_type_number_t matchingCnt,
2754 mach_port_t wake_port,
2755 io_async_ref_t reference,
2756 mach_msg_type_number_t referenceCnt,
2757 io_object_t *notification)
2758 {
2759 io_async_ref_t zreference;
2760
2761 if (referenceCnt > ASYNC_REF_COUNT) {
2762 return kIOReturnBadArgument;
2763 }
2764 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2765 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2766
2767 return internal_io_service_add_notification(master_port, notification_type,
2768 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2769 false, notification);
2770 }
2771
2772 /* Routine io_service_add_notification_bin_64 */
2773 kern_return_t
2774 is_io_service_add_notification_bin_64
2775 (
2776 mach_port_t master_port,
2777 io_name_t notification_type,
2778 io_struct_inband_t matching,
2779 mach_msg_type_number_t matchingCnt,
2780 mach_port_t wake_port,
2781 io_async_ref64_t reference,
2782 mach_msg_type_number_t referenceCnt,
2783 io_object_t *notification)
2784 {
2785 io_async_ref64_t zreference;
2786
2787 if (referenceCnt > ASYNC_REF64_COUNT) {
2788 return kIOReturnBadArgument;
2789 }
2790 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2791 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2792
2793 return internal_io_service_add_notification(master_port, notification_type,
2794 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2795 true, notification);
2796 }
2797
/*
 * Shared OOL wrapper for the add_notification routines: map the
 * out-of-line matching payload into kernel_map, forward to
 * internal_io_service_add_notification(), then unmap. The notification
 * status travels in *result; the routine's own return value reflects
 * only the vm_map_copyout() outcome, because once copyout succeeds MIG
 * must see success so the copy object is consumed exactly once.
 */
static kern_return_t
internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = NULL;
		*result = internal_io_service_add_notification( master_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return kr;
}
2829
2830 /* Routine io_service_add_notification_ool */
2831 kern_return_t
2832 is_io_service_add_notification_ool(
2833 mach_port_t master_port,
2834 io_name_t notification_type,
2835 io_buf_ptr_t matching,
2836 mach_msg_type_number_t matchingCnt,
2837 mach_port_t wake_port,
2838 io_async_ref_t reference,
2839 mach_msg_type_number_t referenceCnt,
2840 kern_return_t *result,
2841 io_object_t *notification )
2842 {
2843 io_async_ref_t zreference;
2844
2845 if (referenceCnt > ASYNC_REF_COUNT) {
2846 return kIOReturnBadArgument;
2847 }
2848 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2849 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2850
2851 return internal_io_service_add_notification_ool(master_port, notification_type,
2852 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2853 false, result, notification);
2854 }
2855
2856 /* Routine io_service_add_notification_ool_64 */
2857 kern_return_t
2858 is_io_service_add_notification_ool_64(
2859 mach_port_t master_port,
2860 io_name_t notification_type,
2861 io_buf_ptr_t matching,
2862 mach_msg_type_number_t matchingCnt,
2863 mach_port_t wake_port,
2864 io_async_ref64_t reference,
2865 mach_msg_type_number_t referenceCnt,
2866 kern_return_t *result,
2867 io_object_t *notification )
2868 {
2869 io_async_ref64_t zreference;
2870
2871 if (referenceCnt > ASYNC_REF64_COUNT) {
2872 return kIOReturnBadArgument;
2873 }
2874 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2875 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2876
2877 return internal_io_service_add_notification_ool(master_port, notification_type,
2878 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2879 true, result, notification);
2880 }
2881
/* Routine io_service_add_notification_old */
/*
 * Oldest-ABI wrapper: a single natural_t async reference, forwarded as a
 * one-element reference array to the (now unsupported) string variant.
 */
kern_return_t
is_io_service_add_notification_old(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	// for binary compatibility reasons, this must be natural_t for ILP32
	natural_t ref,
	io_object_t * notification )
{
	return is_io_service_add_notification( master_port, notification_type,
	           matching, port, &ref, 1, notification );
}
2896
2897
2898 static kern_return_t
2899 internal_io_service_add_interest_notification(
2900 io_object_t _service,
2901 io_name_t type_of_interest,
2902 mach_port_t port,
2903 void * reference,
2904 vm_size_t referenceSize,
2905 bool client64,
2906 io_object_t * notification )
2907 {
2908 IOServiceMessageUserNotification * userNotify = NULL;
2909 IONotifier * notify = NULL;
2910 const OSSymbol * sym;
2911 IOReturn err;
2912
2913 CHECK( IOService, _service, service );
2914
2915 err = kIOReturnNoResources;
2916 if ((sym = OSSymbol::withCString( type_of_interest ))) {
2917 do {
2918 userNotify = new IOServiceMessageUserNotification;
2919
2920 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2921 reference, referenceSize,
2922 kIOUserNotifyMaxMessageSize,
2923 client64 )) {
2924 userNotify->release();
2925 userNotify = NULL;
2926 }
2927 if (!userNotify) {
2928 continue;
2929 }
2930
2931 notify = service->registerInterest( sym,
2932 &userNotify->_handler, userNotify );
2933 if (notify) {
2934 *notification = userNotify;
2935 userNotify->setNotification( notify );
2936 err = kIOReturnSuccess;
2937 } else {
2938 err = kIOReturnUnsupported;
2939 }
2940
2941 sym->release();
2942 } while (false);
2943 }
2944
2945 if ((kIOReturnSuccess != err) && userNotify) {
2946 userNotify->invalidatePort();
2947 userNotify->release();
2948 userNotify = NULL;
2949 }
2950
2951 return err;
2952 }
2953
2954 /* Routine io_service_add_message_notification */
2955 kern_return_t
2956 is_io_service_add_interest_notification(
2957 io_object_t service,
2958 io_name_t type_of_interest,
2959 mach_port_t port,
2960 io_async_ref_t reference,
2961 mach_msg_type_number_t referenceCnt,
2962 io_object_t * notification )
2963 {
2964 io_async_ref_t zreference;
2965
2966 if (referenceCnt > ASYNC_REF_COUNT) {
2967 return kIOReturnBadArgument;
2968 }
2969 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2970 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2971
2972 return internal_io_service_add_interest_notification(service, type_of_interest,
2973 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
2974 }
2975
2976 /* Routine io_service_add_interest_notification_64 */
2977 kern_return_t
2978 is_io_service_add_interest_notification_64(
2979 io_object_t service,
2980 io_name_t type_of_interest,
2981 mach_port_t wake_port,
2982 io_async_ref64_t reference,
2983 mach_msg_type_number_t referenceCnt,
2984 io_object_t *notification )
2985 {
2986 io_async_ref64_t zreference;
2987
2988 if (referenceCnt > ASYNC_REF64_COUNT) {
2989 return kIOReturnBadArgument;
2990 }
2991 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2992 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2993
2994 return internal_io_service_add_interest_notification(service, type_of_interest,
2995 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
2996 }
2997
2998
/* Routine io_service_acknowledge_notification */
/*
 * Acknowledge a driver-state notification previously delivered to the
 * client; notify_ref is the opaque IONotificationRef value the client
 * received, response carries the client's option bits.
 */
kern_return_t
is_io_service_acknowledge_notification(
	io_object_t _service,
	natural_t notify_ref,
	natural_t response )
{
	CHECK( IOService, _service, service );

	return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
	           (IOOptionBits) response );
}
3011
/* Routine io_connect_get_semaphore */
/*
 * Return the semaphore a user client associates with the given
 * notification type; semantics are defined by the client subclass's
 * getNotificationSemaphore() override.
 */
kern_return_t
is_io_connect_get_notification_semaphore(
	io_connect_t connection,
	natural_t notification_type,
	semaphore_t *semaphore )
{
	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();
	return client->getNotificationSemaphore((UInt32) notification_type,
	           semaphore );
}
3025
3026 /* Routine io_registry_get_root_entry */
3027 kern_return_t
3028 is_io_registry_get_root_entry(
3029 mach_port_t master_port,
3030 io_object_t *root )
3031 {
3032 IORegistryEntry * entry;
3033
3034 if (master_port != master_device_port) {
3035 return kIOReturnNotPrivileged;
3036 }
3037
3038 entry = IORegistryEntry::getRegistryRoot();
3039 if (entry) {
3040 entry->retain();
3041 }
3042 *root = entry;
3043
3044 return kIOReturnSuccess;
3045 }
3046
3047 /* Routine io_registry_create_iterator */
3048 kern_return_t
3049 is_io_registry_create_iterator(
3050 mach_port_t master_port,
3051 io_name_t plane,
3052 uint32_t options,
3053 io_object_t *iterator )
3054 {
3055 if (master_port != master_device_port) {
3056 return kIOReturnNotPrivileged;
3057 }
3058
3059 *iterator = IOUserIterator::withIterator(
3060 IORegistryIterator::iterateOver(
3061 IORegistryEntry::getPlane( plane ), options ));
3062
3063 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3064 }
3065
3066 /* Routine io_registry_entry_create_iterator */
3067 kern_return_t
3068 is_io_registry_entry_create_iterator(
3069 io_object_t registry_entry,
3070 io_name_t plane,
3071 uint32_t options,
3072 io_object_t *iterator )
3073 {
3074 CHECK( IORegistryEntry, registry_entry, entry );
3075
3076 *iterator = IOUserIterator::withIterator(
3077 IORegistryIterator::iterateOver( entry,
3078 IORegistryEntry::getPlane( plane ), options ));
3079
3080 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3081 }
3082
/* Routine io_registry_iterator_enter */
/*
 * Recurse the iterator into the current entry's children. CHECKLOCKED
 * declares both 'iter' (the wrapped IORegistryIterator) and 'oIter'
 * (the IOUserIterator wrapper whose lock serializes user access).
 */
kern_return_t
is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return kIOReturnSuccess;
}
3096
/* Routine io_registry_iterator_exit */
/*
 * Pop the iterator back up one recursion level; kIOReturnNoDevice means
 * the iterator was already at its root. CHECKLOCKED declares both
 * 'iter' (the wrapped IORegistryIterator) and 'oIter' (the
 * IOUserIterator wrapper whose lock serializes user access).
 */
kern_return_t
is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	CHECKLOCKED( IORegistryIterator, iterator, iter );

	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
3112
3113 /* Routine io_registry_entry_from_path */
3114 kern_return_t
3115 is_io_registry_entry_from_path(
3116 mach_port_t master_port,
3117 io_string_t path,
3118 io_object_t *registry_entry )
3119 {
3120 IORegistryEntry * entry;
3121
3122 if (master_port != master_device_port) {
3123 return kIOReturnNotPrivileged;
3124 }
3125
3126 entry = IORegistryEntry::fromPath( path );
3127
3128 *registry_entry = entry;
3129
3130 return kIOReturnSuccess;
3131 }
3132
3133
/* Routine io_registry_entry_from_path */
/*
 * Path lookup where the path may arrive either inband (path[0] set) or
 * as an out-of-line vm_map_copy_t (path_ool). The lookup status travels
 * in *result; the routine's own return value is the vm_map_copyout()
 * status, because once copyout succeeds MIG must see success so the
 * copy object is consumed exactly once.
 */
kern_return_t
is_io_registry_entry_from_path_ool(
	mach_port_t master_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	map_data = 0;
	entry = NULL;
	res = err = KERN_SUCCESS;
	if (path[0]) {
		/* Inband path supplied; ignore the OOL buffer. */
		cpath = path;
	} else {
		/* Validate the OOL length before touching the copy object. */
		if (!path_oolCnt) {
			return kIOReturnBadArgument;
		}
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
			return kIOReturnMessageTooLarge;
		}

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err) {
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			/* The user buffer must be NUL-terminated within its stated length. */
			if (cpath[path_oolCnt - 1]) {
				res = kIOReturnBadArgument;
			}
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	/* Unmap the copied-in OOL buffer, if one was mapped. */
	if (map_data) {
		vm_deallocate(kernel_map, map_data, path_oolCnt);
	}

	if (KERN_SUCCESS != err) {
		res = err;
	}
	*registry_entry = entry;
	*result = res;

	return err;
}
3194
3195
/* Routine io_registry_entry_in_plane */
/*
 * Report whether the entry is attached in the named registry plane.
 */
kern_return_t
is_io_registry_entry_in_plane(
	io_object_t registry_entry,
	io_name_t plane,
	boolean_t *inPlane )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	*inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

	return kIOReturnSuccess;
}
3209
3210
3211 /* Routine io_registry_entry_get_path */
3212 kern_return_t
3213 is_io_registry_entry_get_path(
3214 io_object_t registry_entry,
3215 io_name_t plane,
3216 io_string_t path )
3217 {
3218 int length;
3219 CHECK( IORegistryEntry, registry_entry, entry );
3220
3221 length = sizeof(io_string_t);
3222 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3223 return kIOReturnSuccess;
3224 } else {
3225 return kIOReturnBadArgument;
3226 }
3227 }
3228
/* Routine io_registry_entry_get_path */
/*
 * Render the entry's path, preferring the inband buffer; when the path
 * exceeds io_string_inband_t, fall back to a kernel buffer of up to
 * kMaxPath bytes copied out to the caller via path_ool/path_oolCnt.
 */
kern_return_t
is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
		/* Path fit inband; nothing to copy out-of-line. */
		err = kIOReturnSuccess;
	} else {
		length = kMaxPath;
		buf = IONew(char, length);
		if (!buf) {
			err = kIOReturnNoMemory;
		} else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
			err = kIOReturnError;
		} else {
			/* getPath() updated 'length' to the rendered size. */
			*path_oolCnt = length;
			err = copyoutkdata(buf, length, path_ool);
		}
		/* Allocation size was kMaxPath even though 'length' may have shrunk. */
		if (buf) {
			IODelete(buf, char, kMaxPath);
		}
	}

	return err;
}
3268
3269
/* Routine io_registry_entry_get_name */
/*
 * Copy the entry's name into the caller's fixed-size io_name_t buffer.
 * NOTE(review): strncpy does not NUL-terminate a name of exactly
 * sizeof(io_name_t) bytes; MIG transfers the fixed-size array either
 * way — confirm userspace treats it as bounded, not NUL-terminated.
 */
kern_return_t
is_io_registry_entry_get_name(
	io_object_t registry_entry,
	io_name_t name )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	strncpy( name, entry->getName(), sizeof(io_name_t));

	return kIOReturnSuccess;
}
3282
/* Routine io_registry_entry_get_name_in_plane */
/*
 * Copy the entry's name as registered in the named plane; an empty plane
 * name selects the plane-independent (global) name.
 */
kern_return_t
is_io_registry_entry_get_name_in_plane(
	io_object_t registry_entry,
	io_name_t planeName,
	io_name_t name )
{
	const IORegistryPlane * plane;
	CHECK( IORegistryEntry, registry_entry, entry );

	if (planeName[0]) {
		plane = IORegistryEntry::getPlane( planeName );
	} else {
		plane = NULL;
	}

	strncpy( name, entry->getName( plane), sizeof(io_name_t));

	return kIOReturnSuccess;
}
3303
3304 /* Routine io_registry_entry_get_location_in_plane */
3305 kern_return_t
3306 is_io_registry_entry_get_location_in_plane(
3307 io_object_t registry_entry,
3308 io_name_t planeName,
3309 io_name_t location )
3310 {
3311 const IORegistryPlane * plane;
3312 CHECK( IORegistryEntry, registry_entry, entry );
3313
3314 if (planeName[0]) {
3315 plane = IORegistryEntry::getPlane( planeName );
3316 } else {
3317 plane = NULL;
3318 }
3319
3320 const char * cstr = entry->getLocation( plane );
3321
3322 if (cstr) {
3323 strncpy( location, cstr, sizeof(io_name_t));
3324 return kIOReturnSuccess;
3325 } else {
3326 return kIOReturnNotFound;
3327 }
3328 }
3329
/* Routine io_registry_entry_get_registry_entry_id */
/*
 * Return the entry's unique 64-bit registry entry ID.
 */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
	io_object_t registry_entry,
	uint64_t *entry_id )
{
	CHECK( IORegistryEntry, registry_entry, entry );

	*entry_id = entry->getRegistryEntryID();

	return kIOReturnSuccess;
}
3342
/* Routine io_registry_entry_get_property */
/*
 * Fetch a property as raw bytes into the caller's inband buffer.
 * Supported property types: OSData (raw bytes), OSString (including the
 * NUL), OSBoolean (as the literal "Yes"/"No" strings), and OSNumber (as
 * its value bytes, clamped to 8). Other types yield kIOReturnBadArgument;
 * a missing property yields kIOReturnNoResources; a too-small caller
 * buffer yields kIOReturnIPCError.
 */
kern_return_t
is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = NULL;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	/* MAC policy may veto reading this property. */
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNoResources;
	}

	// One day OSData will be a common container base class
	// until then...
	if ((data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		/* Non-serializable OSData must not leak its contents; report zero length. */
		if (!data->isSerializable()) {
			len = 0;
		}
	} else if ((str = OSDynamicCast( OSString, obj ))) {
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();
	} else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";
	} else if ((off = OSDynamicCast( OSNumber, obj ))) {
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) {
			len = sizeof(offsetBytes);
		}
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		/* On big-endian, the significant bytes sit at the high end of the UInt64. */
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
	} else {
		ret = kIOReturnBadArgument;
	}

	if (bytes) {
		if (*dataCnt < len) {
			ret = kIOReturnIPCError;
		} else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return ret;
}
3415
3416
/* Routine io_registry_entry_get_property */
/*
 * Fetch a property as XML-serialized data copied out-of-line to the
 * caller (via copyoutkdata). kIOReturnNotFound when the property does
 * not exist; kIOReturnUnsupported when the value cannot be serialized.
 */
kern_return_t
is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	/* MAC policy may veto reading this property. */
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty(property_name);
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		/* copyoutkdata hands ownership of the serialized buffer to the caller. */
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3461
/* Routine io_registry_entry_get_property_recursively */
/*
 * Like is_io_registry_entry_get_property, but the lookup may search the
 * named plane recursively per 'options' (e.g. kIORegistryIterate*). The
 * result is XML-serialized and copied out-of-line to the caller.
 */
kern_return_t
is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	/* MAC policy may veto reading this property. */
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		/* copyoutkdata hands ownership of the serialized buffer to the caller. */
		err = copyoutkdata( s->text(), len, properties );
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3509
/* Routine io_registry_entry_get_properties */
/*
 * Legacy XML whole-property-table fetch; no longer implemented. Callers
 * use the binary-serialization (_bin) variants instead.
 */
kern_return_t
is_io_registry_entry_get_properties(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	return kIOReturnUnsupported;
}
3519
3520 #if CONFIG_MACF
3521
/*
 * Context for the GetPropertiesEditor serializer callback: the
 * credential and entry to run MAC property checks against, and the
 * top-level collection being serialized (captured on first callback).
 */
struct GetPropertiesEditorRef {
	kauth_cred_t cred;
	IORegistryEntry * entry;
	OSCollection * root;
};
3527
/*
 * OSSerialize editor callback used while serializing a registry entry's
 * property table under MACF: filters out top-level properties the MAC
 * policy denies (returns NULL for them), and retains any value it lets
 * through, per the editor contract.
 */
static const OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
	GetPropertiesEditorRef * ref = (typeof(ref))reference;

	/* First callback: remember the outermost container. */
	if (!ref->root) {
		ref->root = container;
	}
	/* Only top-level keys are subject to the per-property MAC check. */
	if (ref->root == container) {
		if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
			value = NULL;
		}
	}
	if (value) {
		value->retain();
	}
	return value;
}
3550
3551 #endif /* CONFIG_MACF */
3552
/* Routine io_registry_entry_get_properties_bin_buf */
/*
 * Serialize the entry's whole property table in binary form. If the
 * caller supplied a user buffer (buf/bufsize) big enough, copy the data
 * there directly; otherwise fall back to an out-of-line copyout via
 * properties/propertiesCnt. Under MACF, denied top-level properties are
 * filtered out by the GetPropertiesEditor callback.
 */
kern_return_t
is_io_registry_entry_get_properties_bin_buf(
	io_object_t registry_entry,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t err = kIOReturnSuccess;
	vm_size_t len;
	OSSerialize * s;
	OSSerialize::Editor editor = NULL;
	void * editRef = NULL;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	/* ref stays on the stack; it only needs to outlive serializeProperties(). */
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = NULL;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) {
		return kIOReturnNoMemory;
	}

	if (!entry->serializeProperties(s)) {
		err = kIOReturnUnsupported;
	}

	if (kIOReturnSuccess == err) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			/* Fast path: data fits the caller's preallocated buffer. */
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			/* Slow path: hand back an OOL buffer owned by the caller. */
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	}
	s->release();

	return err;
}
3613
/* Routine io_registry_entry_get_properties_bin */
/*
 * Convenience wrapper: binary property-table fetch with no caller
 * buffer, always using the out-of-line copyout path.
 */
kern_return_t
is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	return is_io_registry_entry_get_properties_bin_buf(registry_entry,
	           0, NULL, properties, propertiesCnt);
}
3624
/* Routine io_registry_entry_get_property_bin_buf */
/*
 * Look up a single property by name and return its binary serialization,
 * either into the caller-supplied inline buffer ('buf'/'bufsize') or as
 * out-of-line vm memory ('properties'/'propertiesCnt').
 * 'options' with kIORegistryIterateRecursively searches along 'plane'.
 */
kern_return_t
is_io_registry_entry_get_property_bin_buf(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	mach_vm_address_t buf,
	mach_vm_size_t *bufsize,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC gate: per-property read permission check before any lookup.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
		return kIOReturnNotPermitted;
	}
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) {
		return kIOReturnNoMemory;
	}

	if (gIORegistryEntryPropertyKeysKey == sym) {
		// Magic key: return the list of property keys rather than a value.
		obj = entry->copyPropertyKeys();
	} else {
		if ((kIORegistryIterateRecursively & options) && plane[0]) {
			obj = entry->copyProperty(property_name,
			    IORegistryEntry::getPlane(plane), options );
		} else {
			obj = entry->copyProperty(property_name);
		}
		// Read-once properties are destroyed on read.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
			entry->removeProperty(sym);
		}
	}

	sym->release();
	if (!obj) {
		return kIOReturnNotFound;
	}

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if (!s) {
		obj->release();
		return kIOReturnNoMemory;
	}

	if (obj->serialize( s )) {
		len = s->getLength();
		if (buf && bufsize && len <= *bufsize) {
			// Inline path: fits the caller's buffer; no OOL data.
			*bufsize = len;
			*propertiesCnt = 0;
			*properties = nullptr;
			if (copyout(s->text(), buf, len)) {
				err = kIOReturnVMError;
			} else {
				err = kIOReturnSuccess;
			}
		} else {
			// Out-of-line path.
			if (bufsize) {
				*bufsize = 0;
			}
			*propertiesCnt = len;
			err = copyoutkdata( s->text(), len, properties );
		}
	} else {
		err = kIOReturnUnsupported;
	}

	s->release();
	obj->release();

	return err;
}
3707
3708 /* Routine io_registry_entry_get_property_bin */
3709 kern_return_t
3710 is_io_registry_entry_get_property_bin(
3711 io_object_t registry_entry,
3712 io_name_t plane,
3713 io_name_t property_name,
3714 uint32_t options,
3715 io_buf_ptr_t *properties,
3716 mach_msg_type_number_t *propertiesCnt )
3717 {
3718 return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3719 property_name, options, 0, NULL, properties, propertiesCnt);
3720 }
3721
3722
/* Routine io_registry_entry_set_properties */
/*
 * Unserialize an XML property blob received as out-of-line vm memory and
 * apply it to the registry entry via setProperties().
 * The MIG-level status is the function return; the IOKit-level status is
 * delivered through *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Cap untrusted input size before mapping it into the kernel map.
	if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
		return kIOReturnMessageTooLarge;
	}

	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if (KERN_SUCCESS == err) {
		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		// (the copy object is consumed; failures go via *result).
		obj = OSUnserializeXML((const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj) {
			res = kIOReturnBadArgument;
		}
#if CONFIG_MACF
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj)) {
			res = kIOReturnNotPermitted;
		}
#endif
		else {
			res = entry->setProperties( obj );
		}

		if (obj) {
			obj->release();
		}

		FAKE_STACK_FRAME_END();
	} else {
		res = err;
	}

	*result = res;
	return err;
}
3779
3780 /* Routine io_registry_entry_get_child_iterator */
3781 kern_return_t
3782 is_io_registry_entry_get_child_iterator(
3783 io_object_t registry_entry,
3784 io_name_t plane,
3785 io_object_t *iterator )
3786 {
3787 CHECK( IORegistryEntry, registry_entry, entry );
3788
3789 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
3790 IORegistryEntry::getPlane( plane )));
3791
3792 return kIOReturnSuccess;
3793 }
3794
3795 /* Routine io_registry_entry_get_parent_iterator */
3796 kern_return_t
3797 is_io_registry_entry_get_parent_iterator(
3798 io_object_t registry_entry,
3799 io_name_t plane,
3800 io_object_t *iterator)
3801 {
3802 CHECK( IORegistryEntry, registry_entry, entry );
3803
3804 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
3805 IORegistryEntry::getPlane( plane )));
3806
3807 return kIOReturnSuccess;
3808 }
3809
3810 /* Routine io_service_get_busy_state */
3811 kern_return_t
3812 is_io_service_get_busy_state(
3813 io_object_t _service,
3814 uint32_t *busyState )
3815 {
3816 CHECK( IOService, _service, service );
3817
3818 *busyState = service->getBusyState();
3819
3820 return kIOReturnSuccess;
3821 }
3822
3823 /* Routine io_service_get_state */
3824 kern_return_t
3825 is_io_service_get_state(
3826 io_object_t _service,
3827 uint64_t *state,
3828 uint32_t *busy_state,
3829 uint64_t *accumulated_busy_time )
3830 {
3831 CHECK( IOService, _service, service );
3832
3833 *state = service->getState();
3834 *busy_state = service->getBusyState();
3835 *accumulated_busy_time = service->getAccumulatedBusyTime();
3836
3837 return kIOReturnSuccess;
3838 }
3839
3840 /* Routine io_service_wait_quiet */
3841 kern_return_t
3842 is_io_service_wait_quiet(
3843 io_object_t _service,
3844 mach_timespec_t wait_time )
3845 {
3846 uint64_t timeoutNS;
3847
3848 CHECK( IOService, _service, service );
3849
3850 timeoutNS = wait_time.tv_sec;
3851 timeoutNS *= kSecondScale;
3852 timeoutNS += wait_time.tv_nsec;
3853
3854 return service->waitQuiet(timeoutNS);
3855 }
3856
3857 /* Routine io_service_request_probe */
3858 kern_return_t
3859 is_io_service_request_probe(
3860 io_object_t _service,
3861 uint32_t options )
3862 {
3863 CHECK( IOService, _service, service );
3864
3865 return service->requestProbe( options );
3866 }
3867
3868 /* Routine io_service_get_authorization_id */
3869 kern_return_t
3870 is_io_service_get_authorization_id(
3871 io_object_t _service,
3872 uint64_t *authorization_id )
3873 {
3874 kern_return_t kr;
3875
3876 CHECK( IOService, _service, service );
3877
3878 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
3879 kIOClientPrivilegeAdministrator );
3880 if (kIOReturnSuccess != kr) {
3881 return kr;
3882 }
3883
3884 *authorization_id = service->getAuthorizationID();
3885
3886 return kr;
3887 }
3888
3889 /* Routine io_service_set_authorization_id */
3890 kern_return_t
3891 is_io_service_set_authorization_id(
3892 io_object_t _service,
3893 uint64_t authorization_id )
3894 {
3895 CHECK( IOService, _service, service );
3896
3897 return service->setAuthorizationID( authorization_id );
3898 }
3899
/* Routine io_service_open_ndr */
/*
 * Open a connection to a service: create an IOUserClient for 'owningTask'
 * of the requested 'connect_type', perform cross-endian and MAC checks,
 * and register the task as an owner. The MIG-level status is the function
 * return ('err'); the IOKit-level open status goes back through *result,
 * with the new connection (or NULL) in *connection.
 */
kern_return_t
is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = NULL;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = NULL;
	bool crossEndian;
	bool disallowAccess;

	CHECK( IOService, _service, service );

	if (!owningTask) {
		return kIOReturnBadArgument;
	}
	// Only the calling task may open on its own behalf.
	assert(owningTask == current_task());
	if (owningTask != current_task()) {
		return kIOReturnBadArgument;
	}

	do{
		// Property payloads on open are not supported (see disabled
		// block below for the never-enabled unserialize path).
		if (properties) {
			return kIOReturnUnsupported;
		}
#if 0
		{
			OSObject * obj;
			vm_offset_t data;
			vm_map_offset_t map_data;

			if (propertiesCnt > sizeof(io_struct_inband_t)) {
				return kIOReturnMessageTooLarge;
			}

			err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
			res = err;
			data = CAST_DOWN(vm_offset_t, map_data);
			if (KERN_SUCCESS == err) {
				// must return success after vm_map_copyout() succeeds
				obj = OSUnserializeXML((const char *) data, propertiesCnt );
				vm_deallocate( kernel_map, data, propertiesCnt );
				propertiesDict = OSDynamicCast(OSDictionary, obj);
				if (!propertiesDict) {
					res = kIOReturnBadArgument;
					if (obj) {
						obj->release();
					}
				}
			}
			if (kIOReturnSuccess != res) {
				break;
			}
		}
#endif
		// A caller with a different integer representation than the
		// kernel is flagged so the client can opt in to support it.
		crossEndian = (ndr.int_rep != NDR_record.int_rep);
		if (crossEndian) {
			if (!propertiesDict) {
				propertiesDict = OSDictionary::withCapacity(4);
			}
			OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
			if (data) {
				if (propertiesDict) {
					propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
				}
				data->release();
			}
		}

		res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

		if (propertiesDict) {
			propertiesDict->release();
		}

		if (res == kIOReturnSuccess) {
			assert( OSDynamicCast(IOUserClient, client));

			// Cache per-client flags from its properties and set up
			// the lock used by close/notification paths.
			client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
			client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
			client->closed = false;
			client->lock = IOLockAlloc();

			// Cross-endian callers are rejected unless either the
			// service or the client explicitly opts in.
			disallowAccess = (crossEndian
			    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
			    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
			if (disallowAccess) {
				res = kIOReturnUnsupported;
			}
#if CONFIG_MACF
			else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
				res = kIOReturnNotPermitted;
			}
#endif

			if (kIOReturnSuccess == res) {
				res = client->registerOwner(owningTask);
			}

			// Any failure after creation must close and drop the client.
			if (kIOReturnSuccess != res) {
				IOStatisticsClientCall();
				client->clientClose();
				client->release();
				client = NULL;
				break;
			}
			// Tag the connection with the creator's name/pid for diagnostics.
			OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
			if (creatorName) {
				client->setProperty(kIOUserClientCreatorKey, creatorName);
				creatorName->release();
			}
			client->setTerminateDefer(service, false);
		}
	}while (false);

	*connection = client;
	*result = res;

	return err;
}
4029
/* Routine io_service_close */
/*
 * Close a user client connection. The closed flag is flipped with an
 * atomic compare-and-swap so clientClose() runs at most once per
 * (non-shared) client even under racing close calls.
 */
kern_return_t
is_io_service_close(
	io_object_t connection )
{
	OSSet * mappings;
	// An OSSet here is the placeholder object used for mapping ports,
	// not a real connection; nothing to close.
	if ((mappings = OSDynamicCast(OSSet, connection))) {
		return kIOReturnSuccess;
	}

	CHECK( IOUserClient, connection, client );

	IOStatisticsClientCall();

	// Shared-instance clients may be closed repeatedly; otherwise only
	// the first closer (CAS 0 -> 1) proceeds.
	if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
		IOLockLock(client->lock);
		client->clientClose();
		IOLockUnlock(client->lock);
	} else {
		IOLog("ignored is_io_service_close(0x%qx,%s)\n",
		    client->getRegistryEntryID(), client->getName());
	}

	return kIOReturnSuccess;
}
4055
4056 /* Routine io_connect_get_service */
4057 kern_return_t
4058 is_io_connect_get_service(
4059 io_object_t connection,
4060 io_object_t *service )
4061 {
4062 IOService * theService;
4063
4064 CHECK( IOUserClient, connection, client );
4065
4066 theService = client->getService();
4067 if (theService) {
4068 theService->retain();
4069 }
4070
4071 *service = theService;
4072
4073 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4074 }
4075
4076 /* Routine io_connect_set_notification_port */
4077 kern_return_t
4078 is_io_connect_set_notification_port(
4079 io_object_t connection,
4080 uint32_t notification_type,
4081 mach_port_t port,
4082 uint32_t reference)
4083 {
4084 kern_return_t ret;
4085 CHECK( IOUserClient, connection, client );
4086
4087 IOStatisticsClientCall();
4088 IOLockLock(client->lock);
4089 ret = client->registerNotificationPort( port, notification_type,
4090 (io_user_reference_t) reference );
4091 IOLockUnlock(client->lock);
4092 return ret;
4093 }
4094
4095 /* Routine io_connect_set_notification_port */
4096 kern_return_t
4097 is_io_connect_set_notification_port_64(
4098 io_object_t connection,
4099 uint32_t notification_type,
4100 mach_port_t port,
4101 io_user_reference_t reference)
4102 {
4103 kern_return_t ret;
4104 CHECK( IOUserClient, connection, client );
4105
4106 IOStatisticsClientCall();
4107 IOLockLock(client->lock);
4108 ret = client->registerNotificationPort( port, notification_type,
4109 reference );
4110 IOLockUnlock(client->lock);
4111 return ret;
4112 }
4113
/* Routine io_connect_map_memory_into_task */
/*
 * Map a client memory type into 'into_task'. On success *address/*size
 * describe the mapping. Ownership of the created IOMemoryMap depends on
 * who the target task is: shared-instance clients and foreign tasks get a
 * port right in the target task (so teardown follows the port); otherwise
 * the map is tracked in client->mappings for cleanup at close.
 */
kern_return_t
is_io_connect_map_memory_into_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t into_task,
	mach_vm_address_t *address,
	mach_vm_size_t *size,
	uint32_t flags
	)
{
	IOReturn err;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!into_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	map = client->mapClientMemory64( memory_type, into_task, flags, *address );

	if (map) {
		*address = map->getAddress();
		if (size) {
			*size = map->getSize();
		}

		if (client->sharedInstance
		    || (into_task != current_task())) {
			// push a name out to the task owning the map,
			// so we can clean up maps
			mach_port_name_t name __unused =
			    IOMachPort::makeSendRightForTask(
				into_task, map, IKOT_IOKIT_OBJECT );
			map->release();
		} else {
			// keep it with the user client
			// (gIOObjectPortLock guards the mappings set).
			IOLockLock( gIOObjectPortLock);
			if (NULL == client->mappings) {
				client->mappings = OSSet::withCapacity(2);
			}
			if (client->mappings) {
				client->mappings->setObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);
			map->release();
		}
		err = kIOReturnSuccess;
	} else {
		err = kIOReturnBadArgument;
	}

	return err;
}
4171
4172 /* Routine is_io_connect_map_memory */
4173 kern_return_t
4174 is_io_connect_map_memory(
4175 io_object_t connect,
4176 uint32_t type,
4177 task_t task,
4178 uint32_t * mapAddr,
4179 uint32_t * mapSize,
4180 uint32_t flags )
4181 {
4182 IOReturn err;
4183 mach_vm_address_t address;
4184 mach_vm_size_t size;
4185
4186 address = SCALAR64(*mapAddr);
4187 size = SCALAR64(*mapSize);
4188
4189 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4190
4191 *mapAddr = SCALAR32(address);
4192 *mapSize = SCALAR32(size);
4193
4194 return err;
4195 }
4196 } /* extern "C" */
4197
4198 IOMemoryMap *
4199 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4200 {
4201 OSIterator * iter;
4202 IOMemoryMap * map = NULL;
4203
4204 IOLockLock(gIOObjectPortLock);
4205
4206 iter = OSCollectionIterator::withCollection(mappings);
4207 if (iter) {
4208 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4209 if (mem == map->getMemoryDescriptor()) {
4210 map->retain();
4211 mappings->removeObject(map);
4212 break;
4213 }
4214 }
4215 iter->release();
4216 }
4217
4218 IOLockUnlock(gIOObjectPortLock);
4219
4220 return map;
4221 }
4222
4223 extern "C" {
/* Routine io_connect_unmap_memory_from_task */
/*
 * Unmap a previously-mapped client memory type from 'from_task'.
 * The existing mapping is recovered by re-creating it with kIOMapReference
 * at the given address, then torn down either via the task's port name
 * (foreign task) or by releasing the port object (current task).
 */
kern_return_t
is_io_connect_unmap_memory_from_task
(
	io_connect_t connection,
	uint32_t memory_type,
	task_t from_task,
	mach_vm_address_t address)
{
	IOReturn err;
	IOOptionBits options = 0;
	IOMemoryDescriptor * memory = NULL;
	IOMemoryMap * map;

	CHECK( IOUserClient, connection, client );

	if (!from_task) {
		return kIOReturnBadArgument;
	}

	IOStatisticsClientCall();
	err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );

	if (memory && (kIOReturnSuccess == err)) {
		// kIOMapReference looks up the existing mapping at 'address'
		// rather than creating a new one.
		options = (options & ~kIOMapUserOptionsMask)
		    | kIOMapAnywhere | kIOMapReference;

		map = memory->createMappingInTask( from_task, address, options );
		memory->release();
		if (map) {
			// Stop tracking the map in the client before teardown.
			IOLockLock( gIOObjectPortLock);
			if (client->mappings) {
				client->mappings->removeObject( map);
			}
			IOLockUnlock( gIOObjectPortLock);

			mach_port_name_t name = 0;
			if (from_task != current_task()) {
				// Foreign task: the map is owned via a port
				// name in that task; drop our local reference.
				name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
				map->release();
			}

			if (name) {
				map->userClientUnmap();
				// NOTE(review): the mod_send_right status is
				// intentionally overwritten with success.
				err = iokit_mod_send_right( from_task, name, -2 );
				err = kIOReturnSuccess;
			} else {
				IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
			}
			if (from_task == current_task()) {
				map->release();
			}
		} else {
			err = kIOReturnBadArgument;
		}
	}

	return err;
}
4283
4284 kern_return_t
4285 is_io_connect_unmap_memory(
4286 io_object_t connect,
4287 uint32_t type,
4288 task_t task,
4289 uint32_t mapAddr )
4290 {
4291 IOReturn err;
4292 mach_vm_address_t address;
4293
4294 address = SCALAR64(mapAddr);
4295
4296 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
4297
4298 return err;
4299 }
4300
4301
4302 /* Routine io_connect_add_client */
4303 kern_return_t
4304 is_io_connect_add_client(
4305 io_object_t connection,
4306 io_object_t connect_to)
4307 {
4308 CHECK( IOUserClient, connection, client );
4309 CHECK( IOUserClient, connect_to, to );
4310
4311 IOStatisticsClientCall();
4312 return client->connectClient( to );
4313 }
4314
4315
4316 /* Routine io_connect_set_properties */
4317 kern_return_t
4318 is_io_connect_set_properties(
4319 io_object_t connection,
4320 io_buf_ptr_t properties,
4321 mach_msg_type_number_t propertiesCnt,
4322 kern_return_t * result)
4323 {
4324 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4325 }
4326
/* Routine io_user_client_method */
/*
 * Dispatch an external method that returns variable-length structure
 * output. The method fills structureVariableOutputData (an OSSerialize or
 * OSData), which is copied out as out-of-line vm memory via 'var_output'.
 */
kern_return_t
is_io_connect_method_var_output
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	io_buf_ptr_t *var_output,
	mach_msg_type_number_t *var_outputCnt
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	OSObject * structureVariableOutputData = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// No async completion on this path.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = &structureVariableOutputData;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// Out-of-line input smaller than the inband limit should have been
	// sent inband; reject as an IPC protocol violation.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero outputs so nothing stale leaks back to user space.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;
	args.structureOutputDescriptor = NULL;
	args.structureOutputDescriptorSize = 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;

	if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
		OSSerialize * serialize;
		OSData * data;
		vm_size_t len;

		// The method must have produced either an OSSerialize or an
		// OSData; anything else is an underrun.
		if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
			len = serialize->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(serialize->text(), len, var_output);
		} else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
			len = data->getLength();
			*var_outputCnt = len;
			ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
		} else {
			ret = kIOReturnUnderrun;
		}
	}

	if (inputMD) {
		inputMD->release();
	}
	if (structureVariableOutputData) {
		structureVariableOutputData->release();
	}

	return ret;
}
4423
/* Routine io_user_client_method */
/*
 * Unified synchronous external method dispatch: packs the MIG arguments
 * into IOExternalMethodArguments, wraps any out-of-line buffers in memory
 * descriptors, and calls the client's externalMethod().
 */
kern_return_t
is_io_connect_method
(
	io_connect_t connection,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t *ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	args.selector = selector;

	// Synchronous call: no async wake port or reference.
	args.asyncWakePort = MACH_PORT_NULL;
	args.asyncReference = NULL;
	args.asyncReferenceCount = 0;
	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers no larger than the inband limit indicate a malformed
	// IPC request; reject.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero outputs so nothing stale leaks back to user space.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	// NOTE(review): this guard also tests ool_output_size for NULL, but
	// *ool_output_size was already dereferenced above when ool_output is
	// set — presumably MIG always supplies it; confirm.
	if (ool_output && ool_output_size) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4512
/* Routine io_async_user_client_method */
/*
 * Asynchronous external method dispatch. reference[0] is overwritten with
 * the wake port (plus kIOUCAsync64Flag for 64-bit tasks) so completion can
 * be routed back; the rest mirrors is_io_connect_method.
 */
kern_return_t
is_io_connect_async_method
(
	io_connect_t connection,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t selector,
	io_scalar_inband64_t scalar_input,
	mach_msg_type_number_t scalar_inputCnt,
	io_struct_inband_t inband_input,
	mach_msg_type_number_t inband_inputCnt,
	mach_vm_address_t ool_input,
	mach_vm_size_t ool_input_size,
	io_struct_inband_t inband_output,
	mach_msg_type_number_t *inband_outputCnt,
	io_scalar_inband64_t scalar_output,
	mach_msg_type_number_t *scalar_outputCnt,
	mach_vm_address_t ool_output,
	mach_vm_size_t * ool_output_size
	)
{
	CHECK( IOUserClient, connection, client );

	IOExternalMethodArguments args;
	IOReturn ret;
	IOMemoryDescriptor * inputMD = NULL;
	IOMemoryDescriptor * outputMD = NULL;

	bzero(&args.__reserved[0], sizeof(args.__reserved));
	args.__reservedA = 0;
	args.version = kIOExternalMethodArgumentsCurrentVersion;

	// Stash the wake port in the first reference slot; tag 64-bit
	// address-space callers so completion uses 64-bit references.
	reference[0] = (io_user_reference_t) wake_port;
	if (vm_map_is_64bit(get_task_map(current_task()))) {
		reference[0] |= kIOUCAsync64Flag;
	}

	args.selector = selector;

	args.asyncWakePort = wake_port;
	args.asyncReference = reference;
	args.asyncReferenceCount = referenceCnt;

	args.structureVariableOutputData = NULL;

	args.scalarInput = scalar_input;
	args.scalarInputCount = scalar_inputCnt;
	args.structureInput = inband_input;
	args.structureInputSize = inband_inputCnt;

	// OOL buffers no larger than the inband limit indicate a malformed
	// IPC request; reject.
	if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}
	if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
		return kIOReturnIPCError;
	}

	if (ool_input) {
		inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
		    kIODirectionOut | kIOMemoryMapCopyOnWrite,
		    current_task());
	}

	args.structureInputDescriptor = inputMD;

	args.scalarOutput = scalar_output;
	args.scalarOutputCount = *scalar_outputCnt;
	// Zero outputs so nothing stale leaks back to user space.
	bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
	args.structureOutput = inband_output;
	args.structureOutputSize = *inband_outputCnt;

	if (ool_output) {
		outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
		    kIODirectionIn, current_task());
	}

	args.structureOutputDescriptor = outputMD;
	args.structureOutputDescriptorSize = *ool_output_size;

	IOStatisticsClientCall();
	ret = client->externalMethod( selector, &args );

	*scalar_outputCnt = args.scalarOutputCount;
	*inband_outputCnt = args.structureOutputSize;
	*ool_output_size = args.structureOutputDescriptorSize;

	if (inputMD) {
		inputMD->release();
	}
	if (outputMD) {
		outputMD->release();
	}

	return ret;
}
4610
4611 /* Routine io_connect_method_scalarI_scalarO */
4612 kern_return_t
4613 is_io_connect_method_scalarI_scalarO(
4614 io_object_t connect,
4615 uint32_t index,
4616 io_scalar_inband_t input,
4617 mach_msg_type_number_t inputCount,
4618 io_scalar_inband_t output,
4619 mach_msg_type_number_t * outputCount )
4620 {
4621 IOReturn err;
4622 uint32_t i;
4623 io_scalar_inband64_t _input;
4624 io_scalar_inband64_t _output;
4625
4626 mach_msg_type_number_t struct_outputCnt = 0;
4627 mach_vm_size_t ool_output_size = 0;
4628
4629 bzero(&_output[0], sizeof(_output));
4630 for (i = 0; i < inputCount; i++) {
4631 _input[i] = SCALAR64(input[i]);
4632 }
4633
4634 err = is_io_connect_method(connect, index,
4635 _input, inputCount,
4636 NULL, 0,
4637 0, 0,
4638 NULL, &struct_outputCnt,
4639 _output, outputCount,
4640 0, &ool_output_size);
4641
4642 for (i = 0; i < *outputCount; i++) {
4643 output[i] = SCALAR32(_output[i]);
4644 }
4645
4646 return err;
4647 }
4648
/*
 * Legacy shim: dispatch an old-style IOExternalMethod (scalar-in,
 * scalar-out) through the member-function pointer in the method table.
 * Input and output counts must exactly match the table's count0/count1;
 * the switch spreads up to 6 scalars across the fixed-arity IOMethod
 * signature, with remaining slots used for output pointers.
 */
kern_return_t
shim_io_connect_method_scalarI_scalarO(
	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOMethod func;
	io_scalar_inband_t _output;
	IOReturn err;
	err = kIOReturnBadArgument;

	bzero(&_output[0], sizeof(_output));
	do {
		// Strict count validation against the registered method table.
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		// Inputs fill the leading arguments, outputs the trailing ones.
		switch (inputCount) {
		case 6:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	// Copy the 32-bit results out to the caller's scalar array.
	uint32_t i;
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4724
/* Routine io_async_method_scalarI_scalarO */
/*
 * Legacy async scalar-in/scalar-out shim: widen 32-bit scalars and async
 * references to their 64-bit forms and dispatch through
 * is_io_connect_async_method, then narrow the scalar outputs.
 */
kern_return_t
is_io_async_method_scalarI_scalarO(
	io_object_t connect,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	uint32_t index,
	io_scalar_inband_t input,
	mach_msg_type_number_t inputCount,
	io_scalar_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOReturn err;
	uint32_t i;
	io_scalar_inband64_t _input;
	io_scalar_inband64_t _output;
	io_async_ref64_t _reference;

	// Bound-check before filling the fixed-size 64-bit reference array.
	if (referenceCnt > ASYNC_REF64_COUNT) {
		return kIOReturnBadArgument;
	}
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < referenceCnt; i++) {
		_reference[i] = REF64(reference[i]);
	}
	// Zero the unused tail of the reference array.
	bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));

	mach_msg_type_number_t struct_outputCnt = 0;
	mach_vm_size_t ool_output_size = 0;

	for (i = 0; i < inputCount; i++) {
		_input[i] = SCALAR64(input[i]);
	}

	err = is_io_connect_async_method(connect,
	    wake_port, _reference, referenceCnt,
	    index,
	    _input, inputCount,
	    NULL, 0,
	    0, 0,
	    NULL, &struct_outputCnt,
	    _output, outputCount,
	    0, &ool_output_size);

	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4776 /* Routine io_async_method_scalarI_structureO */
4777 kern_return_t
4778 is_io_async_method_scalarI_structureO(
4779 io_object_t connect,
4780 mach_port_t wake_port,
4781 io_async_ref_t reference,
4782 mach_msg_type_number_t referenceCnt,
4783 uint32_t index,
4784 io_scalar_inband_t input,
4785 mach_msg_type_number_t inputCount,
4786 io_struct_inband_t output,
4787 mach_msg_type_number_t * outputCount )
4788 {
4789 uint32_t i;
4790 io_scalar_inband64_t _input;
4791 io_async_ref64_t _reference;
4792
4793 if (referenceCnt > ASYNC_REF64_COUNT) {
4794 return kIOReturnBadArgument;
4795 }
4796 for (i = 0; i < referenceCnt; i++) {
4797 _reference[i] = REF64(reference[i]);
4798 }
4799 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4800
4801 mach_msg_type_number_t scalar_outputCnt = 0;
4802 mach_vm_size_t ool_output_size = 0;
4803
4804 for (i = 0; i < inputCount; i++) {
4805 _input[i] = SCALAR64(input[i]);
4806 }
4807
4808 return is_io_connect_async_method(connect,
4809 wake_port, _reference, referenceCnt,
4810 index,
4811 _input, inputCount,
4812 NULL, 0,
4813 0, 0,
4814 output, outputCount,
4815 NULL, &scalar_outputCnt,
4816 0, &ool_output_size);
4817 }
4818
4819 /* Routine io_async_method_scalarI_structureI */
4820 kern_return_t
4821 is_io_async_method_scalarI_structureI(
4822 io_connect_t connect,
4823 mach_port_t wake_port,
4824 io_async_ref_t reference,
4825 mach_msg_type_number_t referenceCnt,
4826 uint32_t index,
4827 io_scalar_inband_t input,
4828 mach_msg_type_number_t inputCount,
4829 io_struct_inband_t inputStruct,
4830 mach_msg_type_number_t inputStructCount )
4831 {
4832 uint32_t i;
4833 io_scalar_inband64_t _input;
4834 io_async_ref64_t _reference;
4835
4836 if (referenceCnt > ASYNC_REF64_COUNT) {
4837 return kIOReturnBadArgument;
4838 }
4839 for (i = 0; i < referenceCnt; i++) {
4840 _reference[i] = REF64(reference[i]);
4841 }
4842 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4843
4844 mach_msg_type_number_t scalar_outputCnt = 0;
4845 mach_msg_type_number_t inband_outputCnt = 0;
4846 mach_vm_size_t ool_output_size = 0;
4847
4848 for (i = 0; i < inputCount; i++) {
4849 _input[i] = SCALAR64(input[i]);
4850 }
4851
4852 return is_io_connect_async_method(connect,
4853 wake_port, _reference, referenceCnt,
4854 index,
4855 _input, inputCount,
4856 inputStruct, inputStructCount,
4857 0, 0,
4858 NULL, &inband_outputCnt,
4859 NULL, &scalar_outputCnt,
4860 0, &ool_output_size);
4861 }
4862
4863 /* Routine io_async_method_structureI_structureO */
4864 kern_return_t
4865 is_io_async_method_structureI_structureO(
4866 io_object_t connect,
4867 mach_port_t wake_port,
4868 io_async_ref_t reference,
4869 mach_msg_type_number_t referenceCnt,
4870 uint32_t index,
4871 io_struct_inband_t input,
4872 mach_msg_type_number_t inputCount,
4873 io_struct_inband_t output,
4874 mach_msg_type_number_t * outputCount )
4875 {
4876 uint32_t i;
4877 mach_msg_type_number_t scalar_outputCnt = 0;
4878 mach_vm_size_t ool_output_size = 0;
4879 io_async_ref64_t _reference;
4880
4881 if (referenceCnt > ASYNC_REF64_COUNT) {
4882 return kIOReturnBadArgument;
4883 }
4884 for (i = 0; i < referenceCnt; i++) {
4885 _reference[i] = REF64(reference[i]);
4886 }
4887 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4888
4889 return is_io_connect_async_method(connect,
4890 wake_port, _reference, referenceCnt,
4891 index,
4892 NULL, 0,
4893 input, inputCount,
4894 0, 0,
4895 output, outputCount,
4896 NULL, &scalar_outputCnt,
4897 0, &ool_output_size);
4898 }
4899
4900
/*
 * Shim that dispatches a modern async scalar-in/scalar-out call onto a
 * legacy IOExternalAsyncMethod table entry. The legacy IOAsyncMethod
 * signature takes the async reference plus exactly six generic argument
 * slots, so the scalar count selects how many slots carry inputs and how
 * many carry output pointers. Returns kIOReturnBadArgument if the counts
 * do not match the method table entry.
 */
kern_return_t
shim_io_async_method_scalarI_scalarO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_user_scalar_t * output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	io_scalar_inband_t _output;
	IOReturn err;
	io_async_ref_t reference;

	/* Zero outputs so unused slots read back as 0, then narrow the
	 * caller's 64-bit async reference to the legacy 32-bit form. */
	bzero(&_output[0], sizeof(_output));
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;

	do {
		/* Legacy method tables declare fixed scalar counts; mismatches
		 * fall through with kIOReturnBadArgument. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if (*outputCount != method->count1) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Each case fills the leading slots with truncated 32-bit
		 * inputs (ARG32) and the trailing slots with output pointers. */
		switch (inputCount) {
		case 6:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
			break;
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    &_output[0] );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    &_output[0], &_output[1] );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    &_output[0], &_output[1], &_output[2] );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3] );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4] );
			break;
		case 0:
			err = (object->*func)( reference,
			    &_output[0], &_output[1], &_output[2],
			    &_output[3], &_output[4], &_output[5] );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	/* Widen the 32-bit results for the modern caller. */
	for (i = 0; i < *outputCount; i++) {
		output[i] = SCALAR32(_output[i]);
	}

	return err;
}
4992
4993
4994 /* Routine io_connect_method_scalarI_structureO */
4995 kern_return_t
4996 is_io_connect_method_scalarI_structureO(
4997 io_object_t connect,
4998 uint32_t index,
4999 io_scalar_inband_t input,
5000 mach_msg_type_number_t inputCount,
5001 io_struct_inband_t output,
5002 mach_msg_type_number_t * outputCount )
5003 {
5004 uint32_t i;
5005 io_scalar_inband64_t _input;
5006
5007 mach_msg_type_number_t scalar_outputCnt = 0;
5008 mach_vm_size_t ool_output_size = 0;
5009
5010 for (i = 0; i < inputCount; i++) {
5011 _input[i] = SCALAR64(input[i]);
5012 }
5013
5014 return is_io_connect_method(connect, index,
5015 _input, inputCount,
5016 NULL, 0,
5017 0, 0,
5018 output, outputCount,
5019 NULL, &scalar_outputCnt,
5020 0, &ool_output_size);
5021 }
5022
/*
 * Shim that dispatches a modern scalar-in/structure-out call onto a
 * legacy IOExternalMethod table entry. The legacy IOMethod signature has
 * six generic argument slots: the leading slots carry the truncated
 * scalar inputs and the following slots carry the output buffer and its
 * size pointer. count1 == kIOUCVariableStructureSize means the output
 * size is not checked against the table.
 */
kern_return_t
shim_io_connect_method_scalarI_structureO(

	IOExternalMethod * method,
	IOService * object,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err;

	err = kIOReturnBadArgument;

	do {
		/* The method table fixes the scalar input count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* count1 is the expected output size unless variable-size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			/* With five scalars, all six slots are used and no slot
			 * remains for the size pointer. */
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5087
5088
/*
 * Shim that dispatches a modern async scalar-in/structure-out call onto
 * a legacy IOExternalAsyncMethod table entry: the async reference is
 * narrowed to 32 bits, the scalars fill the leading argument slots, and
 * the output buffer/size pointer occupy the next slots. count1 ==
 * kIOUCVariableStructureSize disables the output size check.
 */
kern_return_t
shim_io_async_method_scalarI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the caller's 64-bit async reference to the legacy form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do {
		/* The method table fixes the scalar input count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* count1 is the expected output size unless variable-size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			/* Five scalars use every slot; no room for the size pointer. */
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    output );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    output, (void *)outputCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    output, (void *)outputCount, NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    output, (void *)outputCount, NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    output, (void *)outputCount, NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    output, (void *)outputCount, NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5166
5167 /* Routine io_connect_method_scalarI_structureI */
5168 kern_return_t
5169 is_io_connect_method_scalarI_structureI(
5170 io_connect_t connect,
5171 uint32_t index,
5172 io_scalar_inband_t input,
5173 mach_msg_type_number_t inputCount,
5174 io_struct_inband_t inputStruct,
5175 mach_msg_type_number_t inputStructCount )
5176 {
5177 uint32_t i;
5178 io_scalar_inband64_t _input;
5179
5180 mach_msg_type_number_t scalar_outputCnt = 0;
5181 mach_msg_type_number_t inband_outputCnt = 0;
5182 mach_vm_size_t ool_output_size = 0;
5183
5184 for (i = 0; i < inputCount; i++) {
5185 _input[i] = SCALAR64(input[i]);
5186 }
5187
5188 return is_io_connect_method(connect, index,
5189 _input, inputCount,
5190 inputStruct, inputStructCount,
5191 0, 0,
5192 NULL, &inband_outputCnt,
5193 NULL, &scalar_outputCnt,
5194 0, &ool_output_size);
5195 }
5196
5197 kern_return_t
5198 shim_io_connect_method_scalarI_structureI(
5199 IOExternalMethod * method,
5200 IOService * object,
5201 const io_user_scalar_t * input,
5202 mach_msg_type_number_t inputCount,
5203 io_struct_inband_t inputStruct,
5204 mach_msg_type_number_t inputStructCount )
5205 {
5206 IOMethod func;
5207 IOReturn err = kIOReturnBadArgument;
5208
5209 do{
5210 if (inputCount != method->count0) {
5211 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5212 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5213 continue;
5214 }
5215 if ((kIOUCVariableStructureSize != method->count1)
5216 && (inputStructCount != method->count1)) {
5217 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5218 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5219 continue;
5220 }
5221
5222 func = method->func;
5223
5224 switch (inputCount) {
5225 case 5:
5226 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5227 ARG32(input[3]), ARG32(input[4]),
5228 inputStruct );
5229 break;
5230 case 4:
5231 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5232 ARG32(input[3]),
5233 inputStruct, (void *)(uintptr_t)inputStructCount );
5234 break;
5235 case 3:
5236 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5237 inputStruct, (void *)(uintptr_t)inputStructCount,
5238 NULL );
5239 break;
5240 case 2:
5241 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5242 inputStruct, (void *)(uintptr_t)inputStructCount,
5243 NULL, NULL );
5244 break;
5245 case 1:
5246 err = (object->*func)( ARG32(input[0]),
5247 inputStruct, (void *)(uintptr_t)inputStructCount,
5248 NULL, NULL, NULL );
5249 break;
5250 case 0:
5251 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5252 NULL, NULL, NULL, NULL );
5253 break;
5254
5255 default:
5256 IOLog("%s: Bad method table\n", object->getName());
5257 }
5258 }while (false);
5259
5260 return err;
5261 }
5262
/*
 * Shim that dispatches a modern async scalar-in/structure-in call onto a
 * legacy IOExternalAsyncMethod table entry: the async reference is
 * narrowed to 32 bits, the scalars fill the leading argument slots, and
 * the input structure pointer/size occupy the following slots. count1 ==
 * kIOUCVariableStructureSize disables the struct size check.
 */
kern_return_t
shim_io_async_method_scalarI_structureI(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	const io_user_scalar_t * input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t inputStruct,
	mach_msg_type_number_t inputStructCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err = kIOReturnBadArgument;
	io_async_ref_t reference;

	/* Narrow the caller's 64-bit async reference to the legacy form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	do{
		/* The method table fixes the scalar input count. */
		if (inputCount != method->count0) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		/* count1 is the expected input struct size unless variable-size. */
		if ((kIOUCVariableStructureSize != method->count1)
		    && (inputStructCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		switch (inputCount) {
		case 5:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]), ARG32(input[4]),
			    inputStruct );
			break;
		case 4:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    ARG32(input[3]),
			    inputStruct, (void *)(uintptr_t)inputStructCount );
			break;
		case 3:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL );
			break;
		case 2:
			err = (object->*func)( reference,
			    ARG32(input[0]), ARG32(input[1]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL );
			break;
		case 1:
			err = (object->*func)( reference,
			    ARG32(input[0]),
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL );
			break;
		case 0:
			err = (object->*func)( reference,
			    inputStruct, (void *)(uintptr_t)inputStructCount,
			    NULL, NULL, NULL, NULL );
			break;

		default:
			IOLog("%s: Bad method table\n", object->getName());
		}
	}while (false);

	return err;
}
5343
5344 /* Routine io_connect_method_structureI_structureO */
5345 kern_return_t
5346 is_io_connect_method_structureI_structureO(
5347 io_object_t connect,
5348 uint32_t index,
5349 io_struct_inband_t input,
5350 mach_msg_type_number_t inputCount,
5351 io_struct_inband_t output,
5352 mach_msg_type_number_t * outputCount )
5353 {
5354 mach_msg_type_number_t scalar_outputCnt = 0;
5355 mach_vm_size_t ool_output_size = 0;
5356
5357 return is_io_connect_method(connect, index,
5358 NULL, 0,
5359 input, inputCount,
5360 0, 0,
5361 output, outputCount,
5362 NULL, &scalar_outputCnt,
5363 0, &ool_output_size);
5364 }
5365
/*
 * Shim that dispatches a modern structure-in/structure-out call onto a
 * legacy IOExternalMethod table entry. The legacy table's count0/count1
 * encode which of input/output the method takes: both, output only, or
 * input only; kIOUCVariableStructureSize in either slot disables that
 * size check. Returns kIOReturnBadArgument on a size mismatch.
 */
kern_return_t
shim_io_connect_method_structureI_structureO(
	IOExternalMethod * method,
	IOService * object,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	IOByteCount * outputCount )
{
	IOMethod func;
	IOReturn err = kIOReturnBadArgument;

	do{
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* Argument layout depends on which buffers the method declares:
		 * count1 != 0 -> has output; count0 != 0 -> has input. */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);


	return err;
}
5409
/*
 * Shim that dispatches a modern async structure-in/structure-out call
 * onto a legacy IOExternalAsyncMethod table entry. As in the synchronous
 * variant, count0/count1 select which of input/output the legacy method
 * takes, with kIOUCVariableStructureSize disabling that size check; the
 * async reference is narrowed to 32 bits first.
 */
kern_return_t
shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod * method,
	IOService * object,
	mach_port_t asyncWakePort,
	io_user_reference_t * asyncReference,
	uint32_t asyncReferenceCount,
	io_struct_inband_t input,
	mach_msg_type_number_t inputCount,
	io_struct_inband_t output,
	mach_msg_type_number_t * outputCount )
{
	IOAsyncMethod func;
	uint32_t i;
	IOReturn err;
	io_async_ref_t reference;

	/* Narrow the caller's 64-bit async reference to the legacy form. */
	for (i = 0; i < asyncReferenceCount; i++) {
		reference[i] = REF32(asyncReference[i]);
	}

	err = kIOReturnBadArgument;
	do{
		if ((kIOUCVariableStructureSize != method->count0)
		    && (inputCount != method->count0)) {
			IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
			continue;
		}
		if ((kIOUCVariableStructureSize != method->count1)
		    && (*outputCount != method->count1)) {
			IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
			DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
			continue;
		}

		func = method->func;

		/* count1 != 0 -> method takes output; count0 != 0 -> takes input. */
		if (method->count1) {
			if (method->count0) {
				err = (object->*func)( reference,
				    input, output,
				    (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
			} else {
				err = (object->*func)( reference,
				    output, outputCount, NULL, NULL, NULL, NULL );
			}
		} else {
			err = (object->*func)( reference,
			    input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
		}
	}while (false);

	return err;
}
5465
#if !NO_KEXTD
/* Latched once kextd reports it has finished launching, so the busy
 * state is only cleared a single time (see is_io_catalog_send_data,
 * kIOCatalogKextdFinishedLaunching). */
bool gIOKextdClearedBusy = false;
#endif
5469
/* Routine io_catalog_send_data */
/*
 * MIG handler: accepts serialized driver-personality data (or a flag-only
 * kextd notification) from user space and applies it to the IOCatalogue.
 * Requires the master device port and the kext-management entitlement.
 * The MIG-level return is KERN_SUCCESS once ownership of inData has been
 * consumed; the operation's own status is reported through *result.
 */
kern_return_t
is_io_catalog_send_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t inData,
	mach_msg_type_number_t inDataCount,
	kern_return_t * result)
{
#if NO_KEXTD
	return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
	OSObject * obj = NULL;
	vm_offset_t data;
	kern_return_t kr = kIOReturnError;

	//printf("io_catalog_send_data called. flag: %d\n", flag);

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	/* Only the kextd notification flags may arrive without a payload. */
	if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
	    flag != kIOCatalogKextdActive &&
	    flag != kIOCatalogKextdFinishedLaunching) &&
	    (!inData || !inDataCount)) {
		return kIOReturnBadArgument;
	}

	if (!IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) {
		OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
		IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
		OSSafeReleaseNULL(taskName);
		// For now, fake success to not break applications relying on this function succeeding.
		// See <rdar://problem/32554970> for more details.
		return kIOReturnSuccess;
	}

	if (inData) {
		vm_map_offset_t map_data;

		/* Cap the payload to bound the kernel-map allocation below. */
		if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
			return kIOReturnMessageTooLarge;
		}

		kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		// must return success after vm_map_copyout() succeeds

		/* Unserialize the XML payload; the copied-out pages are freed
		 * immediately after, whether or not parsing succeeded. */
		if (inDataCount) {
			obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
			vm_deallocate( kernel_map, data, inDataCount );
			if (!obj) {
				*result = kIOReturnNoMemory;
				return KERN_SUCCESS;
			}
		}
	}

	switch (flag) {
	case kIOCatalogResetDrivers:
	case kIOCatalogResetDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->resetAndAddDrivers(array,
			    flag == kIOCatalogResetDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogAddDrivers:
	case kIOCatalogAddDriversNoMatch: {
		OSArray * array;

		array = OSDynamicCast(OSArray, obj);
		if (array) {
			if (!gIOCatalogue->addDrivers( array,
			    flag == kIOCatalogAddDrivers)) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogRemoveDrivers:
	case kIOCatalogRemoveDriversNoMatch: {
		OSDictionary * dict;

		dict = OSDynamicCast(OSDictionary, obj);
		if (dict) {
			if (!gIOCatalogue->removeDrivers( dict,
			    flag == kIOCatalogRemoveDrivers )) {
				kr = kIOReturnError;
			}
		} else {
			kr = kIOReturnBadArgument;
		}
	}
	break;

	case kIOCatalogStartMatching__Removed:
	case kIOCatalogRemoveKernelLinker__Removed:
		kr = KERN_NOT_SUPPORTED;
		break;

	case kIOCatalogKextdActive:
#if !NO_KEXTD
		IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
		OSKext::setKextdActive();

		/* Dump all nonloaded startup extensions; kextd will now send them
		 * down on request.
		 */
		OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
		kr = kIOReturnSuccess;
		break;

	case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
		/* gIOKextdClearedBusy ensures the busy count is dropped once. */
		if (!gIOKextdClearedBusy) {
			IOService::kextdLaunched();
			gIOKextdClearedBusy = true;
		}
#endif
		kr = kIOReturnSuccess;
	}
	break;

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	if (obj) {
		obj->release();
	}

	*result = kr;
	return KERN_SUCCESS;
#endif /* NO_KEXTD */
}
5625
/* Routine io_catalog_terminate */
/*
 * MIG handler: terminates services or driver modules by name. Requires
 * the master device port and administrator privilege. The service and
 * module cases are compiled out on SECURE_KERNEL builds, leaving only
 * the kIOReturnBadArgument default.
 */
kern_return_t
is_io_catalog_terminate(
	mach_port_t master_port,
	uint32_t flag,
	io_name_t name )
{
	kern_return_t kr;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	kr = IOUserClient::clientHasPrivilege((void *) current_task(),
	    kIOClientPrivilegeAdministrator );
	if (kIOReturnSuccess != kr) {
		return kr;
	}

	switch (flag) {
#if !defined(SECURE_KERNEL)
	case kIOCatalogServiceTerminate:
		OSIterator * iter;
		IOService * service;

		iter = IORegistryIterator::iterateOver(gIOServicePlane,
		    kIORegistryIterateRecursively);
		if (!iter) {
			return kIOReturnNoMemory;
		}

		/* Re-walk from the top whenever the iterator is invalidated by
		 * registry changes; stop early if a terminate() call fails. */
		do {
			iter->reset();
			while ((service = (IOService *)iter->getNextObject())) {
				if (service->metaCast(name)) {
					if (!service->terminate( kIOServiceRequired
					    | kIOServiceSynchronous)) {
						kr = kIOReturnUnsupported;
						break;
					}
				}
			}
		} while (!service && !iter->isValid());
		iter->release();
		break;

	case kIOCatalogModuleUnload:
	case kIOCatalogModuleTerminate:
		kr = gIOCatalogue->terminateDriversForModule(name,
		    flag == kIOCatalogModuleUnload);
		break;
#endif

	default:
		kr = kIOReturnBadArgument;
		break;
	}

	return kr;
}
5686
/* Routine io_catalog_get_data */
/*
 * MIG handler: serializes catalogue data selected by 'flag' and returns
 * it to user space as an out-of-line vm_map_copy_t (ownership of *outData
 * transfers to the caller on success). Requires the master device port.
 */
kern_return_t
is_io_catalog_get_data(
	mach_port_t master_port,
	uint32_t flag,
	io_buf_ptr_t *outData,
	mach_msg_type_number_t *outDataCount)
{
	kern_return_t kr = kIOReturnSuccess;
	OSSerialize * s;

	if (master_port != master_device_port) {
		return kIOReturnNotPrivileged;
	}

	//printf("io_catalog_get_data called. flag: %d\n", flag);

	s = OSSerialize::withCapacity(4096);
	if (!s) {
		return kIOReturnNoMemory;
	}

	kr = gIOCatalogue->serializeData(flag, s);

	if (kr == kIOReturnSuccess) {
		vm_offset_t data;
		vm_map_copy_t copy;
		vm_size_t size;

		/* Stage the serialized text in a fresh kernel allocation, then
		 * hand it off as a copy object for the MIG reply.
		 * NOTE(review): if vm_map_copyin fails here, 'data' does not
		 * appear to be deallocated on this path — confirm whether the
		 * copyin consumes the pages on failure. */
		size = s->getLength();
		kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
		if (kr == kIOReturnSuccess) {
			bcopy(s->text(), (void *)data, size);
			kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
			    (vm_map_size_t)size, true, &copy);
			*outData = (char *)copy;
			*outDataCount = size;
		}
	}

	s->release();

	return kr;
}
5731
5732 /* Routine io_catalog_get_gen_count */
5733 kern_return_t
5734 is_io_catalog_get_gen_count(
5735 mach_port_t master_port,
5736 uint32_t *genCount)
5737 {
5738 if (master_port != master_device_port) {
5739 return kIOReturnNotPrivileged;
5740 }
5741
5742 //printf("io_catalog_get_gen_count called.\n");
5743
5744 if (!genCount) {
5745 return kIOReturnBadArgument;
5746 }
5747
5748 *genCount = gIOCatalogue->getGenerationCount();
5749
5750 return kIOReturnSuccess;
5751 }
5752
5753 /* Routine io_catalog_module_loaded.
5754 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5755 */
5756 kern_return_t
5757 is_io_catalog_module_loaded(
5758 mach_port_t master_port,
5759 io_name_t name)
5760 {
5761 if (master_port != master_device_port) {
5762 return kIOReturnNotPrivileged;
5763 }
5764
5765 //printf("io_catalog_module_loaded called. name %s\n", name);
5766
5767 if (!name) {
5768 return kIOReturnBadArgument;
5769 }
5770
5771 gIOCatalogue->moduleHasLoaded(name);
5772
5773 return kIOReturnSuccess;
5774 }
5775
5776 kern_return_t
5777 is_io_catalog_reset(
5778 mach_port_t master_port,
5779 uint32_t flag)
5780 {
5781 if (master_port != master_device_port) {
5782 return kIOReturnNotPrivileged;
5783 }
5784
5785 switch (flag) {
5786 case kIOCatalogResetDefault:
5787 gIOCatalogue->reset();
5788 break;
5789
5790 default:
5791 return kIOReturnBadArgument;
5792 }
5793
5794 return kIOReturnSuccess;
5795 }
5796
/*
 * Fast-path Mach trap for user-client calls. The user-supplied reference
 * either carries bit 32 set — routing to the DriverKit (IOUserServer)
 * UEXT trap — or is a port name for a classic IOUserClient, in which
 * case the client's trap table entry for args->index is invoked with the
 * six trap arguments. Returns kIOReturnBadArgument if the reference does
 * not resolve or no trap/target is found.
 */
kern_return_t
iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
	kern_return_t result = kIOReturnBadArgument;
	IOUserClient * userClient;
	OSObject * object;
	uintptr_t ref;

	ref = (uintptr_t) args->userClientRef;
	if ((1ULL << 32) & ref) {
		/* Bit 32 set: DriverKit user-server path. */
		object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
		if (object) {
			result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
		}
		OSSafeReleaseNULL(object);
	} else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
		IOExternalTrap *trap;
		IOService *target = NULL;

		trap = userClient->getTargetAndTrapForIndex(&target, args->index);

		if (trap && target) {
			IOTrap func;

			func = trap->func;

			if (func) {
				result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
			}
		}

		/* Drop the reference taken by the connect-ref lookup above. */
		iokit_remove_connect_reference(userClient);
	}

	return result;
}
5833
5834 /* Routine io_device_tree_entry_exists_with_name */
5835 kern_return_t
5836 is_io_device_tree_entry_exists_with_name(
5837 mach_port_t master_port,
5838 io_name_t name,
5839 boolean_t *exists )
5840 {
5841 OSCollectionIterator *iter;
5842
5843 if (master_port != master_device_port) {
5844 return kIOReturnNotPrivileged;
5845 }
5846
5847 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5848 *exists = iter && iter->getNextObject();
5849 OSSafeReleaseNULL(iter);
5850
5851 return kIOReturnSuccess;
5852 }
5853 } /* extern "C" */
5854
/*
 * Central dispatcher for user-client external method calls.
 *
 * selector         - method index requested by the user process.
 * args             - marshalled scalar/structure inputs and outputs.
 * dispatch         - optional dispatch-table entry (modern 10.5+ API); when
 *                    present, the legacy selector lookup below is skipped.
 * target/reference - passed through to dispatch->function.
 *
 * Returns the invoked method's IOReturn, or kIOReturnBadArgument /
 * kIOReturnUnsupported / kIOReturnIPCError for marshalling failures.
 */
IOReturn
IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
	IOReturn err;
	IOService * object;
	IOByteCount structureOutputSize;

	if (dispatch) {
		/*
		 * Modern path: validate the caller's argument counts/sizes against
		 * the dispatch entry.  kIOUCVariableStructureSize in a check field
		 * disables that particular check.
		 */
		uint32_t count;
		count = dispatch->checkScalarInputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
			return kIOReturnBadArgument;
		}

		/* Structure input may arrive inline or via a memory descriptor. */
		count = dispatch->checkStructureInputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureInputDescriptor)
		    ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkScalarOutputCount;
		if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
			return kIOReturnBadArgument;
		}

		count = dispatch->checkStructureOutputSize;
		if ((kIOUCVariableStructureSize != count)
		    && (count != ((args->structureOutputDescriptor)
		    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
			return kIOReturnBadArgument;
		}

		if (dispatch->function) {
			err = (*dispatch->function)(target, reference, args);
		} else {
			err = kIOReturnNoCompletion; /* implementator can dispatch */
		}
		return err;
	}


	// pre-Leopard API's don't do ool structs
	if (args->structureInputDescriptor || args->structureOutputDescriptor) {
		err = kIOReturnIPCError;
		return err;
	}

	/* Snapshot the caller-supplied output capacity before dispatching. */
	structureOutputSize = args->structureOutputSize;

	if (args->asyncWakePort) {
		/* Legacy asynchronous method: selector resolved by the subclass. */
		IOExternalAsyncMethod * method;
		object = NULL;
		if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		/* Foreground-only methods are refused for GPU-denied tasks. */
		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		/* Route to the shim matching the method's declared argument shape. */
		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_async_method_scalarI_structureI( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *)args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_async_method_scalarI_scalarO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_async_method_scalarI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_async_method_structureI_structureO( method, object,
			    args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
			    (char *)args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &args->structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	} else {
		/* Legacy synchronous method. */
		IOExternalMethod * method;
		object = NULL;
		if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
			return kIOReturnUnsupported;
		}

		if (kIOUCForegroundOnly & method->flags) {
			if (task_is_gpu_denied(current_task())) {
				return kIOReturnNotPermitted;
			}
		}

		switch (method->flags & kIOUCTypeMask) {
		case kIOUCScalarIStructI:
			err = shim_io_connect_method_scalarI_structureI( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureInput, args->structureInputSize );
			break;

		case kIOUCScalarIScalarO:
			err = shim_io_connect_method_scalarI_scalarO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    args->scalarOutput, &args->scalarOutputCount );
			break;

		case kIOUCScalarIStructO:
			err = shim_io_connect_method_scalarI_structureO( method, object,
			    args->scalarInput, args->scalarInputCount,
			    (char *) args->structureOutput, &structureOutputSize );
			break;


		case kIOUCStructIStructO:
			err = shim_io_connect_method_structureI_structureO( method, object,
			    (char *) args->structureInput, args->structureInputSize,
			    (char *) args->structureOutput, &structureOutputSize );
			break;

		default:
			err = kIOReturnBadArgument;
			break;
		}
	}

	/*
	 * NOTE(review): the async structure-output shims above write through
	 * &args->structureOutputSize directly, so this assignment restores the
	 * pre-call value saved earlier; only the synchronous path, which uses
	 * the local copy, gets a meaningful write-back here.  Confirm whether
	 * overwriting the async-updated size is intentional.
	 */
	args->structureOutputSize = structureOutputSize;

	return err;
}
6002
/*
 * Reserved vtable pad slots for binary compatibility.  Slots 0 and 1 are
 * marked Used on 32-bit (non-LP64) builds but remain Unused on LP64;
 * slots 2-15 are unused everywhere.
 */
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);