apple/xnu (xnu-6153.121.1) / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/IODeviceTreeSupport.h>
44 #include <IOKit/IOUserServer.h>
45 #include <IOKit/system.h>
46 #include <libkern/OSDebug.h>
47 #include <DriverKit/OSAction.h>
48 #include <sys/proc.h>
49 #include <sys/kauth.h>
50 #include <sys/codesign.h>
51
52 #include <mach/sdt.h>
53 #include <os/hash.h>
54
55 #if CONFIG_MACF
56
57 extern "C" {
58 #include <security/mac_framework.h>
59 };
60 #include <sys/kauth.h>
61
62 #define IOMACF_LOG 0
63
64 #endif /* CONFIG_MACF */
65
66 #include <IOKit/assert.h>
67
68 #include "IOServicePrivate.h"
69 #include "IOKitKernelInternal.h"
70
71 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
72 #define SCALAR32(x) ((uint32_t )x)
73 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
74 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
75 #define REF32(x) ((int)(x))
76
77 enum{
78 kIOUCAsync0Flags = 3ULL,
79 kIOUCAsync64Flag = 1ULL,
80 kIOUCAsyncErrorLoggedFlag = 2ULL
81 };
82
83 #if IOKITSTATS
84
85 #define IOStatisticsRegisterCounter() \
86 do { \
87 reserved->counter = IOStatistics::registerUserClient(this); \
88 } while (0)
89
90 #define IOStatisticsUnregisterCounter() \
91 do { \
92 if (reserved) \
93 IOStatistics::unregisterUserClient(reserved->counter); \
94 } while (0)
95
96 #define IOStatisticsClientCall() \
97 do { \
98 IOStatistics::countUserClientCall(client); \
99 } while (0)
100
101 #else
102
103 #define IOStatisticsRegisterCounter()
104 #define IOStatisticsUnregisterCounter()
105 #define IOStatisticsClientCall()
106
107 #endif /* IOKITSTATS */
108
109 #if DEVELOPMENT || DEBUG
110
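// FAKE_STACK_FRAME(a) saves the return-address slot of the current frame
// (__frameptr[1] on frame-pointer-based stacks) and overwrites it with 'a';
// the call site below passes getMetaClass(), presumably so that backtraces
// captured while a user-client callout runs are attributed to that class.
// FAKE_STACK_FRAME_END() restores the saved return address. These macros are
// compiled in for DEVELOPMENT/DEBUG builds only.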
111 #define FAKE_STACK_FRAME(a) \
112 const void ** __frameptr; \
113 const void * __retaddr; \
114 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
115 __retaddr = __frameptr[1]; \
116 __frameptr[1] = (a);
117
118 #define FAKE_STACK_FRAME_END() \
119 __frameptr[1] = __retaddr;
120
121 #else /* DEVELOPMENT || DEBUG */
122
123 #define FAKE_STACK_FRAME(a)
124 #define FAKE_STACK_FRAME_END()
125
126 #endif /* DEVELOPMENT || DEBUG */
127
128 #define ASYNC_REF_COUNT (sizeof(io_async_ref_t) / sizeof(natural_t))
129 #define ASYNC_REF64_COUNT (sizeof(io_async_ref64_t) / sizeof(io_user_reference_t))
130
131 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
132
133 extern "C" {
134 #include <mach/mach_traps.h>
135 #include <vm/vm_map.h>
136 } /* extern "C" */
137
138 struct IOMachPortHashList;
139
140 static_assert(IKOT_MAX_TYPE <= 255);
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
145 class IOMachPort : public OSObject
146 {
147 OSDeclareDefaultStructors(IOMachPort);
148 public:
149 SLIST_ENTRY(IOMachPort) link;
150 ipc_port_t port;
151 OSObject* object;
152 UInt32 mscount;
153 UInt8 holdDestroy;
154 UInt8 type;
155
156 static IOMachPort* withObjectAndType(OSObject *obj, ipc_kobject_type_t type);
157
158 static IOMachPortHashList* bucketForObject(OSObject *obj,
159 ipc_kobject_type_t type);
160
161 static IOMachPort* portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type);
162
163 static bool noMoreSendersForObject( OSObject * obj,
164 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
165 static void releasePortForObject( OSObject * obj,
166 ipc_kobject_type_t type );
167 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
168
169 static mach_port_name_t makeSendRightForTask( task_t task,
170 io_object_t obj, ipc_kobject_type_t type );
171
172 virtual void free() APPLE_KEXT_OVERRIDE;
173 };
174
175 #define super OSObject
176 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
177
178 static IOLock * gIOObjectPortLock;
179 IOLock * gIOUserServerLock;
180
181 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
182
183 SLIST_HEAD(IOMachPortHashList, IOMachPort);
184
185 #if CONFIG_EMBEDDED
186 #define PORT_HASH_SIZE 256
187 #else
188 #define PORT_HASH_SIZE 4096
189 #endif /* CONFIG_EMBEDDED */
190
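// Global table of IOMachPort entries: objects are hashed by kernel pointer
// (os_hash_kernel_pointer) into PORT_HASH_SIZE singly-linked buckets. All
// lookups and mutations of these buckets are serialized by gIOObjectPortLock.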
191 IOMachPortHashList ports[PORT_HASH_SIZE];
192
193 void
194 IOMachPortInitialize(void)
195 {
196 for (size_t i = 0; i < PORT_HASH_SIZE; i++) {
197 SLIST_INIT(&ports[i]);
198 }
199 }
200
201 IOMachPortHashList*
202 IOMachPort::bucketForObject(OSObject *obj, ipc_kobject_type_t type )
203 {
204 return &ports[os_hash_kernel_pointer(obj) % PORT_HASH_SIZE];
205 }
206
207 IOMachPort*
208 IOMachPort::portForObjectInBucket(IOMachPortHashList *bucket, OSObject *obj, ipc_kobject_type_t type)
209 {
210 IOMachPort *machPort;
211
212 SLIST_FOREACH(machPort, bucket, link) {
213 if (machPort->object == obj && machPort->type == type) {
214 return machPort;
215 }
216 }
217 return NULL;
218 }
219
220 IOMachPort*
221 IOMachPort::withObjectAndType(OSObject *obj, ipc_kobject_type_t type)
222 {
223 IOMachPort *machPort = NULL;
224
225 machPort = new IOMachPort;
226 if (__improbable(machPort && !machPort->init())) {
227 return NULL;
228 }
229
230 machPort->object = obj;
231 machPort->type = (typeof(machPort->type))type;
232 machPort->port = iokit_alloc_object_port(obj, type);
233
234 obj->taggedRetain(OSTypeID(OSCollection));
235 machPort->mscount++;
236
237 return machPort;
238 }
239
240 bool
241 IOMachPort::noMoreSendersForObject( OSObject * obj,
242 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
243 {
244 IOMachPort *machPort = NULL;
245 IOUserClient *uc;
246 OSAction *action;
247 bool destroyed = true;
248
249 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
250
251 obj->retain();
252
253 lck_mtx_lock(gIOObjectPortLock);
254
255 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
256
257 if (machPort) {
258 destroyed = (machPort->mscount <= *mscount);
259 if (!destroyed) {
260 *mscount = machPort->mscount;
261 lck_mtx_unlock(gIOObjectPortLock);
262 } else {
263 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj))) {
264 uc->noMoreSenders();
265 }
266 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
267
268 lck_mtx_unlock(gIOObjectPortLock);
269
270 machPort->release();
271 obj->taggedRelease(OSTypeID(OSCollection));
272 }
273 } else {
274 lck_mtx_unlock(gIOObjectPortLock);
275 }
276
277 if ((IKOT_UEXT_OBJECT == type) && (action = OSDynamicCast(OSAction, obj))) {
278 action->Aborted();
279 }
280
281 obj->release();
282
283 return destroyed;
284 }
285
286 void
287 IOMachPort::releasePortForObject( OSObject * obj,
288 ipc_kobject_type_t type )
289 {
290 IOMachPort *machPort;
291 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
292
293 assert(IKOT_IOKIT_CONNECT != type);
294
295 lck_mtx_lock(gIOObjectPortLock);
296
297 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
298
299 if (machPort && !machPort->holdDestroy) {
300 obj->retain();
301 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
302
303 lck_mtx_unlock(gIOObjectPortLock);
304
305 machPort->release();
306 obj->taggedRelease(OSTypeID(OSCollection));
307 obj->release();
308 } else {
309 lck_mtx_unlock(gIOObjectPortLock);
310 }
311 }
312
313 void
314 IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
315 {
316 IOMachPort * machPort;
317
318 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
319 lck_mtx_lock(gIOObjectPortLock);
320
321 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
322
323 if (machPort) {
324 machPort->holdDestroy = true;
325 }
326
327 lck_mtx_unlock(gIOObjectPortLock);
328 }
329
330 void
331 IOMachPortDestroyUserReferences(OSObject * obj, natural_t type)
332 {
333 IOMachPort::releasePortForObject(obj, type);
334 }
335
336 void
337 IOUserClient::destroyUserReferences( OSObject * obj )
338 {
339 IOMachPort *machPort;
340
341 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
342
343 // panther, 3160200
344 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
345
346 obj->retain();
347 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, IKOT_IOKIT_CONNECT);
348 IOMachPortHashList *mappingBucket = NULL;
349
350 lck_mtx_lock(gIOObjectPortLock);
351
352 IOUserClient * uc = OSDynamicCast(IOUserClient, obj);
353 if (uc && uc->mappings) {
354 mappingBucket = IOMachPort::bucketForObject(uc->mappings, IKOT_IOKIT_CONNECT);
355 }
356
357 machPort = IOMachPort::portForObjectInBucket(bucket, obj, IKOT_IOKIT_CONNECT);
358
359 if (machPort == NULL) {
360 lck_mtx_unlock(gIOObjectPortLock);
361 goto end;
362 }
363
364 SLIST_REMOVE(bucket, machPort, IOMachPort, link);
365 obj->taggedRelease(OSTypeID(OSCollection));
366
367 if (uc) {
368 uc->noMoreSenders();
369 if (uc->mappings) {
370 uc->mappings->taggedRetain(OSTypeID(OSCollection));
371 machPort->object = uc->mappings;
372 SLIST_INSERT_HEAD(mappingBucket, machPort, link);
373 iokit_switch_object_port(machPort->port, uc->mappings, IKOT_IOKIT_CONNECT);
374
375 lck_mtx_unlock(gIOObjectPortLock);
376
377 uc->mappings->release();
378 uc->mappings = NULL;
379 } else {
380 lck_mtx_unlock(gIOObjectPortLock);
381 machPort->release();
382 }
383 } else {
384 lck_mtx_unlock(gIOObjectPortLock);
385 machPort->release();
386 }
387
388
389 end:
390
391 obj->release();
392 }
393
394 mach_port_name_t
395 IOMachPort::makeSendRightForTask( task_t task,
396 io_object_t obj, ipc_kobject_type_t type )
397 {
398 return iokit_make_send_right( task, obj, type );
399 }
400
401 void
402 IOMachPort::free( void )
403 {
404 if (port) {
405 iokit_destroy_object_port( port );
406 }
407 super::free();
408 }
409
410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
411
412 class IOUserIterator : public OSIterator
413 {
414 OSDeclareDefaultStructors(IOUserIterator);
415 public:
416 OSObject * userIteratorObject;
417 IOLock * lock;
418
419 static IOUserIterator * withIterator(LIBKERN_CONSUMED OSIterator * iter);
420 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
421 virtual void free() APPLE_KEXT_OVERRIDE;
422
423 virtual void reset() APPLE_KEXT_OVERRIDE;
424 virtual bool isValid() APPLE_KEXT_OVERRIDE;
425 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
426 virtual OSObject * copyNextObject();
427 };
428
429 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
430
431 class IOUserNotification : public IOUserIterator
432 {
433 OSDeclareDefaultStructors(IOUserNotification);
434
435 #define holdNotify userIteratorObject
436
437 public:
438
439 virtual void free() APPLE_KEXT_OVERRIDE;
440
441 virtual void setNotification( IONotifier * obj );
442
443 virtual void reset() APPLE_KEXT_OVERRIDE;
444 virtual bool isValid() APPLE_KEXT_OVERRIDE;
445 };
446
447 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
448
449 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
450
451 IOUserIterator *
452 IOUserIterator::withIterator(OSIterator * iter)
453 {
454 IOUserIterator * me;
455
456 if (!iter) {
457 return NULL;
458 }
459
460 me = new IOUserIterator;
461 if (me && !me->init()) {
462 me->release();
463 me = NULL;
464 }
465 if (!me) {
466 return me;
467 }
468 me->userIteratorObject = iter;
469
470 return me;
471 }
472
473 bool
474 IOUserIterator::init( void )
475 {
476 if (!OSObject::init()) {
477 return false;
478 }
479
480 lock = IOLockAlloc();
481 if (!lock) {
482 return false;
483 }
484
485 return true;
486 }
487
488 void
489 IOUserIterator::free()
490 {
491 if (userIteratorObject) {
492 userIteratorObject->release();
493 }
494 if (lock) {
495 IOLockFree(lock);
496 }
497 OSObject::free();
498 }
499
500 void
501 IOUserIterator::reset()
502 {
503 IOLockLock(lock);
504 assert(OSDynamicCast(OSIterator, userIteratorObject));
505 ((OSIterator *)userIteratorObject)->reset();
506 IOLockUnlock(lock);
507 }
508
509 bool
510 IOUserIterator::isValid()
511 {
512 bool ret;
513
514 IOLockLock(lock);
515 assert(OSDynamicCast(OSIterator, userIteratorObject));
516 ret = ((OSIterator *)userIteratorObject)->isValid();
517 IOLockUnlock(lock);
518
519 return ret;
520 }
521
522 OSObject *
523 IOUserIterator::getNextObject()
524 {
525 assert(false);
526 return NULL;
527 }
528
529 OSObject *
530 IOUserIterator::copyNextObject()
531 {
532 OSObject * ret = NULL;
533
534 IOLockLock(lock);
535 if (userIteratorObject) {
536 ret = ((OSIterator *)userIteratorObject)->getNextObject();
537 if (ret) {
538 ret->retain();
539 }
540 }
541 IOLockUnlock(lock);
542
543 return ret;
544 }
545
546 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
547 extern "C" {
548 // functions called from osfmk/device/iokit_rpc.c
549
550 void
551 iokit_port_object_description(io_object_t obj, kobject_description_t desc)
552 {
553 IORegistryEntry * regEntry;
554 IOUserNotification * __unused noti;
555 _IOServiceNotifier * __unused serviceNoti;
556 OSSerialize * __unused s;
557
558 if ((regEntry = OSDynamicCast(IORegistryEntry, obj))) {
559 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(0x%qx)", obj->getMetaClass()->getClassName(), regEntry->getRegistryEntryID());
560 #if DEVELOPMENT || DEBUG
561 } else if ((noti = OSDynamicCast(IOUserNotification, obj))
562 && ((serviceNoti = OSDynamicCast(_IOServiceNotifier, noti->holdNotify)))) {
563 s = OSSerialize::withCapacity(page_size);
564 if (s && serviceNoti->matching->serialize(s)) {
565 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s(%s)", obj->getMetaClass()->getClassName(), s->text());
566 }
567 OSSafeReleaseNULL(s);
568 #endif /* DEVELOPMENT || DEBUG */
569 } else {
570 snprintf(desc, KOBJECT_DESCRIPTION_LENGTH, "%s", obj->getMetaClass()->getClassName());
571 }
572 }
573
574 void
575 iokit_add_reference( io_object_t obj, natural_t type )
576 {
577 IOUserClient * uc;
578
579 if (!obj) {
580 return;
581 }
582
583 if ((IKOT_IOKIT_CONNECT == type)
584 && (uc = OSDynamicCast(IOUserClient, obj))) {
585 OSIncrementAtomic(&uc->__ipc);
586 }
587
588 obj->retain();
589 }
590
591 void
592 iokit_remove_reference( io_object_t obj )
593 {
594 if (obj) {
595 obj->release();
596 }
597 }
598
599 void
600 iokit_remove_connect_reference( io_object_t obj )
601 {
602 IOUserClient * uc;
603 bool finalize = false;
604
605 if (!obj) {
606 return;
607 }
608
609 if ((uc = OSDynamicCast(IOUserClient, obj))) {
610 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive()) {
611 IOLockLock(gIOObjectPortLock);
612 if ((finalize = uc->__ipcFinal)) {
613 uc->__ipcFinal = false;
614 }
615 IOLockUnlock(gIOObjectPortLock);
616 }
617 if (finalize) {
618 uc->scheduleFinalize(true);
619 }
620 }
621
622 obj->release();
623 }
624
625 bool
626 IOUserClient::finalizeUserReferences(OSObject * obj)
627 {
628 IOUserClient * uc;
629 bool ok = true;
630
631 if ((uc = OSDynamicCast(IOUserClient, obj))) {
632 IOLockLock(gIOObjectPortLock);
633 if ((uc->__ipcFinal = (0 != uc->__ipc))) {
634 ok = false;
635 }
636 IOLockUnlock(gIOObjectPortLock);
637 }
638 return ok;
639 }
640
641 ipc_port_t
642 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
643 {
644 IOMachPort *machPort = NULL;
645 ipc_port_t port = NULL;
646
647 IOMachPortHashList *bucket = IOMachPort::bucketForObject(obj, type);
648
649 lck_mtx_lock(gIOObjectPortLock);
650
651 machPort = IOMachPort::portForObjectInBucket(bucket, obj, type);
652
653 if (__improbable(machPort == NULL)) {
654 machPort = IOMachPort::withObjectAndType(obj, type);
655 if (__improbable(machPort == NULL)) {
656 goto end;
657 }
658 SLIST_INSERT_HEAD(bucket, machPort, link);
659 } else {
660 machPort->mscount++;
661 }
662
663 iokit_retain_port(machPort->port);
664 port = machPort->port;
665
666 end:
667 lck_mtx_unlock(gIOObjectPortLock);
668
669 return port;
670 }
671
672 kern_return_t
673 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
674 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
675 {
676 IOUserClient * client;
677 IOMemoryMap * map;
678 IOUserNotification * notify;
679
680 if (!IOMachPort::noMoreSendersForObject( obj, type, mscount )) {
681 return kIOReturnNotReady;
682 }
683
684 if (IKOT_IOKIT_CONNECT == type) {
685 if ((client = OSDynamicCast( IOUserClient, obj ))) {
686 IOStatisticsClientCall();
687 IOLockLock(client->lock);
688 client->clientDied();
689 IOLockUnlock(client->lock);
690 }
691 } else if (IKOT_IOKIT_OBJECT == type) {
692 if ((map = OSDynamicCast( IOMemoryMap, obj ))) {
693 map->taskDied();
694 } else if ((notify = OSDynamicCast( IOUserNotification, obj ))) {
695 notify->setNotification( NULL );
696 }
697 }
698
699 return kIOReturnSuccess;
700 }
701 }; /* extern "C" */
702
703 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
704
705 class IOServiceUserNotification : public IOUserNotification
706 {
707 OSDeclareDefaultStructors(IOServiceUserNotification);
708
709 struct PingMsg {
710 mach_msg_header_t msgHdr;
711 OSNotificationHeader64 notifyHeader;
712 };
713
714 enum { kMaxOutstanding = 1024 };
715
716 PingMsg * pingMsg;
717 vm_size_t msgSize;
718 OSArray * newSet;
719 bool armed;
720 bool ipcLogged;
721
722 public:
723
724 virtual bool init( mach_port_t port, natural_t type,
725 void * reference, vm_size_t referenceSize,
726 bool clientIs64 );
727 virtual void free() APPLE_KEXT_OVERRIDE;
728 void invalidatePort(void);
729
730 static bool _handler( void * target,
731 void * ref, IOService * newService, IONotifier * notifier );
732 virtual bool handler( void * ref, IOService * newService );
733
734 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
735 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
736 };
737
738 class IOServiceMessageUserNotification : public IOUserNotification
739 {
740 OSDeclareDefaultStructors(IOServiceMessageUserNotification);
741
742 struct PingMsg {
743 mach_msg_header_t msgHdr;
744 mach_msg_body_t msgBody;
745 mach_msg_port_descriptor_t ports[1];
746 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
747 };
748
749 PingMsg * pingMsg;
750 vm_size_t msgSize;
751 uint8_t clientIs64;
752 int owningPID;
753 bool ipcLogged;
754
755 public:
756
757 virtual bool init( mach_port_t port, natural_t type,
758 void * reference, vm_size_t referenceSize,
759 vm_size_t extraSize,
760 bool clientIs64 );
761
762 virtual void free() APPLE_KEXT_OVERRIDE;
763 void invalidatePort(void);
764
765 static IOReturn _handler( void * target, void * ref,
766 UInt32 messageType, IOService * provider,
767 void * messageArgument, vm_size_t argSize );
768 virtual IOReturn handler( void * ref,
769 UInt32 messageType, IOService * provider,
770 void * messageArgument, vm_size_t argSize );
771
772 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
773 virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
774 };
775
776 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
777
778 #undef super
779 #define super IOUserIterator
780 OSDefineMetaClass( IOUserNotification, IOUserIterator );
781 OSDefineAbstractStructors( IOUserNotification, IOUserIterator );
782
783 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
784
785 void
786 IOUserNotification::free( void )
787 {
788 if (holdNotify) {
789 assert(OSDynamicCast(IONotifier, holdNotify));
790 ((IONotifier *)holdNotify)->remove();
791 holdNotify = NULL;
792 }
793 // can't be in handler now
794
795 super::free();
796 }
797
798
799 void
800 IOUserNotification::setNotification( IONotifier * notify )
801 {
802 OSObject * previousNotify;
803
804 IOLockLock( gIOObjectPortLock);
805
806 previousNotify = holdNotify;
807 holdNotify = notify;
808
809 IOLockUnlock( gIOObjectPortLock);
810
811 if (previousNotify) {
812 assert(OSDynamicCast(IONotifier, previousNotify));
813 ((IONotifier *)previousNotify)->remove();
814 }
815 }
816
817 void
818 IOUserNotification::reset()
819 {
820 // ?
821 }
822
823 bool
824 IOUserNotification::isValid()
825 {
826 return true;
827 }
828
829 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
830
831 #undef super
832 #define super IOUserNotification
833 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
834
835 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
836
837 bool
838 IOServiceUserNotification::init( mach_port_t port, natural_t type,
839 void * reference, vm_size_t referenceSize,
840 bool clientIs64 )
841 {
842 if (!super::init()) {
843 return false;
844 }
845
846 newSet = OSArray::withCapacity( 1 );
847 if (!newSet) {
848 return false;
849 }
850
851 if (referenceSize > sizeof(OSAsyncReference64)) {
852 return false;
853 }
854
855 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
856 pingMsg = (PingMsg *) IOMalloc( msgSize);
857 if (!pingMsg) {
858 return false;
859 }
860
861 bzero( pingMsg, msgSize);
862
863 pingMsg->msgHdr.msgh_remote_port = port;
864 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
865 MACH_MSG_TYPE_COPY_SEND /*remote*/,
866 MACH_MSG_TYPE_MAKE_SEND /*local*/);
867 pingMsg->msgHdr.msgh_size = msgSize;
868 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
869
870 pingMsg->notifyHeader.size = 0;
871 pingMsg->notifyHeader.type = type;
872 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
873
874 return true;
875 }
876
877 void
878 IOServiceUserNotification::invalidatePort(void)
879 {
880 if (pingMsg) {
881 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
882 }
883 }
884
885 void
886 IOServiceUserNotification::free( void )
887 {
888 PingMsg * _pingMsg;
889 vm_size_t _msgSize;
890 OSArray * _newSet;
891
892 _pingMsg = pingMsg;
893 _msgSize = msgSize;
894 _newSet = newSet;
895
896 super::free();
897
898 if (_pingMsg && _msgSize) {
899 if (_pingMsg->msgHdr.msgh_remote_port) {
900 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
901 }
902 IOFree(_pingMsg, _msgSize);
903 }
904
905 if (_newSet) {
906 _newSet->release();
907 }
908 }
909
910 bool
911 IOServiceUserNotification::_handler( void * target,
912 void * ref, IOService * newService, IONotifier * notifier )
913 {
914 return ((IOServiceUserNotification *) target)->handler( ref, newService );
915 }
916
917 bool
918 IOServiceUserNotification::handler( void * ref,
919 IOService * newService )
920 {
921 unsigned int count;
922 kern_return_t kr;
923 ipc_port_t port = NULL;
924 bool sendPing = false;
925
926 IOTakeLock( lock );
927
928 count = newSet->getCount();
929 if (count < kMaxOutstanding) {
930 newSet->setObject( newService );
931 if ((sendPing = (armed && (0 == count)))) {
932 armed = false;
933 }
934 }
935
936 IOUnlock( lock );
937
938 if (kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type) {
939 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
940 }
941
942 if (sendPing) {
943 if ((port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ))) {
944 pingMsg->msgHdr.msgh_local_port = port;
945 } else {
946 pingMsg->msgHdr.msgh_local_port = NULL;
947 }
948
949 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
950 pingMsg->msgHdr.msgh_size,
951 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
952 0);
953 if (port) {
954 iokit_release_port( port );
955 }
956
957 if ((KERN_SUCCESS != kr) && !ipcLogged) {
958 ipcLogged = true;
959 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
960 }
961 }
962
963 return true;
964 }
965 OSObject *
966 IOServiceUserNotification::getNextObject()
967 {
968 assert(false);
969 return NULL;
970 }
971
972 OSObject *
973 IOServiceUserNotification::copyNextObject()
974 {
975 unsigned int count;
976 OSObject * result;
977
978 IOLockLock(lock);
979
980 count = newSet->getCount();
981 if (count) {
982 result = newSet->getObject( count - 1 );
983 result->retain();
984 newSet->removeObject( count - 1);
985 } else {
986 result = NULL;
987 armed = true;
988 }
989
990 IOLockUnlock(lock);
991
992 return result;
993 }
994
995 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
996
997 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
998
999 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1000
1001 bool
1002 IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
1003 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
1004 bool client64 )
1005 {
1006 if (!super::init()) {
1007 return false;
1008 }
1009
1010 if (referenceSize > sizeof(OSAsyncReference64)) {
1011 return false;
1012 }
1013
1014 clientIs64 = client64;
1015
1016 owningPID = proc_selfpid();
1017
1018 extraSize += sizeof(IOServiceInterestContent64);
1019 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
1020 pingMsg = (PingMsg *) IOMalloc( msgSize);
1021 if (!pingMsg) {
1022 return false;
1023 }
1024
1025 bzero( pingMsg, msgSize);
1026
1027 pingMsg->msgHdr.msgh_remote_port = port;
1028 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
1029 | MACH_MSGH_BITS(
1030 MACH_MSG_TYPE_COPY_SEND /*remote*/,
1031 MACH_MSG_TYPE_MAKE_SEND /*local*/);
1032 pingMsg->msgHdr.msgh_size = msgSize;
1033 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
1034
1035 pingMsg->msgBody.msgh_descriptor_count = 1;
1036
1037 pingMsg->ports[0].name = NULL;
1038 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
1039 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
1040
1041 pingMsg->notifyHeader.size = extraSize;
1042 pingMsg->notifyHeader.type = type;
1043 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
1044
1045 return true;
1046 }
1047
1048 void
1049 IOServiceMessageUserNotification::invalidatePort(void)
1050 {
1051 if (pingMsg) {
1052 pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
1053 }
1054 }
1055
1056 void
1057 IOServiceMessageUserNotification::free( void )
1058 {
1059 PingMsg * _pingMsg;
1060 vm_size_t _msgSize;
1061
1062 _pingMsg = pingMsg;
1063 _msgSize = msgSize;
1064
1065 super::free();
1066
1067 if (_pingMsg && _msgSize) {
1068 if (_pingMsg->msgHdr.msgh_remote_port) {
1069 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
1070 }
1071 IOFree( _pingMsg, _msgSize);
1072 }
1073 }
1074
1075 IOReturn
1076 IOServiceMessageUserNotification::_handler( void * target, void * ref,
1077 UInt32 messageType, IOService * provider,
1078 void * argument, vm_size_t argSize )
1079 {
1080 return ((IOServiceMessageUserNotification *) target)->handler(
1081 ref, messageType, provider, argument, argSize);
1082 }
1083
1084 IOReturn
1085 IOServiceMessageUserNotification::handler( void * ref,
1086 UInt32 messageType, IOService * provider,
1087 void * messageArgument, vm_size_t callerArgSize )
1088 {
1089 enum { kLocalMsgSize = 0x100 };
1090 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
1091 void * allocMsg;
1092 kern_return_t kr;
1093 vm_size_t argSize;
1094 vm_size_t thisMsgSize;
1095 ipc_port_t thisPort, providerPort;
1096 struct PingMsg * thisMsg;
1097 IOServiceInterestContent64 * data;
1098
1099 if (kIOMessageCopyClientID == messageType) {
1100 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1101 return kIOReturnSuccess;
1102 }
1103
1104 if (callerArgSize == 0) {
1105 if (clientIs64) {
1106 argSize = sizeof(data->messageArgument[0]);
1107 } else {
1108 argSize = sizeof(uint32_t);
1109 }
1110 } else {
1111 if (callerArgSize > kIOUserNotifyMaxMessageSize) {
1112 callerArgSize = kIOUserNotifyMaxMessageSize;
1113 }
1114 argSize = callerArgSize;
1115 }
1116
1117 // adjust message size for ipc restrictions
1118 natural_t type;
1119 type = pingMsg->notifyHeader.type;
1120 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1121 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1122 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1123
1124 thisMsgSize = msgSize
1125 + sizeof(IOServiceInterestContent64)
1126 - sizeof(data->messageArgument)
1127 + argSize;
1128
1129 if (thisMsgSize > sizeof(stackMsg)) {
1130 allocMsg = IOMalloc(thisMsgSize);
1131 if (!allocMsg) {
1132 return kIOReturnNoMemory;
1133 }
1134 thisMsg = (typeof(thisMsg))allocMsg;
1135 } else {
1136 allocMsg = NULL;
1137 thisMsg = (typeof(thisMsg))stackMsg;
1138 }
1139
1140 bcopy(pingMsg, thisMsg, msgSize);
1141 thisMsg->notifyHeader.type = type;
1142 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1143 // == pingMsg->notifyHeader.content;
1144 data->messageType = messageType;
1145
1146 if (callerArgSize == 0) {
1147 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1148 if (!clientIs64) {
1149 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1150 }
1151 } else {
1152 bcopy( messageArgument, data->messageArgument, callerArgSize );
1153 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1154 }
1155
1156 thisMsg->notifyHeader.type = type;
1157 thisMsg->msgHdr.msgh_size = thisMsgSize;
1158
1159 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1160 thisMsg->ports[0].name = providerPort;
1161 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1162 thisMsg->msgHdr.msgh_local_port = thisPort;
1163
1164 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1165 thisMsg->msgHdr.msgh_size,
1166 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1167 0);
1168 if (thisPort) {
1169 iokit_release_port( thisPort );
1170 }
1171 if (providerPort) {
1172 iokit_release_port( providerPort );
1173 }
1174
1175 if (allocMsg) {
1176 IOFree(allocMsg, thisMsgSize);
1177 }
1178
1179 if ((KERN_SUCCESS != kr) && !ipcLogged) {
1180 ipcLogged = true;
1181 IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
1182 }
1183
1184 return kIOReturnSuccess;
1185 }
1186
1187 OSObject *
1188 IOServiceMessageUserNotification::getNextObject()
1189 {
1190 return NULL;
1191 }
1192
1193 OSObject *
1194 IOServiceMessageUserNotification::copyNextObject()
1195 {
1196 return NULL;
1197 }
1198
1199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1200
1201 #undef super
1202 #define super IOService
1203 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1204
1205 IOLock * gIOUserClientOwnersLock;
1206
1207 void
1208 IOUserClient::initialize( void )
1209 {
1210 gIOObjectPortLock = IOLockAlloc();
1211 gIOUserClientOwnersLock = IOLockAlloc();
1212 gIOUserServerLock = IOLockAlloc();
1213 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1214 }
1215
1216 void
1217 IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1218 mach_port_t wakePort,
1219 void *callback, void *refcon)
1220 {
1221 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1222 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1223 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1224 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1225 }
1226
1227 void
1228 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1229 mach_port_t wakePort,
1230 mach_vm_address_t callback, io_user_reference_t refcon)
1231 {
1232 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1233 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1234 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1235 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1236 }
1237
1238 void
1239 IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1240 mach_port_t wakePort,
1241 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1242 {
1243 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1244 if (vm_map_is_64bit(get_task_map(task))) {
1245 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1246 }
1247 }
1248
1249 static OSDictionary *
1250 CopyConsoleUser(UInt32 uid)
1251 {
1252 OSArray * array;
1253 OSDictionary * user = NULL;
1254
1255 if ((array = OSDynamicCast(OSArray,
1256 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1257 for (unsigned int idx = 0;
1258 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1259 idx++) {
1260 OSNumber * num;
1261
1262 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1263 && (uid == num->unsigned32BitValue())) {
1264 user->retain();
1265 break;
1266 }
1267 }
1268 array->release();
1269 }
1270 return user;
1271 }
1272
1273 static OSDictionary *
1274 CopyUserOnConsole(void)
1275 {
1276 OSArray * array;
1277 OSDictionary * user = NULL;
1278
1279 if ((array = OSDynamicCast(OSArray,
1280 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) {
1281 for (unsigned int idx = 0;
1282 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1283 idx++) {
1284 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) {
1285 user->retain();
1286 break;
1287 }
1288 }
1289 array->release();
1290 }
1291 return user;
1292 }
1293
1294 IOReturn
1295 IOUserClient::clientHasAuthorization( task_t task,
1296 IOService * service )
1297 {
1298 proc_t p;
1299
1300 p = (proc_t) get_bsdtask_info(task);
1301 if (p) {
1302 uint64_t authorizationID;
1303
1304 authorizationID = proc_uniqueid(p);
1305 if (authorizationID) {
1306 if (service->getAuthorizationID() == authorizationID) {
1307 return kIOReturnSuccess;
1308 }
1309 }
1310 }
1311
1312 return kIOReturnNotPermitted;
1313 }
1314
1315 IOReturn
1316 IOUserClient::clientHasPrivilege( void * securityToken,
1317 const char * privilegeName )
1318 {
1319 kern_return_t kr;
1320 security_token_t token;
1321 mach_msg_type_number_t count;
1322 task_t task;
1323 OSDictionary * user;
1324 bool secureConsole;
1325
1326
1327 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1328 sizeof(kIOClientPrivilegeForeground))) {
1329 if (task_is_gpu_denied(current_task())) {
1330 return kIOReturnNotPrivileged;
1331 } else {
1332 return kIOReturnSuccess;
1333 }
1334 }
1335
1336 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1337 sizeof(kIOClientPrivilegeConsoleSession))) {
1338 kauth_cred_t cred;
1339 proc_t p;
1340
1341 task = (task_t) securityToken;
1342 if (!task) {
1343 task = current_task();
1344 }
1345 p = (proc_t) get_bsdtask_info(task);
1346 kr = kIOReturnNotPrivileged;
1347
1348 if (p && (cred = kauth_cred_proc_ref(p))) {
1349 user = CopyUserOnConsole();
1350 if (user) {
1351 OSNumber * num;
1352 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1353 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue())) {
1354 kr = kIOReturnSuccess;
1355 }
1356 user->release();
1357 }
1358 kauth_cred_unref(&cred);
1359 }
1360 return kr;
1361 }
1362
1363 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1364 sizeof(kIOClientPrivilegeSecureConsoleProcess)))) {
1365 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1366 } else {
1367 task = (task_t)securityToken;
1368 }
1369
1370 count = TASK_SECURITY_TOKEN_COUNT;
1371 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1372
1373 if (KERN_SUCCESS != kr) {
1374 } else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1375 sizeof(kIOClientPrivilegeAdministrator))) {
1376 if (0 != token.val[0]) {
1377 kr = kIOReturnNotPrivileged;
1378 }
1379 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1380 sizeof(kIOClientPrivilegeLocalUser))) {
1381 user = CopyConsoleUser(token.val[0]);
1382 if (user) {
1383 user->release();
1384 } else {
1385 kr = kIOReturnNotPrivileged;
1386 }
1387 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1388 sizeof(kIOClientPrivilegeConsoleUser))) {
1389 user = CopyConsoleUser(token.val[0]);
1390 if (user) {
1391 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) {
1392 kr = kIOReturnNotPrivileged;
1393 } else if (secureConsole) {
1394 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1395 if (pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) {
1396 kr = kIOReturnNotPrivileged;
1397 }
1398 }
1399 user->release();
1400 } else {
1401 kr = kIOReturnNotPrivileged;
1402 }
1403 } else {
1404 kr = kIOReturnUnsupported;
1405 }
1406
1407 return kr;
1408 }
1409
1410 OSDictionary *
1411 IOUserClient::copyClientEntitlements(task_t task)
1412 {
1413 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1414
1415 proc_t p = NULL;
1416 pid_t pid = 0;
1417 size_t len = 0;
1418 void *entitlements_blob = NULL;
1419 char *entitlements_data = NULL;
1420 OSObject *entitlements_obj = NULL;
1421 OSDictionary *entitlements = NULL;
1422 OSString *errorString = NULL;
1423
1424 p = (proc_t)get_bsdtask_info(task);
1425 if (p == NULL) {
1426 goto fail;
1427 }
1428 pid = proc_pid(p);
1429
1430 if (cs_entitlements_dictionary_copy(p, (void **)&entitlements) == 0) {
1431 if (entitlements) {
1432 return entitlements;
1433 }
1434 }
1435
1436 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0) {
1437 goto fail;
1438 }
1439
1440 if (len <= offsetof(CS_GenericBlob, data)) {
1441 goto fail;
1442 }
1443
1444 /*
1445 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1446 * we'll try to parse in the kernel.
1447 */
1448 len -= offsetof(CS_GenericBlob, data);
1449 if (len > MAX_ENTITLEMENTS_LEN) {
1450 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n",
1451 proc_best_name(p), pid, len, MAX_ENTITLEMENTS_LEN);
1452 goto fail;
1453 }
1454
1455 /*
1456 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1457 * what is stored in the entitlements blob. Copy the string and
1458 * terminate it.
1459 */
1460 entitlements_data = (char *)IOMalloc(len + 1);
1461 if (entitlements_data == NULL) {
1462 goto fail;
1463 }
1464 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1465 entitlements_data[len] = '\0';
1466
1467 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1468 if (errorString != NULL) {
1469 IOLog("failed to parse entitlements for %s[%u]: %s\n",
1470 proc_best_name(p), pid, errorString->getCStringNoCopy());
1471 goto fail;
1472 }
1473 if (entitlements_obj == NULL) {
1474 goto fail;
1475 }
1476
1477 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1478 if (entitlements == NULL) {
1479 goto fail;
1480 }
1481 entitlements_obj = NULL;
1482
1483 fail:
1484 if (entitlements_data != NULL) {
1485 IOFree(entitlements_data, len + 1);
1486 }
1487 if (entitlements_obj != NULL) {
1488 entitlements_obj->release();
1489 }
1490 if (errorString != NULL) {
1491 errorString->release();
1492 }
1493 return entitlements;
1494 }
1495
1496 OSObject *
1497 IOUserClient::copyClientEntitlement( task_t task,
1498 const char * entitlement )
1499 {
1500 OSDictionary *entitlements;
1501 OSObject *value;
1502
1503 entitlements = copyClientEntitlements(task);
1504 if (entitlements == NULL) {
1505 return NULL;
1506 }
1507
1508 /* Fetch the entitlement value from the dictionary. */
1509 value = entitlements->getObject(entitlement);
1510 if (value != NULL) {
1511 value->retain();
1512 }
1513
1514 entitlements->release();
1515 return value;
1516 }
1517
1518 bool
1519 IOUserClient::init()
1520 {
1521 if (getPropertyTable() || super::init()) {
1522 return reserve();
1523 }
1524
1525 return false;
1526 }
1527
1528 bool
1529 IOUserClient::init(OSDictionary * dictionary)
1530 {
1531 if (getPropertyTable() || super::init(dictionary)) {
1532 return reserve();
1533 }
1534
1535 return false;
1536 }
1537
1538 bool
1539 IOUserClient::initWithTask(task_t owningTask,
1540 void * securityID,
1541 UInt32 type )
1542 {
1543 if (getPropertyTable() || super::init()) {
1544 return reserve();
1545 }
1546
1547 return false;
1548 }
1549
1550 bool
1551 IOUserClient::initWithTask(task_t owningTask,
1552 void * securityID,
1553 UInt32 type,
1554 OSDictionary * properties )
1555 {
1556 bool ok;
1557
1558 ok = super::init( properties );
1559 ok &= initWithTask( owningTask, securityID, type );
1560
1561 return ok;
1562 }
1563
1564 bool
1565 IOUserClient::reserve()
1566 {
1567 if (!reserved) {
1568 reserved = IONew(ExpansionData, 1);
1569 if (!reserved) {
1570 return false;
1571 }
1572 }
1573 setTerminateDefer(NULL, true);
1574 IOStatisticsRegisterCounter();
1575
1576 return true;
1577 }
1578
1579 struct IOUserClientOwner {
1580 task_t task;
1581 queue_chain_t taskLink;
1582 IOUserClient * uc;
1583 queue_chain_t ucLink;
1584 };
1585
1586 IOReturn
1587 IOUserClient::registerOwner(task_t task)
1588 {
1589 IOUserClientOwner * owner;
1590 IOReturn ret;
1591 bool newOwner;
1592
1593 IOLockLock(gIOUserClientOwnersLock);
1594
1595 newOwner = true;
1596 ret = kIOReturnSuccess;
1597
1598 if (!owners.next) {
1599 queue_init(&owners);
1600 } else {
1601 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1602 {
1603 if (task != owner->task) {
1604 continue;
1605 }
1606 newOwner = false;
1607 break;
1608 }
1609 }
1610 if (newOwner) {
1611 owner = IONew(IOUserClientOwner, 1);
1612 if (!owner) {
1613 ret = kIOReturnNoMemory;
1614 } else {
1615 owner->task = task;
1616 owner->uc = this;
1617 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1618 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1619 if (messageAppSuspended) {
1620 task_set_message_app_suspended(task, true);
1621 }
1622 }
1623 }
1624
1625 IOLockUnlock(gIOUserClientOwnersLock);
1626
1627 return ret;
1628 }
1629
1630 void
1631 IOUserClient::noMoreSenders(void)
1632 {
1633 IOUserClientOwner * owner;
1634 IOUserClientOwner * iter;
1635 queue_head_t * taskque;
1636 bool hasMessageAppSuspended;
1637
1638 IOLockLock(gIOUserClientOwnersLock);
1639
1640 if (owners.next) {
1641 while (!queue_empty(&owners)) {
1642 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1643 taskque = task_io_user_clients(owner->task);
1644 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1645 hasMessageAppSuspended = false;
1646 queue_iterate(taskque, iter, IOUserClientOwner *, taskLink) {
1647 hasMessageAppSuspended = iter->uc->messageAppSuspended;
1648 if (hasMessageAppSuspended) {
1649 break;
1650 }
1651 }
1652 task_set_message_app_suspended(owner->task, hasMessageAppSuspended);
1653 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1654 IODelete(owner, IOUserClientOwner, 1);
1655 }
1656 owners.next = owners.prev = NULL;
1657 }
1658
1659 IOLockUnlock(gIOUserClientOwnersLock);
1660 }
1661
1662
1663 extern "C" void
1664 iokit_task_app_suspended_changed(task_t task)
1665 {
1666 queue_head_t * taskque;
1667 IOUserClientOwner * owner;
1668 OSSet * set;
1669
1670 IOLockLock(gIOUserClientOwnersLock);
1671
1672 taskque = task_io_user_clients(task);
1673 set = NULL;
1674 queue_iterate(taskque, owner, IOUserClientOwner *, taskLink) {
1675 if (!owner->uc->messageAppSuspended) {
1676 continue;
1677 }
1678 if (!set) {
1679 set = OSSet::withCapacity(4);
1680 if (!set) {
1681 break;
1682 }
1683 }
1684 set->setObject(owner->uc);
1685 }
1686
1687 IOLockUnlock(gIOUserClientOwnersLock);
1688
1689 if (set) {
1690 set->iterateObjects(^bool (OSObject * obj) {
1691 IOUserClient * uc;
1692
1693 uc = (typeof(uc))obj;
1694 #if 0
1695 {
1696 OSString * str;
1697 str = IOCopyLogNameForPID(task_pid(task));
1698 IOLog("iokit_task_app_suspended_changed(%s) %s %d\n", str ? str->getCStringNoCopy() : "",
1699 uc->getName(), task_is_app_suspended(task));
1700 OSSafeReleaseNULL(str);
1701 }
1702 #endif
1703 uc->message(kIOMessageTaskAppSuspendedChange, NULL);
1704
1705 return false;
1706 });
1707 set->release();
1708 }
1709 }
1710
1711 extern "C" kern_return_t
1712 iokit_task_terminate(task_t task)
1713 {
1714 IOUserClientOwner * owner;
1715 IOUserClient * dead;
1716 IOUserClient * uc;
1717 queue_head_t * taskque;
1718
1719 IOLockLock(gIOUserClientOwnersLock);
1720
1721 taskque = task_io_user_clients(task);
1722 dead = NULL;
1723 while (!queue_empty(taskque)) {
1724 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1725 uc = owner->uc;
1726 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1727 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1728 if (queue_empty(&uc->owners)) {
1729 uc->retain();
1730 IOLog("destroying out of band connect for %s\n", uc->getName());
1731 // now using the uc queue head as a singly linked queue,
1732 // leaving .next as NULL to mark it empty
1733 uc->owners.next = NULL;
1734 uc->owners.prev = (queue_entry_t) dead;
1735 dead = uc;
1736 }
1737 IODelete(owner, IOUserClientOwner, 1);
1738 }
1739
1740 IOLockUnlock(gIOUserClientOwnersLock);
1741
1742 while (dead) {
1743 uc = dead;
1744 dead = (IOUserClient *)(void *) dead->owners.prev;
1745 uc->owners.prev = NULL;
1746 if (uc->sharedInstance || !uc->closed) {
1747 uc->clientDied();
1748 }
1749 uc->release();
1750 }
1751
1752 return KERN_SUCCESS;
1753 }
1754
1755 void
1756 IOUserClient::free()
1757 {
1758 if (mappings) {
1759 mappings->release();
1760 }
1761 if (lock) {
1762 IOLockFree(lock);
1763 }
1764
1765 IOStatisticsUnregisterCounter();
1766
1767 assert(!owners.next);
1768 assert(!owners.prev);
1769
1770 if (reserved) {
1771 IODelete(reserved, ExpansionData, 1);
1772 }
1773
1774 super::free();
1775 }
1776
1777 IOReturn
1778 IOUserClient::clientDied( void )
1779 {
1780 IOReturn ret = kIOReturnNotReady;
1781
1782 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed)) {
1783 ret = clientClose();
1784 }
1785
1786 return ret;
1787 }
1788
1789 IOReturn
1790 IOUserClient::clientClose( void )
1791 {
1792 return kIOReturnUnsupported;
1793 }
1794
1795 IOService *
1796 IOUserClient::getService( void )
1797 {
1798 return NULL;
1799 }
1800
1801 IOReturn
1802 IOUserClient::registerNotificationPort(
1803 mach_port_t /* port */,
1804 UInt32 /* type */,
1805 UInt32 /* refCon */)
1806 {
1807 return kIOReturnUnsupported;
1808 }
1809
1810 IOReturn
1811 IOUserClient::registerNotificationPort(
1812 mach_port_t port,
1813 UInt32 type,
1814 io_user_reference_t refCon)
1815 {
1816 return registerNotificationPort(port, type, (UInt32) refCon);
1817 }
1818
1819 IOReturn
1820 IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1821 semaphore_t * semaphore )
1822 {
1823 return kIOReturnUnsupported;
1824 }
1825
1826 IOReturn
1827 IOUserClient::connectClient( IOUserClient * /* client */ )
1828 {
1829 return kIOReturnUnsupported;
1830 }
1831
1832 IOReturn
1833 IOUserClient::clientMemoryForType( UInt32 type,
1834 IOOptionBits * options,
1835 IOMemoryDescriptor ** memory )
1836 {
1837 return kIOReturnUnsupported;
1838 }
1839
1840 #if !__LP64__
1841 IOMemoryMap *
1842 IOUserClient::mapClientMemory(
1843 IOOptionBits type,
1844 task_t task,
1845 IOOptionBits mapFlags,
1846 IOVirtualAddress atAddress )
1847 {
1848 return NULL;
1849 }
1850 #endif
1851
1852 IOMemoryMap *
1853 IOUserClient::mapClientMemory64(
1854 IOOptionBits type,
1855 task_t task,
1856 IOOptionBits mapFlags,
1857 mach_vm_address_t atAddress )
1858 {
1859 IOReturn err;
1860 IOOptionBits options = 0;
1861 IOMemoryDescriptor * memory = NULL;
1862 IOMemoryMap * map = NULL;
1863
1864 err = clientMemoryForType((UInt32) type, &options, &memory );
1865
1866 if (memory && (kIOReturnSuccess == err)) {
1867 FAKE_STACK_FRAME(getMetaClass());
1868
1869 options = (options & ~kIOMapUserOptionsMask)
1870 | (mapFlags & kIOMapUserOptionsMask);
1871 map = memory->createMappingInTask( task, atAddress, options );
1872 memory->release();
1873
1874 FAKE_STACK_FRAME_END();
1875 }
1876
1877 return map;
1878 }
1879
1880 IOReturn
1881 IOUserClient::exportObjectToClient(task_t task,
1882 OSObject *obj, io_object_t *clientObj)
1883 {
1884 mach_port_name_t name;
1885
1886 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1887
1888 *clientObj = (io_object_t)(uintptr_t) name;
1889
1890 if (obj) {
1891 obj->release();
1892 }
1893
1894 return kIOReturnSuccess;
1895 }
1896
1897 IOReturn
1898 IOUserClient::copyPortNameForObjectInTask(task_t task,
1899 OSObject *obj, mach_port_name_t * port_name)
1900 {
1901 mach_port_name_t name;
1902
1903 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );
1904
1905 *(mach_port_name_t *) port_name = name;
1906
1907 return kIOReturnSuccess;
1908 }
1909
1910 IOReturn
1911 IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
1912 OSObject **obj)
1913 {
1914 OSObject * object;
1915
1916 object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);
1917
1918 *obj = object;
1919
1920 return object ? kIOReturnSuccess : kIOReturnIPCError;
1921 }
1922
1923 IOReturn
1924 IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
1925 {
1926 return iokit_mod_send_right(task, port_name, delta);
1927 }
1928
1929 IOExternalMethod *
1930 IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1931 {
1932 return NULL;
1933 }
1934
1935 IOExternalAsyncMethod *
1936 IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1937 {
1938 return NULL;
1939 }
1940
1941 IOExternalTrap *
1942 IOUserClient::
1943 getExternalTrapForIndex(UInt32 index)
1944 {
1945 return NULL;
1946 }
1947
1948 #pragma clang diagnostic push
1949 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1950
1951 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1952 // functions can break clients of kexts implementing getExternalMethodForIndex()
1953 IOExternalMethod *
1954 IOUserClient::
1955 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1956 {
1957 IOExternalMethod *method = getExternalMethodForIndex(index);
1958
1959 if (method) {
1960 *targetP = (IOService *) method->object;
1961 }
1962
1963 return method;
1964 }
1965
1966 IOExternalAsyncMethod *
1967 IOUserClient::
1968 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1969 {
1970 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1971
1972 if (method) {
1973 *targetP = (IOService *) method->object;
1974 }
1975
1976 return method;
1977 }
1978
1979 IOExternalTrap *
1980 IOUserClient::
1981 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1982 {
1983 IOExternalTrap *trap = getExternalTrapForIndex(index);
1984
1985 if (trap) {
1986 *targetP = trap->object;
1987 }
1988
1989 return trap;
1990 }
1991 #pragma clang diagnostic pop
1992
1993 IOReturn
1994 IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1995 {
1996 mach_port_t port;
1997 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1998
1999 if (MACH_PORT_NULL != port) {
2000 iokit_release_port_send(port);
2001 }
2002
2003 return kIOReturnSuccess;
2004 }
2005
2006 IOReturn
2007 IOUserClient::releaseNotificationPort(mach_port_t port)
2008 {
2009 if (MACH_PORT_NULL != port) {
2010 iokit_release_port_send(port);
2011 }
2012
2013 return kIOReturnSuccess;
2014 }
2015
2016 IOReturn
2017 IOUserClient::sendAsyncResult(OSAsyncReference reference,
2018 IOReturn result, void *args[], UInt32 numArgs)
2019 {
2020 OSAsyncReference64 reference64;
2021 io_user_reference_t args64[kMaxAsyncArgs];
2022 unsigned int idx;
2023
2024 if (numArgs > kMaxAsyncArgs) {
2025 return kIOReturnMessageTooLarge;
2026 }
2027
2028 for (idx = 0; idx < kOSAsyncRef64Count; idx++) {
2029 reference64[idx] = REF64(reference[idx]);
2030 }
2031
2032 for (idx = 0; idx < numArgs; idx++) {
2033 args64[idx] = REF64(args[idx]);
2034 }
2035
2036 return sendAsyncResult64(reference64, result, args64, numArgs);
2037 }
2038
2039 IOReturn
2040 IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
2041 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2042 {
2043 return _sendAsyncResult64(reference, result, args, numArgs, options);
2044 }
2045
2046 IOReturn
2047 IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
2048 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
2049 {
2050 return _sendAsyncResult64(reference, result, args, numArgs, 0);
2051 }
2052
2053 IOReturn
2054 IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
2055 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
2056 {
2057 struct ReplyMsg {
2058 mach_msg_header_t msgHdr;
2059 union{
2060 struct{
2061 OSNotificationHeader notifyHdr;
2062 IOAsyncCompletionContent asyncContent;
2063 uint32_t args[kMaxAsyncArgs];
2064 } msg32;
2065 struct{
2066 OSNotificationHeader64 notifyHdr;
2067 IOAsyncCompletionContent asyncContent;
2068 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
2069 } msg64;
2070 } m;
2071 };
2072 ReplyMsg replyMsg;
2073 mach_port_t replyPort;
2074 kern_return_t kr;
2075
2076 // If no reply port, do nothing.
2077 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
2078 if (replyPort == MACH_PORT_NULL) {
2079 return kIOReturnSuccess;
2080 }
2081
2082 if (numArgs > kMaxAsyncArgs) {
2083 return kIOReturnMessageTooLarge;
2084 }
2085
2086 bzero(&replyMsg, sizeof(replyMsg));
2087 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
2088 0 /*local*/);
2089 replyMsg.msgHdr.msgh_remote_port = replyPort;
2090 replyMsg.msgHdr.msgh_local_port = NULL;
2091 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
2092 if (kIOUCAsync64Flag & reference[0]) {
2093 replyMsg.msgHdr.msgh_size =
2094 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
2095 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
2096 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2097 + numArgs * sizeof(io_user_reference_t);
2098 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
2099 /* Copy reference except for reference[0], which is left as 0 from the earlier bzero */
2100 bcopy(&reference[1], &replyMsg.m.msg64.notifyHdr.reference[1], sizeof(OSAsyncReference64) - sizeof(reference[0]));
2101
2102 replyMsg.m.msg64.asyncContent.result = result;
2103 if (numArgs) {
2104 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
2105 }
2106 } else {
2107 unsigned int idx;
2108
2109 replyMsg.msgHdr.msgh_size =
2110 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
2111 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
2112
2113 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
2114 + numArgs * sizeof(uint32_t);
2115 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
2116
2117 /* Skip reference[0] which is left as 0 from the earlier bzero */
2118 for (idx = 1; idx < kOSAsyncRefCount; idx++) {
2119 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
2120 }
2121
2122 replyMsg.m.msg32.asyncContent.result = result;
2123
2124 for (idx = 0; idx < numArgs; idx++) {
2125 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
2126 }
2127 }
2128
2129 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
2130 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
2131 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
2132 } else {
2133 /* Fail on full queue. */
2134 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
2135 replyMsg.msgHdr.msgh_size);
2136 }
2137 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0])) {
2138 reference[0] |= kIOUCAsyncErrorLoggedFlag;
2139 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
2140 }
2141 return kr;
2142 }
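// Note on the async reference layout used above: reference[0] carries the
// reply port, with its two low-order bits (masked off by kIOUCAsync0Flags)
// borrowed for flags. kIOUCAsync64Flag selects the 64-bit reply message
// format, and kIOUCAsyncErrorLoggedFlag records that a failed send has
// already been logged so repeated failures stay quiet.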
2143
2144
2145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2146
2147 extern "C" {
2148 #define CHECK(cls, obj, out) \
2149 cls * out; \
2150 if( !(out = OSDynamicCast( cls, obj))) \
2151 return( kIOReturnBadArgument )
2152
2153 #define CHECKLOCKED(cls, obj, out) \
2154 IOUserIterator * oIter; \
2155 cls * out; \
2156 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
2157 return (kIOReturnBadArgument); \
2158 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
2159 return (kIOReturnBadArgument)
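// CHECK(IOUserClient, connection, client) declares "IOUserClient * client"
// and bails out with kIOReturnBadArgument unless the incoming object really
// is an IOUserClient. CHECKLOCKED does the same through an IOUserIterator
// wrapper, also exposing the wrapper as "oIter" so callers can take its lock.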
2160
2161 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2162
2163 // Create a vm_map_copy_t or kalloc'ed data for memory
2164 // to be copied out. ipc will free after the copyout.
2165
2166 static kern_return_t
2167 copyoutkdata( const void * data, vm_size_t len,
2168 io_buf_ptr_t * buf )
2169 {
2170 kern_return_t err;
2171 vm_map_copy_t copy;
2172
2173 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2174 false /* src_destroy */, &copy);
2175
2176 assert( err == KERN_SUCCESS );
2177 if (err == KERN_SUCCESS) {
2178 *buf = (char *) copy;
2179 }
2180
2181 return err;
2182 }
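// Illustrative sketch (kept out of the build): the property getters below all
// follow this pattern -- serialize into an OSSerialize buffer, then hand the
// bytes to copyoutkdata(), which wraps them in a vm_map_copy_t that the ipc
// layer frees after the copyout. The helper name here is made up for the
// example only.
#if 0
static kern_return_t
example_copyout_serialized(OSObject * obj, io_buf_ptr_t * buf, mach_msg_type_number_t * bufCnt)
{
kern_return_t err;
OSSerialize * s = OSSerialize::withCapacity(4096);
if (!s) {
return kIOReturnNoMemory;
}
if (obj->serialize(s)) {
*bufCnt = s->getLength();
err = copyoutkdata(s->text(), s->getLength(), buf);
} else {
err = kIOReturnUnsupported;
}
s->release();
return err;
}
#endif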
2183
2184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2185
2186 /* Routine io_server_version */
2187 kern_return_t
2188 is_io_server_version(
2189 mach_port_t master_port,
2190 uint64_t *version)
2191 {
2192 *version = IOKIT_SERVER_VERSION;
2193 return kIOReturnSuccess;
2194 }
2195
2196 /* Routine io_object_get_class */
2197 kern_return_t
2198 is_io_object_get_class(
2199 io_object_t object,
2200 io_name_t className )
2201 {
2202 const OSMetaClass* my_obj = NULL;
2203
2204 if (!object) {
2205 return kIOReturnBadArgument;
2206 }
2207
2208 my_obj = object->getMetaClass();
2209 if (!my_obj) {
2210 return kIOReturnNotFound;
2211 }
2212
2213 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
2214
2215 return kIOReturnSuccess;
2216 }
2217
2218 /* Routine io_object_get_superclass */
2219 kern_return_t
2220 is_io_object_get_superclass(
2221 mach_port_t master_port,
2222 io_name_t obj_name,
2223 io_name_t class_name)
2224 {
2225 IOReturn ret;
2226 const OSMetaClass * meta;
2227 const OSMetaClass * super;
2228 const OSSymbol * name;
2229 const char * cstr;
2230
2231 if (!obj_name || !class_name) {
2232 return kIOReturnBadArgument;
2233 }
2234 if (master_port != master_device_port) {
2235 return kIOReturnNotPrivileged;
2236 }
2237
2238 ret = kIOReturnNotFound;
2239 meta = NULL;
2240 do{
2241 name = OSSymbol::withCString(obj_name);
2242 if (!name) {
2243 break;
2244 }
2245 meta = OSMetaClass::copyMetaClassWithName(name);
2246 if (!meta) {
2247 break;
2248 }
2249 super = meta->getSuperClass();
2250 if (!super) {
2251 break;
2252 }
2253 cstr = super->getClassName();
2254 if (!cstr) {
2255 break;
2256 }
2257 strlcpy(class_name, cstr, sizeof(io_name_t));
2258 ret = kIOReturnSuccess;
2259 }while (false);
2260
2261 OSSafeReleaseNULL(name);
2262 if (meta) {
2263 meta->releaseMetaClass();
2264 }
2265
2266 return ret;
2267 }
2268
2269 /* Routine io_object_get_bundle_identifier */
2270 kern_return_t
2271 is_io_object_get_bundle_identifier(
2272 mach_port_t master_port,
2273 io_name_t obj_name,
2274 io_name_t bundle_name)
2275 {
2276 IOReturn ret;
2277 const OSMetaClass * meta;
2278 const OSSymbol * name;
2279 const OSSymbol * identifier;
2280 const char * cstr;
2281
2282 if (!obj_name || !bundle_name) {
2283 return kIOReturnBadArgument;
2284 }
2285 if (master_port != master_device_port) {
2286 return kIOReturnNotPrivileged;
2287 }
2288
2289 ret = kIOReturnNotFound;
2290 meta = NULL;
2291 do{
2292 name = OSSymbol::withCString(obj_name);
2293 if (!name) {
2294 break;
2295 }
2296 meta = OSMetaClass::copyMetaClassWithName(name);
2297 if (!meta) {
2298 break;
2299 }
2300 identifier = meta->getKmodName();
2301 if (!identifier) {
2302 break;
2303 }
2304 cstr = identifier->getCStringNoCopy();
2305 if (!cstr) {
2306 break;
2307 }
2308 strlcpy(bundle_name, cstr, sizeof(io_name_t));
2309 ret = kIOReturnSuccess;
2310 }while (false);
2311
2312 OSSafeReleaseNULL(name);
2313 if (meta) {
2314 meta->releaseMetaClass();
2315 }
2316
2317 return ret;
2318 }
2319
2320 /* Routine io_object_conforms_to */
2321 kern_return_t
2322 is_io_object_conforms_to(
2323 io_object_t object,
2324 io_name_t className,
2325 boolean_t *conforms )
2326 {
2327 if (!object) {
2328 return kIOReturnBadArgument;
2329 }
2330
2331 *conforms = (NULL != object->metaCast( className ));
2332
2333 return kIOReturnSuccess;
2334 }
2335
2336 /* Routine io_object_get_retain_count */
2337 kern_return_t
2338 is_io_object_get_retain_count(
2339 io_object_t object,
2340 uint32_t *retainCount )
2341 {
2342 if (!object) {
2343 return kIOReturnBadArgument;
2344 }
2345
2346 *retainCount = object->getRetainCount();
2347 return kIOReturnSuccess;
2348 }
2349
2350 /* Routine io_iterator_next */
2351 kern_return_t
2352 is_io_iterator_next(
2353 io_object_t iterator,
2354 io_object_t *object )
2355 {
2356 IOReturn ret;
2357 OSObject * obj;
2358 OSIterator * iter;
2359 IOUserIterator * uiter;
2360
2361 if ((uiter = OSDynamicCast(IOUserIterator, iterator))) {
2362 obj = uiter->copyNextObject();
2363 } else if ((iter = OSDynamicCast(OSIterator, iterator))) {
2364 obj = iter->getNextObject();
2365 if (obj) {
2366 obj->retain();
2367 }
2368 } else {
2369 return kIOReturnBadArgument;
2370 }
2371
2372 if (obj) {
2373 *object = obj;
2374 ret = kIOReturnSuccess;
2375 } else {
2376 ret = kIOReturnNoDevice;
2377 }
2378
2379 return ret;
2380 }
2381
2382 /* Routine io_iterator_reset */
2383 kern_return_t
2384 is_io_iterator_reset(
2385 io_object_t iterator )
2386 {
2387 CHECK( OSIterator, iterator, iter );
2388
2389 iter->reset();
2390
2391 return kIOReturnSuccess;
2392 }
2393
2394 /* Routine io_iterator_is_valid */
2395 kern_return_t
2396 is_io_iterator_is_valid(
2397 io_object_t iterator,
2398 boolean_t *is_valid )
2399 {
2400 CHECK( OSIterator, iterator, iter );
2401
2402 *is_valid = iter->isValid();
2403
2404 return kIOReturnSuccess;
2405 }
2406
2407
2408 static kern_return_t
2409 internal_io_service_match_property_table(
2410 io_service_t _service,
2411 const char * matching,
2412 mach_msg_type_number_t matching_size,
2413 boolean_t *matches)
2414 {
2415 CHECK( IOService, _service, service );
2416
2417 kern_return_t kr;
2418 OSObject * obj;
2419 OSDictionary * dict;
2420
2421 assert(matching_size);
2422 obj = OSUnserializeXML(matching, matching_size);
2423
2424 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2425 *matches = service->passiveMatch( dict );
2426 kr = kIOReturnSuccess;
2427 } else {
2428 kr = kIOReturnBadArgument;
2429 }
2430
2431 if (obj) {
2432 obj->release();
2433 }
2434
2435 return kr;
2436 }
2437
2438 /* Routine io_service_match_property_table */
2439 kern_return_t
2440 is_io_service_match_property_table(
2441 io_service_t service,
2442 io_string_t matching,
2443 boolean_t *matches )
2444 {
2445 return kIOReturnUnsupported;
2446 }
2447
2448
2449 /* Routine io_service_match_property_table_ool */
2450 kern_return_t
2451 is_io_service_match_property_table_ool(
2452 io_object_t service,
2453 io_buf_ptr_t matching,
2454 mach_msg_type_number_t matchingCnt,
2455 kern_return_t *result,
2456 boolean_t *matches )
2457 {
2458 kern_return_t kr;
2459 vm_offset_t data;
2460 vm_map_offset_t map_data;
2461
2462 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2463 data = CAST_DOWN(vm_offset_t, map_data);
2464
2465 if (KERN_SUCCESS == kr) {
2466 // must return success after vm_map_copyout() succeeds
2467 *result = internal_io_service_match_property_table(service,
2468 (const char *)data, matchingCnt, matches );
2469 vm_deallocate( kernel_map, data, matchingCnt );
2470 }
2471
2472 return kr;
2473 }
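// The out-of-line (_ool) wrappers here share one convention: once
// vm_map_copyout() has succeeded, the routine must return KERN_SUCCESS to MIG
// and report the real status through *result, and it vm_deallocate()s the
// copied-in data itself before returning.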
2474
2475 /* Routine io_service_match_property_table_bin */
2476 kern_return_t
2477 is_io_service_match_property_table_bin(
2478 io_object_t service,
2479 io_struct_inband_t matching,
2480 mach_msg_type_number_t matchingCnt,
2481 boolean_t *matches)
2482 {
2483 return internal_io_service_match_property_table(service, matching, matchingCnt, matches);
2484 }
2485
2486 static kern_return_t
2487 internal_io_service_get_matching_services(
2488 mach_port_t master_port,
2489 const char * matching,
2490 mach_msg_type_number_t matching_size,
2491 io_iterator_t *existing )
2492 {
2493 kern_return_t kr;
2494 OSObject * obj;
2495 OSDictionary * dict;
2496
2497 if (master_port != master_device_port) {
2498 return kIOReturnNotPrivileged;
2499 }
2500
2501 assert(matching_size);
2502 obj = OSUnserializeXML(matching, matching_size);
2503
2504 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2505 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2506 kr = kIOReturnSuccess;
2507 } else {
2508 kr = kIOReturnBadArgument;
2509 }
2510
2511 if (obj) {
2512 obj->release();
2513 }
2514
2515 return kr;
2516 }
2517
2518 /* Routine io_service_get_matching_services */
2519 kern_return_t
2520 is_io_service_get_matching_services(
2521 mach_port_t master_port,
2522 io_string_t matching,
2523 io_iterator_t *existing )
2524 {
2525 return kIOReturnUnsupported;
2526 }
2527
2528 /* Routine io_service_get_matching_services_ool */
2529 kern_return_t
2530 is_io_service_get_matching_services_ool(
2531 mach_port_t master_port,
2532 io_buf_ptr_t matching,
2533 mach_msg_type_number_t matchingCnt,
2534 kern_return_t *result,
2535 io_object_t *existing )
2536 {
2537 kern_return_t kr;
2538 vm_offset_t data;
2539 vm_map_offset_t map_data;
2540
2541 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2542 data = CAST_DOWN(vm_offset_t, map_data);
2543
2544 if (KERN_SUCCESS == kr) {
2545 // must return success after vm_map_copyout() succeeds
2546 // and mig will copy out objects on success
2547 *existing = NULL;
2548 *result = internal_io_service_get_matching_services(master_port,
2549 (const char *) data, matchingCnt, existing);
2550 vm_deallocate( kernel_map, data, matchingCnt );
2551 }
2552
2553 return kr;
2554 }
2555
2556 /* Routine io_service_get_matching_services_bin */
2557 kern_return_t
2558 is_io_service_get_matching_services_bin(
2559 mach_port_t master_port,
2560 io_struct_inband_t matching,
2561 mach_msg_type_number_t matchingCnt,
2562 io_object_t *existing)
2563 {
2564 return internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing);
2565 }
2566
2567
2568 static kern_return_t
2569 internal_io_service_get_matching_service(
2570 mach_port_t master_port,
2571 const char * matching,
2572 mach_msg_type_number_t matching_size,
2573 io_service_t *service )
2574 {
2575 kern_return_t kr;
2576 OSObject * obj;
2577 OSDictionary * dict;
2578
2579 if (master_port != master_device_port) {
2580 return kIOReturnNotPrivileged;
2581 }
2582
2583 assert(matching_size);
2584 obj = OSUnserializeXML(matching, matching_size);
2585
2586 if ((dict = OSDynamicCast( OSDictionary, obj))) {
2587 *service = IOService::copyMatchingService( dict );
2588 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2589 } else {
2590 kr = kIOReturnBadArgument;
2591 }
2592
2593 if (obj) {
2594 obj->release();
2595 }
2596
2597 return kr;
2598 }
2599
2600 /* Routine io_service_get_matching_service */
2601 kern_return_t
2602 is_io_service_get_matching_service(
2603 mach_port_t master_port,
2604 io_string_t matching,
2605 io_service_t *service )
2606 {
2607 return kIOReturnUnsupported;
2608 }
2609
2610 /* Routine io_service_get_matching_service_ool */
2611 kern_return_t
2612 is_io_service_get_matching_service_ool(
2613 mach_port_t master_port,
2614 io_buf_ptr_t matching,
2615 mach_msg_type_number_t matchingCnt,
2616 kern_return_t *result,
2617 io_object_t *service )
2618 {
2619 kern_return_t kr;
2620 vm_offset_t data;
2621 vm_map_offset_t map_data;
2622
2623 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2624 data = CAST_DOWN(vm_offset_t, map_data);
2625
2626 if (KERN_SUCCESS == kr) {
2627 // must return success after vm_map_copyout() succeeds
2628 // and mig will copy out objects on success
2629 *service = NULL;
2630 *result = internal_io_service_get_matching_service(master_port,
2631 (const char *) data, matchingCnt, service );
2632 vm_deallocate( kernel_map, data, matchingCnt );
2633 }
2634
2635 return kr;
2636 }
2637
2638 /* Routine io_service_get_matching_service_bin */
2639 kern_return_t
2640 is_io_service_get_matching_service_bin(
2641 mach_port_t master_port,
2642 io_struct_inband_t matching,
2643 mach_msg_type_number_t matchingCnt,
2644 io_object_t *service)
2645 {
2646 return internal_io_service_get_matching_service(master_port, matching, matchingCnt, service);
2647 }
2648
2649 static kern_return_t
2650 internal_io_service_add_notification(
2651 mach_port_t master_port,
2652 io_name_t notification_type,
2653 const char * matching,
2654 size_t matching_size,
2655 mach_port_t port,
2656 void * reference,
2657 vm_size_t referenceSize,
2658 bool client64,
2659 io_object_t * notification )
2660 {
2661 IOServiceUserNotification * userNotify = NULL;
2662 IONotifier * notify = NULL;
2663 const OSSymbol * sym;
2664 OSDictionary * dict;
2665 IOReturn err;
2666 unsigned long int userMsgType;
2667
2668 if (master_port != master_device_port) {
2669 return kIOReturnNotPrivileged;
2670 }
2671
2672 do {
2673 err = kIOReturnNoResources;
2674
2675 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
2676 return kIOReturnMessageTooLarge;
2677 }
2678
2679 if (!(sym = OSSymbol::withCString( notification_type ))) {
2680 err = kIOReturnNoResources;
2681 }
2682
2683 assert(matching_size);
2684 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2685 if (!dict) {
2686 err = kIOReturnBadArgument;
2687 continue;
2688 }
2689
2690 if ((sym == gIOPublishNotification)
2691 || (sym == gIOFirstPublishNotification)) {
2692 userMsgType = kIOServicePublishNotificationType;
2693 } else if ((sym == gIOMatchedNotification)
2694 || (sym == gIOFirstMatchNotification)) {
2695 userMsgType = kIOServiceMatchedNotificationType;
2696 } else if ((sym == gIOTerminatedNotification)
2697 || (sym == gIOWillTerminateNotification)) {
2698 userMsgType = kIOServiceTerminatedNotificationType;
2699 } else {
2700 userMsgType = kLastIOKitNotificationType;
2701 }
2702
2703 userNotify = new IOServiceUserNotification;
2704
2705 if (userNotify && !userNotify->init( port, userMsgType,
2706 reference, referenceSize, client64)) {
2707 userNotify->release();
2708 userNotify = NULL;
2709 }
2710 if (!userNotify) {
2711 continue;
2712 }
2713
2714 notify = IOService::addMatchingNotification( sym, dict,
2715 &userNotify->_handler, userNotify );
2716 if (notify) {
2717 *notification = userNotify;
2718 userNotify->setNotification( notify );
2719 err = kIOReturnSuccess;
2720 } else {
2721 err = kIOReturnUnsupported;
2722 }
2723 } while (false);
2724
2725 if ((kIOReturnSuccess != err) && userNotify) {
2726 userNotify->invalidatePort();
2727 userNotify->release();
2728 userNotify = NULL;
2729 }
2730
2731 if (sym) {
2732 sym->release();
2733 }
2734 if (dict) {
2735 dict->release();
2736 }
2737
2738 return err;
2739 }
2740
2741
2742 /* Routine io_service_add_notification */
2743 kern_return_t
2744 is_io_service_add_notification(
2745 mach_port_t master_port,
2746 io_name_t notification_type,
2747 io_string_t matching,
2748 mach_port_t port,
2749 io_async_ref_t reference,
2750 mach_msg_type_number_t referenceCnt,
2751 io_object_t * notification )
2752 {
2753 return kIOReturnUnsupported;
2754 }
2755
2756 /* Routine io_service_add_notification_64 */
2757 kern_return_t
2758 is_io_service_add_notification_64(
2759 mach_port_t master_port,
2760 io_name_t notification_type,
2761 io_string_t matching,
2762 mach_port_t wake_port,
2763 io_async_ref64_t reference,
2764 mach_msg_type_number_t referenceCnt,
2765 io_object_t *notification )
2766 {
2767 return kIOReturnUnsupported;
2768 }
2769
2770 /* Routine io_service_add_notification_bin */
2771 kern_return_t
2772 is_io_service_add_notification_bin
2773 (
2774 mach_port_t master_port,
2775 io_name_t notification_type,
2776 io_struct_inband_t matching,
2777 mach_msg_type_number_t matchingCnt,
2778 mach_port_t wake_port,
2779 io_async_ref_t reference,
2780 mach_msg_type_number_t referenceCnt,
2781 io_object_t *notification)
2782 {
2783 io_async_ref_t zreference;
2784
2785 if (referenceCnt > ASYNC_REF_COUNT) {
2786 return kIOReturnBadArgument;
2787 }
2788 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2789 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2790
2791 return internal_io_service_add_notification(master_port, notification_type,
2792 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2793 false, notification);
2794 }
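// The zreference handling above (repeated in the other *_notification and
// *_interest_notification entry points) bounds the caller-supplied reference
// count against the fixed ASYNC_REF_COUNT/ASYNC_REF64_COUNT sizes, copies only
// that many words, and zero-fills the remainder, so a fully initialized
// fixed-size reference is passed down regardless of what user space provided.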
2795
2796 /* Routine io_service_add_notification_bin_64 */
2797 kern_return_t
2798 is_io_service_add_notification_bin_64
2799 (
2800 mach_port_t master_port,
2801 io_name_t notification_type,
2802 io_struct_inband_t matching,
2803 mach_msg_type_number_t matchingCnt,
2804 mach_port_t wake_port,
2805 io_async_ref64_t reference,
2806 mach_msg_type_number_t referenceCnt,
2807 io_object_t *notification)
2808 {
2809 io_async_ref64_t zreference;
2810
2811 if (referenceCnt > ASYNC_REF64_COUNT) {
2812 return kIOReturnBadArgument;
2813 }
2814 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2815 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2816
2817 return internal_io_service_add_notification(master_port, notification_type,
2818 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2819 true, notification);
2820 }
2821
2822 static kern_return_t
2823 internal_io_service_add_notification_ool(
2824 mach_port_t master_port,
2825 io_name_t notification_type,
2826 io_buf_ptr_t matching,
2827 mach_msg_type_number_t matchingCnt,
2828 mach_port_t wake_port,
2829 void * reference,
2830 vm_size_t referenceSize,
2831 bool client64,
2832 kern_return_t *result,
2833 io_object_t *notification )
2834 {
2835 kern_return_t kr;
2836 vm_offset_t data;
2837 vm_map_offset_t map_data;
2838
2839 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2840 data = CAST_DOWN(vm_offset_t, map_data);
2841
2842 if (KERN_SUCCESS == kr) {
2843 // must return success after vm_map_copyout() succeeds
2844 // and mig will copy out objects on success
2845 *notification = NULL;
2846 *result = internal_io_service_add_notification( master_port, notification_type,
2847 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2848 vm_deallocate( kernel_map, data, matchingCnt );
2849 }
2850
2851 return kr;
2852 }
2853
2854 /* Routine io_service_add_notification_ool */
2855 kern_return_t
2856 is_io_service_add_notification_ool(
2857 mach_port_t master_port,
2858 io_name_t notification_type,
2859 io_buf_ptr_t matching,
2860 mach_msg_type_number_t matchingCnt,
2861 mach_port_t wake_port,
2862 io_async_ref_t reference,
2863 mach_msg_type_number_t referenceCnt,
2864 kern_return_t *result,
2865 io_object_t *notification )
2866 {
2867 io_async_ref_t zreference;
2868
2869 if (referenceCnt > ASYNC_REF_COUNT) {
2870 return kIOReturnBadArgument;
2871 }
2872 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2873 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2874
2875 return internal_io_service_add_notification_ool(master_port, notification_type,
2876 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
2877 false, result, notification);
2878 }
2879
2880 /* Routine io_service_add_notification_ool_64 */
2881 kern_return_t
2882 is_io_service_add_notification_ool_64(
2883 mach_port_t master_port,
2884 io_name_t notification_type,
2885 io_buf_ptr_t matching,
2886 mach_msg_type_number_t matchingCnt,
2887 mach_port_t wake_port,
2888 io_async_ref64_t reference,
2889 mach_msg_type_number_t referenceCnt,
2890 kern_return_t *result,
2891 io_object_t *notification )
2892 {
2893 io_async_ref64_t zreference;
2894
2895 if (referenceCnt > ASYNC_REF64_COUNT) {
2896 return kIOReturnBadArgument;
2897 }
2898 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2899 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
2900
2901 return internal_io_service_add_notification_ool(master_port, notification_type,
2902 matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
2903 true, result, notification);
2904 }
2905
2906 /* Routine io_service_add_notification_old */
2907 kern_return_t
2908 is_io_service_add_notification_old(
2909 mach_port_t master_port,
2910 io_name_t notification_type,
2911 io_string_t matching,
2912 mach_port_t port,
2913 // for binary compatibility reasons, this must be natural_t for ILP32
2914 natural_t ref,
2915 io_object_t * notification )
2916 {
2917 return is_io_service_add_notification( master_port, notification_type,
2918 matching, port, &ref, 1, notification );
2919 }
2920
2921
2922 static kern_return_t
2923 internal_io_service_add_interest_notification(
2924 io_object_t _service,
2925 io_name_t type_of_interest,
2926 mach_port_t port,
2927 void * reference,
2928 vm_size_t referenceSize,
2929 bool client64,
2930 io_object_t * notification )
2931 {
2932 IOServiceMessageUserNotification * userNotify = NULL;
2933 IONotifier * notify = NULL;
2934 const OSSymbol * sym;
2935 IOReturn err;
2936
2937 CHECK( IOService, _service, service );
2938
2939 err = kIOReturnNoResources;
2940 if ((sym = OSSymbol::withCString( type_of_interest ))) {
2941 do {
2942 userNotify = new IOServiceMessageUserNotification;
2943
2944 if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2945 reference, referenceSize,
2946 kIOUserNotifyMaxMessageSize,
2947 client64 )) {
2948 userNotify->release();
2949 userNotify = NULL;
2950 }
2951 if (!userNotify) {
2952 continue;
2953 }
2954
2955 notify = service->registerInterest( sym,
2956 &userNotify->_handler, userNotify );
2957 if (notify) {
2958 *notification = userNotify;
2959 userNotify->setNotification( notify );
2960 err = kIOReturnSuccess;
2961 } else {
2962 err = kIOReturnUnsupported;
2963 }
2964
2965 sym->release();
2966 } while (false);
2967 }
2968
2969 if ((kIOReturnSuccess != err) && userNotify) {
2970 userNotify->invalidatePort();
2971 userNotify->release();
2972 userNotify = NULL;
2973 }
2974
2975 return err;
2976 }
2977
2978 /* Routine io_service_add_interest_notification */
2979 kern_return_t
2980 is_io_service_add_interest_notification(
2981 io_object_t service,
2982 io_name_t type_of_interest,
2983 mach_port_t port,
2984 io_async_ref_t reference,
2985 mach_msg_type_number_t referenceCnt,
2986 io_object_t * notification )
2987 {
2988 io_async_ref_t zreference;
2989
2990 if (referenceCnt > ASYNC_REF_COUNT) {
2991 return kIOReturnBadArgument;
2992 }
2993 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
2994 bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
2995
2996 return internal_io_service_add_interest_notification(service, type_of_interest,
2997 port, &zreference[0], sizeof(io_async_ref_t), false, notification);
2998 }
2999
3000 /* Routine io_service_add_interest_notification_64 */
3001 kern_return_t
3002 is_io_service_add_interest_notification_64(
3003 io_object_t service,
3004 io_name_t type_of_interest,
3005 mach_port_t wake_port,
3006 io_async_ref64_t reference,
3007 mach_msg_type_number_t referenceCnt,
3008 io_object_t *notification )
3009 {
3010 io_async_ref64_t zreference;
3011
3012 if (referenceCnt > ASYNC_REF64_COUNT) {
3013 return kIOReturnBadArgument;
3014 }
3015 bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
3016 bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
3017
3018 return internal_io_service_add_interest_notification(service, type_of_interest,
3019 wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
3020 }
3021
3022
3023 /* Routine io_service_acknowledge_notification */
3024 kern_return_t
3025 is_io_service_acknowledge_notification(
3026 io_object_t _service,
3027 natural_t notify_ref,
3028 natural_t response )
3029 {
3030 CHECK( IOService, _service, service );
3031
3032 return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
3033 (IOOptionBits) response );
3034 }
3035
3036 /* Routine io_connect_get_notification_semaphore */
3037 kern_return_t
3038 is_io_connect_get_notification_semaphore(
3039 io_connect_t connection,
3040 natural_t notification_type,
3041 semaphore_t *semaphore )
3042 {
3043 CHECK( IOUserClient, connection, client );
3044
3045 IOStatisticsClientCall();
3046 return client->getNotificationSemaphore((UInt32) notification_type,
3047 semaphore );
3048 }
3049
3050 /* Routine io_registry_get_root_entry */
3051 kern_return_t
3052 is_io_registry_get_root_entry(
3053 mach_port_t master_port,
3054 io_object_t *root )
3055 {
3056 IORegistryEntry * entry;
3057
3058 if (master_port != master_device_port) {
3059 return kIOReturnNotPrivileged;
3060 }
3061
3062 entry = IORegistryEntry::getRegistryRoot();
3063 if (entry) {
3064 entry->retain();
3065 }
3066 *root = entry;
3067
3068 return kIOReturnSuccess;
3069 }
3070
3071 /* Routine io_registry_create_iterator */
3072 kern_return_t
3073 is_io_registry_create_iterator(
3074 mach_port_t master_port,
3075 io_name_t plane,
3076 uint32_t options,
3077 io_object_t *iterator )
3078 {
3079 if (master_port != master_device_port) {
3080 return kIOReturnNotPrivileged;
3081 }
3082
3083 *iterator = IOUserIterator::withIterator(
3084 IORegistryIterator::iterateOver(
3085 IORegistryEntry::getPlane( plane ), options ));
3086
3087 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3088 }
3089
3090 /* Routine io_registry_entry_create_iterator */
3091 kern_return_t
3092 is_io_registry_entry_create_iterator(
3093 io_object_t registry_entry,
3094 io_name_t plane,
3095 uint32_t options,
3096 io_object_t *iterator )
3097 {
3098 CHECK( IORegistryEntry, registry_entry, entry );
3099
3100 *iterator = IOUserIterator::withIterator(
3101 IORegistryIterator::iterateOver( entry,
3102 IORegistryEntry::getPlane( plane ), options ));
3103
3104 return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
3105 }
3106
3107 /* Routine io_registry_iterator_enter */
3108 kern_return_t
3109 is_io_registry_iterator_enter_entry(
3110 io_object_t iterator )
3111 {
3112 CHECKLOCKED( IORegistryIterator, iterator, iter );
3113
3114 IOLockLock(oIter->lock);
3115 iter->enterEntry();
3116 IOLockUnlock(oIter->lock);
3117
3118 return kIOReturnSuccess;
3119 }
3120
3121 /* Routine io_registry_iterator_exit */
3122 kern_return_t
3123 is_io_registry_iterator_exit_entry(
3124 io_object_t iterator )
3125 {
3126 bool didIt;
3127
3128 CHECKLOCKED( IORegistryIterator, iterator, iter );
3129
3130 IOLockLock(oIter->lock);
3131 didIt = iter->exitEntry();
3132 IOLockUnlock(oIter->lock);
3133
3134 return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
3135 }
3136
3137 /* Routine io_registry_entry_from_path */
3138 kern_return_t
3139 is_io_registry_entry_from_path(
3140 mach_port_t master_port,
3141 io_string_t path,
3142 io_object_t *registry_entry )
3143 {
3144 IORegistryEntry * entry;
3145
3146 if (master_port != master_device_port) {
3147 return kIOReturnNotPrivileged;
3148 }
3149
3150 entry = IORegistryEntry::fromPath( path );
3151
3152 *registry_entry = entry;
3153
3154 return kIOReturnSuccess;
3155 }
3156
3157
3158 /* Routine io_registry_entry_from_path_ool */
3159 kern_return_t
3160 is_io_registry_entry_from_path_ool(
3161 mach_port_t master_port,
3162 io_string_inband_t path,
3163 io_buf_ptr_t path_ool,
3164 mach_msg_type_number_t path_oolCnt,
3165 kern_return_t *result,
3166 io_object_t *registry_entry)
3167 {
3168 IORegistryEntry * entry;
3169 vm_map_offset_t map_data;
3170 const char * cpath;
3171 IOReturn res;
3172 kern_return_t err;
3173
3174 if (master_port != master_device_port) {
3175 return kIOReturnNotPrivileged;
3176 }
3177
3178 map_data = 0;
3179 entry = NULL;
3180 res = err = KERN_SUCCESS;
3181 if (path[0]) {
3182 cpath = path;
3183 } else {
3184 if (!path_oolCnt) {
3185 return kIOReturnBadArgument;
3186 }
3187 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
3188 return kIOReturnMessageTooLarge;
3189 }
3190
3191 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
3192 if (KERN_SUCCESS == err) {
3193 // must return success to mig after vm_map_copyout() succeeds, so result is actual
3194 cpath = CAST_DOWN(const char *, map_data);
3195 if (cpath[path_oolCnt - 1]) {
3196 res = kIOReturnBadArgument;
3197 }
3198 }
3199 }
3200
3201 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
3202 entry = IORegistryEntry::fromPath(cpath);
3203 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
3204 }
3205
3206 if (map_data) {
3207 vm_deallocate(kernel_map, map_data, path_oolCnt);
3208 }
3209
3210 if (KERN_SUCCESS != err) {
3211 res = err;
3212 }
3213 *registry_entry = entry;
3214 *result = res;
3215
3216 return err;
3217 }
3218
3219
3220 /* Routine io_registry_entry_in_plane */
3221 kern_return_t
3222 is_io_registry_entry_in_plane(
3223 io_object_t registry_entry,
3224 io_name_t plane,
3225 boolean_t *inPlane )
3226 {
3227 CHECK( IORegistryEntry, registry_entry, entry );
3228
3229 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
3230
3231 return kIOReturnSuccess;
3232 }
3233
3234
3235 /* Routine io_registry_entry_get_path */
3236 kern_return_t
3237 is_io_registry_entry_get_path(
3238 io_object_t registry_entry,
3239 io_name_t plane,
3240 io_string_t path )
3241 {
3242 int length;
3243 CHECK( IORegistryEntry, registry_entry, entry );
3244
3245 length = sizeof(io_string_t);
3246 if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
3247 return kIOReturnSuccess;
3248 } else {
3249 return kIOReturnBadArgument;
3250 }
3251 }
3252
3253 /* Routine io_registry_entry_get_path_ool */
3254 kern_return_t
3255 is_io_registry_entry_get_path_ool(
3256 io_object_t registry_entry,
3257 io_name_t plane,
3258 io_string_inband_t path,
3259 io_buf_ptr_t *path_ool,
3260 mach_msg_type_number_t *path_oolCnt)
3261 {
3262 enum { kMaxPath = 16384 };
3263 IOReturn err;
3264 int length;
3265 char * buf;
3266
3267 CHECK( IORegistryEntry, registry_entry, entry );
3268
3269 *path_ool = NULL;
3270 *path_oolCnt = 0;
3271 length = sizeof(io_string_inband_t);
3272 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
3273 err = kIOReturnSuccess;
3274 } else {
3275 length = kMaxPath;
3276 buf = IONew(char, length);
3277 if (!buf) {
3278 err = kIOReturnNoMemory;
3279 } else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
3280 err = kIOReturnError;
3281 } else {
3282 *path_oolCnt = length;
3283 err = copyoutkdata(buf, length, path_ool);
3284 }
3285 if (buf) {
3286 IODelete(buf, char, kMaxPath);
3287 }
3288 }
3289
3290 return err;
3291 }
3292
3293
3294 /* Routine io_registry_entry_get_name */
3295 kern_return_t
3296 is_io_registry_entry_get_name(
3297 io_object_t registry_entry,
3298 io_name_t name )
3299 {
3300 CHECK( IORegistryEntry, registry_entry, entry );
3301
3302 strncpy( name, entry->getName(), sizeof(io_name_t));
3303
3304 return kIOReturnSuccess;
3305 }
3306
3307 /* Routine io_registry_entry_get_name_in_plane */
3308 kern_return_t
3309 is_io_registry_entry_get_name_in_plane(
3310 io_object_t registry_entry,
3311 io_name_t planeName,
3312 io_name_t name )
3313 {
3314 const IORegistryPlane * plane;
3315 CHECK( IORegistryEntry, registry_entry, entry );
3316
3317 if (planeName[0]) {
3318 plane = IORegistryEntry::getPlane( planeName );
3319 } else {
3320 plane = NULL;
3321 }
3322
3323 strncpy( name, entry->getName( plane), sizeof(io_name_t));
3324
3325 return kIOReturnSuccess;
3326 }
3327
3328 /* Routine io_registry_entry_get_location_in_plane */
3329 kern_return_t
3330 is_io_registry_entry_get_location_in_plane(
3331 io_object_t registry_entry,
3332 io_name_t planeName,
3333 io_name_t location )
3334 {
3335 const IORegistryPlane * plane;
3336 CHECK( IORegistryEntry, registry_entry, entry );
3337
3338 if (planeName[0]) {
3339 plane = IORegistryEntry::getPlane( planeName );
3340 } else {
3341 plane = NULL;
3342 }
3343
3344 const char * cstr = entry->getLocation( plane );
3345
3346 if (cstr) {
3347 strncpy( location, cstr, sizeof(io_name_t));
3348 return kIOReturnSuccess;
3349 } else {
3350 return kIOReturnNotFound;
3351 }
3352 }
3353
3354 /* Routine io_registry_entry_get_registry_entry_id */
3355 kern_return_t
3356 is_io_registry_entry_get_registry_entry_id(
3357 io_object_t registry_entry,
3358 uint64_t *entry_id )
3359 {
3360 CHECK( IORegistryEntry, registry_entry, entry );
3361
3362 *entry_id = entry->getRegistryEntryID();
3363
3364 return kIOReturnSuccess;
3365 }
3366
3367 /* Routine io_registry_entry_get_property_bytes */
3368 kern_return_t
3369 is_io_registry_entry_get_property_bytes(
3370 io_object_t registry_entry,
3371 io_name_t property_name,
3372 io_struct_inband_t buf,
3373 mach_msg_type_number_t *dataCnt )
3374 {
3375 OSObject * obj;
3376 OSData * data;
3377 OSString * str;
3378 OSBoolean * boo;
3379 OSNumber * off;
3380 UInt64 offsetBytes;
3381 unsigned int len = 0;
3382 const void * bytes = NULL;
3383 IOReturn ret = kIOReturnSuccess;
3384
3385 CHECK( IORegistryEntry, registry_entry, entry );
3386
3387 #if CONFIG_MACF
3388 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3389 return kIOReturnNotPermitted;
3390 }
3391 #endif
3392
3393 obj = entry->copyProperty(property_name);
3394 if (!obj) {
3395 return kIOReturnNoResources;
3396 }
3397
3398 // One day OSData will be a common container base class
3399 // until then...
3400 if ((data = OSDynamicCast( OSData, obj ))) {
3401 len = data->getLength();
3402 bytes = data->getBytesNoCopy();
3403 if (!data->isSerializable()) {
3404 len = 0;
3405 }
3406 } else if ((str = OSDynamicCast( OSString, obj ))) {
3407 len = str->getLength() + 1;
3408 bytes = str->getCStringNoCopy();
3409 } else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
3410 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
3411 bytes = boo->isTrue() ? "Yes" : "No";
3412 } else if ((off = OSDynamicCast( OSNumber, obj ))) {
3413 offsetBytes = off->unsigned64BitValue();
3414 len = off->numberOfBytes();
3415 if (len > sizeof(offsetBytes)) {
3416 len = sizeof(offsetBytes);
3417 }
3418 bytes = &offsetBytes;
3419 #ifdef __BIG_ENDIAN__
3420 bytes = (const void *)
3421 (((UInt32) bytes) + (sizeof(UInt64) - len));
3422 #endif
3423 } else {
3424 ret = kIOReturnBadArgument;
3425 }
3426
3427 if (bytes) {
3428 if (*dataCnt < len) {
3429 ret = kIOReturnIPCError;
3430 } else {
3431 *dataCnt = len;
3432 bcopy( bytes, buf, len );
3433 }
3434 }
3435 obj->release();
3436
3437 return ret;
3438 }
3439
3440
3441 /* Routine io_registry_entry_get_property */
3442 kern_return_t
3443 is_io_registry_entry_get_property(
3444 io_object_t registry_entry,
3445 io_name_t property_name,
3446 io_buf_ptr_t *properties,
3447 mach_msg_type_number_t *propertiesCnt )
3448 {
3449 kern_return_t err;
3450 vm_size_t len;
3451 OSObject * obj;
3452
3453 CHECK( IORegistryEntry, registry_entry, entry );
3454
3455 #if CONFIG_MACF
3456 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3457 return kIOReturnNotPermitted;
3458 }
3459 #endif
3460
3461 obj = entry->copyProperty(property_name);
3462 if (!obj) {
3463 return kIOReturnNotFound;
3464 }
3465
3466 OSSerialize * s = OSSerialize::withCapacity(4096);
3467 if (!s) {
3468 obj->release();
3469 return kIOReturnNoMemory;
3470 }
3471
3472 if (obj->serialize( s )) {
3473 len = s->getLength();
3474 *propertiesCnt = len;
3475 err = copyoutkdata( s->text(), len, properties );
3476 } else {
3477 err = kIOReturnUnsupported;
3478 }
3479
3480 s->release();
3481 obj->release();
3482
3483 return err;
3484 }
3485
3486 /* Routine io_registry_entry_get_property_recursively */
3487 kern_return_t
3488 is_io_registry_entry_get_property_recursively(
3489 io_object_t registry_entry,
3490 io_name_t plane,
3491 io_name_t property_name,
3492 uint32_t options,
3493 io_buf_ptr_t *properties,
3494 mach_msg_type_number_t *propertiesCnt )
3495 {
3496 kern_return_t err;
3497 vm_size_t len;
3498 OSObject * obj;
3499
3500 CHECK( IORegistryEntry, registry_entry, entry );
3501
3502 #if CONFIG_MACF
3503 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3504 return kIOReturnNotPermitted;
3505 }
3506 #endif
3507
3508 obj = entry->copyProperty( property_name,
3509 IORegistryEntry::getPlane( plane ), options );
3510 if (!obj) {
3511 return kIOReturnNotFound;
3512 }
3513
3514 OSSerialize * s = OSSerialize::withCapacity(4096);
3515 if (!s) {
3516 obj->release();
3517 return kIOReturnNoMemory;
3518 }
3519
3520 if (obj->serialize( s )) {
3521 len = s->getLength();
3522 *propertiesCnt = len;
3523 err = copyoutkdata( s->text(), len, properties );
3524 } else {
3525 err = kIOReturnUnsupported;
3526 }
3527
3528 s->release();
3529 obj->release();
3530
3531 return err;
3532 }
3533
3534 /* Routine io_registry_entry_get_properties */
3535 kern_return_t
3536 is_io_registry_entry_get_properties(
3537 io_object_t registry_entry,
3538 io_buf_ptr_t *properties,
3539 mach_msg_type_number_t *propertiesCnt )
3540 {
3541 return kIOReturnUnsupported;
3542 }
3543
3544 #if CONFIG_MACF
3545
3546 struct GetPropertiesEditorRef {
3547 kauth_cred_t cred;
3548 IORegistryEntry * entry;
3549 OSCollection * root;
3550 };
3551
3552 static const OSMetaClassBase *
3553 GetPropertiesEditor(void * reference,
3554 OSSerialize * s,
3555 OSCollection * container,
3556 const OSSymbol * name,
3557 const OSMetaClassBase * value)
3558 {
3559 GetPropertiesEditorRef * ref = (typeof(ref))reference;
3560
3561 if (!ref->root) {
3562 ref->root = container;
3563 }
3564 if (ref->root == container) {
3565 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
3566 value = NULL;
3567 }
3568 }
3569 if (value) {
3570 value->retain();
3571 }
3572 return value;
3573 }
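// GetPropertiesEditor() is installed below as an OSSerialize editor when the
// MAC policy asks for property filtering: the binary serializer calls it for
// each key/value pair, and for keys in the root collection that fail
// mac_iokit_check_get_property() it returns NULL so the value is omitted from
// the serialized properties.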
3574
3575 #endif /* CONFIG_MACF */
3576
3577 /* Routine io_registry_entry_get_properties_bin_buf */
3578 kern_return_t
3579 is_io_registry_entry_get_properties_bin_buf(
3580 io_object_t registry_entry,
3581 mach_vm_address_t buf,
3582 mach_vm_size_t *bufsize,
3583 io_buf_ptr_t *properties,
3584 mach_msg_type_number_t *propertiesCnt)
3585 {
3586 kern_return_t err = kIOReturnSuccess;
3587 vm_size_t len;
3588 OSSerialize * s;
3589 OSSerialize::Editor editor = NULL;
3590 void * editRef = NULL;
3591
3592 CHECK(IORegistryEntry, registry_entry, entry);
3593
3594 #if CONFIG_MACF
3595 GetPropertiesEditorRef ref;
3596 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3597 editor = &GetPropertiesEditor;
3598 editRef = &ref;
3599 ref.cred = kauth_cred_get();
3600 ref.entry = entry;
3601 ref.root = NULL;
3602 }
3603 #endif
3604
3605 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3606 if (!s) {
3607 return kIOReturnNoMemory;
3608 }
3609
3610 if (!entry->serializeProperties(s)) {
3611 err = kIOReturnUnsupported;
3612 }
3613
3614 if (kIOReturnSuccess == err) {
3615 len = s->getLength();
3616 if (buf && bufsize && len <= *bufsize) {
3617 *bufsize = len;
3618 *propertiesCnt = 0;
3619 *properties = nullptr;
3620 if (copyout(s->text(), buf, len)) {
3621 err = kIOReturnVMError;
3622 } else {
3623 err = kIOReturnSuccess;
3624 }
3625 } else {
3626 if (bufsize) {
3627 *bufsize = 0;
3628 }
3629 *propertiesCnt = len;
3630 err = copyoutkdata( s->text(), len, properties );
3631 }
3632 }
3633 s->release();
3634
3635 return err;
3636 }
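// Two return paths above: if the caller pre-supplied a buffer (buf/bufsize)
// large enough for the serialized data, the bytes are copied straight out to
// that address and no out-of-line descriptor is returned; otherwise *bufsize
// is zeroed and the data goes back as out-of-line memory via copyoutkdata().
// is_io_registry_entry_get_property_bin_buf() below follows the same scheme.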
3637
3638 /* Routine io_registry_entry_get_properties_bin */
3639 kern_return_t
3640 is_io_registry_entry_get_properties_bin(
3641 io_object_t registry_entry,
3642 io_buf_ptr_t *properties,
3643 mach_msg_type_number_t *propertiesCnt)
3644 {
3645 return is_io_registry_entry_get_properties_bin_buf(registry_entry,
3646 0, NULL, properties, propertiesCnt);
3647 }
3648
3649 /* Routine io_registry_entry_get_property_bin_buf */
3650 kern_return_t
3651 is_io_registry_entry_get_property_bin_buf(
3652 io_object_t registry_entry,
3653 io_name_t plane,
3654 io_name_t property_name,
3655 uint32_t options,
3656 mach_vm_address_t buf,
3657 mach_vm_size_t *bufsize,
3658 io_buf_ptr_t *properties,
3659 mach_msg_type_number_t *propertiesCnt )
3660 {
3661 kern_return_t err;
3662 vm_size_t len;
3663 OSObject * obj;
3664 const OSSymbol * sym;
3665
3666 CHECK( IORegistryEntry, registry_entry, entry );
3667
3668 #if CONFIG_MACF
3669 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
3670 return kIOReturnNotPermitted;
3671 }
3672 #endif
3673
3674 sym = OSSymbol::withCString(property_name);
3675 if (!sym) {
3676 return kIOReturnNoMemory;
3677 }
3678
3679 if (gIORegistryEntryPropertyKeysKey == sym) {
3680 obj = entry->copyPropertyKeys();
3681 } else {
3682 if ((kIORegistryIterateRecursively & options) && plane[0]) {
3683 obj = entry->copyProperty(property_name,
3684 IORegistryEntry::getPlane(plane), options );
3685 } else {
3686 obj = entry->copyProperty(property_name);
3687 }
3688 if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
3689 entry->removeProperty(sym);
3690 }
3691 }
3692
3693 sym->release();
3694 if (!obj) {
3695 return kIOReturnNotFound;
3696 }
3697
3698 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3699 if (!s) {
3700 obj->release();
3701 return kIOReturnNoMemory;
3702 }
3703
3704 if (obj->serialize( s )) {
3705 len = s->getLength();
3706 if (buf && bufsize && len <= *bufsize) {
3707 *bufsize = len;
3708 *propertiesCnt = 0;
3709 *properties = nullptr;
3710 if (copyout(s->text(), buf, len)) {
3711 err = kIOReturnVMError;
3712 } else {
3713 err = kIOReturnSuccess;
3714 }
3715 } else {
3716 if (bufsize) {
3717 *bufsize = 0;
3718 }
3719 *propertiesCnt = len;
3720 err = copyoutkdata( s->text(), len, properties );
3721 }
3722 } else {
3723 err = kIOReturnUnsupported;
3724 }
3725
3726 s->release();
3727 obj->release();
3728
3729 return err;
3730 }
3731
3732 /* Routine io_registry_entry_get_property_bin */
3733 kern_return_t
3734 is_io_registry_entry_get_property_bin(
3735 io_object_t registry_entry,
3736 io_name_t plane,
3737 io_name_t property_name,
3738 uint32_t options,
3739 io_buf_ptr_t *properties,
3740 mach_msg_type_number_t *propertiesCnt )
3741 {
3742 return is_io_registry_entry_get_property_bin_buf(registry_entry, plane,
3743 property_name, options, 0, NULL, properties, propertiesCnt);
3744 }
3745
3746
3747 /* Routine io_registry_entry_set_properties */
3748 kern_return_t
3749 is_io_registry_entry_set_properties
3750 (
3751 io_object_t registry_entry,
3752 io_buf_ptr_t properties,
3753 mach_msg_type_number_t propertiesCnt,
3754 kern_return_t * result)
3755 {
3756 OSObject * obj;
3757 kern_return_t err;
3758 IOReturn res;
3759 vm_offset_t data;
3760 vm_map_offset_t map_data;
3761
3762 CHECK( IORegistryEntry, registry_entry, entry );
3763
3764 if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
3765 return kIOReturnMessageTooLarge;
3766 }
3767
3768 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3769 data = CAST_DOWN(vm_offset_t, map_data);
3770
3771 if (KERN_SUCCESS == err) {
3772 FAKE_STACK_FRAME(entry->getMetaClass());
3773
3774 // must return success after vm_map_copyout() succeeds
3775 obj = OSUnserializeXML((const char *) data, propertiesCnt );
3776 vm_deallocate( kernel_map, data, propertiesCnt );
3777
3778 if (!obj) {
3779 res = kIOReturnBadArgument;
3780 }
3781 #if CONFIG_MACF
3782 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3783 registry_entry, obj)) {
3784 res = kIOReturnNotPermitted;
3785 }
3786 #endif
3787 else {
3788 res = entry->setProperties( obj );
3789 }
3790
3791 if (obj) {
3792 obj->release();
3793 }
3794
3795 FAKE_STACK_FRAME_END();
3796 } else {
3797 res = err;
3798 }
3799
3800 *result = res;
3801 return err;
3802 }
3803
3804 /* Routine io_registry_entry_get_child_iterator */
3805 kern_return_t
3806 is_io_registry_entry_get_child_iterator(
3807 io_object_t registry_entry,
3808 io_name_t plane,
3809 io_object_t *iterator )
3810 {
3811 CHECK( IORegistryEntry, registry_entry, entry );
3812
3813 *iterator = IOUserIterator::withIterator(entry->getChildIterator(
3814 IORegistryEntry::getPlane( plane )));
3815
3816 return kIOReturnSuccess;
3817 }
3818
3819 /* Routine io_registry_entry_get_parent_iterator */
3820 kern_return_t
3821 is_io_registry_entry_get_parent_iterator(
3822 io_object_t registry_entry,
3823 io_name_t plane,
3824 io_object_t *iterator)
3825 {
3826 CHECK( IORegistryEntry, registry_entry, entry );
3827
3828 *iterator = IOUserIterator::withIterator(entry->getParentIterator(
3829 IORegistryEntry::getPlane( plane )));
3830
3831 return kIOReturnSuccess;
3832 }
3833
3834 /* Routine io_service_get_busy_state */
3835 kern_return_t
3836 is_io_service_get_busy_state(
3837 io_object_t _service,
3838 uint32_t *busyState )
3839 {
3840 CHECK( IOService, _service, service );
3841
3842 *busyState = service->getBusyState();
3843
3844 return kIOReturnSuccess;
3845 }
3846
3847 /* Routine io_service_get_state */
3848 kern_return_t
3849 is_io_service_get_state(
3850 io_object_t _service,
3851 uint64_t *state,
3852 uint32_t *busy_state,
3853 uint64_t *accumulated_busy_time )
3854 {
3855 CHECK( IOService, _service, service );
3856
3857 *state = service->getState();
3858 *busy_state = service->getBusyState();
3859 *accumulated_busy_time = service->getAccumulatedBusyTime();
3860
3861 return kIOReturnSuccess;
3862 }
3863
3864 /* Routine io_service_wait_quiet */
3865 kern_return_t
3866 is_io_service_wait_quiet(
3867 io_object_t _service,
3868 mach_timespec_t wait_time )
3869 {
3870 uint64_t timeoutNS;
3871
3872 CHECK( IOService, _service, service );
3873
3874 timeoutNS = wait_time.tv_sec;
3875 timeoutNS *= kSecondScale;
3876 timeoutNS += wait_time.tv_nsec;
3877
3878 return service->waitQuiet(timeoutNS);
3879 }
3880
3881 /* Routine io_service_request_probe */
3882 kern_return_t
3883 is_io_service_request_probe(
3884 io_object_t _service,
3885 uint32_t options )
3886 {
3887 CHECK( IOService, _service, service );
3888
3889 return service->requestProbe( options );
3890 }
3891
3892 /* Routine io_service_get_authorization_id */
3893 kern_return_t
3894 is_io_service_get_authorization_id(
3895 io_object_t _service,
3896 uint64_t *authorization_id )
3897 {
3898 kern_return_t kr;
3899
3900 CHECK( IOService, _service, service );
3901
3902 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
3903 kIOClientPrivilegeAdministrator );
3904 if (kIOReturnSuccess != kr) {
3905 return kr;
3906 }
3907
3908 *authorization_id = service->getAuthorizationID();
3909
3910 return kr;
3911 }
3912
3913 /* Routine io_service_set_authorization_id */
3914 kern_return_t
3915 is_io_service_set_authorization_id(
3916 io_object_t _service,
3917 uint64_t authorization_id )
3918 {
3919 CHECK( IOService, _service, service );
3920
3921 return service->setAuthorizationID( authorization_id );
3922 }
3923
3924 /* Routine io_service_open_extended */
3925 kern_return_t
3926 is_io_service_open_extended(
3927 io_object_t _service,
3928 task_t owningTask,
3929 uint32_t connect_type,
3930 NDR_record_t ndr,
3931 io_buf_ptr_t properties,
3932 mach_msg_type_number_t propertiesCnt,
3933 kern_return_t * result,
3934 io_object_t *connection )
3935 {
3936 IOUserClient * client = NULL;
3937 kern_return_t err = KERN_SUCCESS;
3938 IOReturn res = kIOReturnSuccess;
3939 OSDictionary * propertiesDict = NULL;
3940 bool crossEndian;
3941 bool disallowAccess;
3942
3943 CHECK( IOService, _service, service );
3944
3945 if (!owningTask) {
3946 return kIOReturnBadArgument;
3947 }
3948 assert(owningTask == current_task());
3949 if (owningTask != current_task()) {
3950 return kIOReturnBadArgument;
3951 }
3952
3953 do{
3954 if (properties) {
3955 return kIOReturnUnsupported;
3956 }
3957 #if 0
3958 {
3959 OSObject * obj;
3960 vm_offset_t data;
3961 vm_map_offset_t map_data;
3962
3963 if (propertiesCnt > sizeof(io_struct_inband_t)) {
3964 return kIOReturnMessageTooLarge;
3965 }
3966
3967 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3968 res = err;
3969 data = CAST_DOWN(vm_offset_t, map_data);
3970 if (KERN_SUCCESS == err) {
3971 // must return success after vm_map_copyout() succeeds
3972 obj = OSUnserializeXML((const char *) data, propertiesCnt );
3973 vm_deallocate( kernel_map, data, propertiesCnt );
3974 propertiesDict = OSDynamicCast(OSDictionary, obj);
3975 if (!propertiesDict) {
3976 res = kIOReturnBadArgument;
3977 if (obj) {
3978 obj->release();
3979 }
3980 }
3981 }
3982 if (kIOReturnSuccess != res) {
3983 break;
3984 }
3985 }
3986 #endif
3987 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3988 if (crossEndian) {
3989 if (!propertiesDict) {
3990 propertiesDict = OSDictionary::withCapacity(4);
3991 }
3992 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3993 if (data) {
3994 if (propertiesDict) {
3995 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3996 }
3997 data->release();
3998 }
3999 }
4000
4001 res = service->newUserClient( owningTask, (void *) owningTask,
4002 connect_type, propertiesDict, &client );
4003
4004 if (propertiesDict) {
4005 propertiesDict->release();
4006 }
4007
4008 if (res == kIOReturnSuccess) {
4009 assert( OSDynamicCast(IOUserClient, client));
4010
4011 client->sharedInstance = (NULL != client->getProperty(kIOUserClientSharedInstanceKey));
4012 client->messageAppSuspended = (NULL != client->getProperty(kIOUserClientMessageAppSuspendedKey));
4013 client->closed = false;
4014 client->lock = IOLockAlloc();
4015
4016 disallowAccess = (crossEndian
4017 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
4018 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
4019 if (disallowAccess) {
4020 res = kIOReturnUnsupported;
4021 }
4022 #if CONFIG_MACF
4023 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
4024 res = kIOReturnNotPermitted;
4025 }
4026 #endif
4027
4028 if (kIOReturnSuccess == res) {
4029 res = client->registerOwner(owningTask);
4030 }
4031
4032 if (kIOReturnSuccess != res) {
4033 IOStatisticsClientCall();
4034 client->clientClose();
4035 client->release();
4036 client = NULL;
4037 break;
4038 }
4039 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
4040 if (creatorName) {
4041 client->setProperty(kIOUserClientCreatorKey, creatorName);
4042 creatorName->release();
4043 }
4044 client->setTerminateDefer(service, false);
4045 }
4046 }while (false);
4047
4048 *connection = client;
4049 *result = res;
4050
4051 return err;
4052 }
4053
4054 /* Routine io_service_close */
4055 kern_return_t
4056 is_io_service_close(
4057 io_object_t connection )
4058 {
4059 OSSet * mappings;
4060 if ((mappings = OSDynamicCast(OSSet, connection))) {
4061 return kIOReturnSuccess;
4062 }
4063
4064 CHECK( IOUserClient, connection, client );
4065
4066 IOStatisticsClientCall();
4067
4068 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed)) {
4069 IOLockLock(client->lock);
4070 client->clientClose();
4071 IOLockUnlock(client->lock);
4072 } else {
4073 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
4074 client->getRegistryEntryID(), client->getName());
4075 }
4076
4077 return kIOReturnSuccess;
4078 }
4079
4080 /* Routine io_connect_get_service */
4081 kern_return_t
4082 is_io_connect_get_service(
4083 io_object_t connection,
4084 io_object_t *service )
4085 {
4086 IOService * theService;
4087
4088 CHECK( IOUserClient, connection, client );
4089
4090 theService = client->getService();
4091 if (theService) {
4092 theService->retain();
4093 }
4094
4095 *service = theService;
4096
4097 return theService ? kIOReturnSuccess : kIOReturnUnsupported;
4098 }
4099
4100 /* Routine io_connect_set_notification_port */
4101 kern_return_t
4102 is_io_connect_set_notification_port(
4103 io_object_t connection,
4104 uint32_t notification_type,
4105 mach_port_t port,
4106 uint32_t reference)
4107 {
4108 kern_return_t ret;
4109 CHECK( IOUserClient, connection, client );
4110
4111 IOStatisticsClientCall();
4112 IOLockLock(client->lock);
4113 ret = client->registerNotificationPort( port, notification_type,
4114 (io_user_reference_t) reference );
4115 IOLockUnlock(client->lock);
4116 return ret;
4117 }
4118
4119 /* Routine io_connect_set_notification_port_64 */
4120 kern_return_t
4121 is_io_connect_set_notification_port_64(
4122 io_object_t connection,
4123 uint32_t notification_type,
4124 mach_port_t port,
4125 io_user_reference_t reference)
4126 {
4127 kern_return_t ret;
4128 CHECK( IOUserClient, connection, client );
4129
4130 IOStatisticsClientCall();
4131 IOLockLock(client->lock);
4132 ret = client->registerNotificationPort( port, notification_type,
4133 reference );
4134 IOLockUnlock(client->lock);
4135 return ret;
4136 }
4137
4138 /* Routine io_connect_map_memory_into_task */
4139 kern_return_t
4140 is_io_connect_map_memory_into_task
4141 (
4142 io_connect_t connection,
4143 uint32_t memory_type,
4144 task_t into_task,
4145 mach_vm_address_t *address,
4146 mach_vm_size_t *size,
4147 uint32_t flags
4148 )
4149 {
4150 IOReturn err;
4151 IOMemoryMap * map;
4152
4153 CHECK( IOUserClient, connection, client );
4154
4155 if (!into_task) {
4156 return kIOReturnBadArgument;
4157 }
4158
4159 IOStatisticsClientCall();
4160 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
4161
4162 if (map) {
4163 *address = map->getAddress();
4164 if (size) {
4165 *size = map->getSize();
4166 }
4167
4168 if (client->sharedInstance
4169 || (into_task != current_task())) {
4170 // push a name out to the task owning the map,
4171 // so we can clean up maps
4172 mach_port_name_t name __unused =
4173 IOMachPort::makeSendRightForTask(
4174 into_task, map, IKOT_IOKIT_OBJECT );
4175 map->release();
4176 } else {
4177 // keep it with the user client
4178 IOLockLock( gIOObjectPortLock);
4179 if (NULL == client->mappings) {
4180 client->mappings = OSSet::withCapacity(2);
4181 }
4182 if (client->mappings) {
4183 client->mappings->setObject( map);
4184 }
4185 IOLockUnlock( gIOObjectPortLock);
4186 map->release();
4187 }
4188 err = kIOReturnSuccess;
4189 } else {
4190 err = kIOReturnBadArgument;
4191 }
4192
4193 return err;
4194 }
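// Map lifetime handling above: for shared-instance user clients, or when the
// map was created in some other task, a send right naming the IOMemoryMap is
// pushed to the owning task so the mapping can be torn down with that task;
// otherwise the map is parked in client->mappings and released later through
// is_io_connect_unmap_memory_from_task(). User space typically reaches this
// routine via IOKit.framework's IOConnectMapMemory64() (mentioned here only
// for orientation; it is not part of this file).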
4195
4196 /* Routine io_connect_map_memory */
4197 kern_return_t
4198 is_io_connect_map_memory(
4199 io_object_t connect,
4200 uint32_t type,
4201 task_t task,
4202 uint32_t * mapAddr,
4203 uint32_t * mapSize,
4204 uint32_t flags )
4205 {
4206 IOReturn err;
4207 mach_vm_address_t address;
4208 mach_vm_size_t size;
4209
4210 address = SCALAR64(*mapAddr);
4211 size = SCALAR64(*mapSize);
4212
4213 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
4214
4215 *mapAddr = SCALAR32(address);
4216 *mapSize = SCALAR32(size);
4217
4218 return err;
4219 }
4220 } /* extern "C" */
4221
4222 IOMemoryMap *
4223 IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
4224 {
4225 OSIterator * iter;
4226 IOMemoryMap * map = NULL;
4227
4228 IOLockLock(gIOObjectPortLock);
4229
4230 iter = OSCollectionIterator::withCollection(mappings);
4231 if (iter) {
4232 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
4233 if (mem == map->getMemoryDescriptor()) {
4234 map->retain();
4235 mappings->removeObject(map);
4236 break;
4237 }
4238 }
4239 iter->release();
4240 }
4241
4242 IOLockUnlock(gIOObjectPortLock);
4243
4244 return map;
4245 }
4246
4247 extern "C" {
4248 /* Routine io_connect_unmap_memory_from_task */
4249 kern_return_t
4250 is_io_connect_unmap_memory_from_task
4251 (
4252 io_connect_t connection,
4253 uint32_t memory_type,
4254 task_t from_task,
4255 mach_vm_address_t address)
4256 {
4257 IOReturn err;
4258 IOOptionBits options = 0;
4259 IOMemoryDescriptor * memory = NULL;
4260 IOMemoryMap * map;
4261
4262 CHECK( IOUserClient, connection, client );
4263
4264 if (!from_task) {
4265 return kIOReturnBadArgument;
4266 }
4267
4268 IOStatisticsClientCall();
4269 err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
4270
4271 if (memory && (kIOReturnSuccess == err)) {
4272 options = (options & ~kIOMapUserOptionsMask)
4273 | kIOMapAnywhere | kIOMapReference;
4274
4275 map = memory->createMappingInTask( from_task, address, options );
4276 memory->release();
4277 if (map) {
4278 IOLockLock( gIOObjectPortLock);
4279 if (client->mappings) {
4280 client->mappings->removeObject( map);
4281 }
4282 IOLockUnlock( gIOObjectPortLock);
4283
4284 mach_port_name_t name = 0;
4285 if (from_task != current_task()) {
4286 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
4287 map->release();
4288 }
4289
4290 if (name) {
4291 map->userClientUnmap();
4292 err = iokit_mod_send_right( from_task, name, -2 );
4293 err = kIOReturnSuccess;
4294 } else {
4295 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
4296 }
4297 if (from_task == current_task()) {
4298 map->release();
4299 }
4300 } else {
4301 err = kIOReturnBadArgument;
4302 }
4303 }
4304
4305 return err;
4306 }
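/*
 * Usage sketch (user space): the unmap path is typically driven by IOKitLib's
 * IOConnectUnmapMemory64(), handing back the address returned by the map call
 * above. Illustrative only, with the same hypothetical kMyMemoryType:
 *
 *   kern_return_t kr = IOConnectUnmapMemory64(connect, kMyMemoryType,
 *       mach_task_self(), addr);
 */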
4307
4308 kern_return_t
4309 is_io_connect_unmap_memory(
4310 io_object_t connect,
4311 uint32_t type,
4312 task_t task,
4313 uint32_t mapAddr )
4314 {
4315 IOReturn err;
4316 mach_vm_address_t address;
4317
4318 address = SCALAR64(mapAddr);
4319
4320 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
4321
4322 return err;
4323 }
4324
4325
4326 /* Routine io_connect_add_client */
4327 kern_return_t
4328 is_io_connect_add_client(
4329 io_object_t connection,
4330 io_object_t connect_to)
4331 {
4332 CHECK( IOUserClient, connection, client );
4333 CHECK( IOUserClient, connect_to, to );
4334
4335 IOStatisticsClientCall();
4336 return client->connectClient( to );
4337 }
4338
4339
4340 /* Routine io_connect_set_properties */
4341 kern_return_t
4342 is_io_connect_set_properties(
4343 io_object_t connection,
4344 io_buf_ptr_t properties,
4345 mach_msg_type_number_t propertiesCnt,
4346 kern_return_t * result)
4347 {
4348 return is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result );
4349 }
4350
4351 /* Routine io_connect_method_var_output */
4352 kern_return_t
4353 is_io_connect_method_var_output
4354 (
4355 io_connect_t connection,
4356 uint32_t selector,
4357 io_scalar_inband64_t scalar_input,
4358 mach_msg_type_number_t scalar_inputCnt,
4359 io_struct_inband_t inband_input,
4360 mach_msg_type_number_t inband_inputCnt,
4361 mach_vm_address_t ool_input,
4362 mach_vm_size_t ool_input_size,
4363 io_struct_inband_t inband_output,
4364 mach_msg_type_number_t *inband_outputCnt,
4365 io_scalar_inband64_t scalar_output,
4366 mach_msg_type_number_t *scalar_outputCnt,
4367 io_buf_ptr_t *var_output,
4368 mach_msg_type_number_t *var_outputCnt
4369 )
4370 {
4371 CHECK( IOUserClient, connection, client );
4372
4373 IOExternalMethodArguments args;
4374 IOReturn ret;
4375 IOMemoryDescriptor * inputMD = NULL;
4376 OSObject * structureVariableOutputData = NULL;
4377
4378 bzero(&args.__reserved[0], sizeof(args.__reserved));
4379 args.__reservedA = 0;
4380 args.version = kIOExternalMethodArgumentsCurrentVersion;
4381
4382 args.selector = selector;
4383
4384 args.asyncWakePort = MACH_PORT_NULL;
4385 args.asyncReference = NULL;
4386 args.asyncReferenceCount = 0;
4387 args.structureVariableOutputData = &structureVariableOutputData;
4388
4389 args.scalarInput = scalar_input;
4390 args.scalarInputCount = scalar_inputCnt;
4391 args.structureInput = inband_input;
4392 args.structureInputSize = inband_inputCnt;
4393
4394 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4395 return kIOReturnIPCError;
4396 }
4397
4398 if (ool_input) {
4399 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4400 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4401 current_task());
4402 }
4403
4404 args.structureInputDescriptor = inputMD;
4405
4406 args.scalarOutput = scalar_output;
4407 args.scalarOutputCount = *scalar_outputCnt;
4408 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4409 args.structureOutput = inband_output;
4410 args.structureOutputSize = *inband_outputCnt;
4411 args.structureOutputDescriptor = NULL;
4412 args.structureOutputDescriptorSize = 0;
4413
4414 IOStatisticsClientCall();
4415 ret = client->externalMethod( selector, &args );
4416
4417 *scalar_outputCnt = args.scalarOutputCount;
4418 *inband_outputCnt = args.structureOutputSize;
4419
4420 if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
4421 OSSerialize * serialize;
4422 OSData * data;
4423 vm_size_t len;
4424
4425 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
4426 len = serialize->getLength();
4427 *var_outputCnt = len;
4428 ret = copyoutkdata(serialize->text(), len, var_output);
4429 } else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
4430 len = data->getLength();
4431 *var_outputCnt = len;
4432 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
4433 } else {
4434 ret = kIOReturnUnderrun;
4435 }
4436 }
4437
4438 if (inputMD) {
4439 inputMD->release();
4440 }
4441 if (structureVariableOutputData) {
4442 structureVariableOutputData->release();
4443 }
4444
4445 return ret;
4446 }
4447
4448 /* Routine io_connect_method */
4449 kern_return_t
4450 is_io_connect_method
4451 (
4452 io_connect_t connection,
4453 uint32_t selector,
4454 io_scalar_inband64_t scalar_input,
4455 mach_msg_type_number_t scalar_inputCnt,
4456 io_struct_inband_t inband_input,
4457 mach_msg_type_number_t inband_inputCnt,
4458 mach_vm_address_t ool_input,
4459 mach_vm_size_t ool_input_size,
4460 io_struct_inband_t inband_output,
4461 mach_msg_type_number_t *inband_outputCnt,
4462 io_scalar_inband64_t scalar_output,
4463 mach_msg_type_number_t *scalar_outputCnt,
4464 mach_vm_address_t ool_output,
4465 mach_vm_size_t *ool_output_size
4466 )
4467 {
4468 CHECK( IOUserClient, connection, client );
4469
4470 IOExternalMethodArguments args;
4471 IOReturn ret;
4472 IOMemoryDescriptor * inputMD = NULL;
4473 IOMemoryDescriptor * outputMD = NULL;
4474
4475 bzero(&args.__reserved[0], sizeof(args.__reserved));
4476 args.__reservedA = 0;
4477 args.version = kIOExternalMethodArgumentsCurrentVersion;
4478
4479 args.selector = selector;
4480
4481 args.asyncWakePort = MACH_PORT_NULL;
4482 args.asyncReference = NULL;
4483 args.asyncReferenceCount = 0;
4484 args.structureVariableOutputData = NULL;
4485
4486 args.scalarInput = scalar_input;
4487 args.scalarInputCount = scalar_inputCnt;
4488 args.structureInput = inband_input;
4489 args.structureInputSize = inband_inputCnt;
4490
4491 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4492 return kIOReturnIPCError;
4493 }
4494 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
4495 return kIOReturnIPCError;
4496 }
4497
4498 if (ool_input) {
4499 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4500 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4501 current_task());
4502 }
4503
4504 args.structureInputDescriptor = inputMD;
4505
4506 args.scalarOutput = scalar_output;
4507 args.scalarOutputCount = *scalar_outputCnt;
4508 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4509 args.structureOutput = inband_output;
4510 args.structureOutputSize = *inband_outputCnt;
4511
4512 if (ool_output && ool_output_size) {
4513 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4514 kIODirectionIn, current_task());
4515 }
4516
4517 args.structureOutputDescriptor = outputMD;
4518 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
4519
4520 IOStatisticsClientCall();
4521 ret = client->externalMethod( selector, &args );
4522
4523 *scalar_outputCnt = args.scalarOutputCount;
4524 *inband_outputCnt = args.structureOutputSize;
4525 *ool_output_size = args.structureOutputDescriptorSize;
4526
4527 if (inputMD) {
4528 inputMD->release();
4529 }
4530 if (outputMD) {
4531 outputMD->release();
4532 }
4533
4534 return ret;
4535 }
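/*
 * Usage sketch (user space): is_io_connect_method is the MIG routine behind
 * IOKitLib's IOConnectCallMethod() family, which marshals the scalar and
 * struct arguments unpacked above. A minimal, illustrative call with two
 * scalar inputs and one scalar output; kMySelector is a hypothetical
 * driver-defined selector and error handling is omitted:
 *
 *   uint64_t input[2]  = { 1, 2 };
 *   uint64_t output[1] = { 0 };
 *   uint32_t outputCnt = 1;
 *   kern_return_t kr = IOConnectCallScalarMethod(connect, kMySelector,
 *       input, 2, output, &outputCnt);
 */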
4536
4537 /* Routine io_connect_async_method */
4538 kern_return_t
4539 is_io_connect_async_method
4540 (
4541 io_connect_t connection,
4542 mach_port_t wake_port,
4543 io_async_ref64_t reference,
4544 mach_msg_type_number_t referenceCnt,
4545 uint32_t selector,
4546 io_scalar_inband64_t scalar_input,
4547 mach_msg_type_number_t scalar_inputCnt,
4548 io_struct_inband_t inband_input,
4549 mach_msg_type_number_t inband_inputCnt,
4550 mach_vm_address_t ool_input,
4551 mach_vm_size_t ool_input_size,
4552 io_struct_inband_t inband_output,
4553 mach_msg_type_number_t *inband_outputCnt,
4554 io_scalar_inband64_t scalar_output,
4555 mach_msg_type_number_t *scalar_outputCnt,
4556 mach_vm_address_t ool_output,
4557 mach_vm_size_t * ool_output_size
4558 )
4559 {
4560 CHECK( IOUserClient, connection, client );
4561
4562 IOExternalMethodArguments args;
4563 IOReturn ret;
4564 IOMemoryDescriptor * inputMD = NULL;
4565 IOMemoryDescriptor * outputMD = NULL;
4566
4567 bzero(&args.__reserved[0], sizeof(args.__reserved));
4568 args.__reservedA = 0;
4569 args.version = kIOExternalMethodArgumentsCurrentVersion;
4570
4571 reference[0] = (io_user_reference_t) wake_port;
4572 if (vm_map_is_64bit(get_task_map(current_task()))) {
4573 reference[0] |= kIOUCAsync64Flag;
4574 }
4575
4576 args.selector = selector;
4577
4578 args.asyncWakePort = wake_port;
4579 args.asyncReference = reference;
4580 args.asyncReferenceCount = referenceCnt;
4581
4582 args.structureVariableOutputData = NULL;
4583
4584 args.scalarInput = scalar_input;
4585 args.scalarInputCount = scalar_inputCnt;
4586 args.structureInput = inband_input;
4587 args.structureInputSize = inband_inputCnt;
4588
4589 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
4590 return kIOReturnIPCError;
4591 }
4592 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
4593 return kIOReturnIPCError;
4594 }
4595
4596 if (ool_input) {
4597 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4598 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4599 current_task());
4600 }
4601
4602 args.structureInputDescriptor = inputMD;
4603
4604 args.scalarOutput = scalar_output;
4605 args.scalarOutputCount = *scalar_outputCnt;
4606 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4607 args.structureOutput = inband_output;
4608 args.structureOutputSize = *inband_outputCnt;
4609
4610 if (ool_output) {
4611 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4612 kIODirectionIn, current_task());
4613 }
4614
4615 args.structureOutputDescriptor = outputMD;
4616 args.structureOutputDescriptorSize = *ool_output_size;
4617
4618 IOStatisticsClientCall();
4619 ret = client->externalMethod( selector, &args );
4620
4621 *scalar_outputCnt = args.scalarOutputCount;
4622 *inband_outputCnt = args.structureOutputSize;
4623 *ool_output_size = args.structureOutputDescriptorSize;
4624
4625 if (inputMD) {
4626 inputMD->release();
4627 }
4628 if (outputMD) {
4629 outputMD->release();
4630 }
4631
4632 return ret;
4633 }
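/*
 * Usage sketch (user space): the async variant is reached via
 * IOConnectCallAsyncScalarMethod() and friends; the wake port normally comes
 * from an IONotificationPort, and the async reference carries the callback
 * and refcon slots. Illustrative only; kMySelector is hypothetical and error
 * handling is omitted:
 *
 *   IONotificationPortRef notifyPort = IONotificationPortCreate(kIOMasterPortDefault);
 *   mach_port_t wakePort = IONotificationPortGetMachPort(notifyPort);
 *   uint64_t asyncRef[kOSAsyncRef64Count] = { 0 };   // callback/refcon slots
 *   uint64_t input[1]  = { 0 };
 *   uint32_t outputCnt = 0;
 *   kern_return_t kr = IOConnectCallAsyncScalarMethod(connect, kMySelector,
 *       wakePort, asyncRef, kOSAsyncRef64Count,
 *       input, 1, NULL, &outputCnt);
 */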
4634
4635 /* Routine io_connect_method_scalarI_scalarO */
4636 kern_return_t
4637 is_io_connect_method_scalarI_scalarO(
4638 io_object_t connect,
4639 uint32_t index,
4640 io_scalar_inband_t input,
4641 mach_msg_type_number_t inputCount,
4642 io_scalar_inband_t output,
4643 mach_msg_type_number_t * outputCount )
4644 {
4645 IOReturn err;
4646 uint32_t i;
4647 io_scalar_inband64_t _input;
4648 io_scalar_inband64_t _output;
4649
4650 mach_msg_type_number_t struct_outputCnt = 0;
4651 mach_vm_size_t ool_output_size = 0;
4652
4653 bzero(&_output[0], sizeof(_output));
4654 for (i = 0; i < inputCount; i++) {
4655 _input[i] = SCALAR64(input[i]);
4656 }
4657
4658 err = is_io_connect_method(connect, index,
4659 _input, inputCount,
4660 NULL, 0,
4661 0, 0,
4662 NULL, &struct_outputCnt,
4663 _output, outputCount,
4664 0, &ool_output_size);
4665
4666 for (i = 0; i < *outputCount; i++) {
4667 output[i] = SCALAR32(_output[i]);
4668 }
4669
4670 return err;
4671 }
4672
4673 kern_return_t
4674 shim_io_connect_method_scalarI_scalarO(
4675 IOExternalMethod * method,
4676 IOService * object,
4677 const io_user_scalar_t * input,
4678 mach_msg_type_number_t inputCount,
4679 io_user_scalar_t * output,
4680 mach_msg_type_number_t * outputCount )
4681 {
4682 IOMethod func;
4683 io_scalar_inband_t _output;
4684 IOReturn err;
4685 err = kIOReturnBadArgument;
4686
4687 bzero(&_output[0], sizeof(_output));
4688 do {
4689 if (inputCount != method->count0) {
4690 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4691 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4692 continue;
4693 }
4694 if (*outputCount != method->count1) {
4695 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4696 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4697 continue;
4698 }
4699
4700 func = method->func;
4701
4702 switch (inputCount) {
4703 case 6:
4704 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4705 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
4706 break;
4707 case 5:
4708 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4709 ARG32(input[3]), ARG32(input[4]),
4710 &_output[0] );
4711 break;
4712 case 4:
4713 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4714 ARG32(input[3]),
4715 &_output[0], &_output[1] );
4716 break;
4717 case 3:
4718 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4719 &_output[0], &_output[1], &_output[2] );
4720 break;
4721 case 2:
4722 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4723 &_output[0], &_output[1], &_output[2],
4724 &_output[3] );
4725 break;
4726 case 1:
4727 err = (object->*func)( ARG32(input[0]),
4728 &_output[0], &_output[1], &_output[2],
4729 &_output[3], &_output[4] );
4730 break;
4731 case 0:
4732 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4733 &_output[3], &_output[4], &_output[5] );
4734 break;
4735
4736 default:
4737 IOLog("%s: Bad method table\n", object->getName());
4738 }
4739 }while (false);
4740
4741 uint32_t i;
4742 for (i = 0; i < *outputCount; i++) {
4743 output[i] = SCALAR32(_output[i]);
4744 }
4745
4746 return err;
4747 }
4748
4749 /* Routine io_async_method_scalarI_scalarO */
4750 kern_return_t
4751 is_io_async_method_scalarI_scalarO(
4752 io_object_t connect,
4753 mach_port_t wake_port,
4754 io_async_ref_t reference,
4755 mach_msg_type_number_t referenceCnt,
4756 uint32_t index,
4757 io_scalar_inband_t input,
4758 mach_msg_type_number_t inputCount,
4759 io_scalar_inband_t output,
4760 mach_msg_type_number_t * outputCount )
4761 {
4762 IOReturn err;
4763 uint32_t i;
4764 io_scalar_inband64_t _input;
4765 io_scalar_inband64_t _output;
4766 io_async_ref64_t _reference;
4767
4768 if (referenceCnt > ASYNC_REF64_COUNT) {
4769 return kIOReturnBadArgument;
4770 }
4771 bzero(&_output[0], sizeof(_output));
4772 for (i = 0; i < referenceCnt; i++) {
4773 _reference[i] = REF64(reference[i]);
4774 }
4775 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4776
4777 mach_msg_type_number_t struct_outputCnt = 0;
4778 mach_vm_size_t ool_output_size = 0;
4779
4780 for (i = 0; i < inputCount; i++) {
4781 _input[i] = SCALAR64(input[i]);
4782 }
4783
4784 err = is_io_connect_async_method(connect,
4785 wake_port, _reference, referenceCnt,
4786 index,
4787 _input, inputCount,
4788 NULL, 0,
4789 0, 0,
4790 NULL, &struct_outputCnt,
4791 _output, outputCount,
4792 0, &ool_output_size);
4793
4794 for (i = 0; i < *outputCount; i++) {
4795 output[i] = SCALAR32(_output[i]);
4796 }
4797
4798 return err;
4799 }

4800 /* Routine io_async_method_scalarI_structureO */
4801 kern_return_t
4802 is_io_async_method_scalarI_structureO(
4803 io_object_t connect,
4804 mach_port_t wake_port,
4805 io_async_ref_t reference,
4806 mach_msg_type_number_t referenceCnt,
4807 uint32_t index,
4808 io_scalar_inband_t input,
4809 mach_msg_type_number_t inputCount,
4810 io_struct_inband_t output,
4811 mach_msg_type_number_t * outputCount )
4812 {
4813 uint32_t i;
4814 io_scalar_inband64_t _input;
4815 io_async_ref64_t _reference;
4816
4817 if (referenceCnt > ASYNC_REF64_COUNT) {
4818 return kIOReturnBadArgument;
4819 }
4820 for (i = 0; i < referenceCnt; i++) {
4821 _reference[i] = REF64(reference[i]);
4822 }
4823 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4824
4825 mach_msg_type_number_t scalar_outputCnt = 0;
4826 mach_vm_size_t ool_output_size = 0;
4827
4828 for (i = 0; i < inputCount; i++) {
4829 _input[i] = SCALAR64(input[i]);
4830 }
4831
4832 return is_io_connect_async_method(connect,
4833 wake_port, _reference, referenceCnt,
4834 index,
4835 _input, inputCount,
4836 NULL, 0,
4837 0, 0,
4838 output, outputCount,
4839 NULL, &scalar_outputCnt,
4840 0, &ool_output_size);
4841 }
4842
4843 /* Routine io_async_method_scalarI_structureI */
4844 kern_return_t
4845 is_io_async_method_scalarI_structureI(
4846 io_connect_t connect,
4847 mach_port_t wake_port,
4848 io_async_ref_t reference,
4849 mach_msg_type_number_t referenceCnt,
4850 uint32_t index,
4851 io_scalar_inband_t input,
4852 mach_msg_type_number_t inputCount,
4853 io_struct_inband_t inputStruct,
4854 mach_msg_type_number_t inputStructCount )
4855 {
4856 uint32_t i;
4857 io_scalar_inband64_t _input;
4858 io_async_ref64_t _reference;
4859
4860 if (referenceCnt > ASYNC_REF64_COUNT) {
4861 return kIOReturnBadArgument;
4862 }
4863 for (i = 0; i < referenceCnt; i++) {
4864 _reference[i] = REF64(reference[i]);
4865 }
4866 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4867
4868 mach_msg_type_number_t scalar_outputCnt = 0;
4869 mach_msg_type_number_t inband_outputCnt = 0;
4870 mach_vm_size_t ool_output_size = 0;
4871
4872 for (i = 0; i < inputCount; i++) {
4873 _input[i] = SCALAR64(input[i]);
4874 }
4875
4876 return is_io_connect_async_method(connect,
4877 wake_port, _reference, referenceCnt,
4878 index,
4879 _input, inputCount,
4880 inputStruct, inputStructCount,
4881 0, 0,
4882 NULL, &inband_outputCnt,
4883 NULL, &scalar_outputCnt,
4884 0, &ool_output_size);
4885 }
4886
4887 /* Routine io_async_method_structureI_structureO */
4888 kern_return_t
4889 is_io_async_method_structureI_structureO(
4890 io_object_t connect,
4891 mach_port_t wake_port,
4892 io_async_ref_t reference,
4893 mach_msg_type_number_t referenceCnt,
4894 uint32_t index,
4895 io_struct_inband_t input,
4896 mach_msg_type_number_t inputCount,
4897 io_struct_inband_t output,
4898 mach_msg_type_number_t * outputCount )
4899 {
4900 uint32_t i;
4901 mach_msg_type_number_t scalar_outputCnt = 0;
4902 mach_vm_size_t ool_output_size = 0;
4903 io_async_ref64_t _reference;
4904
4905 if (referenceCnt > ASYNC_REF64_COUNT) {
4906 return kIOReturnBadArgument;
4907 }
4908 for (i = 0; i < referenceCnt; i++) {
4909 _reference[i] = REF64(reference[i]);
4910 }
4911 bzero(&_reference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(_reference[0]));
4912
4913 return is_io_connect_async_method(connect,
4914 wake_port, _reference, referenceCnt,
4915 index,
4916 NULL, 0,
4917 input, inputCount,
4918 0, 0,
4919 output, outputCount,
4920 NULL, &scalar_outputCnt,
4921 0, &ool_output_size);
4922 }
4923
4924
4925 kern_return_t
4926 shim_io_async_method_scalarI_scalarO(
4927 IOExternalAsyncMethod * method,
4928 IOService * object,
4929 mach_port_t asyncWakePort,
4930 io_user_reference_t * asyncReference,
4931 uint32_t asyncReferenceCount,
4932 const io_user_scalar_t * input,
4933 mach_msg_type_number_t inputCount,
4934 io_user_scalar_t * output,
4935 mach_msg_type_number_t * outputCount )
4936 {
4937 IOAsyncMethod func;
4938 uint32_t i;
4939 io_scalar_inband_t _output;
4940 IOReturn err;
4941 io_async_ref_t reference;
4942
4943 bzero(&_output[0], sizeof(_output));
4944 for (i = 0; i < asyncReferenceCount; i++) {
4945 reference[i] = REF32(asyncReference[i]);
4946 }
4947
4948 err = kIOReturnBadArgument;
4949
4950 do {
4951 if (inputCount != method->count0) {
4952 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4953 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4954 continue;
4955 }
4956 if (*outputCount != method->count1) {
4957 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4958 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4959 continue;
4960 }
4961
4962 func = method->func;
4963
4964 switch (inputCount) {
4965 case 6:
4966 err = (object->*func)( reference,
4967 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4968 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
4969 break;
4970 case 5:
4971 err = (object->*func)( reference,
4972 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4973 ARG32(input[3]), ARG32(input[4]),
4974 &_output[0] );
4975 break;
4976 case 4:
4977 err = (object->*func)( reference,
4978 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4979 ARG32(input[3]),
4980 &_output[0], &_output[1] );
4981 break;
4982 case 3:
4983 err = (object->*func)( reference,
4984 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4985 &_output[0], &_output[1], &_output[2] );
4986 break;
4987 case 2:
4988 err = (object->*func)( reference,
4989 ARG32(input[0]), ARG32(input[1]),
4990 &_output[0], &_output[1], &_output[2],
4991 &_output[3] );
4992 break;
4993 case 1:
4994 err = (object->*func)( reference,
4995 ARG32(input[0]),
4996 &_output[0], &_output[1], &_output[2],
4997 &_output[3], &_output[4] );
4998 break;
4999 case 0:
5000 err = (object->*func)( reference,
5001 &_output[0], &_output[1], &_output[2],
5002 &_output[3], &_output[4], &_output[5] );
5003 break;
5004
5005 default:
5006 IOLog("%s: Bad method table\n", object->getName());
5007 }
5008 }while (false);
5009
5010 for (i = 0; i < *outputCount; i++) {
5011 output[i] = SCALAR32(_output[i]);
5012 }
5013
5014 return err;
5015 }
5016
5017
5018 /* Routine io_connect_method_scalarI_structureO */
5019 kern_return_t
5020 is_io_connect_method_scalarI_structureO(
5021 io_object_t connect,
5022 uint32_t index,
5023 io_scalar_inband_t input,
5024 mach_msg_type_number_t inputCount,
5025 io_struct_inband_t output,
5026 mach_msg_type_number_t * outputCount )
5027 {
5028 uint32_t i;
5029 io_scalar_inband64_t _input;
5030
5031 mach_msg_type_number_t scalar_outputCnt = 0;
5032 mach_vm_size_t ool_output_size = 0;
5033
5034 for (i = 0; i < inputCount; i++) {
5035 _input[i] = SCALAR64(input[i]);
5036 }
5037
5038 return is_io_connect_method(connect, index,
5039 _input, inputCount,
5040 NULL, 0,
5041 0, 0,
5042 output, outputCount,
5043 NULL, &scalar_outputCnt,
5044 0, &ool_output_size);
5045 }
5046
5047 kern_return_t
5048 shim_io_connect_method_scalarI_structureO(
5049
5050 IOExternalMethod * method,
5051 IOService * object,
5052 const io_user_scalar_t * input,
5053 mach_msg_type_number_t inputCount,
5054 io_struct_inband_t output,
5055 IOByteCount * outputCount )
5056 {
5057 IOMethod func;
5058 IOReturn err;
5059
5060 err = kIOReturnBadArgument;
5061
5062 do {
5063 if (inputCount != method->count0) {
5064 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5065 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5066 continue;
5067 }
5068 if ((kIOUCVariableStructureSize != method->count1)
5069 && (*outputCount != method->count1)) {
5070 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5071 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5072 continue;
5073 }
5074
5075 func = method->func;
5076
5077 switch (inputCount) {
5078 case 5:
5079 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5080 ARG32(input[3]), ARG32(input[4]),
5081 output );
5082 break;
5083 case 4:
5084 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5085 ARG32(input[3]),
5086 output, (void *)outputCount );
5087 break;
5088 case 3:
5089 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5090 output, (void *)outputCount, NULL );
5091 break;
5092 case 2:
5093 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5094 output, (void *)outputCount, NULL, NULL );
5095 break;
5096 case 1:
5097 err = (object->*func)( ARG32(input[0]),
5098 output, (void *)outputCount, NULL, NULL, NULL );
5099 break;
5100 case 0:
5101 err = (object->*func)( output, (void *)outputCount, NULL, NULL, NULL, NULL );
5102 break;
5103
5104 default:
5105 IOLog("%s: Bad method table\n", object->getName());
5106 }
5107 }while (false);
5108
5109 return err;
5110 }
5111
5112
5113 kern_return_t
5114 shim_io_async_method_scalarI_structureO(
5115 IOExternalAsyncMethod * method,
5116 IOService * object,
5117 mach_port_t asyncWakePort,
5118 io_user_reference_t * asyncReference,
5119 uint32_t asyncReferenceCount,
5120 const io_user_scalar_t * input,
5121 mach_msg_type_number_t inputCount,
5122 io_struct_inband_t output,
5123 mach_msg_type_number_t * outputCount )
5124 {
5125 IOAsyncMethod func;
5126 uint32_t i;
5127 IOReturn err;
5128 io_async_ref_t reference;
5129
5130 for (i = 0; i < asyncReferenceCount; i++) {
5131 reference[i] = REF32(asyncReference[i]);
5132 }
5133
5134 err = kIOReturnBadArgument;
5135 do {
5136 if (inputCount != method->count0) {
5137 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5138 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5139 continue;
5140 }
5141 if ((kIOUCVariableStructureSize != method->count1)
5142 && (*outputCount != method->count1)) {
5143 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5144 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5145 continue;
5146 }
5147
5148 func = method->func;
5149
5150 switch (inputCount) {
5151 case 5:
5152 err = (object->*func)( reference,
5153 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5154 ARG32(input[3]), ARG32(input[4]),
5155 output );
5156 break;
5157 case 4:
5158 err = (object->*func)( reference,
5159 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5160 ARG32(input[3]),
5161 output, (void *)outputCount );
5162 break;
5163 case 3:
5164 err = (object->*func)( reference,
5165 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5166 output, (void *)outputCount, NULL );
5167 break;
5168 case 2:
5169 err = (object->*func)( reference,
5170 ARG32(input[0]), ARG32(input[1]),
5171 output, (void *)outputCount, NULL, NULL );
5172 break;
5173 case 1:
5174 err = (object->*func)( reference,
5175 ARG32(input[0]),
5176 output, (void *)outputCount, NULL, NULL, NULL );
5177 break;
5178 case 0:
5179 err = (object->*func)( reference,
5180 output, (void *)outputCount, NULL, NULL, NULL, NULL );
5181 break;
5182
5183 default:
5184 IOLog("%s: Bad method table\n", object->getName());
5185 }
5186 }while (false);
5187
5188 return err;
5189 }
5190
5191 /* Routine io_connect_method_scalarI_structureI */
5192 kern_return_t
5193 is_io_connect_method_scalarI_structureI(
5194 io_connect_t connect,
5195 uint32_t index,
5196 io_scalar_inband_t input,
5197 mach_msg_type_number_t inputCount,
5198 io_struct_inband_t inputStruct,
5199 mach_msg_type_number_t inputStructCount )
5200 {
5201 uint32_t i;
5202 io_scalar_inband64_t _input;
5203
5204 mach_msg_type_number_t scalar_outputCnt = 0;
5205 mach_msg_type_number_t inband_outputCnt = 0;
5206 mach_vm_size_t ool_output_size = 0;
5207
5208 for (i = 0; i < inputCount; i++) {
5209 _input[i] = SCALAR64(input[i]);
5210 }
5211
5212 return is_io_connect_method(connect, index,
5213 _input, inputCount,
5214 inputStruct, inputStructCount,
5215 0, 0,
5216 NULL, &inband_outputCnt,
5217 NULL, &scalar_outputCnt,
5218 0, &ool_output_size);
5219 }
5220
5221 kern_return_t
5222 shim_io_connect_method_scalarI_structureI(
5223 IOExternalMethod * method,
5224 IOService * object,
5225 const io_user_scalar_t * input,
5226 mach_msg_type_number_t inputCount,
5227 io_struct_inband_t inputStruct,
5228 mach_msg_type_number_t inputStructCount )
5229 {
5230 IOMethod func;
5231 IOReturn err = kIOReturnBadArgument;
5232
5233 do{
5234 if (inputCount != method->count0) {
5235 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5236 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5237 continue;
5238 }
5239 if ((kIOUCVariableStructureSize != method->count1)
5240 && (inputStructCount != method->count1)) {
5241 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5242 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5243 continue;
5244 }
5245
5246 func = method->func;
5247
5248 switch (inputCount) {
5249 case 5:
5250 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5251 ARG32(input[3]), ARG32(input[4]),
5252 inputStruct );
5253 break;
5254 case 4:
5255 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
5256 ARG32(input[3]),
5257 inputStruct, (void *)(uintptr_t)inputStructCount );
5258 break;
5259 case 3:
5260 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5261 inputStruct, (void *)(uintptr_t)inputStructCount,
5262 NULL );
5263 break;
5264 case 2:
5265 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
5266 inputStruct, (void *)(uintptr_t)inputStructCount,
5267 NULL, NULL );
5268 break;
5269 case 1:
5270 err = (object->*func)( ARG32(input[0]),
5271 inputStruct, (void *)(uintptr_t)inputStructCount,
5272 NULL, NULL, NULL );
5273 break;
5274 case 0:
5275 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
5276 NULL, NULL, NULL, NULL );
5277 break;
5278
5279 default:
5280 IOLog("%s: Bad method table\n", object->getName());
5281 }
5282 }while (false);
5283
5284 return err;
5285 }
5286
5287 kern_return_t
5288 shim_io_async_method_scalarI_structureI(
5289 IOExternalAsyncMethod * method,
5290 IOService * object,
5291 mach_port_t asyncWakePort,
5292 io_user_reference_t * asyncReference,
5293 uint32_t asyncReferenceCount,
5294 const io_user_scalar_t * input,
5295 mach_msg_type_number_t inputCount,
5296 io_struct_inband_t inputStruct,
5297 mach_msg_type_number_t inputStructCount )
5298 {
5299 IOAsyncMethod func;
5300 uint32_t i;
5301 IOReturn err = kIOReturnBadArgument;
5302 io_async_ref_t reference;
5303
5304 for (i = 0; i < asyncReferenceCount; i++) {
5305 reference[i] = REF32(asyncReference[i]);
5306 }
5307
5308 do{
5309 if (inputCount != method->count0) {
5310 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
5311 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5312 continue;
5313 }
5314 if ((kIOUCVariableStructureSize != method->count1)
5315 && (inputStructCount != method->count1)) {
5316 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5317 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
5318 continue;
5319 }
5320
5321 func = method->func;
5322
5323 switch (inputCount) {
5324 case 5:
5325 err = (object->*func)( reference,
5326 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5327 ARG32(input[3]), ARG32(input[4]),
5328 inputStruct );
5329 break;
5330 case 4:
5331 err = (object->*func)( reference,
5332 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5333 ARG32(input[3]),
5334 inputStruct, (void *)(uintptr_t)inputStructCount );
5335 break;
5336 case 3:
5337 err = (object->*func)( reference,
5338 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
5339 inputStruct, (void *)(uintptr_t)inputStructCount,
5340 NULL );
5341 break;
5342 case 2:
5343 err = (object->*func)( reference,
5344 ARG32(input[0]), ARG32(input[1]),
5345 inputStruct, (void *)(uintptr_t)inputStructCount,
5346 NULL, NULL );
5347 break;
5348 case 1:
5349 err = (object->*func)( reference,
5350 ARG32(input[0]),
5351 inputStruct, (void *)(uintptr_t)inputStructCount,
5352 NULL, NULL, NULL );
5353 break;
5354 case 0:
5355 err = (object->*func)( reference,
5356 inputStruct, (void *)(uintptr_t)inputStructCount,
5357 NULL, NULL, NULL, NULL );
5358 break;
5359
5360 default:
5361 IOLog("%s: Bad method table\n", object->getName());
5362 }
5363 }while (false);
5364
5365 return err;
5366 }
5367
5368 /* Routine io_connect_method_structureI_structureO */
5369 kern_return_t
5370 is_io_connect_method_structureI_structureO(
5371 io_object_t connect,
5372 uint32_t index,
5373 io_struct_inband_t input,
5374 mach_msg_type_number_t inputCount,
5375 io_struct_inband_t output,
5376 mach_msg_type_number_t * outputCount )
5377 {
5378 mach_msg_type_number_t scalar_outputCnt = 0;
5379 mach_vm_size_t ool_output_size = 0;
5380
5381 return is_io_connect_method(connect, index,
5382 NULL, 0,
5383 input, inputCount,
5384 0, 0,
5385 output, outputCount,
5386 NULL, &scalar_outputCnt,
5387 0, &ool_output_size);
5388 }
5389
5390 kern_return_t
5391 shim_io_connect_method_structureI_structureO(
5392 IOExternalMethod * method,
5393 IOService * object,
5394 io_struct_inband_t input,
5395 mach_msg_type_number_t inputCount,
5396 io_struct_inband_t output,
5397 IOByteCount * outputCount )
5398 {
5399 IOMethod func;
5400 IOReturn err = kIOReturnBadArgument;
5401
5402 do{
5403 if ((kIOUCVariableStructureSize != method->count0)
5404 && (inputCount != method->count0)) {
5405 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5406 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5407 continue;
5408 }
5409 if ((kIOUCVariableStructureSize != method->count1)
5410 && (*outputCount != method->count1)) {
5411 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5412 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5413 continue;
5414 }
5415
5416 func = method->func;
5417
5418 if (method->count1) {
5419 if (method->count0) {
5420 err = (object->*func)( input, output,
5421 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5422 } else {
5423 err = (object->*func)( output, outputCount, NULL, NULL, NULL, NULL );
5424 }
5425 } else {
5426 err = (object->*func)( input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5427 }
5428 }while (false);
5429
5430
5431 return err;
5432 }
5433
5434 kern_return_t
5435 shim_io_async_method_structureI_structureO(
5436 IOExternalAsyncMethod * method,
5437 IOService * object,
5438 mach_port_t asyncWakePort,
5439 io_user_reference_t * asyncReference,
5440 uint32_t asyncReferenceCount,
5441 io_struct_inband_t input,
5442 mach_msg_type_number_t inputCount,
5443 io_struct_inband_t output,
5444 mach_msg_type_number_t * outputCount )
5445 {
5446 IOAsyncMethod func;
5447 uint32_t i;
5448 IOReturn err;
5449 io_async_ref_t reference;
5450
5451 for (i = 0; i < asyncReferenceCount; i++) {
5452 reference[i] = REF32(asyncReference[i]);
5453 }
5454
5455 err = kIOReturnBadArgument;
5456 do{
5457 if ((kIOUCVariableStructureSize != method->count0)
5458 && (inputCount != method->count0)) {
5459 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
5460 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
5461 continue;
5462 }
5463 if ((kIOUCVariableStructureSize != method->count1)
5464 && (*outputCount != method->count1)) {
5465 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
5466 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
5467 continue;
5468 }
5469
5470 func = method->func;
5471
5472 if (method->count1) {
5473 if (method->count0) {
5474 err = (object->*func)( reference,
5475 input, output,
5476 (void *)(uintptr_t)inputCount, outputCount, NULL, NULL );
5477 } else {
5478 err = (object->*func)( reference,
5479 output, outputCount, NULL, NULL, NULL, NULL );
5480 }
5481 } else {
5482 err = (object->*func)( reference,
5483 input, (void *)(uintptr_t)inputCount, NULL, NULL, NULL, NULL );
5484 }
5485 }while (false);
5486
5487 return err;
5488 }
5489
5490 #if !NO_KEXTD
5491 bool gIOKextdClearedBusy = false;
5492 #endif
5493
5494 /* Routine io_catalog_send_data */
5495 kern_return_t
5496 is_io_catalog_send_data(
5497 mach_port_t master_port,
5498 uint32_t flag,
5499 io_buf_ptr_t inData,
5500 mach_msg_type_number_t inDataCount,
5501 kern_return_t * result)
5502 {
5503 #if NO_KEXTD
5504 return kIOReturnNotPrivileged;
5505 #else /* NO_KEXTD */
5506 OSObject * obj = NULL;
5507 vm_offset_t data;
5508 kern_return_t kr = kIOReturnError;
5509
5510 //printf("io_catalog_send_data called. flag: %d\n", flag);
5511
5512 if (master_port != master_device_port) {
5513 return kIOReturnNotPrivileged;
5514 }
5515
5516 if ((flag != kIOCatalogRemoveKernelLinker__Removed &&
5517 flag != kIOCatalogKextdActive &&
5518 flag != kIOCatalogKextdFinishedLaunching) &&
5519 (!inData || !inDataCount)) {
5520 return kIOReturnBadArgument;
5521 }
5522
5523 if (!IOTaskHasEntitlement(current_task(), kOSKextManagementEntitlement)) {
5524 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
5525 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
5526 OSSafeReleaseNULL(taskName);
5527 // For now, fake success so as not to break applications that rely on this function succeeding.
5528 // See <rdar://problem/32554970> for more details.
5529 return kIOReturnSuccess;
5530 }
5531
5532 if (inData) {
5533 vm_map_offset_t map_data;
5534
5535 if (inDataCount > sizeof(io_struct_inband_t) * 1024) {
5536 return kIOReturnMessageTooLarge;
5537 }
5538
5539 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
5540 data = CAST_DOWN(vm_offset_t, map_data);
5541
5542 if (kr != KERN_SUCCESS) {
5543 return kr;
5544 }
5545
5546 // must return success after vm_map_copyout() succeeds
5547
5548 if (inDataCount) {
5549 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
5550 vm_deallocate( kernel_map, data, inDataCount );
5551 if (!obj) {
5552 *result = kIOReturnNoMemory;
5553 return KERN_SUCCESS;
5554 }
5555 }
5556 }
5557
5558 switch (flag) {
5559 case kIOCatalogResetDrivers:
5560 case kIOCatalogResetDriversNoMatch: {
5561 OSArray * array;
5562
5563 array = OSDynamicCast(OSArray, obj);
5564 if (array) {
5565 if (!gIOCatalogue->resetAndAddDrivers(array,
5566 flag == kIOCatalogResetDrivers)) {
5567 kr = kIOReturnError;
5568 }
5569 } else {
5570 kr = kIOReturnBadArgument;
5571 }
5572 }
5573 break;
5574
5575 case kIOCatalogAddDrivers:
5576 case kIOCatalogAddDriversNoMatch: {
5577 OSArray * array;
5578
5579 array = OSDynamicCast(OSArray, obj);
5580 if (array) {
5581 if (!gIOCatalogue->addDrivers( array,
5582 flag == kIOCatalogAddDrivers)) {
5583 kr = kIOReturnError;
5584 }
5585 } else {
5586 kr = kIOReturnBadArgument;
5587 }
5588 }
5589 break;
5590
5591 case kIOCatalogRemoveDrivers:
5592 case kIOCatalogRemoveDriversNoMatch: {
5593 OSDictionary * dict;
5594
5595 dict = OSDynamicCast(OSDictionary, obj);
5596 if (dict) {
5597 if (!gIOCatalogue->removeDrivers( dict,
5598 flag == kIOCatalogRemoveDrivers )) {
5599 kr = kIOReturnError;
5600 }
5601 } else {
5602 kr = kIOReturnBadArgument;
5603 }
5604 }
5605 break;
5606
5607 case kIOCatalogStartMatching__Removed:
5608 case kIOCatalogRemoveKernelLinker__Removed:
5609 kr = KERN_NOT_SUPPORTED;
5610 break;
5611
5612 case kIOCatalogKextdActive:
5613 #if !NO_KEXTD
5614 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
5615 OSKext::setKextdActive();
5616
5617 /* Dump all nonloaded startup extensions; kextd will now send them
5618 * down on request.
5619 */
5620 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
5621 #endif
5622 kr = kIOReturnSuccess;
5623 break;
5624
5625 case kIOCatalogKextdFinishedLaunching: {
5626 #if !NO_KEXTD
5627 if (!gIOKextdClearedBusy) {
5628 IOService::kextdLaunched();
5629 gIOKextdClearedBusy = true;
5630 }
5631 #endif
5632 kr = kIOReturnSuccess;
5633 }
5634 break;
5635
5636 default:
5637 kr = kIOReturnBadArgument;
5638 break;
5639 }
5640
5641 if (obj) {
5642 obj->release();
5643 }
5644
5645 *result = kr;
5646 return KERN_SUCCESS;
5647 #endif /* NO_KEXTD */
5648 }
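/*
 * Usage sketch (user space): this routine backs IOKitLib's
 * IOCatalogueSendData(master_port, flag, buffer, size). In practice kextd is
 * the expected caller; tasks lacking kOSKextManagementEntitlement get the
 * faked success return noted above.
 */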
5649
5650 /* Routine io_catalog_terminate */
5651 kern_return_t
5652 is_io_catalog_terminate(
5653 mach_port_t master_port,
5654 uint32_t flag,
5655 io_name_t name )
5656 {
5657 kern_return_t kr;
5658
5659 if (master_port != master_device_port) {
5660 return kIOReturnNotPrivileged;
5661 }
5662
5663 kr = IOUserClient::clientHasPrivilege((void *) current_task(),
5664 kIOClientPrivilegeAdministrator );
5665 if (kIOReturnSuccess != kr) {
5666 return kr;
5667 }
5668
5669 switch (flag) {
5670 #if !defined(SECURE_KERNEL)
5671 case kIOCatalogServiceTerminate:
5672 kr = gIOCatalogue->terminateDrivers(NULL, name);
5673 break;
5674
5675 case kIOCatalogModuleUnload:
5676 case kIOCatalogModuleTerminate:
5677 kr = gIOCatalogue->terminateDriversForModule(name,
5678 flag == kIOCatalogModuleUnload);
5679 break;
5680 #endif
5681
5682 default:
5683 kr = kIOReturnBadArgument;
5684 break;
5685 }
5686
5687 return kr;
5688 }
5689
5690 /* Routine io_catalog_get_data */
5691 kern_return_t
5692 is_io_catalog_get_data(
5693 mach_port_t master_port,
5694 uint32_t flag,
5695 io_buf_ptr_t *outData,
5696 mach_msg_type_number_t *outDataCount)
5697 {
5698 kern_return_t kr = kIOReturnSuccess;
5699 OSSerialize * s;
5700
5701 if (master_port != master_device_port) {
5702 return kIOReturnNotPrivileged;
5703 }
5704
5705 //printf("io_catalog_get_data called. flag: %d\n", flag);
5706
5707 s = OSSerialize::withCapacity(4096);
5708 if (!s) {
5709 return kIOReturnNoMemory;
5710 }
5711
5712 kr = gIOCatalogue->serializeData(flag, s);
5713
5714 if (kr == kIOReturnSuccess) {
5715 vm_offset_t data;
5716 vm_map_copy_t copy;
5717 vm_size_t size;
5718
5719 size = s->getLength();
5720 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5721 if (kr == kIOReturnSuccess) {
5722 bcopy(s->text(), (void *)data, size);
5723 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5724 (vm_map_size_t)size, true, &copy);
5725 *outData = (char *)copy;
5726 *outDataCount = size;
5727 }
5728 }
5729
5730 s->release();
5731
5732 return kr;
5733 }
5734
5735 /* Routine io_catalog_get_gen_count */
5736 kern_return_t
5737 is_io_catalog_get_gen_count(
5738 mach_port_t master_port,
5739 uint32_t *genCount)
5740 {
5741 if (master_port != master_device_port) {
5742 return kIOReturnNotPrivileged;
5743 }
5744
5745 //printf("io_catalog_get_gen_count called.\n");
5746
5747 if (!genCount) {
5748 return kIOReturnBadArgument;
5749 }
5750
5751 *genCount = gIOCatalogue->getGenerationCount();
5752
5753 return kIOReturnSuccess;
5754 }
5755
5756 /* Routine io_catalog_module_loaded.
5757 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5758 */
5759 kern_return_t
5760 is_io_catalog_module_loaded(
5761 mach_port_t master_port,
5762 io_name_t name)
5763 {
5764 if (master_port != master_device_port) {
5765 return kIOReturnNotPrivileged;
5766 }
5767
5768 //printf("io_catalog_module_loaded called. name %s\n", name);
5769
5770 if (!name) {
5771 return kIOReturnBadArgument;
5772 }
5773
5774 gIOCatalogue->moduleHasLoaded(name);
5775
5776 return kIOReturnSuccess;
5777 }
5778
5779 kern_return_t
5780 is_io_catalog_reset(
5781 mach_port_t master_port,
5782 uint32_t flag)
5783 {
5784 if (master_port != master_device_port) {
5785 return kIOReturnNotPrivileged;
5786 }
5787
5788 switch (flag) {
5789 case kIOCatalogResetDefault:
5790 gIOCatalogue->reset();
5791 break;
5792
5793 default:
5794 return kIOReturnBadArgument;
5795 }
5796
5797 return kIOReturnSuccess;
5798 }
5799
5800 kern_return_t
5801 iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5802 {
5803 kern_return_t result = kIOReturnBadArgument;
5804 IOUserClient * userClient;
5805 OSObject * object;
5806 uintptr_t ref;
5807
5808 ref = (uintptr_t) args->userClientRef;
5809 if ((1ULL << 32) & ref) {
5810 object = iokit_lookup_uext_ref_current_task((mach_port_name_t) ref);
5811 if (object) {
5812 result = IOUserServerUEXTTrap(object, args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5813 }
5814 OSSafeReleaseNULL(object);
5815 } else if ((userClient = OSDynamicCast(IOUserClient, iokit_lookup_connect_ref_current_task((mach_port_name_t) ref)))) {
5816 IOExternalTrap *trap;
5817 IOService *target = NULL;
5818
5819 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5820
5821 if (trap && target) {
5822 IOTrap func;
5823
5824 func = trap->func;
5825
5826 if (func) {
5827 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5828 }
5829 }
5830
5831 iokit_remove_connect_reference(userClient);
5832 }
5833
5834 return result;
5835 }
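/*
 * Usage sketch (user space): this trap is the fast path behind IOKitLib's
 * IOConnectTrap0()..IOConnectTrap6(); the index selects an entry from the
 * user client's getTargetAndTrapForIndex() table. Illustrative only, with a
 * hypothetical index of 0:
 *
 *   kern_return_t kr = IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6);
 */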
5836
5837 /* Routine io_device_tree_entry_exists_with_name */
5838 kern_return_t
5839 is_io_device_tree_entry_exists_with_name(
5840 mach_port_t master_port,
5841 io_name_t name,
5842 boolean_t *exists )
5843 {
5844 OSCollectionIterator *iter;
5845
5846 if (master_port != master_device_port) {
5847 return kIOReturnNotPrivileged;
5848 }
5849
5850 iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
5851 *exists = iter && iter->getNextObject();
5852 OSSafeReleaseNULL(iter);
5853
5854 return kIOReturnSuccess;
5855 }
5856 } /* extern "C" */
5857
5858 IOReturn
5859 IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5860 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5861 {
5862 IOReturn err;
5863 IOService * object;
5864 IOByteCount structureOutputSize;
5865
5866 if (dispatch) {
5867 uint32_t count;
5868 count = dispatch->checkScalarInputCount;
5869 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) {
5870 return kIOReturnBadArgument;
5871 }
5872
5873 count = dispatch->checkStructureInputSize;
5874 if ((kIOUCVariableStructureSize != count)
5875 && (count != ((args->structureInputDescriptor)
5876 ? args->structureInputDescriptor->getLength() : args->structureInputSize))) {
5877 return kIOReturnBadArgument;
5878 }
5879
5880 count = dispatch->checkScalarOutputCount;
5881 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) {
5882 return kIOReturnBadArgument;
5883 }
5884
5885 count = dispatch->checkStructureOutputSize;
5886 if ((kIOUCVariableStructureSize != count)
5887 && (count != ((args->structureOutputDescriptor)
5888 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize))) {
5889 return kIOReturnBadArgument;
5890 }
5891
5892 if (dispatch->function) {
5893 err = (*dispatch->function)(target, reference, args);
5894 } else {
5895 err = kIOReturnNoCompletion; /* implementor can dispatch */
5896 }
5897 return err;
5898 }
5899
5900
5901 // pre-Leopard APIs don't do ool structs
5902 if (args->structureInputDescriptor || args->structureOutputDescriptor) {
5903 err = kIOReturnIPCError;
5904 return err;
5905 }
5906
5907 structureOutputSize = args->structureOutputSize;
5908
5909 if (args->asyncWakePort) {
5910 IOExternalAsyncMethod * method;
5911 object = NULL;
5912 if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object) {
5913 return kIOReturnUnsupported;
5914 }
5915
5916 if (kIOUCForegroundOnly & method->flags) {
5917 if (task_is_gpu_denied(current_task())) {
5918 return kIOReturnNotPermitted;
5919 }
5920 }
5921
5922 switch (method->flags & kIOUCTypeMask) {
5923 case kIOUCScalarIStructI:
5924 err = shim_io_async_method_scalarI_structureI( method, object,
5925 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5926 args->scalarInput, args->scalarInputCount,
5927 (char *)args->structureInput, args->structureInputSize );
5928 break;
5929
5930 case kIOUCScalarIScalarO:
5931 err = shim_io_async_method_scalarI_scalarO( method, object,
5932 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5933 args->scalarInput, args->scalarInputCount,
5934 args->scalarOutput, &args->scalarOutputCount );
5935 break;
5936
5937 case kIOUCScalarIStructO:
5938 err = shim_io_async_method_scalarI_structureO( method, object,
5939 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5940 args->scalarInput, args->scalarInputCount,
5941 (char *) args->structureOutput, &args->structureOutputSize );
5942 break;
5943
5944
5945 case kIOUCStructIStructO:
5946 err = shim_io_async_method_structureI_structureO( method, object,
5947 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5948 (char *)args->structureInput, args->structureInputSize,
5949 (char *) args->structureOutput, &args->structureOutputSize );
5950 break;
5951
5952 default:
5953 err = kIOReturnBadArgument;
5954 break;
5955 }
5956 } else {
5957 IOExternalMethod * method;
5958 object = NULL;
5959 if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object) {
5960 return kIOReturnUnsupported;
5961 }
5962
5963 if (kIOUCForegroundOnly & method->flags) {
5964 if (task_is_gpu_denied(current_task())) {
5965 return kIOReturnNotPermitted;
5966 }
5967 }
5968
5969 switch (method->flags & kIOUCTypeMask) {
5970 case kIOUCScalarIStructI:
5971 err = shim_io_connect_method_scalarI_structureI( method, object,
5972 args->scalarInput, args->scalarInputCount,
5973 (char *) args->structureInput, args->structureInputSize );
5974 break;
5975
5976 case kIOUCScalarIScalarO:
5977 err = shim_io_connect_method_scalarI_scalarO( method, object,
5978 args->scalarInput, args->scalarInputCount,
5979 args->scalarOutput, &args->scalarOutputCount );
5980 break;
5981
5982 case kIOUCScalarIStructO:
5983 err = shim_io_connect_method_scalarI_structureO( method, object,
5984 args->scalarInput, args->scalarInputCount,
5985 (char *) args->structureOutput, &structureOutputSize );
5986 break;
5987
5988
5989 case kIOUCStructIStructO:
5990 err = shim_io_connect_method_structureI_structureO( method, object,
5991 (char *) args->structureInput, args->structureInputSize,
5992 (char *) args->structureOutput, &structureOutputSize );
5993 break;
5994
5995 default:
5996 err = kIOReturnBadArgument;
5997 break;
5998 }
5999 }
6000
6001 args->structureOutputSize = structureOutputSize;
6002
6003 return err;
6004 }
6005
6006 #if __LP64__
6007 OSMetaClassDefineReservedUnused(IOUserClient, 0);
6008 OSMetaClassDefineReservedUnused(IOUserClient, 1);
6009 #else
6010 OSMetaClassDefineReservedUsed(IOUserClient, 0);
6011 OSMetaClassDefineReservedUsed(IOUserClient, 1);
6012 #endif
6013 OSMetaClassDefineReservedUnused(IOUserClient, 2);
6014 OSMetaClassDefineReservedUnused(IOUserClient, 3);
6015 OSMetaClassDefineReservedUnused(IOUserClient, 4);
6016 OSMetaClassDefineReservedUnused(IOUserClient, 5);
6017 OSMetaClassDefineReservedUnused(IOUserClient, 6);
6018 OSMetaClassDefineReservedUnused(IOUserClient, 7);
6019 OSMetaClassDefineReservedUnused(IOUserClient, 8);
6020 OSMetaClassDefineReservedUnused(IOUserClient, 9);
6021 OSMetaClassDefineReservedUnused(IOUserClient, 10);
6022 OSMetaClassDefineReservedUnused(IOUserClient, 11);
6023 OSMetaClassDefineReservedUnused(IOUserClient, 12);
6024 OSMetaClassDefineReservedUnused(IOUserClient, 13);
6025 OSMetaClassDefineReservedUnused(IOUserClient, 14);
6026 OSMetaClassDefineReservedUnused(IOUserClient, 15);
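/*
 * Usage sketch (driver side): subclasses commonly override externalMethod()
 * and route selectors back through the checked-dispatch path implemented
 * above. A minimal, hypothetical example; MyUserClient, sMethods and
 * staticHello are illustrative names only:
 *
 *   static const IOExternalMethodDispatch sMethods[1] = {
 *       // function, scalar-in count, struct-in size, scalar-out count, struct-out size
 *       { &MyUserClient::staticHello, 1, 0, 1, 0 },
 *   };
 *
 *   IOReturn
 *   MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * args,
 *       IOExternalMethodDispatch * dispatch, OSObject * target, void * reference)
 *   {
 *       if (selector < 1) {
 *           dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *           if (!target) {
 *               target = this;
 *           }
 *       }
 *       // The superclass checks the scalar/structure counts declared in the
 *       // dispatch entry before invoking staticHello(target, reference, args).
 *       return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */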