1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/system.h>
44 #include <libkern/OSDebug.h>
45 #include <sys/proc.h>
46 #include <sys/kauth.h>
47 #include <sys/codesign.h>
48
49 #include <mach/sdt.h>
50
51 #if CONFIG_MACF
52
53 extern "C" {
54 #include <security/mac_framework.h>
55 };
56 #include <sys/kauth.h>
57
58 #define IOMACF_LOG 0
59
60 #endif /* CONFIG_MACF */
61
62 #include <IOKit/assert.h>
63
64 #include "IOServicePrivate.h"
65 #include "IOKitKernelInternal.h"
66
67 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
68 #define SCALAR32(x) ((uint32_t )x)
69 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
70 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
71 #define REF32(x) ((int)(x))
72
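// Flag bits carried in the low bits of the wake port field
// (kIOAsyncReservedIndex) of an async reference; see setAsyncReference64().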
73 enum
74 {
75 kIOUCAsync0Flags = 3ULL,
76 kIOUCAsync64Flag = 1ULL,
77 kIOUCAsyncErrorLoggedFlag = 2ULL
78 };
79
80 #if IOKITSTATS
81
82 #define IOStatisticsRegisterCounter() \
83 do { \
84 reserved->counter = IOStatistics::registerUserClient(this); \
85 } while (0)
86
87 #define IOStatisticsUnregisterCounter() \
88 do { \
89 if (reserved) \
90 IOStatistics::unregisterUserClient(reserved->counter); \
91 } while (0)
92
93 #define IOStatisticsClientCall() \
94 do { \
95 IOStatistics::countUserClientCall(client); \
96 } while (0)
97
98 #else
99
100 #define IOStatisticsRegisterCounter()
101 #define IOStatisticsUnregisterCounter()
102 #define IOStatisticsClientCall()
103
104 #endif /* IOKITSTATS */
105
106 #if DEVELOPMENT || DEBUG
107
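// FAKE_STACK_FRAME(a) temporarily overwrites the saved return address in the
// current stack frame with 'a' (here a metaclass pointer), so backtraces taken
// while calling out to external code point back at the responsible class.
// FAKE_STACK_FRAME_END() restores the original return address.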
108 #define FAKE_STACK_FRAME(a) \
109 const void ** __frameptr; \
110 const void * __retaddr; \
111 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
112 __retaddr = __frameptr[1]; \
113 __frameptr[1] = (a);
114
115 #define FAKE_STACK_FRAME_END() \
116 __frameptr[1] = __retaddr;
117
118 #else /* DEVELOPMENT || DEBUG */
119
120 #define FAKE_STACK_FRAME(a)
121 #define FAKE_STACK_FRAME_END()
122
123 #endif /* DEVELOPMENT || DEBUG */
124
125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // definitions we should get from osfmk
128
129 //typedef struct ipc_port * ipc_port_t;
130 typedef natural_t ipc_kobject_type_t;
131
132 #define IKOT_IOKIT_SPARE 27
133 #define IKOT_IOKIT_CONNECT 29
134 #define IKOT_IOKIT_OBJECT 30
135
136 extern "C" {
137
138 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
139 ipc_kobject_type_t type );
140
141 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
142
143 extern mach_port_name_t iokit_make_send_right( task_t task,
144 io_object_t obj, ipc_kobject_type_t type );
145
146 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
147
148 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
149
150 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
151
152 extern ipc_port_t master_device_port;
153
154 extern void iokit_retain_port( ipc_port_t port );
155 extern void iokit_release_port( ipc_port_t port );
156 extern void iokit_release_port_send( ipc_port_t port );
157
158 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
159
160 #include <mach/mach_traps.h>
161 #include <vm/vm_map.h>
162
163 } /* extern "C" */
164
165
166 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
167
168 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
169
170 class IOMachPort : public OSObject
171 {
172 OSDeclareDefaultStructors(IOMachPort)
173 public:
174 OSObject * object;
175 ipc_port_t port;
176 UInt32 mscount;
177 UInt8 holdDestroy;
178
179 static IOMachPort * portForObject( OSObject * obj,
180 ipc_kobject_type_t type );
181 static bool noMoreSendersForObject( OSObject * obj,
182 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
183 static void releasePortForObject( OSObject * obj,
184 ipc_kobject_type_t type );
185 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
186
187 static OSDictionary * dictForType( ipc_kobject_type_t type );
188
189 static mach_port_name_t makeSendRightForTask( task_t task,
190 io_object_t obj, ipc_kobject_type_t type );
191
192 virtual void free() APPLE_KEXT_OVERRIDE;
193 };
194
195 #define super OSObject
196 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
197
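// Guards gIOObjectPorts/gIOConnectPorts below and per-object IOMachPort state
// (mscount, holdDestroy).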
198 static IOLock * gIOObjectPortLock;
199
200 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
201
202 // not in dictForType() for debugging ease
203 static OSDictionary * gIOObjectPorts;
204 static OSDictionary * gIOConnectPorts;
205
206 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
207 {
208 OSDictionary ** dict;
209
210 if( IKOT_IOKIT_OBJECT == type )
211 dict = &gIOObjectPorts;
212 else if( IKOT_IOKIT_CONNECT == type )
213 dict = &gIOConnectPorts;
214 else
215 return( 0 );
216
217 if( 0 == *dict)
218 *dict = OSDictionary::withCapacity( 1 );
219
220 return( *dict );
221 }
222
223 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
224 ipc_kobject_type_t type )
225 {
226 IOMachPort * inst = 0;
227 OSDictionary * dict;
228
229 IOTakeLock( gIOObjectPortLock);
230
231 do {
232
233 dict = dictForType( type );
234 if( !dict)
235 continue;
236
237 if( (inst = (IOMachPort *)
238 dict->getObject( (const OSSymbol *) obj ))) {
239 inst->mscount++;
240 inst->retain();
241 continue;
242 }
243
244 inst = new IOMachPort;
245         if( !inst) continue;
246         if( !inst->init()) {
247             inst->release(); inst = 0; continue;
248         }
249
250 inst->port = iokit_alloc_object_port( obj, type );
251 if( inst->port) {
252 // retains obj
253 dict->setObject( (const OSSymbol *) obj, inst );
254 inst->mscount++;
255
256 } else {
257 inst->release();
258 inst = 0;
259 }
260
261 } while( false );
262
263 IOUnlock( gIOObjectPortLock);
264
265 return( inst );
266 }
267
268 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
269 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
270 {
271 OSDictionary * dict;
272 IOMachPort * machPort;
273 IOUserClient * uc;
274 bool destroyed = true;
275
276 IOTakeLock( gIOObjectPortLock);
277
278 if( (dict = dictForType( type ))) {
279 obj->retain();
280
281 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
282 if( machPort) {
283 destroyed = (machPort->mscount <= *mscount);
284 if (!destroyed) *mscount = machPort->mscount;
285 else
286 {
287 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
288 {
289 uc->noMoreSenders();
290 }
291 dict->removeObject( (const OSSymbol *) obj );
292 }
293 }
294 obj->release();
295 }
296
297 IOUnlock( gIOObjectPortLock);
298
299 return( destroyed );
300 }
301
302 void IOMachPort::releasePortForObject( OSObject * obj,
303 ipc_kobject_type_t type )
304 {
305 OSDictionary * dict;
306 IOMachPort * machPort;
307
308 assert(IKOT_IOKIT_CONNECT != type);
309
310 IOTakeLock( gIOObjectPortLock);
311
312 if( (dict = dictForType( type ))) {
313 obj->retain();
314 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
315 if( machPort && !machPort->holdDestroy)
316 dict->removeObject( (const OSSymbol *) obj );
317 obj->release();
318 }
319
320 IOUnlock( gIOObjectPortLock);
321 }
322
323 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
324 {
325 OSDictionary * dict;
326 IOMachPort * machPort;
327
328 IOLockLock( gIOObjectPortLock );
329
330 if( (dict = dictForType( type ))) {
331 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
332 if( machPort)
333 machPort->holdDestroy = true;
334 }
335
336 IOLockUnlock( gIOObjectPortLock );
337 }
338
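// Tear down the Mach port bookkeeping for an object that is going away; for an
// IOUserClient the connect port is switched over to its mappings object before
// the client's entry is removed.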
339 void IOUserClient::destroyUserReferences( OSObject * obj )
340 {
341 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
342
343 // panther, 3160200
344 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
345
346 OSDictionary * dict;
347
348 IOTakeLock( gIOObjectPortLock);
349 obj->retain();
350
351 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
352 {
353 IOMachPort * port;
354 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
355 if (port)
356 {
357 IOUserClient * uc;
358 if ((uc = OSDynamicCast(IOUserClient, obj)))
359 {
360 uc->noMoreSenders();
361 if (uc->mappings)
362 {
363 dict->setObject((const OSSymbol *) uc->mappings, port);
364 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
365
366 uc->mappings->release();
367 uc->mappings = 0;
368 }
369 }
370 dict->removeObject( (const OSSymbol *) obj );
371 }
372 }
373 obj->release();
374 IOUnlock( gIOObjectPortLock);
375 }
376
377 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
378 io_object_t obj, ipc_kobject_type_t type )
379 {
380 return( iokit_make_send_right( task, obj, type ));
381 }
382
383 void IOMachPort::free( void )
384 {
385 if( port)
386 iokit_destroy_object_port( port );
387 super::free();
388 }
389
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
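// IOUserIterator wraps a kernel object (usually an OSIterator) handed out to
// user space, serializing access to it with its own lock.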
392 class IOUserIterator : public OSIterator
393 {
394 OSDeclareDefaultStructors(IOUserIterator)
395 public:
396 OSObject * userIteratorObject;
397 IOLock * lock;
398
399 static IOUserIterator * withIterator(OSIterator * iter);
400 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
401 virtual void free() APPLE_KEXT_OVERRIDE;
402
403 virtual void reset() APPLE_KEXT_OVERRIDE;
404 virtual bool isValid() APPLE_KEXT_OVERRIDE;
405 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
406 };
407
408 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
409
410 class IOUserNotification : public IOUserIterator
411 {
412 OSDeclareDefaultStructors(IOUserNotification)
413
414 #define holdNotify userIteratorObject
415
416 public:
417
418 virtual void free() APPLE_KEXT_OVERRIDE;
419
420 virtual void setNotification( IONotifier * obj );
421
422 virtual void reset() APPLE_KEXT_OVERRIDE;
423 virtual bool isValid() APPLE_KEXT_OVERRIDE;
424 };
425
426 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
427
428 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
429
430 IOUserIterator *
431 IOUserIterator::withIterator(OSIterator * iter)
432 {
433 IOUserIterator * me;
434
435 if (!iter) return (0);
436
437 me = new IOUserIterator;
438 if (me && !me->init())
439 {
440 me->release();
441 me = 0;
442 }
443 if (!me) return me;
444 me->userIteratorObject = iter;
445
446 return (me);
447 }
448
449 bool
450 IOUserIterator::init( void )
451 {
452 if (!OSObject::init()) return (false);
453
454 lock = IOLockAlloc();
455 if( !lock)
456 return( false );
457
458 return (true);
459 }
460
461 void
462 IOUserIterator::free()
463 {
464 if (userIteratorObject) userIteratorObject->release();
465 if (lock) IOLockFree(lock);
466 OSObject::free();
467 }
468
469 void
470 IOUserIterator::reset()
471 {
472 IOLockLock(lock);
473 assert(OSDynamicCast(OSIterator, userIteratorObject));
474 ((OSIterator *)userIteratorObject)->reset();
475 IOLockUnlock(lock);
476 }
477
478 bool
479 IOUserIterator::isValid()
480 {
481 bool ret;
482
483 IOLockLock(lock);
484 assert(OSDynamicCast(OSIterator, userIteratorObject));
485 ret = ((OSIterator *)userIteratorObject)->isValid();
486 IOLockUnlock(lock);
487
488 return (ret);
489 }
490
491 OSObject *
492 IOUserIterator::getNextObject()
493 {
494 OSObject * ret;
495
496 IOLockLock(lock);
497 assert(OSDynamicCast(OSIterator, userIteratorObject));
498 ret = ((OSIterator *)userIteratorObject)->getNextObject();
499 IOLockUnlock(lock);
500
501 return (ret);
502 }
503
504 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
505 extern "C" {
506
507 // functions called from osfmk/device/iokit_rpc.c
508
509 void
510 iokit_add_reference( io_object_t obj )
511 {
512 if( obj)
513 obj->retain();
514 }
515
516 void
517 iokit_remove_reference( io_object_t obj )
518 {
519 if( obj)
520 obj->release();
521 }
522
523 void
524 iokit_add_connect_reference( io_object_t obj )
525 {
526 IOUserClient * uc;
527
528 if (!obj) return;
529
530 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
531
532 obj->retain();
533 }
534
535 void
536 iokit_remove_connect_reference( io_object_t obj )
537 {
538 IOUserClient * uc;
539 bool finalize = false;
540
541 if (!obj) return;
542
543 if ((uc = OSDynamicCast(IOUserClient, obj)))
544 {
545 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
546 {
547 IOLockLock(gIOObjectPortLock);
548 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
549 IOLockUnlock(gIOObjectPortLock);
550 }
551 if (finalize) uc->scheduleFinalize(true);
552 }
553
554 obj->release();
555 }
556
557 bool
558 IOUserClient::finalizeUserReferences(OSObject * obj)
559 {
560 IOUserClient * uc;
561 bool ok = true;
562
563 if ((uc = OSDynamicCast(IOUserClient, obj)))
564 {
565 IOLockLock(gIOObjectPortLock);
566 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
567 IOLockUnlock(gIOObjectPortLock);
568 }
569 return (ok);
570 }
571
572 ipc_port_t
573 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
574 {
575 IOMachPort * machPort;
576 ipc_port_t port;
577
578 if( (machPort = IOMachPort::portForObject( obj, type ))) {
579
580 port = machPort->port;
581 if( port)
582 iokit_retain_port( port );
583
584 machPort->release();
585
586 } else
587 port = NULL;
588
589 return( port );
590 }
591
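// Called when an object port loses its senders (or its owning task dies):
// connections get clientDied(), memory maps get taskDied(), and user
// notifications have their IONotifier removed.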
592 kern_return_t
593 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
594 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
595 {
596 IOUserClient * client;
597 IOMemoryMap * map;
598 IOUserNotification * notify;
599
600 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
601 return( kIOReturnNotReady );
602
603 if( IKOT_IOKIT_CONNECT == type)
604 {
605 if( (client = OSDynamicCast( IOUserClient, obj )))
606 {
607 IOStatisticsClientCall();
608 IOLockLock(client->lock);
609 client->clientDied();
610 IOLockUnlock(client->lock);
611 }
612 }
613 else if( IKOT_IOKIT_OBJECT == type)
614 {
615 if( (map = OSDynamicCast( IOMemoryMap, obj )))
616 map->taskDied();
617 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
618 notify->setNotification( 0 );
619 }
620
621 return( kIOReturnSuccess );
622 }
623
624 }; /* extern "C" */
625
626 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
627
628 class IOServiceUserNotification : public IOUserNotification
629 {
630 OSDeclareDefaultStructors(IOServiceUserNotification)
631
632 struct PingMsg {
633 mach_msg_header_t msgHdr;
634 OSNotificationHeader64 notifyHeader;
635 };
636
637 enum { kMaxOutstanding = 1024 };
638
639 PingMsg * pingMsg;
640 vm_size_t msgSize;
641 OSArray * newSet;
642 OSObject * lastEntry;
643 bool armed;
644 bool ipcLogged;
645
646 public:
647
648 virtual bool init( mach_port_t port, natural_t type,
649 void * reference, vm_size_t referenceSize,
650 bool clientIs64 );
651 virtual void free() APPLE_KEXT_OVERRIDE;
652 void invalidatePort(void);
653
654 static bool _handler( void * target,
655 void * ref, IOService * newService, IONotifier * notifier );
656 virtual bool handler( void * ref, IOService * newService );
657
658 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
659 };
660
661 class IOServiceMessageUserNotification : public IOUserNotification
662 {
663 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
664
665 struct PingMsg {
666 mach_msg_header_t msgHdr;
667 mach_msg_body_t msgBody;
668 mach_msg_port_descriptor_t ports[1];
669 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
670 };
671
672 PingMsg * pingMsg;
673 vm_size_t msgSize;
674 uint8_t clientIs64;
675 int owningPID;
676 bool ipcLogged;
677
678 public:
679
680 virtual bool init( mach_port_t port, natural_t type,
681 void * reference, vm_size_t referenceSize,
682 vm_size_t extraSize,
683 bool clientIs64 );
684
685 virtual void free() APPLE_KEXT_OVERRIDE;
686 void invalidatePort(void);
687
688 static IOReturn _handler( void * target, void * ref,
689 UInt32 messageType, IOService * provider,
690 void * messageArgument, vm_size_t argSize );
691 virtual IOReturn handler( void * ref,
692 UInt32 messageType, IOService * provider,
693 void * messageArgument, vm_size_t argSize );
694
695 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
696 };
697
698 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
699
700 #undef super
701 #define super IOUserIterator
702 OSDefineMetaClass( IOUserNotification, IOUserIterator )
703 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
704
705 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
706
707 void IOUserNotification::free( void )
708 {
709 if (holdNotify)
710 {
711 assert(OSDynamicCast(IONotifier, holdNotify));
712 ((IONotifier *)holdNotify)->remove();
713 holdNotify = 0;
714 }
715 // can't be in handler now
716
717 super::free();
718 }
719
720
721 void IOUserNotification::setNotification( IONotifier * notify )
722 {
723 OSObject * previousNotify;
724
725 IOLockLock( gIOObjectPortLock);
726
727 previousNotify = holdNotify;
728 holdNotify = notify;
729
730 IOLockUnlock( gIOObjectPortLock);
731
732 if( previousNotify)
733 {
734 assert(OSDynamicCast(IONotifier, previousNotify));
735 ((IONotifier *)previousNotify)->remove();
736 }
737 }
738
739 void IOUserNotification::reset()
740 {
741 // ?
742 }
743
744 bool IOUserNotification::isValid()
745 {
746 return( true );
747 }
748
749 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
750
751 #undef super
752 #define super IOUserNotification
753 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
754
755 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
756
757 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
758 void * reference, vm_size_t referenceSize,
759 bool clientIs64 )
760 {
761 if( !super::init())
762 return( false );
763
764 newSet = OSArray::withCapacity( 1 );
765 if( !newSet)
766 return( false );
767
768 if (referenceSize > sizeof(OSAsyncReference64))
769 return( false );
770
771 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
772 pingMsg = (PingMsg *) IOMalloc( msgSize);
773 if( !pingMsg)
774 return( false );
775
776 bzero( pingMsg, msgSize);
777
778 pingMsg->msgHdr.msgh_remote_port = port;
779 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
780 MACH_MSG_TYPE_COPY_SEND /*remote*/,
781 MACH_MSG_TYPE_MAKE_SEND /*local*/);
782 pingMsg->msgHdr.msgh_size = msgSize;
783 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
784
785 pingMsg->notifyHeader.size = 0;
786 pingMsg->notifyHeader.type = type;
787 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
788
789 return( true );
790 }
791
792 void IOServiceUserNotification::invalidatePort(void)
793 {
794 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
795 }
796
797 void IOServiceUserNotification::free( void )
798 {
799 PingMsg * _pingMsg;
800 vm_size_t _msgSize;
801 OSArray * _newSet;
802 OSObject * _lastEntry;
803
804 _pingMsg = pingMsg;
805 _msgSize = msgSize;
806 _lastEntry = lastEntry;
807 _newSet = newSet;
808
809 super::free();
810
811 if( _pingMsg && _msgSize) {
812 if (_pingMsg->msgHdr.msgh_remote_port) {
813 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
814 }
815 IOFree(_pingMsg, _msgSize);
816 }
817
818 if( _lastEntry)
819 _lastEntry->release();
820
821 if( _newSet)
822 _newSet->release();
823 }
824
825 bool IOServiceUserNotification::_handler( void * target,
826 void * ref, IOService * newService, IONotifier * notifier )
827 {
828 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
829 }
830
831 bool IOServiceUserNotification::handler( void * ref,
832 IOService * newService )
833 {
834 unsigned int count;
835 kern_return_t kr;
836 ipc_port_t port = NULL;
837 bool sendPing = false;
838
839 IOTakeLock( lock );
840
841 count = newSet->getCount();
842 if( count < kMaxOutstanding) {
843
844 newSet->setObject( newService );
845 if( (sendPing = (armed && (0 == count))))
846 armed = false;
847 }
848
849 IOUnlock( lock );
850
851 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
852 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
853
854 if( sendPing) {
855 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
856 pingMsg->msgHdr.msgh_local_port = port;
857 else
858 pingMsg->msgHdr.msgh_local_port = NULL;
859
860 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
861 pingMsg->msgHdr.msgh_size,
862 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
863 0);
864 if( port)
865 iokit_release_port( port );
866
867 if( (KERN_SUCCESS != kr) && !ipcLogged)
868 {
869 ipcLogged = true;
870 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
871 }
872 }
873
874 return( true );
875 }
876
877 OSObject * IOServiceUserNotification::getNextObject()
878 {
879 unsigned int count;
880 OSObject * result;
881 OSObject * releaseEntry;
882
883 IOLockLock(lock);
884
885 releaseEntry = lastEntry;
886 count = newSet->getCount();
887 if( count ) {
888 result = newSet->getObject( count - 1 );
889 result->retain();
890 newSet->removeObject( count - 1);
891 } else {
892 result = 0;
893 armed = true;
894 }
895 lastEntry = result;
896
897 IOLockUnlock(lock);
898
899 if (releaseEntry) releaseEntry->release();
900
901 return( result );
902 }
903
904 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
905
906 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
907
908 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
909
910 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
911 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
912 bool client64 )
913 {
914 if( !super::init())
915 return( false );
916
917 if (referenceSize > sizeof(OSAsyncReference64))
918 return( false );
919
920 clientIs64 = client64;
921
922 owningPID = proc_selfpid();
923
924 extraSize += sizeof(IOServiceInterestContent64);
925 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
926 pingMsg = (PingMsg *) IOMalloc( msgSize);
927 if( !pingMsg)
928 return( false );
929
930 bzero( pingMsg, msgSize);
931
932 pingMsg->msgHdr.msgh_remote_port = port;
933 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
934 | MACH_MSGH_BITS(
935 MACH_MSG_TYPE_COPY_SEND /*remote*/,
936 MACH_MSG_TYPE_MAKE_SEND /*local*/);
937 pingMsg->msgHdr.msgh_size = msgSize;
938 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
939
940 pingMsg->msgBody.msgh_descriptor_count = 1;
941
942 pingMsg->ports[0].name = 0;
943 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
944 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
945
946 pingMsg->notifyHeader.size = extraSize;
947 pingMsg->notifyHeader.type = type;
948 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
949
950 return( true );
951 }
952
953 void IOServiceMessageUserNotification::invalidatePort(void)
954 {
955 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
956 }
957
958 void IOServiceMessageUserNotification::free( void )
959 {
960 PingMsg * _pingMsg;
961 vm_size_t _msgSize;
962
963 _pingMsg = pingMsg;
964 _msgSize = msgSize;
965
966 super::free();
967
968 if( _pingMsg && _msgSize) {
969 if (_pingMsg->msgHdr.msgh_remote_port) {
970 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
971 }
972 IOFree( _pingMsg, _msgSize);
973 }
974 }
975
976 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
977 UInt32 messageType, IOService * provider,
978 void * argument, vm_size_t argSize )
979 {
980 return( ((IOServiceMessageUserNotification *) target)->handler(
981 ref, messageType, provider, argument, argSize));
982 }
983
984 IOReturn IOServiceMessageUserNotification::handler( void * ref,
985 UInt32 messageType, IOService * provider,
986 void * messageArgument, vm_size_t callerArgSize )
987 {
988 enum { kLocalMsgSize = 0x100 };
989 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
990 void * allocMsg;
991 kern_return_t kr;
992 vm_size_t argSize;
993 vm_size_t thisMsgSize;
994 ipc_port_t thisPort, providerPort;
995 struct PingMsg * thisMsg;
996 IOServiceInterestContent64 * data;
997
998 if (kIOMessageCopyClientID == messageType)
999 {
1000 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
1001 return (kIOReturnSuccess);
1002 }
1003
1004 if (callerArgSize == 0)
1005 {
1006 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
1007 else argSize = sizeof(uint32_t);
1008 }
1009 else
1010 {
1011 if( callerArgSize > kIOUserNotifyMaxMessageSize)
1012 callerArgSize = kIOUserNotifyMaxMessageSize;
1013 argSize = callerArgSize;
1014 }
1015
1016 // adjust message size for ipc restrictions
1017 natural_t type;
1018 type = pingMsg->notifyHeader.type;
1019 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1020 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1021 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1022
1023 thisMsgSize = msgSize
1024 + sizeof( IOServiceInterestContent64 )
1025 - sizeof( data->messageArgument)
1026 + argSize;
1027
1028 if (thisMsgSize > sizeof(stackMsg))
1029 {
1030 allocMsg = IOMalloc(thisMsgSize);
1031 if (!allocMsg) return (kIOReturnNoMemory);
1032 thisMsg = (typeof(thisMsg)) allocMsg;
1033 }
1034 else
1035 {
1036 allocMsg = 0;
1037 thisMsg = (typeof(thisMsg)) stackMsg;
1038 }
1039
1040 bcopy(pingMsg, thisMsg, msgSize);
1041 thisMsg->notifyHeader.type = type;
1042 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1043 // == pingMsg->notifyHeader.content;
1044 data->messageType = messageType;
1045
1046 if (callerArgSize == 0)
1047 {
1048 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1049 if (!clientIs64)
1050 {
1051 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1052 }
1053 }
1054 else
1055 {
1056 bcopy( messageArgument, data->messageArgument, callerArgSize );
1057 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1058 }
1059
1060 thisMsg->notifyHeader.type = type;
1061 thisMsg->msgHdr.msgh_size = thisMsgSize;
1062
1063 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1064 thisMsg->ports[0].name = providerPort;
1065 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1066 thisMsg->msgHdr.msgh_local_port = thisPort;
1067
1068 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1069 thisMsg->msgHdr.msgh_size,
1070 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1071 0);
1072 if( thisPort)
1073 iokit_release_port( thisPort );
1074 if( providerPort)
1075 iokit_release_port( providerPort );
1076
1077 if (allocMsg)
1078 IOFree(allocMsg, thisMsgSize);
1079
1080 if((KERN_SUCCESS != kr) && !ipcLogged)
1081 {
1082 ipcLogged = true;
1083 IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
1084 }
1085
1086 return( kIOReturnSuccess );
1087 }
1088
1089 OSObject * IOServiceMessageUserNotification::getNextObject()
1090 {
1091 return( 0 );
1092 }
1093
1094 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1095
1096 #undef super
1097 #define super IOService
1098 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1099
1100 IOLock * gIOUserClientOwnersLock;
1101
1102 void IOUserClient::initialize( void )
1103 {
1104 gIOObjectPortLock = IOLockAlloc();
1105 gIOUserClientOwnersLock = IOLockAlloc();
1106 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1107 }
1108
1109 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1110 mach_port_t wakePort,
1111 void *callback, void *refcon)
1112 {
1113 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1114 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1115 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1116 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1117 }
1118
1119 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1120 mach_port_t wakePort,
1121 mach_vm_address_t callback, io_user_reference_t refcon)
1122 {
1123 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1124 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1125 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1126 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1127 }
1128
1129 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1130 mach_port_t wakePort,
1131 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1132 {
1133 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1134 if (vm_map_is_64bit(get_task_map(task))) {
1135 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1136 }
1137 }
1138
1139 static OSDictionary * CopyConsoleUser(UInt32 uid)
1140 {
1141 OSArray * array;
1142 OSDictionary * user = 0;
1143
1144 if ((array = OSDynamicCast(OSArray,
1145 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1146 {
1147 for (unsigned int idx = 0;
1148 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1149 idx++) {
1150 OSNumber * num;
1151
1152 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1153 && (uid == num->unsigned32BitValue())) {
1154 user->retain();
1155 break;
1156 }
1157 }
1158 array->release();
1159 }
1160 return user;
1161 }
1162
1163 static OSDictionary * CopyUserOnConsole(void)
1164 {
1165 OSArray * array;
1166 OSDictionary * user = 0;
1167
1168 if ((array = OSDynamicCast(OSArray,
1169 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1170 {
1171 for (unsigned int idx = 0;
1172 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1173 idx++)
1174 {
1175 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1176 {
1177 user->retain();
1178 break;
1179 }
1180 }
1181 array->release();
1182 }
1183 return (user);
1184 }
1185
1186 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1187 IOService * service )
1188 {
1189 proc_t p;
1190
1191 p = (proc_t) get_bsdtask_info(task);
1192 if (p)
1193 {
1194 uint64_t authorizationID;
1195
1196 authorizationID = proc_uniqueid(p);
1197 if (authorizationID)
1198 {
1199 if (service->getAuthorizationID() == authorizationID)
1200 {
1201 return (kIOReturnSuccess);
1202 }
1203 }
1204 }
1205
1206 return (kIOReturnNotPermitted);
1207 }
1208
1209 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1210 const char * privilegeName )
1211 {
1212 kern_return_t kr;
1213 security_token_t token;
1214 mach_msg_type_number_t count;
1215 task_t task;
1216 OSDictionary * user;
1217 bool secureConsole;
1218
1219
1220 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1221 sizeof(kIOClientPrivilegeForeground)))
1222 {
1223 if (task_is_gpu_denied(current_task()))
1224 return (kIOReturnNotPrivileged);
1225 else
1226 return (kIOReturnSuccess);
1227 }
1228
1229 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1230 sizeof(kIOClientPrivilegeConsoleSession)))
1231 {
1232 kauth_cred_t cred;
1233 proc_t p;
1234
1235 task = (task_t) securityToken;
1236 if (!task)
1237 task = current_task();
1238 p = (proc_t) get_bsdtask_info(task);
1239 kr = kIOReturnNotPrivileged;
1240
1241 if (p && (cred = kauth_cred_proc_ref(p)))
1242 {
1243 user = CopyUserOnConsole();
1244 if (user)
1245 {
1246 OSNumber * num;
1247 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1248 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1249 {
1250 kr = kIOReturnSuccess;
1251 }
1252 user->release();
1253 }
1254 kauth_cred_unref(&cred);
1255 }
1256 return (kr);
1257 }
1258
1259 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1260 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1261 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1262 else
1263 task = (task_t)securityToken;
1264
1265 count = TASK_SECURITY_TOKEN_COUNT;
1266 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1267
1268 if (KERN_SUCCESS != kr)
1269 {}
1270 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1271 sizeof(kIOClientPrivilegeAdministrator))) {
1272 if (0 != token.val[0])
1273 kr = kIOReturnNotPrivileged;
1274 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1275 sizeof(kIOClientPrivilegeLocalUser))) {
1276 user = CopyConsoleUser(token.val[0]);
1277 if ( user )
1278 user->release();
1279 else
1280 kr = kIOReturnNotPrivileged;
1281 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1282 sizeof(kIOClientPrivilegeConsoleUser))) {
1283 user = CopyConsoleUser(token.val[0]);
1284 if ( user ) {
1285 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1286 kr = kIOReturnNotPrivileged;
1287 else if ( secureConsole ) {
1288 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1289 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1290 kr = kIOReturnNotPrivileged;
1291 }
1292 user->release();
1293 }
1294 else
1295 kr = kIOReturnNotPrivileged;
1296 } else
1297 kr = kIOReturnUnsupported;
1298
1299 return (kr);
1300 }
1301
1302 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1303 const char * entitlement )
1304 {
1305 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1306
1307 proc_t p = NULL;
1308 pid_t pid = 0;
1309 char procname[MAXCOMLEN + 1] = "";
1310 size_t len = 0;
1311 void *entitlements_blob = NULL;
1312 char *entitlements_data = NULL;
1313 OSObject *entitlements_obj = NULL;
1314 OSDictionary *entitlements = NULL;
1315 OSString *errorString = NULL;
1316 OSObject *value = NULL;
1317
1318 p = (proc_t)get_bsdtask_info(task);
1319 if (p == NULL)
1320 goto fail;
1321 pid = proc_pid(p);
1322 proc_name(pid, procname, (int)sizeof(procname));
1323
1324 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1325 goto fail;
1326
1327 if (len <= offsetof(CS_GenericBlob, data))
1328 goto fail;
1329
1330 /*
1331 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1332 * we'll try to parse in the kernel.
1333 */
1334 len -= offsetof(CS_GenericBlob, data);
1335 if (len > MAX_ENTITLEMENTS_LEN) {
1336 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1337 goto fail;
1338 }
1339
1340 /*
1341 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1342 * what is stored in the entitlements blob. Copy the string and
1343 * terminate it.
1344 */
1345 entitlements_data = (char *)IOMalloc(len + 1);
1346 if (entitlements_data == NULL)
1347 goto fail;
1348 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1349 entitlements_data[len] = '\0';
1350
1351 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1352 if (errorString != NULL) {
1353 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1354 goto fail;
1355 }
1356 if (entitlements_obj == NULL)
1357 goto fail;
1358
1359 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1360 if (entitlements == NULL)
1361 goto fail;
1362
1363 /* Fetch the entitlement value from the dictionary. */
1364 value = entitlements->getObject(entitlement);
1365 if (value != NULL)
1366 value->retain();
1367
1368 fail:
1369 if (entitlements_data != NULL)
1370 IOFree(entitlements_data, len + 1);
1371 if (entitlements_obj != NULL)
1372 entitlements_obj->release();
1373 if (errorString != NULL)
1374 errorString->release();
1375 return value;
1376 }
1377
1378 bool IOUserClient::init()
1379 {
1380 if (getPropertyTable() || super::init())
1381 return reserve();
1382
1383 return false;
1384 }
1385
1386 bool IOUserClient::init(OSDictionary * dictionary)
1387 {
1388 if (getPropertyTable() || super::init(dictionary))
1389 return reserve();
1390
1391 return false;
1392 }
1393
1394 bool IOUserClient::initWithTask(task_t owningTask,
1395 void * securityID,
1396 UInt32 type )
1397 {
1398 if (getPropertyTable() || super::init())
1399 return reserve();
1400
1401 return false;
1402 }
1403
1404 bool IOUserClient::initWithTask(task_t owningTask,
1405 void * securityID,
1406 UInt32 type,
1407 OSDictionary * properties )
1408 {
1409 bool ok;
1410
1411 ok = super::init( properties );
1412 ok &= initWithTask( owningTask, securityID, type );
1413
1414 return( ok );
1415 }
1416
1417 bool IOUserClient::reserve()
1418 {
1419 if(!reserved) {
1420 reserved = IONew(ExpansionData, 1);
1421 if (!reserved) {
1422 return false;
1423 }
1424 }
1425 setTerminateDefer(NULL, true);
1426 IOStatisticsRegisterCounter();
1427
1428 return true;
1429 }
1430
1431 struct IOUserClientOwner
1432 {
1433 task_t task;
1434 queue_chain_t taskLink;
1435 IOUserClient * uc;
1436 queue_chain_t ucLink;
1437 };
1438
1439 IOReturn
1440 IOUserClient::registerOwner(task_t task)
1441 {
1442 IOUserClientOwner * owner;
1443 IOReturn ret;
1444 bool newOwner;
1445
1446 IOLockLock(gIOUserClientOwnersLock);
1447
1448 newOwner = true;
1449 ret = kIOReturnSuccess;
1450
1451 if (!owners.next) queue_init(&owners);
1452 else
1453 {
1454 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1455 {
1456 if (task != owner->task) continue;
1457 newOwner = false;
1458 break;
1459 }
1460 }
1461 if (newOwner)
1462 {
1463 owner = IONew(IOUserClientOwner, 1);
1464 if (!owner) ret = kIOReturnNoMemory;
1465 else
1466 {
1467 owner->task = task;
1468 owner->uc = this;
1469 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1470 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1471 }
1472 }
1473
1474 IOLockUnlock(gIOUserClientOwnersLock);
1475
1476 return (ret);
1477 }
1478
1479 void
1480 IOUserClient::noMoreSenders(void)
1481 {
1482 IOUserClientOwner * owner;
1483
1484 IOLockLock(gIOUserClientOwnersLock);
1485
1486 if (owners.next)
1487 {
1488 while (!queue_empty(&owners))
1489 {
1490 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1491 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1492 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1493 IODelete(owner, IOUserClientOwner, 1);
1494 }
1495 owners.next = owners.prev = NULL;
1496 }
1497
1498 IOLockUnlock(gIOUserClientOwnersLock);
1499 }
1500
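// Called on task termination: detaches the task from every IOUserClient it
// owns and delivers clientDied() to any client left with no owners.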
1501 extern "C" kern_return_t
1502 iokit_task_terminate(task_t task)
1503 {
1504 IOUserClientOwner * owner;
1505 IOUserClient * dead;
1506 IOUserClient * uc;
1507 queue_head_t * taskque;
1508
1509 IOLockLock(gIOUserClientOwnersLock);
1510
1511 taskque = task_io_user_clients(task);
1512 dead = NULL;
1513 while (!queue_empty(taskque))
1514 {
1515 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1516 uc = owner->uc;
1517 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1518 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1519 if (queue_empty(&uc->owners))
1520 {
1521 uc->retain();
1522 IOLog("destroying out of band connect for %s\n", uc->getName());
1523 // now using the uc queue head as a singly linked queue,
1524 // leaving .next as NULL to mark it empty
1525 uc->owners.next = NULL;
1526 uc->owners.prev = (queue_entry_t) dead;
1527 dead = uc;
1528 }
1529 IODelete(owner, IOUserClientOwner, 1);
1530 }
1531
1532 IOLockUnlock(gIOUserClientOwnersLock);
1533
1534 while (dead)
1535 {
1536 uc = dead;
1537 dead = (IOUserClient *)(void *) dead->owners.prev;
1538 uc->owners.prev = NULL;
1539 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1540 uc->release();
1541 }
1542
1543 return (KERN_SUCCESS);
1544 }
1545
1546 void IOUserClient::free()
1547 {
1548 if( mappings) mappings->release();
1549 if (lock) IOLockFree(lock);
1550
1551 IOStatisticsUnregisterCounter();
1552
1553 assert(!owners.next);
1554 assert(!owners.prev);
1555
1556 if (reserved) IODelete(reserved, ExpansionData, 1);
1557
1558 super::free();
1559 }
1560
1561 IOReturn IOUserClient::clientDied( void )
1562 {
1563 IOReturn ret = kIOReturnNotReady;
1564
1565 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1566 {
1567 ret = clientClose();
1568 }
1569
1570 return (ret);
1571 }
1572
1573 IOReturn IOUserClient::clientClose( void )
1574 {
1575 return( kIOReturnUnsupported );
1576 }
1577
1578 IOService * IOUserClient::getService( void )
1579 {
1580 return( 0 );
1581 }
1582
1583 IOReturn IOUserClient::registerNotificationPort(
1584 mach_port_t /* port */,
1585 UInt32 /* type */,
1586 UInt32 /* refCon */)
1587 {
1588 return( kIOReturnUnsupported);
1589 }
1590
1591 IOReturn IOUserClient::registerNotificationPort(
1592 mach_port_t port,
1593 UInt32 type,
1594 io_user_reference_t refCon)
1595 {
1596 return (registerNotificationPort(port, type, (UInt32) refCon));
1597 }
1598
1599 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1600 semaphore_t * semaphore )
1601 {
1602 return( kIOReturnUnsupported);
1603 }
1604
1605 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1606 {
1607 return( kIOReturnUnsupported);
1608 }
1609
1610 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1611 IOOptionBits * options,
1612 IOMemoryDescriptor ** memory )
1613 {
1614 return( kIOReturnUnsupported);
1615 }
1616
1617 #if !__LP64__
1618 IOMemoryMap * IOUserClient::mapClientMemory(
1619 IOOptionBits type,
1620 task_t task,
1621 IOOptionBits mapFlags,
1622 IOVirtualAddress atAddress )
1623 {
1624 return (NULL);
1625 }
1626 #endif
1627
1628 IOMemoryMap * IOUserClient::mapClientMemory64(
1629 IOOptionBits type,
1630 task_t task,
1631 IOOptionBits mapFlags,
1632 mach_vm_address_t atAddress )
1633 {
1634 IOReturn err;
1635 IOOptionBits options = 0;
1636 IOMemoryDescriptor * memory = 0;
1637 IOMemoryMap * map = 0;
1638
1639 err = clientMemoryForType( (UInt32) type, &options, &memory );
1640
1641 if( memory && (kIOReturnSuccess == err)) {
1642
1643 FAKE_STACK_FRAME(getMetaClass());
1644
1645 options = (options & ~kIOMapUserOptionsMask)
1646 | (mapFlags & kIOMapUserOptionsMask);
1647 map = memory->createMappingInTask( task, atAddress, options );
1648 memory->release();
1649
1650 FAKE_STACK_FRAME_END();
1651 }
1652
1653 return( map );
1654 }
1655
1656 IOReturn IOUserClient::exportObjectToClient(task_t task,
1657 OSObject *obj, io_object_t *clientObj)
1658 {
1659 mach_port_name_t name;
1660
1661 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1662
1663 *(mach_port_name_t *)clientObj = name;
1664 return kIOReturnSuccess;
1665 }
1666
1667 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1668 {
1669 return( 0 );
1670 }
1671
1672 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1673 {
1674 return( 0 );
1675 }
1676
1677 IOExternalTrap * IOUserClient::
1678 getExternalTrapForIndex(UInt32 index)
1679 {
1680 return NULL;
1681 }
1682
1683 #pragma clang diagnostic push
1684 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1685
1686 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1687 // functions can break clients of kexts implementing getExternalMethodForIndex()
1688 IOExternalMethod * IOUserClient::
1689 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1690 {
1691 IOExternalMethod *method = getExternalMethodForIndex(index);
1692
1693 if (method)
1694 *targetP = (IOService *) method->object;
1695
1696 return method;
1697 }
1698
1699 IOExternalAsyncMethod * IOUserClient::
1700 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1701 {
1702 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1703
1704 if (method)
1705 *targetP = (IOService *) method->object;
1706
1707 return method;
1708 }
1709
1710 IOExternalTrap * IOUserClient::
1711 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1712 {
1713 IOExternalTrap *trap = getExternalTrapForIndex(index);
1714
1715 if (trap) {
1716 *targetP = trap->object;
1717 }
1718
1719 return trap;
1720 }
1721 #pragma clang diagnostic pop
1722
1723 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1724 {
1725 mach_port_t port;
1726 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1727
1728 if (MACH_PORT_NULL != port)
1729 iokit_release_port_send(port);
1730
1731 return (kIOReturnSuccess);
1732 }
1733
1734 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1735 {
1736 if (MACH_PORT_NULL != port)
1737 iokit_release_port_send(port);
1738
1739 return (kIOReturnSuccess);
1740 }
1741
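// Widen a 32-bit async reference and argument list to the 64-bit form and
// deliver it via sendAsyncResult64().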
1742 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1743 IOReturn result, void *args[], UInt32 numArgs)
1744 {
1745 OSAsyncReference64 reference64;
1746 io_user_reference_t args64[kMaxAsyncArgs];
1747 unsigned int idx;
1748
1749 if (numArgs > kMaxAsyncArgs)
1750 return kIOReturnMessageTooLarge;
1751
1752 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1753 reference64[idx] = REF64(reference[idx]);
1754
1755 for (idx = 0; idx < numArgs; idx++)
1756 args64[idx] = REF64(args[idx]);
1757
1758 return (sendAsyncResult64(reference64, result, args64, numArgs));
1759 }
1760
1761 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1762 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1763 {
1764 return _sendAsyncResult64(reference, result, args, numArgs, options);
1765 }
1766
1767 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1768 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1769 {
1770 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1771 }
1772
1773 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1774 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1775 {
1776 struct ReplyMsg
1777 {
1778 mach_msg_header_t msgHdr;
1779 union
1780 {
1781 struct
1782 {
1783 OSNotificationHeader notifyHdr;
1784 IOAsyncCompletionContent asyncContent;
1785 uint32_t args[kMaxAsyncArgs];
1786 } msg32;
1787 struct
1788 {
1789 OSNotificationHeader64 notifyHdr;
1790 IOAsyncCompletionContent asyncContent;
1791 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1792 } msg64;
1793 } m;
1794 };
1795 ReplyMsg replyMsg;
1796 mach_port_t replyPort;
1797 kern_return_t kr;
1798
1799 // If no reply port, do nothing.
1800 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1801 if (replyPort == MACH_PORT_NULL)
1802 return kIOReturnSuccess;
1803
1804 if (numArgs > kMaxAsyncArgs)
1805 return kIOReturnMessageTooLarge;
1806
1807 bzero(&replyMsg, sizeof(replyMsg));
1808 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1809 0 /*local*/);
1810 replyMsg.msgHdr.msgh_remote_port = replyPort;
1811 replyMsg.msgHdr.msgh_local_port = 0;
1812 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1813 if (kIOUCAsync64Flag & reference[0])
1814 {
1815 replyMsg.msgHdr.msgh_size =
1816 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1817 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1818 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1819 + numArgs * sizeof(io_user_reference_t);
1820 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1821 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1822
1823 replyMsg.m.msg64.asyncContent.result = result;
1824 if (numArgs)
1825 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1826 }
1827 else
1828 {
1829 unsigned int idx;
1830
1831 replyMsg.msgHdr.msgh_size =
1832 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1833 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1834
1835 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1836 + numArgs * sizeof(uint32_t);
1837 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1838
1839 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1840 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1841
1842 replyMsg.m.msg32.asyncContent.result = result;
1843
1844 for (idx = 0; idx < numArgs; idx++)
1845 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1846 }
1847
1848 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1849 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1850 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1851 } else {
1852 /* Fail on full queue. */
1853 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1854 replyMsg.msgHdr.msgh_size);
1855 }
1856 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1857 {
1858 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1859 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1860 }
1861 return kr;
1862 }
1863
1864
1865 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1866
1867 extern "C" {
1868
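// CHECK(cls, obj, out) declares 'out' and fails with kIOReturnBadArgument when
// 'obj' is not a 'cls'; CHECKLOCKED additionally unwraps an IOUserIterator.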
1869 #define CHECK(cls,obj,out) \
1870 cls * out; \
1871 if( !(out = OSDynamicCast( cls, obj))) \
1872 return( kIOReturnBadArgument )
1873
1874 #define CHECKLOCKED(cls,obj,out) \
1875 IOUserIterator * oIter; \
1876 cls * out; \
1877 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1878 return (kIOReturnBadArgument); \
1879 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1880 return (kIOReturnBadArgument)
1881
1882 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1883
1884 // Create a vm_map_copy_t or kalloc'ed data for memory
1885 // to be copied out. ipc will free after the copyout.
1886
1887 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1888 io_buf_ptr_t * buf )
1889 {
1890 kern_return_t err;
1891 vm_map_copy_t copy;
1892
1893 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1894 false /* src_destroy */, &copy);
1895
1896 assert( err == KERN_SUCCESS );
1897 if( err == KERN_SUCCESS )
1898 *buf = (char *) copy;
1899
1900 return( err );
1901 }
1902
1903 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1904
1905 /* Routine io_server_version */
1906 kern_return_t is_io_server_version(
1907 mach_port_t master_port,
1908 uint64_t *version)
1909 {
1910 *version = IOKIT_SERVER_VERSION;
1911 return (kIOReturnSuccess);
1912 }
1913
1914 /* Routine io_object_get_class */
1915 kern_return_t is_io_object_get_class(
1916 io_object_t object,
1917 io_name_t className )
1918 {
1919 const OSMetaClass* my_obj = NULL;
1920
1921 if( !object)
1922 return( kIOReturnBadArgument );
1923
1924 my_obj = object->getMetaClass();
1925 if (!my_obj) {
1926 return (kIOReturnNotFound);
1927 }
1928
1929 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1930
1931 return( kIOReturnSuccess );
1932 }
1933
1934 /* Routine io_object_get_superclass */
1935 kern_return_t is_io_object_get_superclass(
1936 mach_port_t master_port,
1937 io_name_t obj_name,
1938 io_name_t class_name)
1939 {
1940 IOReturn ret;
1941 const OSMetaClass * meta;
1942 const OSMetaClass * super;
1943 const OSSymbol * name;
1944 const char * cstr;
1945
1946 if (!obj_name || !class_name) return (kIOReturnBadArgument);
1947 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1948
1949 ret = kIOReturnNotFound;
1950 meta = 0;
1951 do
1952 {
1953 name = OSSymbol::withCString(obj_name);
1954 if (!name) break;
1955 meta = OSMetaClass::copyMetaClassWithName(name);
1956 if (!meta) break;
1957 super = meta->getSuperClass();
1958 if (!super) break;
1959 cstr = super->getClassName();
1960 if (!cstr) break;
1961 strlcpy(class_name, cstr, sizeof(io_name_t));
1962 ret = kIOReturnSuccess;
1963 }
1964 while (false);
1965
1966 OSSafeReleaseNULL(name);
1967 if (meta) meta->releaseMetaClass();
1968
1969 return (ret);
1970 }
1971
1972 /* Routine io_object_get_bundle_identifier */
1973 kern_return_t is_io_object_get_bundle_identifier(
1974 mach_port_t master_port,
1975 io_name_t obj_name,
1976 io_name_t bundle_name)
1977 {
1978 IOReturn ret;
1979 const OSMetaClass * meta;
1980 const OSSymbol * name;
1981 const OSSymbol * identifier;
1982 const char * cstr;
1983
1984 if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
1985 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1986
1987 ret = kIOReturnNotFound;
1988 meta = 0;
1989 do
1990 {
1991 name = OSSymbol::withCString(obj_name);
1992 if (!name) break;
1993 meta = OSMetaClass::copyMetaClassWithName(name);
1994 if (!meta) break;
1995 identifier = meta->getKmodName();
1996 if (!identifier) break;
1997 cstr = identifier->getCStringNoCopy();
1998 if (!cstr) break;
1999 strlcpy(bundle_name, cstr, sizeof(io_name_t));
2000 ret = kIOReturnSuccess;
2001 }
2002 while (false);
2003
2004 OSSafeReleaseNULL(name);
2005 if (meta) meta->releaseMetaClass();
2006
2007 return (ret);
2008 }
2009
2010 /* Routine io_object_conforms_to */
2011 kern_return_t is_io_object_conforms_to(
2012 io_object_t object,
2013 io_name_t className,
2014 boolean_t *conforms )
2015 {
2016 if( !object)
2017 return( kIOReturnBadArgument );
2018
2019 *conforms = (0 != object->metaCast( className ));
2020
2021 return( kIOReturnSuccess );
2022 }
2023
2024 /* Routine io_object_get_retain_count */
2025 kern_return_t is_io_object_get_retain_count(
2026 io_object_t object,
2027 uint32_t *retainCount )
2028 {
2029 if( !object)
2030 return( kIOReturnBadArgument );
2031
2032 *retainCount = object->getRetainCount();
2033 return( kIOReturnSuccess );
2034 }
2035
2036 /* Routine io_iterator_next */
2037 kern_return_t is_io_iterator_next(
2038 io_object_t iterator,
2039 io_object_t *object )
2040 {
2041 IOReturn ret;
2042 OSObject * obj;
2043
2044 CHECK( OSIterator, iterator, iter );
2045
2046 obj = iter->getNextObject();
2047 if( obj) {
2048 obj->retain();
2049 *object = obj;
2050 ret = kIOReturnSuccess;
2051 } else
2052 ret = kIOReturnNoDevice;
2053
2054 return (ret);
2055 }
2056
2057 /* Routine io_iterator_reset */
2058 kern_return_t is_io_iterator_reset(
2059 io_object_t iterator )
2060 {
2061 CHECK( OSIterator, iterator, iter );
2062
2063 iter->reset();
2064
2065 return( kIOReturnSuccess );
2066 }
2067
2068 /* Routine io_iterator_is_valid */
2069 kern_return_t is_io_iterator_is_valid(
2070 io_object_t iterator,
2071 boolean_t *is_valid )
2072 {
2073 CHECK( OSIterator, iterator, iter );
2074
2075 *is_valid = iter->isValid();
2076
2077 return( kIOReturnSuccess );
2078 }
2079
2080
2081 static kern_return_t internal_io_service_match_property_table(
2082 io_service_t _service,
2083 const char * matching,
2084 mach_msg_type_number_t matching_size,
2085 boolean_t *matches)
2086 {
2087 CHECK( IOService, _service, service );
2088
2089 kern_return_t kr;
2090 OSObject * obj;
2091 OSDictionary * dict;
2092
2093 assert(matching_size);
2094 obj = OSUnserializeXML(matching, matching_size);
2095
2096 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2097 *matches = service->passiveMatch( dict );
2098 kr = kIOReturnSuccess;
2099 } else
2100 kr = kIOReturnBadArgument;
2101
2102 if( obj)
2103 obj->release();
2104
2105 return( kr );
2106 }
2107
2108 /* Routine io_service_match_property_table */
2109 kern_return_t is_io_service_match_property_table(
2110 io_service_t service,
2111 io_string_t matching,
2112 boolean_t *matches )
2113 {
2114 return (kIOReturnUnsupported);
2115 }
2116
2117
2118 /* Routine io_service_match_property_table_ool */
2119 kern_return_t is_io_service_match_property_table_ool(
2120 io_object_t service,
2121 io_buf_ptr_t matching,
2122 mach_msg_type_number_t matchingCnt,
2123 kern_return_t *result,
2124 boolean_t *matches )
2125 {
2126 kern_return_t kr;
2127 vm_offset_t data;
2128 vm_map_offset_t map_data;
2129
2130 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2131 data = CAST_DOWN(vm_offset_t, map_data);
2132
2133 if( KERN_SUCCESS == kr) {
2134 // must return success after vm_map_copyout() succeeds
2135 *result = internal_io_service_match_property_table(service,
2136 (const char *)data, matchingCnt, matches );
2137 vm_deallocate( kernel_map, data, matchingCnt );
2138 }
2139
2140 return( kr );
2141 }
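/*
 * For the *_ool variants, once vm_map_copyout() has succeeded the vm_map_copy_t
 * supplied by MIG has been consumed and its data lives in kernel_map.  The
 * routine therefore has to reply KERN_SUCCESS so that the generated MIG stub
 * does not attempt to dispose of the already-consumed copy object again; the
 * real IOKit status travels back out-of-band in *result, and the copied-out
 * buffer is released here with vm_deallocate().
 */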
2142
2143 /* Routine io_service_match_property_table_bin */
2144 kern_return_t is_io_service_match_property_table_bin(
2145 io_object_t service,
2146 io_struct_inband_t matching,
2147 mach_msg_type_number_t matchingCnt,
2148 boolean_t *matches)
2149 {
2150 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2151 }
2152
2153 static kern_return_t internal_io_service_get_matching_services(
2154 mach_port_t master_port,
2155 const char * matching,
2156 mach_msg_type_number_t matching_size,
2157 io_iterator_t *existing )
2158 {
2159 kern_return_t kr;
2160 OSObject * obj;
2161 OSDictionary * dict;
2162
2163 if( master_port != master_device_port)
2164 return( kIOReturnNotPrivileged);
2165
2166 assert(matching_size);
2167 obj = OSUnserializeXML(matching, matching_size);
2168
2169 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2170 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2171 kr = kIOReturnSuccess;
2172 } else
2173 kr = kIOReturnBadArgument;
2174
2175 if( obj)
2176 obj->release();
2177
2178 return( kr );
2179 }
2180
2181 /* Routine io_service_get_matching_services */
2182 kern_return_t is_io_service_get_matching_services(
2183 mach_port_t master_port,
2184 io_string_t matching,
2185 io_iterator_t *existing )
2186 {
2187 return (kIOReturnUnsupported);
2188 }
2189
2190 /* Routine io_service_get_matching_services_ool */
2191 kern_return_t is_io_service_get_matching_services_ool(
2192 mach_port_t master_port,
2193 io_buf_ptr_t matching,
2194 mach_msg_type_number_t matchingCnt,
2195 kern_return_t *result,
2196 io_object_t *existing )
2197 {
2198 kern_return_t kr;
2199 vm_offset_t data;
2200 vm_map_offset_t map_data;
2201
2202 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2203 data = CAST_DOWN(vm_offset_t, map_data);
2204
2205 if( KERN_SUCCESS == kr) {
2206 // must return success after vm_map_copyout() succeeds
2207 // and mig will copy out objects on success
2208 *existing = 0;
2209 *result = internal_io_service_get_matching_services(master_port,
2210 (const char *) data, matchingCnt, existing);
2211 vm_deallocate( kernel_map, data, matchingCnt );
2212 }
2213
2214 return( kr );
2215 }
2216
2217 /* Routine io_service_get_matching_services_bin */
2218 kern_return_t is_io_service_get_matching_services_bin(
2219 mach_port_t master_port,
2220 io_struct_inband_t matching,
2221 mach_msg_type_number_t matchingCnt,
2222 io_object_t *existing)
2223 {
2224 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2225 }
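/*
 * A minimal user-space sketch of how a matching dictionary reaches these
 * routines (the class name is illustrative only).  IOServiceGetMatchingServices()
 * serializes the dictionary, typically into the _bin routine above when the
 * serialized form fits inband, with the _ool path used for larger payloads; it
 * consumes one reference to the dictionary:
 *
 *   CFMutableDictionaryRef matching = IOServiceMatching("IOUSBHostDevice");
 *   io_iterator_t iter = IO_OBJECT_NULL;
 *   if (KERN_SUCCESS == IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                                                    matching, &iter)) {
 *       io_service_t service;
 *       while ((service = IOIteratorNext(iter)))
 *           IOObjectRelease(service);
 *       IOObjectRelease(iter);
 *   }
 */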
2226
2227
2228 static kern_return_t internal_io_service_get_matching_service(
2229 mach_port_t master_port,
2230 const char * matching,
2231 mach_msg_type_number_t matching_size,
2232 io_service_t *service )
2233 {
2234 kern_return_t kr;
2235 OSObject * obj;
2236 OSDictionary * dict;
2237
2238 if( master_port != master_device_port)
2239 return( kIOReturnNotPrivileged);
2240
2241 assert(matching_size);
2242 obj = OSUnserializeXML(matching, matching_size);
2243
2244 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2245 *service = IOService::copyMatchingService( dict );
2246 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2247 } else
2248 kr = kIOReturnBadArgument;
2249
2250 if( obj)
2251 obj->release();
2252
2253 return( kr );
2254 }
2255
2256 /* Routine io_service_get_matching_service */
2257 kern_return_t is_io_service_get_matching_service(
2258 mach_port_t master_port,
2259 io_string_t matching,
2260 io_service_t *service )
2261 {
2262 return (kIOReturnUnsupported);
2263 }
2264
2265 /* Routine io_service_get_matching_service_ool */
2266 kern_return_t is_io_service_get_matching_service_ool(
2267 mach_port_t master_port,
2268 io_buf_ptr_t matching,
2269 mach_msg_type_number_t matchingCnt,
2270 kern_return_t *result,
2271 io_object_t *service )
2272 {
2273 kern_return_t kr;
2274 vm_offset_t data;
2275 vm_map_offset_t map_data;
2276
2277 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2278 data = CAST_DOWN(vm_offset_t, map_data);
2279
2280 if( KERN_SUCCESS == kr) {
2281 // must return success after vm_map_copyout() succeeds
2282 // and mig will copy out objects on success
2283 *service = 0;
2284 *result = internal_io_service_get_matching_service(master_port,
2285 (const char *) data, matchingCnt, service );
2286 vm_deallocate( kernel_map, data, matchingCnt );
2287 }
2288
2289 return( kr );
2290 }
2291
2292 /* Routine io_service_get_matching_service_bin */
2293 kern_return_t is_io_service_get_matching_service_bin(
2294 mach_port_t master_port,
2295 io_struct_inband_t matching,
2296 mach_msg_type_number_t matchingCnt,
2297 io_object_t *service)
2298 {
2299 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2300 }
2301
2302 static kern_return_t internal_io_service_add_notification(
2303 mach_port_t master_port,
2304 io_name_t notification_type,
2305 const char * matching,
2306 size_t matching_size,
2307 mach_port_t port,
2308 void * reference,
2309 vm_size_t referenceSize,
2310 bool client64,
2311 io_object_t * notification )
2312 {
2313 IOServiceUserNotification * userNotify = 0;
2314 IONotifier * notify = 0;
2315 const OSSymbol * sym;
2316 OSDictionary * dict;
2317 IOReturn err;
2318 unsigned long int userMsgType;
2319
2320 if( master_port != master_device_port)
2321 return( kIOReturnNotPrivileged);
2322
2323 do {
2324 err = kIOReturnNoResources;
2325
2326 if (matching_size > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2327
2328 if( !(sym = OSSymbol::withCString( notification_type )))
2329 err = kIOReturnNoResources;
2330
2331 assert(matching_size);
2332 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2333 if (!dict) {
2334 err = kIOReturnBadArgument;
2335 continue;
2336 }
2337
2338 if( (sym == gIOPublishNotification)
2339 || (sym == gIOFirstPublishNotification))
2340 userMsgType = kIOServicePublishNotificationType;
2341 else if( (sym == gIOMatchedNotification)
2342 || (sym == gIOFirstMatchNotification))
2343 userMsgType = kIOServiceMatchedNotificationType;
2344 else if ((sym == gIOTerminatedNotification)
2345 || (sym == gIOWillTerminateNotification))
2346 userMsgType = kIOServiceTerminatedNotificationType;
2347 else
2348 userMsgType = kLastIOKitNotificationType;
2349
2350 userNotify = new IOServiceUserNotification;
2351
2352 if( userNotify && !userNotify->init( port, userMsgType,
2353 reference, referenceSize, client64)) {
2354 userNotify->release();
2355 userNotify = 0;
2356 }
2357 if( !userNotify)
2358 continue;
2359
2360 notify = IOService::addMatchingNotification( sym, dict,
2361 &userNotify->_handler, userNotify );
2362 if( notify) {
2363 *notification = userNotify;
2364 userNotify->setNotification( notify );
2365 err = kIOReturnSuccess;
2366 } else
2367 err = kIOReturnUnsupported;
2368
2369 } while( false );
2370
2371 if ((kIOReturnSuccess != err) && userNotify)
2372 {
2373 userNotify->invalidatePort();
2374 userNotify->release();
2375 userNotify = 0;
2376 }
2377
2378 if( sym)
2379 sym->release();
2380 if( dict)
2381 dict->release();
2382
2383 return( err );
2384 }
2385
2386
2387 /* Routine io_service_add_notification */
2388 kern_return_t is_io_service_add_notification(
2389 mach_port_t master_port,
2390 io_name_t notification_type,
2391 io_string_t matching,
2392 mach_port_t port,
2393 io_async_ref_t reference,
2394 mach_msg_type_number_t referenceCnt,
2395 io_object_t * notification )
2396 {
2397 return (kIOReturnUnsupported);
2398 }
2399
2400 /* Routine io_service_add_notification_64 */
2401 kern_return_t is_io_service_add_notification_64(
2402 mach_port_t master_port,
2403 io_name_t notification_type,
2404 io_string_t matching,
2405 mach_port_t wake_port,
2406 io_async_ref64_t reference,
2407 mach_msg_type_number_t referenceCnt,
2408 io_object_t *notification )
2409 {
2410 return (kIOReturnUnsupported);
2411 }
2412
2413 /* Routine io_service_add_notification_bin */
2414 kern_return_t is_io_service_add_notification_bin
2415 (
2416 mach_port_t master_port,
2417 io_name_t notification_type,
2418 io_struct_inband_t matching,
2419 mach_msg_type_number_t matchingCnt,
2420 mach_port_t wake_port,
2421 io_async_ref_t reference,
2422 mach_msg_type_number_t referenceCnt,
2423 io_object_t *notification)
2424 {
2425 return (internal_io_service_add_notification(master_port, notification_type,
2426 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2427 false, notification));
2428 }
2429
2430 /* Routine io_service_add_notification_bin_64 */
2431 kern_return_t is_io_service_add_notification_bin_64
2432 (
2433 mach_port_t master_port,
2434 io_name_t notification_type,
2435 io_struct_inband_t matching,
2436 mach_msg_type_number_t matchingCnt,
2437 mach_port_t wake_port,
2438 io_async_ref64_t reference,
2439 mach_msg_type_number_t referenceCnt,
2440 io_object_t *notification)
2441 {
2442 return (internal_io_service_add_notification(master_port, notification_type,
2443 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2444 true, notification));
2445 }
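/*
 * A hedged user-space sketch of the client side of the notification routines
 * above (the matching class and callback name are illustrative).  The matching
 * dictionary reference is consumed, and the notification only starts firing
 * once the returned iterator has been drained:
 *
 *   IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
 *   io_iterator_t added = IO_OBJECT_NULL;
 *   // MatchedCallback is an IOServiceMatchingCallback supplied by the caller
 *   kern_return_t kr = IOServiceAddMatchingNotification(port,
 *       kIOFirstMatchNotification, IOServiceMatching("IOMedia"),
 *       MatchedCallback, NULL, &added);
 *   // iterate `added` once to arm the notification
 */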
2446
2447 static kern_return_t internal_io_service_add_notification_ool(
2448 mach_port_t master_port,
2449 io_name_t notification_type,
2450 io_buf_ptr_t matching,
2451 mach_msg_type_number_t matchingCnt,
2452 mach_port_t wake_port,
2453 void * reference,
2454 vm_size_t referenceSize,
2455 bool client64,
2456 kern_return_t *result,
2457 io_object_t *notification )
2458 {
2459 kern_return_t kr;
2460 vm_offset_t data;
2461 vm_map_offset_t map_data;
2462
2463 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2464 data = CAST_DOWN(vm_offset_t, map_data);
2465
2466 if( KERN_SUCCESS == kr) {
2467 // must return success after vm_map_copyout() succeeds
2468 // and mig will copy out objects on success
2469 *notification = 0;
2470 *result = internal_io_service_add_notification( master_port, notification_type,
2471 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2472 vm_deallocate( kernel_map, data, matchingCnt );
2473 }
2474
2475 return( kr );
2476 }
2477
2478 /* Routine io_service_add_notification_ool */
2479 kern_return_t is_io_service_add_notification_ool(
2480 mach_port_t master_port,
2481 io_name_t notification_type,
2482 io_buf_ptr_t matching,
2483 mach_msg_type_number_t matchingCnt,
2484 mach_port_t wake_port,
2485 io_async_ref_t reference,
2486 mach_msg_type_number_t referenceCnt,
2487 kern_return_t *result,
2488 io_object_t *notification )
2489 {
2490 return (internal_io_service_add_notification_ool(master_port, notification_type,
2491 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2492 false, result, notification));
2493 }
2494
2495 /* Routine io_service_add_notification_ool_64 */
2496 kern_return_t is_io_service_add_notification_ool_64(
2497 mach_port_t master_port,
2498 io_name_t notification_type,
2499 io_buf_ptr_t matching,
2500 mach_msg_type_number_t matchingCnt,
2501 mach_port_t wake_port,
2502 io_async_ref64_t reference,
2503 mach_msg_type_number_t referenceCnt,
2504 kern_return_t *result,
2505 io_object_t *notification )
2506 {
2507 return (internal_io_service_add_notification_ool(master_port, notification_type,
2508 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2509 true, result, notification));
2510 }
2511
2512 /* Routine io_service_add_notification_old */
2513 kern_return_t is_io_service_add_notification_old(
2514 mach_port_t master_port,
2515 io_name_t notification_type,
2516 io_string_t matching,
2517 mach_port_t port,
2518 // for binary compatibility reasons, this must be natural_t for ILP32
2519 natural_t ref,
2520 io_object_t * notification )
2521 {
2522 return( is_io_service_add_notification( master_port, notification_type,
2523 matching, port, &ref, 1, notification ));
2524 }
2525
2526
2527 static kern_return_t internal_io_service_add_interest_notification(
2528 io_object_t _service,
2529 io_name_t type_of_interest,
2530 mach_port_t port,
2531 void * reference,
2532 vm_size_t referenceSize,
2533 bool client64,
2534 io_object_t * notification )
2535 {
2536
2537 IOServiceMessageUserNotification * userNotify = 0;
2538 IONotifier * notify = 0;
2539 const OSSymbol * sym;
2540 IOReturn err;
2541
2542 CHECK( IOService, _service, service );
2543
2544 err = kIOReturnNoResources;
2545 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2546
2547 userNotify = new IOServiceMessageUserNotification;
2548
2549 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2550 reference, referenceSize,
2551 kIOUserNotifyMaxMessageSize,
2552 client64 )) {
2553 userNotify->release();
2554 userNotify = 0;
2555 }
2556 if( !userNotify)
2557 continue;
2558
2559 notify = service->registerInterest( sym,
2560 &userNotify->_handler, userNotify );
2561 if( notify) {
2562 *notification = userNotify;
2563 userNotify->setNotification( notify );
2564 err = kIOReturnSuccess;
2565 } else
2566 err = kIOReturnUnsupported;
2567
2568 sym->release();
2569
2570 } while( false );
2571
2572 if ((kIOReturnSuccess != err) && userNotify)
2573 {
2574 userNotify->invalidatePort();
2575 userNotify->release();
2576 userNotify = 0;
2577 }
2578
2579 return( err );
2580 }
2581
2582 /* Routine io_service_add_interest_notification */
2583 kern_return_t is_io_service_add_interest_notification(
2584 io_object_t service,
2585 io_name_t type_of_interest,
2586 mach_port_t port,
2587 io_async_ref_t reference,
2588 mach_msg_type_number_t referenceCnt,
2589 io_object_t * notification )
2590 {
2591 return (internal_io_service_add_interest_notification(service, type_of_interest,
2592 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2593 }
2594
2595 /* Routine io_service_add_interest_notification_64 */
2596 kern_return_t is_io_service_add_interest_notification_64(
2597 io_object_t service,
2598 io_name_t type_of_interest,
2599 mach_port_t wake_port,
2600 io_async_ref64_t reference,
2601 mach_msg_type_number_t referenceCnt,
2602 io_object_t *notification )
2603 {
2604 return (internal_io_service_add_interest_notification(service, type_of_interest,
2605 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2606 }
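/*
 * User-space counterpart of the interest-notification routines above, as a
 * hedged sketch (the callback name is illustrative; notifyPort and service
 * come from the caller):
 *
 *   io_object_t note = IO_OBJECT_NULL;
 *   // InterestCallback is an IOServiceInterestCallback supplied by the caller
 *   kern_return_t kr = IOServiceAddInterestNotification(notifyPort, service,
 *       kIOGeneralInterest, InterestCallback, NULL, &note);
 *
 * The wake_port handed down here backs the notification port, and the reference
 * words are echoed back as the async reference in each delivered message.
 */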
2607
2608
2609 /* Routine io_service_acknowledge_notification */
2610 kern_return_t is_io_service_acknowledge_notification(
2611 io_object_t _service,
2612 natural_t notify_ref,
2613 natural_t response )
2614 {
2615 CHECK( IOService, _service, service );
2616
2617 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2618 (IOOptionBits) response ));
2619
2620 }
2621
2622 /* Routine io_connect_get_notification_semaphore */
2623 kern_return_t is_io_connect_get_notification_semaphore(
2624 io_connect_t connection,
2625 natural_t notification_type,
2626 semaphore_t *semaphore )
2627 {
2628 CHECK( IOUserClient, connection, client );
2629
2630 IOStatisticsClientCall();
2631 return( client->getNotificationSemaphore( (UInt32) notification_type,
2632 semaphore ));
2633 }
2634
2635 /* Routine io_registry_get_root_entry */
2636 kern_return_t is_io_registry_get_root_entry(
2637 mach_port_t master_port,
2638 io_object_t *root )
2639 {
2640 IORegistryEntry * entry;
2641
2642 if( master_port != master_device_port)
2643 return( kIOReturnNotPrivileged);
2644
2645 entry = IORegistryEntry::getRegistryRoot();
2646 if( entry)
2647 entry->retain();
2648 *root = entry;
2649
2650 return( kIOReturnSuccess );
2651 }
2652
2653 /* Routine io_registry_create_iterator */
2654 kern_return_t is_io_registry_create_iterator(
2655 mach_port_t master_port,
2656 io_name_t plane,
2657 uint32_t options,
2658 io_object_t *iterator )
2659 {
2660 if( master_port != master_device_port)
2661 return( kIOReturnNotPrivileged);
2662
2663 *iterator = IOUserIterator::withIterator(
2664 IORegistryIterator::iterateOver(
2665 IORegistryEntry::getPlane( plane ), options ));
2666
2667 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2668 }
2669
2670 /* Routine io_registry_entry_create_iterator */
2671 kern_return_t is_io_registry_entry_create_iterator(
2672 io_object_t registry_entry,
2673 io_name_t plane,
2674 uint32_t options,
2675 io_object_t *iterator )
2676 {
2677 CHECK( IORegistryEntry, registry_entry, entry );
2678
2679 *iterator = IOUserIterator::withIterator(
2680 IORegistryIterator::iterateOver( entry,
2681 IORegistryEntry::getPlane( plane ), options ));
2682
2683 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2684 }
2685
2686 /* Routine io_registry_iterator_enter_entry */
2687 kern_return_t is_io_registry_iterator_enter_entry(
2688 io_object_t iterator )
2689 {
2690 CHECKLOCKED( IORegistryIterator, iterator, iter );
2691
2692 IOLockLock(oIter->lock);
2693 iter->enterEntry();
2694 IOLockUnlock(oIter->lock);
2695
2696 return( kIOReturnSuccess );
2697 }
2698
2699 /* Routine io_registry_iterator_exit_entry */
2700 kern_return_t is_io_registry_iterator_exit_entry(
2701 io_object_t iterator )
2702 {
2703 bool didIt;
2704
2705 CHECKLOCKED( IORegistryIterator, iterator, iter );
2706
2707 IOLockLock(oIter->lock);
2708 didIt = iter->exitEntry();
2709 IOLockUnlock(oIter->lock);
2710
2711 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2712 }
2713
2714 /* Routine io_registry_entry_from_path */
2715 kern_return_t is_io_registry_entry_from_path(
2716 mach_port_t master_port,
2717 io_string_t path,
2718 io_object_t *registry_entry )
2719 {
2720 IORegistryEntry * entry;
2721
2722 if( master_port != master_device_port)
2723 return( kIOReturnNotPrivileged);
2724
2725 entry = IORegistryEntry::fromPath( path );
2726
2727 *registry_entry = entry;
2728
2729 return( kIOReturnSuccess );
2730 }
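/*
 * A registry path names a plane followed by a path within it.  Minimal
 * user-space sketch (the path is an example):
 *
 *   io_registry_entry_t root =
 *       IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
 *   if (root != IO_OBJECT_NULL) IOObjectRelease(root);
 *
 * Paths too long for io_string_t go through the _ool variant below instead.
 */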
2731
2732
2733 /* Routine io_registry_entry_from_path_ool */
2734 kern_return_t is_io_registry_entry_from_path_ool(
2735 mach_port_t master_port,
2736 io_string_inband_t path,
2737 io_buf_ptr_t path_ool,
2738 mach_msg_type_number_t path_oolCnt,
2739 kern_return_t *result,
2740 io_object_t *registry_entry)
2741 {
2742 IORegistryEntry * entry;
2743 vm_map_offset_t map_data;
2744 const char * cpath;
2745 IOReturn res;
2746 kern_return_t err;
2747
2748 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2749
2750 map_data = 0;
2751 entry = 0;
2752 res = err = KERN_SUCCESS;
2753 if (path[0]) cpath = path;
2754 else
2755 {
2756 if (!path_oolCnt) return(kIOReturnBadArgument);
2757 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2758
2759 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2760 if (KERN_SUCCESS == err)
2761 {
2762 // must return success to mig after vm_map_copyout() succeeds, so result is actual
2763 cpath = CAST_DOWN(const char *, map_data);
2764 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2765 }
2766 }
2767
2768 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2769 {
2770 entry = IORegistryEntry::fromPath(cpath);
2771 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2772 }
2773
2774 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2775
2776 if (KERN_SUCCESS != err) res = err;
2777 *registry_entry = entry;
2778 *result = res;
2779
2780 return (err);
2781 }
2782
2783
2784 /* Routine io_registry_entry_in_plane */
2785 kern_return_t is_io_registry_entry_in_plane(
2786 io_object_t registry_entry,
2787 io_name_t plane,
2788 boolean_t *inPlane )
2789 {
2790 CHECK( IORegistryEntry, registry_entry, entry );
2791
2792 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2793
2794 return( kIOReturnSuccess );
2795 }
2796
2797
2798 /* Routine io_registry_entry_get_path */
2799 kern_return_t is_io_registry_entry_get_path(
2800 io_object_t registry_entry,
2801 io_name_t plane,
2802 io_string_t path )
2803 {
2804 int length;
2805 CHECK( IORegistryEntry, registry_entry, entry );
2806
2807 length = sizeof( io_string_t);
2808 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2809 return( kIOReturnSuccess );
2810 else
2811 return( kIOReturnBadArgument );
2812 }
2813
2814 /* Routine io_registry_entry_get_path_ool */
2815 kern_return_t is_io_registry_entry_get_path_ool(
2816 io_object_t registry_entry,
2817 io_name_t plane,
2818 io_string_inband_t path,
2819 io_buf_ptr_t *path_ool,
2820 mach_msg_type_number_t *path_oolCnt)
2821 {
2822 enum { kMaxPath = 16384 };
2823 IOReturn err;
2824 int length;
2825 char * buf;
2826
2827 CHECK( IORegistryEntry, registry_entry, entry );
2828
2829 *path_ool = NULL;
2830 *path_oolCnt = 0;
2831 length = sizeof(io_string_inband_t);
2832 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2833 else
2834 {
2835 length = kMaxPath;
2836 buf = IONew(char, length);
2837 if (!buf) err = kIOReturnNoMemory;
2838 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2839 else
2840 {
2841 *path_oolCnt = length;
2842 err = copyoutkdata(buf, length, path_ool);
2843 }
2844 if (buf) IODelete(buf, char, kMaxPath);
2845 }
2846
2847 return (err);
2848 }
2849
2850
2851 /* Routine io_registry_entry_get_name */
2852 kern_return_t is_io_registry_entry_get_name(
2853 io_object_t registry_entry,
2854 io_name_t name )
2855 {
2856 CHECK( IORegistryEntry, registry_entry, entry );
2857
2858 strncpy( name, entry->getName(), sizeof( io_name_t));
2859
2860 return( kIOReturnSuccess );
2861 }
2862
2863 /* Routine io_registry_entry_get_name_in_plane */
2864 kern_return_t is_io_registry_entry_get_name_in_plane(
2865 io_object_t registry_entry,
2866 io_name_t planeName,
2867 io_name_t name )
2868 {
2869 const IORegistryPlane * plane;
2870 CHECK( IORegistryEntry, registry_entry, entry );
2871
2872 if( planeName[0])
2873 plane = IORegistryEntry::getPlane( planeName );
2874 else
2875 plane = 0;
2876
2877 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2878
2879 return( kIOReturnSuccess );
2880 }
2881
2882 /* Routine io_registry_entry_get_location_in_plane */
2883 kern_return_t is_io_registry_entry_get_location_in_plane(
2884 io_object_t registry_entry,
2885 io_name_t planeName,
2886 io_name_t location )
2887 {
2888 const IORegistryPlane * plane;
2889 CHECK( IORegistryEntry, registry_entry, entry );
2890
2891 if( planeName[0])
2892 plane = IORegistryEntry::getPlane( planeName );
2893 else
2894 plane = 0;
2895
2896 const char * cstr = entry->getLocation( plane );
2897
2898 if( cstr) {
2899 strncpy( location, cstr, sizeof( io_name_t));
2900 return( kIOReturnSuccess );
2901 } else
2902 return( kIOReturnNotFound );
2903 }
2904
2905 /* Routine io_registry_entry_get_registry_entry_id */
2906 kern_return_t is_io_registry_entry_get_registry_entry_id(
2907 io_object_t registry_entry,
2908 uint64_t *entry_id )
2909 {
2910 CHECK( IORegistryEntry, registry_entry, entry );
2911
2912 *entry_id = entry->getRegistryEntryID();
2913
2914 return (kIOReturnSuccess);
2915 }
2916
2917 /* Routine io_registry_entry_get_property_bytes */
2918 kern_return_t is_io_registry_entry_get_property_bytes(
2919 io_object_t registry_entry,
2920 io_name_t property_name,
2921 io_struct_inband_t buf,
2922 mach_msg_type_number_t *dataCnt )
2923 {
2924 OSObject * obj;
2925 OSData * data;
2926 OSString * str;
2927 OSBoolean * boo;
2928 OSNumber * off;
2929 UInt64 offsetBytes;
2930 unsigned int len = 0;
2931 const void * bytes = 0;
2932 IOReturn ret = kIOReturnSuccess;
2933
2934 CHECK( IORegistryEntry, registry_entry, entry );
2935
2936 #if CONFIG_MACF
2937 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2938 return kIOReturnNotPermitted;
2939 #endif
2940
2941 obj = entry->copyProperty(property_name);
2942 if( !obj)
2943 return( kIOReturnNoResources );
2944
2945 // One day OSData will be a common container base class
2946 // until then...
2947 if( (data = OSDynamicCast( OSData, obj ))) {
2948 len = data->getLength();
2949 bytes = data->getBytesNoCopy();
2950 if (!data->isSerializable()) len = 0;
2951
2952 } else if( (str = OSDynamicCast( OSString, obj ))) {
2953 len = str->getLength() + 1;
2954 bytes = str->getCStringNoCopy();
2955
2956 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2957 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2958 bytes = boo->isTrue() ? "Yes" : "No";
2959
2960 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2961 offsetBytes = off->unsigned64BitValue();
2962 len = off->numberOfBytes();
2963 if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
2964 bytes = &offsetBytes;
2965 #ifdef __BIG_ENDIAN__
2966 bytes = (const void *)
2967 (((UInt32) bytes) + (sizeof( UInt64) - len));
2968 #endif
2969
2970 } else
2971 ret = kIOReturnBadArgument;
2972
2973 if( bytes) {
2974 if( *dataCnt < len)
2975 ret = kIOReturnIPCError;
2976 else {
2977 *dataCnt = len;
2978 bcopy( bytes, buf, len );
2979 }
2980 }
2981 obj->release();
2982
2983 return( ret );
2984 }
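/*
 * Marshaling rules used above when flattening a property into the caller's
 * inband buffer: OSData is copied verbatim (length forced to 0 if it is not
 * serializable), OSString is copied including its terminating NUL, OSBoolean
 * becomes the literal "Yes" or "No", and OSNumber is copied as its value
 * truncated to numberOfBytes() (at most 8 bytes).  If the caller's buffer is
 * smaller than the flattened length the routine returns kIOReturnIPCError
 * without copying anything.
 */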
2985
2986
2987 /* Routine io_registry_entry_get_property */
2988 kern_return_t is_io_registry_entry_get_property(
2989 io_object_t registry_entry,
2990 io_name_t property_name,
2991 io_buf_ptr_t *properties,
2992 mach_msg_type_number_t *propertiesCnt )
2993 {
2994 kern_return_t err;
2995 vm_size_t len;
2996 OSObject * obj;
2997
2998 CHECK( IORegistryEntry, registry_entry, entry );
2999
3000 #if CONFIG_MACF
3001 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3002 return kIOReturnNotPermitted;
3003 #endif
3004
3005 obj = entry->copyProperty(property_name);
3006 if( !obj)
3007 return( kIOReturnNotFound );
3008
3009 OSSerialize * s = OSSerialize::withCapacity(4096);
3010 if( !s) {
3011 obj->release();
3012 return( kIOReturnNoMemory );
3013 }
3014
3015 if( obj->serialize( s )) {
3016 len = s->getLength();
3017 *propertiesCnt = len;
3018 err = copyoutkdata( s->text(), len, properties );
3019
3020 } else
3021 err = kIOReturnUnsupported;
3022
3023 s->release();
3024 obj->release();
3025
3026 return( err );
3027 }
3028
3029 /* Routine io_registry_entry_get_property_recursively */
3030 kern_return_t is_io_registry_entry_get_property_recursively(
3031 io_object_t registry_entry,
3032 io_name_t plane,
3033 io_name_t property_name,
3034 uint32_t options,
3035 io_buf_ptr_t *properties,
3036 mach_msg_type_number_t *propertiesCnt )
3037 {
3038 kern_return_t err;
3039 vm_size_t len;
3040 OSObject * obj;
3041
3042 CHECK( IORegistryEntry, registry_entry, entry );
3043
3044 #if CONFIG_MACF
3045 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3046 return kIOReturnNotPermitted;
3047 #endif
3048
3049 obj = entry->copyProperty( property_name,
3050 IORegistryEntry::getPlane( plane ), options );
3051 if( !obj)
3052 return( kIOReturnNotFound );
3053
3054 OSSerialize * s = OSSerialize::withCapacity(4096);
3055 if( !s) {
3056 obj->release();
3057 return( kIOReturnNoMemory );
3058 }
3059
3060 if( obj->serialize( s )) {
3061 len = s->getLength();
3062 *propertiesCnt = len;
3063 err = copyoutkdata( s->text(), len, properties );
3064
3065 } else
3066 err = kIOReturnUnsupported;
3067
3068 s->release();
3069 obj->release();
3070
3071 return( err );
3072 }
3073
3074 /* Routine io_registry_entry_get_properties */
3075 kern_return_t is_io_registry_entry_get_properties(
3076 io_object_t registry_entry,
3077 io_buf_ptr_t *properties,
3078 mach_msg_type_number_t *propertiesCnt )
3079 {
3080 return (kIOReturnUnsupported);
3081 }
3082
3083 #if CONFIG_MACF
3084
3085 struct GetPropertiesEditorRef
3086 {
3087 kauth_cred_t cred;
3088 IORegistryEntry * entry;
3089 OSCollection * root;
3090 };
3091
3092 static const OSMetaClassBase *
3093 GetPropertiesEditor(void * reference,
3094 OSSerialize * s,
3095 OSCollection * container,
3096 const OSSymbol * name,
3097 const OSMetaClassBase * value)
3098 {
3099 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3100
3101 if (!ref->root) ref->root = container;
3102 if (ref->root == container)
3103 {
3104 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3105 {
3106 value = 0;
3107 }
3108 }
3109 if (value) value->retain();
3110 return (value);
3111 }
3112
3113 #endif /* CONFIG_MACF */
3114
3115 /* Routine io_registry_entry_get_properties_bin */
3116 kern_return_t is_io_registry_entry_get_properties_bin(
3117 io_object_t registry_entry,
3118 io_buf_ptr_t *properties,
3119 mach_msg_type_number_t *propertiesCnt)
3120 {
3121 kern_return_t err = kIOReturnSuccess;
3122 vm_size_t len;
3123 OSSerialize * s;
3124 OSSerialize::Editor editor = 0;
3125 void * editRef = 0;
3126
3127 CHECK(IORegistryEntry, registry_entry, entry);
3128
3129 #if CONFIG_MACF
3130 GetPropertiesEditorRef ref;
3131 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3132 {
3133 editor = &GetPropertiesEditor;
3134 editRef = &ref;
3135 ref.cred = kauth_cred_get();
3136 ref.entry = entry;
3137 ref.root = 0;
3138 }
3139 #endif
3140
3141 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3142 if (!s) return (kIOReturnNoMemory);
3143
3144 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3145
3146 if (kIOReturnSuccess == err)
3147 {
3148 len = s->getLength();
3149 *propertiesCnt = len;
3150 err = copyoutkdata(s->text(), len, properties);
3151 }
3152 s->release();
3153
3154 return (err);
3155 }
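/*
 * The _bin variant serializes the whole property table in the binary
 * OSSerialize format.  When the MAC policy requests property filtering,
 * GetPropertiesEditor() is called for each key of the top-level container and
 * can suppress individual values by returning 0.  A hedged sketch of the
 * user-space consumer (allocator and options are the usual defaults):
 *
 *   CFMutableDictionaryRef props = NULL;
 *   kern_return_t kr = IORegistryEntryCreateCFProperties(entry, &props,
 *                                                        kCFAllocatorDefault, 0);
 *   if (KERN_SUCCESS == kr && props) CFRelease(props);
 */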
3156
3157 /* Routine io_registry_entry_get_property_bin */
3158 kern_return_t is_io_registry_entry_get_property_bin(
3159 io_object_t registry_entry,
3160 io_name_t plane,
3161 io_name_t property_name,
3162 uint32_t options,
3163 io_buf_ptr_t *properties,
3164 mach_msg_type_number_t *propertiesCnt )
3165 {
3166 kern_return_t err;
3167 vm_size_t len;
3168 OSObject * obj;
3169 const OSSymbol * sym;
3170
3171 CHECK( IORegistryEntry, registry_entry, entry );
3172
3173 #if CONFIG_MACF
3174 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3175 return kIOReturnNotPermitted;
3176 #endif
3177
3178 sym = OSSymbol::withCString(property_name);
3179 if (!sym) return (kIOReturnNoMemory);
3180
3181 if (gIORegistryEntryPropertyKeysKey == sym)
3182 {
3183 obj = entry->copyPropertyKeys();
3184 }
3185 else
3186 {
3187 if ((kIORegistryIterateRecursively & options) && plane[0])
3188 {
3189 obj = entry->copyProperty(property_name,
3190 IORegistryEntry::getPlane(plane), options );
3191 }
3192 else
3193 {
3194 obj = entry->copyProperty(property_name);
3195 }
3196 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3197 }
3198
3199 sym->release();
3200 if (!obj) return (kIOReturnNotFound);
3201
3202 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3203 if( !s) {
3204 obj->release();
3205 return( kIOReturnNoMemory );
3206 }
3207
3208 if( obj->serialize( s )) {
3209 len = s->getLength();
3210 *propertiesCnt = len;
3211 err = copyoutkdata( s->text(), len, properties );
3212
3213 } else err = kIOReturnUnsupported;
3214
3215 s->release();
3216 obj->release();
3217
3218 return( err );
3219 }
3220
3221
3222 /* Routine io_registry_entry_set_properties */
3223 kern_return_t is_io_registry_entry_set_properties
3224 (
3225 io_object_t registry_entry,
3226 io_buf_ptr_t properties,
3227 mach_msg_type_number_t propertiesCnt,
3228 kern_return_t * result)
3229 {
3230 OSObject * obj;
3231 kern_return_t err;
3232 IOReturn res;
3233 vm_offset_t data;
3234 vm_map_offset_t map_data;
3235
3236 CHECK( IORegistryEntry, registry_entry, entry );
3237
3238 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3239 return( kIOReturnMessageTooLarge);
3240
3241 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3242 data = CAST_DOWN(vm_offset_t, map_data);
3243
3244 if( KERN_SUCCESS == err) {
3245
3246 FAKE_STACK_FRAME(entry->getMetaClass());
3247
3248 // must return success after vm_map_copyout() succeeds
3249 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3250 vm_deallocate( kernel_map, data, propertiesCnt );
3251
3252 if (!obj)
3253 res = kIOReturnBadArgument;
3254 #if CONFIG_MACF
3255 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3256 registry_entry, obj))
3257 {
3258 res = kIOReturnNotPermitted;
3259 }
3260 #endif
3261 else
3262 {
3263 res = entry->setProperties( obj );
3264 }
3265
3266 if (obj)
3267 obj->release();
3268
3269 FAKE_STACK_FRAME_END();
3270
3271 } else
3272 res = err;
3273
3274 *result = res;
3275 return( err );
3276 }
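/*
 * Setting properties follows the same OOL pattern: the serialized dictionary is
 * copied out of the message, unserialized, checked with
 * mac_iokit_check_set_properties(), and finally passed to the entry's
 * setProperties().  Hedged user-space sketch (the dictionary contents are up
 * to the caller):
 *
 *   // dict: a CFDictionary of properties the driver understands
 *   kern_return_t kr = IORegistryEntrySetCFProperties(entry, dict);
 */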
3277
3278 /* Routine io_registry_entry_get_child_iterator */
3279 kern_return_t is_io_registry_entry_get_child_iterator(
3280 io_object_t registry_entry,
3281 io_name_t plane,
3282 io_object_t *iterator )
3283 {
3284 CHECK( IORegistryEntry, registry_entry, entry );
3285
3286 *iterator = entry->getChildIterator(
3287 IORegistryEntry::getPlane( plane ));
3288
3289 return( kIOReturnSuccess );
3290 }
3291
3292 /* Routine io_registry_entry_get_parent_iterator */
3293 kern_return_t is_io_registry_entry_get_parent_iterator(
3294 io_object_t registry_entry,
3295 io_name_t plane,
3296 io_object_t *iterator)
3297 {
3298 CHECK( IORegistryEntry, registry_entry, entry );
3299
3300 *iterator = entry->getParentIterator(
3301 IORegistryEntry::getPlane( plane ));
3302
3303 return( kIOReturnSuccess );
3304 }
3305
3306 /* Routine io_service_get_busy_state */
3307 kern_return_t is_io_service_get_busy_state(
3308 io_object_t _service,
3309 uint32_t *busyState )
3310 {
3311 CHECK( IOService, _service, service );
3312
3313 *busyState = service->getBusyState();
3314
3315 return( kIOReturnSuccess );
3316 }
3317
3318 /* Routine io_service_get_state */
3319 kern_return_t is_io_service_get_state(
3320 io_object_t _service,
3321 uint64_t *state,
3322 uint32_t *busy_state,
3323 uint64_t *accumulated_busy_time )
3324 {
3325 CHECK( IOService, _service, service );
3326
3327 *state = service->getState();
3328 *busy_state = service->getBusyState();
3329 *accumulated_busy_time = service->getAccumulatedBusyTime();
3330
3331 return( kIOReturnSuccess );
3332 }
3333
3334 /* Routine io_service_wait_quiet */
3335 kern_return_t is_io_service_wait_quiet(
3336 io_object_t _service,
3337 mach_timespec_t wait_time )
3338 {
3339 uint64_t timeoutNS;
3340
3341 CHECK( IOService, _service, service );
3342
3343 timeoutNS = wait_time.tv_sec;
3344 timeoutNS *= kSecondScale;
3345 timeoutNS += wait_time.tv_nsec;
3346
3347 return( service->waitQuiet(timeoutNS) );
3348 }
3349
3350 /* Routine io_service_request_probe */
3351 kern_return_t is_io_service_request_probe(
3352 io_object_t _service,
3353 uint32_t options )
3354 {
3355 CHECK( IOService, _service, service );
3356
3357 return( service->requestProbe( options ));
3358 }
3359
3360 /* Routine io_service_get_authorization_id */
3361 kern_return_t is_io_service_get_authorization_id(
3362 io_object_t _service,
3363 uint64_t *authorization_id )
3364 {
3365 kern_return_t kr;
3366
3367 CHECK( IOService, _service, service );
3368
3369 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3370 kIOClientPrivilegeAdministrator );
3371 if( kIOReturnSuccess != kr)
3372 return( kr );
3373
3374 *authorization_id = service->getAuthorizationID();
3375
3376 return( kr );
3377 }
3378
3379 /* Routine io_service_set_authorization_id */
3380 kern_return_t is_io_service_set_authorization_id(
3381 io_object_t _service,
3382 uint64_t authorization_id )
3383 {
3384 CHECK( IOService, _service, service );
3385
3386 return( service->setAuthorizationID( authorization_id ) );
3387 }
3388
3389 /* Routine io_service_open_extended */
3390 kern_return_t is_io_service_open_extended(
3391 io_object_t _service,
3392 task_t owningTask,
3393 uint32_t connect_type,
3394 NDR_record_t ndr,
3395 io_buf_ptr_t properties,
3396 mach_msg_type_number_t propertiesCnt,
3397 kern_return_t * result,
3398 io_object_t *connection )
3399 {
3400 IOUserClient * client = 0;
3401 kern_return_t err = KERN_SUCCESS;
3402 IOReturn res = kIOReturnSuccess;
3403 OSDictionary * propertiesDict = 0;
3404 bool crossEndian;
3405 bool disallowAccess;
3406
3407 CHECK( IOService, _service, service );
3408
3409 if (!owningTask) return (kIOReturnBadArgument);
3410 assert(owningTask == current_task());
3411 if (owningTask != current_task()) return (kIOReturnBadArgument);
3412
3413 do
3414 {
3415 if (properties) return (kIOReturnUnsupported);
3416 #if 0
3417 {
3418 OSObject * obj;
3419 vm_offset_t data;
3420 vm_map_offset_t map_data;
3421
3422 if( propertiesCnt > sizeof(io_struct_inband_t))
3423 return( kIOReturnMessageTooLarge);
3424
3425 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3426 res = err;
3427 data = CAST_DOWN(vm_offset_t, map_data);
3428 if (KERN_SUCCESS == err)
3429 {
3430 // must return success after vm_map_copyout() succeeds
3431 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3432 vm_deallocate( kernel_map, data, propertiesCnt );
3433 propertiesDict = OSDynamicCast(OSDictionary, obj);
3434 if (!propertiesDict)
3435 {
3436 res = kIOReturnBadArgument;
3437 if (obj)
3438 obj->release();
3439 }
3440 }
3441 if (kIOReturnSuccess != res)
3442 break;
3443 }
3444 #endif
3445 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3446 if (crossEndian)
3447 {
3448 if (!propertiesDict)
3449 propertiesDict = OSDictionary::withCapacity(4);
3450 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3451 if (data)
3452 {
3453 if (propertiesDict)
3454 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3455 data->release();
3456 }
3457 }
3458
3459 res = service->newUserClient( owningTask, (void *) owningTask,
3460 connect_type, propertiesDict, &client );
3461
3462 if (propertiesDict)
3463 propertiesDict->release();
3464
3465 if (res == kIOReturnSuccess)
3466 {
3467 assert( OSDynamicCast(IOUserClient, client) );
3468
3469 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3470 client->closed = false;
3471 client->lock = IOLockAlloc();
3472
3473 disallowAccess = (crossEndian
3474 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3475 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3476 if (disallowAccess) res = kIOReturnUnsupported;
3477 #if CONFIG_MACF
3478 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3479 res = kIOReturnNotPermitted;
3480 #endif
3481
3482 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3483
3484 if (kIOReturnSuccess != res)
3485 {
3486 IOStatisticsClientCall();
3487 client->clientClose();
3488 client->release();
3489 client = 0;
3490 break;
3491 }
3492 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3493 if (creatorName)
3494 {
3495 client->setProperty(kIOUserClientCreatorKey, creatorName);
3496 creatorName->release();
3497 }
3498 client->setTerminateDefer(service, false);
3499 }
3500 }
3501 while (false);
3502
3503 *connection = client;
3504 *result = res;
3505
3506 return (err);
3507 }
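/*
 * This is the kernel end of IOServiceOpen().  Hedged user-space sketch
 * (connect type 0 is only an example; its meaning is defined by the driver):
 *
 *   io_connect_t conn = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &conn);
 *   // ... use the connection ...
 *   if (conn != IO_OBJECT_NULL) IOServiceClose(conn);
 *
 * On the kernel side the cross-endian NDR check, newUserClient(), the MAC open
 * check and registerOwner() must all succeed before the connection is handed
 * back; any failure closes and releases the half-constructed client.
 */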
3508
3509 /* Routine io_service_close */
3510 kern_return_t is_io_service_close(
3511 io_object_t connection )
3512 {
3513 OSSet * mappings;
3514 if ((mappings = OSDynamicCast(OSSet, connection)))
3515 return( kIOReturnSuccess );
3516
3517 CHECK( IOUserClient, connection, client );
3518
3519 IOStatisticsClientCall();
3520
3521 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3522 {
3523 IOLockLock(client->lock);
3524 client->clientClose();
3525 IOLockUnlock(client->lock);
3526 }
3527 else
3528 {
3529 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3530 client->getRegistryEntryID(), client->getName());
3531 }
3532
3533 return( kIOReturnSuccess );
3534 }
3535
3536 /* Routine io_connect_get_service */
3537 kern_return_t is_io_connect_get_service(
3538 io_object_t connection,
3539 io_object_t *service )
3540 {
3541 IOService * theService;
3542
3543 CHECK( IOUserClient, connection, client );
3544
3545 theService = client->getService();
3546 if( theService)
3547 theService->retain();
3548
3549 *service = theService;
3550
3551 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3552 }
3553
3554 /* Routine io_connect_set_notification_port */
3555 kern_return_t is_io_connect_set_notification_port(
3556 io_object_t connection,
3557 uint32_t notification_type,
3558 mach_port_t port,
3559 uint32_t reference)
3560 {
3561 kern_return_t ret;
3562 CHECK( IOUserClient, connection, client );
3563
3564 IOStatisticsClientCall();
3565 IOLockLock(client->lock);
3566 ret = client->registerNotificationPort( port, notification_type,
3567 (io_user_reference_t) reference );
3568 IOLockUnlock(client->lock);
3569 return (ret);
3570 }
3571
3572 /* Routine io_connect_set_notification_port */
3573 kern_return_t is_io_connect_set_notification_port_64(
3574 io_object_t connection,
3575 uint32_t notification_type,
3576 mach_port_t port,
3577 io_user_reference_t reference)
3578 {
3579 kern_return_t ret;
3580 CHECK( IOUserClient, connection, client );
3581
3582 IOStatisticsClientCall();
3583 IOLockLock(client->lock);
3584 ret = client->registerNotificationPort( port, notification_type,
3585 reference );
3586 IOLockUnlock(client->lock);
3587 return (ret);
3588 }
3589
3590 /* Routine io_connect_map_memory_into_task */
3591 kern_return_t is_io_connect_map_memory_into_task
3592 (
3593 io_connect_t connection,
3594 uint32_t memory_type,
3595 task_t into_task,
3596 mach_vm_address_t *address,
3597 mach_vm_size_t *size,
3598 uint32_t flags
3599 )
3600 {
3601 IOReturn err;
3602 IOMemoryMap * map;
3603
3604 CHECK( IOUserClient, connection, client );
3605
3606 if (!into_task) return (kIOReturnBadArgument);
3607
3608 IOStatisticsClientCall();
3609 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3610
3611 if( map) {
3612 *address = map->getAddress();
3613 if( size)
3614 *size = map->getSize();
3615
3616 if( client->sharedInstance
3617 || (into_task != current_task())) {
3618 // push a name out to the task owning the map,
3619 // so we can clean up maps
3620 mach_port_name_t name __unused =
3621 IOMachPort::makeSendRightForTask(
3622 into_task, map, IKOT_IOKIT_OBJECT );
3623
3624 } else {
3625 // keep it with the user client
3626 IOLockLock( gIOObjectPortLock);
3627 if( 0 == client->mappings)
3628 client->mappings = OSSet::withCapacity(2);
3629 if( client->mappings)
3630 client->mappings->setObject( map);
3631 IOLockUnlock( gIOObjectPortLock);
3632 map->release();
3633 }
3634 err = kIOReturnSuccess;
3635
3636 } else
3637 err = kIOReturnBadArgument;
3638
3639 return( err );
3640 }
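/*
 * Mapping lifetime handling above: when the map is created for another task, or
 * the client is a shared instance, a send right naming the IOMemoryMap is
 * pushed to that task so the mapping can be cleaned up when the port goes away;
 * otherwise the map is parked in client->mappings and removed again by
 * is_io_connect_unmap_memory_from_task().  Hedged user-space sketch:
 *
 *   // memoryType 0 is an example; the valid types are defined by the driver
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(conn, 0, mach_task_self(),
 *                                           &addr, &size, kIOMapAnywhere);
 */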
3641
3642 /* Routine io_connect_map_memory */
3643 kern_return_t is_io_connect_map_memory(
3644 io_object_t connect,
3645 uint32_t type,
3646 task_t task,
3647 uint32_t * mapAddr,
3648 uint32_t * mapSize,
3649 uint32_t flags )
3650 {
3651 IOReturn err;
3652 mach_vm_address_t address;
3653 mach_vm_size_t size;
3654
3655 address = SCALAR64(*mapAddr);
3656 size = SCALAR64(*mapSize);
3657
3658 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3659
3660 *mapAddr = SCALAR32(address);
3661 *mapSize = SCALAR32(size);
3662
3663 return (err);
3664 }
3665
3666 } /* extern "C" */
3667
3668 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3669 {
3670 OSIterator * iter;
3671 IOMemoryMap * map = 0;
3672
3673 IOLockLock(gIOObjectPortLock);
3674
3675 iter = OSCollectionIterator::withCollection(mappings);
3676 if(iter)
3677 {
3678 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3679 {
3680 if(mem == map->getMemoryDescriptor())
3681 {
3682 map->retain();
3683 mappings->removeObject(map);
3684 break;
3685 }
3686 }
3687 iter->release();
3688 }
3689
3690 IOLockUnlock(gIOObjectPortLock);
3691
3692 return (map);
3693 }
3694
3695 extern "C" {
3696
3697 /* Routine io_connect_unmap_memory_from_task */
3698 kern_return_t is_io_connect_unmap_memory_from_task
3699 (
3700 io_connect_t connection,
3701 uint32_t memory_type,
3702 task_t from_task,
3703 mach_vm_address_t address)
3704 {
3705 IOReturn err;
3706 IOOptionBits options = 0;
3707 IOMemoryDescriptor * memory = 0;
3708 IOMemoryMap * map;
3709
3710 CHECK( IOUserClient, connection, client );
3711
3712 if (!from_task) return (kIOReturnBadArgument);
3713
3714 IOStatisticsClientCall();
3715 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3716
3717 if( memory && (kIOReturnSuccess == err)) {
3718
3719 options = (options & ~kIOMapUserOptionsMask)
3720 | kIOMapAnywhere | kIOMapReference;
3721
3722 map = memory->createMappingInTask( from_task, address, options );
3723 memory->release();
3724 if( map)
3725 {
3726 IOLockLock( gIOObjectPortLock);
3727 if( client->mappings)
3728 client->mappings->removeObject( map);
3729 IOLockUnlock( gIOObjectPortLock);
3730
3731 mach_port_name_t name = 0;
3732 if (from_task != current_task())
3733 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3734 if (name)
3735 {
3736 map->userClientUnmap();
3737 err = iokit_mod_send_right( from_task, name, -2 );
3738 err = kIOReturnSuccess;
3739 }
3740 else
3741 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3742 if (from_task == current_task())
3743 map->release();
3744 }
3745 else
3746 err = kIOReturnBadArgument;
3747 }
3748
3749 return( err );
3750 }
3751
3752 kern_return_t is_io_connect_unmap_memory(
3753 io_object_t connect,
3754 uint32_t type,
3755 task_t task,
3756 uint32_t mapAddr )
3757 {
3758 IOReturn err;
3759 mach_vm_address_t address;
3760
3761 address = SCALAR64(mapAddr);
3762
3763 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3764
3765 return (err);
3766 }
3767
3768
3769 /* Routine io_connect_add_client */
3770 kern_return_t is_io_connect_add_client(
3771 io_object_t connection,
3772 io_object_t connect_to)
3773 {
3774 CHECK( IOUserClient, connection, client );
3775 CHECK( IOUserClient, connect_to, to );
3776
3777 IOStatisticsClientCall();
3778 return( client->connectClient( to ) );
3779 }
3780
3781
3782 /* Routine io_connect_set_properties */
3783 kern_return_t is_io_connect_set_properties(
3784 io_object_t connection,
3785 io_buf_ptr_t properties,
3786 mach_msg_type_number_t propertiesCnt,
3787 kern_return_t * result)
3788 {
3789 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3790 }
3791
3792 /* Routine io_connect_method_var_output */
3793 kern_return_t is_io_connect_method_var_output
3794 (
3795 io_connect_t connection,
3796 uint32_t selector,
3797 io_scalar_inband64_t scalar_input,
3798 mach_msg_type_number_t scalar_inputCnt,
3799 io_struct_inband_t inband_input,
3800 mach_msg_type_number_t inband_inputCnt,
3801 mach_vm_address_t ool_input,
3802 mach_vm_size_t ool_input_size,
3803 io_struct_inband_t inband_output,
3804 mach_msg_type_number_t *inband_outputCnt,
3805 io_scalar_inband64_t scalar_output,
3806 mach_msg_type_number_t *scalar_outputCnt,
3807 io_buf_ptr_t *var_output,
3808 mach_msg_type_number_t *var_outputCnt
3809 )
3810 {
3811 CHECK( IOUserClient, connection, client );
3812
3813 IOExternalMethodArguments args;
3814 IOReturn ret;
3815 IOMemoryDescriptor * inputMD = 0;
3816 OSObject * structureVariableOutputData = 0;
3817
3818 bzero(&args.__reserved[0], sizeof(args.__reserved));
3819 args.__reservedA = 0;
3820 args.version = kIOExternalMethodArgumentsCurrentVersion;
3821
3822 args.selector = selector;
3823
3824 args.asyncWakePort = MACH_PORT_NULL;
3825 args.asyncReference = 0;
3826 args.asyncReferenceCount = 0;
3827 args.structureVariableOutputData = &structureVariableOutputData;
3828
3829 args.scalarInput = scalar_input;
3830 args.scalarInputCount = scalar_inputCnt;
3831 args.structureInput = inband_input;
3832 args.structureInputSize = inband_inputCnt;
3833
3834 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3835
3836 if (ool_input)
3837 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3838 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3839 current_task());
3840
3841 args.structureInputDescriptor = inputMD;
3842
3843 args.scalarOutput = scalar_output;
3844 args.scalarOutputCount = *scalar_outputCnt;
3845 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3846 args.structureOutput = inband_output;
3847 args.structureOutputSize = *inband_outputCnt;
3848 args.structureOutputDescriptor = NULL;
3849 args.structureOutputDescriptorSize = 0;
3850
3851 IOStatisticsClientCall();
3852 ret = client->externalMethod( selector, &args );
3853
3854 *scalar_outputCnt = args.scalarOutputCount;
3855 *inband_outputCnt = args.structureOutputSize;
3856
3857 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3858 {
3859 OSSerialize * serialize;
3860 OSData * data;
3861 vm_size_t len;
3862
3863 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3864 {
3865 len = serialize->getLength();
3866 *var_outputCnt = len;
3867 ret = copyoutkdata(serialize->text(), len, var_output);
3868 }
3869 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3870 {
3871 len = data->getLength();
3872 *var_outputCnt = len;
3873 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3874 }
3875 else
3876 {
3877 ret = kIOReturnUnderrun;
3878 }
3879 }
3880
3881 if (inputMD)
3882 inputMD->release();
3883 if (structureVariableOutputData)
3884 structureVariableOutputData->release();
3885
3886 return (ret);
3887 }
3888
3889 /* Routine io_connect_method */
3890 kern_return_t is_io_connect_method
3891 (
3892 io_connect_t connection,
3893 uint32_t selector,
3894 io_scalar_inband64_t scalar_input,
3895 mach_msg_type_number_t scalar_inputCnt,
3896 io_struct_inband_t inband_input,
3897 mach_msg_type_number_t inband_inputCnt,
3898 mach_vm_address_t ool_input,
3899 mach_vm_size_t ool_input_size,
3900 io_struct_inband_t inband_output,
3901 mach_msg_type_number_t *inband_outputCnt,
3902 io_scalar_inband64_t scalar_output,
3903 mach_msg_type_number_t *scalar_outputCnt,
3904 mach_vm_address_t ool_output,
3905 mach_vm_size_t *ool_output_size
3906 )
3907 {
3908 CHECK( IOUserClient, connection, client );
3909
3910 IOExternalMethodArguments args;
3911 IOReturn ret;
3912 IOMemoryDescriptor * inputMD = 0;
3913 IOMemoryDescriptor * outputMD = 0;
3914
3915 bzero(&args.__reserved[0], sizeof(args.__reserved));
3916 args.__reservedA = 0;
3917 args.version = kIOExternalMethodArgumentsCurrentVersion;
3918
3919 args.selector = selector;
3920
3921 args.asyncWakePort = MACH_PORT_NULL;
3922 args.asyncReference = 0;
3923 args.asyncReferenceCount = 0;
3924 args.structureVariableOutputData = 0;
3925
3926 args.scalarInput = scalar_input;
3927 args.scalarInputCount = scalar_inputCnt;
3928 args.structureInput = inband_input;
3929 args.structureInputSize = inband_inputCnt;
3930
3931 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3932 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3933
3934 if (ool_input)
3935 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3936 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3937 current_task());
3938
3939 args.structureInputDescriptor = inputMD;
3940
3941 args.scalarOutput = scalar_output;
3942 args.scalarOutputCount = *scalar_outputCnt;
3943 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3944 args.structureOutput = inband_output;
3945 args.structureOutputSize = *inband_outputCnt;
3946
3947 if (ool_output && ool_output_size)
3948 {
3949 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3950 kIODirectionIn, current_task());
3951 }
3952
3953 args.structureOutputDescriptor = outputMD;
3954 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3955
3956 IOStatisticsClientCall();
3957 ret = client->externalMethod( selector, &args );
3958
3959 *scalar_outputCnt = args.scalarOutputCount;
3960 *inband_outputCnt = args.structureOutputSize;
3961 *ool_output_size = args.structureOutputDescriptorSize;
3962
3963 if (inputMD)
3964 inputMD->release();
3965 if (outputMD)
3966 outputMD->release();
3967
3968 return (ret);
3969 }
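/*
 * is_io_connect_method() is the transport behind IOConnectCallMethod() and its
 * wrappers: scalars and small structs travel inband, and anything larger than
 * io_struct_inband_t arrives as the ool_* address/size pair and is wrapped in
 * an IOMemoryDescriptor before IOUserClient::externalMethod() runs.  Hedged
 * user-space sketch (selector and argument counts are illustrative):
 *
 *   uint64_t in[2] = { 1, 2 };
 *   uint64_t out[1];
 *   uint32_t outCnt = 1;
 *   kern_return_t kr = IOConnectCallMethod(conn, 0, in, 2, NULL, 0,
 *                                          out, &outCnt, NULL, NULL);
 */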
3970
3971 /* Routine io_connect_async_method */
3972 kern_return_t is_io_connect_async_method
3973 (
3974 io_connect_t connection,
3975 mach_port_t wake_port,
3976 io_async_ref64_t reference,
3977 mach_msg_type_number_t referenceCnt,
3978 uint32_t selector,
3979 io_scalar_inband64_t scalar_input,
3980 mach_msg_type_number_t scalar_inputCnt,
3981 io_struct_inband_t inband_input,
3982 mach_msg_type_number_t inband_inputCnt,
3983 mach_vm_address_t ool_input,
3984 mach_vm_size_t ool_input_size,
3985 io_struct_inband_t inband_output,
3986 mach_msg_type_number_t *inband_outputCnt,
3987 io_scalar_inband64_t scalar_output,
3988 mach_msg_type_number_t *scalar_outputCnt,
3989 mach_vm_address_t ool_output,
3990 mach_vm_size_t * ool_output_size
3991 )
3992 {
3993 CHECK( IOUserClient, connection, client );
3994
3995 IOExternalMethodArguments args;
3996 IOReturn ret;
3997 IOMemoryDescriptor * inputMD = 0;
3998 IOMemoryDescriptor * outputMD = 0;
3999
4000 bzero(&args.__reserved[0], sizeof(args.__reserved));
4001 args.__reservedA = 0;
4002 args.version = kIOExternalMethodArgumentsCurrentVersion;
4003
4004 reference[0] = (io_user_reference_t) wake_port;
4005 if (vm_map_is_64bit(get_task_map(current_task())))
4006 reference[0] |= kIOUCAsync64Flag;
4007
4008 args.selector = selector;
4009
4010 args.asyncWakePort = wake_port;
4011 args.asyncReference = reference;
4012 args.asyncReferenceCount = referenceCnt;
4013
4014 args.structureVariableOutputData = 0;
4015
4016 args.scalarInput = scalar_input;
4017 args.scalarInputCount = scalar_inputCnt;
4018 args.structureInput = inband_input;
4019 args.structureInputSize = inband_inputCnt;
4020
4021 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4022 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4023
4024 if (ool_input)
4025 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4026 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4027 current_task());
4028
4029 args.structureInputDescriptor = inputMD;
4030
4031 args.scalarOutput = scalar_output;
4032 args.scalarOutputCount = *scalar_outputCnt;
4033 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4034 args.structureOutput = inband_output;
4035 args.structureOutputSize = *inband_outputCnt;
4036
4037 if (ool_output)
4038 {
4039 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4040 kIODirectionIn, current_task());
4041 }
4042
4043 args.structureOutputDescriptor = outputMD;
4044 args.structureOutputDescriptorSize = *ool_output_size;
4045
4046 IOStatisticsClientCall();
4047 ret = client->externalMethod( selector, &args );
4048
4049 *inband_outputCnt = args.structureOutputSize;
4050 *ool_output_size = args.structureOutputDescriptorSize;
4051
4052 if (inputMD)
4053 inputMD->release();
4054 if (outputMD)
4055 outputMD->release();
4056
4057 return (ret);
4058 }
4059
4060 /* Routine io_connect_method_scalarI_scalarO */
4061 kern_return_t is_io_connect_method_scalarI_scalarO(
4062 io_object_t connect,
4063 uint32_t index,
4064 io_scalar_inband_t input,
4065 mach_msg_type_number_t inputCount,
4066 io_scalar_inband_t output,
4067 mach_msg_type_number_t * outputCount )
4068 {
4069 IOReturn err;
4070 uint32_t i;
4071 io_scalar_inband64_t _input;
4072 io_scalar_inband64_t _output;
4073
4074 mach_msg_type_number_t struct_outputCnt = 0;
4075 mach_vm_size_t ool_output_size = 0;
4076
4077 bzero(&_output[0], sizeof(_output));
4078 for (i = 0; i < inputCount; i++)
4079 _input[i] = SCALAR64(input[i]);
4080
4081 err = is_io_connect_method(connect, index,
4082 _input, inputCount,
4083 NULL, 0,
4084 0, 0,
4085 NULL, &struct_outputCnt,
4086 _output, outputCount,
4087 0, &ool_output_size);
4088
4089 for (i = 0; i < *outputCount; i++)
4090 output[i] = SCALAR32(_output[i]);
4091
4092 return (err);
4093 }
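/*
 * The conversion routine above serves older callers that pass 32-bit scalars;
 * current IOKit.framework clients normally go through IOConnectCallScalarMethod(),
 * which carries 64-bit scalars and lands in is_io_connect_method() defined earlier
 * in this file. A minimal sketch; the selector and setValue() are hypothetical.
 */
#if 0
#include <IOKit/IOKitLib.h>

static kern_return_t setValue(io_connect_t connect, uint64_t value)
{
    uint64_t scalarIn[1]  = { value };
    uint64_t scalarOut[1] = { 0 };
    uint32_t scalarOutCnt = 1;

    /* 64-bit scalars travel as io_scalar_inband64_t on this path, so no
     * SCALAR64() widening is needed. */
    return IOConnectCallScalarMethod(connect, 0 /* hypothetical selector */,
                                     scalarIn, 1,
                                     scalarOut, &scalarOutCnt);
}
#endif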
4094
4095 kern_return_t shim_io_connect_method_scalarI_scalarO(
4096 IOExternalMethod * method,
4097 IOService * object,
4098 const io_user_scalar_t * input,
4099 mach_msg_type_number_t inputCount,
4100 io_user_scalar_t * output,
4101 mach_msg_type_number_t * outputCount )
4102 {
4103 IOMethod func;
4104 io_scalar_inband_t _output;
4105 IOReturn err;
4106 err = kIOReturnBadArgument;
4107
4108 bzero(&_output[0], sizeof(_output));
4109 do {
4110
4111 if( inputCount != method->count0)
4112 {
4113 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4114 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4115 continue;
4116 }
4117 if( *outputCount != method->count1)
4118 {
4119 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4120 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4121 continue;
4122 }
4123
4124 func = method->func;
4125
4126 switch( inputCount) {
4127
4128 case 6:
4129 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4130 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4131 break;
4132 case 5:
4133 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4134 ARG32(input[3]), ARG32(input[4]),
4135 &_output[0] );
4136 break;
4137 case 4:
4138 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4139 ARG32(input[3]),
4140 &_output[0], &_output[1] );
4141 break;
4142 case 3:
4143 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4144 &_output[0], &_output[1], &_output[2] );
4145 break;
4146 case 2:
4147 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4148 &_output[0], &_output[1], &_output[2],
4149 &_output[3] );
4150 break;
4151 case 1:
4152 err = (object->*func)( ARG32(input[0]),
4153 &_output[0], &_output[1], &_output[2],
4154 &_output[3], &_output[4] );
4155 break;
4156 case 0:
4157 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4158 &_output[3], &_output[4], &_output[5] );
4159 break;
4160
4161 default:
4162 IOLog("%s: Bad method table\n", object->getName());
4163 }
4164 }
4165 while( false);
4166
4167 uint32_t i;
4168 for (i = 0; i < *outputCount; i++)
4169 output[i] = SCALAR32(_output[i]);
4170
4171 return( err);
4172 }
4173
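/*
 * The shim above (and the ones that follow) unpack a legacy IOExternalMethod
 * entry according to its count0/count1 fields. A minimal kernel-side sketch of
 * the table a pre-Leopard style user client would publish; MyUserClient and
 * doSetGain are hypothetical names.
 */
#if 0
static const IOExternalMethod sMethods[] = {
    {   /* selector 0: two scalar inputs, one scalar output */
        NULL,                                   /* target supplied at dispatch */
        (IOMethod) &MyUserClient::doSetGain,
        kIOUCScalarIScalarO,
        2,                                      /* count0: scalar input count  */
        1                                       /* count1: scalar output count */
    },
};

IOExternalMethod * MyUserClient::getTargetAndMethodForIndex(
    IOService ** targetP, UInt32 index )
{
    if (index >= (sizeof(sMethods) / sizeof(sMethods[0])))
        return NULL;
    *targetP = this;
    return (IOExternalMethod *) &sMethods[index];
}
#endif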
4174 /* Routine io_async_method_scalarI_scalarO */
4175 kern_return_t is_io_async_method_scalarI_scalarO(
4176 io_object_t connect,
4177 mach_port_t wake_port,
4178 io_async_ref_t reference,
4179 mach_msg_type_number_t referenceCnt,
4180 uint32_t index,
4181 io_scalar_inband_t input,
4182 mach_msg_type_number_t inputCount,
4183 io_scalar_inband_t output,
4184 mach_msg_type_number_t * outputCount )
4185 {
4186 IOReturn err;
4187 uint32_t i;
4188 io_scalar_inband64_t _input;
4189 io_scalar_inband64_t _output;
4190 io_async_ref64_t _reference;
4191
4192 bzero(&_output[0], sizeof(_output));
4193 for (i = 0; i < referenceCnt; i++)
4194 _reference[i] = REF64(reference[i]);
4195
4196 mach_msg_type_number_t struct_outputCnt = 0;
4197 mach_vm_size_t ool_output_size = 0;
4198
4199 for (i = 0; i < inputCount; i++)
4200 _input[i] = SCALAR64(input[i]);
4201
4202 err = is_io_connect_async_method(connect,
4203 wake_port, _reference, referenceCnt,
4204 index,
4205 _input, inputCount,
4206 NULL, 0,
4207 0, 0,
4208 NULL, &struct_outputCnt,
4209 _output, outputCount,
4210 0, &ool_output_size);
4211
4212 for (i = 0; i < *outputCount; i++)
4213 output[i] = SCALAR32(_output[i]);
4214
4215 return (err);
4216 }
4217 /* Routine io_async_method_scalarI_structureO */
4218 kern_return_t is_io_async_method_scalarI_structureO(
4219 io_object_t connect,
4220 mach_port_t wake_port,
4221 io_async_ref_t reference,
4222 mach_msg_type_number_t referenceCnt,
4223 uint32_t index,
4224 io_scalar_inband_t input,
4225 mach_msg_type_number_t inputCount,
4226 io_struct_inband_t output,
4227 mach_msg_type_number_t * outputCount )
4228 {
4229 uint32_t i;
4230 io_scalar_inband64_t _input;
4231 io_async_ref64_t _reference;
4232
4233 for (i = 0; i < referenceCnt; i++)
4234 _reference[i] = REF64(reference[i]);
4235
4236 mach_msg_type_number_t scalar_outputCnt = 0;
4237 mach_vm_size_t ool_output_size = 0;
4238
4239 for (i = 0; i < inputCount; i++)
4240 _input[i] = SCALAR64(input[i]);
4241
4242 return (is_io_connect_async_method(connect,
4243 wake_port, _reference, referenceCnt,
4244 index,
4245 _input, inputCount,
4246 NULL, 0,
4247 0, 0,
4248 output, outputCount,
4249 NULL, &scalar_outputCnt,
4250 0, &ool_output_size));
4251 }
4252
4253 /* Routine io_async_method_scalarI_structureI */
4254 kern_return_t is_io_async_method_scalarI_structureI(
4255 io_connect_t connect,
4256 mach_port_t wake_port,
4257 io_async_ref_t reference,
4258 mach_msg_type_number_t referenceCnt,
4259 uint32_t index,
4260 io_scalar_inband_t input,
4261 mach_msg_type_number_t inputCount,
4262 io_struct_inband_t inputStruct,
4263 mach_msg_type_number_t inputStructCount )
4264 {
4265 uint32_t i;
4266 io_scalar_inband64_t _input;
4267 io_async_ref64_t _reference;
4268
4269 for (i = 0; i < referenceCnt; i++)
4270 _reference[i] = REF64(reference[i]);
4271
4272 mach_msg_type_number_t scalar_outputCnt = 0;
4273 mach_msg_type_number_t inband_outputCnt = 0;
4274 mach_vm_size_t ool_output_size = 0;
4275
4276 for (i = 0; i < inputCount; i++)
4277 _input[i] = SCALAR64(input[i]);
4278
4279 return (is_io_connect_async_method(connect,
4280 wake_port, _reference, referenceCnt,
4281 index,
4282 _input, inputCount,
4283 inputStruct, inputStructCount,
4284 0, 0,
4285 NULL, &inband_outputCnt,
4286 NULL, &scalar_outputCnt,
4287 0, &ool_output_size));
4288 }
4289
4290 /* Routine io_async_method_structureI_structureO */
4291 kern_return_t is_io_async_method_structureI_structureO(
4292 io_object_t connect,
4293 mach_port_t wake_port,
4294 io_async_ref_t reference,
4295 mach_msg_type_number_t referenceCnt,
4296 uint32_t index,
4297 io_struct_inband_t input,
4298 mach_msg_type_number_t inputCount,
4299 io_struct_inband_t output,
4300 mach_msg_type_number_t * outputCount )
4301 {
4302 uint32_t i;
4303 mach_msg_type_number_t scalar_outputCnt = 0;
4304 mach_vm_size_t ool_output_size = 0;
4305 io_async_ref64_t _reference;
4306
4307 for (i = 0; i < referenceCnt; i++)
4308 _reference[i] = REF64(reference[i]);
4309
4310 return (is_io_connect_async_method(connect,
4311 wake_port, _reference, referenceCnt,
4312 index,
4313 NULL, 0,
4314 input, inputCount,
4315 0, 0,
4316 output, outputCount,
4317 NULL, &scalar_outputCnt,
4318 0, &ool_output_size));
4319 }
4320
4321
4322 kern_return_t shim_io_async_method_scalarI_scalarO(
4323 IOExternalAsyncMethod * method,
4324 IOService * object,
4325 mach_port_t asyncWakePort,
4326 io_user_reference_t * asyncReference,
4327 uint32_t asyncReferenceCount,
4328 const io_user_scalar_t * input,
4329 mach_msg_type_number_t inputCount,
4330 io_user_scalar_t * output,
4331 mach_msg_type_number_t * outputCount )
4332 {
4333 IOAsyncMethod func;
4334 uint32_t i;
4335 io_scalar_inband_t _output;
4336 IOReturn err;
4337 io_async_ref_t reference;
4338
4339 bzero(&_output[0], sizeof(_output));
4340 for (i = 0; i < asyncReferenceCount; i++)
4341 reference[i] = REF32(asyncReference[i]);
4342
4343 err = kIOReturnBadArgument;
4344
4345 do {
4346
4347 if( inputCount != method->count0)
4348 {
4349 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4350 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4351 continue;
4352 }
4353 if( *outputCount != method->count1)
4354 {
4355 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4356 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4357 continue;
4358 }
4359
4360 func = method->func;
4361
4362 switch( inputCount) {
4363
4364 case 6:
4365 err = (object->*func)( reference,
4366 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4367 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4368 break;
4369 case 5:
4370 err = (object->*func)( reference,
4371 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4372 ARG32(input[3]), ARG32(input[4]),
4373 &_output[0] );
4374 break;
4375 case 4:
4376 err = (object->*func)( reference,
4377 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4378 ARG32(input[3]),
4379 &_output[0], &_output[1] );
4380 break;
4381 case 3:
4382 err = (object->*func)( reference,
4383 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4384 &_output[0], &_output[1], &_output[2] );
4385 break;
4386 case 2:
4387 err = (object->*func)( reference,
4388 ARG32(input[0]), ARG32(input[1]),
4389 &_output[0], &_output[1], &_output[2],
4390 &_output[3] );
4391 break;
4392 case 1:
4393 err = (object->*func)( reference,
4394 ARG32(input[0]),
4395 &_output[0], &_output[1], &_output[2],
4396 &_output[3], &_output[4] );
4397 break;
4398 case 0:
4399 err = (object->*func)( reference,
4400 &_output[0], &_output[1], &_output[2],
4401 &_output[3], &_output[4], &_output[5] );
4402 break;
4403
4404 default:
4405 IOLog("%s: Bad method table\n", object->getName());
4406 }
4407 }
4408 while( false);
4409
4410 for (i = 0; i < *outputCount; i++)
4411 output[i] = SCALAR32(_output[i]);
4412
4413 return( err);
4414 }
4415
4416
4417 /* Routine io_connect_method_scalarI_structureO */
4418 kern_return_t is_io_connect_method_scalarI_structureO(
4419 io_object_t connect,
4420 uint32_t index,
4421 io_scalar_inband_t input,
4422 mach_msg_type_number_t inputCount,
4423 io_struct_inband_t output,
4424 mach_msg_type_number_t * outputCount )
4425 {
4426 uint32_t i;
4427 io_scalar_inband64_t _input;
4428
4429 mach_msg_type_number_t scalar_outputCnt = 0;
4430 mach_vm_size_t ool_output_size = 0;
4431
4432 for (i = 0; i < inputCount; i++)
4433 _input[i] = SCALAR64(input[i]);
4434
4435 return (is_io_connect_method(connect, index,
4436 _input, inputCount,
4437 NULL, 0,
4438 0, 0,
4439 output, outputCount,
4440 NULL, &scalar_outputCnt,
4441 0, &ool_output_size));
4442 }
4443
4444 kern_return_t shim_io_connect_method_scalarI_structureO(
4445
4446 IOExternalMethod * method,
4447 IOService * object,
4448 const io_user_scalar_t * input,
4449 mach_msg_type_number_t inputCount,
4450 io_struct_inband_t output,
4451 IOByteCount * outputCount )
4452 {
4453 IOMethod func;
4454 IOReturn err;
4455
4456 err = kIOReturnBadArgument;
4457
4458 do {
4459 if( inputCount != method->count0)
4460 {
4461 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4462 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4463 continue;
4464 }
4465 if( (kIOUCVariableStructureSize != method->count1)
4466 && (*outputCount != method->count1))
4467 {
4468 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4469 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4470 continue;
4471 }
4472
4473 func = method->func;
4474
4475 switch( inputCount) {
4476
4477 case 5:
4478 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4479 ARG32(input[3]), ARG32(input[4]),
4480 output );
4481 break;
4482 case 4:
4483 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4484 ARG32(input[3]),
4485 output, (void *)outputCount );
4486 break;
4487 case 3:
4488 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4489 output, (void *)outputCount, 0 );
4490 break;
4491 case 2:
4492 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4493 output, (void *)outputCount, 0, 0 );
4494 break;
4495 case 1:
4496 err = (object->*func)( ARG32(input[0]),
4497 output, (void *)outputCount, 0, 0, 0 );
4498 break;
4499 case 0:
4500 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4501 break;
4502
4503 default:
4504 IOLog("%s: Bad method table\n", object->getName());
4505 }
4506 }
4507 while( false);
4508
4509 return( err);
4510 }
4511
4512
4513 kern_return_t shim_io_async_method_scalarI_structureO(
4514 IOExternalAsyncMethod * method,
4515 IOService * object,
4516 mach_port_t asyncWakePort,
4517 io_user_reference_t * asyncReference,
4518 uint32_t asyncReferenceCount,
4519 const io_user_scalar_t * input,
4520 mach_msg_type_number_t inputCount,
4521 io_struct_inband_t output,
4522 mach_msg_type_number_t * outputCount )
4523 {
4524 IOAsyncMethod func;
4525 uint32_t i;
4526 IOReturn err;
4527 io_async_ref_t reference;
4528
4529 for (i = 0; i < asyncReferenceCount; i++)
4530 reference[i] = REF32(asyncReference[i]);
4531
4532 err = kIOReturnBadArgument;
4533 do {
4534 if( inputCount != method->count0)
4535 {
4536 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4537 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4538 continue;
4539 }
4540 if( (kIOUCVariableStructureSize != method->count1)
4541 && (*outputCount != method->count1))
4542 {
4543 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4544 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4545 continue;
4546 }
4547
4548 func = method->func;
4549
4550 switch( inputCount) {
4551
4552 case 5:
4553 err = (object->*func)( reference,
4554 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4555 ARG32(input[3]), ARG32(input[4]),
4556 output );
4557 break;
4558 case 4:
4559 err = (object->*func)( reference,
4560 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4561 ARG32(input[3]),
4562 output, (void *)outputCount );
4563 break;
4564 case 3:
4565 err = (object->*func)( reference,
4566 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4567 output, (void *)outputCount, 0 );
4568 break;
4569 case 2:
4570 err = (object->*func)( reference,
4571 ARG32(input[0]), ARG32(input[1]),
4572 output, (void *)outputCount, 0, 0 );
4573 break;
4574 case 1:
4575 err = (object->*func)( reference,
4576 ARG32(input[0]),
4577 output, (void *)outputCount, 0, 0, 0 );
4578 break;
4579 case 0:
4580 err = (object->*func)( reference,
4581 output, (void *)outputCount, 0, 0, 0, 0 );
4582 break;
4583
4584 default:
4585 IOLog("%s: Bad method table\n", object->getName());
4586 }
4587 }
4588 while( false);
4589
4590 return( err);
4591 }
4592
4593 /* Routine io_connect_method_scalarI_structureI */
4594 kern_return_t is_io_connect_method_scalarI_structureI(
4595 io_connect_t connect,
4596 uint32_t index,
4597 io_scalar_inband_t input,
4598 mach_msg_type_number_t inputCount,
4599 io_struct_inband_t inputStruct,
4600 mach_msg_type_number_t inputStructCount )
4601 {
4602 uint32_t i;
4603 io_scalar_inband64_t _input;
4604
4605 mach_msg_type_number_t scalar_outputCnt = 0;
4606 mach_msg_type_number_t inband_outputCnt = 0;
4607 mach_vm_size_t ool_output_size = 0;
4608
4609 for (i = 0; i < inputCount; i++)
4610 _input[i] = SCALAR64(input[i]);
4611
4612 return (is_io_connect_method(connect, index,
4613 _input, inputCount,
4614 inputStruct, inputStructCount,
4615 0, 0,
4616 NULL, &inband_outputCnt,
4617 NULL, &scalar_outputCnt,
4618 0, &ool_output_size));
4619 }
4620
4621 kern_return_t shim_io_connect_method_scalarI_structureI(
4622 IOExternalMethod * method,
4623 IOService * object,
4624 const io_user_scalar_t * input,
4625 mach_msg_type_number_t inputCount,
4626 io_struct_inband_t inputStruct,
4627 mach_msg_type_number_t inputStructCount )
4628 {
4629 IOMethod func;
4630 IOReturn err = kIOReturnBadArgument;
4631
4632 do
4633 {
4634 if (inputCount != method->count0)
4635 {
4636 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4637 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4638 continue;
4639 }
4640 if( (kIOUCVariableStructureSize != method->count1)
4641 && (inputStructCount != method->count1))
4642 {
4643 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4644 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4645 continue;
4646 }
4647
4648 func = method->func;
4649
4650 switch( inputCount) {
4651
4652 case 5:
4653 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4654 ARG32(input[3]), ARG32(input[4]),
4655 inputStruct );
4656 break;
4657 case 4:
4658 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4659 ARG32(input[3]),
4660 inputStruct, (void *)(uintptr_t)inputStructCount );
4661 break;
4662 case 3:
4663 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4664 inputStruct, (void *)(uintptr_t)inputStructCount,
4665 0 );
4666 break;
4667 case 2:
4668 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4669 inputStruct, (void *)(uintptr_t)inputStructCount,
4670 0, 0 );
4671 break;
4672 case 1:
4673 err = (object->*func)( ARG32(input[0]),
4674 inputStruct, (void *)(uintptr_t)inputStructCount,
4675 0, 0, 0 );
4676 break;
4677 case 0:
4678 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4679 0, 0, 0, 0 );
4680 break;
4681
4682 default:
4683 IOLog("%s: Bad method table\n", object->getName());
4684 }
4685 }
4686 while (false);
4687
4688 return( err);
4689 }
4690
4691 kern_return_t shim_io_async_method_scalarI_structureI(
4692 IOExternalAsyncMethod * method,
4693 IOService * object,
4694 mach_port_t asyncWakePort,
4695 io_user_reference_t * asyncReference,
4696 uint32_t asyncReferenceCount,
4697 const io_user_scalar_t * input,
4698 mach_msg_type_number_t inputCount,
4699 io_struct_inband_t inputStruct,
4700 mach_msg_type_number_t inputStructCount )
4701 {
4702 IOAsyncMethod func;
4703 uint32_t i;
4704 IOReturn err = kIOReturnBadArgument;
4705 io_async_ref_t reference;
4706
4707 for (i = 0; i < asyncReferenceCount; i++)
4708 reference[i] = REF32(asyncReference[i]);
4709
4710 do
4711 {
4712 if (inputCount != method->count0)
4713 {
4714 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4715 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4716 continue;
4717 }
4718 if( (kIOUCVariableStructureSize != method->count1)
4719 && (inputStructCount != method->count1))
4720 {
4721 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4722 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4723 continue;
4724 }
4725
4726 func = method->func;
4727
4728 switch( inputCount) {
4729
4730 case 5:
4731 err = (object->*func)( reference,
4732 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4733 ARG32(input[3]), ARG32(input[4]),
4734 inputStruct );
4735 break;
4736 case 4:
4737 err = (object->*func)( reference,
4738 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4739 ARG32(input[3]),
4740 inputStruct, (void *)(uintptr_t)inputStructCount );
4741 break;
4742 case 3:
4743 err = (object->*func)( reference,
4744 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4745 inputStruct, (void *)(uintptr_t)inputStructCount,
4746 0 );
4747 break;
4748 case 2:
4749 err = (object->*func)( reference,
4750 ARG32(input[0]), ARG32(input[1]),
4751 inputStruct, (void *)(uintptr_t)inputStructCount,
4752 0, 0 );
4753 break;
4754 case 1:
4755 err = (object->*func)( reference,
4756 ARG32(input[0]),
4757 inputStruct, (void *)(uintptr_t)inputStructCount,
4758 0, 0, 0 );
4759 break;
4760 case 0:
4761 err = (object->*func)( reference,
4762 inputStruct, (void *)(uintptr_t)inputStructCount,
4763 0, 0, 0, 0 );
4764 break;
4765
4766 default:
4767 IOLog("%s: Bad method table\n", object->getName());
4768 }
4769 }
4770 while (false);
4771
4772 return( err);
4773 }
4774
4775 /* Routine io_connect_method_structureI_structureO */
4776 kern_return_t is_io_connect_method_structureI_structureO(
4777 io_object_t connect,
4778 uint32_t index,
4779 io_struct_inband_t input,
4780 mach_msg_type_number_t inputCount,
4781 io_struct_inband_t output,
4782 mach_msg_type_number_t * outputCount )
4783 {
4784 mach_msg_type_number_t scalar_outputCnt = 0;
4785 mach_vm_size_t ool_output_size = 0;
4786
4787 return (is_io_connect_method(connect, index,
4788 NULL, 0,
4789 input, inputCount,
4790 0, 0,
4791 output, outputCount,
4792 NULL, &scalar_outputCnt,
4793 0, &ool_output_size));
4794 }
4795
4796 kern_return_t shim_io_connect_method_structureI_structureO(
4797 IOExternalMethod * method,
4798 IOService * object,
4799 io_struct_inband_t input,
4800 mach_msg_type_number_t inputCount,
4801 io_struct_inband_t output,
4802 IOByteCount * outputCount )
4803 {
4804 IOMethod func;
4805 IOReturn err = kIOReturnBadArgument;
4806
4807 do
4808 {
4809 if( (kIOUCVariableStructureSize != method->count0)
4810 && (inputCount != method->count0))
4811 {
4812 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4813 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4814 continue;
4815 }
4816 if( (kIOUCVariableStructureSize != method->count1)
4817 && (*outputCount != method->count1))
4818 {
4819 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4820 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4821 continue;
4822 }
4823
4824 func = method->func;
4825
4826 if( method->count1) {
4827 if( method->count0) {
4828 err = (object->*func)( input, output,
4829 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4830 } else {
4831 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4832 }
4833 } else {
4834 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4835 }
4836 }
4837 while( false);
4838
4839
4840 return( err);
4841 }
4842
4843 kern_return_t shim_io_async_method_structureI_structureO(
4844 IOExternalAsyncMethod * method,
4845 IOService * object,
4846 mach_port_t asyncWakePort,
4847 io_user_reference_t * asyncReference,
4848 uint32_t asyncReferenceCount,
4849 io_struct_inband_t input,
4850 mach_msg_type_number_t inputCount,
4851 io_struct_inband_t output,
4852 mach_msg_type_number_t * outputCount )
4853 {
4854 IOAsyncMethod func;
4855 uint32_t i;
4856 IOReturn err;
4857 io_async_ref_t reference;
4858
4859 for (i = 0; i < asyncReferenceCount; i++)
4860 reference[i] = REF32(asyncReference[i]);
4861
4862 err = kIOReturnBadArgument;
4863 do
4864 {
4865 if( (kIOUCVariableStructureSize != method->count0)
4866 && (inputCount != method->count0))
4867 {
4868 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4869 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4870 continue;
4871 }
4872 if( (kIOUCVariableStructureSize != method->count1)
4873 && (*outputCount != method->count1))
4874 {
4875 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4876 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4877 continue;
4878 }
4879
4880 func = method->func;
4881
4882 if( method->count1) {
4883 if( method->count0) {
4884 err = (object->*func)( reference,
4885 input, output,
4886 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4887 } else {
4888 err = (object->*func)( reference,
4889 output, outputCount, 0, 0, 0, 0 );
4890 }
4891 } else {
4892 err = (object->*func)( reference,
4893 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4894 }
4895 }
4896 while( false);
4897
4898 return( err);
4899 }
4900
4901 #if !NO_KEXTD
4902 bool gIOKextdClearedBusy = false;
4903 #endif
4904
4905 /* Routine io_catalog_send_data */
4906 kern_return_t is_io_catalog_send_data(
4907 mach_port_t master_port,
4908 uint32_t flag,
4909 io_buf_ptr_t inData,
4910 mach_msg_type_number_t inDataCount,
4911 kern_return_t * result)
4912 {
4913 #if NO_KEXTD
4914 return kIOReturnNotPrivileged;
4915 #else /* NO_KEXTD */
4916 OSObject * obj = 0;
4917 vm_offset_t data;
4918 kern_return_t kr = kIOReturnError;
4919
4920 //printf("io_catalog_send_data called. flag: %d\n", flag);
4921
4922 if( master_port != master_device_port)
4923 return kIOReturnNotPrivileged;
4924
4925 if( (flag != kIOCatalogRemoveKernelLinker &&
4926 flag != kIOCatalogKextdActive &&
4927 flag != kIOCatalogKextdFinishedLaunching) &&
4928 ( !inData || !inDataCount) )
4929 {
4930 return kIOReturnBadArgument;
4931 }
4932
4933 if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
4934 {
4935 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4936 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4937 OSSafeReleaseNULL(taskName);
4938 // For now, fake success to not break applications relying on this function succeeding.
4939 // See <rdar://problem/32554970> for more details.
4940 return kIOReturnSuccess;
4941 }
4942
4943 if (inData) {
4944 vm_map_offset_t map_data;
4945
4946 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4947 return( kIOReturnMessageTooLarge);
4948
4949 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4950 data = CAST_DOWN(vm_offset_t, map_data);
4951
4952 if( kr != KERN_SUCCESS)
4953 return kr;
4954
4955 // must return success after vm_map_copyout() succeeds
4956
4957 if( inDataCount ) {
4958 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4959 vm_deallocate( kernel_map, data, inDataCount );
4960 if( !obj) {
4961 *result = kIOReturnNoMemory;
4962 return( KERN_SUCCESS);
4963 }
4964 }
4965 }
4966
4967 switch ( flag ) {
4968 case kIOCatalogResetDrivers:
4969 case kIOCatalogResetDriversNoMatch: {
4970 OSArray * array;
4971
4972 array = OSDynamicCast(OSArray, obj);
4973 if (array) {
4974 if ( !gIOCatalogue->resetAndAddDrivers(array,
4975 flag == kIOCatalogResetDrivers) ) {
4976
4977 kr = kIOReturnError;
4978 }
4979 } else {
4980 kr = kIOReturnBadArgument;
4981 }
4982 }
4983 break;
4984
4985 case kIOCatalogAddDrivers:
4986 case kIOCatalogAddDriversNoMatch: {
4987 OSArray * array;
4988
4989 array = OSDynamicCast(OSArray, obj);
4990 if ( array ) {
4991 if ( !gIOCatalogue->addDrivers( array ,
4992 flag == kIOCatalogAddDrivers) ) {
4993 kr = kIOReturnError;
4994 }
4995 }
4996 else {
4997 kr = kIOReturnBadArgument;
4998 }
4999 }
5000 break;
5001
5002 case kIOCatalogRemoveDrivers:
5003 case kIOCatalogRemoveDriversNoMatch: {
5004 OSDictionary * dict;
5005
5006 dict = OSDynamicCast(OSDictionary, obj);
5007 if ( dict ) {
5008 if ( !gIOCatalogue->removeDrivers( dict,
5009 flag == kIOCatalogRemoveDrivers ) ) {
5010 kr = kIOReturnError;
5011 }
5012 }
5013 else {
5014 kr = kIOReturnBadArgument;
5015 }
5016 }
5017 break;
5018
5019 case kIOCatalogStartMatching: {
5020 OSDictionary * dict;
5021
5022 dict = OSDynamicCast(OSDictionary, obj);
5023 if ( dict ) {
5024 if ( !gIOCatalogue->startMatching( dict ) ) {
5025 kr = kIOReturnError;
5026 }
5027 }
5028 else {
5029 kr = kIOReturnBadArgument;
5030 }
5031 }
5032 break;
5033
5034 case kIOCatalogRemoveKernelLinker:
5035 kr = KERN_NOT_SUPPORTED;
5036 break;
5037
5038 case kIOCatalogKextdActive:
5039 #if !NO_KEXTD
5040 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
5041 OSKext::setKextdActive();
5042
5043 /* Dump all nonloaded startup extensions; kextd will now send them
5044 * down on request.
5045 */
5046 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
5047 #endif
5048 kr = kIOReturnSuccess;
5049 break;
5050
5051 case kIOCatalogKextdFinishedLaunching: {
5052 #if !NO_KEXTD
5053 if (!gIOKextdClearedBusy) {
5054 IOService * serviceRoot = IOService::getServiceRoot();
5055 if (serviceRoot) {
5056 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
5057 serviceRoot->adjustBusy(-1);
5058 gIOKextdClearedBusy = true;
5059 }
5060 }
5061 #endif
5062 kr = kIOReturnSuccess;
5063 }
5064 break;
5065
5066 default:
5067 kr = kIOReturnBadArgument;
5068 break;
5069 }
5070
5071 if (obj) obj->release();
5072
5073 *result = kr;
5074 return( KERN_SUCCESS);
5075 #endif /* NO_KEXTD */
5076 }
5077
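/*
 * A user-space sketch of the caller side of this routine: IOKit.framework's
 * IOCatalogueSendData() hands a serialized personality list to
 * is_io_catalog_send_data(), which unserializes it with OSUnserializeXML() as
 * above. The XML payload and addPersonality() are hypothetical, and the caller
 * must hold the com.apple.rootless.kext-management entitlement for the request
 * to take effect.
 */
#if 0
#include <IOKit/IOKitLib.h>
#include <string.h>

static kern_return_t addPersonality(mach_port_t masterPort)
{
    /* Hypothetical payload: an array holding one driver personality, in the
     * XML form OSUnserializeXML() accepts. */
    const char * xml =
        "<array><dict>"
        "<key>IOProviderClass</key><string>IOResources</string>"
        "<key>IOClass</key><string>MyDriver</string>"
        "</dict></array>";

    return IOCatalogueSendData(masterPort, kIOCatalogAddDrivers,
                               xml, (uint32_t) strlen(xml) + 1);
}
#endif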
5078 /* Routine io_catalog_terminate */
5079 kern_return_t is_io_catalog_terminate(
5080 mach_port_t master_port,
5081 uint32_t flag,
5082 io_name_t name )
5083 {
5084 kern_return_t kr;
5085
5086 if( master_port != master_device_port )
5087 return kIOReturnNotPrivileged;
5088
5089 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5090 kIOClientPrivilegeAdministrator );
5091 if( kIOReturnSuccess != kr)
5092 return( kr );
5093
5094 switch ( flag ) {
5095 #if !defined(SECURE_KERNEL)
5096 case kIOCatalogServiceTerminate:
5097 OSIterator * iter;
5098 IOService * service;
5099
5100 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5101 kIORegistryIterateRecursively);
5102 if ( !iter )
5103 return kIOReturnNoMemory;
5104
5105 do {
5106 iter->reset();
5107 while( (service = (IOService *)iter->getNextObject()) ) {
5108 if( service->metaCast(name)) {
5109 if ( !service->terminate( kIOServiceRequired
5110 | kIOServiceSynchronous) ) {
5111 kr = kIOReturnUnsupported;
5112 break;
5113 }
5114 }
5115 }
5116 } while( !service && !iter->isValid());
5117 iter->release();
5118 break;
5119
5120 case kIOCatalogModuleUnload:
5121 case kIOCatalogModuleTerminate:
5122 kr = gIOCatalogue->terminateDriversForModule(name,
5123 flag == kIOCatalogModuleUnload);
5124 break;
5125 #endif
5126
5127 default:
5128 kr = kIOReturnBadArgument;
5129 break;
5130 }
5131
5132 return( kr );
5133 }
5134
5135 /* Routine io_catalog_get_data */
5136 kern_return_t is_io_catalog_get_data(
5137 mach_port_t master_port,
5138 uint32_t flag,
5139 io_buf_ptr_t *outData,
5140 mach_msg_type_number_t *outDataCount)
5141 {
5142 kern_return_t kr = kIOReturnSuccess;
5143 OSSerialize * s;
5144
5145 if( master_port != master_device_port)
5146 return kIOReturnNotPrivileged;
5147
5148 //printf("io_catalog_get_data called. flag: %d\n", flag);
5149
5150 s = OSSerialize::withCapacity(4096);
5151 if ( !s )
5152 return kIOReturnNoMemory;
5153
5154 kr = gIOCatalogue->serializeData(flag, s);
5155
5156 if ( kr == kIOReturnSuccess ) {
5157 vm_offset_t data;
5158 vm_map_copy_t copy;
5159 vm_size_t size;
5160
5161 size = s->getLength();
5162 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5163 if ( kr == kIOReturnSuccess ) {
5164 bcopy(s->text(), (void *)data, size);
5165 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5166 (vm_map_size_t)size, true, &copy);
5167 *outData = (char *)copy;
5168 *outDataCount = size;
5169 }
5170 }
5171
5172 s->release();
5173
5174 return kr;
5175 }
5176
5177 /* Routine io_catalog_get_gen_count */
5178 kern_return_t is_io_catalog_get_gen_count(
5179 mach_port_t master_port,
5180 uint32_t *genCount)
5181 {
5182 if( master_port != master_device_port)
5183 return kIOReturnNotPrivileged;
5184
5185 //printf("io_catalog_get_gen_count called.\n");
5186
5187 if ( !genCount )
5188 return kIOReturnBadArgument;
5189
5190 *genCount = gIOCatalogue->getGenerationCount();
5191
5192 return kIOReturnSuccess;
5193 }
5194
5195 /* Routine io_catalog_module_loaded.
5196 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it does not appear to be used.
5197 */
5198 kern_return_t is_io_catalog_module_loaded(
5199 mach_port_t master_port,
5200 io_name_t name)
5201 {
5202 if( master_port != master_device_port)
5203 return kIOReturnNotPrivileged;
5204
5205 //printf("io_catalog_module_loaded called. name %s\n", name);
5206
5207 if ( !name )
5208 return kIOReturnBadArgument;
5209
5210 gIOCatalogue->moduleHasLoaded(name);
5211
5212 return kIOReturnSuccess;
5213 }
5214
5215 kern_return_t is_io_catalog_reset(
5216 mach_port_t master_port,
5217 uint32_t flag)
5218 {
5219 if( master_port != master_device_port)
5220 return kIOReturnNotPrivileged;
5221
5222 switch ( flag ) {
5223 case kIOCatalogResetDefault:
5224 gIOCatalogue->reset();
5225 break;
5226
5227 default:
5228 return kIOReturnBadArgument;
5229 }
5230
5231 return kIOReturnSuccess;
5232 }
5233
5234 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5235 {
5236 kern_return_t result = kIOReturnBadArgument;
5237 IOUserClient *userClient;
5238
5239 if ((userClient = OSDynamicCast(IOUserClient,
5240 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5241 IOExternalTrap *trap;
5242 IOService *target = NULL;
5243
5244 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5245
5246 if (trap && target) {
5247 IOTrap func;
5248
5249 func = trap->func;
5250
5251 if (func) {
5252 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5253 }
5254 }
5255
5256 iokit_remove_connect_reference(userClient);
5257 }
5258
5259 return result;
5260 }
5261
5262 } /* extern "C" */
5263
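/*
 * The trap entry point above bypasses MIG: the user client hands back an
 * IOExternalTrap for the requested index and the six register arguments are
 * passed straight through. A minimal sketch; MyUserClient and doFastPath are
 * hypothetical, and user space would reach it with IOConnectTrap6().
 */
#if 0
IOExternalTrap * MyUserClient::getTargetAndTrapForIndex(
    IOService ** targetP, UInt32 index )
{
    static const IOExternalTrap sTrap = {
        NULL,                               /* target set via *targetP below */
        (IOTrap) &MyUserClient::doFastPath  /* IOReturn doFastPath(void *p1, ..., void *p6) */
    };

    if (index != 0)
        return NULL;
    *targetP = this;
    return (IOExternalTrap *) &sTrap;
}

/* User space:  IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6); */
#endif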
5264 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5265 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5266 {
5267 IOReturn err;
5268 IOService * object;
5269 IOByteCount structureOutputSize;
5270
5271 if (dispatch)
5272 {
5273 uint32_t count;
5274 count = dispatch->checkScalarInputCount;
5275 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5276 {
5277 return (kIOReturnBadArgument);
5278 }
5279
5280 count = dispatch->checkStructureInputSize;
5281 if ((kIOUCVariableStructureSize != count)
5282 && (count != ((args->structureInputDescriptor)
5283 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5284 {
5285 return (kIOReturnBadArgument);
5286 }
5287
5288 count = dispatch->checkScalarOutputCount;
5289 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5290 {
5291 return (kIOReturnBadArgument);
5292 }
5293
5294 count = dispatch->checkStructureOutputSize;
5295 if ((kIOUCVariableStructureSize != count)
5296 && (count != ((args->structureOutputDescriptor)
5297 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5298 {
5299 return (kIOReturnBadArgument);
5300 }
5301
5302 if (dispatch->function)
5303 err = (*dispatch->function)(target, reference, args);
5304 else
5305 err = kIOReturnNoCompletion; /* implementor can dispatch */
5306
5307 return (err);
5308 }
5309
5310
5311 // pre-Leopard APIs don't do ool structs
5312 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5313 {
5314 err = kIOReturnIPCError;
5315 return (err);
5316 }
5317
5318 structureOutputSize = args->structureOutputSize;
5319
5320 if (args->asyncWakePort)
5321 {
5322 IOExternalAsyncMethod * method;
5323 object = 0;
5324 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5325 return (kIOReturnUnsupported);
5326
5327 if (kIOUCForegroundOnly & method->flags)
5328 {
5329 if (task_is_gpu_denied(current_task()))
5330 return (kIOReturnNotPermitted);
5331 }
5332
5333 switch (method->flags & kIOUCTypeMask)
5334 {
5335 case kIOUCScalarIStructI:
5336 err = shim_io_async_method_scalarI_structureI( method, object,
5337 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5338 args->scalarInput, args->scalarInputCount,
5339 (char *)args->structureInput, args->structureInputSize );
5340 break;
5341
5342 case kIOUCScalarIScalarO:
5343 err = shim_io_async_method_scalarI_scalarO( method, object,
5344 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5345 args->scalarInput, args->scalarInputCount,
5346 args->scalarOutput, &args->scalarOutputCount );
5347 break;
5348
5349 case kIOUCScalarIStructO:
5350 err = shim_io_async_method_scalarI_structureO( method, object,
5351 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5352 args->scalarInput, args->scalarInputCount,
5353 (char *) args->structureOutput, &args->structureOutputSize );
5354 break;
5355
5356
5357 case kIOUCStructIStructO:
5358 err = shim_io_async_method_structureI_structureO( method, object,
5359 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5360 (char *)args->structureInput, args->structureInputSize,
5361 (char *) args->structureOutput, &args->structureOutputSize );
5362 break;
5363
5364 default:
5365 err = kIOReturnBadArgument;
5366 break;
5367 }
5368 }
5369 else
5370 {
5371 IOExternalMethod * method;
5372 object = 0;
5373 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5374 return (kIOReturnUnsupported);
5375
5376 if (kIOUCForegroundOnly & method->flags)
5377 {
5378 if (task_is_gpu_denied(current_task()))
5379 return (kIOReturnNotPermitted);
5380 }
5381
5382 switch (method->flags & kIOUCTypeMask)
5383 {
5384 case kIOUCScalarIStructI:
5385 err = shim_io_connect_method_scalarI_structureI( method, object,
5386 args->scalarInput, args->scalarInputCount,
5387 (char *) args->structureInput, args->structureInputSize );
5388 break;
5389
5390 case kIOUCScalarIScalarO:
5391 err = shim_io_connect_method_scalarI_scalarO( method, object,
5392 args->scalarInput, args->scalarInputCount,
5393 args->scalarOutput, &args->scalarOutputCount );
5394 break;
5395
5396 case kIOUCScalarIStructO:
5397 err = shim_io_connect_method_scalarI_structureO( method, object,
5398 args->scalarInput, args->scalarInputCount,
5399 (char *) args->structureOutput, &structureOutputSize );
5400 break;
5401
5402
5403 case kIOUCStructIStructO:
5404 err = shim_io_connect_method_structureI_structureO( method, object,
5405 (char *) args->structureInput, args->structureInputSize,
5406 (char *) args->structureOutput, &structureOutputSize );
5407 break;
5408
5409 default:
5410 err = kIOReturnBadArgument;
5411 break;
5412 }
5413 }
5414
5415 args->structureOutputSize = structureOutputSize;
5416
5417 return (err);
5418 }
5419
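/*
 * A minimal sketch of how a subclass typically drives the dispatch path checked
 * above: it selects an IOExternalMethodDispatch entry and lets the superclass
 * validate the scalar/structure counts before the handler runs. MyUserClient,
 * kMyMethodCount and sSetGain are hypothetical names.
 */
#if 0
enum { kMyMethodCount = 1 };

static const IOExternalMethodDispatch sDispatch[kMyMethodCount] = {
    {   /* selector 0 */
        &MyUserClient::sSetGain,    /* static IOReturn sSetGain(OSObject *, void *, IOExternalMethodArguments *) */
        2,                          /* checkScalarInputCount     */
        0,                          /* checkStructureInputSize   */
        1,                          /* checkScalarOutputCount    */
        0                           /* checkStructureOutputSize  */
    },
};

IOReturn MyUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
    IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    if (selector >= kMyMethodCount)
        return kIOReturnBadArgument;

    dispatch = (IOExternalMethodDispatch *) &sDispatch[selector];
    if (!target)
        target = this;

    return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
}
#endif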
5420 #if __LP64__
5421 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5422 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5423 #else
5424 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5425 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5426 #endif
5427 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5428 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5429 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5430 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5431 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5432 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5433 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5434 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5435 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5436 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5437 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5438 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5439 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5440 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5441