apple/xnu.git: iokit/Kernel/IOUserClient.cpp (blob 1faa211e2af7c2f2a5819536e5e43efa8257f645)
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #if CONFIG_MACF
49
50 extern "C" {
51 #include <security/mac_framework.h>
52 };
53 #include <sys/kauth.h>
54
55 #define IOMACF_LOG 0
56
57 #endif /* CONFIG_MACF */
58
59 #include <IOKit/assert.h>
60
61 #include "IOServicePrivate.h"
62 #include "IOKitKernelInternal.h"
63
64 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
65 #define SCALAR32(x) ((uint32_t )x)
66 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
67 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
68 #define REF32(x) ((int)(x))
69
70 enum
71 {
72 kIOUCAsync0Flags = 3ULL,
73 kIOUCAsync64Flag = 1ULL
74 };
75
76 #if IOKITSTATS
77
78 #define IOStatisticsRegisterCounter() \
79 do { \
80 reserved->counter = IOStatistics::registerUserClient(this); \
81 } while (0)
82
83 #define IOStatisticsUnregisterCounter() \
84 do { \
85 if (reserved) \
86 IOStatistics::unregisterUserClient(reserved->counter); \
87 } while (0)
88
89 #define IOStatisticsClientCall() \
90 do { \
91 IOStatistics::countUserClientCall(client); \
92 } while (0)
93
94 #else
95
96 #define IOStatisticsRegisterCounter()
97 #define IOStatisticsUnregisterCounter()
98 #define IOStatisticsClientCall()
99
100 #endif /* IOKITSTATS */
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 // definitions we should get from osfmk
105
106 //typedef struct ipc_port * ipc_port_t;
107 typedef natural_t ipc_kobject_type_t;
108
109 #define IKOT_IOKIT_SPARE 27
110 #define IKOT_IOKIT_CONNECT 29
111 #define IKOT_IOKIT_OBJECT 30
112
113 extern "C" {
114
115 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
116 ipc_kobject_type_t type );
117
118 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
119
120 extern mach_port_name_t iokit_make_send_right( task_t task,
121 io_object_t obj, ipc_kobject_type_t type );
122
123 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
124
125 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
126
127 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
128
129 extern ipc_port_t master_device_port;
130
131 extern void iokit_retain_port( ipc_port_t port );
132 extern void iokit_release_port( ipc_port_t port );
133 extern void iokit_release_port_send( ipc_port_t port );
134
135 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
136
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139
140 } /* extern "C" */
141
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146
147 class IOMachPort : public OSObject
148 {
149 OSDeclareDefaultStructors(IOMachPort)
150 public:
151 OSObject * object;
152 ipc_port_t port;
153 UInt32 mscount;
154 UInt8 holdDestroy;
155
156 static IOMachPort * portForObject( OSObject * obj,
157 ipc_kobject_type_t type );
158 static bool noMoreSendersForObject( OSObject * obj,
159 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
160 static void releasePortForObject( OSObject * obj,
161 ipc_kobject_type_t type );
162 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
163
164 static OSDictionary * dictForType( ipc_kobject_type_t type );
165
166 static mach_port_name_t makeSendRightForTask( task_t task,
167 io_object_t obj, ipc_kobject_type_t type );
168
169 virtual void free() APPLE_KEXT_OVERRIDE;
170 };
171
172 #define super OSObject
173 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
174
175 static IOLock * gIOObjectPortLock;
176
177 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
178
179 // not in dictForType() for debugging ease
180 static OSDictionary * gIOObjectPorts;
181 static OSDictionary * gIOConnectPorts;
182
183 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
184 {
185 OSDictionary ** dict;
186
187 if( IKOT_IOKIT_OBJECT == type )
188 dict = &gIOObjectPorts;
189 else if( IKOT_IOKIT_CONNECT == type )
190 dict = &gIOConnectPorts;
191 else
192 return( 0 );
193
194 if( 0 == *dict)
195 *dict = OSDictionary::withCapacity( 1 );
196
197 return( *dict );
198 }
199
200 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
201 ipc_kobject_type_t type )
202 {
203 IOMachPort * inst = 0;
204 OSDictionary * dict;
205
206 IOTakeLock( gIOObjectPortLock);
207
208 do {
209
210 dict = dictForType( type );
211 if( !dict)
212 continue;
213
214 if( (inst = (IOMachPort *)
215 dict->getObject( (const OSSymbol *) obj ))) {
216 inst->mscount++;
217 inst->retain();
218 continue;
219 }
220
221 inst = new IOMachPort;
222 if( inst && !inst->init()) {
223 inst = 0;
224 continue;
225 }
226
227 inst->port = iokit_alloc_object_port( obj, type );
228 if( inst->port) {
229 // retains obj
230 dict->setObject( (const OSSymbol *) obj, inst );
231 inst->mscount++;
232
233 } else {
234 inst->release();
235 inst = 0;
236 }
237
238 } while( false );
239
240 IOUnlock( gIOObjectPortLock);
241
242 return( inst );
243 }
244
245 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
246 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
247 {
248 OSDictionary * dict;
249 IOMachPort * machPort;
250 bool destroyed = true;
251
252 IOTakeLock( gIOObjectPortLock);
253
254 if( (dict = dictForType( type ))) {
255 obj->retain();
256
257 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
258 if( machPort) {
259 destroyed = (machPort->mscount <= *mscount);
260 if( destroyed)
261 dict->removeObject( (const OSSymbol *) obj );
262 else
263 *mscount = machPort->mscount;
264 }
265 obj->release();
266 }
267
268 IOUnlock( gIOObjectPortLock);
269
270 return( destroyed );
271 }
272
273 void IOMachPort::releasePortForObject( OSObject * obj,
274 ipc_kobject_type_t type )
275 {
276 OSDictionary * dict;
277 IOMachPort * machPort;
278
279 IOTakeLock( gIOObjectPortLock);
280
281 if( (dict = dictForType( type ))) {
282 obj->retain();
283 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
284 if( machPort && !machPort->holdDestroy)
285 dict->removeObject( (const OSSymbol *) obj );
286 obj->release();
287 }
288
289 IOUnlock( gIOObjectPortLock);
290 }
291
292 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
293 {
294 OSDictionary * dict;
295 IOMachPort * machPort;
296
297 IOLockLock( gIOObjectPortLock );
298
299 if( (dict = dictForType( type ))) {
300 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
301 if( machPort)
302 machPort->holdDestroy = true;
303 }
304
305 IOLockUnlock( gIOObjectPortLock );
306 }
307
308 void IOUserClient::destroyUserReferences( OSObject * obj )
309 {
310 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
311
312 // panther, 3160200
313 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
314
315 OSDictionary * dict;
316
317 IOTakeLock( gIOObjectPortLock);
318 obj->retain();
319
320 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
321 {
322 IOMachPort * port;
323 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
324 if (port)
325 {
326 IOUserClient * uc;
327 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
328 {
329 dict->setObject((const OSSymbol *) uc->mappings, port);
330 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
331
332 uc->mappings->release();
333 uc->mappings = 0;
334 }
335 dict->removeObject( (const OSSymbol *) obj );
336 }
337 }
338 obj->release();
339 IOUnlock( gIOObjectPortLock);
340 }
341
342 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
343 io_object_t obj, ipc_kobject_type_t type )
344 {
345 return( iokit_make_send_right( task, obj, type ));
346 }
347
348 void IOMachPort::free( void )
349 {
350 if( port)
351 iokit_destroy_object_port( port );
352 super::free();
353 }
354
355 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
356
357 class IOUserIterator : public OSIterator
358 {
359 OSDeclareDefaultStructors(IOUserIterator)
360 public:
361 OSObject * userIteratorObject;
362 IOLock * lock;
363
364 static IOUserIterator * withIterator(OSIterator * iter);
365 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
366 virtual void free() APPLE_KEXT_OVERRIDE;
367
368 virtual void reset() APPLE_KEXT_OVERRIDE;
369 virtual bool isValid() APPLE_KEXT_OVERRIDE;
370 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
371 };
372
373 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
374
375 class IOUserNotification : public IOUserIterator
376 {
377 OSDeclareDefaultStructors(IOUserNotification)
378
379 #define holdNotify userIteratorObject
380
381 public:
382
383 virtual void free() APPLE_KEXT_OVERRIDE;
384
385 virtual void setNotification( IONotifier * obj );
386
387 virtual void reset() APPLE_KEXT_OVERRIDE;
388 virtual bool isValid() APPLE_KEXT_OVERRIDE;
389 };
390
391 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
392
393 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
394
395 IOUserIterator *
396 IOUserIterator::withIterator(OSIterator * iter)
397 {
398 IOUserIterator * me;
399
400 if (!iter) return (0);
401
402 me = new IOUserIterator;
403 if (me && !me->init())
404 {
405 me->release();
406 me = 0;
407 }
408 if (!me) return me;
409 me->userIteratorObject = iter;
410
411 return (me);
412 }
413
414 bool
415 IOUserIterator::init( void )
416 {
417 if (!OSObject::init()) return (false);
418
419 lock = IOLockAlloc();
420 if( !lock)
421 return( false );
422
423 return (true);
424 }
425
426 void
427 IOUserIterator::free()
428 {
429 if (userIteratorObject) userIteratorObject->release();
430 if (lock) IOLockFree(lock);
431 OSObject::free();
432 }
433
434 void
435 IOUserIterator::reset()
436 {
437 IOLockLock(lock);
438 assert(OSDynamicCast(OSIterator, userIteratorObject));
439 ((OSIterator *)userIteratorObject)->reset();
440 IOLockUnlock(lock);
441 }
442
443 bool
444 IOUserIterator::isValid()
445 {
446 bool ret;
447
448 IOLockLock(lock);
449 assert(OSDynamicCast(OSIterator, userIteratorObject));
450 ret = ((OSIterator *)userIteratorObject)->isValid();
451 IOLockUnlock(lock);
452
453 return (ret);
454 }
455
456 OSObject *
457 IOUserIterator::getNextObject()
458 {
459 OSObject * ret;
460
461 IOLockLock(lock);
462 assert(OSDynamicCast(OSIterator, userIteratorObject));
463 ret = ((OSIterator *)userIteratorObject)->getNextObject();
464 IOLockUnlock(lock);
465
466 return (ret);
467 }
468
469 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
470 extern "C" {
471
472 // functions called from osfmk/device/iokit_rpc.c
473
474 void
475 iokit_add_reference( io_object_t obj )
476 {
477 if( obj)
478 obj->retain();
479 }
480
481 void
482 iokit_remove_reference( io_object_t obj )
483 {
484 if( obj)
485 obj->release();
486 }
487
488 void
489 iokit_add_connect_reference( io_object_t obj )
490 {
491 IOUserClient * uc;
492
493 if (!obj) return;
494
495 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
496
497 obj->retain();
498 }
499
500 void
501 iokit_remove_connect_reference( io_object_t obj )
502 {
503 IOUserClient * uc;
504 bool finalize = false;
505
506 if (!obj) return;
507
508 if ((uc = OSDynamicCast(IOUserClient, obj)))
509 {
510 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
511 {
512 IOLockLock(gIOObjectPortLock);
513 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
514 IOLockUnlock(gIOObjectPortLock);
515 }
516 if (finalize) uc->scheduleFinalize(true);
517 }
518
519 obj->release();
520 }
521
522 bool
523 IOUserClient::finalizeUserReferences(OSObject * obj)
524 {
525 IOUserClient * uc;
526 bool ok = true;
527
528 if ((uc = OSDynamicCast(IOUserClient, obj)))
529 {
530 IOLockLock(gIOObjectPortLock);
531 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
532 IOLockUnlock(gIOObjectPortLock);
533 }
534 return (ok);
535 }
536
537 ipc_port_t
538 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
539 {
540 IOMachPort * machPort;
541 ipc_port_t port;
542
543 if( (machPort = IOMachPort::portForObject( obj, type ))) {
544
545 port = machPort->port;
546 if( port)
547 iokit_retain_port( port );
548
549 machPort->release();
550
551 } else
552 port = NULL;
553
554 return( port );
555 }
556
557 kern_return_t
558 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
559 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
560 {
561 IOUserClient * client;
562 IOMemoryMap * map;
563 IOUserNotification * notify;
564
565 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
566 return( kIOReturnNotReady );
567
568 if( IKOT_IOKIT_CONNECT == type)
569 {
570 if( (client = OSDynamicCast( IOUserClient, obj ))) {
571 IOStatisticsClientCall();
572 client->clientDied();
573 }
574 }
575 else if( IKOT_IOKIT_OBJECT == type)
576 {
577 if( (map = OSDynamicCast( IOMemoryMap, obj )))
578 map->taskDied();
579 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
580 notify->setNotification( 0 );
581 }
582
583 return( kIOReturnSuccess );
584 }
585
586 }; /* extern "C" */
587
588 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
589
590 class IOServiceUserNotification : public IOUserNotification
591 {
592 OSDeclareDefaultStructors(IOServiceUserNotification)
593
594 struct PingMsg {
595 mach_msg_header_t msgHdr;
596 OSNotificationHeader64 notifyHeader;
597 };
598
599 enum { kMaxOutstanding = 1024 };
600
601 PingMsg * pingMsg;
602 vm_size_t msgSize;
603 OSArray * newSet;
604 OSObject * lastEntry;
605 bool armed;
606
607 public:
608
609 virtual bool init( mach_port_t port, natural_t type,
610 void * reference, vm_size_t referenceSize,
611 bool clientIs64 );
612 virtual void free() APPLE_KEXT_OVERRIDE;
613
614 static bool _handler( void * target,
615 void * ref, IOService * newService, IONotifier * notifier );
616 virtual bool handler( void * ref, IOService * newService );
617
618 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
619 };
620
621 class IOServiceMessageUserNotification : public IOUserNotification
622 {
623 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
624
625 struct PingMsg {
626 mach_msg_header_t msgHdr;
627 mach_msg_body_t msgBody;
628 mach_msg_port_descriptor_t ports[1];
629 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
630 };
631
632 PingMsg * pingMsg;
633 vm_size_t msgSize;
634 uint8_t clientIs64;
635 int owningPID;
636
637 public:
638
639 virtual bool init( mach_port_t port, natural_t type,
640 void * reference, vm_size_t referenceSize,
641 vm_size_t extraSize,
642 bool clientIs64 );
643
644 virtual void free() APPLE_KEXT_OVERRIDE;
645
646 static IOReturn _handler( void * target, void * ref,
647 UInt32 messageType, IOService * provider,
648 void * messageArgument, vm_size_t argSize );
649 virtual IOReturn handler( void * ref,
650 UInt32 messageType, IOService * provider,
651 void * messageArgument, vm_size_t argSize );
652
653 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
654 };
655
656 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
657
658 #undef super
659 #define super IOUserIterator
660 OSDefineMetaClass( IOUserNotification, IOUserIterator )
661 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
662
663 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
664
665 void IOUserNotification::free( void )
666 {
667 if (holdNotify)
668 {
669 assert(OSDynamicCast(IONotifier, holdNotify));
670 ((IONotifier *)holdNotify)->remove();
671 holdNotify = 0;
672 }
673 // can't be in handler now
674
675 super::free();
676 }
677
678
679 void IOUserNotification::setNotification( IONotifier * notify )
680 {
681 OSObject * previousNotify;
682
683 IOLockLock( gIOObjectPortLock);
684
685 previousNotify = holdNotify;
686 holdNotify = notify;
687
688 IOLockUnlock( gIOObjectPortLock);
689
690 if( previousNotify)
691 {
692 assert(OSDynamicCast(IONotifier, previousNotify));
693 ((IONotifier *)previousNotify)->remove();
694 }
695 }
696
697 void IOUserNotification::reset()
698 {
699 // ?
700 }
701
702 bool IOUserNotification::isValid()
703 {
704 return( true );
705 }
706
707 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
708
709 #undef super
710 #define super IOUserNotification
711 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
712
713 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
714
715 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
716 void * reference, vm_size_t referenceSize,
717 bool clientIs64 )
718 {
719 if( !super::init())
720 return( false );
721
722 newSet = OSArray::withCapacity( 1 );
723 if( !newSet)
724 return( false );
725
726 if (referenceSize > sizeof(OSAsyncReference64))
727 return( false );
728
729 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
730 pingMsg = (PingMsg *) IOMalloc( msgSize);
731 if( !pingMsg)
732 return( false );
733
734 bzero( pingMsg, msgSize);
735
736 pingMsg->msgHdr.msgh_remote_port = port;
737 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
738 MACH_MSG_TYPE_COPY_SEND /*remote*/,
739 MACH_MSG_TYPE_MAKE_SEND /*local*/);
740 pingMsg->msgHdr.msgh_size = msgSize;
741 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
742
743 pingMsg->notifyHeader.size = 0;
744 pingMsg->notifyHeader.type = type;
745 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
746
747 return( true );
748 }
749
750 void IOServiceUserNotification::free( void )
751 {
752 PingMsg * _pingMsg;
753 vm_size_t _msgSize;
754 OSArray * _newSet;
755 OSObject * _lastEntry;
756
757 _pingMsg = pingMsg;
758 _msgSize = msgSize;
759 _lastEntry = lastEntry;
760 _newSet = newSet;
761
762 super::free();
763
764 if( _pingMsg && _msgSize) {
765 if (_pingMsg->msgHdr.msgh_remote_port) {
766 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
767 }
768 IOFree(_pingMsg, _msgSize);
769 }
770
771 if( _lastEntry)
772 _lastEntry->release();
773
774 if( _newSet)
775 _newSet->release();
776 }
777
778 bool IOServiceUserNotification::_handler( void * target,
779 void * ref, IOService * newService, IONotifier * notifier )
780 {
781 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
782 }
783
784 bool IOServiceUserNotification::handler( void * ref,
785 IOService * newService )
786 {
787 unsigned int count;
788 kern_return_t kr;
789 ipc_port_t port = NULL;
790 bool sendPing = false;
791
792 IOTakeLock( lock );
793
794 count = newSet->getCount();
795 if( count < kMaxOutstanding) {
796
797 newSet->setObject( newService );
798 if( (sendPing = (armed && (0 == count))))
799 armed = false;
800 }
801
802 IOUnlock( lock );
803
804 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
805 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
806
807 if( sendPing) {
808 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
809 pingMsg->msgHdr.msgh_local_port = port;
810 else
811 pingMsg->msgHdr.msgh_local_port = NULL;
812
813 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
814 pingMsg->msgHdr.msgh_size,
815 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
816 0);
817 if( port)
818 iokit_release_port( port );
819
820 if( KERN_SUCCESS != kr)
821 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
822 }
823
824 return( true );
825 }
826
827 OSObject * IOServiceUserNotification::getNextObject()
828 {
829 unsigned int count;
830 OSObject * result;
831
832 IOTakeLock( lock );
833
834 if( lastEntry)
835 lastEntry->release();
836
837 count = newSet->getCount();
838 if( count ) {
839 result = newSet->getObject( count - 1 );
840 result->retain();
841 newSet->removeObject( count - 1);
842 } else {
843 result = 0;
844 armed = true;
845 }
846 lastEntry = result;
847
848 IOUnlock( lock );
849
850 return( result );
851 }
852
853 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
854
855 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
856
857 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
858
859 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
860 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
861 bool client64 )
862 {
863 if( !super::init())
864 return( false );
865
866 if (referenceSize > sizeof(OSAsyncReference64))
867 return( false );
868
869 clientIs64 = client64;
870
871 owningPID = proc_selfpid();
872
873 extraSize += sizeof(IOServiceInterestContent64);
874 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
875 pingMsg = (PingMsg *) IOMalloc( msgSize);
876 if( !pingMsg)
877 return( false );
878
879 bzero( pingMsg, msgSize);
880
881 pingMsg->msgHdr.msgh_remote_port = port;
882 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
883 | MACH_MSGH_BITS(
884 MACH_MSG_TYPE_COPY_SEND /*remote*/,
885 MACH_MSG_TYPE_MAKE_SEND /*local*/);
886 pingMsg->msgHdr.msgh_size = msgSize;
887 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
888
889 pingMsg->msgBody.msgh_descriptor_count = 1;
890
891 pingMsg->ports[0].name = 0;
892 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
893 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
894
895 pingMsg->notifyHeader.size = extraSize;
896 pingMsg->notifyHeader.type = type;
897 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
898
899 return( true );
900 }
901
902 void IOServiceMessageUserNotification::free( void )
903 {
904 PingMsg * _pingMsg;
905 vm_size_t _msgSize;
906
907 _pingMsg = pingMsg;
908 _msgSize = msgSize;
909
910 super::free();
911
912 if( _pingMsg && _msgSize) {
913 if (_pingMsg->msgHdr.msgh_remote_port) {
914 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
915 }
916 IOFree( _pingMsg, _msgSize);
917 }
918 }
919
920 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
921 UInt32 messageType, IOService * provider,
922 void * argument, vm_size_t argSize )
923 {
924 return( ((IOServiceMessageUserNotification *) target)->handler(
925 ref, messageType, provider, argument, argSize));
926 }
927
928 IOReturn IOServiceMessageUserNotification::handler( void * ref,
929 UInt32 messageType, IOService * provider,
930 void * messageArgument, vm_size_t argSize )
931 {
932 kern_return_t kr;
933 ipc_port_t thisPort, providerPort;
934 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
935 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
936 // == pingMsg->notifyHeader.content;
937
938 if (kIOMessageCopyClientID == messageType)
939 {
940 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
941 return (kIOReturnSuccess);
942 }
943
944 data->messageType = messageType;
945
946 if( argSize == 0)
947 {
948 data->messageArgument[0] = (io_user_reference_t) messageArgument;
949 if (clientIs64)
950 argSize = sizeof(data->messageArgument[0]);
951 else
952 {
953 data->messageArgument[0] |= (data->messageArgument[0] << 32);
954 argSize = sizeof(uint32_t);
955 }
956 }
957 else
958 {
959 if( argSize > kIOUserNotifyMaxMessageSize)
960 argSize = kIOUserNotifyMaxMessageSize;
961 bcopy( messageArgument, data->messageArgument, argSize );
962 }
963
964 // adjust message size for ipc restrictions
965 natural_t type;
966 type = pingMsg->notifyHeader.type;
967 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
968 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
969 pingMsg->notifyHeader.type = type;
970 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
971
972 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
973 + sizeof( IOServiceInterestContent64 )
974 - sizeof( data->messageArgument)
975 + argSize;
976
977 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
978 pingMsg->ports[0].name = providerPort;
979 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
980 pingMsg->msgHdr.msgh_local_port = thisPort;
981 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
982 pingMsg->msgHdr.msgh_size,
983 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
984 0);
985 if( thisPort)
986 iokit_release_port( thisPort );
987 if( providerPort)
988 iokit_release_port( providerPort );
989
990 if( KERN_SUCCESS != kr)
991 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
992
993 return( kIOReturnSuccess );
994 }
995
996 OSObject * IOServiceMessageUserNotification::getNextObject()
997 {
998 return( 0 );
999 }
1000
1001 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1002
1003 #undef super
1004 #define super IOService
1005 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1006
1007 void IOUserClient::initialize( void )
1008 {
1009 gIOObjectPortLock = IOLockAlloc();
1010
1011 assert( gIOObjectPortLock );
1012 }
1013
1014 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1015 mach_port_t wakePort,
1016 void *callback, void *refcon)
1017 {
1018 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1019 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1020 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1021 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1022 }
1023
1024 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1025 mach_port_t wakePort,
1026 mach_vm_address_t callback, io_user_reference_t refcon)
1027 {
1028 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1029 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1030 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1031 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1032 }
1033
1034 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1035 mach_port_t wakePort,
1036 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1037 {
1038 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1039 if (vm_map_is_64bit(get_task_map(task))) {
1040 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1041 }
1042 }
1043
1044 static OSDictionary * CopyConsoleUser(UInt32 uid)
1045 {
1046 OSArray * array;
1047 OSDictionary * user = 0;
1048
1049 if ((array = OSDynamicCast(OSArray,
1050 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1051 {
1052 for (unsigned int idx = 0;
1053 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1054 idx++) {
1055 OSNumber * num;
1056
1057 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1058 && (uid == num->unsigned32BitValue())) {
1059 user->retain();
1060 break;
1061 }
1062 }
1063 array->release();
1064 }
1065 return user;
1066 }
1067
1068 static OSDictionary * CopyUserOnConsole(void)
1069 {
1070 OSArray * array;
1071 OSDictionary * user = 0;
1072
1073 if ((array = OSDynamicCast(OSArray,
1074 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1075 {
1076 for (unsigned int idx = 0;
1077 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1078 idx++)
1079 {
1080 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1081 {
1082 user->retain();
1083 break;
1084 }
1085 }
1086 array->release();
1087 }
1088 return (user);
1089 }
1090
1091 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1092 IOService * service )
1093 {
1094 proc_t p;
1095
1096 p = (proc_t) get_bsdtask_info(task);
1097 if (p)
1098 {
1099 uint64_t authorizationID;
1100
1101 authorizationID = proc_uniqueid(p);
1102 if (authorizationID)
1103 {
1104 if (service->getAuthorizationID() == authorizationID)
1105 {
1106 return (kIOReturnSuccess);
1107 }
1108 }
1109 }
1110
1111 return (kIOReturnNotPermitted);
1112 }
1113
1114 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1115 const char * privilegeName )
1116 {
1117 kern_return_t kr;
1118 security_token_t token;
1119 mach_msg_type_number_t count;
1120 task_t task;
1121 OSDictionary * user;
1122 bool secureConsole;
1123
1124
1125 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1126 sizeof(kIOClientPrivilegeForeground)))
1127 {
1128 if (task_is_gpu_denied(current_task()))
1129 return (kIOReturnNotPrivileged);
1130 else
1131 return (kIOReturnSuccess);
1132 }
1133
1134 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1135 sizeof(kIOClientPrivilegeConsoleSession)))
1136 {
1137 kauth_cred_t cred;
1138 proc_t p;
1139
1140 task = (task_t) securityToken;
1141 if (!task)
1142 task = current_task();
1143 p = (proc_t) get_bsdtask_info(task);
1144 kr = kIOReturnNotPrivileged;
1145
1146 if (p && (cred = kauth_cred_proc_ref(p)))
1147 {
1148 user = CopyUserOnConsole();
1149 if (user)
1150 {
1151 OSNumber * num;
1152 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1153 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1154 {
1155 kr = kIOReturnSuccess;
1156 }
1157 user->release();
1158 }
1159 kauth_cred_unref(&cred);
1160 }
1161 return (kr);
1162 }
1163
1164 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1165 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1166 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1167 else
1168 task = (task_t)securityToken;
1169
1170 count = TASK_SECURITY_TOKEN_COUNT;
1171 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1172
1173 if (KERN_SUCCESS != kr)
1174 {}
1175 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1176 sizeof(kIOClientPrivilegeAdministrator))) {
1177 if (0 != token.val[0])
1178 kr = kIOReturnNotPrivileged;
1179 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1180 sizeof(kIOClientPrivilegeLocalUser))) {
1181 user = CopyConsoleUser(token.val[0]);
1182 if ( user )
1183 user->release();
1184 else
1185 kr = kIOReturnNotPrivileged;
1186 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1187 sizeof(kIOClientPrivilegeConsoleUser))) {
1188 user = CopyConsoleUser(token.val[0]);
1189 if ( user ) {
1190 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1191 kr = kIOReturnNotPrivileged;
1192 else if ( secureConsole ) {
1193 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1194 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1195 kr = kIOReturnNotPrivileged;
1196 }
1197 user->release();
1198 }
1199 else
1200 kr = kIOReturnNotPrivileged;
1201 } else
1202 kr = kIOReturnUnsupported;
1203
1204 return (kr);
1205 }
1206
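// Illustrative sketch (not from this file): the usual way a driver's user
// client consumes clientHasPrivilege() is to gate the connection on a named
// privilege at open time.  MyUserClient and its override are hypothetical;
// the securityID handed to initWithTask() is the same opaque token that
// clientHasPrivilege() expects for kIOClientPrivilegeAdministrator.
#if 0
bool MyUserClient::initWithTask(task_t owningTask, void * securityID, UInt32 type)
{
    if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(securityID,
                                kIOClientPrivilegeAdministrator))
        return false;

    return IOUserClient::initWithTask(owningTask, securityID, type);
}
#endif
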
1207 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1208 const char * entitlement )
1209 {
1210 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1211
1212 proc_t p = NULL;
1213 pid_t pid = 0;
1214 char procname[MAXCOMLEN + 1] = "";
1215 size_t len = 0;
1216 void *entitlements_blob = NULL;
1217 char *entitlements_data = NULL;
1218 OSObject *entitlements_obj = NULL;
1219 OSDictionary *entitlements = NULL;
1220 OSString *errorString = NULL;
1221 OSObject *value = NULL;
1222
1223 p = (proc_t)get_bsdtask_info(task);
1224 if (p == NULL)
1225 goto fail;
1226 pid = proc_pid(p);
1227 proc_name(pid, procname, (int)sizeof(procname));
1228
1229 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1230 goto fail;
1231
1232 if (len <= offsetof(CS_GenericBlob, data))
1233 goto fail;
1234
1235 /*
1236 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1237 * we'll try to parse in the kernel.
1238 */
1239 len -= offsetof(CS_GenericBlob, data);
1240 if (len > MAX_ENTITLEMENTS_LEN) {
1241 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1242 goto fail;
1243 }
1244
1245 /*
1246 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1247 * what is stored in the entitlements blob. Copy the string and
1248 * terminate it.
1249 */
1250 entitlements_data = (char *)IOMalloc(len + 1);
1251 if (entitlements_data == NULL)
1252 goto fail;
1253 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1254 entitlements_data[len] = '\0';
1255
1256 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1257 if (errorString != NULL) {
1258 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1259 goto fail;
1260 }
1261 if (entitlements_obj == NULL)
1262 goto fail;
1263
1264 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1265 if (entitlements == NULL)
1266 goto fail;
1267
1268 /* Fetch the entitlement value from the dictionary. */
1269 value = entitlements->getObject(entitlement);
1270 if (value != NULL)
1271 value->retain();
1272
1273 fail:
1274 if (entitlements_data != NULL)
1275 IOFree(entitlements_data, len + 1);
1276 if (entitlements_obj != NULL)
1277 entitlements_obj->release();
1278 if (errorString != NULL)
1279 errorString->release();
1280 return value;
1281 }
1282
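// A minimal sketch, with hypothetical names: copyClientEntitlement() is
// typically used to gate a privileged operation on a boolean entitlement held
// by the calling process.  MyUserClient, kMyPrivilegedSelector and the
// entitlement string below are assumptions, not part of this file.
#if 0
IOReturn MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments * arguments,
                                      IOExternalMethodDispatch * dispatch,
                                      OSObject * target, void * reference)
{
    if (kMyPrivilegedSelector == selector) {
        OSObject * value = IOUserClient::copyClientEntitlement(current_task(),
                                "com.example.driver.privileged-ops");
        bool entitled = (value == kOSBooleanTrue);
        if (value) value->release();
        if (!entitled) return kIOReturnNotPrivileged;
    }
    return IOUserClient::externalMethod(selector, arguments, dispatch, target, reference);
}
#endif
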
1283 bool IOUserClient::init()
1284 {
1285 if (getPropertyTable() || super::init())
1286 return reserve();
1287
1288 return false;
1289 }
1290
1291 bool IOUserClient::init(OSDictionary * dictionary)
1292 {
1293 if (getPropertyTable() || super::init(dictionary))
1294 return reserve();
1295
1296 return false;
1297 }
1298
1299 bool IOUserClient::initWithTask(task_t owningTask,
1300 void * securityID,
1301 UInt32 type )
1302 {
1303 if (getPropertyTable() || super::init())
1304 return reserve();
1305
1306 return false;
1307 }
1308
1309 bool IOUserClient::initWithTask(task_t owningTask,
1310 void * securityID,
1311 UInt32 type,
1312 OSDictionary * properties )
1313 {
1314 bool ok;
1315
1316 ok = super::init( properties );
1317 ok &= initWithTask( owningTask, securityID, type );
1318
1319 return( ok );
1320 }
1321
1322 bool IOUserClient::reserve()
1323 {
1324 if(!reserved) {
1325 reserved = IONew(ExpansionData, 1);
1326 if (!reserved) {
1327 return false;
1328 }
1329 }
1330 setTerminateDefer(NULL, true);
1331 IOStatisticsRegisterCounter();
1332
1333 return true;
1334 }
1335
1336 void IOUserClient::free()
1337 {
1338 if( mappings)
1339 mappings->release();
1340
1341 IOStatisticsUnregisterCounter();
1342
1343 if (reserved)
1344 IODelete(reserved, ExpansionData, 1);
1345
1346 super::free();
1347 }
1348
1349 IOReturn IOUserClient::clientDied( void )
1350 {
1351 IOReturn ret = kIOReturnNotReady;
1352
1353 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1354 {
1355 ret = clientClose();
1356 }
1357
1358 return (ret);
1359 }
1360
1361 IOReturn IOUserClient::clientClose( void )
1362 {
1363 return( kIOReturnUnsupported );
1364 }
1365
1366 IOService * IOUserClient::getService( void )
1367 {
1368 return( 0 );
1369 }
1370
1371 IOReturn IOUserClient::registerNotificationPort(
1372 mach_port_t /* port */,
1373 UInt32 /* type */,
1374 UInt32 /* refCon */)
1375 {
1376 return( kIOReturnUnsupported);
1377 }
1378
1379 IOReturn IOUserClient::registerNotificationPort(
1380 mach_port_t port,
1381 UInt32 type,
1382 io_user_reference_t refCon)
1383 {
1384 return (registerNotificationPort(port, type, (UInt32) refCon));
1385 }
1386
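// Hedged sketch (hypothetical MyUserClient and fNotifyPort instance variable):
// the base class stubs above return kIOReturnUnsupported, so a concrete user
// client overrides registerNotificationPort() to capture the wake port that
// IOConnectSetNotificationPort() supplies.
#if 0
IOReturn MyUserClient::registerNotificationPort(mach_port_t port, UInt32 type, UInt32 refCon)
{
    if (fNotifyPort)
        releaseNotificationPort(fNotifyPort);   // drop the send right on any port this replaces
    fNotifyPort = port;
    return kIOReturnSuccess;
}
#endif
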
1387 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1388 semaphore_t * semaphore )
1389 {
1390 return( kIOReturnUnsupported);
1391 }
1392
1393 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1394 {
1395 return( kIOReturnUnsupported);
1396 }
1397
1398 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1399 IOOptionBits * options,
1400 IOMemoryDescriptor ** memory )
1401 {
1402 return( kIOReturnUnsupported);
1403 }
1404
1405 #if !__LP64__
1406 IOMemoryMap * IOUserClient::mapClientMemory(
1407 IOOptionBits type,
1408 task_t task,
1409 IOOptionBits mapFlags,
1410 IOVirtualAddress atAddress )
1411 {
1412 return (NULL);
1413 }
1414 #endif
1415
1416 IOMemoryMap * IOUserClient::mapClientMemory64(
1417 IOOptionBits type,
1418 task_t task,
1419 IOOptionBits mapFlags,
1420 mach_vm_address_t atAddress )
1421 {
1422 IOReturn err;
1423 IOOptionBits options = 0;
1424 IOMemoryDescriptor * memory;
1425 IOMemoryMap * map = 0;
1426
1427 err = clientMemoryForType( (UInt32) type, &options, &memory );
1428
1429 if( memory && (kIOReturnSuccess == err)) {
1430
1431 options = (options & ~kIOMapUserOptionsMask)
1432 | (mapFlags & kIOMapUserOptionsMask);
1433 map = memory->createMappingInTask( task, atAddress, options );
1434 memory->release();
1435 }
1436
1437 return( map );
1438 }
1439
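// Illustrative sketch (hypothetical MyUserClient and fSharedBuffer, an
// IOBufferMemoryDescriptor created elsewhere, e.g. in start()):
// mapClientMemory64() above releases the descriptor returned by
// clientMemoryForType(), so an override that hands back a long-lived buffer
// takes an extra retain.
#if 0
IOReturn MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits * options,
                                           IOMemoryDescriptor ** memory)
{
    if ((type != 0) || !fSharedBuffer)
        return kIOReturnBadArgument;

    fSharedBuffer->retain();        // balanced by the release() in mapClientMemory64()
    *options = 0;
    *memory  = fSharedBuffer;
    return kIOReturnSuccess;
}
#endif
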
1440 IOReturn IOUserClient::exportObjectToClient(task_t task,
1441 OSObject *obj, io_object_t *clientObj)
1442 {
1443 mach_port_name_t name;
1444
1445 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1446
1447 *(mach_port_name_t *)clientObj = name;
1448 return kIOReturnSuccess;
1449 }
1450
1451 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1452 {
1453 return( 0 );
1454 }
1455
1456 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1457 {
1458 return( 0 );
1459 }
1460
1461 IOExternalMethod * IOUserClient::
1462 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1463 {
1464 IOExternalMethod *method = getExternalMethodForIndex(index);
1465
1466 if (method)
1467 *targetP = (IOService *) method->object;
1468
1469 return method;
1470 }
1471
1472 IOExternalAsyncMethod * IOUserClient::
1473 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1474 {
1475 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1476
1477 if (method)
1478 *targetP = (IOService *) method->object;
1479
1480 return method;
1481 }
1482
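// A minimal sketch of the legacy dispatch path, with hypothetical names:
// the subclass keeps a static IOExternalMethod table and bounds-checks the
// selector in getTargetAndMethodForIndex().  MyUserClient and doOpen are
// assumptions; doOpen takes one scalar input and returns one scalar output.
#if 0
static const IOExternalMethod sMethods[] = {
    // object  func                               flags                 count0  count1
    {  NULL,   (IOMethod) &MyUserClient::doOpen,  kIOUCScalarIScalarO,  1,      1 },
};

IOExternalMethod * MyUserClient::getTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    if (index >= (sizeof(sMethods) / sizeof(sMethods[0])))
        return NULL;
    *targetP = this;                // dispatch against this user client instance
    return (IOExternalMethod *) &sMethods[index];
}
#endif
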
1483 IOExternalTrap * IOUserClient::
1484 getExternalTrapForIndex(UInt32 index)
1485 {
1486 return NULL;
1487 }
1488
1489 IOExternalTrap * IOUserClient::
1490 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1491 {
1492 IOExternalTrap *trap = getExternalTrapForIndex(index);
1493
1494 if (trap) {
1495 *targetP = trap->object;
1496 }
1497
1498 return trap;
1499 }
1500
1501 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1502 {
1503 mach_port_t port;
1504 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1505
1506 if (MACH_PORT_NULL != port)
1507 iokit_release_port_send(port);
1508
1509 return (kIOReturnSuccess);
1510 }
1511
1512 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1513 {
1514 if (MACH_PORT_NULL != port)
1515 iokit_release_port_send(port);
1516
1517 return (kIOReturnSuccess);
1518 }
1519
1520 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1521 IOReturn result, void *args[], UInt32 numArgs)
1522 {
1523 OSAsyncReference64 reference64;
1524 io_user_reference_t args64[kMaxAsyncArgs];
1525 unsigned int idx;
1526
1527 if (numArgs > kMaxAsyncArgs)
1528 return kIOReturnMessageTooLarge;
1529
1530 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1531 reference64[idx] = REF64(reference[idx]);
1532
1533 for (idx = 0; idx < numArgs; idx++)
1534 args64[idx] = REF64(args[idx]);
1535
1536 return (sendAsyncResult64(reference64, result, args64, numArgs));
1537 }
1538
1539 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1540 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1541 {
1542 return _sendAsyncResult64(reference, result, args, numArgs, options);
1543 }
1544
1545 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1546 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1547 {
1548 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1549 }
1550
1551 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1552 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1553 {
1554 struct ReplyMsg
1555 {
1556 mach_msg_header_t msgHdr;
1557 union
1558 {
1559 struct
1560 {
1561 OSNotificationHeader notifyHdr;
1562 IOAsyncCompletionContent asyncContent;
1563 uint32_t args[kMaxAsyncArgs];
1564 } msg32;
1565 struct
1566 {
1567 OSNotificationHeader64 notifyHdr;
1568 IOAsyncCompletionContent asyncContent;
1569 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1570 } msg64;
1571 } m;
1572 };
1573 ReplyMsg replyMsg;
1574 mach_port_t replyPort;
1575 kern_return_t kr;
1576
1577 // If no reply port, do nothing.
1578 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1579 if (replyPort == MACH_PORT_NULL)
1580 return kIOReturnSuccess;
1581
1582 if (numArgs > kMaxAsyncArgs)
1583 return kIOReturnMessageTooLarge;
1584
1585 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1586 0 /*local*/);
1587 replyMsg.msgHdr.msgh_remote_port = replyPort;
1588 replyMsg.msgHdr.msgh_local_port = 0;
1589 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1590 if (kIOUCAsync64Flag & reference[0])
1591 {
1592 replyMsg.msgHdr.msgh_size =
1593 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1594 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1595 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1596 + numArgs * sizeof(io_user_reference_t);
1597 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1598 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1599
1600 replyMsg.m.msg64.asyncContent.result = result;
1601 if (numArgs)
1602 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1603 }
1604 else
1605 {
1606 unsigned int idx;
1607
1608 replyMsg.msgHdr.msgh_size =
1609 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1610 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1611
1612 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1613 + numArgs * sizeof(uint32_t);
1614 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1615
1616 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1617 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1618
1619 replyMsg.m.msg32.asyncContent.result = result;
1620
1621 for (idx = 0; idx < numArgs; idx++)
1622 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1623 }
1624
1625 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1626 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1627 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1628 } else {
1629 /* Fail on full queue. */
1630 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1631 replyMsg.msgHdr.msgh_size);
1632 }
1633 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1634 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1635 return kr;
1636 }
1637
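// Hedged sketch of the async round trip (MyUserClient and its
// fAsyncRef/fHaveAsyncRef state are hypothetical): by the time an async
// external method runs, the MIG glue has filled in the 64-bit async reference
// via setAsyncReference64() earlier in this file; the driver copies it aside
// and later completes it with sendAsyncResult64().
#if 0
IOReturn MyUserClient::armCompletion(IOExternalMethodArguments * arguments)
{
    if (!arguments->asyncReference)
        return kIOReturnBadArgument;

    bcopy(arguments->asyncReference, fAsyncRef, sizeof(OSAsyncReference64));
    fHaveAsyncRef = true;
    return kIOReturnSuccess;
}

void MyUserClient::completeLater(IOReturn status, uint64_t value)
{
    if (!fHaveAsyncRef)
        return;

    io_user_reference_t args[1] = { value };
    // Sends the kIOAsyncCompletionNotificationType reply to the wake port
    // recorded in the reference.
    sendAsyncResult64(fAsyncRef, status, args, 1);
    fHaveAsyncRef = false;
}
#endif
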
1638
1639 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1640
1641 extern "C" {
1642
1643 #define CHECK(cls,obj,out) \
1644 cls * out; \
1645 if( !(out = OSDynamicCast( cls, obj))) \
1646 return( kIOReturnBadArgument )
1647
1648 #define CHECKLOCKED(cls,obj,out) \
1649 IOUserIterator * oIter; \
1650 cls * out; \
1651 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1652 return (kIOReturnBadArgument); \
1653 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1654 return (kIOReturnBadArgument)
1655
1656 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1657
1658 // Create a vm_map_copy_t or kalloc'ed data for memory
1659 // to be copied out. ipc will free after the copyout.
1660
1661 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1662 io_buf_ptr_t * buf )
1663 {
1664 kern_return_t err;
1665 vm_map_copy_t copy;
1666
1667 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1668 false /* src_destroy */, &copy);
1669
1670 assert( err == KERN_SUCCESS );
1671 if( err == KERN_SUCCESS )
1672 *buf = (char *) copy;
1673
1674 return( err );
1675 }
1676
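// Illustrative sketch: copyoutkdata() is normally paired with OSSerialize when
// a serialized object is copied out to user space; the property routines later
// in the file follow this shape.  The helper name and its buf/bufCnt parameters
// are hypothetical stand-ins for the MIG out-parameters.
#if 0
static kern_return_t copyOutSerialized(OSObject * obj, io_buf_ptr_t * buf,
                                       mach_msg_type_number_t * bufCnt)
{
    kern_return_t err = kIOReturnNoMemory;
    OSSerialize * s = OSSerialize::withCapacity(4096);

    if (s && obj->serialize(s)) {
        *bufCnt = s->getLength();
        err = copyoutkdata(s->text(), s->getLength(), buf);
    }
    if (s) s->release();
    return (err);
}
#endif
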
1677 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1678
1679 /* Routine io_server_version */
1680 kern_return_t is_io_server_version(
1681 mach_port_t master_port,
1682 uint64_t *version)
1683 {
1684 *version = IOKIT_SERVER_VERSION;
1685 return (kIOReturnSuccess);
1686 }
1687
1688 /* Routine io_object_get_class */
1689 kern_return_t is_io_object_get_class(
1690 io_object_t object,
1691 io_name_t className )
1692 {
1693 const OSMetaClass* my_obj = NULL;
1694
1695 if( !object)
1696 return( kIOReturnBadArgument );
1697
1698 my_obj = object->getMetaClass();
1699 if (!my_obj) {
1700 return (kIOReturnNotFound);
1701 }
1702
1703 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1704
1705 return( kIOReturnSuccess );
1706 }
1707
1708 /* Routine io_object_get_superclass */
1709 kern_return_t is_io_object_get_superclass(
1710 mach_port_t master_port,
1711 io_name_t obj_name,
1712 io_name_t class_name)
1713 {
1714 const OSMetaClass* my_obj = NULL;
1715 const OSMetaClass* superclass = NULL;
1716 const OSSymbol *my_name = NULL;
1717 const char *my_cstr = NULL;
1718
1719 if (!obj_name || !class_name)
1720 return (kIOReturnBadArgument);
1721
1722 if( master_port != master_device_port)
1723 return( kIOReturnNotPrivileged);
1724
1725 my_name = OSSymbol::withCString(obj_name);
1726
1727 if (my_name) {
1728 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1729 my_name->release();
1730 }
1731 if (my_obj) {
1732 superclass = my_obj->getSuperClass();
1733 }
1734
1735 if (!superclass) {
1736 return( kIOReturnNotFound );
1737 }
1738
1739 my_cstr = superclass->getClassName();
1740
1741 if (my_cstr) {
1742 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1743 return( kIOReturnSuccess );
1744 }
1745 return (kIOReturnNotFound);
1746 }
1747
1748 /* Routine io_object_get_bundle_identifier */
1749 kern_return_t is_io_object_get_bundle_identifier(
1750 mach_port_t master_port,
1751 io_name_t obj_name,
1752 io_name_t bundle_name)
1753 {
1754 const OSMetaClass* my_obj = NULL;
1755 const OSSymbol *my_name = NULL;
1756 const OSSymbol *identifier = NULL;
1757 const char *my_cstr = NULL;
1758
1759 if (!obj_name || !bundle_name)
1760 return (kIOReturnBadArgument);
1761
1762 if( master_port != master_device_port)
1763 return( kIOReturnNotPrivileged);
1764
1765 my_name = OSSymbol::withCString(obj_name);
1766
1767 if (my_name) {
1768 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1769 my_name->release();
1770 }
1771
1772 if (my_obj) {
1773 identifier = my_obj->getKmodName();
1774 }
1775 if (!identifier) {
1776 return( kIOReturnNotFound );
1777 }
1778
1779 my_cstr = identifier->getCStringNoCopy();
1780 if (my_cstr) {
1781 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1782 return( kIOReturnSuccess );
1783 }
1784
1785 return (kIOReturnBadArgument);
1786 }
1787
1788 /* Routine io_object_conforms_to */
1789 kern_return_t is_io_object_conforms_to(
1790 io_object_t object,
1791 io_name_t className,
1792 boolean_t *conforms )
1793 {
1794 if( !object)
1795 return( kIOReturnBadArgument );
1796
1797 *conforms = (0 != object->metaCast( className ));
1798
1799 return( kIOReturnSuccess );
1800 }
1801
1802 /* Routine io_object_get_retain_count */
1803 kern_return_t is_io_object_get_retain_count(
1804 io_object_t object,
1805 uint32_t *retainCount )
1806 {
1807 if( !object)
1808 return( kIOReturnBadArgument );
1809
1810 *retainCount = object->getRetainCount();
1811 return( kIOReturnSuccess );
1812 }
1813
1814 /* Routine io_iterator_next */
1815 kern_return_t is_io_iterator_next(
1816 io_object_t iterator,
1817 io_object_t *object )
1818 {
1819 IOReturn ret;
1820 OSObject * obj;
1821
1822 CHECK( OSIterator, iterator, iter );
1823
1824 obj = iter->getNextObject();
1825 if( obj) {
1826 obj->retain();
1827 *object = obj;
1828 ret = kIOReturnSuccess;
1829 } else
1830 ret = kIOReturnNoDevice;
1831
1832 return (ret);
1833 }
1834
1835 /* Routine io_iterator_reset */
1836 kern_return_t is_io_iterator_reset(
1837 io_object_t iterator )
1838 {
1839 CHECK( OSIterator, iterator, iter );
1840
1841 iter->reset();
1842
1843 return( kIOReturnSuccess );
1844 }
1845
1846 /* Routine io_iterator_is_valid */
1847 kern_return_t is_io_iterator_is_valid(
1848 io_object_t iterator,
1849 boolean_t *is_valid )
1850 {
1851 CHECK( OSIterator, iterator, iter );
1852
1853 *is_valid = iter->isValid();
1854
1855 return( kIOReturnSuccess );
1856 }
1857
1858
1859 static kern_return_t internal_io_service_match_property_table(
1860 io_service_t _service,
1861 const char * matching,
1862 mach_msg_type_number_t matching_size,
1863 boolean_t *matches)
1864 {
1865 CHECK( IOService, _service, service );
1866
1867 kern_return_t kr;
1868 OSObject * obj;
1869 OSDictionary * dict;
1870
1871 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1872 : OSUnserializeXML(matching);
1873 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1874 *matches = service->passiveMatch( dict );
1875 kr = kIOReturnSuccess;
1876 } else
1877 kr = kIOReturnBadArgument;
1878
1879 if( obj)
1880 obj->release();
1881
1882 return( kr );
1883 }
1884
1885 /* Routine io_service_match_property_table */
1886 kern_return_t is_io_service_match_property_table(
1887 io_service_t service,
1888 io_string_t matching,
1889 boolean_t *matches )
1890 {
1891 return (internal_io_service_match_property_table(service, matching, 0, matches));
1892 }
1893
1894
1895 /* Routine io_service_match_property_table_ool */
1896 kern_return_t is_io_service_match_property_table_ool(
1897 io_object_t service,
1898 io_buf_ptr_t matching,
1899 mach_msg_type_number_t matchingCnt,
1900 kern_return_t *result,
1901 boolean_t *matches )
1902 {
1903 kern_return_t kr;
1904 vm_offset_t data;
1905 vm_map_offset_t map_data;
1906
1907 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1908 data = CAST_DOWN(vm_offset_t, map_data);
1909
1910 if( KERN_SUCCESS == kr) {
1911 // must return success after vm_map_copyout() succeeds
1912 *result = internal_io_service_match_property_table(service,
1913 (const char *)data, matchingCnt, matches );
1914 vm_deallocate( kernel_map, data, matchingCnt );
1915 }
1916
1917 return( kr );
1918 }
1919
1920 /* Routine io_service_match_property_table_bin */
1921 kern_return_t is_io_service_match_property_table_bin(
1922 io_object_t service,
1923 io_struct_inband_t matching,
1924 mach_msg_type_number_t matchingCnt,
1925 boolean_t *matches)
1926 {
1927 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
1928 }
1929
1930 static kern_return_t internal_io_service_get_matching_services(
1931 mach_port_t master_port,
1932 const char * matching,
1933 mach_msg_type_number_t matching_size,
1934 io_iterator_t *existing )
1935 {
1936 kern_return_t kr;
1937 OSObject * obj;
1938 OSDictionary * dict;
1939
1940 if( master_port != master_device_port)
1941 return( kIOReturnNotPrivileged);
1942
1943 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1944 : OSUnserializeXML(matching);
1945 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1946 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
1947 kr = kIOReturnSuccess;
1948 } else
1949 kr = kIOReturnBadArgument;
1950
1951 if( obj)
1952 obj->release();
1953
1954 return( kr );
1955 }
1956
1957 /* Routine io_service_get_matching_services */
1958 kern_return_t is_io_service_get_matching_services(
1959 mach_port_t master_port,
1960 io_string_t matching,
1961 io_iterator_t *existing )
1962 {
1963 return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
1964 }
1965
1966 /* Routine io_service_get_matching_services_ool */
1967 kern_return_t is_io_service_get_matching_services_ool(
1968 mach_port_t master_port,
1969 io_buf_ptr_t matching,
1970 mach_msg_type_number_t matchingCnt,
1971 kern_return_t *result,
1972 io_object_t *existing )
1973 {
1974 kern_return_t kr;
1975 vm_offset_t data;
1976 vm_map_offset_t map_data;
1977
1978 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1979 data = CAST_DOWN(vm_offset_t, map_data);
1980
1981 if( KERN_SUCCESS == kr) {
1982 // must return success after vm_map_copyout() succeeds
1983 // and mig will copy out objects on success
1984 *existing = 0;
1985 *result = internal_io_service_get_matching_services(master_port,
1986 (const char *) data, matchingCnt, existing);
1987 vm_deallocate( kernel_map, data, matchingCnt );
1988 }
1989
1990 return( kr );
1991 }
1992
1993 /* Routine io_service_get_matching_services_bin */
1994 kern_return_t is_io_service_get_matching_services_bin(
1995 mach_port_t master_port,
1996 io_struct_inband_t matching,
1997 mach_msg_type_number_t matchingCnt,
1998 io_object_t *existing)
1999 {
2000 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2001 }
2002
2003
2004 static kern_return_t internal_io_service_get_matching_service(
2005 mach_port_t master_port,
2006 const char * matching,
2007 mach_msg_type_number_t matching_size,
2008 io_service_t *service )
2009 {
2010 kern_return_t kr;
2011 OSObject * obj;
2012 OSDictionary * dict;
2013
2014 if( master_port != master_device_port)
2015 return( kIOReturnNotPrivileged);
2016
2017 obj = matching_size ? OSUnserializeXML(matching, matching_size)
2018 : OSUnserializeXML(matching);
2019 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2020 *service = IOService::copyMatchingService( dict );
2021 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2022 } else
2023 kr = kIOReturnBadArgument;
2024
2025 if( obj)
2026 obj->release();
2027
2028 return( kr );
2029 }
2030
2031 /* Routine io_service_get_matching_service */
2032 kern_return_t is_io_service_get_matching_service(
2033 mach_port_t master_port,
2034 io_string_t matching,
2035 io_service_t *service )
2036 {
2037 return (internal_io_service_get_matching_service(master_port, matching, 0, service));
2038 }
2039
2040 /* Routine io_service_get_matching_services_ool */
2041 kern_return_t is_io_service_get_matching_service_ool(
2042 mach_port_t master_port,
2043 io_buf_ptr_t matching,
2044 mach_msg_type_number_t matchingCnt,
2045 kern_return_t *result,
2046 io_object_t *service )
2047 {
2048 kern_return_t kr;
2049 vm_offset_t data;
2050 vm_map_offset_t map_data;
2051
2052 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2053 data = CAST_DOWN(vm_offset_t, map_data);
2054
2055 if( KERN_SUCCESS == kr) {
2056 // must return success after vm_map_copyout() succeeds
2057 // and mig will copy out objects on success
2058 *service = 0;
2059 *result = internal_io_service_get_matching_service(master_port,
2060 (const char *) data, matchingCnt, service );
2061 vm_deallocate( kernel_map, data, matchingCnt );
2062 }
2063
2064 return( kr );
2065 }
2066
2067 /* Routine io_service_get_matching_service_bin */
2068 kern_return_t is_io_service_get_matching_service_bin(
2069 mach_port_t master_port,
2070 io_struct_inband_t matching,
2071 mach_msg_type_number_t matchingCnt,
2072 io_object_t *service)
2073 {
2074 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2075 }
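/*
 * Single-service lookups take the same path; user space typically goes through
 * IOKitLib's IOServiceGetMatchingService().  Sketch (the class name is just an
 * example):
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static io_service_t findPlatformExpert(void)
 *     {
 *         return IOServiceGetMatchingService(kIOMasterPortDefault,
 *                    IOServiceMatching("IOPlatformExpertDevice"));  // caller releases
 *     }
 */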
2076
2077 static kern_return_t internal_io_service_add_notification(
2078 mach_port_t master_port,
2079 io_name_t notification_type,
2080 const char * matching,
2081 size_t matching_size,
2082 mach_port_t port,
2083 void * reference,
2084 vm_size_t referenceSize,
2085 bool client64,
2086 io_object_t * notification )
2087 {
2088 IOServiceUserNotification * userNotify = 0;
2089 IONotifier * notify = 0;
2090 const OSSymbol * sym;
2091 OSDictionary * dict = 0;
2092 IOReturn err;
2093 unsigned long int userMsgType;
2094
2095 if( master_port != master_device_port)
2096 return( kIOReturnNotPrivileged);
2097
2098 do {
2099 err = kIOReturnNoResources;
2100
2101 if( !(sym = OSSymbol::withCString( notification_type )))
2102 continue;
2103
2104 if (matching_size)
2105 {
2106 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2107 }
2108 else
2109 {
2110 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
2111 }
2112
2113 if (!dict) {
2114 err = kIOReturnBadArgument;
2115 continue;
2116 }
2117
2118 if( (sym == gIOPublishNotification)
2119 || (sym == gIOFirstPublishNotification))
2120 userMsgType = kIOServicePublishNotificationType;
2121 else if( (sym == gIOMatchedNotification)
2122 || (sym == gIOFirstMatchNotification))
2123 userMsgType = kIOServiceMatchedNotificationType;
2124 else if( sym == gIOTerminatedNotification)
2125 userMsgType = kIOServiceTerminatedNotificationType;
2126 else
2127 userMsgType = kLastIOKitNotificationType;
2128
2129 userNotify = new IOServiceUserNotification;
2130
2131 if( userNotify && !userNotify->init( port, userMsgType,
2132 reference, referenceSize, client64)) {
2133 iokit_release_port_send(port);
2134 userNotify->release();
2135 userNotify = 0;
2136 }
2137 if( !userNotify)
2138 continue;
2139
2140 notify = IOService::addMatchingNotification( sym, dict,
2141 &userNotify->_handler, userNotify );
2142 if( notify) {
2143 *notification = userNotify;
2144 userNotify->setNotification( notify );
2145 err = kIOReturnSuccess;
2146 } else
2147 err = kIOReturnUnsupported;
2148
2149 } while( false );
2150
2151 if( sym)
2152 sym->release();
2153 if( dict)
2154 dict->release();
2155
2156 return( err );
2157 }
2158
2159
2160 /* Routine io_service_add_notification */
2161 kern_return_t is_io_service_add_notification(
2162 mach_port_t master_port,
2163 io_name_t notification_type,
2164 io_string_t matching,
2165 mach_port_t port,
2166 io_async_ref_t reference,
2167 mach_msg_type_number_t referenceCnt,
2168 io_object_t * notification )
2169 {
2170 return (internal_io_service_add_notification(master_port, notification_type,
2171 matching, 0, port, &reference[0], sizeof(io_async_ref_t),
2172 false, notification));
2173 }
2174
2175 /* Routine io_service_add_notification_64 */
2176 kern_return_t is_io_service_add_notification_64(
2177 mach_port_t master_port,
2178 io_name_t notification_type,
2179 io_string_t matching,
2180 mach_port_t wake_port,
2181 io_async_ref64_t reference,
2182 mach_msg_type_number_t referenceCnt,
2183 io_object_t *notification )
2184 {
2185 return (internal_io_service_add_notification(master_port, notification_type,
2186 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t),
2187 true, notification));
2188 }
2189
2190 /* Routine io_service_add_notification_bin */
2191 kern_return_t is_io_service_add_notification_bin
2192 (
2193 mach_port_t master_port,
2194 io_name_t notification_type,
2195 io_struct_inband_t matching,
2196 mach_msg_type_number_t matchingCnt,
2197 mach_port_t wake_port,
2198 io_async_ref_t reference,
2199 mach_msg_type_number_t referenceCnt,
2200 io_object_t *notification)
2201 {
2202 return (internal_io_service_add_notification(master_port, notification_type,
2203 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2204 false, notification));
2205 }
2206
2207 /* Routine io_service_add_notification_bin_64 */
2208 kern_return_t is_io_service_add_notification_bin_64
2209 (
2210 mach_port_t master_port,
2211 io_name_t notification_type,
2212 io_struct_inband_t matching,
2213 mach_msg_type_number_t matchingCnt,
2214 mach_port_t wake_port,
2215 io_async_ref64_t reference,
2216 mach_msg_type_number_t referenceCnt,
2217 io_object_t *notification)
2218 {
2219 return (internal_io_service_add_notification(master_port, notification_type,
2220 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2221 true, notification));
2222 }
2223
2224 static kern_return_t internal_io_service_add_notification_ool(
2225 mach_port_t master_port,
2226 io_name_t notification_type,
2227 io_buf_ptr_t matching,
2228 mach_msg_type_number_t matchingCnt,
2229 mach_port_t wake_port,
2230 void * reference,
2231 vm_size_t referenceSize,
2232 bool client64,
2233 kern_return_t *result,
2234 io_object_t *notification )
2235 {
2236 kern_return_t kr;
2237 vm_offset_t data;
2238 vm_map_offset_t map_data;
2239
2240 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2241 data = CAST_DOWN(vm_offset_t, map_data);
2242
2243 if( KERN_SUCCESS == kr) {
2244 // must return success after vm_map_copyout() succeeds
2245 // and mig will copy out objects on success
2246 *notification = 0;
2247 *result = internal_io_service_add_notification( master_port, notification_type,
2248 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2249 vm_deallocate( kernel_map, data, matchingCnt );
2250 }
2251
2252 return( kr );
2253 }
2254
2255 /* Routine io_service_add_notification_ool */
2256 kern_return_t is_io_service_add_notification_ool(
2257 mach_port_t master_port,
2258 io_name_t notification_type,
2259 io_buf_ptr_t matching,
2260 mach_msg_type_number_t matchingCnt,
2261 mach_port_t wake_port,
2262 io_async_ref_t reference,
2263 mach_msg_type_number_t referenceCnt,
2264 kern_return_t *result,
2265 io_object_t *notification )
2266 {
2267 return (internal_io_service_add_notification_ool(master_port, notification_type,
2268 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2269 false, result, notification));
2270 }
2271
2272 /* Routine io_service_add_notification_ool_64 */
2273 kern_return_t is_io_service_add_notification_ool_64(
2274 mach_port_t master_port,
2275 io_name_t notification_type,
2276 io_buf_ptr_t matching,
2277 mach_msg_type_number_t matchingCnt,
2278 mach_port_t wake_port,
2279 io_async_ref64_t reference,
2280 mach_msg_type_number_t referenceCnt,
2281 kern_return_t *result,
2282 io_object_t *notification )
2283 {
2284 return (internal_io_service_add_notification_ool(master_port, notification_type,
2285 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2286 true, result, notification));
2287 }
2288
2289 /* Routine io_service_add_notification_old */
2290 kern_return_t is_io_service_add_notification_old(
2291 mach_port_t master_port,
2292 io_name_t notification_type,
2293 io_string_t matching,
2294 mach_port_t port,
2295 // for binary compatibility reasons, this must be natural_t for ILP32
2296 natural_t ref,
2297 io_object_t * notification )
2298 {
2299 return( is_io_service_add_notification( master_port, notification_type,
2300 matching, port, &ref, 1, notification ));
2301 }
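/*
 * User-space sketch of how these add-notification routines are normally
 * reached, via IOKitLib's IOServiceAddMatchingNotification().  The Mach port
 * wired up here is the wake port the kernel code above stores in the
 * IOServiceUserNotification.  "IOUSBDevice" is only an example class.
 *
 *     #include <CoreFoundation/CoreFoundation.h>
 *     #include <IOKit/IOKitLib.h>
 *
 *     static void deviceAppeared(void *refCon, io_iterator_t iter)
 *     {
 *         io_service_t svc;
 *         while ((svc = IOIteratorNext(iter)) != IO_OBJECT_NULL)
 *             IOObjectRelease(svc);                 // handle each newly matched service
 *     }
 *
 *     static void watchForDevices(void)
 *     {
 *         IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
 *         CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *             IONotificationPortGetRunLoopSource(port), kCFRunLoopDefaultMode);
 *
 *         io_iterator_t iter;
 *         if (KERN_SUCCESS == IOServiceAddMatchingNotification(port,
 *                 kIOFirstMatchNotification, IOServiceMatching("IOUSBDevice"),
 *                 deviceAppeared, NULL, &iter))
 *             deviceAppeared(NULL, iter);           // drain once to arm the notification
 *     }
 */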
2302
2303
2304 static kern_return_t internal_io_service_add_interest_notification(
2305 io_object_t _service,
2306 io_name_t type_of_interest,
2307 mach_port_t port,
2308 void * reference,
2309 vm_size_t referenceSize,
2310 bool client64,
2311 io_object_t * notification )
2312 {
2313
2314 IOServiceMessageUserNotification * userNotify = 0;
2315 IONotifier * notify = 0;
2316 const OSSymbol * sym;
2317 IOReturn err;
2318
2319 CHECK( IOService, _service, service );
2320
2321 err = kIOReturnNoResources;
2322 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2323
2324 userNotify = new IOServiceMessageUserNotification;
2325
2326 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2327 reference, referenceSize,
2328 kIOUserNotifyMaxMessageSize,
2329 client64 )) {
2330 iokit_release_port_send(port);
2331 userNotify->release();
2332 userNotify = 0;
2333 }
2334 if( !userNotify)
2335 continue;
2336
2337 notify = service->registerInterest( sym,
2338 &userNotify->_handler, userNotify );
2339 if( notify) {
2340 *notification = userNotify;
2341 userNotify->setNotification( notify );
2342 err = kIOReturnSuccess;
2343 } else
2344 err = kIOReturnUnsupported;
2345
2346 sym->release();
2347
2348 } while( false );
2349
2350 return( err );
2351 }
2352
2353 /* Routine io_service_add_interest_notification */
2354 kern_return_t is_io_service_add_interest_notification(
2355 io_object_t service,
2356 io_name_t type_of_interest,
2357 mach_port_t port,
2358 io_async_ref_t reference,
2359 mach_msg_type_number_t referenceCnt,
2360 io_object_t * notification )
2361 {
2362 return (internal_io_service_add_interest_notification(service, type_of_interest,
2363 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2364 }
2365
2366 /* Routine io_service_add_interest_notification_64 */
2367 kern_return_t is_io_service_add_interest_notification_64(
2368 io_object_t service,
2369 io_name_t type_of_interest,
2370 mach_port_t wake_port,
2371 io_async_ref64_t reference,
2372 mach_msg_type_number_t referenceCnt,
2373 io_object_t *notification )
2374 {
2375 return (internal_io_service_add_interest_notification(service, type_of_interest,
2376 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2377 }
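/*
 * Interest notifications are normally registered from user space with
 * IOKitLib's IOServiceAddInterestNotification(), which ends up in the
 * interest routines above.  Minimal sketch:
 *
 *     #include <IOKit/IOKitLib.h>
 *     #include <IOKit/IOMessage.h>
 *
 *     static void serviceInterest(void *refCon, io_service_t service,
 *                                 natural_t messageType, void *messageArgument)
 *     {
 *         if (messageType == kIOMessageServiceIsTerminated) {
 *             // the service is going away
 *         }
 *     }
 *
 *     static io_object_t registerInterest(IONotificationPortRef port, io_service_t service)
 *     {
 *         io_object_t note = IO_OBJECT_NULL;
 *         (void) IOServiceAddInterestNotification(port, service, kIOGeneralInterest,
 *                                                 serviceInterest, NULL, &note);
 *         return note;                              // release with IOObjectRelease()
 *     }
 */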
2378
2379
2380 /* Routine io_service_acknowledge_notification */
2381 kern_return_t is_io_service_acknowledge_notification(
2382 io_object_t _service,
2383 natural_t notify_ref,
2384 natural_t response )
2385 {
2386 CHECK( IOService, _service, service );
2387
2388 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2389 (IOOptionBits) response ));
2390
2391 }
2392
2393 /* Routine io_connect_get_notification_semaphore */
2394 kern_return_t is_io_connect_get_notification_semaphore(
2395 io_connect_t connection,
2396 natural_t notification_type,
2397 semaphore_t *semaphore )
2398 {
2399 CHECK( IOUserClient, connection, client );
2400
2401 IOStatisticsClientCall();
2402 return( client->getNotificationSemaphore( (UInt32) notification_type,
2403 semaphore ));
2404 }
2405
2406 /* Routine io_registry_get_root_entry */
2407 kern_return_t is_io_registry_get_root_entry(
2408 mach_port_t master_port,
2409 io_object_t *root )
2410 {
2411 IORegistryEntry * entry;
2412
2413 if( master_port != master_device_port)
2414 return( kIOReturnNotPrivileged);
2415
2416 entry = IORegistryEntry::getRegistryRoot();
2417 if( entry)
2418 entry->retain();
2419 *root = entry;
2420
2421 return( kIOReturnSuccess );
2422 }
2423
2424 /* Routine io_registry_create_iterator */
2425 kern_return_t is_io_registry_create_iterator(
2426 mach_port_t master_port,
2427 io_name_t plane,
2428 uint32_t options,
2429 io_object_t *iterator )
2430 {
2431 if( master_port != master_device_port)
2432 return( kIOReturnNotPrivileged);
2433
2434 *iterator = IOUserIterator::withIterator(
2435 IORegistryIterator::iterateOver(
2436 IORegistryEntry::getPlane( plane ), options ));
2437
2438 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2439 }
2440
2441 /* Routine io_registry_entry_create_iterator */
2442 kern_return_t is_io_registry_entry_create_iterator(
2443 io_object_t registry_entry,
2444 io_name_t plane,
2445 uint32_t options,
2446 io_object_t *iterator )
2447 {
2448 CHECK( IORegistryEntry, registry_entry, entry );
2449
2450 *iterator = IOUserIterator::withIterator(
2451 IORegistryIterator::iterateOver( entry,
2452 IORegistryEntry::getPlane( plane ), options ));
2453
2454 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2455 }
2456
2457 /* Routine io_registry_iterator_enter */
2458 kern_return_t is_io_registry_iterator_enter_entry(
2459 io_object_t iterator )
2460 {
2461 CHECKLOCKED( IORegistryIterator, iterator, iter );
2462
2463 IOLockLock(oIter->lock);
2464 iter->enterEntry();
2465 IOLockUnlock(oIter->lock);
2466
2467 return( kIOReturnSuccess );
2468 }
2469
2470 /* Routine io_registry_iterator_exit */
2471 kern_return_t is_io_registry_iterator_exit_entry(
2472 io_object_t iterator )
2473 {
2474 bool didIt;
2475
2476 CHECKLOCKED( IORegistryIterator, iterator, iter );
2477
2478 IOLockLock(oIter->lock);
2479 didIt = iter->exitEntry();
2480 IOLockUnlock(oIter->lock);
2481
2482 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2483 }
2484
2485 /* Routine io_registry_entry_from_path */
2486 kern_return_t is_io_registry_entry_from_path(
2487 mach_port_t master_port,
2488 io_string_t path,
2489 io_object_t *registry_entry )
2490 {
2491 IORegistryEntry * entry;
2492
2493 if( master_port != master_device_port)
2494 return( kIOReturnNotPrivileged);
2495
2496 entry = IORegistryEntry::fromPath( path );
2497
2498 *registry_entry = entry;
2499
2500 return( kIOReturnSuccess );
2501 }
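/*
 * User-space sketch: IOKitLib's IORegistryEntryFromPath() drives this routine
 * (newer IOKitLib builds may use the _ool variant below for long paths).
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static io_registry_entry_t serviceRoot(void)
 *     {
 *         return IORegistryEntryFromPath(kIOMasterPortDefault,
 *                                        kIOServicePlane ":/");   // caller releases
 *     }
 */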
2502
2503
2504 /* Routine io_registry_entry_from_path_ool */
2505 kern_return_t is_io_registry_entry_from_path_ool(
2506 mach_port_t master_port,
2507 io_string_inband_t path,
2508 io_buf_ptr_t path_ool,
2509 mach_msg_type_number_t path_oolCnt,
2510 kern_return_t *result,
2511 io_object_t *registry_entry)
2512 {
2513 IORegistryEntry * entry;
2514 vm_map_offset_t map_data;
2515 const char * cpath;
2516 IOReturn res;
2517 kern_return_t err;
2518
2519 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2520
2521 map_data = 0;
2522 entry = 0;
2523 res = err = KERN_SUCCESS;
2524 if (path[0]) cpath = path;
2525 else
2526 {
2527 if (!path_oolCnt) return(kIOReturnBadArgument);
2528 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2529
2530 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2531 if (KERN_SUCCESS == err)
2532 {
2533 // must return success to MIG after vm_map_copyout() succeeds; the actual status is returned in *result
2534 cpath = CAST_DOWN(const char *, map_data);
2535 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2536 }
2537 }
2538
2539 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2540 {
2541 entry = IORegistryEntry::fromPath(cpath);
2542 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2543 }
2544
2545 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2546
2547 if (KERN_SUCCESS != err) res = err;
2548 *registry_entry = entry;
2549 *result = res;
2550
2551 return (err);
2552 }
2553
2554
2555 /* Routine io_registry_entry_in_plane */
2556 kern_return_t is_io_registry_entry_in_plane(
2557 io_object_t registry_entry,
2558 io_name_t plane,
2559 boolean_t *inPlane )
2560 {
2561 CHECK( IORegistryEntry, registry_entry, entry );
2562
2563 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2564
2565 return( kIOReturnSuccess );
2566 }
2567
2568
2569 /* Routine io_registry_entry_get_path */
2570 kern_return_t is_io_registry_entry_get_path(
2571 io_object_t registry_entry,
2572 io_name_t plane,
2573 io_string_t path )
2574 {
2575 int length;
2576 CHECK( IORegistryEntry, registry_entry, entry );
2577
2578 length = sizeof( io_string_t);
2579 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2580 return( kIOReturnSuccess );
2581 else
2582 return( kIOReturnBadArgument );
2583 }
2584
2585 /* Routine io_registry_entry_get_path_ool */
2586 kern_return_t is_io_registry_entry_get_path_ool(
2587 io_object_t registry_entry,
2588 io_name_t plane,
2589 io_string_inband_t path,
2590 io_buf_ptr_t *path_ool,
2591 mach_msg_type_number_t *path_oolCnt)
2592 {
2593 enum { kMaxPath = 16384 };
2594 IOReturn err;
2595 int length;
2596 char * buf;
2597
2598 CHECK( IORegistryEntry, registry_entry, entry );
2599
2600 *path_ool = NULL;
2601 *path_oolCnt = 0;
2602 length = sizeof(io_string_inband_t);
2603 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2604 else
2605 {
2606 length = kMaxPath;
2607 buf = IONew(char, length);
2608 if (!buf) err = kIOReturnNoMemory;
2609 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2610 else
2611 {
2612 *path_oolCnt = length;
2613 err = copyoutkdata(buf, length, path_ool);
2614 }
2615 if (buf) IODelete(buf, char, kMaxPath);
2616 }
2617
2618 return (err);
2619 }
2620
2621
2622 /* Routine io_registry_entry_get_name */
2623 kern_return_t is_io_registry_entry_get_name(
2624 io_object_t registry_entry,
2625 io_name_t name )
2626 {
2627 CHECK( IORegistryEntry, registry_entry, entry );
2628
2629 strncpy( name, entry->getName(), sizeof( io_name_t));
2630
2631 return( kIOReturnSuccess );
2632 }
2633
2634 /* Routine io_registry_entry_get_name_in_plane */
2635 kern_return_t is_io_registry_entry_get_name_in_plane(
2636 io_object_t registry_entry,
2637 io_name_t planeName,
2638 io_name_t name )
2639 {
2640 const IORegistryPlane * plane;
2641 CHECK( IORegistryEntry, registry_entry, entry );
2642
2643 if( planeName[0])
2644 plane = IORegistryEntry::getPlane( planeName );
2645 else
2646 plane = 0;
2647
2648 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2649
2650 return( kIOReturnSuccess );
2651 }
2652
2653 /* Routine io_registry_entry_get_location_in_plane */
2654 kern_return_t is_io_registry_entry_get_location_in_plane(
2655 io_object_t registry_entry,
2656 io_name_t planeName,
2657 io_name_t location )
2658 {
2659 const IORegistryPlane * plane;
2660 CHECK( IORegistryEntry, registry_entry, entry );
2661
2662 if( planeName[0])
2663 plane = IORegistryEntry::getPlane( planeName );
2664 else
2665 plane = 0;
2666
2667 const char * cstr = entry->getLocation( plane );
2668
2669 if( cstr) {
2670 strncpy( location, cstr, sizeof( io_name_t));
2671 return( kIOReturnSuccess );
2672 } else
2673 return( kIOReturnNotFound );
2674 }
2675
2676 /* Routine io_registry_entry_get_registry_entry_id */
2677 kern_return_t is_io_registry_entry_get_registry_entry_id(
2678 io_object_t registry_entry,
2679 uint64_t *entry_id )
2680 {
2681 CHECK( IORegistryEntry, registry_entry, entry );
2682
2683 *entry_id = entry->getRegistryEntryID();
2684
2685 return (kIOReturnSuccess);
2686 }
2687
2688 /* Routine io_registry_entry_get_property_bytes */
2689 kern_return_t is_io_registry_entry_get_property_bytes(
2690 io_object_t registry_entry,
2691 io_name_t property_name,
2692 io_struct_inband_t buf,
2693 mach_msg_type_number_t *dataCnt )
2694 {
2695 OSObject * obj;
2696 OSData * data;
2697 OSString * str;
2698 OSBoolean * boo;
2699 OSNumber * off;
2700 UInt64 offsetBytes;
2701 unsigned int len = 0;
2702 const void * bytes = 0;
2703 IOReturn ret = kIOReturnSuccess;
2704
2705 CHECK( IORegistryEntry, registry_entry, entry );
2706
2707 #if CONFIG_MACF
2708 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2709 return kIOReturnNotPermitted;
2710 #endif
2711
2712 obj = entry->copyProperty(property_name);
2713 if( !obj)
2714 return( kIOReturnNoResources );
2715
2716 // One day OSData will be a common container base class
2717 // until then...
2718 if( (data = OSDynamicCast( OSData, obj ))) {
2719 len = data->getLength();
2720 bytes = data->getBytesNoCopy();
2721
2722 } else if( (str = OSDynamicCast( OSString, obj ))) {
2723 len = str->getLength() + 1;
2724 bytes = str->getCStringNoCopy();
2725
2726 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2727 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2728 bytes = boo->isTrue() ? "Yes" : "No";
2729
2730 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2731 offsetBytes = off->unsigned64BitValue();
2732 len = off->numberOfBytes();
2733 bytes = &offsetBytes;
2734 #ifdef __BIG_ENDIAN__
2735 bytes = (const void *)
2736 (((UInt32) bytes) + (sizeof( UInt64) - len));
2737 #endif
2738
2739 } else
2740 ret = kIOReturnBadArgument;
2741
2742 if( bytes) {
2743 if( *dataCnt < len)
2744 ret = kIOReturnIPCError;
2745 else {
2746 *dataCnt = len;
2747 bcopy( bytes, buf, len );
2748 }
2749 }
2750 obj->release();
2751
2752 return( ret );
2753 }
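/*
 * The inband byte accessor above corresponds to IOKitLib's
 * IORegistryEntryGetProperty(), which copies at most sizeof(io_struct_inband_t)
 * bytes of an OSData, OSString, OSBoolean or OSNumber property.  Sketch
 * ("model" is just an example key):
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static void readModel(io_registry_entry_t entry)
 *     {
 *         io_struct_inband_t buf;
 *         uint32_t size = sizeof(buf);
 *         if (KERN_SUCCESS == IORegistryEntryGetProperty(entry, "model", buf, &size)) {
 *             // buf now holds `size` bytes of the property payload
 *         }
 *     }
 */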
2754
2755
2756 /* Routine io_registry_entry_get_property */
2757 kern_return_t is_io_registry_entry_get_property(
2758 io_object_t registry_entry,
2759 io_name_t property_name,
2760 io_buf_ptr_t *properties,
2761 mach_msg_type_number_t *propertiesCnt )
2762 {
2763 kern_return_t err;
2764 vm_size_t len;
2765 OSObject * obj;
2766
2767 CHECK( IORegistryEntry, registry_entry, entry );
2768
2769 #if CONFIG_MACF
2770 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2771 return kIOReturnNotPermitted;
2772 #endif
2773
2774 obj = entry->copyProperty(property_name);
2775 if( !obj)
2776 return( kIOReturnNotFound );
2777
2778 OSSerialize * s = OSSerialize::withCapacity(4096);
2779 if( !s) {
2780 obj->release();
2781 return( kIOReturnNoMemory );
2782 }
2783
2784 if( obj->serialize( s )) {
2785 len = s->getLength();
2786 *propertiesCnt = len;
2787 err = copyoutkdata( s->text(), len, properties );
2788
2789 } else
2790 err = kIOReturnUnsupported;
2791
2792 s->release();
2793 obj->release();
2794
2795 return( err );
2796 }
2797
2798 /* Routine io_registry_entry_get_property_recursively */
2799 kern_return_t is_io_registry_entry_get_property_recursively(
2800 io_object_t registry_entry,
2801 io_name_t plane,
2802 io_name_t property_name,
2803 uint32_t options,
2804 io_buf_ptr_t *properties,
2805 mach_msg_type_number_t *propertiesCnt )
2806 {
2807 kern_return_t err;
2808 vm_size_t len;
2809 OSObject * obj;
2810
2811 CHECK( IORegistryEntry, registry_entry, entry );
2812
2813 #if CONFIG_MACF
2814 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2815 return kIOReturnNotPermitted;
2816 #endif
2817
2818 obj = entry->copyProperty( property_name,
2819 IORegistryEntry::getPlane( plane ), options);
2820 if( !obj)
2821 return( kIOReturnNotFound );
2822
2823 OSSerialize * s = OSSerialize::withCapacity(4096);
2824 if( !s) {
2825 obj->release();
2826 return( kIOReturnNoMemory );
2827 }
2828
2829 if( obj->serialize( s )) {
2830 len = s->getLength();
2831 *propertiesCnt = len;
2832 err = copyoutkdata( s->text(), len, properties );
2833
2834 } else
2835 err = kIOReturnUnsupported;
2836
2837 s->release();
2838 obj->release();
2839
2840 return( err );
2841 }
2842
2843 #if CONFIG_MACF
2844
2845 static kern_return_t
2846 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp)
2847 {
2848 kern_return_t err = 0;
2849 OSDictionary *filtered = NULL;
2850 OSCollectionIterator *iter = NULL;
2851 OSSymbol *key;
2852 OSObject *p;
2853 kauth_cred_t cred = kauth_cred_get();
2854
2855 if (properties == NULL)
2856 return kIOReturnUnsupported;
2857
2858 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL ||
2859 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) {
2860 err = kIOReturnNoMemory;
2861 goto out;
2862 }
2863
2864 while ((p = iter->getNextObject()) != NULL) {
2865 if ((key = OSDynamicCast(OSSymbol, p)) == NULL ||
2866 mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0)
2867 continue;
2868 filtered->setObject(key, properties->getObject(key));
2869 }
2870
2871 out:
2872 if (iter != NULL)
2873 iter->release();
2874 *filteredp = filtered;
2875 return err;
2876 }
2877
2878 #endif
2879
2880 /* Routine io_registry_entry_get_properties */
2881 kern_return_t is_io_registry_entry_get_properties(
2882 io_object_t registry_entry,
2883 io_buf_ptr_t *properties,
2884 mach_msg_type_number_t *propertiesCnt )
2885 {
2886 kern_return_t err = 0;
2887 vm_size_t len;
2888
2889 CHECK( IORegistryEntry, registry_entry, entry );
2890
2891 OSSerialize * s = OSSerialize::withCapacity(4096);
2892 if( !s)
2893 return( kIOReturnNoMemory );
2894
2895 if (!entry->serializeProperties(s))
2896 err = kIOReturnUnsupported;
2897
2898 #if CONFIG_MACF
2899 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
2900 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength());
2901 OSDictionary *filteredprops = NULL;
2902 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops);
2903 if (propobj) propobj->release();
2904
2905 if (!err) {
2906 s->clearText();
2907 if (!filteredprops->serialize(s))
2908 err = kIOReturnUnsupported;
2909 }
2910 if (filteredprops != NULL)
2911 filteredprops->release();
2912 }
2913 #endif /* CONFIG_MACF */
2914
2915 if (!err) {
2916 len = s->getLength();
2917 *propertiesCnt = len;
2918 err = copyoutkdata( s->text(), len, properties );
2919 }
2920
2921 s->release();
2922 return( err );
2923 }
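/*
 * From user space the whole property table is usually fetched with IOKitLib's
 * IORegistryEntryCreateCFProperties(), which unserializes the buffer produced
 * by the routines above into a CFDictionary.  Sketch:
 *
 *     #include <CoreFoundation/CoreFoundation.h>
 *     #include <IOKit/IOKitLib.h>
 *
 *     static CFMutableDictionaryRef copyAllProperties(io_registry_entry_t entry)
 *     {
 *         CFMutableDictionaryRef props = NULL;
 *         if (KERN_SUCCESS != IORegistryEntryCreateCFProperties(entry, &props,
 *                                                               kCFAllocatorDefault, 0))
 *             return NULL;
 *         return props;                             // caller releases with CFRelease()
 *     }
 */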
2924
2925 #if CONFIG_MACF
2926
2927 struct GetPropertiesEditorRef
2928 {
2929 kauth_cred_t cred;
2930 IORegistryEntry * entry;
2931 OSCollection * root;
2932 };
2933
2934 static const OSMetaClassBase *
2935 GetPropertiesEditor(void * reference,
2936 OSSerialize * s,
2937 OSCollection * container,
2938 const OSSymbol * name,
2939 const OSMetaClassBase * value)
2940 {
2941 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
2942
2943 if (!ref->root) ref->root = container;
2944 if (ref->root == container)
2945 {
2946 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
2947 {
2948 value = 0;
2949 }
2950 }
2951 if (value) value->retain();
2952 return (value);
2953 }
2954
2955 #endif /* CONFIG_MACF */
2956
2957 /* Routine io_registry_entry_get_properties_bin */
2958 kern_return_t is_io_registry_entry_get_properties_bin(
2959 io_object_t registry_entry,
2960 io_buf_ptr_t *properties,
2961 mach_msg_type_number_t *propertiesCnt)
2962 {
2963 kern_return_t err = kIOReturnSuccess;
2964 vm_size_t len;
2965 OSSerialize * s;
2966 OSSerialize::Editor editor = 0;
2967 void * editRef = 0;
2968
2969 CHECK(IORegistryEntry, registry_entry, entry);
2970
2971 #if CONFIG_MACF
2972 GetPropertiesEditorRef ref;
2973 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
2974 {
2975 editor = &GetPropertiesEditor;
2976 editRef = &ref;
2977 ref.cred = kauth_cred_get();
2978 ref.entry = entry;
2979 ref.root = 0;
2980 }
2981 #endif
2982
2983 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
2984 if (!s) return (kIOReturnNoMemory);
2985
2986 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
2987
2988 if (kIOReturnSuccess == err)
2989 {
2990 len = s->getLength();
2991 *propertiesCnt = len;
2992 err = copyoutkdata(s->text(), len, properties);
2993 }
2994 s->release();
2995
2996 return (err);
2997 }
2998
2999 /* Routine io_registry_entry_get_property_bin */
3000 kern_return_t is_io_registry_entry_get_property_bin(
3001 io_object_t registry_entry,
3002 io_name_t plane,
3003 io_name_t property_name,
3004 uint32_t options,
3005 io_buf_ptr_t *properties,
3006 mach_msg_type_number_t *propertiesCnt )
3007 {
3008 kern_return_t err;
3009 vm_size_t len;
3010 OSObject * obj;
3011 const OSSymbol * sym;
3012
3013 CHECK( IORegistryEntry, registry_entry, entry );
3014
3015 #if CONFIG_MACF
3016 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3017 return kIOReturnNotPermitted;
3018 #endif
3019
3020 if ((kIORegistryIterateRecursively & options) && plane[0])
3021 {
3022 obj = entry->copyProperty(property_name,
3023 IORegistryEntry::getPlane(plane), options);
3024 }
3025 else
3026 {
3027 obj = entry->copyProperty(property_name);
3028 }
3029
3030 if( !obj)
3031 return( kIOReturnNotFound );
3032
3033 sym = OSSymbol::withCString(property_name);
3034 if (sym)
3035 {
3036 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3037 sym->release();
3038 }
3039
3040 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3041 if( !s) {
3042 obj->release();
3043 return( kIOReturnNoMemory );
3044 }
3045
3046 if( obj->serialize( s )) {
3047 len = s->getLength();
3048 *propertiesCnt = len;
3049 err = copyoutkdata( s->text(), len, properties );
3050
3051 } else err = kIOReturnUnsupported;
3052
3053 s->release();
3054 obj->release();
3055
3056 return( err );
3057 }
3058
3059
3060 /* Routine io_registry_entry_set_properties */
3061 kern_return_t is_io_registry_entry_set_properties
3062 (
3063 io_object_t registry_entry,
3064 io_buf_ptr_t properties,
3065 mach_msg_type_number_t propertiesCnt,
3066 kern_return_t * result)
3067 {
3068 OSObject * obj;
3069 kern_return_t err;
3070 IOReturn res;
3071 vm_offset_t data;
3072 vm_map_offset_t map_data;
3073
3074 CHECK( IORegistryEntry, registry_entry, entry );
3075
3076 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3077 return( kIOReturnMessageTooLarge);
3078
3079 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3080 data = CAST_DOWN(vm_offset_t, map_data);
3081
3082 if( KERN_SUCCESS == err) {
3083
3084 // must return success after vm_map_copyout() succeeds
3085 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3086 vm_deallocate( kernel_map, data, propertiesCnt );
3087
3088 if (!obj)
3089 res = kIOReturnBadArgument;
3090 #if CONFIG_MACF
3091 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3092 registry_entry, obj))
3093 {
3094 res = kIOReturnNotPermitted;
3095 }
3096 #endif
3097 else
3098 {
3099 res = entry->setProperties( obj );
3100 }
3101
3102 if (obj)
3103 obj->release();
3104 } else
3105 res = err;
3106
3107 *result = res;
3108 return( err );
3109 }
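/*
 * Property updates arrive here via IOKitLib's IORegistryEntrySetCFProperties();
 * note the MACF check and the size cap above.  Sketch (the "DebugEnable" key is
 * purely hypothetical):
 *
 *     #include <CoreFoundation/CoreFoundation.h>
 *     #include <IOKit/IOKitLib.h>
 *     #include <stdbool.h>
 *
 *     static kern_return_t setDebugFlag(io_registry_entry_t entry, bool on)
 *     {
 *         CFMutableDictionaryRef d = CFDictionaryCreateMutable(kCFAllocatorDefault, 0,
 *             &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
 *         CFDictionarySetValue(d, CFSTR("DebugEnable"),        // hypothetical key
 *                              on ? kCFBooleanTrue : kCFBooleanFalse);
 *         kern_return_t kr = IORegistryEntrySetCFProperties(entry, d);
 *         CFRelease(d);
 *         return kr;
 *     }
 */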
3110
3111 /* Routine io_registry_entry_get_child_iterator */
3112 kern_return_t is_io_registry_entry_get_child_iterator(
3113 io_object_t registry_entry,
3114 io_name_t plane,
3115 io_object_t *iterator )
3116 {
3117 CHECK( IORegistryEntry, registry_entry, entry );
3118
3119 *iterator = entry->getChildIterator(
3120 IORegistryEntry::getPlane( plane ));
3121
3122 return( kIOReturnSuccess );
3123 }
3124
3125 /* Routine io_registry_entry_get_parent_iterator */
3126 kern_return_t is_io_registry_entry_get_parent_iterator(
3127 io_object_t registry_entry,
3128 io_name_t plane,
3129 io_object_t *iterator)
3130 {
3131 CHECK( IORegistryEntry, registry_entry, entry );
3132
3133 *iterator = entry->getParentIterator(
3134 IORegistryEntry::getPlane( plane ));
3135
3136 return( kIOReturnSuccess );
3137 }
3138
3139 /* Routine io_service_get_busy_state */
3140 kern_return_t is_io_service_get_busy_state(
3141 io_object_t _service,
3142 uint32_t *busyState )
3143 {
3144 CHECK( IOService, _service, service );
3145
3146 *busyState = service->getBusyState();
3147
3148 return( kIOReturnSuccess );
3149 }
3150
3151 /* Routine io_service_get_state */
3152 kern_return_t is_io_service_get_state(
3153 io_object_t _service,
3154 uint64_t *state,
3155 uint32_t *busy_state,
3156 uint64_t *accumulated_busy_time )
3157 {
3158 CHECK( IOService, _service, service );
3159
3160 *state = service->getState();
3161 *busy_state = service->getBusyState();
3162 *accumulated_busy_time = service->getAccumulatedBusyTime();
3163
3164 return( kIOReturnSuccess );
3165 }
3166
3167 /* Routine io_service_wait_quiet */
3168 kern_return_t is_io_service_wait_quiet(
3169 io_object_t _service,
3170 mach_timespec_t wait_time )
3171 {
3172 uint64_t timeoutNS;
3173
3174 CHECK( IOService, _service, service );
3175
3176 timeoutNS = wait_time.tv_sec;
3177 timeoutNS *= kSecondScale;
3178 timeoutNS += wait_time.tv_nsec;
3179
3180 return( service->waitQuiet(timeoutNS) );
3181 }
3182
3183 /* Routine io_service_request_probe */
3184 kern_return_t is_io_service_request_probe(
3185 io_object_t _service,
3186 uint32_t options )
3187 {
3188 CHECK( IOService, _service, service );
3189
3190 return( service->requestProbe( options ));
3191 }
3192
3193 /* Routine io_service_get_authorization_id */
3194 kern_return_t is_io_service_get_authorization_id(
3195 io_object_t _service,
3196 uint64_t *authorization_id )
3197 {
3198 kern_return_t kr;
3199
3200 CHECK( IOService, _service, service );
3201
3202 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3203 kIOClientPrivilegeAdministrator );
3204 if( kIOReturnSuccess != kr)
3205 return( kr );
3206
3207 *authorization_id = service->getAuthorizationID();
3208
3209 return( kr );
3210 }
3211
3212 /* Routine io_service_set_authorization_id */
3213 kern_return_t is_io_service_set_authorization_id(
3214 io_object_t _service,
3215 uint64_t authorization_id )
3216 {
3217 CHECK( IOService, _service, service );
3218
3219 return( service->setAuthorizationID( authorization_id ) );
3220 }
3221
3222 /* Routine io_service_open_extended */
3223 kern_return_t is_io_service_open_extended(
3224 io_object_t _service,
3225 task_t owningTask,
3226 uint32_t connect_type,
3227 NDR_record_t ndr,
3228 io_buf_ptr_t properties,
3229 mach_msg_type_number_t propertiesCnt,
3230 kern_return_t * result,
3231 io_object_t *connection )
3232 {
3233 IOUserClient * client = 0;
3234 kern_return_t err = KERN_SUCCESS;
3235 IOReturn res = kIOReturnSuccess;
3236 OSDictionary * propertiesDict = 0;
3237 bool crossEndian;
3238 bool disallowAccess;
3239
3240 CHECK( IOService, _service, service );
3241
3242 if (!owningTask) return (kIOReturnBadArgument);
3243
3244 do
3245 {
3246 if (properties)
3247 {
3248 OSObject * obj;
3249 vm_offset_t data;
3250 vm_map_offset_t map_data;
3251
3252 if( propertiesCnt > sizeof(io_struct_inband_t))
3253 return( kIOReturnMessageTooLarge);
3254
3255 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3256 res = err;
3257 data = CAST_DOWN(vm_offset_t, map_data);
3258 if (KERN_SUCCESS == err)
3259 {
3260 // must return success after vm_map_copyout() succeeds
3261 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3262 vm_deallocate( kernel_map, data, propertiesCnt );
3263 propertiesDict = OSDynamicCast(OSDictionary, obj);
3264 if (!propertiesDict)
3265 {
3266 res = kIOReturnBadArgument;
3267 if (obj)
3268 obj->release();
3269 }
3270 }
3271 if (kIOReturnSuccess != res)
3272 break;
3273 }
3274
3275 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3276 if (crossEndian)
3277 {
3278 if (!propertiesDict)
3279 propertiesDict = OSDictionary::withCapacity(4);
3280 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3281 if (data)
3282 {
3283 if (propertiesDict)
3284 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3285 data->release();
3286 }
3287 }
3288
3289 res = service->newUserClient( owningTask, (void *) owningTask,
3290 connect_type, propertiesDict, &client );
3291
3292 if (propertiesDict)
3293 propertiesDict->release();
3294
3295 if (res == kIOReturnSuccess)
3296 {
3297 assert( OSDynamicCast(IOUserClient, client) );
3298
3299 disallowAccess = (crossEndian
3300 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3301 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3302 if (disallowAccess) res = kIOReturnUnsupported;
3303 #if CONFIG_MACF
3304 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3305 res = kIOReturnNotPermitted;
3306 #endif
3307 if (kIOReturnSuccess != res)
3308 {
3309 IOStatisticsClientCall();
3310 client->clientClose();
3311 client->release();
3312 client = 0;
3313 break;
3314 }
3315 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3316 client->closed = false;
3317 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3318 if (creatorName)
3319 {
3320 client->setProperty(kIOUserClientCreatorKey, creatorName);
3321 creatorName->release();
3322 }
3323 client->setTerminateDefer(service, false);
3324 }
3325 }
3326 while (false);
3327
3328 *connection = client;
3329 *result = res;
3330
3331 return (err);
3332 }
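/*
 * User-space sketch: IOKitLib's IOServiceOpen() is the normal entry point into
 * is_io_service_open_extended(); the connect type is interpreted by the
 * driver's newUserClient() override.
 *
 *     #include <IOKit/IOKitLib.h>
 *     #include <mach/mach.h>
 *
 *     static io_connect_t openUserClient(io_service_t service, uint32_t type)
 *     {
 *         io_connect_t conn = IO_OBJECT_NULL;
 *         if (KERN_SUCCESS != IOServiceOpen(service, mach_task_self(), type, &conn))
 *             return IO_OBJECT_NULL;
 *         return conn;                              // close with IOServiceClose()
 *     }
 */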
3333
3334 /* Routine io_service_close */
3335 kern_return_t is_io_service_close(
3336 io_object_t connection )
3337 {
3338 OSSet * mappings;
3339 if ((mappings = OSDynamicCast(OSSet, connection)))
3340 return( kIOReturnSuccess );
3341
3342 CHECK( IOUserClient, connection, client );
3343
3344 IOStatisticsClientCall();
3345
3346 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3347 {
3348 client->clientClose();
3349 }
3350 else
3351 {
3352 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3353 client->getRegistryEntryID(), client->getName());
3354 }
3355
3356 return( kIOReturnSuccess );
3357 }
3358
3359 /* Routine io_connect_get_service */
3360 kern_return_t is_io_connect_get_service(
3361 io_object_t connection,
3362 io_object_t *service )
3363 {
3364 IOService * theService;
3365
3366 CHECK( IOUserClient, connection, client );
3367
3368 theService = client->getService();
3369 if( theService)
3370 theService->retain();
3371
3372 *service = theService;
3373
3374 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3375 }
3376
3377 /* Routine io_connect_set_notification_port */
3378 kern_return_t is_io_connect_set_notification_port(
3379 io_object_t connection,
3380 uint32_t notification_type,
3381 mach_port_t port,
3382 uint32_t reference)
3383 {
3384 CHECK( IOUserClient, connection, client );
3385
3386 IOStatisticsClientCall();
3387 return( client->registerNotificationPort( port, notification_type,
3388 (io_user_reference_t) reference ));
3389 }
3390
3391 /* Routine io_connect_set_notification_port_64 */
3392 kern_return_t is_io_connect_set_notification_port_64(
3393 io_object_t connection,
3394 uint32_t notification_type,
3395 mach_port_t port,
3396 io_user_reference_t reference)
3397 {
3398 CHECK( IOUserClient, connection, client );
3399
3400 IOStatisticsClientCall();
3401 return( client->registerNotificationPort( port, notification_type,
3402 reference ));
3403 }
3404
3405 /* Routine io_connect_map_memory_into_task */
3406 kern_return_t is_io_connect_map_memory_into_task
3407 (
3408 io_connect_t connection,
3409 uint32_t memory_type,
3410 task_t into_task,
3411 mach_vm_address_t *address,
3412 mach_vm_size_t *size,
3413 uint32_t flags
3414 )
3415 {
3416 IOReturn err;
3417 IOMemoryMap * map;
3418
3419 CHECK( IOUserClient, connection, client );
3420
3421 if (!into_task) return (kIOReturnBadArgument);
3422
3423 IOStatisticsClientCall();
3424 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3425
3426 if( map) {
3427 *address = map->getAddress();
3428 if( size)
3429 *size = map->getSize();
3430
3431 if( client->sharedInstance
3432 || (into_task != current_task())) {
3433 // push a name out to the task owning the map,
3434 // so we can clean up maps
3435 mach_port_name_t name __unused =
3436 IOMachPort::makeSendRightForTask(
3437 into_task, map, IKOT_IOKIT_OBJECT );
3438
3439 } else {
3440 // keep it with the user client
3441 IOLockLock( gIOObjectPortLock);
3442 if( 0 == client->mappings)
3443 client->mappings = OSSet::withCapacity(2);
3444 if( client->mappings)
3445 client->mappings->setObject( map);
3446 IOLockUnlock( gIOObjectPortLock);
3447 map->release();
3448 }
3449 err = kIOReturnSuccess;
3450
3451 } else
3452 err = kIOReturnBadArgument;
3453
3454 return( err );
3455 }
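/*
 * Sketch of the user-space side of the mapping call above, using IOKitLib's
 * IOConnectMapMemory64(); the memoryType value is defined by the individual
 * user client.
 *
 *     #include <IOKit/IOKitLib.h>
 *     #include <mach/mach.h>
 *     #include <stdint.h>
 *
 *     static void * mapClientMemory(io_connect_t conn, uint32_t memoryType,
 *                                   mach_vm_size_t *sizeOut)
 *     {
 *         mach_vm_address_t addr = 0;
 *         mach_vm_size_t    size = 0;
 *         if (KERN_SUCCESS != IOConnectMapMemory64(conn, memoryType, mach_task_self(),
 *                                                  &addr, &size, kIOMapAnywhere))
 *             return NULL;
 *         if (sizeOut) *sizeOut = size;
 *         return (void *)(uintptr_t)addr;           // undo with IOConnectUnmapMemory64()
 *     }
 */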
3456
3457 /* Routine io_connect_map_memory */
3458 kern_return_t is_io_connect_map_memory(
3459 io_object_t connect,
3460 uint32_t type,
3461 task_t task,
3462 uint32_t * mapAddr,
3463 uint32_t * mapSize,
3464 uint32_t flags )
3465 {
3466 IOReturn err;
3467 mach_vm_address_t address;
3468 mach_vm_size_t size;
3469
3470 address = SCALAR64(*mapAddr);
3471 size = SCALAR64(*mapSize);
3472
3473 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3474
3475 *mapAddr = SCALAR32(address);
3476 *mapSize = SCALAR32(size);
3477
3478 return (err);
3479 }
3480
3481 } /* extern "C" */
3482
3483 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3484 {
3485 OSIterator * iter;
3486 IOMemoryMap * map = 0;
3487
3488 IOLockLock(gIOObjectPortLock);
3489
3490 iter = OSCollectionIterator::withCollection(mappings);
3491 if(iter)
3492 {
3493 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3494 {
3495 if(mem == map->getMemoryDescriptor())
3496 {
3497 map->retain();
3498 mappings->removeObject(map);
3499 break;
3500 }
3501 }
3502 iter->release();
3503 }
3504
3505 IOLockUnlock(gIOObjectPortLock);
3506
3507 return (map);
3508 }
3509
3510 extern "C" {
3511
3512 /* Routine io_connect_unmap_memory_from_task */
3513 kern_return_t is_io_connect_unmap_memory_from_task
3514 (
3515 io_connect_t connection,
3516 uint32_t memory_type,
3517 task_t from_task,
3518 mach_vm_address_t address)
3519 {
3520 IOReturn err;
3521 IOOptionBits options = 0;
3522 IOMemoryDescriptor * memory;
3523 IOMemoryMap * map;
3524
3525 CHECK( IOUserClient, connection, client );
3526
3527 if (!from_task) return (kIOReturnBadArgument);
3528
3529 IOStatisticsClientCall();
3530 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3531
3532 if( memory && (kIOReturnSuccess == err)) {
3533
3534 options = (options & ~kIOMapUserOptionsMask)
3535 | kIOMapAnywhere | kIOMapReference;
3536
3537 map = memory->createMappingInTask( from_task, address, options );
3538 memory->release();
3539 if( map)
3540 {
3541 IOLockLock( gIOObjectPortLock);
3542 if( client->mappings)
3543 client->mappings->removeObject( map);
3544 IOLockUnlock( gIOObjectPortLock);
3545
3546 mach_port_name_t name = 0;
3547 if (from_task != current_task())
3548 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3549 if (name)
3550 {
3551 map->userClientUnmap();
3552 err = iokit_mod_send_right( from_task, name, -2 );
3553 err = kIOReturnSuccess;
3554 }
3555 else
3556 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3557 if (from_task == current_task())
3558 map->release();
3559 }
3560 else
3561 err = kIOReturnBadArgument;
3562 }
3563
3564 return( err );
3565 }
3566
3567 kern_return_t is_io_connect_unmap_memory(
3568 io_object_t connect,
3569 uint32_t type,
3570 task_t task,
3571 uint32_t mapAddr )
3572 {
3573 IOReturn err;
3574 mach_vm_address_t address;
3575
3576 address = SCALAR64(mapAddr);
3577
3578 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3579
3580 return (err);
3581 }
3582
3583
3584 /* Routine io_connect_add_client */
3585 kern_return_t is_io_connect_add_client(
3586 io_object_t connection,
3587 io_object_t connect_to)
3588 {
3589 CHECK( IOUserClient, connection, client );
3590 CHECK( IOUserClient, connect_to, to );
3591
3592 IOStatisticsClientCall();
3593 return( client->connectClient( to ) );
3594 }
3595
3596
3597 /* Routine io_connect_set_properties */
3598 kern_return_t is_io_connect_set_properties(
3599 io_object_t connection,
3600 io_buf_ptr_t properties,
3601 mach_msg_type_number_t propertiesCnt,
3602 kern_return_t * result)
3603 {
3604 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3605 }
3606
3607 /* Routine io_connect_method_var_output */
3608 kern_return_t is_io_connect_method_var_output
3609 (
3610 io_connect_t connection,
3611 uint32_t selector,
3612 io_scalar_inband64_t scalar_input,
3613 mach_msg_type_number_t scalar_inputCnt,
3614 io_struct_inband_t inband_input,
3615 mach_msg_type_number_t inband_inputCnt,
3616 mach_vm_address_t ool_input,
3617 mach_vm_size_t ool_input_size,
3618 io_struct_inband_t inband_output,
3619 mach_msg_type_number_t *inband_outputCnt,
3620 io_scalar_inband64_t scalar_output,
3621 mach_msg_type_number_t *scalar_outputCnt,
3622 io_buf_ptr_t *var_output,
3623 mach_msg_type_number_t *var_outputCnt
3624 )
3625 {
3626 CHECK( IOUserClient, connection, client );
3627
3628 IOExternalMethodArguments args;
3629 IOReturn ret;
3630 IOMemoryDescriptor * inputMD = 0;
3631 OSObject * structureVariableOutputData = 0;
3632
3633 bzero(&args.__reserved[0], sizeof(args.__reserved));
3634 args.version = kIOExternalMethodArgumentsCurrentVersion;
3635
3636 args.selector = selector;
3637
3638 args.asyncWakePort = MACH_PORT_NULL;
3639 args.asyncReference = 0;
3640 args.asyncReferenceCount = 0;
3641 args.structureVariableOutputData = &structureVariableOutputData;
3642
3643 args.scalarInput = scalar_input;
3644 args.scalarInputCount = scalar_inputCnt;
3645 args.structureInput = inband_input;
3646 args.structureInputSize = inband_inputCnt;
3647
3648 if (ool_input)
3649 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3650 kIODirectionOut, current_task());
3651
3652 args.structureInputDescriptor = inputMD;
3653
3654 args.scalarOutput = scalar_output;
3655 args.scalarOutputCount = *scalar_outputCnt;
3656 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3657 args.structureOutput = inband_output;
3658 args.structureOutputSize = *inband_outputCnt;
3659 args.structureOutputDescriptor = NULL;
3660 args.structureOutputDescriptorSize = 0;
3661
3662 IOStatisticsClientCall();
3663 ret = client->externalMethod( selector, &args );
3664
3665 *scalar_outputCnt = args.scalarOutputCount;
3666 *inband_outputCnt = args.structureOutputSize;
3667
3668 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3669 {
3670 OSSerialize * serialize;
3671 OSData * data;
3672 vm_size_t len;
3673
3674 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3675 {
3676 len = serialize->getLength();
3677 *var_outputCnt = len;
3678 ret = copyoutkdata(serialize->text(), len, var_output);
3679 }
3680 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3681 {
3682 len = data->getLength();
3683 *var_outputCnt = len;
3684 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3685 }
3686 else
3687 {
3688 ret = kIOReturnUnderrun;
3689 }
3690 }
3691
3692 if (inputMD)
3693 inputMD->release();
3694 if (structureVariableOutputData)
3695 structureVariableOutputData->release();
3696
3697 return (ret);
3698 }
3699
3700 /* Routine io_connect_method */
3701 kern_return_t is_io_connect_method
3702 (
3703 io_connect_t connection,
3704 uint32_t selector,
3705 io_scalar_inband64_t scalar_input,
3706 mach_msg_type_number_t scalar_inputCnt,
3707 io_struct_inband_t inband_input,
3708 mach_msg_type_number_t inband_inputCnt,
3709 mach_vm_address_t ool_input,
3710 mach_vm_size_t ool_input_size,
3711 io_struct_inband_t inband_output,
3712 mach_msg_type_number_t *inband_outputCnt,
3713 io_scalar_inband64_t scalar_output,
3714 mach_msg_type_number_t *scalar_outputCnt,
3715 mach_vm_address_t ool_output,
3716 mach_vm_size_t *ool_output_size
3717 )
3718 {
3719 CHECK( IOUserClient, connection, client );
3720
3721 IOExternalMethodArguments args;
3722 IOReturn ret;
3723 IOMemoryDescriptor * inputMD = 0;
3724 IOMemoryDescriptor * outputMD = 0;
3725
3726 bzero(&args.__reserved[0], sizeof(args.__reserved));
3727 args.version = kIOExternalMethodArgumentsCurrentVersion;
3728
3729 args.selector = selector;
3730
3731 args.asyncWakePort = MACH_PORT_NULL;
3732 args.asyncReference = 0;
3733 args.asyncReferenceCount = 0;
3734 args.structureVariableOutputData = 0;
3735
3736 args.scalarInput = scalar_input;
3737 args.scalarInputCount = scalar_inputCnt;
3738 args.structureInput = inband_input;
3739 args.structureInputSize = inband_inputCnt;
3740
3741 if (ool_input)
3742 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3743 kIODirectionOut, current_task());
3744
3745 args.structureInputDescriptor = inputMD;
3746
3747 args.scalarOutput = scalar_output;
3748 args.scalarOutputCount = *scalar_outputCnt;
3749 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3750 args.structureOutput = inband_output;
3751 args.structureOutputSize = *inband_outputCnt;
3752
3753 if (ool_output && ool_output_size)
3754 {
3755 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3756 kIODirectionIn, current_task());
3757 }
3758
3759 args.structureOutputDescriptor = outputMD;
3760 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3761
3762 IOStatisticsClientCall();
3763 ret = client->externalMethod( selector, &args );
3764
3765 *scalar_outputCnt = args.scalarOutputCount;
3766 *inband_outputCnt = args.structureOutputSize;
3767 *ool_output_size = args.structureOutputDescriptorSize;
3768
3769 if (inputMD)
3770 inputMD->release();
3771 if (outputMD)
3772 outputMD->release();
3773
3774 return (ret);
3775 }
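/*
 * A user-space sketch of driving is_io_connect_method() through IOKitLib's
 * IOConnectCallMethod(); the selector number and argument counts here are
 * hypothetical and depend entirely on the user client's method table.
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static kern_return_t callSelector(io_connect_t conn)
 *     {
 *         uint64_t in[2]  = { 16, 32 };             // scalar inputs
 *         uint64_t out[1] = { 0 };                  // scalar outputs
 *         uint32_t outCnt = 1;
 *         uint32_t selector = 5;                    // hypothetical selector
 *         return IOConnectCallMethod(conn, selector, in, 2,
 *                                    NULL, 0,       // no structure input
 *                                    out, &outCnt,
 *                                    NULL, NULL);   // no structure output
 *     }
 */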
3776
3777 /* Routine io_connect_async_method */
3778 kern_return_t is_io_connect_async_method
3779 (
3780 io_connect_t connection,
3781 mach_port_t wake_port,
3782 io_async_ref64_t reference,
3783 mach_msg_type_number_t referenceCnt,
3784 uint32_t selector,
3785 io_scalar_inband64_t scalar_input,
3786 mach_msg_type_number_t scalar_inputCnt,
3787 io_struct_inband_t inband_input,
3788 mach_msg_type_number_t inband_inputCnt,
3789 mach_vm_address_t ool_input,
3790 mach_vm_size_t ool_input_size,
3791 io_struct_inband_t inband_output,
3792 mach_msg_type_number_t *inband_outputCnt,
3793 io_scalar_inband64_t scalar_output,
3794 mach_msg_type_number_t *scalar_outputCnt,
3795 mach_vm_address_t ool_output,
3796 mach_vm_size_t * ool_output_size
3797 )
3798 {
3799 CHECK( IOUserClient, connection, client );
3800
3801 IOExternalMethodArguments args;
3802 IOReturn ret;
3803 IOMemoryDescriptor * inputMD = 0;
3804 IOMemoryDescriptor * outputMD = 0;
3805
3806 bzero(&args.__reserved[0], sizeof(args.__reserved));
3807 args.version = kIOExternalMethodArgumentsCurrentVersion;
3808
3809 reference[0] = (io_user_reference_t) wake_port;
3810 if (vm_map_is_64bit(get_task_map(current_task())))
3811 reference[0] |= kIOUCAsync64Flag;
3812
3813 args.selector = selector;
3814
3815 args.asyncWakePort = wake_port;
3816 args.asyncReference = reference;
3817 args.asyncReferenceCount = referenceCnt;
3818
3819 args.scalarInput = scalar_input;
3820 args.scalarInputCount = scalar_inputCnt;
3821 args.structureInput = inband_input;
3822 args.structureInputSize = inband_inputCnt;
3823
3824 if (ool_input)
3825 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3826 kIODirectionOut, current_task());
3827
3828 args.structureInputDescriptor = inputMD;
3829
3830 args.scalarOutput = scalar_output;
3831 args.scalarOutputCount = *scalar_outputCnt;
3832 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3833 args.structureOutput = inband_output;
3834 args.structureOutputSize = *inband_outputCnt;
3835
3836 if (ool_output)
3837 {
3838 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3839 kIODirectionIn, current_task());
3840 }
3841
3842 args.structureOutputDescriptor = outputMD;
3843 args.structureOutputDescriptorSize = *ool_output_size;
3844
3845 IOStatisticsClientCall();
3846 ret = client->externalMethod( selector, &args );
3847
3848 *inband_outputCnt = args.structureOutputSize;
3849 *ool_output_size = args.structureOutputDescriptorSize;
3850
3851 if (inputMD)
3852 inputMD->release();
3853 if (outputMD)
3854 outputMD->release();
3855
3856 return (ret);
3857 }
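/*
 * The async variant is reached from IOKitLib's IOConnectCallAsyncMethod();
 * the wake port usually comes from IONotificationPortGetMachPort(), and the
 * completion routine/refcon are planted in the reference array by the caller
 * (slots kIOAsyncCalloutFuncIndex / kIOAsyncCalloutRefconIndex).  The selector
 * below is hypothetical.
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static kern_return_t callAsyncSelector(io_connect_t conn, IONotificationPortRef notifyPort)
 *     {
 *         uint64_t ref[kOSAsyncRef64Count] = { 0 }; // completion callback + refcon go here
 *         uint32_t outCnt = 0;
 *         uint32_t selector = 7;                    // hypothetical selector
 *         return IOConnectCallAsyncMethod(conn, selector,
 *                                         IONotificationPortGetMachPort(notifyPort),
 *                                         ref, kOSAsyncRef64Count,
 *                                         NULL, 0, NULL, 0,
 *                                         NULL, &outCnt, NULL, NULL);
 *     }
 */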
3858
3859 /* Routine io_connect_method_scalarI_scalarO */
3860 kern_return_t is_io_connect_method_scalarI_scalarO(
3861 io_object_t connect,
3862 uint32_t index,
3863 io_scalar_inband_t input,
3864 mach_msg_type_number_t inputCount,
3865 io_scalar_inband_t output,
3866 mach_msg_type_number_t * outputCount )
3867 {
3868 IOReturn err;
3869 uint32_t i;
3870 io_scalar_inband64_t _input;
3871 io_scalar_inband64_t _output;
3872
3873 mach_msg_type_number_t struct_outputCnt = 0;
3874 mach_vm_size_t ool_output_size = 0;
3875
3876 bzero(&_output[0], sizeof(_output));
3877 for (i = 0; i < inputCount; i++)
3878 _input[i] = SCALAR64(input[i]);
3879
3880 err = is_io_connect_method(connect, index,
3881 _input, inputCount,
3882 NULL, 0,
3883 0, 0,
3884 NULL, &struct_outputCnt,
3885 _output, outputCount,
3886 0, &ool_output_size);
3887
3888 for (i = 0; i < *outputCount; i++)
3889 output[i] = SCALAR32(_output[i]);
3890
3891 return (err);
3892 }
3893
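/*
 * The shim_* functions below adapt the legacy IOExternalMethod /
 * IOExternalAsyncMethod dispatch tables to the current argument marshalling:
 * count0 is the expected number of scalar inputs and count1 the expected
 * number of scalar outputs, with at most six scalars in total, which is why
 * each switch enumerates input counts 0 through 6 and hands the remaining
 * slots to the outputs.
 */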
3894 kern_return_t shim_io_connect_method_scalarI_scalarO(
3895 IOExternalMethod * method,
3896 IOService * object,
3897 const io_user_scalar_t * input,
3898 mach_msg_type_number_t inputCount,
3899 io_user_scalar_t * output,
3900 mach_msg_type_number_t * outputCount )
3901 {
3902 IOMethod func;
3903 io_scalar_inband_t _output;
3904 IOReturn err;
3905 err = kIOReturnBadArgument;
3906
3907 bzero(&_output[0], sizeof(_output));
3908 do {
3909
3910 if( inputCount != method->count0)
3911 {
3912 IOLog("%s: IOUserClient inputCount mismatch\n", object->getName());
3913 continue;
3914 }
3915 if( *outputCount != method->count1)
3916 {
3917 IOLog("%s: IOUserClient outputCount mismatch\n", object->getName());
3918 continue;
3919 }
3920
3921 func = method->func;
3922
3923 switch( inputCount) {
3924
3925 case 6:
3926 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3927 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3928 break;
3929 case 5:
3930 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3931 ARG32(input[3]), ARG32(input[4]),
3932 &_output[0] );
3933 break;
3934 case 4:
3935 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3936 ARG32(input[3]),
3937 &_output[0], &_output[1] );
3938 break;
3939 case 3:
3940 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3941 &_output[0], &_output[1], &_output[2] );
3942 break;
3943 case 2:
3944 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3945 &_output[0], &_output[1], &_output[2],
3946 &_output[3] );
3947 break;
3948 case 1:
3949 err = (object->*func)( ARG32(input[0]),
3950 &_output[0], &_output[1], &_output[2],
3951 &_output[3], &_output[4] );
3952 break;
3953 case 0:
3954 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3955 &_output[3], &_output[4], &_output[5] );
3956 break;
3957
3958 default:
3959 IOLog("%s: Bad method table\n", object->getName());
3960 }
3961 }
3962 while( false);
3963
3964 uint32_t i;
3965 for (i = 0; i < *outputCount; i++)
3966 output[i] = SCALAR32(_output[i]);
3967
3968 return( err);
3969 }
3970
3971 /* Routine io_async_method_scalarI_scalarO */
3972 kern_return_t is_io_async_method_scalarI_scalarO(
3973 io_object_t connect,
3974 mach_port_t wake_port,
3975 io_async_ref_t reference,
3976 mach_msg_type_number_t referenceCnt,
3977 uint32_t index,
3978 io_scalar_inband_t input,
3979 mach_msg_type_number_t inputCount,
3980 io_scalar_inband_t output,
3981 mach_msg_type_number_t * outputCount )
3982 {
3983 IOReturn err;
3984 uint32_t i;
3985 io_scalar_inband64_t _input;
3986 io_scalar_inband64_t _output;
3987 io_async_ref64_t _reference;
3988
3989 bzero(&_output[0], sizeof(_output));
3990 for (i = 0; i < referenceCnt; i++)
3991 _reference[i] = REF64(reference[i]);
3992
3993 mach_msg_type_number_t struct_outputCnt = 0;
3994 mach_vm_size_t ool_output_size = 0;
3995
3996 for (i = 0; i < inputCount; i++)
3997 _input[i] = SCALAR64(input[i]);
3998
3999 err = is_io_connect_async_method(connect,
4000 wake_port, _reference, referenceCnt,
4001 index,
4002 _input, inputCount,
4003 NULL, 0,
4004 0, 0,
4005 NULL, &struct_outputCnt,
4006 _output, outputCount,
4007 0, &ool_output_size);
4008
4009 for (i = 0; i < *outputCount; i++)
4010 output[i] = SCALAR32(_output[i]);
4011
4012 return (err);
4013 }
4014 /* Routine io_async_method_scalarI_structureO */
4015 kern_return_t is_io_async_method_scalarI_structureO(
4016 io_object_t connect,
4017 mach_port_t wake_port,
4018 io_async_ref_t reference,
4019 mach_msg_type_number_t referenceCnt,
4020 uint32_t index,
4021 io_scalar_inband_t input,
4022 mach_msg_type_number_t inputCount,
4023 io_struct_inband_t output,
4024 mach_msg_type_number_t * outputCount )
4025 {
4026 uint32_t i;
4027 io_scalar_inband64_t _input;
4028 io_async_ref64_t _reference;
4029
4030 for (i = 0; i < referenceCnt; i++)
4031 _reference[i] = REF64(reference[i]);
4032
4033 mach_msg_type_number_t scalar_outputCnt = 0;
4034 mach_vm_size_t ool_output_size = 0;
4035
4036 for (i = 0; i < inputCount; i++)
4037 _input[i] = SCALAR64(input[i]);
4038
4039 return (is_io_connect_async_method(connect,
4040 wake_port, _reference, referenceCnt,
4041 index,
4042 _input, inputCount,
4043 NULL, 0,
4044 0, 0,
4045 output, outputCount,
4046 NULL, &scalar_outputCnt,
4047 0, &ool_output_size));
4048 }
4049
4050 /* Routine io_async_method_scalarI_structureI */
4051 kern_return_t is_io_async_method_scalarI_structureI(
4052 io_connect_t connect,
4053 mach_port_t wake_port,
4054 io_async_ref_t reference,
4055 mach_msg_type_number_t referenceCnt,
4056 uint32_t index,
4057 io_scalar_inband_t input,
4058 mach_msg_type_number_t inputCount,
4059 io_struct_inband_t inputStruct,
4060 mach_msg_type_number_t inputStructCount )
4061 {
4062 uint32_t i;
4063 io_scalar_inband64_t _input;
4064 io_async_ref64_t _reference;
4065
4066 for (i = 0; i < referenceCnt; i++)
4067 _reference[i] = REF64(reference[i]);
4068
4069 mach_msg_type_number_t scalar_outputCnt = 0;
4070 mach_msg_type_number_t inband_outputCnt = 0;
4071 mach_vm_size_t ool_output_size = 0;
4072
4073 for (i = 0; i < inputCount; i++)
4074 _input[i] = SCALAR64(input[i]);
4075
4076 return (is_io_connect_async_method(connect,
4077 wake_port, _reference, referenceCnt,
4078 index,
4079 _input, inputCount,
4080 inputStruct, inputStructCount,
4081 0, 0,
4082 NULL, &inband_outputCnt,
4083 NULL, &scalar_outputCnt,
4084 0, &ool_output_size));
4085 }
4086
4087 /* Routine io_async_method_structureI_structureO */
4088 kern_return_t is_io_async_method_structureI_structureO(
4089 io_object_t connect,
4090 mach_port_t wake_port,
4091 io_async_ref_t reference,
4092 mach_msg_type_number_t referenceCnt,
4093 uint32_t index,
4094 io_struct_inband_t input,
4095 mach_msg_type_number_t inputCount,
4096 io_struct_inband_t output,
4097 mach_msg_type_number_t * outputCount )
4098 {
4099 uint32_t i;
4100 mach_msg_type_number_t scalar_outputCnt = 0;
4101 mach_vm_size_t ool_output_size = 0;
4102 io_async_ref64_t _reference;
4103
4104 for (i = 0; i < referenceCnt; i++)
4105 _reference[i] = REF64(reference[i]);
4106
4107 return (is_io_connect_async_method(connect,
4108 wake_port, _reference, referenceCnt,
4109 index,
4110 NULL, 0,
4111 input, inputCount,
4112 0, 0,
4113 output, outputCount,
4114 NULL, &scalar_outputCnt,
4115 0, &ool_output_size));
4116 }
4117
4118
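/*
 * shim_io_async_method_scalarI_scalarO bridges the unified 64-bit argument
 * marshalling back onto an old-style IOExternalAsyncMethod table entry.  The
 * scalar counts are validated against method->count0/count1, the async
 * reference is narrowed with REF32(), and the member function is invoked with
 * the fixed six-argument IOAsyncMethod signature, unused slots padded with
 * output pointers.
 */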
4119 kern_return_t shim_io_async_method_scalarI_scalarO(
4120 IOExternalAsyncMethod * method,
4121 IOService * object,
4122 mach_port_t asyncWakePort,
4123 io_user_reference_t * asyncReference,
4124 uint32_t asyncReferenceCount,
4125 const io_user_scalar_t * input,
4126 mach_msg_type_number_t inputCount,
4127 io_user_scalar_t * output,
4128 mach_msg_type_number_t * outputCount )
4129 {
4130 IOAsyncMethod func;
4131 uint32_t i;
4132 io_scalar_inband_t _output;
4133 IOReturn err;
4134 io_async_ref_t reference;
4135
4136 bzero(&_output[0], sizeof(_output));
4137 for (i = 0; i < asyncReferenceCount; i++)
4138 reference[i] = REF32(asyncReference[i]);
4139
4140 err = kIOReturnBadArgument;
4141
4142 do {
4143
4144 if( inputCount != method->count0)
4145 {
4146 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4147 continue;
4148 }
4149 if( *outputCount != method->count1)
4150 {
4151 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4152 continue;
4153 }
4154
4155 func = method->func;
4156
4157 switch( inputCount) {
4158
4159 case 6:
4160 err = (object->*func)( reference,
4161 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4162 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4163 break;
4164 case 5:
4165 err = (object->*func)( reference,
4166 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4167 ARG32(input[3]), ARG32(input[4]),
4168 &_output[0] );
4169 break;
4170 case 4:
4171 err = (object->*func)( reference,
4172 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4173 ARG32(input[3]),
4174 &_output[0], &_output[1] );
4175 break;
4176 case 3:
4177 err = (object->*func)( reference,
4178 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4179 &_output[0], &_output[1], &_output[2] );
4180 break;
4181 case 2:
4182 err = (object->*func)( reference,
4183 ARG32(input[0]), ARG32(input[1]),
4184 &_output[0], &_output[1], &_output[2],
4185 &_output[3] );
4186 break;
4187 case 1:
4188 err = (object->*func)( reference,
4189 ARG32(input[0]),
4190 &_output[0], &_output[1], &_output[2],
4191 &_output[3], &_output[4] );
4192 break;
4193 case 0:
4194 err = (object->*func)( reference,
4195 &_output[0], &_output[1], &_output[2],
4196 &_output[3], &_output[4], &_output[5] );
4197 break;
4198
4199 default:
4200 IOLog("%s: Bad method table\n", object->getName());
4201 }
4202 }
4203 while( false);
4204
4205 for (i = 0; i < *outputCount; i++)
4206 output[i] = SCALAR32(_output[i]);
4207
4208 return( err);
4209 }
4210
4211
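/*
 * Synchronous legacy entry point: 32-bit scalars in, inband structure out.
 * Scalars are widened with SCALAR64() and the call is forwarded to
 * is_io_connect_method(); the structure output is returned inband, so no
 * out-of-line descriptor is involved.
 */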
4212 /* Routine io_connect_method_scalarI_structureO */
4213 kern_return_t is_io_connect_method_scalarI_structureO(
4214 io_object_t connect,
4215 uint32_t index,
4216 io_scalar_inband_t input,
4217 mach_msg_type_number_t inputCount,
4218 io_struct_inband_t output,
4219 mach_msg_type_number_t * outputCount )
4220 {
4221 uint32_t i;
4222 io_scalar_inband64_t _input;
4223
4224 mach_msg_type_number_t scalar_outputCnt = 0;
4225 mach_vm_size_t ool_output_size = 0;
4226
4227 for (i = 0; i < inputCount; i++)
4228 _input[i] = SCALAR64(input[i]);
4229
4230 return (is_io_connect_method(connect, index,
4231 _input, inputCount,
4232 NULL, 0,
4233 0, 0,
4234 output, outputCount,
4235 NULL, &scalar_outputCnt,
4236 0, &ool_output_size));
4237 }
4238
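/*
 * Shim for the old IOExternalMethod table, scalarI_structureO flavor.  A
 * count1 of kIOUCVariableStructureSize means the output structure size is
 * variable; in that case *outputCount is passed through (as a void *) so the
 * driver method can report the actual number of bytes produced.
 */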
4239 kern_return_t shim_io_connect_method_scalarI_structureO(
4240
4241 IOExternalMethod * method,
4242 IOService * object,
4243 const io_user_scalar_t * input,
4244 mach_msg_type_number_t inputCount,
4245 io_struct_inband_t output,
4246 IOByteCount * outputCount )
4247 {
4248 IOMethod func;
4249 IOReturn err;
4250
4251 err = kIOReturnBadArgument;
4252
4253 do {
4254 if( inputCount != method->count0)
4255 {
4256 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4257 continue;
4258 }
4259 if( (kIOUCVariableStructureSize != method->count1)
4260 && (*outputCount != method->count1))
4261 {
4262 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4263 continue;
4264 }
4265
4266 func = method->func;
4267
4268 switch( inputCount) {
4269
4270 case 5:
4271 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4272 ARG32(input[3]), ARG32(input[4]),
4273 output );
4274 break;
4275 case 4:
4276 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4277 ARG32(input[3]),
4278 output, (void *)outputCount );
4279 break;
4280 case 3:
4281 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4282 output, (void *)outputCount, 0 );
4283 break;
4284 case 2:
4285 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4286 output, (void *)outputCount, 0, 0 );
4287 break;
4288 case 1:
4289 err = (object->*func)( ARG32(input[0]),
4290 output, (void *)outputCount, 0, 0, 0 );
4291 break;
4292 case 0:
4293 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4294 break;
4295
4296 default:
4297 IOLog("%s: Bad method table\n", object->getName());
4298 }
4299 }
4300 while( false);
4301
4302 return( err);
4303 }
4304
4305
4306 kern_return_t shim_io_async_method_scalarI_structureO(
4307 IOExternalAsyncMethod * method,
4308 IOService * object,
4309 mach_port_t asyncWakePort,
4310 io_user_reference_t * asyncReference,
4311 uint32_t asyncReferenceCount,
4312 const io_user_scalar_t * input,
4313 mach_msg_type_number_t inputCount,
4314 io_struct_inband_t output,
4315 mach_msg_type_number_t * outputCount )
4316 {
4317 IOAsyncMethod func;
4318 uint32_t i;
4319 IOReturn err;
4320 io_async_ref_t reference;
4321
4322 for (i = 0; i < asyncReferenceCount; i++)
4323 reference[i] = REF32(asyncReference[i]);
4324
4325 err = kIOReturnBadArgument;
4326 do {
4327 if( inputCount != method->count0)
4328 {
4329 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4330 continue;
4331 }
4332 if( (kIOUCVariableStructureSize != method->count1)
4333 && (*outputCount != method->count1))
4334 {
4335 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4336 continue;
4337 }
4338
4339 func = method->func;
4340
4341 switch( inputCount) {
4342
4343 case 5:
4344 err = (object->*func)( reference,
4345 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4346 ARG32(input[3]), ARG32(input[4]),
4347 output );
4348 break;
4349 case 4:
4350 err = (object->*func)( reference,
4351 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4352 ARG32(input[3]),
4353 output, (void *)outputCount );
4354 break;
4355 case 3:
4356 err = (object->*func)( reference,
4357 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4358 output, (void *)outputCount, 0 );
4359 break;
4360 case 2:
4361 err = (object->*func)( reference,
4362 ARG32(input[0]), ARG32(input[1]),
4363 output, (void *)outputCount, 0, 0 );
4364 break;
4365 case 1:
4366 err = (object->*func)( reference,
4367 ARG32(input[0]),
4368 output, (void *)outputCount, 0, 0, 0 );
4369 break;
4370 case 0:
4371 err = (object->*func)( reference,
4372 output, (void *)outputCount, 0, 0, 0, 0 );
4373 break;
4374
4375 default:
4376 IOLog("%s: Bad method table\n", object->getName());
4377 }
4378 }
4379 while( false);
4380
4381 return( err);
4382 }
4383
4384 /* Routine io_connect_method_scalarI_structureI */
4385 kern_return_t is_io_connect_method_scalarI_structureI(
4386 io_connect_t connect,
4387 uint32_t index,
4388 io_scalar_inband_t input,
4389 mach_msg_type_number_t inputCount,
4390 io_struct_inband_t inputStruct,
4391 mach_msg_type_number_t inputStructCount )
4392 {
4393 uint32_t i;
4394 io_scalar_inband64_t _input;
4395
4396 mach_msg_type_number_t scalar_outputCnt = 0;
4397 mach_msg_type_number_t inband_outputCnt = 0;
4398 mach_vm_size_t ool_output_size = 0;
4399
4400 for (i = 0; i < inputCount; i++)
4401 _input[i] = SCALAR64(input[i]);
4402
4403 return (is_io_connect_method(connect, index,
4404 _input, inputCount,
4405 inputStruct, inputStructCount,
4406 0, 0,
4407 NULL, &inband_outputCnt,
4408 NULL, &scalar_outputCnt,
4409 0, &ool_output_size));
4410 }
4411
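/*
 * Shim for the old IOExternalMethod table, scalarI_structureI flavor: up to
 * five 32-bit scalars plus an inband input structure, no output.  The input
 * structure size is checked against method->count1 unless that count is
 * marked kIOUCVariableStructureSize.
 */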
4412 kern_return_t shim_io_connect_method_scalarI_structureI(
4413 IOExternalMethod * method,
4414 IOService * object,
4415 const io_user_scalar_t * input,
4416 mach_msg_type_number_t inputCount,
4417 io_struct_inband_t inputStruct,
4418 mach_msg_type_number_t inputStructCount )
4419 {
4420 IOMethod func;
4421 IOReturn err = kIOReturnBadArgument;
4422
4423 do
4424 {
4425 if (inputCount != method->count0)
4426 {
4427 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4428 continue;
4429 }
4430 if( (kIOUCVariableStructureSize != method->count1)
4431 && (inputStructCount != method->count1))
4432 {
4433 IOLog("%s: IOUserClient inputStructCount count mismatch\n", object->getName());
4434 continue;
4435 }
4436
4437 func = method->func;
4438
4439 switch( inputCount) {
4440
4441 case 5:
4442 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4443 ARG32(input[3]), ARG32(input[4]),
4444 inputStruct );
4445 break;
4446 case 4:
4447 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4448 ARG32(input[3]),
4449 inputStruct, (void *)(uintptr_t)inputStructCount );
4450 break;
4451 case 3:
4452 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4453 inputStruct, (void *)(uintptr_t)inputStructCount,
4454 0 );
4455 break;
4456 case 2:
4457 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4458 inputStruct, (void *)(uintptr_t)inputStructCount,
4459 0, 0 );
4460 break;
4461 case 1:
4462 err = (object->*func)( ARG32(input[0]),
4463 inputStruct, (void *)(uintptr_t)inputStructCount,
4464 0, 0, 0 );
4465 break;
4466 case 0:
4467 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4468 0, 0, 0, 0 );
4469 break;
4470
4471 default:
4472 IOLog("%s: Bad method table\n", object->getName());
4473 }
4474 }
4475 while (false);
4476
4477 return( err);
4478 }
4479
4480 kern_return_t shim_io_async_method_scalarI_structureI(
4481 IOExternalAsyncMethod * method,
4482 IOService * object,
4483 mach_port_t asyncWakePort,
4484 io_user_reference_t * asyncReference,
4485 uint32_t asyncReferenceCount,
4486 const io_user_scalar_t * input,
4487 mach_msg_type_number_t inputCount,
4488 io_struct_inband_t inputStruct,
4489 mach_msg_type_number_t inputStructCount )
4490 {
4491 IOAsyncMethod func;
4492 uint32_t i;
4493 IOReturn err = kIOReturnBadArgument;
4494 io_async_ref_t reference;
4495
4496 for (i = 0; i < asyncReferenceCount; i++)
4497 reference[i] = REF32(asyncReference[i]);
4498
4499 do
4500 {
4501 if (inputCount != method->count0)
4502 {
4503 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4504 continue;
4505 }
4506 if( (kIOUCVariableStructureSize != method->count1)
4507 && (inputStructCount != method->count1))
4508 {
4509 IOLog("%s: IOUserClient inputStructCount count mismatch\n", object->getName());
4510 continue;
4511 }
4512
4513 func = method->func;
4514
4515 switch( inputCount) {
4516
4517 case 5:
4518 err = (object->*func)( reference,
4519 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4520 ARG32(input[3]), ARG32(input[4]),
4521 inputStruct );
4522 break;
4523 case 4:
4524 err = (object->*func)( reference,
4525 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4526 ARG32(input[3]),
4527 inputStruct, (void *)(uintptr_t)inputStructCount );
4528 break;
4529 case 3:
4530 err = (object->*func)( reference,
4531 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4532 inputStruct, (void *)(uintptr_t)inputStructCount,
4533 0 );
4534 break;
4535 case 2:
4536 err = (object->*func)( reference,
4537 ARG32(input[0]), ARG32(input[1]),
4538 inputStruct, (void *)(uintptr_t)inputStructCount,
4539 0, 0 );
4540 break;
4541 case 1:
4542 err = (object->*func)( reference,
4543 ARG32(input[0]),
4544 inputStruct, (void *)(uintptr_t)inputStructCount,
4545 0, 0, 0 );
4546 break;
4547 case 0:
4548 err = (object->*func)( reference,
4549 inputStruct, (void *)(uintptr_t)inputStructCount,
4550 0, 0, 0, 0 );
4551 break;
4552
4553 default:
4554 IOLog("%s: Bad method table\n", object->getName());
4555 }
4556 }
4557 while (false);
4558
4559 return( err);
4560 }
4561
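/*
 * Legacy entry point for the structureI_structureO flavor: both the input and
 * output structures travel inband, so the routine simply forwards to
 * is_io_connect_method() with empty scalar argument lists.
 */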
4562 /* Routine io_connect_method_structureI_structureO */
4563 kern_return_t is_io_connect_method_structureI_structureO(
4564 io_object_t connect,
4565 uint32_t index,
4566 io_struct_inband_t input,
4567 mach_msg_type_number_t inputCount,
4568 io_struct_inband_t output,
4569 mach_msg_type_number_t * outputCount )
4570 {
4571 mach_msg_type_number_t scalar_outputCnt = 0;
4572 mach_vm_size_t ool_output_size = 0;
4573
4574 return (is_io_connect_method(connect, index,
4575 NULL, 0,
4576 input, inputCount,
4577 0, 0,
4578 output, outputCount,
4579 NULL, &scalar_outputCnt,
4580 0, &ool_output_size));
4581 }
4582
4583 kern_return_t shim_io_connect_method_structureI_structureO(
4584 IOExternalMethod * method,
4585 IOService * object,
4586 io_struct_inband_t input,
4587 mach_msg_type_number_t inputCount,
4588 io_struct_inband_t output,
4589 IOByteCount * outputCount )
4590 {
4591 IOMethod func;
4592 IOReturn err = kIOReturnBadArgument;
4593
4594 do
4595 {
4596 if( (kIOUCVariableStructureSize != method->count0)
4597 && (inputCount != method->count0))
4598 {
4599 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4600 continue;
4601 }
4602 if( (kIOUCVariableStructureSize != method->count1)
4603 && (*outputCount != method->count1))
4604 {
4605 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4606 continue;
4607 }
4608
4609 func = method->func;
4610
4611 if( method->count1) {
4612 if( method->count0) {
4613 err = (object->*func)( input, output,
4614 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4615 } else {
4616 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4617 }
4618 } else {
4619 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4620 }
4621 }
4622 while( false);
4623
4624
4625 return( err);
4626 }
4627
4628 kern_return_t shim_io_async_method_structureI_structureO(
4629 IOExternalAsyncMethod * method,
4630 IOService * object,
4631 mach_port_t asyncWakePort,
4632 io_user_reference_t * asyncReference,
4633 uint32_t asyncReferenceCount,
4634 io_struct_inband_t input,
4635 mach_msg_type_number_t inputCount,
4636 io_struct_inband_t output,
4637 mach_msg_type_number_t * outputCount )
4638 {
4639 IOAsyncMethod func;
4640 uint32_t i;
4641 IOReturn err;
4642 io_async_ref_t reference;
4643
4644 for (i = 0; i < asyncReferenceCount; i++)
4645 reference[i] = REF32(asyncReference[i]);
4646
4647 err = kIOReturnBadArgument;
4648 do
4649 {
4650 if( (kIOUCVariableStructureSize != method->count0)
4651 && (inputCount != method->count0))
4652 {
4653 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4654 continue;
4655 }
4656 if( (kIOUCVariableStructureSize != method->count1)
4657 && (*outputCount != method->count1))
4658 {
4659 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4660 continue;
4661 }
4662
4663 func = method->func;
4664
4665 if( method->count1) {
4666 if( method->count0) {
4667 err = (object->*func)( reference,
4668 input, output,
4669 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4670 } else {
4671 err = (object->*func)( reference,
4672 output, outputCount, 0, 0, 0, 0 );
4673 }
4674 } else {
4675 err = (object->*func)( reference,
4676 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4677 }
4678 }
4679 while( false);
4680
4681 return( err);
4682 }
4683
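/*
 * is_io_catalog_send_data is the kernel side of IOCatalogueSendData() in
 * IOKitLib.  The caller must hold the master device port.  For flags that
 * carry a payload, the out-of-line data is copied out of the message,
 * unserialized from XML into an OSArray or OSDictionary, and applied to
 * gIOCatalogue; the kextd housekeeping flags take no payload.
 *
 * A minimal user-space sketch (hedged: assumes the IOKitLib wrapper, which
 * packages the buffer as out-of-line data for this MIG routine, and an
 * illustrative xmlDictText/xmlDictLength buffer supplied by the caller):
 *
 *   kern_return_t kr = IOCatalogueSendData(kIOMasterPortDefault,
 *                                          kIOCatalogStartMatching,
 *                                          xmlDictText, xmlDictLength);
 */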
4684 /* Routine io_catalog_send_data */
4685 kern_return_t is_io_catalog_send_data(
4686 mach_port_t master_port,
4687 uint32_t flag,
4688 io_buf_ptr_t inData,
4689 mach_msg_type_number_t inDataCount,
4690 kern_return_t * result)
4691 {
4692 OSObject * obj = 0;
4693 vm_offset_t data;
4694 kern_return_t kr = kIOReturnError;
4695
4696 //printf("io_catalog_send_data called. flag: %d\n", flag);
4697
4698 if( master_port != master_device_port)
4699 return kIOReturnNotPrivileged;
4700
4701 if( (flag != kIOCatalogRemoveKernelLinker &&
4702 flag != kIOCatalogKextdActive &&
4703 flag != kIOCatalogKextdFinishedLaunching) &&
4704 ( !inData || !inDataCount) )
4705 {
4706 return kIOReturnBadArgument;
4707 }
4708
4709 if (inData) {
4710 vm_map_offset_t map_data;
4711
4712 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4713 return( kIOReturnMessageTooLarge);
4714
4715 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4716 data = CAST_DOWN(vm_offset_t, map_data);
4717
4718 if( kr != KERN_SUCCESS)
4719 return kr;
4720
4721 // must return success after vm_map_copyout() succeeds
4722
4723 if( inDataCount ) {
4724 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4725 vm_deallocate( kernel_map, data, inDataCount );
4726 if( !obj) {
4727 *result = kIOReturnNoMemory;
4728 return( KERN_SUCCESS);
4729 }
4730 }
4731 }
4732
4733 switch ( flag ) {
4734 case kIOCatalogResetDrivers:
4735 case kIOCatalogResetDriversNoMatch: {
4736 OSArray * array;
4737
4738 array = OSDynamicCast(OSArray, obj);
4739 if (array) {
4740 if ( !gIOCatalogue->resetAndAddDrivers(array,
4741 flag == kIOCatalogResetDrivers) ) {
4742
4743 kr = kIOReturnError;
4744 }
4745 } else {
4746 kr = kIOReturnBadArgument;
4747 }
4748 }
4749 break;
4750
4751 case kIOCatalogAddDrivers:
4752 case kIOCatalogAddDriversNoMatch: {
4753 OSArray * array;
4754
4755 array = OSDynamicCast(OSArray, obj);
4756 if ( array ) {
4757 if ( !gIOCatalogue->addDrivers( array ,
4758 flag == kIOCatalogAddDrivers) ) {
4759 kr = kIOReturnError;
4760 }
4761 }
4762 else {
4763 kr = kIOReturnBadArgument;
4764 }
4765 }
4766 break;
4767
4768 case kIOCatalogRemoveDrivers:
4769 case kIOCatalogRemoveDriversNoMatch: {
4770 OSDictionary * dict;
4771
4772 dict = OSDynamicCast(OSDictionary, obj);
4773 if ( dict ) {
4774 if ( !gIOCatalogue->removeDrivers( dict,
4775 flag == kIOCatalogRemoveDrivers ) ) {
4776 kr = kIOReturnError;
4777 }
4778 }
4779 else {
4780 kr = kIOReturnBadArgument;
4781 }
4782 }
4783 break;
4784
4785 case kIOCatalogStartMatching: {
4786 OSDictionary * dict;
4787
4788 dict = OSDynamicCast(OSDictionary, obj);
4789 if ( dict ) {
4790 if ( !gIOCatalogue->startMatching( dict ) ) {
4791 kr = kIOReturnError;
4792 }
4793 }
4794 else {
4795 kr = kIOReturnBadArgument;
4796 }
4797 }
4798 break;
4799
4800 case kIOCatalogRemoveKernelLinker:
4801 kr = KERN_NOT_SUPPORTED;
4802 break;
4803
4804 case kIOCatalogKextdActive:
4805 #if !NO_KEXTD
4806 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4807 OSKext::setKextdActive();
4808
4809 /* Dump all nonloaded startup extensions; kextd will now send them
4810 * down on request.
4811 */
4812 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4813 #endif
4814 kr = kIOReturnSuccess;
4815 break;
4816
4817 case kIOCatalogKextdFinishedLaunching: {
4818 #if !NO_KEXTD
4819 static bool clearedBusy = false;
4820
4821 if (!clearedBusy) {
4822 IOService * serviceRoot = IOService::getServiceRoot();
4823 if (serviceRoot) {
4824 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4825 serviceRoot->adjustBusy(-1);
4826 clearedBusy = true;
4827 }
4828 }
4829 #endif
4830 kr = kIOReturnSuccess;
4831 }
4832 break;
4833
4834 default:
4835 kr = kIOReturnBadArgument;
4836 break;
4837 }
4838
4839 if (obj) obj->release();
4840
4841 *result = kr;
4842 return( KERN_SUCCESS);
4843 }
4844
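/*
 * is_io_catalog_terminate requires both the master device port and
 * administrator privilege.  kIOCatalogServiceTerminate synchronously
 * terminates every registered service matching the given class name, while
 * the module flags hand off to gIOCatalogue->terminateDriversForModule();
 * both of those cases are compiled out on SECURE_KERNEL builds.
 */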
4845 /* Routine io_catalog_terminate */
4846 kern_return_t is_io_catalog_terminate(
4847 mach_port_t master_port,
4848 uint32_t flag,
4849 io_name_t name )
4850 {
4851 kern_return_t kr;
4852
4853 if( master_port != master_device_port )
4854 return kIOReturnNotPrivileged;
4855
4856 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4857 kIOClientPrivilegeAdministrator );
4858 if( kIOReturnSuccess != kr)
4859 return( kr );
4860
4861 switch ( flag ) {
4862 #if !defined(SECURE_KERNEL)
4863 case kIOCatalogServiceTerminate:
4864 OSIterator * iter;
4865 IOService * service;
4866
4867 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4868 kIORegistryIterateRecursively);
4869 if ( !iter )
4870 return kIOReturnNoMemory;
4871
4872 do {
4873 iter->reset();
4874 while( (service = (IOService *)iter->getNextObject()) ) {
4875 if( service->metaCast(name)) {
4876 if ( !service->terminate( kIOServiceRequired
4877 | kIOServiceSynchronous) ) {
4878 kr = kIOReturnUnsupported;
4879 break;
4880 }
4881 }
4882 }
4883 } while( !service && !iter->isValid());
4884 iter->release();
4885 break;
4886
4887 case kIOCatalogModuleUnload:
4888 case kIOCatalogModuleTerminate:
4889 kr = gIOCatalogue->terminateDriversForModule(name,
4890 flag == kIOCatalogModuleUnload);
4891 break;
4892 #endif
4893
4894 default:
4895 kr = kIOReturnBadArgument;
4896 break;
4897 }
4898
4899 return( kr );
4900 }
4901
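/*
 * is_io_catalog_get_data serializes the requested catalogue data (selected by
 * flag) into an OSSerialize buffer, copies it into a fresh kernel_map
 * allocation, and hands it back to the caller as a vm_map copy object, which
 * MIG transmits out-of-line.
 */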
4902 /* Routine io_catalog_get_data */
4903 kern_return_t is_io_catalog_get_data(
4904 mach_port_t master_port,
4905 uint32_t flag,
4906 io_buf_ptr_t *outData,
4907 mach_msg_type_number_t *outDataCount)
4908 {
4909 kern_return_t kr = kIOReturnSuccess;
4910 OSSerialize * s;
4911
4912 if( master_port != master_device_port)
4913 return kIOReturnNotPrivileged;
4914
4915 //printf("io_catalog_get_data called. flag: %d\n", flag);
4916
4917 s = OSSerialize::withCapacity(4096);
4918 if ( !s )
4919 return kIOReturnNoMemory;
4920
4921 kr = gIOCatalogue->serializeData(flag, s);
4922
4923 if ( kr == kIOReturnSuccess ) {
4924 vm_offset_t data;
4925 vm_map_copy_t copy;
4926 vm_size_t size;
4927
4928 size = s->getLength();
4929 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4930 if ( kr == kIOReturnSuccess ) {
4931 bcopy(s->text(), (void *)data, size);
4932 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4933 (vm_map_size_t)size, true, &copy);
4934 *outData = (char *)copy;
4935 *outDataCount = size;
4936 }
4937 }
4938
4939 s->release();
4940
4941 return kr;
4942 }
4943
4944 /* Routine io_catalog_get_gen_count */
4945 kern_return_t is_io_catalog_get_gen_count(
4946 mach_port_t master_port,
4947 uint32_t *genCount)
4948 {
4949 if( master_port != master_device_port)
4950 return kIOReturnNotPrivileged;
4951
4952 //printf("io_catalog_get_gen_count called.\n");
4953
4954 if ( !genCount )
4955 return kIOReturnBadArgument;
4956
4957 *genCount = gIOCatalogue->getGenerationCount();
4958
4959 return kIOReturnSuccess;
4960 }
4961
4962 /* Routine io_catalog_module_loaded.
4963 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
4964 */
4965 kern_return_t is_io_catalog_module_loaded(
4966 mach_port_t master_port,
4967 io_name_t name)
4968 {
4969 if( master_port != master_device_port)
4970 return kIOReturnNotPrivileged;
4971
4972 //printf("io_catalog_module_loaded called. name %s\n", name);
4973
4974 if ( !name )
4975 return kIOReturnBadArgument;
4976
4977 gIOCatalogue->moduleHasLoaded(name);
4978
4979 return kIOReturnSuccess;
4980 }
4981
4982 kern_return_t is_io_catalog_reset(
4983 mach_port_t master_port,
4984 uint32_t flag)
4985 {
4986 if( master_port != master_device_port)
4987 return kIOReturnNotPrivileged;
4988
4989 switch ( flag ) {
4990 case kIOCatalogResetDefault:
4991 gIOCatalogue->reset();
4992 break;
4993
4994 default:
4995 return kIOReturnBadArgument;
4996 }
4997
4998 return kIOReturnSuccess;
4999 }
5000
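/*
 * iokit_user_client_trap is the fast-path Mach trap used by IOConnectTrap0..6
 * in IOKitLib.  It resolves the user client from the caller's connect
 * reference, looks up the IOExternalTrap for the requested index, and invokes
 * it directly with up to six register-sized arguments, bypassing MIG entirely.
 */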
5001 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5002 {
5003 kern_return_t result = kIOReturnBadArgument;
5004 IOUserClient *userClient;
5005
5006 if ((userClient = OSDynamicCast(IOUserClient,
5007 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5008 IOExternalTrap *trap;
5009 IOService *target = NULL;
5010
5011 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5012
5013 if (trap && target) {
5014 IOTrap func;
5015
5016 func = trap->func;
5017
5018 if (func) {
5019 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5020 }
5021 }
5022
5023 iokit_remove_connect_reference(userClient);
5024 }
5025
5026 return result;
5027 }
5028
5029 } /* extern "C" */
5030
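/*
 * IOUserClient::externalMethod is the common dispatcher behind
 * IOConnectCallMethod and friends.  When a dispatch table entry is supplied,
 * the scalar counts and structure sizes are validated against the check*
 * fields (kIOUCVariableStructureSize acting as a wildcard) before the target
 * function is called.  Without a dispatch entry, the call falls back to the
 * pre-Leopard IOExternalMethod / IOExternalAsyncMethod tables via the shim_*
 * routines above, which cannot handle out-of-line structure descriptors.
 *
 * A minimal subclass sketch (hedged: MyUserClient and sMyMethod are
 * illustrative names, not part of this file; sMyMethod is a static helper of
 * type IOReturn (*)(OSObject *, void *, IOExternalMethodArguments *)):
 *
 *   static IOExternalMethodDispatch sMethods[1] = {
 *       // function, checkScalarInputCount, checkStructureInputSize,
 *       // checkScalarOutputCount, checkStructureOutputSize
 *       { &MyUserClient::sMyMethod, 2, 0, 1, 0 },
 *   };
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference)
 *   {
 *       if (selector < 1) {
 *           dispatch = &sMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return super::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */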
5031 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5032 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5033 {
5034 IOReturn err;
5035 IOService * object;
5036 IOByteCount structureOutputSize;
5037
5038 if (dispatch)
5039 {
5040 uint32_t count;
5041 count = dispatch->checkScalarInputCount;
5042 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5043 {
5044 return (kIOReturnBadArgument);
5045 }
5046
5047 count = dispatch->checkStructureInputSize;
5048 if ((kIOUCVariableStructureSize != count)
5049 && (count != ((args->structureInputDescriptor)
5050 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5051 {
5052 return (kIOReturnBadArgument);
5053 }
5054
5055 count = dispatch->checkScalarOutputCount;
5056 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5057 {
5058 return (kIOReturnBadArgument);
5059 }
5060
5061 count = dispatch->checkStructureOutputSize;
5062 if ((kIOUCVariableStructureSize != count)
5063 && (count != ((args->structureOutputDescriptor)
5064 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5065 {
5066 return (kIOReturnBadArgument);
5067 }
5068
5069 if (dispatch->function)
5070 err = (*dispatch->function)(target, reference, args);
5071 else
5072 err = kIOReturnNoCompletion; /* implementor can dispatch */
5073
5074 return (err);
5075 }
5076
5077
5078 // pre-Leopard APIs don't support out-of-line (ool) structs
5079 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5080 {
5081 err = kIOReturnIPCError;
5082 return (err);
5083 }
5084
5085 structureOutputSize = args->structureOutputSize;
5086
5087 if (args->asyncWakePort)
5088 {
5089 IOExternalAsyncMethod * method;
5090 object = 0;
5091 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5092 return (kIOReturnUnsupported);
5093
5094 if (kIOUCForegroundOnly & method->flags)
5095 {
5096 if (task_is_gpu_denied(current_task()))
5097 return (kIOReturnNotPermitted);
5098 }
5099
5100 switch (method->flags & kIOUCTypeMask)
5101 {
5102 case kIOUCScalarIStructI:
5103 err = shim_io_async_method_scalarI_structureI( method, object,
5104 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5105 args->scalarInput, args->scalarInputCount,
5106 (char *)args->structureInput, args->structureInputSize );
5107 break;
5108
5109 case kIOUCScalarIScalarO:
5110 err = shim_io_async_method_scalarI_scalarO( method, object,
5111 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5112 args->scalarInput, args->scalarInputCount,
5113 args->scalarOutput, &args->scalarOutputCount );
5114 break;
5115
5116 case kIOUCScalarIStructO:
5117 err = shim_io_async_method_scalarI_structureO( method, object,
5118 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5119 args->scalarInput, args->scalarInputCount,
5120 (char *) args->structureOutput, &args->structureOutputSize );
5121 break;
5122
5123
5124 case kIOUCStructIStructO:
5125 err = shim_io_async_method_structureI_structureO( method, object,
5126 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5127 (char *)args->structureInput, args->structureInputSize,
5128 (char *) args->structureOutput, &args->structureOutputSize );
5129 break;
5130
5131 default:
5132 err = kIOReturnBadArgument;
5133 break;
5134 }
5135 }
5136 else
5137 {
5138 IOExternalMethod * method;
5139 object = 0;
5140 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5141 return (kIOReturnUnsupported);
5142
5143 if (kIOUCForegroundOnly & method->flags)
5144 {
5145 if (task_is_gpu_denied(current_task()))
5146 return (kIOReturnNotPermitted);
5147 }
5148
5149 switch (method->flags & kIOUCTypeMask)
5150 {
5151 case kIOUCScalarIStructI:
5152 err = shim_io_connect_method_scalarI_structureI( method, object,
5153 args->scalarInput, args->scalarInputCount,
5154 (char *) args->structureInput, args->structureInputSize );
5155 break;
5156
5157 case kIOUCScalarIScalarO:
5158 err = shim_io_connect_method_scalarI_scalarO( method, object,
5159 args->scalarInput, args->scalarInputCount,
5160 args->scalarOutput, &args->scalarOutputCount );
5161 break;
5162
5163 case kIOUCScalarIStructO:
5164 err = shim_io_connect_method_scalarI_structureO( method, object,
5165 args->scalarInput, args->scalarInputCount,
5166 (char *) args->structureOutput, &structureOutputSize );
5167 break;
5168
5169
5170 case kIOUCStructIStructO:
5171 err = shim_io_connect_method_structureI_structureO( method, object,
5172 (char *) args->structureInput, args->structureInputSize,
5173 (char *) args->structureOutput, &structureOutputSize );
5174 break;
5175
5176 default:
5177 err = kIOReturnBadArgument;
5178 break;
5179 }
5180 }
5181
5182 args->structureOutputSize = structureOutputSize;
5183
5184 return (err);
5185 }
5186
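/*
 * Reserved vtable padding slots for binary compatibility.  On 32-bit kernels
 * the first two pads are already marked used (DefineReservedUsed) when
 * __LP64__ is not set; the remainder stay available for future expansion.
 */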
5187 #if __LP64__
5188 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5189 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5190 #else
5191 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5192 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5193 #endif
5194 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5195 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5196 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5197 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5198 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5199 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5200 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5201 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5202 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5203 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5204 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5205 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5206 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5207 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5208