]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
06e4a612c2220a43946e1820b11265be1e5ef6f9
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #if CONFIG_MACF
49
50 extern "C" {
51 #include <security/mac_framework.h>
52 };
53 #include <sys/kauth.h>
54
55 #define IOMACF_LOG 0
56
57 #endif /* CONFIG_MACF */
58
59 #include <IOKit/assert.h>
60
61 #include "IOServicePrivate.h"
62 #include "IOKitKernelInternal.h"
63
64 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
65 #define SCALAR32(x) ((uint32_t )x)
66 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
67 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
68 #define REF32(x) ((int)(x))
69
70 enum
71 {
72 kIOUCAsync0Flags = 3ULL,
73 kIOUCAsync64Flag = 1ULL
74 };
75
76 #if IOKITSTATS
77
78 #define IOStatisticsRegisterCounter() \
79 do { \
80 reserved->counter = IOStatistics::registerUserClient(this); \
81 } while (0)
82
83 #define IOStatisticsUnregisterCounter() \
84 do { \
85 if (reserved) \
86 IOStatistics::unregisterUserClient(reserved->counter); \
87 } while (0)
88
89 #define IOStatisticsClientCall() \
90 do { \
91 IOStatistics::countUserClientCall(client); \
92 } while (0)
93
94 #else
95
96 #define IOStatisticsRegisterCounter()
97 #define IOStatisticsUnregisterCounter()
98 #define IOStatisticsClientCall()
99
100 #endif /* IOKITSTATS */
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 // definitions we should get from osfmk
105
106 //typedef struct ipc_port * ipc_port_t;
107 typedef natural_t ipc_kobject_type_t;
108
109 #define IKOT_IOKIT_SPARE 27
110 #define IKOT_IOKIT_CONNECT 29
111 #define IKOT_IOKIT_OBJECT 30
112
113 extern "C" {
114
115 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
116 ipc_kobject_type_t type );
117
118 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
119
120 extern mach_port_name_t iokit_make_send_right( task_t task,
121 io_object_t obj, ipc_kobject_type_t type );
122
123 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
124
125 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
126
127 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
128
129 extern ipc_port_t master_device_port;
130
131 extern void iokit_retain_port( ipc_port_t port );
132 extern void iokit_release_port( ipc_port_t port );
133 extern void iokit_release_port_send( ipc_port_t port );
134
135 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
136
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139
140 } /* extern "C" */
141
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146
147 class IOMachPort : public OSObject
148 {
149 OSDeclareDefaultStructors(IOMachPort)
150 public:
151 OSObject * object;
152 ipc_port_t port;
153 UInt32 mscount;
154 UInt8 holdDestroy;
155
156 static IOMachPort * portForObject( OSObject * obj,
157 ipc_kobject_type_t type );
158 static bool noMoreSendersForObject( OSObject * obj,
159 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
160 static void releasePortForObject( OSObject * obj,
161 ipc_kobject_type_t type );
162 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
163
164 static OSDictionary * dictForType( ipc_kobject_type_t type );
165
166 static mach_port_name_t makeSendRightForTask( task_t task,
167 io_object_t obj, ipc_kobject_type_t type );
168
169 virtual void free() APPLE_KEXT_OVERRIDE;
170 };
171
172 #define super OSObject
173 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
174
175 static IOLock * gIOObjectPortLock;
176
177 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
178
179 // not in dictForType() for debugging ease
180 static OSDictionary * gIOObjectPorts;
181 static OSDictionary * gIOConnectPorts;
182
183 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
184 {
185 OSDictionary ** dict;
186
187 if( IKOT_IOKIT_OBJECT == type )
188 dict = &gIOObjectPorts;
189 else if( IKOT_IOKIT_CONNECT == type )
190 dict = &gIOConnectPorts;
191 else
192 return( 0 );
193
194 if( 0 == *dict)
195 *dict = OSDictionary::withCapacity( 1 );
196
197 return( *dict );
198 }
199
200 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
201 ipc_kobject_type_t type )
202 {
203 IOMachPort * inst = 0;
204 OSDictionary * dict;
205
206 IOTakeLock( gIOObjectPortLock);
207
208 do {
209
210 dict = dictForType( type );
211 if( !dict)
212 continue;
213
214 if( (inst = (IOMachPort *)
215 dict->getObject( (const OSSymbol *) obj ))) {
216 inst->mscount++;
217 inst->retain();
218 continue;
219 }
220
221 inst = new IOMachPort;
222 if( inst && !inst->init()) {
223 inst = 0;
224 continue;
225 }
226
227 inst->port = iokit_alloc_object_port( obj, type );
228 if( inst->port) {
229 // retains obj
230 dict->setObject( (const OSSymbol *) obj, inst );
231 inst->mscount++;
232
233 } else {
234 inst->release();
235 inst = 0;
236 }
237
238 } while( false );
239
240 IOUnlock( gIOObjectPortLock);
241
242 return( inst );
243 }
244
245 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
246 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
247 {
248 OSDictionary * dict;
249 IOMachPort * machPort;
250 bool destroyed = true;
251
252 IOTakeLock( gIOObjectPortLock);
253
254 if( (dict = dictForType( type ))) {
255 obj->retain();
256
257 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
258 if( machPort) {
259 destroyed = (machPort->mscount <= *mscount);
260 if( destroyed)
261 dict->removeObject( (const OSSymbol *) obj );
262 else
263 *mscount = machPort->mscount;
264 }
265 obj->release();
266 }
267
268 IOUnlock( gIOObjectPortLock);
269
270 return( destroyed );
271 }
272
273 void IOMachPort::releasePortForObject( OSObject * obj,
274 ipc_kobject_type_t type )
275 {
276 OSDictionary * dict;
277 IOMachPort * machPort;
278
279 IOTakeLock( gIOObjectPortLock);
280
281 if( (dict = dictForType( type ))) {
282 obj->retain();
283 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
284 if( machPort && !machPort->holdDestroy)
285 dict->removeObject( (const OSSymbol *) obj );
286 obj->release();
287 }
288
289 IOUnlock( gIOObjectPortLock);
290 }
291
292 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
293 {
294 OSDictionary * dict;
295 IOMachPort * machPort;
296
297 IOLockLock( gIOObjectPortLock );
298
299 if( (dict = dictForType( type ))) {
300 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
301 if( machPort)
302 machPort->holdDestroy = true;
303 }
304
305 IOLockUnlock( gIOObjectPortLock );
306 }
307
308 void IOUserClient::destroyUserReferences( OSObject * obj )
309 {
310 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
311
312 // panther, 3160200
313 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
314
315 OSDictionary * dict;
316
317 IOTakeLock( gIOObjectPortLock);
318 obj->retain();
319
320 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
321 {
322 IOMachPort * port;
323 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
324 if (port)
325 {
326 IOUserClient * uc;
327 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
328 {
329 dict->setObject((const OSSymbol *) uc->mappings, port);
330 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
331
332 uc->mappings->release();
333 uc->mappings = 0;
334 }
335 dict->removeObject( (const OSSymbol *) obj );
336 }
337 }
338 obj->release();
339 IOUnlock( gIOObjectPortLock);
340 }
341
342 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
343 io_object_t obj, ipc_kobject_type_t type )
344 {
345 return( iokit_make_send_right( task, obj, type ));
346 }
347
348 void IOMachPort::free( void )
349 {
350 if( port)
351 iokit_destroy_object_port( port );
352 super::free();
353 }
354
355 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
356
357 class IOUserIterator : public OSIterator
358 {
359 OSDeclareDefaultStructors(IOUserIterator)
360 public:
361 OSObject * userIteratorObject;
362 IOLock * lock;
363
364 static IOUserIterator * withIterator(OSIterator * iter);
365 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
366 virtual void free() APPLE_KEXT_OVERRIDE;
367
368 virtual void reset() APPLE_KEXT_OVERRIDE;
369 virtual bool isValid() APPLE_KEXT_OVERRIDE;
370 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
371 };
372
373 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
374
375 class IOUserNotification : public IOUserIterator
376 {
377 OSDeclareDefaultStructors(IOUserNotification)
378
379 #define holdNotify userIteratorObject
380
381 public:
382
383 virtual void free() APPLE_KEXT_OVERRIDE;
384
385 virtual void setNotification( IONotifier * obj );
386
387 virtual void reset() APPLE_KEXT_OVERRIDE;
388 virtual bool isValid() APPLE_KEXT_OVERRIDE;
389 };
390
391 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
392
393 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
394
395 IOUserIterator *
396 IOUserIterator::withIterator(OSIterator * iter)
397 {
398 IOUserIterator * me;
399
400 if (!iter) return (0);
401
402 me = new IOUserIterator;
403 if (me && !me->init())
404 {
405 me->release();
406 me = 0;
407 }
408 if (!me) return me;
409 me->userIteratorObject = iter;
410
411 return (me);
412 }
413
414 bool
415 IOUserIterator::init( void )
416 {
417 if (!OSObject::init()) return (false);
418
419 lock = IOLockAlloc();
420 if( !lock)
421 return( false );
422
423 return (true);
424 }
425
426 void
427 IOUserIterator::free()
428 {
429 if (userIteratorObject) userIteratorObject->release();
430 if (lock) IOLockFree(lock);
431 OSObject::free();
432 }
433
434 void
435 IOUserIterator::reset()
436 {
437 IOLockLock(lock);
438 assert(OSDynamicCast(OSIterator, userIteratorObject));
439 ((OSIterator *)userIteratorObject)->reset();
440 IOLockUnlock(lock);
441 }
442
443 bool
444 IOUserIterator::isValid()
445 {
446 bool ret;
447
448 IOLockLock(lock);
449 assert(OSDynamicCast(OSIterator, userIteratorObject));
450 ret = ((OSIterator *)userIteratorObject)->isValid();
451 IOLockUnlock(lock);
452
453 return (ret);
454 }
455
456 OSObject *
457 IOUserIterator::getNextObject()
458 {
459 OSObject * ret;
460
461 IOLockLock(lock);
462 assert(OSDynamicCast(OSIterator, userIteratorObject));
463 ret = ((OSIterator *)userIteratorObject)->getNextObject();
464 IOLockUnlock(lock);
465
466 return (ret);
467 }
468
469 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
470 extern "C" {
471
472 // functions called from osfmk/device/iokit_rpc.c
473
474 void
475 iokit_add_reference( io_object_t obj )
476 {
477 if( obj)
478 obj->retain();
479 }
480
481 void
482 iokit_remove_reference( io_object_t obj )
483 {
484 if( obj)
485 obj->release();
486 }
487
488 ipc_port_t
489 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
490 {
491 IOMachPort * machPort;
492 ipc_port_t port;
493
494 if( (machPort = IOMachPort::portForObject( obj, type ))) {
495
496 port = machPort->port;
497 if( port)
498 iokit_retain_port( port );
499
500 machPort->release();
501
502 } else
503 port = NULL;
504
505 return( port );
506 }
507
508 kern_return_t
509 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
510 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
511 {
512 IOUserClient * client;
513 IOMemoryMap * map;
514 IOUserNotification * notify;
515
516 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
517 return( kIOReturnNotReady );
518
519 if( IKOT_IOKIT_CONNECT == type)
520 {
521 if( (client = OSDynamicCast( IOUserClient, obj ))) {
522 IOStatisticsClientCall();
523 client->clientDied();
524 }
525 }
526 else if( IKOT_IOKIT_OBJECT == type)
527 {
528 if( (map = OSDynamicCast( IOMemoryMap, obj )))
529 map->taskDied();
530 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
531 notify->setNotification( 0 );
532 }
533
534 return( kIOReturnSuccess );
535 }
536
537 }; /* extern "C" */
538
539 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
540
541 class IOServiceUserNotification : public IOUserNotification
542 {
543 OSDeclareDefaultStructors(IOServiceUserNotification)
544
545 struct PingMsg {
546 mach_msg_header_t msgHdr;
547 OSNotificationHeader64 notifyHeader;
548 };
549
550 enum { kMaxOutstanding = 1024 };
551
552 PingMsg * pingMsg;
553 vm_size_t msgSize;
554 OSArray * newSet;
555 OSObject * lastEntry;
556 bool armed;
557
558 public:
559
560 virtual bool init( mach_port_t port, natural_t type,
561 void * reference, vm_size_t referenceSize,
562 bool clientIs64 );
563 virtual void free() APPLE_KEXT_OVERRIDE;
564
565 static bool _handler( void * target,
566 void * ref, IOService * newService, IONotifier * notifier );
567 virtual bool handler( void * ref, IOService * newService );
568
569 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
570 };
571
572 class IOServiceMessageUserNotification : public IOUserNotification
573 {
574 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
575
576 struct PingMsg {
577 mach_msg_header_t msgHdr;
578 mach_msg_body_t msgBody;
579 mach_msg_port_descriptor_t ports[1];
580 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
581 };
582
583 PingMsg * pingMsg;
584 vm_size_t msgSize;
585 uint8_t clientIs64;
586 int owningPID;
587
588 public:
589
590 virtual bool init( mach_port_t port, natural_t type,
591 void * reference, vm_size_t referenceSize,
592 vm_size_t extraSize,
593 bool clientIs64 );
594
595 virtual void free() APPLE_KEXT_OVERRIDE;
596
597 static IOReturn _handler( void * target, void * ref,
598 UInt32 messageType, IOService * provider,
599 void * messageArgument, vm_size_t argSize );
600 virtual IOReturn handler( void * ref,
601 UInt32 messageType, IOService * provider,
602 void * messageArgument, vm_size_t argSize );
603
604 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
605 };
606
607 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
608
609 #undef super
610 #define super IOUserIterator
611 OSDefineMetaClass( IOUserNotification, IOUserIterator )
612 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
613
614 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
615
616 void IOUserNotification::free( void )
617 {
618 if (holdNotify)
619 {
620 assert(OSDynamicCast(IONotifier, holdNotify));
621 ((IONotifier *)holdNotify)->remove();
622 holdNotify = 0;
623 }
624 // can't be in handler now
625
626 super::free();
627 }
628
629
630 void IOUserNotification::setNotification( IONotifier * notify )
631 {
632 OSObject * previousNotify;
633
634 IOLockLock( gIOObjectPortLock);
635
636 previousNotify = holdNotify;
637 holdNotify = notify;
638
639 IOLockUnlock( gIOObjectPortLock);
640
641 if( previousNotify)
642 {
643 assert(OSDynamicCast(IONotifier, previousNotify));
644 ((IONotifier *)previousNotify)->remove();
645 }
646 }
647
648 void IOUserNotification::reset()
649 {
650 // ?
651 }
652
653 bool IOUserNotification::isValid()
654 {
655 return( true );
656 }
657
658 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
659
660 #undef super
661 #define super IOUserNotification
662 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
663
664 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
665
666 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
667 void * reference, vm_size_t referenceSize,
668 bool clientIs64 )
669 {
670 if( !super::init())
671 return( false );
672
673 newSet = OSArray::withCapacity( 1 );
674 if( !newSet)
675 return( false );
676
677 if (referenceSize > sizeof(OSAsyncReference64))
678 return( false );
679
680 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
681 pingMsg = (PingMsg *) IOMalloc( msgSize);
682 if( !pingMsg)
683 return( false );
684
685 bzero( pingMsg, msgSize);
686
687 pingMsg->msgHdr.msgh_remote_port = port;
688 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
689 MACH_MSG_TYPE_COPY_SEND /*remote*/,
690 MACH_MSG_TYPE_MAKE_SEND /*local*/);
691 pingMsg->msgHdr.msgh_size = msgSize;
692 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
693
694 pingMsg->notifyHeader.size = 0;
695 pingMsg->notifyHeader.type = type;
696 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
697
698 return( true );
699 }
700
701 void IOServiceUserNotification::free( void )
702 {
703 PingMsg * _pingMsg;
704 vm_size_t _msgSize;
705 OSArray * _newSet;
706 OSObject * _lastEntry;
707
708 _pingMsg = pingMsg;
709 _msgSize = msgSize;
710 _lastEntry = lastEntry;
711 _newSet = newSet;
712
713 super::free();
714
715 if( _pingMsg && _msgSize) {
716 if (_pingMsg->msgHdr.msgh_remote_port) {
717 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
718 }
719 IOFree(_pingMsg, _msgSize);
720 }
721
722 if( _lastEntry)
723 _lastEntry->release();
724
725 if( _newSet)
726 _newSet->release();
727 }
728
729 bool IOServiceUserNotification::_handler( void * target,
730 void * ref, IOService * newService, IONotifier * notifier )
731 {
732 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
733 }
734
735 bool IOServiceUserNotification::handler( void * ref,
736 IOService * newService )
737 {
738 unsigned int count;
739 kern_return_t kr;
740 ipc_port_t port = NULL;
741 bool sendPing = false;
742
743 IOTakeLock( lock );
744
745 count = newSet->getCount();
746 if( count < kMaxOutstanding) {
747
748 newSet->setObject( newService );
749 if( (sendPing = (armed && (0 == count))))
750 armed = false;
751 }
752
753 IOUnlock( lock );
754
755 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
756 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
757
758 if( sendPing) {
759 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
760 pingMsg->msgHdr.msgh_local_port = port;
761 else
762 pingMsg->msgHdr.msgh_local_port = NULL;
763
764 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
765 pingMsg->msgHdr.msgh_size,
766 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
767 0);
768 if( port)
769 iokit_release_port( port );
770
771 if( KERN_SUCCESS != kr)
772 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
773 }
774
775 return( true );
776 }
777
778 OSObject * IOServiceUserNotification::getNextObject()
779 {
780 unsigned int count;
781 OSObject * result;
782
783 IOTakeLock( lock );
784
785 if( lastEntry)
786 lastEntry->release();
787
788 count = newSet->getCount();
789 if( count ) {
790 result = newSet->getObject( count - 1 );
791 result->retain();
792 newSet->removeObject( count - 1);
793 } else {
794 result = 0;
795 armed = true;
796 }
797 lastEntry = result;
798
799 IOUnlock( lock );
800
801 return( result );
802 }
803
804 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
805
806 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
807
808 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
809
810 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
811 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
812 bool client64 )
813 {
814 if( !super::init())
815 return( false );
816
817 if (referenceSize > sizeof(OSAsyncReference64))
818 return( false );
819
820 clientIs64 = client64;
821
822 owningPID = proc_selfpid();
823
824 extraSize += sizeof(IOServiceInterestContent64);
825 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
826 pingMsg = (PingMsg *) IOMalloc( msgSize);
827 if( !pingMsg)
828 return( false );
829
830 bzero( pingMsg, msgSize);
831
832 pingMsg->msgHdr.msgh_remote_port = port;
833 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
834 | MACH_MSGH_BITS(
835 MACH_MSG_TYPE_COPY_SEND /*remote*/,
836 MACH_MSG_TYPE_MAKE_SEND /*local*/);
837 pingMsg->msgHdr.msgh_size = msgSize;
838 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
839
840 pingMsg->msgBody.msgh_descriptor_count = 1;
841
842 pingMsg->ports[0].name = 0;
843 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
844 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
845
846 pingMsg->notifyHeader.size = extraSize;
847 pingMsg->notifyHeader.type = type;
848 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
849
850 return( true );
851 }
852
853 void IOServiceMessageUserNotification::free( void )
854 {
855 PingMsg * _pingMsg;
856 vm_size_t _msgSize;
857
858 _pingMsg = pingMsg;
859 _msgSize = msgSize;
860
861 super::free();
862
863 if( _pingMsg && _msgSize) {
864 if (_pingMsg->msgHdr.msgh_remote_port) {
865 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
866 }
867 IOFree( _pingMsg, _msgSize);
868 }
869 }
870
871 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
872 UInt32 messageType, IOService * provider,
873 void * argument, vm_size_t argSize )
874 {
875 return( ((IOServiceMessageUserNotification *) target)->handler(
876 ref, messageType, provider, argument, argSize));
877 }
878
879 IOReturn IOServiceMessageUserNotification::handler( void * ref,
880 UInt32 messageType, IOService * provider,
881 void * messageArgument, vm_size_t argSize )
882 {
883 kern_return_t kr;
884 ipc_port_t thisPort, providerPort;
885 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
886 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
887 // == pingMsg->notifyHeader.content;
888
889 if (kIOMessageCopyClientID == messageType)
890 {
891 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
892 return (kIOReturnSuccess);
893 }
894
895 data->messageType = messageType;
896
897 if( argSize == 0)
898 {
899 data->messageArgument[0] = (io_user_reference_t) messageArgument;
900 if (clientIs64)
901 argSize = sizeof(data->messageArgument[0]);
902 else
903 {
904 data->messageArgument[0] |= (data->messageArgument[0] << 32);
905 argSize = sizeof(uint32_t);
906 }
907 }
908 else
909 {
910 if( argSize > kIOUserNotifyMaxMessageSize)
911 argSize = kIOUserNotifyMaxMessageSize;
912 bcopy( messageArgument, data->messageArgument, argSize );
913 }
914
915 // adjust message size for ipc restrictions
916 natural_t type;
917 type = pingMsg->notifyHeader.type;
918 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
919 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
920 pingMsg->notifyHeader.type = type;
921 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
922
923 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
924 + sizeof( IOServiceInterestContent64 )
925 - sizeof( data->messageArgument)
926 + argSize;
927
928 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
929 pingMsg->ports[0].name = providerPort;
930 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
931 pingMsg->msgHdr.msgh_local_port = thisPort;
932 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
933 pingMsg->msgHdr.msgh_size,
934 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
935 0);
936 if( thisPort)
937 iokit_release_port( thisPort );
938 if( providerPort)
939 iokit_release_port( providerPort );
940
941 if( KERN_SUCCESS != kr)
942 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
943
944 return( kIOReturnSuccess );
945 }
946
947 OSObject * IOServiceMessageUserNotification::getNextObject()
948 {
949 return( 0 );
950 }
951
952 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
953
954 #undef super
955 #define super IOService
956 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
957
958 void IOUserClient::initialize( void )
959 {
960 gIOObjectPortLock = IOLockAlloc();
961
962 assert( gIOObjectPortLock );
963 }
964
965 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
966 mach_port_t wakePort,
967 void *callback, void *refcon)
968 {
969 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
970 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
971 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
972 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
973 }
974
975 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
976 mach_port_t wakePort,
977 mach_vm_address_t callback, io_user_reference_t refcon)
978 {
979 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
980 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
981 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
982 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
983 }
984
985 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
986 mach_port_t wakePort,
987 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
988 {
989 setAsyncReference64(asyncRef, wakePort, callback, refcon);
990 if (vm_map_is_64bit(get_task_map(task))) {
991 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
992 }
993 }
994
995 static OSDictionary * CopyConsoleUser(UInt32 uid)
996 {
997 OSArray * array;
998 OSDictionary * user = 0;
999
1000 if ((array = OSDynamicCast(OSArray,
1001 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1002 {
1003 for (unsigned int idx = 0;
1004 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1005 idx++) {
1006 OSNumber * num;
1007
1008 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1009 && (uid == num->unsigned32BitValue())) {
1010 user->retain();
1011 break;
1012 }
1013 }
1014 array->release();
1015 }
1016 return user;
1017 }
1018
1019 static OSDictionary * CopyUserOnConsole(void)
1020 {
1021 OSArray * array;
1022 OSDictionary * user = 0;
1023
1024 if ((array = OSDynamicCast(OSArray,
1025 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1026 {
1027 for (unsigned int idx = 0;
1028 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1029 idx++)
1030 {
1031 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1032 {
1033 user->retain();
1034 break;
1035 }
1036 }
1037 array->release();
1038 }
1039 return (user);
1040 }
1041
1042 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1043 IOService * service )
1044 {
1045 proc_t p;
1046
1047 p = (proc_t) get_bsdtask_info(task);
1048 if (p)
1049 {
1050 uint64_t authorizationID;
1051
1052 authorizationID = proc_uniqueid(p);
1053 if (authorizationID)
1054 {
1055 if (service->getAuthorizationID() == authorizationID)
1056 {
1057 return (kIOReturnSuccess);
1058 }
1059 }
1060 }
1061
1062 return (kIOReturnNotPermitted);
1063 }
1064
1065 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1066 const char * privilegeName )
1067 {
1068 kern_return_t kr;
1069 security_token_t token;
1070 mach_msg_type_number_t count;
1071 task_t task;
1072 OSDictionary * user;
1073 bool secureConsole;
1074
1075
1076 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1077 sizeof(kIOClientPrivilegeForeground)))
1078 {
1079 if (task_is_gpu_denied(current_task()))
1080 return (kIOReturnNotPrivileged);
1081 else
1082 return (kIOReturnSuccess);
1083 }
1084
1085 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1086 sizeof(kIOClientPrivilegeConsoleSession)))
1087 {
1088 kauth_cred_t cred;
1089 proc_t p;
1090
1091 task = (task_t) securityToken;
1092 if (!task)
1093 task = current_task();
1094 p = (proc_t) get_bsdtask_info(task);
1095 kr = kIOReturnNotPrivileged;
1096
1097 if (p && (cred = kauth_cred_proc_ref(p)))
1098 {
1099 user = CopyUserOnConsole();
1100 if (user)
1101 {
1102 OSNumber * num;
1103 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1104 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1105 {
1106 kr = kIOReturnSuccess;
1107 }
1108 user->release();
1109 }
1110 kauth_cred_unref(&cred);
1111 }
1112 return (kr);
1113 }
1114
1115 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1116 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1117 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1118 else
1119 task = (task_t)securityToken;
1120
1121 count = TASK_SECURITY_TOKEN_COUNT;
1122 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1123
1124 if (KERN_SUCCESS != kr)
1125 {}
1126 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1127 sizeof(kIOClientPrivilegeAdministrator))) {
1128 if (0 != token.val[0])
1129 kr = kIOReturnNotPrivileged;
1130 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1131 sizeof(kIOClientPrivilegeLocalUser))) {
1132 user = CopyConsoleUser(token.val[0]);
1133 if ( user )
1134 user->release();
1135 else
1136 kr = kIOReturnNotPrivileged;
1137 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1138 sizeof(kIOClientPrivilegeConsoleUser))) {
1139 user = CopyConsoleUser(token.val[0]);
1140 if ( user ) {
1141 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1142 kr = kIOReturnNotPrivileged;
1143 else if ( secureConsole ) {
1144 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1145 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1146 kr = kIOReturnNotPrivileged;
1147 }
1148 user->release();
1149 }
1150 else
1151 kr = kIOReturnNotPrivileged;
1152 } else
1153 kr = kIOReturnUnsupported;
1154
1155 return (kr);
1156 }
1157
1158 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1159 const char * entitlement )
1160 {
1161 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1162
1163 proc_t p = NULL;
1164 pid_t pid = 0;
1165 char procname[MAXCOMLEN + 1] = "";
1166 size_t len = 0;
1167 void *entitlements_blob = NULL;
1168 char *entitlements_data = NULL;
1169 OSObject *entitlements_obj = NULL;
1170 OSDictionary *entitlements = NULL;
1171 OSString *errorString = NULL;
1172 OSObject *value = NULL;
1173
1174 p = (proc_t)get_bsdtask_info(task);
1175 if (p == NULL)
1176 goto fail;
1177 pid = proc_pid(p);
1178 proc_name(pid, procname, (int)sizeof(procname));
1179
1180 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1181 goto fail;
1182
1183 if (len <= offsetof(CS_GenericBlob, data))
1184 goto fail;
1185
1186 /*
1187 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1188 * we'll try to parse in the kernel.
1189 */
1190 len -= offsetof(CS_GenericBlob, data);
1191 if (len > MAX_ENTITLEMENTS_LEN) {
1192 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1193 goto fail;
1194 }
1195
1196 /*
1197 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1198 * what is stored in the entitlements blob. Copy the string and
1199 * terminate it.
1200 */
1201 entitlements_data = (char *)IOMalloc(len + 1);
1202 if (entitlements_data == NULL)
1203 goto fail;
1204 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1205 entitlements_data[len] = '\0';
1206
1207 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1208 if (errorString != NULL) {
1209 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1210 goto fail;
1211 }
1212 if (entitlements_obj == NULL)
1213 goto fail;
1214
1215 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1216 if (entitlements == NULL)
1217 goto fail;
1218
1219 /* Fetch the entitlement value from the dictionary. */
1220 value = entitlements->getObject(entitlement);
1221 if (value != NULL)
1222 value->retain();
1223
1224 fail:
1225 if (entitlements_data != NULL)
1226 IOFree(entitlements_data, len + 1);
1227 if (entitlements_obj != NULL)
1228 entitlements_obj->release();
1229 if (errorString != NULL)
1230 errorString->release();
1231 return value;
1232 }
1233
1234 bool IOUserClient::init()
1235 {
1236 if (getPropertyTable() || super::init())
1237 return reserve();
1238
1239 return false;
1240 }
1241
1242 bool IOUserClient::init(OSDictionary * dictionary)
1243 {
1244 if (getPropertyTable() || super::init(dictionary))
1245 return reserve();
1246
1247 return false;
1248 }
1249
1250 bool IOUserClient::initWithTask(task_t owningTask,
1251 void * securityID,
1252 UInt32 type )
1253 {
1254 if (getPropertyTable() || super::init())
1255 return reserve();
1256
1257 return false;
1258 }
1259
1260 bool IOUserClient::initWithTask(task_t owningTask,
1261 void * securityID,
1262 UInt32 type,
1263 OSDictionary * properties )
1264 {
1265 bool ok;
1266
1267 ok = super::init( properties );
1268 ok &= initWithTask( owningTask, securityID, type );
1269
1270 return( ok );
1271 }
1272
1273 bool IOUserClient::reserve()
1274 {
1275 if(!reserved) {
1276 reserved = IONew(ExpansionData, 1);
1277 if (!reserved) {
1278 return false;
1279 }
1280 }
1281 setTerminateDefer(NULL, true);
1282 IOStatisticsRegisterCounter();
1283
1284 return true;
1285 }
1286
1287 void IOUserClient::free()
1288 {
1289 if( mappings)
1290 mappings->release();
1291
1292 IOStatisticsUnregisterCounter();
1293
1294 if (reserved)
1295 IODelete(reserved, ExpansionData, 1);
1296
1297 super::free();
1298 }
1299
1300 IOReturn IOUserClient::clientDied( void )
1301 {
1302 IOReturn ret = kIOReturnNotReady;
1303
1304 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1305 {
1306 ret = clientClose();
1307 }
1308
1309 return (ret);
1310 }
1311
1312 IOReturn IOUserClient::clientClose( void )
1313 {
1314 return( kIOReturnUnsupported );
1315 }
1316
1317 IOService * IOUserClient::getService( void )
1318 {
1319 return( 0 );
1320 }
1321
1322 IOReturn IOUserClient::registerNotificationPort(
1323 mach_port_t /* port */,
1324 UInt32 /* type */,
1325 UInt32 /* refCon */)
1326 {
1327 return( kIOReturnUnsupported);
1328 }
1329
1330 IOReturn IOUserClient::registerNotificationPort(
1331 mach_port_t port,
1332 UInt32 type,
1333 io_user_reference_t refCon)
1334 {
1335 return (registerNotificationPort(port, type, (UInt32) refCon));
1336 }
1337
1338 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1339 semaphore_t * semaphore )
1340 {
1341 return( kIOReturnUnsupported);
1342 }
1343
1344 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1345 {
1346 return( kIOReturnUnsupported);
1347 }
1348
1349 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1350 IOOptionBits * options,
1351 IOMemoryDescriptor ** memory )
1352 {
1353 return( kIOReturnUnsupported);
1354 }
1355
1356 #if !__LP64__
1357 IOMemoryMap * IOUserClient::mapClientMemory(
1358 IOOptionBits type,
1359 task_t task,
1360 IOOptionBits mapFlags,
1361 IOVirtualAddress atAddress )
1362 {
1363 return (NULL);
1364 }
1365 #endif
1366
1367 IOMemoryMap * IOUserClient::mapClientMemory64(
1368 IOOptionBits type,
1369 task_t task,
1370 IOOptionBits mapFlags,
1371 mach_vm_address_t atAddress )
1372 {
1373 IOReturn err;
1374 IOOptionBits options = 0;
1375 IOMemoryDescriptor * memory;
1376 IOMemoryMap * map = 0;
1377
1378 err = clientMemoryForType( (UInt32) type, &options, &memory );
1379
1380 if( memory && (kIOReturnSuccess == err)) {
1381
1382 options = (options & ~kIOMapUserOptionsMask)
1383 | (mapFlags & kIOMapUserOptionsMask);
1384 map = memory->createMappingInTask( task, atAddress, options );
1385 memory->release();
1386 }
1387
1388 return( map );
1389 }
1390
1391 IOReturn IOUserClient::exportObjectToClient(task_t task,
1392 OSObject *obj, io_object_t *clientObj)
1393 {
1394 mach_port_name_t name;
1395
1396 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1397
1398 *(mach_port_name_t *)clientObj = name;
1399 return kIOReturnSuccess;
1400 }
1401
1402 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1403 {
1404 return( 0 );
1405 }
1406
1407 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1408 {
1409 return( 0 );
1410 }
1411
1412 IOExternalMethod * IOUserClient::
1413 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1414 {
1415 IOExternalMethod *method = getExternalMethodForIndex(index);
1416
1417 if (method)
1418 *targetP = (IOService *) method->object;
1419
1420 return method;
1421 }
1422
1423 IOExternalAsyncMethod * IOUserClient::
1424 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1425 {
1426 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1427
1428 if (method)
1429 *targetP = (IOService *) method->object;
1430
1431 return method;
1432 }
1433
1434 IOExternalTrap * IOUserClient::
1435 getExternalTrapForIndex(UInt32 index)
1436 {
1437 return NULL;
1438 }
1439
1440 IOExternalTrap * IOUserClient::
1441 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1442 {
1443 IOExternalTrap *trap = getExternalTrapForIndex(index);
1444
1445 if (trap) {
1446 *targetP = trap->object;
1447 }
1448
1449 return trap;
1450 }
1451
1452 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1453 {
1454 mach_port_t port;
1455 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1456
1457 if (MACH_PORT_NULL != port)
1458 iokit_release_port_send(port);
1459
1460 return (kIOReturnSuccess);
1461 }
1462
1463 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1464 {
1465 if (MACH_PORT_NULL != port)
1466 iokit_release_port_send(port);
1467
1468 return (kIOReturnSuccess);
1469 }
1470
1471 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1472 IOReturn result, void *args[], UInt32 numArgs)
1473 {
1474 OSAsyncReference64 reference64;
1475 io_user_reference_t args64[kMaxAsyncArgs];
1476 unsigned int idx;
1477
1478 if (numArgs > kMaxAsyncArgs)
1479 return kIOReturnMessageTooLarge;
1480
1481 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1482 reference64[idx] = REF64(reference[idx]);
1483
1484 for (idx = 0; idx < numArgs; idx++)
1485 args64[idx] = REF64(args[idx]);
1486
1487 return (sendAsyncResult64(reference64, result, args64, numArgs));
1488 }
1489
1490 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1491 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1492 {
1493 return _sendAsyncResult64(reference, result, args, numArgs, options);
1494 }
1495
1496 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1497 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1498 {
1499 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1500 }
1501
1502 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1503 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1504 {
1505 struct ReplyMsg
1506 {
1507 mach_msg_header_t msgHdr;
1508 union
1509 {
1510 struct
1511 {
1512 OSNotificationHeader notifyHdr;
1513 IOAsyncCompletionContent asyncContent;
1514 uint32_t args[kMaxAsyncArgs];
1515 } msg32;
1516 struct
1517 {
1518 OSNotificationHeader64 notifyHdr;
1519 IOAsyncCompletionContent asyncContent;
1520 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1521 } msg64;
1522 } m;
1523 };
1524 ReplyMsg replyMsg;
1525 mach_port_t replyPort;
1526 kern_return_t kr;
1527
1528 // If no reply port, do nothing.
1529 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1530 if (replyPort == MACH_PORT_NULL)
1531 return kIOReturnSuccess;
1532
1533 if (numArgs > kMaxAsyncArgs)
1534 return kIOReturnMessageTooLarge;
1535
1536 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1537 0 /*local*/);
1538 replyMsg.msgHdr.msgh_remote_port = replyPort;
1539 replyMsg.msgHdr.msgh_local_port = 0;
1540 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1541 if (kIOUCAsync64Flag & reference[0])
1542 {
1543 replyMsg.msgHdr.msgh_size =
1544 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1545 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1546 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1547 + numArgs * sizeof(io_user_reference_t);
1548 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1549 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1550
1551 replyMsg.m.msg64.asyncContent.result = result;
1552 if (numArgs)
1553 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1554 }
1555 else
1556 {
1557 unsigned int idx;
1558
1559 replyMsg.msgHdr.msgh_size =
1560 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1561 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1562
1563 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1564 + numArgs * sizeof(uint32_t);
1565 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1566
1567 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1568 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1569
1570 replyMsg.m.msg32.asyncContent.result = result;
1571
1572 for (idx = 0; idx < numArgs; idx++)
1573 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1574 }
1575
1576 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1577 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1578 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1579 } else {
1580 /* Fail on full queue. */
1581 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1582 replyMsg.msgHdr.msgh_size);
1583 }
1584 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1585 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1586 return kr;
1587 }
1588
1589
1590 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1591
1592 extern "C" {
1593
1594 #define CHECK(cls,obj,out) \
1595 cls * out; \
1596 if( !(out = OSDynamicCast( cls, obj))) \
1597 return( kIOReturnBadArgument )
1598
1599 #define CHECKLOCKED(cls,obj,out) \
1600 IOUserIterator * oIter; \
1601 cls * out; \
1602 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1603 return (kIOReturnBadArgument); \
1604 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1605 return (kIOReturnBadArgument)
1606
1607 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1608
1609 // Create a vm_map_copy_t or kalloc'ed data for memory
1610 // to be copied out. ipc will free after the copyout.
1611
1612 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1613 io_buf_ptr_t * buf )
1614 {
1615 kern_return_t err;
1616 vm_map_copy_t copy;
1617
1618 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1619 false /* src_destroy */, &copy);
1620
1621 assert( err == KERN_SUCCESS );
1622 if( err == KERN_SUCCESS )
1623 *buf = (char *) copy;
1624
1625 return( err );
1626 }
1627
1628 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1629
1630 /* Routine io_server_version */
1631 kern_return_t is_io_server_version(
1632 mach_port_t master_port,
1633 uint64_t *version)
1634 {
1635 *version = IOKIT_SERVER_VERSION;
1636 return (kIOReturnSuccess);
1637 }
1638
1639 /* Routine io_object_get_class */
1640 kern_return_t is_io_object_get_class(
1641 io_object_t object,
1642 io_name_t className )
1643 {
1644 const OSMetaClass* my_obj = NULL;
1645 const char * my_class_name = NULL;
1646
1647 if( !object)
1648 return( kIOReturnBadArgument );
1649
1650 if ( !my_class_name ) {
1651 my_obj = object->getMetaClass();
1652 if (!my_obj) {
1653 return (kIOReturnNotFound);
1654 }
1655
1656 my_class_name = my_obj->getClassName();
1657 }
1658
1659 strlcpy( className, my_class_name, sizeof(io_name_t));
1660
1661 return( kIOReturnSuccess );
1662 }
1663
1664 /* Routine io_object_get_superclass */
1665 kern_return_t is_io_object_get_superclass(
1666 mach_port_t master_port,
1667 io_name_t obj_name,
1668 io_name_t class_name)
1669 {
1670 const OSMetaClass* my_obj = NULL;
1671 const OSMetaClass* superclass = NULL;
1672 const OSSymbol *my_name = NULL;
1673 const char *my_cstr = NULL;
1674
1675 if (!obj_name || !class_name)
1676 return (kIOReturnBadArgument);
1677
1678 if( master_port != master_device_port)
1679 return( kIOReturnNotPrivileged);
1680
1681 my_name = OSSymbol::withCString(obj_name);
1682
1683 if (my_name) {
1684 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1685 my_name->release();
1686 }
1687 if (my_obj) {
1688 superclass = my_obj->getSuperClass();
1689 }
1690
1691 if (!superclass) {
1692 return( kIOReturnNotFound );
1693 }
1694
1695 my_cstr = superclass->getClassName();
1696
1697 if (my_cstr) {
1698 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1699 return( kIOReturnSuccess );
1700 }
1701 return (kIOReturnNotFound);
1702 }
1703
1704 /* Routine io_object_get_bundle_identifier */
1705 kern_return_t is_io_object_get_bundle_identifier(
1706 mach_port_t master_port,
1707 io_name_t obj_name,
1708 io_name_t bundle_name)
1709 {
1710 const OSMetaClass* my_obj = NULL;
1711 const OSSymbol *my_name = NULL;
1712 const OSSymbol *identifier = NULL;
1713 const char *my_cstr = NULL;
1714
1715 if (!obj_name || !bundle_name)
1716 return (kIOReturnBadArgument);
1717
1718 if( master_port != master_device_port)
1719 return( kIOReturnNotPrivileged);
1720
1721 my_name = OSSymbol::withCString(obj_name);
1722
1723 if (my_name) {
1724 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1725 my_name->release();
1726 }
1727
1728 if (my_obj) {
1729 identifier = my_obj->getKmodName();
1730 }
1731 if (!identifier) {
1732 return( kIOReturnNotFound );
1733 }
1734
1735 my_cstr = identifier->getCStringNoCopy();
1736 if (my_cstr) {
1737 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1738 return( kIOReturnSuccess );
1739 }
1740
1741 return (kIOReturnBadArgument);
1742 }
1743
1744 /* Routine io_object_conforms_to */
1745 kern_return_t is_io_object_conforms_to(
1746 io_object_t object,
1747 io_name_t className,
1748 boolean_t *conforms )
1749 {
1750 if( !object)
1751 return( kIOReturnBadArgument );
1752
1753 *conforms = (0 != object->metaCast( className ));
1754
1755 return( kIOReturnSuccess );
1756 }
1757
1758 /* Routine io_object_get_retain_count */
1759 kern_return_t is_io_object_get_retain_count(
1760 io_object_t object,
1761 uint32_t *retainCount )
1762 {
1763 if( !object)
1764 return( kIOReturnBadArgument );
1765
1766 *retainCount = object->getRetainCount();
1767 return( kIOReturnSuccess );
1768 }
1769
1770 /* Routine io_iterator_next */
1771 kern_return_t is_io_iterator_next(
1772 io_object_t iterator,
1773 io_object_t *object )
1774 {
1775 IOReturn ret;
1776 OSObject * obj;
1777
1778 CHECK( OSIterator, iterator, iter );
1779
1780 obj = iter->getNextObject();
1781 if( obj) {
1782 obj->retain();
1783 *object = obj;
1784 ret = kIOReturnSuccess;
1785 } else
1786 ret = kIOReturnNoDevice;
1787
1788 return (ret);
1789 }
1790
1791 /* Routine io_iterator_reset */
1792 kern_return_t is_io_iterator_reset(
1793 io_object_t iterator )
1794 {
1795 CHECK( OSIterator, iterator, iter );
1796
1797 iter->reset();
1798
1799 return( kIOReturnSuccess );
1800 }
1801
1802 /* Routine io_iterator_is_valid */
1803 kern_return_t is_io_iterator_is_valid(
1804 io_object_t iterator,
1805 boolean_t *is_valid )
1806 {
1807 CHECK( OSIterator, iterator, iter );
1808
1809 *is_valid = iter->isValid();
1810
1811 return( kIOReturnSuccess );
1812 }
1813
1814
1815 static kern_return_t internal_io_service_match_property_table(
1816 io_service_t _service,
1817 const char * matching,
1818 mach_msg_type_number_t matching_size,
1819 boolean_t *matches)
1820 {
1821 CHECK( IOService, _service, service );
1822
1823 kern_return_t kr;
1824 OSObject * obj;
1825 OSDictionary * dict;
1826
1827 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1828 : OSUnserializeXML(matching);
1829 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1830
1831 *matches = service->passiveMatch( dict );
1832 kr = kIOReturnSuccess;
1833 } else
1834 kr = kIOReturnBadArgument;
1835
1836 if( obj)
1837 obj->release();
1838
1839 return( kr );
1840 }
1841
1842 /* Routine io_service_match_property_table */
1843 kern_return_t is_io_service_match_property_table(
1844 io_service_t service,
1845 io_string_t matching,
1846 boolean_t *matches )
1847 {
1848 return (internal_io_service_match_property_table(service, matching, 0, matches));
1849 }
1850
1851
1852 /* Routine io_service_match_property_table_ool */
1853 kern_return_t is_io_service_match_property_table_ool(
1854 io_object_t service,
1855 io_buf_ptr_t matching,
1856 mach_msg_type_number_t matchingCnt,
1857 kern_return_t *result,
1858 boolean_t *matches )
1859 {
1860 kern_return_t kr;
1861 vm_offset_t data;
1862 vm_map_offset_t map_data;
1863
1864 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1865 data = CAST_DOWN(vm_offset_t, map_data);
1866
1867 if( KERN_SUCCESS == kr) {
1868 // must return success after vm_map_copyout() succeeds
1869 *result = internal_io_service_match_property_table(service,
1870 (const char *)data, matchingCnt, matches );
1871 vm_deallocate( kernel_map, data, matchingCnt );
1872 }
1873
1874 return( kr );
1875 }
1876
1877 /* Routine io_service_match_property_table_bin */
1878 kern_return_t is_io_service_match_property_table_bin(
1879 io_object_t service,
1880 io_struct_inband_t matching,
1881 mach_msg_type_number_t matchingCnt,
1882 boolean_t *matches)
1883 {
1884 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
1885 }
1886
1887 static kern_return_t internal_io_service_get_matching_services(
1888 mach_port_t master_port,
1889 const char * matching,
1890 mach_msg_type_number_t matching_size,
1891 io_iterator_t *existing )
1892 {
1893 kern_return_t kr;
1894 OSObject * obj;
1895 OSDictionary * dict;
1896
1897 if( master_port != master_device_port)
1898 return( kIOReturnNotPrivileged);
1899
1900 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1901 : OSUnserializeXML(matching);
1902 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1903 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
1904 kr = kIOReturnSuccess;
1905 } else
1906 kr = kIOReturnBadArgument;
1907
1908 if( obj)
1909 obj->release();
1910
1911 return( kr );
1912 }
1913
1914 /* Routine io_service_get_matching_services */
1915 kern_return_t is_io_service_get_matching_services(
1916 mach_port_t master_port,
1917 io_string_t matching,
1918 io_iterator_t *existing )
1919 {
1920 return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
1921 }
1922
1923 /* Routine io_service_get_matching_services_ool */
1924 kern_return_t is_io_service_get_matching_services_ool(
1925 mach_port_t master_port,
1926 io_buf_ptr_t matching,
1927 mach_msg_type_number_t matchingCnt,
1928 kern_return_t *result,
1929 io_object_t *existing )
1930 {
1931 kern_return_t kr;
1932 vm_offset_t data;
1933 vm_map_offset_t map_data;
1934
1935 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1936 data = CAST_DOWN(vm_offset_t, map_data);
1937
1938 if( KERN_SUCCESS == kr) {
1939 // must return success after vm_map_copyout() succeeds
1940 // and mig will copy out objects on success
1941 *existing = 0;
1942 *result = internal_io_service_get_matching_services(master_port,
1943 (const char *) data, matchingCnt, existing);
1944 vm_deallocate( kernel_map, data, matchingCnt );
1945 }
1946
1947 return( kr );
1948 }
1949
1950 /* Routine io_service_get_matching_services_bin */
1951 kern_return_t is_io_service_get_matching_services_bin(
1952 mach_port_t master_port,
1953 io_struct_inband_t matching,
1954 mach_msg_type_number_t matchingCnt,
1955 io_object_t *existing)
1956 {
1957 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
1958 }
1959
1960
1961 static kern_return_t internal_io_service_get_matching_service(
1962 mach_port_t master_port,
1963 const char * matching,
1964 mach_msg_type_number_t matching_size,
1965 io_service_t *service )
1966 {
1967 kern_return_t kr;
1968 OSObject * obj;
1969 OSDictionary * dict;
1970
1971 if( master_port != master_device_port)
1972 return( kIOReturnNotPrivileged);
1973
1974 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1975 : OSUnserializeXML(matching);
1976 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1977 *service = IOService::copyMatchingService( dict );
1978 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
1979 } else
1980 kr = kIOReturnBadArgument;
1981
1982 if( obj)
1983 obj->release();
1984
1985 return( kr );
1986 }
1987
1988 /* Routine io_service_get_matching_service */
1989 kern_return_t is_io_service_get_matching_service(
1990 mach_port_t master_port,
1991 io_string_t matching,
1992 io_service_t *service )
1993 {
1994 return (internal_io_service_get_matching_service(master_port, matching, 0, service));
1995 }
1996
1997 /* Routine io_service_get_matching_services_ool */
1998 kern_return_t is_io_service_get_matching_service_ool(
1999 mach_port_t master_port,
2000 io_buf_ptr_t matching,
2001 mach_msg_type_number_t matchingCnt,
2002 kern_return_t *result,
2003 io_object_t *service )
2004 {
2005 kern_return_t kr;
2006 vm_offset_t data;
2007 vm_map_offset_t map_data;
2008
2009 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2010 data = CAST_DOWN(vm_offset_t, map_data);
2011
2012 if( KERN_SUCCESS == kr) {
2013 // must return success after vm_map_copyout() succeeds
2014 // and mig will copy out objects on success
2015 *service = 0;
2016 *result = internal_io_service_get_matching_service(master_port,
2017 (const char *) data, matchingCnt, service );
2018 vm_deallocate( kernel_map, data, matchingCnt );
2019 }
2020
2021 return( kr );
2022 }
2023
2024 /* Routine io_service_get_matching_service_bin */
2025 kern_return_t is_io_service_get_matching_service_bin(
2026 mach_port_t master_port,
2027 io_struct_inband_t matching,
2028 mach_msg_type_number_t matchingCnt,
2029 io_object_t *service)
2030 {
2031 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2032 }
2033
2034 static kern_return_t internal_io_service_add_notification(
2035 mach_port_t master_port,
2036 io_name_t notification_type,
2037 const char * matching,
2038 size_t matching_size,
2039 mach_port_t port,
2040 void * reference,
2041 vm_size_t referenceSize,
2042 bool client64,
2043 io_object_t * notification )
2044 {
2045 IOServiceUserNotification * userNotify = 0;
2046 IONotifier * notify = 0;
2047 const OSSymbol * sym;
2048 OSDictionary * dict;
2049 IOReturn err;
2050 unsigned long int userMsgType;
2051
2052 if( master_port != master_device_port)
2053 return( kIOReturnNotPrivileged);
2054
2055 do {
2056 err = kIOReturnNoResources;
2057
2058 if( !(sym = OSSymbol::withCString( notification_type )))
2059 err = kIOReturnNoResources;
2060
2061 if (matching_size)
2062 {
2063 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2064 }
2065 else
2066 {
2067 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
2068 }
2069
2070 if (!dict) {
2071 err = kIOReturnBadArgument;
2072 continue;
2073 }
2074
2075 if( (sym == gIOPublishNotification)
2076 || (sym == gIOFirstPublishNotification))
2077 userMsgType = kIOServicePublishNotificationType;
2078 else if( (sym == gIOMatchedNotification)
2079 || (sym == gIOFirstMatchNotification))
2080 userMsgType = kIOServiceMatchedNotificationType;
2081 else if( sym == gIOTerminatedNotification)
2082 userMsgType = kIOServiceTerminatedNotificationType;
2083 else
2084 userMsgType = kLastIOKitNotificationType;
2085
2086 userNotify = new IOServiceUserNotification;
2087
2088 if( userNotify && !userNotify->init( port, userMsgType,
2089 reference, referenceSize, client64)) {
2090 iokit_release_port_send(port);
2091 userNotify->release();
2092 userNotify = 0;
2093 }
2094 if( !userNotify)
2095 continue;
2096
2097 notify = IOService::addMatchingNotification( sym, dict,
2098 &userNotify->_handler, userNotify );
2099 if( notify) {
2100 *notification = userNotify;
2101 userNotify->setNotification( notify );
2102 err = kIOReturnSuccess;
2103 } else
2104 err = kIOReturnUnsupported;
2105
2106 } while( false );
2107
2108 if( sym)
2109 sym->release();
2110 if( dict)
2111 dict->release();
2112
2113 return( err );
2114 }
2115
2116
2117 /* Routine io_service_add_notification */
2118 kern_return_t is_io_service_add_notification(
2119 mach_port_t master_port,
2120 io_name_t notification_type,
2121 io_string_t matching,
2122 mach_port_t port,
2123 io_async_ref_t reference,
2124 mach_msg_type_number_t referenceCnt,
2125 io_object_t * notification )
2126 {
2127 return (internal_io_service_add_notification(master_port, notification_type,
2128 matching, 0, port, &reference[0], sizeof(io_async_ref_t),
2129 false, notification));
2130 }
2131
2132 /* Routine io_service_add_notification_64 */
2133 kern_return_t is_io_service_add_notification_64(
2134 mach_port_t master_port,
2135 io_name_t notification_type,
2136 io_string_t matching,
2137 mach_port_t wake_port,
2138 io_async_ref64_t reference,
2139 mach_msg_type_number_t referenceCnt,
2140 io_object_t *notification )
2141 {
2142 return (internal_io_service_add_notification(master_port, notification_type,
2143 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t),
2144 true, notification));
2145 }
2146
2147 /* Routine io_service_add_notification_bin */
2148 kern_return_t is_io_service_add_notification_bin
2149 (
2150 mach_port_t master_port,
2151 io_name_t notification_type,
2152 io_struct_inband_t matching,
2153 mach_msg_type_number_t matchingCnt,
2154 mach_port_t wake_port,
2155 io_async_ref_t reference,
2156 mach_msg_type_number_t referenceCnt,
2157 io_object_t *notification)
2158 {
2159 return (internal_io_service_add_notification(master_port, notification_type,
2160 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2161 false, notification));
2162 }
2163
2164 /* Routine io_service_add_notification_bin_64 */
2165 kern_return_t is_io_service_add_notification_bin_64
2166 (
2167 mach_port_t master_port,
2168 io_name_t notification_type,
2169 io_struct_inband_t matching,
2170 mach_msg_type_number_t matchingCnt,
2171 mach_port_t wake_port,
2172 io_async_ref64_t reference,
2173 mach_msg_type_number_t referenceCnt,
2174 io_object_t *notification)
2175 {
2176 return (internal_io_service_add_notification(master_port, notification_type,
2177 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2178 true, notification));
2179 }
2180
2181 static kern_return_t internal_io_service_add_notification_ool(
2182 mach_port_t master_port,
2183 io_name_t notification_type,
2184 io_buf_ptr_t matching,
2185 mach_msg_type_number_t matchingCnt,
2186 mach_port_t wake_port,
2187 void * reference,
2188 vm_size_t referenceSize,
2189 bool client64,
2190 kern_return_t *result,
2191 io_object_t *notification )
2192 {
2193 kern_return_t kr;
2194 vm_offset_t data;
2195 vm_map_offset_t map_data;
2196
2197 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2198 data = CAST_DOWN(vm_offset_t, map_data);
2199
2200 if( KERN_SUCCESS == kr) {
2201 // must return success after vm_map_copyout() succeeds
2202 // and mig will copy out objects on success
2203 *notification = 0;
2204 *result = internal_io_service_add_notification( master_port, notification_type,
2205 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2206 vm_deallocate( kernel_map, data, matchingCnt );
2207 }
2208
2209 return( kr );
2210 }
2211
2212 /* Routine io_service_add_notification_ool */
2213 kern_return_t is_io_service_add_notification_ool(
2214 mach_port_t master_port,
2215 io_name_t notification_type,
2216 io_buf_ptr_t matching,
2217 mach_msg_type_number_t matchingCnt,
2218 mach_port_t wake_port,
2219 io_async_ref_t reference,
2220 mach_msg_type_number_t referenceCnt,
2221 kern_return_t *result,
2222 io_object_t *notification )
2223 {
2224 return (internal_io_service_add_notification_ool(master_port, notification_type,
2225 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2226 false, result, notification));
2227 }
2228
2229 /* Routine io_service_add_notification_ool_64 */
2230 kern_return_t is_io_service_add_notification_ool_64(
2231 mach_port_t master_port,
2232 io_name_t notification_type,
2233 io_buf_ptr_t matching,
2234 mach_msg_type_number_t matchingCnt,
2235 mach_port_t wake_port,
2236 io_async_ref64_t reference,
2237 mach_msg_type_number_t referenceCnt,
2238 kern_return_t *result,
2239 io_object_t *notification )
2240 {
2241 return (internal_io_service_add_notification_ool(master_port, notification_type,
2242 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2243 true, result, notification));
2244 }
2245
2246 /* Routine io_service_add_notification_old */
2247 kern_return_t is_io_service_add_notification_old(
2248 mach_port_t master_port,
2249 io_name_t notification_type,
2250 io_string_t matching,
2251 mach_port_t port,
2252 // for binary compatibility reasons, this must be natural_t for ILP32
2253 natural_t ref,
2254 io_object_t * notification )
2255 {
2256 return( is_io_service_add_notification( master_port, notification_type,
2257 matching, port, &ref, 1, notification ));
2258 }
2259
2260
2261 static kern_return_t internal_io_service_add_interest_notification(
2262 io_object_t _service,
2263 io_name_t type_of_interest,
2264 mach_port_t port,
2265 void * reference,
2266 vm_size_t referenceSize,
2267 bool client64,
2268 io_object_t * notification )
2269 {
2270
2271 IOServiceMessageUserNotification * userNotify = 0;
2272 IONotifier * notify = 0;
2273 const OSSymbol * sym;
2274 IOReturn err;
2275
2276 CHECK( IOService, _service, service );
2277
2278 err = kIOReturnNoResources;
2279 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2280
2281 userNotify = new IOServiceMessageUserNotification;
2282
2283 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2284 reference, referenceSize,
2285 kIOUserNotifyMaxMessageSize,
2286 client64 )) {
2287 iokit_release_port_send(port);
2288 userNotify->release();
2289 userNotify = 0;
2290 }
2291 if( !userNotify)
2292 continue;
2293
2294 notify = service->registerInterest( sym,
2295 &userNotify->_handler, userNotify );
2296 if( notify) {
2297 *notification = userNotify;
2298 userNotify->setNotification( notify );
2299 err = kIOReturnSuccess;
2300 } else
2301 err = kIOReturnUnsupported;
2302
2303 sym->release();
2304
2305 } while( false );
2306
2307 return( err );
2308 }
2309
2310 /* Routine io_service_add_message_notification */
2311 kern_return_t is_io_service_add_interest_notification(
2312 io_object_t service,
2313 io_name_t type_of_interest,
2314 mach_port_t port,
2315 io_async_ref_t reference,
2316 mach_msg_type_number_t referenceCnt,
2317 io_object_t * notification )
2318 {
2319 return (internal_io_service_add_interest_notification(service, type_of_interest,
2320 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2321 }
2322
2323 /* Routine io_service_add_interest_notification_64 */
2324 kern_return_t is_io_service_add_interest_notification_64(
2325 io_object_t service,
2326 io_name_t type_of_interest,
2327 mach_port_t wake_port,
2328 io_async_ref64_t reference,
2329 mach_msg_type_number_t referenceCnt,
2330 io_object_t *notification )
2331 {
2332 return (internal_io_service_add_interest_notification(service, type_of_interest,
2333 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2334 }
2335
2336
2337 /* Routine io_service_acknowledge_notification */
2338 kern_return_t is_io_service_acknowledge_notification(
2339 io_object_t _service,
2340 natural_t notify_ref,
2341 natural_t response )
2342 {
2343 CHECK( IOService, _service, service );
2344
2345 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2346 (IOOptionBits) response ));
2347
2348 }
2349
2350 /* Routine io_connect_get_semaphore */
2351 kern_return_t is_io_connect_get_notification_semaphore(
2352 io_connect_t connection,
2353 natural_t notification_type,
2354 semaphore_t *semaphore )
2355 {
2356 CHECK( IOUserClient, connection, client );
2357
2358 IOStatisticsClientCall();
2359 return( client->getNotificationSemaphore( (UInt32) notification_type,
2360 semaphore ));
2361 }
2362
2363 /* Routine io_registry_get_root_entry */
2364 kern_return_t is_io_registry_get_root_entry(
2365 mach_port_t master_port,
2366 io_object_t *root )
2367 {
2368 IORegistryEntry * entry;
2369
2370 if( master_port != master_device_port)
2371 return( kIOReturnNotPrivileged);
2372
2373 entry = IORegistryEntry::getRegistryRoot();
2374 if( entry)
2375 entry->retain();
2376 *root = entry;
2377
2378 return( kIOReturnSuccess );
2379 }
2380
2381 /* Routine io_registry_create_iterator */
2382 kern_return_t is_io_registry_create_iterator(
2383 mach_port_t master_port,
2384 io_name_t plane,
2385 uint32_t options,
2386 io_object_t *iterator )
2387 {
2388 if( master_port != master_device_port)
2389 return( kIOReturnNotPrivileged);
2390
2391 *iterator = IOUserIterator::withIterator(
2392 IORegistryIterator::iterateOver(
2393 IORegistryEntry::getPlane( plane ), options ));
2394
2395 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2396 }
2397
2398 /* Routine io_registry_entry_create_iterator */
2399 kern_return_t is_io_registry_entry_create_iterator(
2400 io_object_t registry_entry,
2401 io_name_t plane,
2402 uint32_t options,
2403 io_object_t *iterator )
2404 {
2405 CHECK( IORegistryEntry, registry_entry, entry );
2406
2407 *iterator = IOUserIterator::withIterator(
2408 IORegistryIterator::iterateOver( entry,
2409 IORegistryEntry::getPlane( plane ), options ));
2410
2411 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2412 }
2413
2414 /* Routine io_registry_iterator_enter */
2415 kern_return_t is_io_registry_iterator_enter_entry(
2416 io_object_t iterator )
2417 {
2418 CHECKLOCKED( IORegistryIterator, iterator, iter );
2419
2420 IOLockLock(oIter->lock);
2421 iter->enterEntry();
2422 IOLockUnlock(oIter->lock);
2423
2424 return( kIOReturnSuccess );
2425 }
2426
2427 /* Routine io_registry_iterator_exit */
2428 kern_return_t is_io_registry_iterator_exit_entry(
2429 io_object_t iterator )
2430 {
2431 bool didIt;
2432
2433 CHECKLOCKED( IORegistryIterator, iterator, iter );
2434
2435 IOLockLock(oIter->lock);
2436 didIt = iter->exitEntry();
2437 IOLockUnlock(oIter->lock);
2438
2439 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2440 }
2441
2442 /* Routine io_registry_entry_from_path */
2443 kern_return_t is_io_registry_entry_from_path(
2444 mach_port_t master_port,
2445 io_string_t path,
2446 io_object_t *registry_entry )
2447 {
2448 IORegistryEntry * entry;
2449
2450 if( master_port != master_device_port)
2451 return( kIOReturnNotPrivileged);
2452
2453 entry = IORegistryEntry::fromPath( path );
2454
2455 *registry_entry = entry;
2456
2457 return( kIOReturnSuccess );
2458 }
2459
2460
2461 /* Routine io_registry_entry_from_path */
2462 kern_return_t is_io_registry_entry_from_path_ool(
2463 mach_port_t master_port,
2464 io_string_inband_t path,
2465 io_buf_ptr_t path_ool,
2466 mach_msg_type_number_t path_oolCnt,
2467 kern_return_t *result,
2468 io_object_t *registry_entry)
2469 {
2470 IORegistryEntry * entry;
2471 vm_map_offset_t map_data;
2472 const char * cpath;
2473 IOReturn res;
2474 kern_return_t err;
2475
2476 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2477
2478 map_data = 0;
2479 entry = 0;
2480 res = err = KERN_SUCCESS;
2481 if (path[0]) cpath = path;
2482 else
2483 {
2484 if (!path_oolCnt) return(kIOReturnBadArgument);
2485 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2486
2487 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2488 if (KERN_SUCCESS == err)
2489 {
2490 // must return success to mig after vm_map_copyout() succeeds, so result is actual
2491 cpath = CAST_DOWN(const char *, map_data);
2492 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2493 }
2494 }
2495
2496 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2497 {
2498 entry = IORegistryEntry::fromPath(cpath);
2499 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2500 }
2501
2502 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2503
2504 if (KERN_SUCCESS != err) res = err;
2505 *registry_entry = entry;
2506 *result = res;
2507
2508 return (err);
2509 }
2510
2511
2512 /* Routine io_registry_entry_in_plane */
2513 kern_return_t is_io_registry_entry_in_plane(
2514 io_object_t registry_entry,
2515 io_name_t plane,
2516 boolean_t *inPlane )
2517 {
2518 CHECK( IORegistryEntry, registry_entry, entry );
2519
2520 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2521
2522 return( kIOReturnSuccess );
2523 }
2524
2525
2526 /* Routine io_registry_entry_get_path */
2527 kern_return_t is_io_registry_entry_get_path(
2528 io_object_t registry_entry,
2529 io_name_t plane,
2530 io_string_t path )
2531 {
2532 int length;
2533 CHECK( IORegistryEntry, registry_entry, entry );
2534
2535 length = sizeof( io_string_t);
2536 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2537 return( kIOReturnSuccess );
2538 else
2539 return( kIOReturnBadArgument );
2540 }
2541
2542 /* Routine io_registry_entry_get_path */
2543 kern_return_t is_io_registry_entry_get_path_ool(
2544 io_object_t registry_entry,
2545 io_name_t plane,
2546 io_string_inband_t path,
2547 io_buf_ptr_t *path_ool,
2548 mach_msg_type_number_t *path_oolCnt)
2549 {
2550 enum { kMaxPath = 16384 };
2551 IOReturn err;
2552 int length;
2553 char * buf;
2554
2555 CHECK( IORegistryEntry, registry_entry, entry );
2556
2557 *path_ool = NULL;
2558 *path_oolCnt = 0;
2559 length = sizeof(io_string_inband_t);
2560 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2561 else
2562 {
2563 length = kMaxPath;
2564 buf = IONew(char, length);
2565 if (!buf) err = kIOReturnNoMemory;
2566 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2567 else
2568 {
2569 *path_oolCnt = length;
2570 err = copyoutkdata(buf, length, path_ool);
2571 }
2572 if (buf) IODelete(buf, char, kMaxPath);
2573 }
2574
2575 return (err);
2576 }
2577
2578
2579 /* Routine io_registry_entry_get_name */
2580 kern_return_t is_io_registry_entry_get_name(
2581 io_object_t registry_entry,
2582 io_name_t name )
2583 {
2584 CHECK( IORegistryEntry, registry_entry, entry );
2585
2586 strncpy( name, entry->getName(), sizeof( io_name_t));
2587
2588 return( kIOReturnSuccess );
2589 }
2590
2591 /* Routine io_registry_entry_get_name_in_plane */
2592 kern_return_t is_io_registry_entry_get_name_in_plane(
2593 io_object_t registry_entry,
2594 io_name_t planeName,
2595 io_name_t name )
2596 {
2597 const IORegistryPlane * plane;
2598 CHECK( IORegistryEntry, registry_entry, entry );
2599
2600 if( planeName[0])
2601 plane = IORegistryEntry::getPlane( planeName );
2602 else
2603 plane = 0;
2604
2605 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2606
2607 return( kIOReturnSuccess );
2608 }
2609
2610 /* Routine io_registry_entry_get_location_in_plane */
2611 kern_return_t is_io_registry_entry_get_location_in_plane(
2612 io_object_t registry_entry,
2613 io_name_t planeName,
2614 io_name_t location )
2615 {
2616 const IORegistryPlane * plane;
2617 CHECK( IORegistryEntry, registry_entry, entry );
2618
2619 if( planeName[0])
2620 plane = IORegistryEntry::getPlane( planeName );
2621 else
2622 plane = 0;
2623
2624 const char * cstr = entry->getLocation( plane );
2625
2626 if( cstr) {
2627 strncpy( location, cstr, sizeof( io_name_t));
2628 return( kIOReturnSuccess );
2629 } else
2630 return( kIOReturnNotFound );
2631 }
2632
2633 /* Routine io_registry_entry_get_registry_entry_id */
2634 kern_return_t is_io_registry_entry_get_registry_entry_id(
2635 io_object_t registry_entry,
2636 uint64_t *entry_id )
2637 {
2638 CHECK( IORegistryEntry, registry_entry, entry );
2639
2640 *entry_id = entry->getRegistryEntryID();
2641
2642 return (kIOReturnSuccess);
2643 }
2644
2645 /* Routine io_registry_entry_get_property */
2646 kern_return_t is_io_registry_entry_get_property_bytes(
2647 io_object_t registry_entry,
2648 io_name_t property_name,
2649 io_struct_inband_t buf,
2650 mach_msg_type_number_t *dataCnt )
2651 {
2652 OSObject * obj;
2653 OSData * data;
2654 OSString * str;
2655 OSBoolean * boo;
2656 OSNumber * off;
2657 UInt64 offsetBytes;
2658 unsigned int len = 0;
2659 const void * bytes = 0;
2660 IOReturn ret = kIOReturnSuccess;
2661
2662 CHECK( IORegistryEntry, registry_entry, entry );
2663
2664 #if CONFIG_MACF
2665 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2666 return kIOReturnNotPermitted;
2667 #endif
2668
2669 obj = entry->copyProperty(property_name);
2670 if( !obj)
2671 return( kIOReturnNoResources );
2672
2673 // One day OSData will be a common container base class
2674 // until then...
2675 if( (data = OSDynamicCast( OSData, obj ))) {
2676 len = data->getLength();
2677 bytes = data->getBytesNoCopy();
2678
2679 } else if( (str = OSDynamicCast( OSString, obj ))) {
2680 len = str->getLength() + 1;
2681 bytes = str->getCStringNoCopy();
2682
2683 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2684 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2685 bytes = boo->isTrue() ? "Yes" : "No";
2686
2687 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2688 offsetBytes = off->unsigned64BitValue();
2689 len = off->numberOfBytes();
2690 bytes = &offsetBytes;
2691 #ifdef __BIG_ENDIAN__
2692 bytes = (const void *)
2693 (((UInt32) bytes) + (sizeof( UInt64) - len));
2694 #endif
2695
2696 } else
2697 ret = kIOReturnBadArgument;
2698
2699 if( bytes) {
2700 if( *dataCnt < len)
2701 ret = kIOReturnIPCError;
2702 else {
2703 *dataCnt = len;
2704 bcopy( bytes, buf, len );
2705 }
2706 }
2707 obj->release();
2708
2709 return( ret );
2710 }
2711
2712
2713 /* Routine io_registry_entry_get_property */
2714 kern_return_t is_io_registry_entry_get_property(
2715 io_object_t registry_entry,
2716 io_name_t property_name,
2717 io_buf_ptr_t *properties,
2718 mach_msg_type_number_t *propertiesCnt )
2719 {
2720 kern_return_t err;
2721 vm_size_t len;
2722 OSObject * obj;
2723
2724 CHECK( IORegistryEntry, registry_entry, entry );
2725
2726 #if CONFIG_MACF
2727 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2728 return kIOReturnNotPermitted;
2729 #endif
2730
2731 obj = entry->copyProperty(property_name);
2732 if( !obj)
2733 return( kIOReturnNotFound );
2734
2735 OSSerialize * s = OSSerialize::withCapacity(4096);
2736 if( !s) {
2737 obj->release();
2738 return( kIOReturnNoMemory );
2739 }
2740
2741 if( obj->serialize( s )) {
2742 len = s->getLength();
2743 *propertiesCnt = len;
2744 err = copyoutkdata( s->text(), len, properties );
2745
2746 } else
2747 err = kIOReturnUnsupported;
2748
2749 s->release();
2750 obj->release();
2751
2752 return( err );
2753 }
2754
2755 /* Routine io_registry_entry_get_property_recursively */
2756 kern_return_t is_io_registry_entry_get_property_recursively(
2757 io_object_t registry_entry,
2758 io_name_t plane,
2759 io_name_t property_name,
2760 uint32_t options,
2761 io_buf_ptr_t *properties,
2762 mach_msg_type_number_t *propertiesCnt )
2763 {
2764 kern_return_t err;
2765 vm_size_t len;
2766 OSObject * obj;
2767
2768 CHECK( IORegistryEntry, registry_entry, entry );
2769
2770 #if CONFIG_MACF
2771 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2772 return kIOReturnNotPermitted;
2773 #endif
2774
2775 obj = entry->copyProperty( property_name,
2776 IORegistryEntry::getPlane( plane ), options);
2777 if( !obj)
2778 return( kIOReturnNotFound );
2779
2780 OSSerialize * s = OSSerialize::withCapacity(4096);
2781 if( !s) {
2782 obj->release();
2783 return( kIOReturnNoMemory );
2784 }
2785
2786 if( obj->serialize( s )) {
2787 len = s->getLength();
2788 *propertiesCnt = len;
2789 err = copyoutkdata( s->text(), len, properties );
2790
2791 } else
2792 err = kIOReturnUnsupported;
2793
2794 s->release();
2795 obj->release();
2796
2797 return( err );
2798 }
2799
2800 #if CONFIG_MACF
2801
2802 static kern_return_t
2803 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp)
2804 {
2805 kern_return_t err = 0;
2806 OSDictionary *filtered = NULL;
2807 OSCollectionIterator *iter = NULL;
2808 OSSymbol *key;
2809 OSObject *p;
2810 kauth_cred_t cred = kauth_cred_get();
2811
2812 if (properties == NULL)
2813 return kIOReturnUnsupported;
2814
2815 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL ||
2816 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) {
2817 err = kIOReturnNoMemory;
2818 goto out;
2819 }
2820
2821 while ((p = iter->getNextObject()) != NULL) {
2822 if ((key = OSDynamicCast(OSSymbol, p)) == NULL ||
2823 mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0)
2824 continue;
2825 filtered->setObject(key, properties->getObject(key));
2826 }
2827
2828 out:
2829 if (iter != NULL)
2830 iter->release();
2831 *filteredp = filtered;
2832 return err;
2833 }
2834
2835 #endif
2836
2837 /* Routine io_registry_entry_get_properties */
2838 kern_return_t is_io_registry_entry_get_properties(
2839 io_object_t registry_entry,
2840 io_buf_ptr_t *properties,
2841 mach_msg_type_number_t *propertiesCnt )
2842 {
2843 kern_return_t err = 0;
2844 vm_size_t len;
2845
2846 CHECK( IORegistryEntry, registry_entry, entry );
2847
2848 OSSerialize * s = OSSerialize::withCapacity(4096);
2849 if( !s)
2850 return( kIOReturnNoMemory );
2851
2852 if (!entry->serializeProperties(s))
2853 err = kIOReturnUnsupported;
2854
2855 #if CONFIG_MACF
2856 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
2857 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength());
2858 OSDictionary *filteredprops = NULL;
2859 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops);
2860 if (propobj) propobj->release();
2861
2862 if (!err) {
2863 s->clearText();
2864 if (!filteredprops->serialize(s))
2865 err = kIOReturnUnsupported;
2866 }
2867 if (filteredprops != NULL)
2868 filteredprops->release();
2869 }
2870 #endif /* CONFIG_MACF */
2871
2872 if (!err) {
2873 len = s->getLength();
2874 *propertiesCnt = len;
2875 err = copyoutkdata( s->text(), len, properties );
2876 }
2877
2878 s->release();
2879 return( err );
2880 }
2881
2882 #if CONFIG_MACF
2883
2884 struct GetPropertiesEditorRef
2885 {
2886 kauth_cred_t cred;
2887 IORegistryEntry * entry;
2888 OSCollection * root;
2889 };
2890
2891 static const OSMetaClassBase *
2892 GetPropertiesEditor(void * reference,
2893 OSSerialize * s,
2894 OSCollection * container,
2895 const OSSymbol * name,
2896 const OSMetaClassBase * value)
2897 {
2898 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
2899
2900 if (!ref->root) ref->root = container;
2901 if (ref->root == container)
2902 {
2903 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
2904 {
2905 value = 0;
2906 }
2907 }
2908 if (value) value->retain();
2909 return (value);
2910 }
2911
2912 #endif /* CONFIG_MACF */
2913
2914 /* Routine io_registry_entry_get_properties */
2915 kern_return_t is_io_registry_entry_get_properties_bin(
2916 io_object_t registry_entry,
2917 io_buf_ptr_t *properties,
2918 mach_msg_type_number_t *propertiesCnt)
2919 {
2920 kern_return_t err = kIOReturnSuccess;
2921 vm_size_t len;
2922 OSSerialize * s;
2923 OSSerialize::Editor editor = 0;
2924 void * editRef = 0;
2925
2926 CHECK(IORegistryEntry, registry_entry, entry);
2927
2928 #if CONFIG_MACF
2929 GetPropertiesEditorRef ref;
2930 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
2931 {
2932 editor = &GetPropertiesEditor;
2933 editRef = &ref;
2934 ref.cred = kauth_cred_get();
2935 ref.entry = entry;
2936 ref.root = 0;
2937 }
2938 #endif
2939
2940 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
2941 if (!s) return (kIOReturnNoMemory);
2942
2943 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
2944
2945 if (kIOReturnSuccess == err)
2946 {
2947 len = s->getLength();
2948 *propertiesCnt = len;
2949 err = copyoutkdata(s->text(), len, properties);
2950 }
2951 s->release();
2952
2953 return (err);
2954 }
2955
2956 /* Routine io_registry_entry_get_property_bin */
2957 kern_return_t is_io_registry_entry_get_property_bin(
2958 io_object_t registry_entry,
2959 io_name_t plane,
2960 io_name_t property_name,
2961 uint32_t options,
2962 io_buf_ptr_t *properties,
2963 mach_msg_type_number_t *propertiesCnt )
2964 {
2965 kern_return_t err;
2966 vm_size_t len;
2967 OSObject * obj;
2968 const OSSymbol * sym;
2969
2970 CHECK( IORegistryEntry, registry_entry, entry );
2971
2972 #if CONFIG_MACF
2973 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2974 return kIOReturnNotPermitted;
2975 #endif
2976
2977 if ((kIORegistryIterateRecursively & options) && plane[0])
2978 {
2979 obj = entry->copyProperty(property_name,
2980 IORegistryEntry::getPlane(plane), options);
2981 }
2982 else
2983 {
2984 obj = entry->copyProperty(property_name);
2985 }
2986
2987 if( !obj)
2988 return( kIOReturnNotFound );
2989
2990 sym = OSSymbol::withCString(property_name);
2991 if (sym)
2992 {
2993 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
2994 sym->release();
2995 }
2996
2997 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
2998 if( !s) {
2999 obj->release();
3000 return( kIOReturnNoMemory );
3001 }
3002
3003 if( obj->serialize( s )) {
3004 len = s->getLength();
3005 *propertiesCnt = len;
3006 err = copyoutkdata( s->text(), len, properties );
3007
3008 } else err = kIOReturnUnsupported;
3009
3010 s->release();
3011 obj->release();
3012
3013 return( err );
3014 }
3015
3016
3017 /* Routine io_registry_entry_set_properties */
3018 kern_return_t is_io_registry_entry_set_properties
3019 (
3020 io_object_t registry_entry,
3021 io_buf_ptr_t properties,
3022 mach_msg_type_number_t propertiesCnt,
3023 kern_return_t * result)
3024 {
3025 OSObject * obj;
3026 kern_return_t err;
3027 IOReturn res;
3028 vm_offset_t data;
3029 vm_map_offset_t map_data;
3030
3031 CHECK( IORegistryEntry, registry_entry, entry );
3032
3033 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3034 return( kIOReturnMessageTooLarge);
3035
3036 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3037 data = CAST_DOWN(vm_offset_t, map_data);
3038
3039 if( KERN_SUCCESS == err) {
3040
3041 // must return success after vm_map_copyout() succeeds
3042 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3043 vm_deallocate( kernel_map, data, propertiesCnt );
3044
3045 if (!obj)
3046 res = kIOReturnBadArgument;
3047 #if CONFIG_MACF
3048 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3049 registry_entry, obj))
3050 {
3051 res = kIOReturnNotPermitted;
3052 }
3053 #endif
3054 else
3055 {
3056 res = entry->setProperties( obj );
3057 }
3058
3059 if (obj)
3060 obj->release();
3061 } else
3062 res = err;
3063
3064 *result = res;
3065 return( err );
3066 }
3067
3068 /* Routine io_registry_entry_get_child_iterator */
3069 kern_return_t is_io_registry_entry_get_child_iterator(
3070 io_object_t registry_entry,
3071 io_name_t plane,
3072 io_object_t *iterator )
3073 {
3074 CHECK( IORegistryEntry, registry_entry, entry );
3075
3076 *iterator = entry->getChildIterator(
3077 IORegistryEntry::getPlane( plane ));
3078
3079 return( kIOReturnSuccess );
3080 }
3081
3082 /* Routine io_registry_entry_get_parent_iterator */
3083 kern_return_t is_io_registry_entry_get_parent_iterator(
3084 io_object_t registry_entry,
3085 io_name_t plane,
3086 io_object_t *iterator)
3087 {
3088 CHECK( IORegistryEntry, registry_entry, entry );
3089
3090 *iterator = entry->getParentIterator(
3091 IORegistryEntry::getPlane( plane ));
3092
3093 return( kIOReturnSuccess );
3094 }
3095
3096 /* Routine io_service_get_busy_state */
3097 kern_return_t is_io_service_get_busy_state(
3098 io_object_t _service,
3099 uint32_t *busyState )
3100 {
3101 CHECK( IOService, _service, service );
3102
3103 *busyState = service->getBusyState();
3104
3105 return( kIOReturnSuccess );
3106 }
3107
3108 /* Routine io_service_get_state */
3109 kern_return_t is_io_service_get_state(
3110 io_object_t _service,
3111 uint64_t *state,
3112 uint32_t *busy_state,
3113 uint64_t *accumulated_busy_time )
3114 {
3115 CHECK( IOService, _service, service );
3116
3117 *state = service->getState();
3118 *busy_state = service->getBusyState();
3119 *accumulated_busy_time = service->getAccumulatedBusyTime();
3120
3121 return( kIOReturnSuccess );
3122 }
3123
3124 /* Routine io_service_wait_quiet */
3125 kern_return_t is_io_service_wait_quiet(
3126 io_object_t _service,
3127 mach_timespec_t wait_time )
3128 {
3129 uint64_t timeoutNS;
3130
3131 CHECK( IOService, _service, service );
3132
3133 timeoutNS = wait_time.tv_sec;
3134 timeoutNS *= kSecondScale;
3135 timeoutNS += wait_time.tv_nsec;
3136
3137 return( service->waitQuiet(timeoutNS) );
3138 }
3139
3140 /* Routine io_service_request_probe */
3141 kern_return_t is_io_service_request_probe(
3142 io_object_t _service,
3143 uint32_t options )
3144 {
3145 CHECK( IOService, _service, service );
3146
3147 return( service->requestProbe( options ));
3148 }
3149
3150 /* Routine io_service_get_authorization_id */
3151 kern_return_t is_io_service_get_authorization_id(
3152 io_object_t _service,
3153 uint64_t *authorization_id )
3154 {
3155 kern_return_t kr;
3156
3157 CHECK( IOService, _service, service );
3158
3159 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3160 kIOClientPrivilegeAdministrator );
3161 if( kIOReturnSuccess != kr)
3162 return( kr );
3163
3164 *authorization_id = service->getAuthorizationID();
3165
3166 return( kr );
3167 }
3168
3169 /* Routine io_service_set_authorization_id */
3170 kern_return_t is_io_service_set_authorization_id(
3171 io_object_t _service,
3172 uint64_t authorization_id )
3173 {
3174 CHECK( IOService, _service, service );
3175
3176 return( service->setAuthorizationID( authorization_id ) );
3177 }
3178
3179 /* Routine io_service_open_ndr */
3180 kern_return_t is_io_service_open_extended(
3181 io_object_t _service,
3182 task_t owningTask,
3183 uint32_t connect_type,
3184 NDR_record_t ndr,
3185 io_buf_ptr_t properties,
3186 mach_msg_type_number_t propertiesCnt,
3187 kern_return_t * result,
3188 io_object_t *connection )
3189 {
3190 IOUserClient * client = 0;
3191 kern_return_t err = KERN_SUCCESS;
3192 IOReturn res = kIOReturnSuccess;
3193 OSDictionary * propertiesDict = 0;
3194 bool crossEndian;
3195 bool disallowAccess;
3196
3197 CHECK( IOService, _service, service );
3198
3199 if (!owningTask) return (kIOReturnBadArgument);
3200
3201 do
3202 {
3203 if (properties)
3204 {
3205 OSObject * obj;
3206 vm_offset_t data;
3207 vm_map_offset_t map_data;
3208
3209 if( propertiesCnt > sizeof(io_struct_inband_t))
3210 return( kIOReturnMessageTooLarge);
3211
3212 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3213 res = err;
3214 data = CAST_DOWN(vm_offset_t, map_data);
3215 if (KERN_SUCCESS == err)
3216 {
3217 // must return success after vm_map_copyout() succeeds
3218 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3219 vm_deallocate( kernel_map, data, propertiesCnt );
3220 propertiesDict = OSDynamicCast(OSDictionary, obj);
3221 if (!propertiesDict)
3222 {
3223 res = kIOReturnBadArgument;
3224 if (obj)
3225 obj->release();
3226 }
3227 }
3228 if (kIOReturnSuccess != res)
3229 break;
3230 }
3231
3232 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3233 if (crossEndian)
3234 {
3235 if (!propertiesDict)
3236 propertiesDict = OSDictionary::withCapacity(4);
3237 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3238 if (data)
3239 {
3240 if (propertiesDict)
3241 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3242 data->release();
3243 }
3244 }
3245
3246 res = service->newUserClient( owningTask, (void *) owningTask,
3247 connect_type, propertiesDict, &client );
3248
3249 if (propertiesDict)
3250 propertiesDict->release();
3251
3252 if (res == kIOReturnSuccess)
3253 {
3254 assert( OSDynamicCast(IOUserClient, client) );
3255
3256 disallowAccess = (crossEndian
3257 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3258 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3259 if (disallowAccess) res = kIOReturnUnsupported;
3260 #if CONFIG_MACF
3261 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3262 res = kIOReturnNotPermitted;
3263 #endif
3264 if (kIOReturnSuccess != res)
3265 {
3266 IOStatisticsClientCall();
3267 client->clientClose();
3268 client->release();
3269 client = 0;
3270 break;
3271 }
3272 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3273 client->closed = false;
3274 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3275 if (creatorName)
3276 {
3277 client->setProperty(kIOUserClientCreatorKey, creatorName);
3278 creatorName->release();
3279 }
3280 client->setTerminateDefer(service, false);
3281 }
3282 }
3283 while (false);
3284
3285 *connection = client;
3286 *result = res;
3287
3288 return (err);
3289 }
3290
3291 /* Routine io_service_close */
3292 kern_return_t is_io_service_close(
3293 io_object_t connection )
3294 {
3295 OSSet * mappings;
3296 if ((mappings = OSDynamicCast(OSSet, connection)))
3297 return( kIOReturnSuccess );
3298
3299 CHECK( IOUserClient, connection, client );
3300
3301 IOStatisticsClientCall();
3302
3303 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3304 {
3305 client->clientClose();
3306 }
3307 else
3308 {
3309 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3310 client->getRegistryEntryID(), client->getName());
3311 }
3312
3313 return( kIOReturnSuccess );
3314 }
3315
3316 /* Routine io_connect_get_service */
3317 kern_return_t is_io_connect_get_service(
3318 io_object_t connection,
3319 io_object_t *service )
3320 {
3321 IOService * theService;
3322
3323 CHECK( IOUserClient, connection, client );
3324
3325 theService = client->getService();
3326 if( theService)
3327 theService->retain();
3328
3329 *service = theService;
3330
3331 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3332 }
3333
3334 /* Routine io_connect_set_notification_port */
3335 kern_return_t is_io_connect_set_notification_port(
3336 io_object_t connection,
3337 uint32_t notification_type,
3338 mach_port_t port,
3339 uint32_t reference)
3340 {
3341 CHECK( IOUserClient, connection, client );
3342
3343 IOStatisticsClientCall();
3344 return( client->registerNotificationPort( port, notification_type,
3345 (io_user_reference_t) reference ));
3346 }
3347
3348 /* Routine io_connect_set_notification_port */
3349 kern_return_t is_io_connect_set_notification_port_64(
3350 io_object_t connection,
3351 uint32_t notification_type,
3352 mach_port_t port,
3353 io_user_reference_t reference)
3354 {
3355 CHECK( IOUserClient, connection, client );
3356
3357 IOStatisticsClientCall();
3358 return( client->registerNotificationPort( port, notification_type,
3359 reference ));
3360 }
3361
3362 /* Routine io_connect_map_memory_into_task */
3363 kern_return_t is_io_connect_map_memory_into_task
3364 (
3365 io_connect_t connection,
3366 uint32_t memory_type,
3367 task_t into_task,
3368 mach_vm_address_t *address,
3369 mach_vm_size_t *size,
3370 uint32_t flags
3371 )
3372 {
3373 IOReturn err;
3374 IOMemoryMap * map;
3375
3376 CHECK( IOUserClient, connection, client );
3377
3378 if (!into_task) return (kIOReturnBadArgument);
3379
3380 IOStatisticsClientCall();
3381 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3382
3383 if( map) {
3384 *address = map->getAddress();
3385 if( size)
3386 *size = map->getSize();
3387
3388 if( client->sharedInstance
3389 || (into_task != current_task())) {
3390 // push a name out to the task owning the map,
3391 // so we can clean up maps
3392 mach_port_name_t name __unused =
3393 IOMachPort::makeSendRightForTask(
3394 into_task, map, IKOT_IOKIT_OBJECT );
3395
3396 } else {
3397 // keep it with the user client
3398 IOLockLock( gIOObjectPortLock);
3399 if( 0 == client->mappings)
3400 client->mappings = OSSet::withCapacity(2);
3401 if( client->mappings)
3402 client->mappings->setObject( map);
3403 IOLockUnlock( gIOObjectPortLock);
3404 map->release();
3405 }
3406 err = kIOReturnSuccess;
3407
3408 } else
3409 err = kIOReturnBadArgument;
3410
3411 return( err );
3412 }
3413
3414 /* Routine is_io_connect_map_memory */
3415 kern_return_t is_io_connect_map_memory(
3416 io_object_t connect,
3417 uint32_t type,
3418 task_t task,
3419 uint32_t * mapAddr,
3420 uint32_t * mapSize,
3421 uint32_t flags )
3422 {
3423 IOReturn err;
3424 mach_vm_address_t address;
3425 mach_vm_size_t size;
3426
3427 address = SCALAR64(*mapAddr);
3428 size = SCALAR64(*mapSize);
3429
3430 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3431
3432 *mapAddr = SCALAR32(address);
3433 *mapSize = SCALAR32(size);
3434
3435 return (err);
3436 }
3437
3438 } /* extern "C" */
3439
3440 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3441 {
3442 OSIterator * iter;
3443 IOMemoryMap * map = 0;
3444
3445 IOLockLock(gIOObjectPortLock);
3446
3447 iter = OSCollectionIterator::withCollection(mappings);
3448 if(iter)
3449 {
3450 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3451 {
3452 if(mem == map->getMemoryDescriptor())
3453 {
3454 map->retain();
3455 mappings->removeObject(map);
3456 break;
3457 }
3458 }
3459 iter->release();
3460 }
3461
3462 IOLockUnlock(gIOObjectPortLock);
3463
3464 return (map);
3465 }
3466
3467 extern "C" {
3468
3469 /* Routine io_connect_unmap_memory_from_task */
3470 kern_return_t is_io_connect_unmap_memory_from_task
3471 (
3472 io_connect_t connection,
3473 uint32_t memory_type,
3474 task_t from_task,
3475 mach_vm_address_t address)
3476 {
3477 IOReturn err;
3478 IOOptionBits options = 0;
3479 IOMemoryDescriptor * memory;
3480 IOMemoryMap * map;
3481
3482 CHECK( IOUserClient, connection, client );
3483
3484 if (!from_task) return (kIOReturnBadArgument);
3485
3486 IOStatisticsClientCall();
3487 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3488
3489 if( memory && (kIOReturnSuccess == err)) {
3490
3491 options = (options & ~kIOMapUserOptionsMask)
3492 | kIOMapAnywhere | kIOMapReference;
3493
3494 map = memory->createMappingInTask( from_task, address, options );
3495 memory->release();
3496 if( map)
3497 {
3498 IOLockLock( gIOObjectPortLock);
3499 if( client->mappings)
3500 client->mappings->removeObject( map);
3501 IOLockUnlock( gIOObjectPortLock);
3502
3503 mach_port_name_t name = 0;
3504 if (from_task != current_task())
3505 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3506 if (name)
3507 {
3508 map->userClientUnmap();
3509 err = iokit_mod_send_right( from_task, name, -2 );
3510 err = kIOReturnSuccess;
3511 }
3512 else
3513 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3514 if (from_task == current_task())
3515 map->release();
3516 }
3517 else
3518 err = kIOReturnBadArgument;
3519 }
3520
3521 return( err );
3522 }
3523
3524 kern_return_t is_io_connect_unmap_memory(
3525 io_object_t connect,
3526 uint32_t type,
3527 task_t task,
3528 uint32_t mapAddr )
3529 {
3530 IOReturn err;
3531 mach_vm_address_t address;
3532
3533 address = SCALAR64(mapAddr);
3534
3535 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
3536
3537 return (err);
3538 }
3539
3540
3541 /* Routine io_connect_add_client */
3542 kern_return_t is_io_connect_add_client(
3543 io_object_t connection,
3544 io_object_t connect_to)
3545 {
3546 CHECK( IOUserClient, connection, client );
3547 CHECK( IOUserClient, connect_to, to );
3548
3549 IOStatisticsClientCall();
3550 return( client->connectClient( to ) );
3551 }
3552
3553
3554 /* Routine io_connect_set_properties */
3555 kern_return_t is_io_connect_set_properties(
3556 io_object_t connection,
3557 io_buf_ptr_t properties,
3558 mach_msg_type_number_t propertiesCnt,
3559 kern_return_t * result)
3560 {
3561 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3562 }
3563
3564 /* Routine io_user_client_method */
3565 kern_return_t is_io_connect_method_var_output
3566 (
3567 io_connect_t connection,
3568 uint32_t selector,
3569 io_scalar_inband64_t scalar_input,
3570 mach_msg_type_number_t scalar_inputCnt,
3571 io_struct_inband_t inband_input,
3572 mach_msg_type_number_t inband_inputCnt,
3573 mach_vm_address_t ool_input,
3574 mach_vm_size_t ool_input_size,
3575 io_struct_inband_t inband_output,
3576 mach_msg_type_number_t *inband_outputCnt,
3577 io_scalar_inband64_t scalar_output,
3578 mach_msg_type_number_t *scalar_outputCnt,
3579 io_buf_ptr_t *var_output,
3580 mach_msg_type_number_t *var_outputCnt
3581 )
3582 {
3583 CHECK( IOUserClient, connection, client );
3584
3585 IOExternalMethodArguments args;
3586 IOReturn ret;
3587 IOMemoryDescriptor * inputMD = 0;
3588 OSObject * structureVariableOutputData = 0;
3589
3590 bzero(&args.__reserved[0], sizeof(args.__reserved));
3591 args.version = kIOExternalMethodArgumentsCurrentVersion;
3592
3593 args.selector = selector;
3594
3595 args.asyncWakePort = MACH_PORT_NULL;
3596 args.asyncReference = 0;
3597 args.asyncReferenceCount = 0;
3598 args.structureVariableOutputData = &structureVariableOutputData;
3599
3600 args.scalarInput = scalar_input;
3601 args.scalarInputCount = scalar_inputCnt;
3602 args.structureInput = inband_input;
3603 args.structureInputSize = inband_inputCnt;
3604
3605 if (ool_input)
3606 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3607 kIODirectionOut, current_task());
3608
3609 args.structureInputDescriptor = inputMD;
3610
3611 args.scalarOutput = scalar_output;
3612 args.scalarOutputCount = *scalar_outputCnt;
3613 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3614 args.structureOutput = inband_output;
3615 args.structureOutputSize = *inband_outputCnt;
3616 args.structureOutputDescriptor = NULL;
3617 args.structureOutputDescriptorSize = 0;
3618
3619 IOStatisticsClientCall();
3620 ret = client->externalMethod( selector, &args );
3621
3622 *scalar_outputCnt = args.scalarOutputCount;
3623 *inband_outputCnt = args.structureOutputSize;
3624
3625 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3626 {
3627 OSSerialize * serialize;
3628 OSData * data;
3629 vm_size_t len;
3630
3631 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3632 {
3633 len = serialize->getLength();
3634 *var_outputCnt = len;
3635 ret = copyoutkdata(serialize->text(), len, var_output);
3636 }
3637 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3638 {
3639 len = data->getLength();
3640 *var_outputCnt = len;
3641 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3642 }
3643 else
3644 {
3645 ret = kIOReturnUnderrun;
3646 }
3647 }
3648
3649 if (inputMD)
3650 inputMD->release();
3651 if (structureVariableOutputData)
3652 structureVariableOutputData->release();
3653
3654 return (ret);
3655 }
3656
3657 /* Routine io_user_client_method */
3658 kern_return_t is_io_connect_method
3659 (
3660 io_connect_t connection,
3661 uint32_t selector,
3662 io_scalar_inband64_t scalar_input,
3663 mach_msg_type_number_t scalar_inputCnt,
3664 io_struct_inband_t inband_input,
3665 mach_msg_type_number_t inband_inputCnt,
3666 mach_vm_address_t ool_input,
3667 mach_vm_size_t ool_input_size,
3668 io_struct_inband_t inband_output,
3669 mach_msg_type_number_t *inband_outputCnt,
3670 io_scalar_inband64_t scalar_output,
3671 mach_msg_type_number_t *scalar_outputCnt,
3672 mach_vm_address_t ool_output,
3673 mach_vm_size_t *ool_output_size
3674 )
3675 {
3676 CHECK( IOUserClient, connection, client );
3677
3678 IOExternalMethodArguments args;
3679 IOReturn ret;
3680 IOMemoryDescriptor * inputMD = 0;
3681 IOMemoryDescriptor * outputMD = 0;
3682
3683 bzero(&args.__reserved[0], sizeof(args.__reserved));
3684 args.version = kIOExternalMethodArgumentsCurrentVersion;
3685
3686 args.selector = selector;
3687
3688 args.asyncWakePort = MACH_PORT_NULL;
3689 args.asyncReference = 0;
3690 args.asyncReferenceCount = 0;
3691 args.structureVariableOutputData = 0;
3692
3693 args.scalarInput = scalar_input;
3694 args.scalarInputCount = scalar_inputCnt;
3695 args.structureInput = inband_input;
3696 args.structureInputSize = inband_inputCnt;
3697
3698 if (ool_input)
3699 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3700 kIODirectionOut, current_task());
3701
3702 args.structureInputDescriptor = inputMD;
3703
3704 args.scalarOutput = scalar_output;
3705 args.scalarOutputCount = *scalar_outputCnt;
3706 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3707 args.structureOutput = inband_output;
3708 args.structureOutputSize = *inband_outputCnt;
3709
3710 if (ool_output && ool_output_size)
3711 {
3712 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3713 kIODirectionIn, current_task());
3714 }
3715
3716 args.structureOutputDescriptor = outputMD;
3717 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3718
3719 IOStatisticsClientCall();
3720 ret = client->externalMethod( selector, &args );
3721
3722 *scalar_outputCnt = args.scalarOutputCount;
3723 *inband_outputCnt = args.structureOutputSize;
3724 *ool_output_size = args.structureOutputDescriptorSize;
3725
3726 if (inputMD)
3727 inputMD->release();
3728 if (outputMD)
3729 outputMD->release();
3730
3731 return (ret);
3732 }
3733
3734 /* Routine io_async_user_client_method */
3735 kern_return_t is_io_connect_async_method
3736 (
3737 io_connect_t connection,
3738 mach_port_t wake_port,
3739 io_async_ref64_t reference,
3740 mach_msg_type_number_t referenceCnt,
3741 uint32_t selector,
3742 io_scalar_inband64_t scalar_input,
3743 mach_msg_type_number_t scalar_inputCnt,
3744 io_struct_inband_t inband_input,
3745 mach_msg_type_number_t inband_inputCnt,
3746 mach_vm_address_t ool_input,
3747 mach_vm_size_t ool_input_size,
3748 io_struct_inband_t inband_output,
3749 mach_msg_type_number_t *inband_outputCnt,
3750 io_scalar_inband64_t scalar_output,
3751 mach_msg_type_number_t *scalar_outputCnt,
3752 mach_vm_address_t ool_output,
3753 mach_vm_size_t * ool_output_size
3754 )
3755 {
3756 CHECK( IOUserClient, connection, client );
3757
3758 IOExternalMethodArguments args;
3759 IOReturn ret;
3760 IOMemoryDescriptor * inputMD = 0;
3761 IOMemoryDescriptor * outputMD = 0;
3762
3763 bzero(&args.__reserved[0], sizeof(args.__reserved));
3764 args.version = kIOExternalMethodArgumentsCurrentVersion;
3765
3766 reference[0] = (io_user_reference_t) wake_port;
3767 if (vm_map_is_64bit(get_task_map(current_task())))
3768 reference[0] |= kIOUCAsync64Flag;
3769
3770 args.selector = selector;
3771
3772 args.asyncWakePort = wake_port;
3773 args.asyncReference = reference;
3774 args.asyncReferenceCount = referenceCnt;
3775
3776 args.scalarInput = scalar_input;
3777 args.scalarInputCount = scalar_inputCnt;
3778 args.structureInput = inband_input;
3779 args.structureInputSize = inband_inputCnt;
3780
3781 if (ool_input)
3782 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3783 kIODirectionOut, current_task());
3784
3785 args.structureInputDescriptor = inputMD;
3786
3787 args.scalarOutput = scalar_output;
3788 args.scalarOutputCount = *scalar_outputCnt;
3789 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3790 args.structureOutput = inband_output;
3791 args.structureOutputSize = *inband_outputCnt;
3792
3793 if (ool_output)
3794 {
3795 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3796 kIODirectionIn, current_task());
3797 }
3798
3799 args.structureOutputDescriptor = outputMD;
3800 args.structureOutputDescriptorSize = *ool_output_size;
3801
3802 IOStatisticsClientCall();
3803 ret = client->externalMethod( selector, &args );
3804
3805 *inband_outputCnt = args.structureOutputSize;
3806 *ool_output_size = args.structureOutputDescriptorSize;
3807
3808 if (inputMD)
3809 inputMD->release();
3810 if (outputMD)
3811 outputMD->release();
3812
3813 return (ret);
3814 }
3815
3816 /* Routine io_connect_method_scalarI_scalarO */
3817 kern_return_t is_io_connect_method_scalarI_scalarO(
3818 io_object_t connect,
3819 uint32_t index,
3820 io_scalar_inband_t input,
3821 mach_msg_type_number_t inputCount,
3822 io_scalar_inband_t output,
3823 mach_msg_type_number_t * outputCount )
3824 {
3825 IOReturn err;
3826 uint32_t i;
3827 io_scalar_inband64_t _input;
3828 io_scalar_inband64_t _output;
3829
3830 mach_msg_type_number_t struct_outputCnt = 0;
3831 mach_vm_size_t ool_output_size = 0;
3832
3833 bzero(&_output[0], sizeof(_output));
3834 for (i = 0; i < inputCount; i++)
3835 _input[i] = SCALAR64(input[i]);
3836
3837 err = is_io_connect_method(connect, index,
3838 _input, inputCount,
3839 NULL, 0,
3840 0, 0,
3841 NULL, &struct_outputCnt,
3842 _output, outputCount,
3843 0, &ool_output_size);
3844
3845 for (i = 0; i < *outputCount; i++)
3846 output[i] = SCALAR32(_output[i]);
3847
3848 return (err);
3849 }
3850
3851 kern_return_t shim_io_connect_method_scalarI_scalarO(
3852 IOExternalMethod * method,
3853 IOService * object,
3854 const io_user_scalar_t * input,
3855 mach_msg_type_number_t inputCount,
3856 io_user_scalar_t * output,
3857 mach_msg_type_number_t * outputCount )
3858 {
3859 IOMethod func;
3860 io_scalar_inband_t _output;
3861 IOReturn err;
3862 err = kIOReturnBadArgument;
3863
3864 bzero(&_output[0], sizeof(_output));
3865 do {
3866
3867 if( inputCount != method->count0)
3868 {
3869 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3870 continue;
3871 }
3872 if( *outputCount != method->count1)
3873 {
3874 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3875 continue;
3876 }
3877
3878 func = method->func;
3879
3880 switch( inputCount) {
3881
3882 case 6:
3883 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3884 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3885 break;
3886 case 5:
3887 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3888 ARG32(input[3]), ARG32(input[4]),
3889 &_output[0] );
3890 break;
3891 case 4:
3892 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3893 ARG32(input[3]),
3894 &_output[0], &_output[1] );
3895 break;
3896 case 3:
3897 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3898 &_output[0], &_output[1], &_output[2] );
3899 break;
3900 case 2:
3901 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3902 &_output[0], &_output[1], &_output[2],
3903 &_output[3] );
3904 break;
3905 case 1:
3906 err = (object->*func)( ARG32(input[0]),
3907 &_output[0], &_output[1], &_output[2],
3908 &_output[3], &_output[4] );
3909 break;
3910 case 0:
3911 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3912 &_output[3], &_output[4], &_output[5] );
3913 break;
3914
3915 default:
3916 IOLog("%s: Bad method table\n", object->getName());
3917 }
3918 }
3919 while( false);
3920
3921 uint32_t i;
3922 for (i = 0; i < *outputCount; i++)
3923 output[i] = SCALAR32(_output[i]);
3924
3925 return( err);
3926 }
3927
3928 /* Routine io_async_method_scalarI_scalarO */
3929 kern_return_t is_io_async_method_scalarI_scalarO(
3930 io_object_t connect,
3931 mach_port_t wake_port,
3932 io_async_ref_t reference,
3933 mach_msg_type_number_t referenceCnt,
3934 uint32_t index,
3935 io_scalar_inband_t input,
3936 mach_msg_type_number_t inputCount,
3937 io_scalar_inband_t output,
3938 mach_msg_type_number_t * outputCount )
3939 {
3940 IOReturn err;
3941 uint32_t i;
3942 io_scalar_inband64_t _input;
3943 io_scalar_inband64_t _output;
3944 io_async_ref64_t _reference;
3945
3946 bzero(&_output[0], sizeof(_output));
3947 for (i = 0; i < referenceCnt; i++)
3948 _reference[i] = REF64(reference[i]);
3949
3950 mach_msg_type_number_t struct_outputCnt = 0;
3951 mach_vm_size_t ool_output_size = 0;
3952
3953 for (i = 0; i < inputCount; i++)
3954 _input[i] = SCALAR64(input[i]);
3955
3956 err = is_io_connect_async_method(connect,
3957 wake_port, _reference, referenceCnt,
3958 index,
3959 _input, inputCount,
3960 NULL, 0,
3961 0, 0,
3962 NULL, &struct_outputCnt,
3963 _output, outputCount,
3964 0, &ool_output_size);
3965
3966 for (i = 0; i < *outputCount; i++)
3967 output[i] = SCALAR32(_output[i]);
3968
3969 return (err);
3970 }
3971 /* Routine io_async_method_scalarI_structureO */
3972 kern_return_t is_io_async_method_scalarI_structureO(
3973 io_object_t connect,
3974 mach_port_t wake_port,
3975 io_async_ref_t reference,
3976 mach_msg_type_number_t referenceCnt,
3977 uint32_t index,
3978 io_scalar_inband_t input,
3979 mach_msg_type_number_t inputCount,
3980 io_struct_inband_t output,
3981 mach_msg_type_number_t * outputCount )
3982 {
3983 uint32_t i;
3984 io_scalar_inband64_t _input;
3985 io_async_ref64_t _reference;
3986
3987 for (i = 0; i < referenceCnt; i++)
3988 _reference[i] = REF64(reference[i]);
3989
3990 mach_msg_type_number_t scalar_outputCnt = 0;
3991 mach_vm_size_t ool_output_size = 0;
3992
3993 for (i = 0; i < inputCount; i++)
3994 _input[i] = SCALAR64(input[i]);
3995
3996 return (is_io_connect_async_method(connect,
3997 wake_port, _reference, referenceCnt,
3998 index,
3999 _input, inputCount,
4000 NULL, 0,
4001 0, 0,
4002 output, outputCount,
4003 NULL, &scalar_outputCnt,
4004 0, &ool_output_size));
4005 }
4006
4007 /* Routine io_async_method_scalarI_structureI */
4008 kern_return_t is_io_async_method_scalarI_structureI(
4009 io_connect_t connect,
4010 mach_port_t wake_port,
4011 io_async_ref_t reference,
4012 mach_msg_type_number_t referenceCnt,
4013 uint32_t index,
4014 io_scalar_inband_t input,
4015 mach_msg_type_number_t inputCount,
4016 io_struct_inband_t inputStruct,
4017 mach_msg_type_number_t inputStructCount )
4018 {
4019 uint32_t i;
4020 io_scalar_inband64_t _input;
4021 io_async_ref64_t _reference;
4022
4023 for (i = 0; i < referenceCnt; i++)
4024 _reference[i] = REF64(reference[i]);
4025
4026 mach_msg_type_number_t scalar_outputCnt = 0;
4027 mach_msg_type_number_t inband_outputCnt = 0;
4028 mach_vm_size_t ool_output_size = 0;
4029
4030 for (i = 0; i < inputCount; i++)
4031 _input[i] = SCALAR64(input[i]);
4032
4033 return (is_io_connect_async_method(connect,
4034 wake_port, _reference, referenceCnt,
4035 index,
4036 _input, inputCount,
4037 inputStruct, inputStructCount,
4038 0, 0,
4039 NULL, &inband_outputCnt,
4040 NULL, &scalar_outputCnt,
4041 0, &ool_output_size));
4042 }
4043
4044 /* Routine io_async_method_structureI_structureO */
4045 kern_return_t is_io_async_method_structureI_structureO(
4046 io_object_t connect,
4047 mach_port_t wake_port,
4048 io_async_ref_t reference,
4049 mach_msg_type_number_t referenceCnt,
4050 uint32_t index,
4051 io_struct_inband_t input,
4052 mach_msg_type_number_t inputCount,
4053 io_struct_inband_t output,
4054 mach_msg_type_number_t * outputCount )
4055 {
4056 uint32_t i;
4057 mach_msg_type_number_t scalar_outputCnt = 0;
4058 mach_vm_size_t ool_output_size = 0;
4059 io_async_ref64_t _reference;
4060
4061 for (i = 0; i < referenceCnt; i++)
4062 _reference[i] = REF64(reference[i]);
4063
4064 return (is_io_connect_async_method(connect,
4065 wake_port, _reference, referenceCnt,
4066 index,
4067 NULL, 0,
4068 input, inputCount,
4069 0, 0,
4070 output, outputCount,
4071 NULL, &scalar_outputCnt,
4072 0, &ool_output_size));
4073 }
4074
4075
4076 kern_return_t shim_io_async_method_scalarI_scalarO(
4077 IOExternalAsyncMethod * method,
4078 IOService * object,
4079 mach_port_t asyncWakePort,
4080 io_user_reference_t * asyncReference,
4081 uint32_t asyncReferenceCount,
4082 const io_user_scalar_t * input,
4083 mach_msg_type_number_t inputCount,
4084 io_user_scalar_t * output,
4085 mach_msg_type_number_t * outputCount )
4086 {
4087 IOAsyncMethod func;
4088 uint32_t i;
4089 io_scalar_inband_t _output;
4090 IOReturn err;
4091 io_async_ref_t reference;
4092
4093 bzero(&_output[0], sizeof(_output));
4094 for (i = 0; i < asyncReferenceCount; i++)
4095 reference[i] = REF32(asyncReference[i]);
4096
4097 err = kIOReturnBadArgument;
4098
4099 do {
4100
4101 if( inputCount != method->count0)
4102 {
4103 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4104 continue;
4105 }
4106 if( *outputCount != method->count1)
4107 {
4108 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4109 continue;
4110 }
4111
4112 func = method->func;
4113
4114 switch( inputCount) {
4115
4116 case 6:
4117 err = (object->*func)( reference,
4118 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4119 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4120 break;
4121 case 5:
4122 err = (object->*func)( reference,
4123 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4124 ARG32(input[3]), ARG32(input[4]),
4125 &_output[0] );
4126 break;
4127 case 4:
4128 err = (object->*func)( reference,
4129 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4130 ARG32(input[3]),
4131 &_output[0], &_output[1] );
4132 break;
4133 case 3:
4134 err = (object->*func)( reference,
4135 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4136 &_output[0], &_output[1], &_output[2] );
4137 break;
4138 case 2:
4139 err = (object->*func)( reference,
4140 ARG32(input[0]), ARG32(input[1]),
4141 &_output[0], &_output[1], &_output[2],
4142 &_output[3] );
4143 break;
4144 case 1:
4145 err = (object->*func)( reference,
4146 ARG32(input[0]),
4147 &_output[0], &_output[1], &_output[2],
4148 &_output[3], &_output[4] );
4149 break;
4150 case 0:
4151 err = (object->*func)( reference,
4152 &_output[0], &_output[1], &_output[2],
4153 &_output[3], &_output[4], &_output[5] );
4154 break;
4155
4156 default:
4157 IOLog("%s: Bad method table\n", object->getName());
4158 }
4159 }
4160 while( false);
4161
4162 for (i = 0; i < *outputCount; i++)
4163 output[i] = SCALAR32(_output[i]);
4164
4165 return( err);
4166 }
4167
4168
4169 /* Routine io_connect_method_scalarI_structureO */
4170 kern_return_t is_io_connect_method_scalarI_structureO(
4171 io_object_t connect,
4172 uint32_t index,
4173 io_scalar_inband_t input,
4174 mach_msg_type_number_t inputCount,
4175 io_struct_inband_t output,
4176 mach_msg_type_number_t * outputCount )
4177 {
4178 uint32_t i;
4179 io_scalar_inband64_t _input;
4180
4181 mach_msg_type_number_t scalar_outputCnt = 0;
4182 mach_vm_size_t ool_output_size = 0;
4183
4184 for (i = 0; i < inputCount; i++)
4185 _input[i] = SCALAR64(input[i]);
4186
4187 return (is_io_connect_method(connect, index,
4188 _input, inputCount,
4189 NULL, 0,
4190 0, 0,
4191 output, outputCount,
4192 NULL, &scalar_outputCnt,
4193 0, &ool_output_size));
4194 }
4195
4196 kern_return_t shim_io_connect_method_scalarI_structureO(
4197
4198 IOExternalMethod * method,
4199 IOService * object,
4200 const io_user_scalar_t * input,
4201 mach_msg_type_number_t inputCount,
4202 io_struct_inband_t output,
4203 IOByteCount * outputCount )
4204 {
4205 IOMethod func;
4206 IOReturn err;
4207
4208 err = kIOReturnBadArgument;
4209
4210 do {
4211 if( inputCount != method->count0)
4212 {
4213 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4214 continue;
4215 }
4216 if( (kIOUCVariableStructureSize != method->count1)
4217 && (*outputCount != method->count1))
4218 {
4219 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4220 continue;
4221 }
4222
4223 func = method->func;
4224
4225 switch( inputCount) {
4226
4227 case 5:
4228 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4229 ARG32(input[3]), ARG32(input[4]),
4230 output );
4231 break;
4232 case 4:
4233 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4234 ARG32(input[3]),
4235 output, (void *)outputCount );
4236 break;
4237 case 3:
4238 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4239 output, (void *)outputCount, 0 );
4240 break;
4241 case 2:
4242 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4243 output, (void *)outputCount, 0, 0 );
4244 break;
4245 case 1:
4246 err = (object->*func)( ARG32(input[0]),
4247 output, (void *)outputCount, 0, 0, 0 );
4248 break;
4249 case 0:
4250 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4251 break;
4252
4253 default:
4254 IOLog("%s: Bad method table\n", object->getName());
4255 }
4256 }
4257 while( false);
4258
4259 return( err);
4260 }
4261
4262
4263 kern_return_t shim_io_async_method_scalarI_structureO(
4264 IOExternalAsyncMethod * method,
4265 IOService * object,
4266 mach_port_t asyncWakePort,
4267 io_user_reference_t * asyncReference,
4268 uint32_t asyncReferenceCount,
4269 const io_user_scalar_t * input,
4270 mach_msg_type_number_t inputCount,
4271 io_struct_inband_t output,
4272 mach_msg_type_number_t * outputCount )
4273 {
4274 IOAsyncMethod func;
4275 uint32_t i;
4276 IOReturn err;
4277 io_async_ref_t reference;
4278
4279 for (i = 0; i < asyncReferenceCount; i++)
4280 reference[i] = REF32(asyncReference[i]);
4281
4282 err = kIOReturnBadArgument;
4283 do {
4284 if( inputCount != method->count0)
4285 {
4286 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4287 continue;
4288 }
4289 if( (kIOUCVariableStructureSize != method->count1)
4290 && (*outputCount != method->count1))
4291 {
4292 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4293 continue;
4294 }
4295
4296 func = method->func;
4297
4298 switch( inputCount) {
4299
4300 case 5:
4301 err = (object->*func)( reference,
4302 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4303 ARG32(input[3]), ARG32(input[4]),
4304 output );
4305 break;
4306 case 4:
4307 err = (object->*func)( reference,
4308 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4309 ARG32(input[3]),
4310 output, (void *)outputCount );
4311 break;
4312 case 3:
4313 err = (object->*func)( reference,
4314 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4315 output, (void *)outputCount, 0 );
4316 break;
4317 case 2:
4318 err = (object->*func)( reference,
4319 ARG32(input[0]), ARG32(input[1]),
4320 output, (void *)outputCount, 0, 0 );
4321 break;
4322 case 1:
4323 err = (object->*func)( reference,
4324 ARG32(input[0]),
4325 output, (void *)outputCount, 0, 0, 0 );
4326 break;
4327 case 0:
4328 err = (object->*func)( reference,
4329 output, (void *)outputCount, 0, 0, 0, 0 );
4330 break;
4331
4332 default:
4333 IOLog("%s: Bad method table\n", object->getName());
4334 }
4335 }
4336 while( false);
4337
4338 return( err);
4339 }
4340
4341 /* Routine io_connect_method_scalarI_structureI */
4342 kern_return_t is_io_connect_method_scalarI_structureI(
4343 io_connect_t connect,
4344 uint32_t index,
4345 io_scalar_inband_t input,
4346 mach_msg_type_number_t inputCount,
4347 io_struct_inband_t inputStruct,
4348 mach_msg_type_number_t inputStructCount )
4349 {
4350 uint32_t i;
4351 io_scalar_inband64_t _input;
4352
4353 mach_msg_type_number_t scalar_outputCnt = 0;
4354 mach_msg_type_number_t inband_outputCnt = 0;
4355 mach_vm_size_t ool_output_size = 0;
4356
4357 for (i = 0; i < inputCount; i++)
4358 _input[i] = SCALAR64(input[i]);
4359
4360 return (is_io_connect_method(connect, index,
4361 _input, inputCount,
4362 inputStruct, inputStructCount,
4363 0, 0,
4364 NULL, &inband_outputCnt,
4365 NULL, &scalar_outputCnt,
4366 0, &ool_output_size));
4367 }
4368
4369 kern_return_t shim_io_connect_method_scalarI_structureI(
4370 IOExternalMethod * method,
4371 IOService * object,
4372 const io_user_scalar_t * input,
4373 mach_msg_type_number_t inputCount,
4374 io_struct_inband_t inputStruct,
4375 mach_msg_type_number_t inputStructCount )
4376 {
4377 IOMethod func;
4378 IOReturn err = kIOReturnBadArgument;
4379
4380 do
4381 {
4382 if (inputCount != method->count0)
4383 {
4384 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4385 continue;
4386 }
4387 if( (kIOUCVariableStructureSize != method->count1)
4388 && (inputStructCount != method->count1))
4389 {
4390 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4391 continue;
4392 }
4393
4394 func = method->func;
4395
4396 switch( inputCount) {
4397
4398 case 5:
4399 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4400 ARG32(input[3]), ARG32(input[4]),
4401 inputStruct );
4402 break;
4403 case 4:
4404 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4405 ARG32(input[3]),
4406 inputStruct, (void *)(uintptr_t)inputStructCount );
4407 break;
4408 case 3:
4409 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4410 inputStruct, (void *)(uintptr_t)inputStructCount,
4411 0 );
4412 break;
4413 case 2:
4414 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4415 inputStruct, (void *)(uintptr_t)inputStructCount,
4416 0, 0 );
4417 break;
4418 case 1:
4419 err = (object->*func)( ARG32(input[0]),
4420 inputStruct, (void *)(uintptr_t)inputStructCount,
4421 0, 0, 0 );
4422 break;
4423 case 0:
4424 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4425 0, 0, 0, 0 );
4426 break;
4427
4428 default:
4429 IOLog("%s: Bad method table\n", object->getName());
4430 }
4431 }
4432 while (false);
4433
4434 return( err);
4435 }
4436
4437 kern_return_t shim_io_async_method_scalarI_structureI(
4438 IOExternalAsyncMethod * method,
4439 IOService * object,
4440 mach_port_t asyncWakePort,
4441 io_user_reference_t * asyncReference,
4442 uint32_t asyncReferenceCount,
4443 const io_user_scalar_t * input,
4444 mach_msg_type_number_t inputCount,
4445 io_struct_inband_t inputStruct,
4446 mach_msg_type_number_t inputStructCount )
4447 {
4448 IOAsyncMethod func;
4449 uint32_t i;
4450 IOReturn err = kIOReturnBadArgument;
4451 io_async_ref_t reference;
4452
4453 for (i = 0; i < asyncReferenceCount; i++)
4454 reference[i] = REF32(asyncReference[i]);
4455
4456 do
4457 {
4458 if (inputCount != method->count0)
4459 {
4460 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4461 continue;
4462 }
4463 if( (kIOUCVariableStructureSize != method->count1)
4464 && (inputStructCount != method->count1))
4465 {
4466 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4467 continue;
4468 }
4469
4470 func = method->func;
4471
4472 switch( inputCount) {
4473
4474 case 5:
4475 err = (object->*func)( reference,
4476 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4477 ARG32(input[3]), ARG32(input[4]),
4478 inputStruct );
4479 break;
4480 case 4:
4481 err = (object->*func)( reference,
4482 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4483 ARG32(input[3]),
4484 inputStruct, (void *)(uintptr_t)inputStructCount );
4485 break;
4486 case 3:
4487 err = (object->*func)( reference,
4488 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4489 inputStruct, (void *)(uintptr_t)inputStructCount,
4490 0 );
4491 break;
4492 case 2:
4493 err = (object->*func)( reference,
4494 ARG32(input[0]), ARG32(input[1]),
4495 inputStruct, (void *)(uintptr_t)inputStructCount,
4496 0, 0 );
4497 break;
4498 case 1:
4499 err = (object->*func)( reference,
4500 ARG32(input[0]),
4501 inputStruct, (void *)(uintptr_t)inputStructCount,
4502 0, 0, 0 );
4503 break;
4504 case 0:
4505 err = (object->*func)( reference,
4506 inputStruct, (void *)(uintptr_t)inputStructCount,
4507 0, 0, 0, 0 );
4508 break;
4509
4510 default:
4511 IOLog("%s: Bad method table\n", object->getName());
4512 }
4513 }
4514 while (false);
4515
4516 return( err);
4517 }
4518
4519 /* Routine io_connect_method_structureI_structureO */
4520 kern_return_t is_io_connect_method_structureI_structureO(
4521 io_object_t connect,
4522 uint32_t index,
4523 io_struct_inband_t input,
4524 mach_msg_type_number_t inputCount,
4525 io_struct_inband_t output,
4526 mach_msg_type_number_t * outputCount )
4527 {
4528 mach_msg_type_number_t scalar_outputCnt = 0;
4529 mach_vm_size_t ool_output_size = 0;
4530
4531 return (is_io_connect_method(connect, index,
4532 NULL, 0,
4533 input, inputCount,
4534 0, 0,
4535 output, outputCount,
4536 NULL, &scalar_outputCnt,
4537 0, &ool_output_size));
4538 }
4539
4540 kern_return_t shim_io_connect_method_structureI_structureO(
4541 IOExternalMethod * method,
4542 IOService * object,
4543 io_struct_inband_t input,
4544 mach_msg_type_number_t inputCount,
4545 io_struct_inband_t output,
4546 IOByteCount * outputCount )
4547 {
4548 IOMethod func;
4549 IOReturn err = kIOReturnBadArgument;
4550
4551 do
4552 {
4553 if( (kIOUCVariableStructureSize != method->count0)
4554 && (inputCount != method->count0))
4555 {
4556 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4557 continue;
4558 }
4559 if( (kIOUCVariableStructureSize != method->count1)
4560 && (*outputCount != method->count1))
4561 {
4562 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4563 continue;
4564 }
4565
4566 func = method->func;
4567
4568 if( method->count1) {
4569 if( method->count0) {
4570 err = (object->*func)( input, output,
4571 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4572 } else {
4573 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4574 }
4575 } else {
4576 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4577 }
4578 }
4579 while( false);
4580
4581
4582 return( err);
4583 }
4584
4585 kern_return_t shim_io_async_method_structureI_structureO(
4586 IOExternalAsyncMethod * method,
4587 IOService * object,
4588 mach_port_t asyncWakePort,
4589 io_user_reference_t * asyncReference,
4590 uint32_t asyncReferenceCount,
4591 io_struct_inband_t input,
4592 mach_msg_type_number_t inputCount,
4593 io_struct_inband_t output,
4594 mach_msg_type_number_t * outputCount )
4595 {
4596 IOAsyncMethod func;
4597 uint32_t i;
4598 IOReturn err;
4599 io_async_ref_t reference;
4600
4601 for (i = 0; i < asyncReferenceCount; i++)
4602 reference[i] = REF32(asyncReference[i]);
4603
4604 err = kIOReturnBadArgument;
4605 do
4606 {
4607 if( (kIOUCVariableStructureSize != method->count0)
4608 && (inputCount != method->count0))
4609 {
4610 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4611 continue;
4612 }
4613 if( (kIOUCVariableStructureSize != method->count1)
4614 && (*outputCount != method->count1))
4615 {
4616 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4617 continue;
4618 }
4619
4620 func = method->func;
4621
4622 if( method->count1) {
4623 if( method->count0) {
4624 err = (object->*func)( reference,
4625 input, output,
4626 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4627 } else {
4628 err = (object->*func)( reference,
4629 output, outputCount, 0, 0, 0, 0 );
4630 }
4631 } else {
4632 err = (object->*func)( reference,
4633 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4634 }
4635 }
4636 while( false);
4637
4638 return( err);
4639 }
4640
4641 /* Routine io_catalog_send_data */
4642 kern_return_t is_io_catalog_send_data(
4643 mach_port_t master_port,
4644 uint32_t flag,
4645 io_buf_ptr_t inData,
4646 mach_msg_type_number_t inDataCount,
4647 kern_return_t * result)
4648 {
4649 OSObject * obj = 0;
4650 vm_offset_t data;
4651 kern_return_t kr = kIOReturnError;
4652
4653 //printf("io_catalog_send_data called. flag: %d\n", flag);
4654
4655 if( master_port != master_device_port)
4656 return kIOReturnNotPrivileged;
4657
4658 if( (flag != kIOCatalogRemoveKernelLinker &&
4659 flag != kIOCatalogKextdActive &&
4660 flag != kIOCatalogKextdFinishedLaunching) &&
4661 ( !inData || !inDataCount) )
4662 {
4663 return kIOReturnBadArgument;
4664 }
4665
4666 if (inData) {
4667 vm_map_offset_t map_data;
4668
4669 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4670 return( kIOReturnMessageTooLarge);
4671
4672 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4673 data = CAST_DOWN(vm_offset_t, map_data);
4674
4675 if( kr != KERN_SUCCESS)
4676 return kr;
4677
4678 // must return success after vm_map_copyout() succeeds
4679
4680 if( inDataCount ) {
4681 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4682 vm_deallocate( kernel_map, data, inDataCount );
4683 if( !obj) {
4684 *result = kIOReturnNoMemory;
4685 return( KERN_SUCCESS);
4686 }
4687 }
4688 }
4689
4690 switch ( flag ) {
4691 case kIOCatalogResetDrivers:
4692 case kIOCatalogResetDriversNoMatch: {
4693 OSArray * array;
4694
4695 array = OSDynamicCast(OSArray, obj);
4696 if (array) {
4697 if ( !gIOCatalogue->resetAndAddDrivers(array,
4698 flag == kIOCatalogResetDrivers) ) {
4699
4700 kr = kIOReturnError;
4701 }
4702 } else {
4703 kr = kIOReturnBadArgument;
4704 }
4705 }
4706 break;
4707
4708 case kIOCatalogAddDrivers:
4709 case kIOCatalogAddDriversNoMatch: {
4710 OSArray * array;
4711
4712 array = OSDynamicCast(OSArray, obj);
4713 if ( array ) {
4714 if ( !gIOCatalogue->addDrivers( array ,
4715 flag == kIOCatalogAddDrivers) ) {
4716 kr = kIOReturnError;
4717 }
4718 }
4719 else {
4720 kr = kIOReturnBadArgument;
4721 }
4722 }
4723 break;
4724
4725 case kIOCatalogRemoveDrivers:
4726 case kIOCatalogRemoveDriversNoMatch: {
4727 OSDictionary * dict;
4728
4729 dict = OSDynamicCast(OSDictionary, obj);
4730 if ( dict ) {
4731 if ( !gIOCatalogue->removeDrivers( dict,
4732 flag == kIOCatalogRemoveDrivers ) ) {
4733 kr = kIOReturnError;
4734 }
4735 }
4736 else {
4737 kr = kIOReturnBadArgument;
4738 }
4739 }
4740 break;
4741
4742 case kIOCatalogStartMatching: {
4743 OSDictionary * dict;
4744
4745 dict = OSDynamicCast(OSDictionary, obj);
4746 if ( dict ) {
4747 if ( !gIOCatalogue->startMatching( dict ) ) {
4748 kr = kIOReturnError;
4749 }
4750 }
4751 else {
4752 kr = kIOReturnBadArgument;
4753 }
4754 }
4755 break;
4756
4757 case kIOCatalogRemoveKernelLinker:
4758 kr = KERN_NOT_SUPPORTED;
4759 break;
4760
4761 case kIOCatalogKextdActive:
4762 #if !NO_KEXTD
4763 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4764 OSKext::setKextdActive();
4765
4766 /* Dump all nonloaded startup extensions; kextd will now send them
4767 * down on request.
4768 */
4769 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4770 #endif
4771 kr = kIOReturnSuccess;
4772 break;
4773
4774 case kIOCatalogKextdFinishedLaunching: {
4775 #if !NO_KEXTD
4776 static bool clearedBusy = false;
4777
4778 if (!clearedBusy) {
4779 IOService * serviceRoot = IOService::getServiceRoot();
4780 if (serviceRoot) {
4781 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4782 serviceRoot->adjustBusy(-1);
4783 clearedBusy = true;
4784 }
4785 }
4786 #endif
4787 kr = kIOReturnSuccess;
4788 }
4789 break;
4790
4791 default:
4792 kr = kIOReturnBadArgument;
4793 break;
4794 }
4795
4796 if (obj) obj->release();
4797
4798 *result = kr;
4799 return( KERN_SUCCESS);
4800 }
4801
4802 /* Routine io_catalog_terminate */
4803 kern_return_t is_io_catalog_terminate(
4804 mach_port_t master_port,
4805 uint32_t flag,
4806 io_name_t name )
4807 {
4808 kern_return_t kr;
4809
4810 if( master_port != master_device_port )
4811 return kIOReturnNotPrivileged;
4812
4813 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4814 kIOClientPrivilegeAdministrator );
4815 if( kIOReturnSuccess != kr)
4816 return( kr );
4817
4818 switch ( flag ) {
4819 #if !defined(SECURE_KERNEL)
4820 case kIOCatalogServiceTerminate:
4821 OSIterator * iter;
4822 IOService * service;
4823
4824 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4825 kIORegistryIterateRecursively);
4826 if ( !iter )
4827 return kIOReturnNoMemory;
4828
4829 do {
4830 iter->reset();
4831 while( (service = (IOService *)iter->getNextObject()) ) {
4832 if( service->metaCast(name)) {
4833 if ( !service->terminate( kIOServiceRequired
4834 | kIOServiceSynchronous) ) {
4835 kr = kIOReturnUnsupported;
4836 break;
4837 }
4838 }
4839 }
4840 } while( !service && !iter->isValid());
4841 iter->release();
4842 break;
4843
4844 case kIOCatalogModuleUnload:
4845 case kIOCatalogModuleTerminate:
4846 kr = gIOCatalogue->terminateDriversForModule(name,
4847 flag == kIOCatalogModuleUnload);
4848 break;
4849 #endif
4850
4851 default:
4852 kr = kIOReturnBadArgument;
4853 break;
4854 }
4855
4856 return( kr );
4857 }
4858
4859 /* Routine io_catalog_get_data */
4860 kern_return_t is_io_catalog_get_data(
4861 mach_port_t master_port,
4862 uint32_t flag,
4863 io_buf_ptr_t *outData,
4864 mach_msg_type_number_t *outDataCount)
4865 {
4866 kern_return_t kr = kIOReturnSuccess;
4867 OSSerialize * s;
4868
4869 if( master_port != master_device_port)
4870 return kIOReturnNotPrivileged;
4871
4872 //printf("io_catalog_get_data called. flag: %d\n", flag);
4873
4874 s = OSSerialize::withCapacity(4096);
4875 if ( !s )
4876 return kIOReturnNoMemory;
4877
4878 kr = gIOCatalogue->serializeData(flag, s);
4879
4880 if ( kr == kIOReturnSuccess ) {
4881 vm_offset_t data;
4882 vm_map_copy_t copy;
4883 vm_size_t size;
4884
4885 size = s->getLength();
4886 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4887 if ( kr == kIOReturnSuccess ) {
4888 bcopy(s->text(), (void *)data, size);
4889 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4890 (vm_map_size_t)size, true, &copy);
4891 *outData = (char *)copy;
4892 *outDataCount = size;
4893 }
4894 }
4895
4896 s->release();
4897
4898 return kr;
4899 }
4900
4901 /* Routine io_catalog_get_gen_count */
4902 kern_return_t is_io_catalog_get_gen_count(
4903 mach_port_t master_port,
4904 uint32_t *genCount)
4905 {
4906 if( master_port != master_device_port)
4907 return kIOReturnNotPrivileged;
4908
4909 //printf("io_catalog_get_gen_count called.\n");
4910
4911 if ( !genCount )
4912 return kIOReturnBadArgument;
4913
4914 *genCount = gIOCatalogue->getGenerationCount();
4915
4916 return kIOReturnSuccess;
4917 }
4918
4919 /* Routine io_catalog_module_loaded.
4920 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
4921 */
4922 kern_return_t is_io_catalog_module_loaded(
4923 mach_port_t master_port,
4924 io_name_t name)
4925 {
4926 if( master_port != master_device_port)
4927 return kIOReturnNotPrivileged;
4928
4929 //printf("io_catalog_module_loaded called. name %s\n", name);
4930
4931 if ( !name )
4932 return kIOReturnBadArgument;
4933
4934 gIOCatalogue->moduleHasLoaded(name);
4935
4936 return kIOReturnSuccess;
4937 }
4938
4939 kern_return_t is_io_catalog_reset(
4940 mach_port_t master_port,
4941 uint32_t flag)
4942 {
4943 if( master_port != master_device_port)
4944 return kIOReturnNotPrivileged;
4945
4946 switch ( flag ) {
4947 case kIOCatalogResetDefault:
4948 gIOCatalogue->reset();
4949 break;
4950
4951 default:
4952 return kIOReturnBadArgument;
4953 }
4954
4955 return kIOReturnSuccess;
4956 }
4957
4958 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4959 {
4960 kern_return_t result = kIOReturnBadArgument;
4961 IOUserClient *userClient;
4962
4963 if ((userClient = OSDynamicCast(IOUserClient,
4964 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4965 IOExternalTrap *trap;
4966 IOService *target = NULL;
4967
4968 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4969
4970 if (trap && target) {
4971 IOTrap func;
4972
4973 func = trap->func;
4974
4975 if (func) {
4976 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4977 }
4978 }
4979
4980 userClient->release();
4981 }
4982
4983 return result;
4984 }
4985
4986 } /* extern "C" */
4987
4988 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4989 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4990 {
4991 IOReturn err;
4992 IOService * object;
4993 IOByteCount structureOutputSize;
4994
4995 if (dispatch)
4996 {
4997 uint32_t count;
4998 count = dispatch->checkScalarInputCount;
4999 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5000 {
5001 return (kIOReturnBadArgument);
5002 }
5003
5004 count = dispatch->checkStructureInputSize;
5005 if ((kIOUCVariableStructureSize != count)
5006 && (count != ((args->structureInputDescriptor)
5007 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5008 {
5009 return (kIOReturnBadArgument);
5010 }
5011
5012 count = dispatch->checkScalarOutputCount;
5013 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5014 {
5015 return (kIOReturnBadArgument);
5016 }
5017
5018 count = dispatch->checkStructureOutputSize;
5019 if ((kIOUCVariableStructureSize != count)
5020 && (count != ((args->structureOutputDescriptor)
5021 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5022 {
5023 return (kIOReturnBadArgument);
5024 }
5025
5026 if (dispatch->function)
5027 err = (*dispatch->function)(target, reference, args);
5028 else
5029 err = kIOReturnNoCompletion; /* implementator can dispatch */
5030
5031 return (err);
5032 }
5033
5034
5035 // pre-Leopard API's don't do ool structs
5036 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5037 {
5038 err = kIOReturnIPCError;
5039 return (err);
5040 }
5041
5042 structureOutputSize = args->structureOutputSize;
5043
5044 if (args->asyncWakePort)
5045 {
5046 IOExternalAsyncMethod * method;
5047 object = 0;
5048 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5049 return (kIOReturnUnsupported);
5050
5051 if (kIOUCForegroundOnly & method->flags)
5052 {
5053 if (task_is_gpu_denied(current_task()))
5054 return (kIOReturnNotPermitted);
5055 }
5056
5057 switch (method->flags & kIOUCTypeMask)
5058 {
5059 case kIOUCScalarIStructI:
5060 err = shim_io_async_method_scalarI_structureI( method, object,
5061 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5062 args->scalarInput, args->scalarInputCount,
5063 (char *)args->structureInput, args->structureInputSize );
5064 break;
5065
5066 case kIOUCScalarIScalarO:
5067 err = shim_io_async_method_scalarI_scalarO( method, object,
5068 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5069 args->scalarInput, args->scalarInputCount,
5070 args->scalarOutput, &args->scalarOutputCount );
5071 break;
5072
5073 case kIOUCScalarIStructO:
5074 err = shim_io_async_method_scalarI_structureO( method, object,
5075 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5076 args->scalarInput, args->scalarInputCount,
5077 (char *) args->structureOutput, &args->structureOutputSize );
5078 break;
5079
5080
5081 case kIOUCStructIStructO:
5082 err = shim_io_async_method_structureI_structureO( method, object,
5083 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5084 (char *)args->structureInput, args->structureInputSize,
5085 (char *) args->structureOutput, &args->structureOutputSize );
5086 break;
5087
5088 default:
5089 err = kIOReturnBadArgument;
5090 break;
5091 }
5092 }
5093 else
5094 {
5095 IOExternalMethod * method;
5096 object = 0;
5097 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5098 return (kIOReturnUnsupported);
5099
5100 if (kIOUCForegroundOnly & method->flags)
5101 {
5102 if (task_is_gpu_denied(current_task()))
5103 return (kIOReturnNotPermitted);
5104 }
5105
5106 switch (method->flags & kIOUCTypeMask)
5107 {
5108 case kIOUCScalarIStructI:
5109 err = shim_io_connect_method_scalarI_structureI( method, object,
5110 args->scalarInput, args->scalarInputCount,
5111 (char *) args->structureInput, args->structureInputSize );
5112 break;
5113
5114 case kIOUCScalarIScalarO:
5115 err = shim_io_connect_method_scalarI_scalarO( method, object,
5116 args->scalarInput, args->scalarInputCount,
5117 args->scalarOutput, &args->scalarOutputCount );
5118 break;
5119
5120 case kIOUCScalarIStructO:
5121 err = shim_io_connect_method_scalarI_structureO( method, object,
5122 args->scalarInput, args->scalarInputCount,
5123 (char *) args->structureOutput, &structureOutputSize );
5124 break;
5125
5126
5127 case kIOUCStructIStructO:
5128 err = shim_io_connect_method_structureI_structureO( method, object,
5129 (char *) args->structureInput, args->structureInputSize,
5130 (char *) args->structureOutput, &structureOutputSize );
5131 break;
5132
5133 default:
5134 err = kIOReturnBadArgument;
5135 break;
5136 }
5137 }
5138
5139 args->structureOutputSize = structureOutputSize;
5140
5141 return (err);
5142 }
5143
5144
5145 #if __LP64__
5146 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5147 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5148 #else
5149 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5150 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5151 #endif
5152 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5153 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5154 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5155 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5156 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5157 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5158 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5159 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5160 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5161 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5162 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5163 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5164 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5165 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5166