iokit/Kernel/IOUserClient.cpp (xnu-2782.1.97)
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #if CONFIG_MACF
49
50 extern "C" {
51 #include <security/mac_framework.h>
52 };
53 #include <sys/kauth.h>
54
55 #define IOMACF_LOG 0
56
57 #endif /* CONFIG_MACF */
58
59 #include <IOKit/assert.h>
60
61 #include "IOServicePrivate.h"
62 #include "IOKitKernelInternal.h"
63
64 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
65 #define SCALAR32(x) ((uint32_t )x)
66 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
67 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
68 #define REF32(x) ((int)(x))
69
70 enum
71 {
72 kIOUCAsync0Flags = 3ULL,
73 kIOUCAsync64Flag = 1ULL
74 };
75
76 #if IOKITSTATS
77
78 #define IOStatisticsRegisterCounter() \
79 do { \
80 reserved->counter = IOStatistics::registerUserClient(this); \
81 } while (0)
82
83 #define IOStatisticsUnregisterCounter() \
84 do { \
85 if (reserved) \
86 IOStatistics::unregisterUserClient(reserved->counter); \
87 } while (0)
88
89 #define IOStatisticsClientCall() \
90 do { \
91 IOStatistics::countUserClientCall(client); \
92 } while (0)
93
94 #else
95
96 #define IOStatisticsRegisterCounter()
97 #define IOStatisticsUnregisterCounter()
98 #define IOStatisticsClientCall()
99
100 #endif /* IOKITSTATS */
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 // definitions we should get from osfmk
105
106 //typedef struct ipc_port * ipc_port_t;
107 typedef natural_t ipc_kobject_type_t;
108
109 #define IKOT_IOKIT_SPARE 27
110 #define IKOT_IOKIT_CONNECT 29
111 #define IKOT_IOKIT_OBJECT 30
112
113 extern "C" {
114
115 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
116 ipc_kobject_type_t type );
117
118 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
119
120 extern mach_port_name_t iokit_make_send_right( task_t task,
121 io_object_t obj, ipc_kobject_type_t type );
122
123 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
124
125 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
126
127 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
128
129 extern ipc_port_t master_device_port;
130
131 extern void iokit_retain_port( ipc_port_t port );
132 extern void iokit_release_port( ipc_port_t port );
133 extern void iokit_release_port_send( ipc_port_t port );
134
135 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
136
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139
140 } /* extern "C" */
141
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146
147 class IOMachPort : public OSObject
148 {
149 OSDeclareDefaultStructors(IOMachPort)
150 public:
151 OSObject * object;
152 ipc_port_t port;
153 UInt32 mscount;
154 UInt8 holdDestroy;
155
156 static IOMachPort * portForObject( OSObject * obj,
157 ipc_kobject_type_t type );
158 static bool noMoreSendersForObject( OSObject * obj,
159 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
160 static void releasePortForObject( OSObject * obj,
161 ipc_kobject_type_t type );
162 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
163
164 static OSDictionary * dictForType( ipc_kobject_type_t type );
165
166 static mach_port_name_t makeSendRightForTask( task_t task,
167 io_object_t obj, ipc_kobject_type_t type );
168
169 virtual void free();
170 };
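// Note: the per-type dictionaries below key each entry by the raw OSObject
// pointer (cast to const OSSymbol * only to satisfy OSDictionary's key
// interface); mscount mirrors the port's make-send count so that no-senders
// notifications can be reconciled against send rights still outstanding.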
171
172 #define super OSObject
173 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
174
175 static IOLock * gIOObjectPortLock;
176
177 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
178
179 // not in dictForType() for debugging ease
180 static OSDictionary * gIOObjectPorts;
181 static OSDictionary * gIOConnectPorts;
182
183 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
184 {
185 OSDictionary ** dict;
186
187 if( IKOT_IOKIT_OBJECT == type )
188 dict = &gIOObjectPorts;
189 else if( IKOT_IOKIT_CONNECT == type )
190 dict = &gIOConnectPorts;
191 else
192 return( 0 );
193
194 if( 0 == *dict)
195 *dict = OSDictionary::withCapacity( 1 );
196
197 return( *dict );
198 }
199
200 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
201 ipc_kobject_type_t type )
202 {
203 IOMachPort * inst = 0;
204 OSDictionary * dict;
205
206 IOTakeLock( gIOObjectPortLock);
207
208 do {
209
210 dict = dictForType( type );
211 if( !dict)
212 continue;
213
214 if( (inst = (IOMachPort *)
215 dict->getObject( (const OSSymbol *) obj ))) {
216 inst->mscount++;
217 inst->retain();
218 continue;
219 }
220
221 inst = new IOMachPort;
222 if( !inst || !inst->init()) {
223 if (inst) inst->release();
224 inst = 0; continue;
225 }
226
227 inst->port = iokit_alloc_object_port( obj, type );
228 if( inst->port) {
229 // retains obj
230 dict->setObject( (const OSSymbol *) obj, inst );
231 inst->mscount++;
232
233 } else {
234 inst->release();
235 inst = 0;
236 }
237
238 } while( false );
239
240 IOUnlock( gIOObjectPortLock);
241
242 return( inst );
243 }
244
245 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
246 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
247 {
248 OSDictionary * dict;
249 IOMachPort * machPort;
250 bool destroyed = true;
251
252 IOTakeLock( gIOObjectPortLock);
253
254 if( (dict = dictForType( type ))) {
255 obj->retain();
256
257 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
258 if( machPort) {
259 destroyed = (machPort->mscount <= *mscount);
260 if( destroyed)
261 dict->removeObject( (const OSSymbol *) obj );
262 else
263 *mscount = machPort->mscount;
264 }
265 obj->release();
266 }
267
268 IOUnlock( gIOObjectPortLock);
269
270 return( destroyed );
271 }
272
273 void IOMachPort::releasePortForObject( OSObject * obj,
274 ipc_kobject_type_t type )
275 {
276 OSDictionary * dict;
277 IOMachPort * machPort;
278
279 IOTakeLock( gIOObjectPortLock);
280
281 if( (dict = dictForType( type ))) {
282 obj->retain();
283 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
284 if( machPort && !machPort->holdDestroy)
285 dict->removeObject( (const OSSymbol *) obj );
286 obj->release();
287 }
288
289 IOUnlock( gIOObjectPortLock);
290 }
291
292 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
293 {
294 OSDictionary * dict;
295 IOMachPort * machPort;
296
297 IOLockLock( gIOObjectPortLock );
298
299 if( (dict = dictForType( type ))) {
300 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
301 if( machPort)
302 machPort->holdDestroy = true;
303 }
304
305 IOLockUnlock( gIOObjectPortLock );
306 }
307
308 void IOUserClient::destroyUserReferences( OSObject * obj )
309 {
310 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
311
312 // panther, 3160200
313 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
314
315 OSDictionary * dict;
316
317 IOTakeLock( gIOObjectPortLock);
318 obj->retain();
319
320 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
321 {
322 IOMachPort * port;
323 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
324 if (port)
325 {
326 IOUserClient * uc;
327 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
328 {
329 dict->setObject((const OSSymbol *) uc->mappings, port);
330 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
331
332 uc->mappings->release();
333 uc->mappings = 0;
334 }
335 dict->removeObject( (const OSSymbol *) obj );
336 }
337 }
338 obj->release();
339 IOUnlock( gIOObjectPortLock);
340 }
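// Note on destroyUserReferences(): when the object is an IOUserClient that
// still has live mappings, the connect port is not destroyed here; it is
// re-keyed onto uc->mappings and retargeted via iokit_switch_object_port(),
// so the port's lifetime follows the outstanding memory maps rather than the
// dying user client.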
341
342 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
343 io_object_t obj, ipc_kobject_type_t type )
344 {
345 return( iokit_make_send_right( task, obj, type ));
346 }
347
348 void IOMachPort::free( void )
349 {
350 if( port)
351 iokit_destroy_object_port( port );
352 super::free();
353 }
354
355 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
356
357 class IOUserNotification : public OSIterator
358 {
359 OSDeclareDefaultStructors(IOUserNotification)
360
361 IONotifier * holdNotify;
362 IOLock * lock;
363
364 public:
365
366 virtual bool init( void );
367 virtual void free();
368
369 virtual void setNotification( IONotifier * obj );
370
371 virtual void reset();
372 virtual bool isValid();
373 };
374
375 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
376 extern "C" {
377
378 // functions called from osfmk/device/iokit_rpc.c
379
380 void
381 iokit_add_reference( io_object_t obj )
382 {
383 if( obj)
384 obj->retain();
385 }
386
387 void
388 iokit_remove_reference( io_object_t obj )
389 {
390 if( obj)
391 obj->release();
392 }
393
394 ipc_port_t
395 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
396 {
397 IOMachPort * machPort;
398 ipc_port_t port;
399
400 if( (machPort = IOMachPort::portForObject( obj, type ))) {
401
402 port = machPort->port;
403 if( port)
404 iokit_retain_port( port );
405
406 machPort->release();
407
408 } else
409 port = NULL;
410
411 return( port );
412 }
413
414 kern_return_t
415 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
416 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
417 {
418 IOUserClient * client;
419 IOMemoryMap * map;
420 IOUserNotification * notify;
421
422 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
423 return( kIOReturnNotReady );
424
425 if( IKOT_IOKIT_CONNECT == type)
426 {
427 if( (client = OSDynamicCast( IOUserClient, obj ))) {
428 IOStatisticsClientCall();
429 client->clientDied();
430 }
431 }
432 else if( IKOT_IOKIT_OBJECT == type)
433 {
434 if( (map = OSDynamicCast( IOMemoryMap, obj )))
435 map->taskDied();
436 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
437 notify->setNotification( 0 );
438 }
439
440 return( kIOReturnSuccess );
441 }
442
443 }; /* extern "C" */
444
445 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
446
447 class IOServiceUserNotification : public IOUserNotification
448 {
449 OSDeclareDefaultStructors(IOServiceUserNotification)
450
451 struct PingMsg {
452 mach_msg_header_t msgHdr;
453 OSNotificationHeader64 notifyHeader;
454 };
455
456 enum { kMaxOutstanding = 1024 };
457
458 PingMsg * pingMsg;
459 vm_size_t msgSize;
460 OSArray * newSet;
461 OSObject * lastEntry;
462 bool armed;
463
464 public:
465
466 virtual bool init( mach_port_t port, natural_t type,
467 void * reference, vm_size_t referenceSize,
468 bool clientIs64 );
469 virtual void free();
470
471 static bool _handler( void * target,
472 void * ref, IOService * newService, IONotifier * notifier );
473 virtual bool handler( void * ref, IOService * newService );
474
475 virtual OSObject * getNextObject();
476 };
477
478 class IOServiceMessageUserNotification : public IOUserNotification
479 {
480 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
481
482 struct PingMsg {
483 mach_msg_header_t msgHdr;
484 mach_msg_body_t msgBody;
485 mach_msg_port_descriptor_t ports[1];
486 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
487 };
488
489 PingMsg * pingMsg;
490 vm_size_t msgSize;
491 uint8_t clientIs64;
492 int owningPID;
493
494 public:
495
496 virtual bool init( mach_port_t port, natural_t type,
497 void * reference, vm_size_t referenceSize,
498 vm_size_t extraSize,
499 bool clientIs64 );
500
501 virtual void free();
502
503 static IOReturn _handler( void * target, void * ref,
504 UInt32 messageType, IOService * provider,
505 void * messageArgument, vm_size_t argSize );
506 virtual IOReturn handler( void * ref,
507 UInt32 messageType, IOService * provider,
508 void * messageArgument, vm_size_t argSize );
509
510 virtual OSObject * getNextObject();
511 };
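/*
 * Note: the PingMsg layouts above are the kernel side of the notification
 * wire format -- a Mach header (plus one port descriptor for interest
 * messages) followed by an OSNotificationHeader64 whose trailing bytes carry
 * the client-supplied async reference and, for interest notifications, an
 * IOServiceInterestContent64 payload. On the user side this is typically
 * unpacked by IOKit.framework's IODispatchCalloutFromMessage(); that pairing
 * is noted here for context only.
 */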
512
513 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
514
515 #undef super
516 #define super OSIterator
517 OSDefineMetaClass( IOUserNotification, OSIterator )
518 OSDefineAbstractStructors( IOUserNotification, OSIterator )
519
520 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
521
522 bool IOUserNotification::init( void )
523 {
524 if( !super::init())
525 return( false );
526
527 lock = IOLockAlloc();
528 if( !lock)
529 return( false );
530
531 return( true );
532 }
533
534 void IOUserNotification::free( void )
535 {
536 if( holdNotify)
537 holdNotify->remove();
538 // can't be in handler now
539
540 if( lock)
541 IOLockFree( lock );
542
543 super::free();
544 }
545
546
547 void IOUserNotification::setNotification( IONotifier * notify )
548 {
549 IONotifier * previousNotify;
550
551 IOLockLock( gIOObjectPortLock);
552
553 previousNotify = holdNotify;
554 holdNotify = notify;
555
556 IOLockUnlock( gIOObjectPortLock);
557
558 if( previousNotify)
559 previousNotify->remove();
560 }
561
562 void IOUserNotification::reset()
563 {
564 // ?
565 }
566
567 bool IOUserNotification::isValid()
568 {
569 return( true );
570 }
571
572 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
573
574 #undef super
575 #define super IOUserNotification
576 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
577
578 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
579
580 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
581 void * reference, vm_size_t referenceSize,
582 bool clientIs64 )
583 {
584 if( !super::init())
585 return( false );
586
587 newSet = OSArray::withCapacity( 1 );
588 if( !newSet)
589 return( false );
590
591 if (referenceSize > sizeof(OSAsyncReference64))
592 return( false );
593
594 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
595 pingMsg = (PingMsg *) IOMalloc( msgSize);
596 if( !pingMsg)
597 return( false );
598
599 bzero( pingMsg, msgSize);
600
601 pingMsg->msgHdr.msgh_remote_port = port;
602 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
603 MACH_MSG_TYPE_COPY_SEND /*remote*/,
604 MACH_MSG_TYPE_MAKE_SEND /*local*/);
605 pingMsg->msgHdr.msgh_size = msgSize;
606 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
607
608 pingMsg->notifyHeader.size = 0;
609 pingMsg->notifyHeader.type = type;
610 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
611
612 return( true );
613 }
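// Note: sizeof(PingMsg) already reserves space for a full OSAsyncReference64
// inside notifyHeader, so the allocation above subtracts that maximum and
// adds back only the caller's referenceSize -- each ping carries exactly the
// reference bytes the client registered.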
614
615 void IOServiceUserNotification::free( void )
616 {
617 PingMsg * _pingMsg;
618 vm_size_t _msgSize;
619 OSArray * _newSet;
620 OSObject * _lastEntry;
621
622 _pingMsg = pingMsg;
623 _msgSize = msgSize;
624 _lastEntry = lastEntry;
625 _newSet = newSet;
626
627 super::free();
628
629 if( _pingMsg && _msgSize) {
630 if (_pingMsg->msgHdr.msgh_remote_port) {
631 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
632 }
633 IOFree(_pingMsg, _msgSize);
634 }
635
636 if( _lastEntry)
637 _lastEntry->release();
638
639 if( _newSet)
640 _newSet->release();
641 }
642
643 bool IOServiceUserNotification::_handler( void * target,
644 void * ref, IOService * newService, IONotifier * notifier )
645 {
646 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
647 }
648
649 bool IOServiceUserNotification::handler( void * ref,
650 IOService * newService )
651 {
652 unsigned int count;
653 kern_return_t kr;
654 ipc_port_t port = NULL;
655 bool sendPing = false;
656
657 IOTakeLock( lock );
658
659 count = newSet->getCount();
660 if( count < kMaxOutstanding) {
661
662 newSet->setObject( newService );
663 if( (sendPing = (armed && (0 == count))))
664 armed = false;
665 }
666
667 IOUnlock( lock );
668
669 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
670 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
671
672 if( sendPing) {
673 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
674 pingMsg->msgHdr.msgh_local_port = port;
675 else
676 pingMsg->msgHdr.msgh_local_port = NULL;
677
678 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
679 pingMsg->msgHdr.msgh_size,
680 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
681 0);
682 if( port)
683 iokit_release_port( port );
684
685 if( KERN_SUCCESS != kr)
686 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
687 }
688
689 return( true );
690 }
691
692 OSObject * IOServiceUserNotification::getNextObject()
693 {
694 unsigned int count;
695 OSObject * result;
696
697 IOTakeLock( lock );
698
699 if( lastEntry)
700 lastEntry->release();
701
702 count = newSet->getCount();
703 if( count ) {
704 result = newSet->getObject( count - 1 );
705 result->retain();
706 newSet->removeObject( count - 1);
707 } else {
708 result = 0;
709 armed = true;
710 }
711 lastEntry = result;
712
713 IOUnlock( lock );
714
715 return( result );
716 }
717
718 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
719
720 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
721
722 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
723
724 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
725 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
726 bool client64 )
727 {
728 if( !super::init())
729 return( false );
730
731 if (referenceSize > sizeof(OSAsyncReference64))
732 return( false );
733
734 clientIs64 = client64;
735
736 owningPID = proc_selfpid();
737
738 extraSize += sizeof(IOServiceInterestContent64);
739 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
740 pingMsg = (PingMsg *) IOMalloc( msgSize);
741 if( !pingMsg)
742 return( false );
743
744 bzero( pingMsg, msgSize);
745
746 pingMsg->msgHdr.msgh_remote_port = port;
747 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
748 | MACH_MSGH_BITS(
749 MACH_MSG_TYPE_COPY_SEND /*remote*/,
750 MACH_MSG_TYPE_MAKE_SEND /*local*/);
751 pingMsg->msgHdr.msgh_size = msgSize;
752 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
753
754 pingMsg->msgBody.msgh_descriptor_count = 1;
755
756 pingMsg->ports[0].name = 0;
757 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
758 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
759
760 pingMsg->notifyHeader.size = extraSize;
761 pingMsg->notifyHeader.type = type;
762 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
763
764 return( true );
765 }
766
767 void IOServiceMessageUserNotification::free( void )
768 {
769 PingMsg * _pingMsg;
770 vm_size_t _msgSize;
771
772 _pingMsg = pingMsg;
773 _msgSize = msgSize;
774
775 super::free();
776
777 if( _pingMsg && _msgSize) {
778 if (_pingMsg->msgHdr.msgh_remote_port) {
779 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
780 }
781 IOFree( _pingMsg, _msgSize);
782 }
783 }
784
785 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
786 UInt32 messageType, IOService * provider,
787 void * argument, vm_size_t argSize )
788 {
789 return( ((IOServiceMessageUserNotification *) target)->handler(
790 ref, messageType, provider, argument, argSize));
791 }
792
793 IOReturn IOServiceMessageUserNotification::handler( void * ref,
794 UInt32 messageType, IOService * provider,
795 void * messageArgument, vm_size_t argSize )
796 {
797 kern_return_t kr;
798 ipc_port_t thisPort, providerPort;
799 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
800 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
801 // == pingMsg->notifyHeader.content;
802
803 if (kIOMessageCopyClientID == messageType)
804 {
805 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
806 return (kIOReturnSuccess);
807 }
808
809 data->messageType = messageType;
810
811 if( argSize == 0)
812 {
813 data->messageArgument[0] = (io_user_reference_t) messageArgument;
814 if (clientIs64)
815 argSize = sizeof(data->messageArgument[0]);
816 else
817 {
818 data->messageArgument[0] |= (data->messageArgument[0] << 32);
819 argSize = sizeof(uint32_t);
820 }
821 }
822 else
823 {
824 if( argSize > kIOUserNotifyMaxMessageSize)
825 argSize = kIOUserNotifyMaxMessageSize;
826 bcopy( messageArgument, data->messageArgument, argSize );
827 }
828
829 // adjust message size for ipc restrictions
830 natural_t type;
831 type = pingMsg->notifyHeader.type;
832 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
833 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
834 pingMsg->notifyHeader.type = type;
835 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
836
837 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
838 + sizeof( IOServiceInterestContent64 )
839 - sizeof( data->messageArgument)
840 + argSize;
841
842 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
843 pingMsg->ports[0].name = providerPort;
844 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
845 pingMsg->msgHdr.msgh_local_port = thisPort;
846 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
847 pingMsg->msgHdr.msgh_size,
848 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
849 0);
850 if( thisPort)
851 iokit_release_port( thisPort );
852 if( providerPort)
853 iokit_release_port( providerPort );
854
855 if( KERN_SUCCESS != kr)
856 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
857
858 return( kIOReturnSuccess );
859 }
860
861 OSObject * IOServiceMessageUserNotification::getNextObject()
862 {
863 return( 0 );
864 }
865
866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
867
868 #undef super
869 #define super IOService
870 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
871
872 void IOUserClient::initialize( void )
873 {
874 gIOObjectPortLock = IOLockAlloc();
875
876 assert( gIOObjectPortLock );
877 }
878
879 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
880 mach_port_t wakePort,
881 void *callback, void *refcon)
882 {
883 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
884 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
885 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
886 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
887 }
888
889 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
890 mach_port_t wakePort,
891 mach_vm_address_t callback, io_user_reference_t refcon)
892 {
893 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
894 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
895 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
896 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
897 }
898
899 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
900 mach_port_t wakePort,
901 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
902 {
903 setAsyncReference64(asyncRef, wakePort, callback, refcon);
904 if (vm_map_is_64bit(get_task_map(task))) {
905 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
906 }
907 }
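/*
 * Illustrative sketch (not part of the original source): a typical
 * IOUserClient subclass captures the async reference handed to
 * externalMethod() and completes it later with sendAsyncResult64(). The
 * class name MyUserClient and the ivar fAsyncRef (an OSAsyncReference64)
 * are hypothetical.
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args,
 *       IOExternalMethodDispatch * dispatch, OSObject * target, void * ref)
 *   {
 *       if (args->asyncWakePort != MACH_PORT_NULL) {
 *           // The reference is only valid for the duration of this call;
 *           // copy its asyncReferenceCount entries for later completion.
 *           bcopy(args->asyncReference, fAsyncRef,
 *                 args->asyncReferenceCount * sizeof(io_user_reference_t));
 *       }
 *       return super::externalMethod(selector, args, dispatch, target, ref);
 *   }
 *
 *   // Later, when the operation finishes:
 *   io_user_reference_t results[1] = { byteCount };
 *   sendAsyncResult64(fAsyncRef, kIOReturnSuccess, results, 1);
 */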
908
909 static OSDictionary * CopyConsoleUser(UInt32 uid)
910 {
911 OSArray * array;
912 OSDictionary * user = 0;
913
914 if ((array = OSDynamicCast(OSArray,
915 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
916 {
917 for (unsigned int idx = 0;
918 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
919 idx++) {
920 OSNumber * num;
921
922 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
923 && (uid == num->unsigned32BitValue())) {
924 user->retain();
925 break;
926 }
927 }
928 array->release();
929 }
930 return user;
931 }
932
933 static OSDictionary * CopyUserOnConsole(void)
934 {
935 OSArray * array;
936 OSDictionary * user = 0;
937
938 if ((array = OSDynamicCast(OSArray,
939 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
940 {
941 for (unsigned int idx = 0;
942 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
943 idx++)
944 {
945 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
946 {
947 user->retain();
948 break;
949 }
950 }
951 array->release();
952 }
953 return (user);
954 }
955
956 IOReturn IOUserClient::clientHasAuthorization( task_t task,
957 IOService * service )
958 {
959 proc_t p;
960
961 p = (proc_t) get_bsdtask_info(task);
962 if (p)
963 {
964 uint64_t authorizationID;
965
966 authorizationID = proc_uniqueid(p);
967 if (authorizationID)
968 {
969 if (service->getAuthorizationID() == authorizationID)
970 {
971 return (kIOReturnSuccess);
972 }
973 }
974 }
975
976 return (kIOReturnNotPermitted);
977 }
978
979 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
980 const char * privilegeName )
981 {
982 kern_return_t kr;
983 security_token_t token;
984 mach_msg_type_number_t count;
985 task_t task;
986 OSDictionary * user;
987 bool secureConsole;
988
989
990 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
991 sizeof(kIOClientPrivilegeForeground)))
992 {
993 if (task_is_gpu_denied(current_task()))
994 return (kIOReturnNotPrivileged);
995 else
996 return (kIOReturnSuccess);
997 }
998
999 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1000 sizeof(kIOClientPrivilegeConsoleSession)))
1001 {
1002 kauth_cred_t cred;
1003 proc_t p;
1004
1005 task = (task_t) securityToken;
1006 if (!task)
1007 task = current_task();
1008 p = (proc_t) get_bsdtask_info(task);
1009 kr = kIOReturnNotPrivileged;
1010
1011 if (p && (cred = kauth_cred_proc_ref(p)))
1012 {
1013 user = CopyUserOnConsole();
1014 if (user)
1015 {
1016 OSNumber * num;
1017 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1018 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1019 {
1020 kr = kIOReturnSuccess;
1021 }
1022 user->release();
1023 }
1024 kauth_cred_unref(&cred);
1025 }
1026 return (kr);
1027 }
1028
1029 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1030 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1031 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1032 else
1033 task = (task_t)securityToken;
1034
1035 count = TASK_SECURITY_TOKEN_COUNT;
1036 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1037
1038 if (KERN_SUCCESS != kr)
1039 {}
1040 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1041 sizeof(kIOClientPrivilegeAdministrator))) {
1042 if (0 != token.val[0])
1043 kr = kIOReturnNotPrivileged;
1044 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1045 sizeof(kIOClientPrivilegeLocalUser))) {
1046 user = CopyConsoleUser(token.val[0]);
1047 if ( user )
1048 user->release();
1049 else
1050 kr = kIOReturnNotPrivileged;
1051 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1052 sizeof(kIOClientPrivilegeConsoleUser))) {
1053 user = CopyConsoleUser(token.val[0]);
1054 if ( user ) {
1055 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1056 kr = kIOReturnNotPrivileged;
1057 else if ( secureConsole ) {
1058 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1059 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1060 kr = kIOReturnNotPrivileged;
1061 }
1062 user->release();
1063 }
1064 else
1065 kr = kIOReturnNotPrivileged;
1066 } else
1067 kr = kIOReturnUnsupported;
1068
1069 return (kr);
1070 }
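/*
 * Illustrative sketch (not part of the original source): drivers commonly
 * gate privileged selectors on the administrator check above, passing the
 * owning task captured in initWithTask() as the security token. fOwningTask
 * is a hypothetical ivar.
 *
 *   if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(
 *           fOwningTask, kIOClientPrivilegeAdministrator))
 *       return kIOReturnNotPrivileged;
 */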
1071
1072 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1073 const char * entitlement )
1074 {
1075 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1076
1077 proc_t p = NULL;
1078 pid_t pid = 0;
1079 char procname[MAXCOMLEN + 1] = "";
1080 size_t len = 0;
1081 void *entitlements_blob = NULL;
1082 char *entitlements_data = NULL;
1083 OSObject *entitlements_obj = NULL;
1084 OSDictionary *entitlements = NULL;
1085 OSString *errorString = NULL;
1086 OSObject *value = NULL;
1087
1088 p = (proc_t)get_bsdtask_info(task);
1089 if (p == NULL)
1090 goto fail;
1091 pid = proc_pid(p);
1092 proc_name(pid, procname, (int)sizeof(procname));
1093
1094 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1095 goto fail;
1096
1097 if (len <= offsetof(CS_GenericBlob, data))
1098 goto fail;
1099
1100 /*
1101 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1102 * we'll try to parse in the kernel.
1103 */
1104 len -= offsetof(CS_GenericBlob, data);
1105 if (len > MAX_ENTITLEMENTS_LEN) {
1106 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1107 goto fail;
1108 }
1109
1110 /*
1111 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1112 * what is stored in the entitlements blob. Copy the string and
1113 * terminate it.
1114 */
1115 entitlements_data = (char *)IOMalloc(len + 1);
1116 if (entitlements_data == NULL)
1117 goto fail;
1118 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1119 entitlements_data[len] = '\0';
1120
1121 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1122 if (errorString != NULL) {
1123 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1124 goto fail;
1125 }
1126 if (entitlements_obj == NULL)
1127 goto fail;
1128
1129 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1130 if (entitlements == NULL)
1131 goto fail;
1132
1133 /* Fetch the entitlement value from the dictionary. */
1134 value = entitlements->getObject(entitlement);
1135 if (value != NULL)
1136 value->retain();
1137
1138 fail:
1139 if (entitlements_data != NULL)
1140 IOFree(entitlements_data, len + 1);
1141 if (entitlements_obj != NULL)
1142 entitlements_obj->release();
1143 if (errorString != NULL)
1144 errorString->release();
1145 return value;
1146 }
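/*
 * Illustrative sketch (not part of the original source): a user client can
 * gate access on an entitlement using the helper above. The entitlement
 * string is a hypothetical example.
 *
 *   OSObject * obj = IOUserClient::copyClientEntitlement(owningTask,
 *                        "com.example.driver-access");
 *   bool entitled = (obj == kOSBooleanTrue);
 *   if (obj) obj->release();
 *   if (!entitled) return false;   // e.g. from initWithTask()
 */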
1147
1148 bool IOUserClient::init()
1149 {
1150 if (getPropertyTable() || super::init())
1151 return reserve();
1152
1153 return false;
1154 }
1155
1156 bool IOUserClient::init(OSDictionary * dictionary)
1157 {
1158 if (getPropertyTable() || super::init(dictionary))
1159 return reserve();
1160
1161 return false;
1162 }
1163
1164 bool IOUserClient::initWithTask(task_t owningTask,
1165 void * securityID,
1166 UInt32 type )
1167 {
1168 if (getPropertyTable() || super::init())
1169 return reserve();
1170
1171 return false;
1172 }
1173
1174 bool IOUserClient::initWithTask(task_t owningTask,
1175 void * securityID,
1176 UInt32 type,
1177 OSDictionary * properties )
1178 {
1179 bool ok;
1180
1181 ok = super::init( properties );
1182 ok &= initWithTask( owningTask, securityID, type );
1183
1184 return( ok );
1185 }
1186
1187 bool IOUserClient::reserve()
1188 {
1189 if(!reserved) {
1190 reserved = IONew(ExpansionData, 1);
1191 if (!reserved) {
1192 return false;
1193 }
1194 }
1195 setTerminateDefer(NULL, true);
1196 IOStatisticsRegisterCounter();
1197
1198 return true;
1199 }
1200
1201 void IOUserClient::free()
1202 {
1203 if( mappings)
1204 mappings->release();
1205
1206 IOStatisticsUnregisterCounter();
1207
1208 if (reserved)
1209 IODelete(reserved, ExpansionData, 1);
1210
1211 super::free();
1212 }
1213
1214 IOReturn IOUserClient::clientDied( void )
1215 {
1216 return( clientClose());
1217 }
1218
1219 IOReturn IOUserClient::clientClose( void )
1220 {
1221 return( kIOReturnUnsupported );
1222 }
1223
1224 IOService * IOUserClient::getService( void )
1225 {
1226 return( 0 );
1227 }
1228
1229 IOReturn IOUserClient::registerNotificationPort(
1230 mach_port_t /* port */,
1231 UInt32 /* type */,
1232 UInt32 /* refCon */)
1233 {
1234 return( kIOReturnUnsupported);
1235 }
1236
1237 IOReturn IOUserClient::registerNotificationPort(
1238 mach_port_t port,
1239 UInt32 type,
1240 io_user_reference_t refCon)
1241 {
1242 return (registerNotificationPort(port, type, (UInt32) refCon));
1243 }
1244
1245 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1246 semaphore_t * semaphore )
1247 {
1248 return( kIOReturnUnsupported);
1249 }
1250
1251 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1252 {
1253 return( kIOReturnUnsupported);
1254 }
1255
1256 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1257 IOOptionBits * options,
1258 IOMemoryDescriptor ** memory )
1259 {
1260 return( kIOReturnUnsupported);
1261 }
1262
1263 #if !__LP64__
1264 IOMemoryMap * IOUserClient::mapClientMemory(
1265 IOOptionBits type,
1266 task_t task,
1267 IOOptionBits mapFlags,
1268 IOVirtualAddress atAddress )
1269 {
1270 return (NULL);
1271 }
1272 #endif
1273
1274 IOMemoryMap * IOUserClient::mapClientMemory64(
1275 IOOptionBits type,
1276 task_t task,
1277 IOOptionBits mapFlags,
1278 mach_vm_address_t atAddress )
1279 {
1280 IOReturn err;
1281 IOOptionBits options = 0;
1282 IOMemoryDescriptor * memory;
1283 IOMemoryMap * map = 0;
1284
1285 err = clientMemoryForType( (UInt32) type, &options, &memory );
1286
1287 if( memory && (kIOReturnSuccess == err)) {
1288
1289 options = (options & ~kIOMapUserOptionsMask)
1290 | (mapFlags & kIOMapUserOptionsMask);
1291 map = memory->createMappingInTask( task, atAddress, options );
1292 memory->release();
1293 }
1294
1295 return( map );
1296 }
1297
1298 IOReturn IOUserClient::exportObjectToClient(task_t task,
1299 OSObject *obj, io_object_t *clientObj)
1300 {
1301 mach_port_name_t name;
1302
1303 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1304
1305 *(mach_port_name_t *)clientObj = name;
1306 return kIOReturnSuccess;
1307 }
1308
1309 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1310 {
1311 return( 0 );
1312 }
1313
1314 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1315 {
1316 return( 0 );
1317 }
1318
1319 IOExternalMethod * IOUserClient::
1320 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1321 {
1322 IOExternalMethod *method = getExternalMethodForIndex(index);
1323
1324 if (method)
1325 *targetP = (IOService *) method->object;
1326
1327 return method;
1328 }
1329
1330 IOExternalAsyncMethod * IOUserClient::
1331 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1332 {
1333 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1334
1335 if (method)
1336 *targetP = (IOService *) method->object;
1337
1338 return method;
1339 }
1340
1341 IOExternalTrap * IOUserClient::
1342 getExternalTrapForIndex(UInt32 index)
1343 {
1344 return NULL;
1345 }
1346
1347 IOExternalTrap * IOUserClient::
1348 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1349 {
1350 IOExternalTrap *trap = getExternalTrapForIndex(index);
1351
1352 if (trap) {
1353 *targetP = trap->object;
1354 }
1355
1356 return trap;
1357 }
1358
1359 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1360 {
1361 mach_port_t port;
1362 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1363
1364 if (MACH_PORT_NULL != port)
1365 iokit_release_port_send(port);
1366
1367 return (kIOReturnSuccess);
1368 }
1369
1370 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1371 {
1372 if (MACH_PORT_NULL != port)
1373 iokit_release_port_send(port);
1374
1375 return (kIOReturnSuccess);
1376 }
1377
1378 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1379 IOReturn result, void *args[], UInt32 numArgs)
1380 {
1381 OSAsyncReference64 reference64;
1382 io_user_reference_t args64[kMaxAsyncArgs];
1383 unsigned int idx;
1384
1385 if (numArgs > kMaxAsyncArgs)
1386 return kIOReturnMessageTooLarge;
1387
1388 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1389 reference64[idx] = REF64(reference[idx]);
1390
1391 for (idx = 0; idx < numArgs; idx++)
1392 args64[idx] = REF64(args[idx]);
1393
1394 return (sendAsyncResult64(reference64, result, args64, numArgs));
1395 }
1396
1397 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1398 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1399 {
1400 return _sendAsyncResult64(reference, result, args, numArgs, options);
1401 }
1402
1403 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1404 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1405 {
1406 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1407 }
1408
1409 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1410 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1411 {
1412 struct ReplyMsg
1413 {
1414 mach_msg_header_t msgHdr;
1415 union
1416 {
1417 struct
1418 {
1419 OSNotificationHeader notifyHdr;
1420 IOAsyncCompletionContent asyncContent;
1421 uint32_t args[kMaxAsyncArgs];
1422 } msg32;
1423 struct
1424 {
1425 OSNotificationHeader64 notifyHdr;
1426 IOAsyncCompletionContent asyncContent;
1427 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1428 } msg64;
1429 } m;
1430 };
1431 ReplyMsg replyMsg;
1432 mach_port_t replyPort;
1433 kern_return_t kr;
1434
1435 // If no reply port, do nothing.
1436 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1437 if (replyPort == MACH_PORT_NULL)
1438 return kIOReturnSuccess;
1439
1440 if (numArgs > kMaxAsyncArgs)
1441 return kIOReturnMessageTooLarge;
1442
1443 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1444 0 /*local*/);
1445 replyMsg.msgHdr.msgh_remote_port = replyPort;
1446 replyMsg.msgHdr.msgh_local_port = 0;
1447 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1448 if (kIOUCAsync64Flag & reference[0])
1449 {
1450 replyMsg.msgHdr.msgh_size =
1451 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1452 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1453 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1454 + numArgs * sizeof(io_user_reference_t);
1455 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1456 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1457
1458 replyMsg.m.msg64.asyncContent.result = result;
1459 if (numArgs)
1460 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1461 }
1462 else
1463 {
1464 unsigned int idx;
1465
1466 replyMsg.msgHdr.msgh_size =
1467 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1468 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1469
1470 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1471 + numArgs * sizeof(uint32_t);
1472 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1473
1474 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1475 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1476
1477 replyMsg.m.msg32.asyncContent.result = result;
1478
1479 for (idx = 0; idx < numArgs; idx++)
1480 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1481 }
1482
1483 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1484 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1485 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1486 } else {
1487 /* Fail on full queue. */
1488 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1489 replyMsg.msgHdr.msgh_size);
1490 }
1491 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1492 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1493 return kr;
1494 }
1495
1496
1497 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1498
1499 extern "C" {
1500
1501 #define CHECK(cls,obj,out) \
1502 cls * out; \
1503 if( !(out = OSDynamicCast( cls, obj))) \
1504 return( kIOReturnBadArgument )
1505
1506 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1507
1508 /* Routine io_server_version */
1509 kern_return_t is_io_server_version(
1510 mach_port_t master_port,
1511 uint64_t *version)
1512 {
1513 *version = IOKIT_SERVER_VERSION;
1514 return (kIOReturnSuccess);
1515 }
1516
1517 /* Routine io_object_get_class */
1518 kern_return_t is_io_object_get_class(
1519 io_object_t object,
1520 io_name_t className )
1521 {
1522 const OSMetaClass* my_obj = NULL;
1523
1524 if( !object)
1525 return( kIOReturnBadArgument );
1526
1527 my_obj = object->getMetaClass();
1528 if (!my_obj) {
1529 return (kIOReturnNotFound);
1530 }
1531
1532 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1533 return( kIOReturnSuccess );
1534 }
1535
1536 /* Routine io_object_get_superclass */
1537 kern_return_t is_io_object_get_superclass(
1538 mach_port_t master_port,
1539 io_name_t obj_name,
1540 io_name_t class_name)
1541 {
1542 const OSMetaClass* my_obj = NULL;
1543 const OSMetaClass* superclass = NULL;
1544 const OSSymbol *my_name = NULL;
1545 const char *my_cstr = NULL;
1546
1547 if (!obj_name || !class_name)
1548 return (kIOReturnBadArgument);
1549
1550 if( master_port != master_device_port)
1551 return( kIOReturnNotPrivileged);
1552
1553 my_name = OSSymbol::withCString(obj_name);
1554
1555 if (my_name) {
1556 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1557 my_name->release();
1558 }
1559 if (my_obj) {
1560 superclass = my_obj->getSuperClass();
1561 }
1562
1563 if (!superclass) {
1564 return( kIOReturnNotFound );
1565 }
1566
1567 my_cstr = superclass->getClassName();
1568
1569 if (my_cstr) {
1570 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1571 return( kIOReturnSuccess );
1572 }
1573 return (kIOReturnNotFound);
1574 }
1575
1576 /* Routine io_object_get_bundle_identifier */
1577 kern_return_t is_io_object_get_bundle_identifier(
1578 mach_port_t master_port,
1579 io_name_t obj_name,
1580 io_name_t bundle_name)
1581 {
1582 const OSMetaClass* my_obj = NULL;
1583 const OSSymbol *my_name = NULL;
1584 const OSSymbol *identifier = NULL;
1585 const char *my_cstr = NULL;
1586
1587 if (!obj_name || !bundle_name)
1588 return (kIOReturnBadArgument);
1589
1590 if( master_port != master_device_port)
1591 return( kIOReturnNotPrivileged);
1592
1593 my_name = OSSymbol::withCString(obj_name);
1594
1595 if (my_name) {
1596 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1597 my_name->release();
1598 }
1599
1600 if (my_obj) {
1601 identifier = my_obj->getKmodName();
1602 }
1603 if (!identifier) {
1604 return( kIOReturnNotFound );
1605 }
1606
1607 my_cstr = identifier->getCStringNoCopy();
1608 if (my_cstr) {
1609 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1610 return( kIOReturnSuccess );
1611 }
1612
1613 return (kIOReturnBadArgument);
1614 }
1615
1616 /* Routine io_object_conforms_to */
1617 kern_return_t is_io_object_conforms_to(
1618 io_object_t object,
1619 io_name_t className,
1620 boolean_t *conforms )
1621 {
1622 if( !object)
1623 return( kIOReturnBadArgument );
1624
1625 *conforms = (0 != object->metaCast( className ));
1626 return( kIOReturnSuccess );
1627 }
1628
1629 /* Routine io_object_get_retain_count */
1630 kern_return_t is_io_object_get_retain_count(
1631 io_object_t object,
1632 uint32_t *retainCount )
1633 {
1634 if( !object)
1635 return( kIOReturnBadArgument );
1636
1637 *retainCount = object->getRetainCount();
1638 return( kIOReturnSuccess );
1639 }
1640
1641 /* Routine io_iterator_next */
1642 kern_return_t is_io_iterator_next(
1643 io_object_t iterator,
1644 io_object_t *object )
1645 {
1646 OSObject * obj;
1647
1648 CHECK( OSIterator, iterator, iter );
1649
1650 obj = iter->getNextObject();
1651 if( obj) {
1652 obj->retain();
1653 *object = obj;
1654 return( kIOReturnSuccess );
1655 } else
1656 return( kIOReturnNoDevice );
1657 }
1658
1659 /* Routine io_iterator_reset */
1660 kern_return_t is_io_iterator_reset(
1661 io_object_t iterator )
1662 {
1663 CHECK( OSIterator, iterator, iter );
1664
1665 iter->reset();
1666
1667 return( kIOReturnSuccess );
1668 }
1669
1670 /* Routine io_iterator_is_valid */
1671 kern_return_t is_io_iterator_is_valid(
1672 io_object_t iterator,
1673 boolean_t *is_valid )
1674 {
1675 CHECK( OSIterator, iterator, iter );
1676
1677 *is_valid = iter->isValid();
1678
1679 return( kIOReturnSuccess );
1680 }
1681
1682
1683 static kern_return_t internal_io_service_match_property_table(
1684 io_service_t _service,
1685 const char * matching,
1686 mach_msg_type_number_t matching_size,
1687 boolean_t *matches)
1688 {
1689 CHECK( IOService, _service, service );
1690
1691 kern_return_t kr;
1692 OSObject * obj;
1693 OSDictionary * dict;
1694
1695 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1696 : OSUnserializeXML(matching);
1697 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1698 *matches = service->passiveMatch( dict );
1699 kr = kIOReturnSuccess;
1700 } else
1701 kr = kIOReturnBadArgument;
1702
1703 if( obj)
1704 obj->release();
1705
1706 return( kr );
1707 }
1708
1709 /* Routine io_service_match_property_table */
1710 kern_return_t is_io_service_match_property_table(
1711 io_service_t service,
1712 io_string_t matching,
1713 boolean_t *matches )
1714 {
1715 return (internal_io_service_match_property_table(service, matching, 0, matches));
1716 }
1717
1718
1719 /* Routine io_service_match_property_table_ool */
1720 kern_return_t is_io_service_match_property_table_ool(
1721 io_object_t service,
1722 io_buf_ptr_t matching,
1723 mach_msg_type_number_t matchingCnt,
1724 kern_return_t *result,
1725 boolean_t *matches )
1726 {
1727 kern_return_t kr;
1728 vm_offset_t data;
1729 vm_map_offset_t map_data;
1730
1731 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1732 data = CAST_DOWN(vm_offset_t, map_data);
1733
1734 if( KERN_SUCCESS == kr) {
1735 // must return success after vm_map_copyout() succeeds
1736 *result = internal_io_service_match_property_table(service,
1737 (const char *)data, matchingCnt, matches );
1738 vm_deallocate( kernel_map, data, matchingCnt );
1739 }
1740
1741 return( kr );
1742 }
1743
1744 /* Routine io_service_match_property_table_bin */
1745 kern_return_t is_io_service_match_property_table_bin(
1746 io_object_t service,
1747 io_struct_inband_t matching,
1748 mach_msg_type_number_t matchingCnt,
1749 boolean_t *matches)
1750 {
1751 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
1752 }
1753
1754 static kern_return_t internal_io_service_get_matching_services(
1755 mach_port_t master_port,
1756 const char * matching,
1757 mach_msg_type_number_t matching_size,
1758 io_iterator_t *existing )
1759 {
1760 kern_return_t kr;
1761 OSObject * obj;
1762 OSDictionary * dict;
1763
1764 if( master_port != master_device_port)
1765 return( kIOReturnNotPrivileged);
1766
1767 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1768 : OSUnserializeXML(matching);
1769 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1770 *existing = IOService::getMatchingServices( dict );
1771 kr = kIOReturnSuccess;
1772 } else
1773 kr = kIOReturnBadArgument;
1774
1775 if( obj)
1776 obj->release();
1777
1778 return( kr );
1779 }
1780
1781 /* Routine io_service_get_matching_services */
1782 kern_return_t is_io_service_get_matching_services(
1783 mach_port_t master_port,
1784 io_string_t matching,
1785 io_iterator_t *existing )
1786 {
1787 return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
1788 }
1789
1790 /* Routine io_service_get_matching_services_ool */
1791 kern_return_t is_io_service_get_matching_services_ool(
1792 mach_port_t master_port,
1793 io_buf_ptr_t matching,
1794 mach_msg_type_number_t matchingCnt,
1795 kern_return_t *result,
1796 io_object_t *existing )
1797 {
1798 kern_return_t kr;
1799 vm_offset_t data;
1800 vm_map_offset_t map_data;
1801
1802 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1803 data = CAST_DOWN(vm_offset_t, map_data);
1804
1805 if( KERN_SUCCESS == kr) {
1806 // must return success after vm_map_copyout() succeeds
1807 *result = internal_io_service_get_matching_services(master_port,
1808 (const char *) data, matchingCnt, existing);
1809 vm_deallocate( kernel_map, data, matchingCnt );
1810 }
1811
1812 return( kr );
1813 }
1814
1815 /* Routine io_service_get_matching_services_bin */
1816 kern_return_t is_io_service_get_matching_services_bin(
1817 mach_port_t master_port,
1818 io_struct_inband_t matching,
1819 mach_msg_type_number_t matchingCnt,
1820 io_object_t *existing)
1821 {
1822 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
1823 }
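/*
 * Illustrative sketch (not part of the original source): the user-space call
 * that typically ends up in the matching routines above is:
 *
 *   CFMutableDictionaryRef matching = IOServiceMatching("IOMedia");
 *   io_iterator_t iter = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                                                   matching, &iter);
 *   // IOServiceGetMatchingServices consumes a reference to 'matching'.
 */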
1824
1825
1826 static kern_return_t internal_io_service_get_matching_service(
1827 mach_port_t master_port,
1828 const char * matching,
1829 mach_msg_type_number_t matching_size,
1830 io_service_t *service )
1831 {
1832 kern_return_t kr;
1833 OSObject * obj;
1834 OSDictionary * dict;
1835
1836 if( master_port != master_device_port)
1837 return( kIOReturnNotPrivileged);
1838
1839 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1840 : OSUnserializeXML(matching);
1841 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1842 *service = IOService::copyMatchingService( dict );
1843 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
1844 } else
1845 kr = kIOReturnBadArgument;
1846
1847 if( obj)
1848 obj->release();
1849
1850 return( kr );
1851 }
1852
1853 /* Routine io_service_get_matching_service */
1854 kern_return_t is_io_service_get_matching_service(
1855 mach_port_t master_port,
1856 io_string_t matching,
1857 io_service_t *service )
1858 {
1859 return (internal_io_service_get_matching_service(master_port, matching, 0, service));
1860 }
1861
1862 /* Routine io_service_get_matching_services_ool */
1863 kern_return_t is_io_service_get_matching_service_ool(
1864 mach_port_t master_port,
1865 io_buf_ptr_t matching,
1866 mach_msg_type_number_t matchingCnt,
1867 kern_return_t *result,
1868 io_object_t *service )
1869 {
1870 kern_return_t kr;
1871 vm_offset_t data;
1872 vm_map_offset_t map_data;
1873
1874 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1875 data = CAST_DOWN(vm_offset_t, map_data);
1876
1877 if( KERN_SUCCESS == kr) {
1878 // must return success after vm_map_copyout() succeeds
1879 *result = internal_io_service_get_matching_service(master_port,
1880 (const char *) data, matchingCnt, service );
1881 vm_deallocate( kernel_map, data, matchingCnt );
1882 }
1883
1884 return( kr );
1885 }
1886
1887 /* Routine io_service_get_matching_service_bin */
1888 kern_return_t is_io_service_get_matching_service_bin(
1889 mach_port_t master_port,
1890 io_struct_inband_t matching,
1891 mach_msg_type_number_t matchingCnt,
1892 io_object_t *service)
1893 {
1894 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
1895 }
1896
1897 static kern_return_t internal_io_service_add_notification(
1898 mach_port_t master_port,
1899 io_name_t notification_type,
1900 const char * matching,
1901 size_t matching_size,
1902 mach_port_t port,
1903 void * reference,
1904 vm_size_t referenceSize,
1905 bool client64,
1906 io_object_t * notification )
1907 {
1908 IOServiceUserNotification * userNotify = 0;
1909 IONotifier * notify = 0;
1910 const OSSymbol * sym;
1911 OSDictionary * dict;
1912 IOReturn err;
1913 unsigned long int userMsgType;
1914
1915 if( master_port != master_device_port)
1916 return( kIOReturnNotPrivileged);
1917
1918 do {
1919 err = kIOReturnNoResources;
1920
1921 if( !(sym = OSSymbol::withCString( notification_type )))
1922 err = kIOReturnNoResources;
1923
1924 if (matching_size)
1925 {
1926 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
1927 }
1928 else
1929 {
1930 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
1931 }
1932
1933 if (!dict) {
1934 err = kIOReturnBadArgument;
1935 continue;
1936 }
1937
1938 if( (sym == gIOPublishNotification)
1939 || (sym == gIOFirstPublishNotification))
1940 userMsgType = kIOServicePublishNotificationType;
1941 else if( (sym == gIOMatchedNotification)
1942 || (sym == gIOFirstMatchNotification))
1943 userMsgType = kIOServiceMatchedNotificationType;
1944 else if( sym == gIOTerminatedNotification)
1945 userMsgType = kIOServiceTerminatedNotificationType;
1946 else
1947 userMsgType = kLastIOKitNotificationType;
1948
1949 userNotify = new IOServiceUserNotification;
1950
1951 if( userNotify && !userNotify->init( port, userMsgType,
1952 reference, referenceSize, client64)) {
1953 iokit_release_port_send(port);
1954 userNotify->release();
1955 userNotify = 0;
1956 }
1957 if( !userNotify)
1958 continue;
1959
1960 notify = IOService::addMatchingNotification( sym, dict,
1961 &userNotify->_handler, userNotify );
1962 if( notify) {
1963 *notification = userNotify;
1964 userNotify->setNotification( notify );
1965 err = kIOReturnSuccess;
1966 } else
1967 err = kIOReturnUnsupported;
1968
1969 } while( false );
1970
1971 if( sym)
1972 sym->release();
1973 if( dict)
1974 dict->release();
1975
1976 return( err );
1977 }
1978
1979
1980 /* Routine io_service_add_notification */
1981 kern_return_t is_io_service_add_notification(
1982 mach_port_t master_port,
1983 io_name_t notification_type,
1984 io_string_t matching,
1985 mach_port_t port,
1986 io_async_ref_t reference,
1987 mach_msg_type_number_t referenceCnt,
1988 io_object_t * notification )
1989 {
1990 return (internal_io_service_add_notification(master_port, notification_type,
1991 matching, 0, port, &reference[0], sizeof(io_async_ref_t),
1992 false, notification));
1993 }
1994
1995 /* Routine io_service_add_notification_64 */
1996 kern_return_t is_io_service_add_notification_64(
1997 mach_port_t master_port,
1998 io_name_t notification_type,
1999 io_string_t matching,
2000 mach_port_t wake_port,
2001 io_async_ref64_t reference,
2002 mach_msg_type_number_t referenceCnt,
2003 io_object_t *notification )
2004 {
2005 return (internal_io_service_add_notification(master_port, notification_type,
2006 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t),
2007 true, notification));
2008 }
2009
2010 /* Routine io_service_add_notification_bin */
2011 kern_return_t is_io_service_add_notification_bin
2012 (
2013 mach_port_t master_port,
2014 io_name_t notification_type,
2015 io_struct_inband_t matching,
2016 mach_msg_type_number_t matchingCnt,
2017 mach_port_t wake_port,
2018 io_async_ref_t reference,
2019 mach_msg_type_number_t referenceCnt,
2020 io_object_t *notification)
2021 {
2022 return (internal_io_service_add_notification(master_port, notification_type,
2023 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2024 false, notification));
2025 }
2026
2027 /* Routine io_service_add_notification_bin_64 */
2028 kern_return_t is_io_service_add_notification_bin_64
2029 (
2030 mach_port_t master_port,
2031 io_name_t notification_type,
2032 io_struct_inband_t matching,
2033 mach_msg_type_number_t matchingCnt,
2034 mach_port_t wake_port,
2035 io_async_ref64_t reference,
2036 mach_msg_type_number_t referenceCnt,
2037 io_object_t *notification)
2038 {
2039 return (internal_io_service_add_notification(master_port, notification_type,
2040 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2041 true, notification));
2042 }
2043
2044 static kern_return_t internal_io_service_add_notification_ool(
2045 mach_port_t master_port,
2046 io_name_t notification_type,
2047 io_buf_ptr_t matching,
2048 mach_msg_type_number_t matchingCnt,
2049 mach_port_t wake_port,
2050 void * reference,
2051 vm_size_t referenceSize,
2052 bool client64,
2053 kern_return_t *result,
2054 io_object_t *notification )
2055 {
2056 kern_return_t kr;
2057 vm_offset_t data;
2058 vm_map_offset_t map_data;
2059
2060 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2061 data = CAST_DOWN(vm_offset_t, map_data);
2062
2063 if( KERN_SUCCESS == kr) {
2064 // must return success after vm_map_copyout() succeeds
2065 *result = internal_io_service_add_notification( master_port, notification_type,
2066 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2067 vm_deallocate( kernel_map, data, matchingCnt );
2068 }
2069
2070 return( kr );
2071 }
2072
2073 /* Routine io_service_add_notification_ool */
2074 kern_return_t is_io_service_add_notification_ool(
2075 mach_port_t master_port,
2076 io_name_t notification_type,
2077 io_buf_ptr_t matching,
2078 mach_msg_type_number_t matchingCnt,
2079 mach_port_t wake_port,
2080 io_async_ref_t reference,
2081 mach_msg_type_number_t referenceCnt,
2082 kern_return_t *result,
2083 io_object_t *notification )
2084 {
2085 return (internal_io_service_add_notification_ool(master_port, notification_type,
2086 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2087 false, result, notification));
2088 }
2089
2090 /* Routine io_service_add_notification_ool_64 */
2091 kern_return_t is_io_service_add_notification_ool_64(
2092 mach_port_t master_port,
2093 io_name_t notification_type,
2094 io_buf_ptr_t matching,
2095 mach_msg_type_number_t matchingCnt,
2096 mach_port_t wake_port,
2097 io_async_ref64_t reference,
2098 mach_msg_type_number_t referenceCnt,
2099 kern_return_t *result,
2100 io_object_t *notification )
2101 {
2102 return (internal_io_service_add_notification_ool(master_port, notification_type,
2103 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2104 true, result, notification));
2105 }
2106
2107 /* Routine io_service_add_notification_old */
2108 kern_return_t is_io_service_add_notification_old(
2109 mach_port_t master_port,
2110 io_name_t notification_type,
2111 io_string_t matching,
2112 mach_port_t port,
2113 // for binary compatibility reasons, this must be natural_t for ILP32
2114 natural_t ref,
2115 io_object_t * notification )
2116 {
2117 return( is_io_service_add_notification( master_port, notification_type,
2118 matching, port, &ref, 1, notification ));
2119 }
2120
2121
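// Helper for the interest-notification MIG routines below: wraps the caller's
// port in an IOServiceMessageUserNotification and registers it with
// IOService::registerInterest() for the requested interest type. If the
// notification object cannot be initialized, the send right handed in by MIG
// is released here.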
2122 static kern_return_t internal_io_service_add_interest_notification(
2123 io_object_t _service,
2124 io_name_t type_of_interest,
2125 mach_port_t port,
2126 void * reference,
2127 vm_size_t referenceSize,
2128 bool client64,
2129 io_object_t * notification )
2130 {
2131
2132 IOServiceMessageUserNotification * userNotify = 0;
2133 IONotifier * notify = 0;
2134 const OSSymbol * sym;
2135 IOReturn err;
2136
2137 CHECK( IOService, _service, service );
2138
2139 err = kIOReturnNoResources;
2140 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2141
2142 userNotify = new IOServiceMessageUserNotification;
2143
2144 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2145 reference, referenceSize,
2146 kIOUserNotifyMaxMessageSize,
2147 client64 )) {
2148 iokit_release_port_send(port);
2149 userNotify->release();
2150 userNotify = 0;
2151 }
2152 if( !userNotify)
2153 continue;
2154
2155 notify = service->registerInterest( sym,
2156 &userNotify->_handler, userNotify );
2157 if( notify) {
2158 *notification = userNotify;
2159 userNotify->setNotification( notify );
2160 err = kIOReturnSuccess;
2161 } else
2162 err = kIOReturnUnsupported;
2163
2164 sym->release();
2165
2166 } while( false );
2167
2168 return( err );
2169 }
2170
2171 /* Routine io_service_add_interest_notification */
2172 kern_return_t is_io_service_add_interest_notification(
2173 io_object_t service,
2174 io_name_t type_of_interest,
2175 mach_port_t port,
2176 io_async_ref_t reference,
2177 mach_msg_type_number_t referenceCnt,
2178 io_object_t * notification )
2179 {
2180 return (internal_io_service_add_interest_notification(service, type_of_interest,
2181 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2182 }
2183
2184 /* Routine io_service_add_interest_notification_64 */
2185 kern_return_t is_io_service_add_interest_notification_64(
2186 io_object_t service,
2187 io_name_t type_of_interest,
2188 mach_port_t wake_port,
2189 io_async_ref64_t reference,
2190 mach_msg_type_number_t referenceCnt,
2191 io_object_t *notification )
2192 {
2193 return (internal_io_service_add_interest_notification(service, type_of_interest,
2194 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2195 }
2196
2197
2198 /* Routine io_service_acknowledge_notification */
2199 kern_return_t is_io_service_acknowledge_notification(
2200 io_object_t _service,
2201 natural_t notify_ref,
2202 natural_t response )
2203 {
2204 CHECK( IOService, _service, service );
2205
2206 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2207 (IOOptionBits) response ));
2208
2209 }
2210
2211 /* Routine io_connect_get_notification_semaphore */
2212 kern_return_t is_io_connect_get_notification_semaphore(
2213 io_connect_t connection,
2214 natural_t notification_type,
2215 semaphore_t *semaphore )
2216 {
2217 CHECK( IOUserClient, connection, client );
2218
2219 IOStatisticsClientCall();
2220 return( client->getNotificationSemaphore( (UInt32) notification_type,
2221 semaphore ));
2222 }
2223
2224 /* Routine io_registry_get_root_entry */
2225 kern_return_t is_io_registry_get_root_entry(
2226 mach_port_t master_port,
2227 io_object_t *root )
2228 {
2229 IORegistryEntry * entry;
2230
2231 if( master_port != master_device_port)
2232 return( kIOReturnNotPrivileged);
2233
2234 entry = IORegistryEntry::getRegistryRoot();
2235 if( entry)
2236 entry->retain();
2237 *root = entry;
2238
2239 return( kIOReturnSuccess );
2240 }
2241
2242 /* Routine io_registry_create_iterator */
2243 kern_return_t is_io_registry_create_iterator(
2244 mach_port_t master_port,
2245 io_name_t plane,
2246 uint32_t options,
2247 io_object_t *iterator )
2248 {
2249 if( master_port != master_device_port)
2250 return( kIOReturnNotPrivileged);
2251
2252 *iterator = IORegistryIterator::iterateOver(
2253 IORegistryEntry::getPlane( plane ), options );
2254
2255 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2256 }
2257
2258 /* Routine io_registry_entry_create_iterator */
2259 kern_return_t is_io_registry_entry_create_iterator(
2260 io_object_t registry_entry,
2261 io_name_t plane,
2262 uint32_t options,
2263 io_object_t *iterator )
2264 {
2265 CHECK( IORegistryEntry, registry_entry, entry );
2266
2267 *iterator = IORegistryIterator::iterateOver( entry,
2268 IORegistryEntry::getPlane( plane ), options );
2269
2270 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2271 }
2272
2273 /* Routine io_registry_iterator_enter_entry */
2274 kern_return_t is_io_registry_iterator_enter_entry(
2275 io_object_t iterator )
2276 {
2277 CHECK( IORegistryIterator, iterator, iter );
2278
2279 iter->enterEntry();
2280
2281 return( kIOReturnSuccess );
2282 }
2283
2284 /* Routine io_registry_iterator_exit_entry */
2285 kern_return_t is_io_registry_iterator_exit_entry(
2286 io_object_t iterator )
2287 {
2288 bool didIt;
2289
2290 CHECK( IORegistryIterator, iterator, iter );
2291
2292 didIt = iter->exitEntry();
2293
2294 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2295 }
2296
2297 /* Routine io_registry_entry_from_path */
2298 kern_return_t is_io_registry_entry_from_path(
2299 mach_port_t master_port,
2300 io_string_t path,
2301 io_object_t *registry_entry )
2302 {
2303 IORegistryEntry * entry;
2304
2305 if( master_port != master_device_port)
2306 return( kIOReturnNotPrivileged);
2307
2308 entry = IORegistryEntry::fromPath( path );
2309
2310 *registry_entry = entry;
2311
2312 return( kIOReturnSuccess );
2313 }
2314
2315 /* Routine io_registry_entry_in_plane */
2316 kern_return_t is_io_registry_entry_in_plane(
2317 io_object_t registry_entry,
2318 io_name_t plane,
2319 boolean_t *inPlane )
2320 {
2321 CHECK( IORegistryEntry, registry_entry, entry );
2322
2323 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2324
2325 return( kIOReturnSuccess );
2326 }
2327
2328
2329 /* Routine io_registry_entry_get_path */
2330 kern_return_t is_io_registry_entry_get_path(
2331 io_object_t registry_entry,
2332 io_name_t plane,
2333 io_string_t path )
2334 {
2335 int length;
2336 CHECK( IORegistryEntry, registry_entry, entry );
2337
2338 length = sizeof( io_string_t);
2339 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2340 return( kIOReturnSuccess );
2341 else
2342 return( kIOReturnBadArgument );
2343 }
2344
2345
2346 /* Routine io_registry_entry_get_name */
2347 kern_return_t is_io_registry_entry_get_name(
2348 io_object_t registry_entry,
2349 io_name_t name )
2350 {
2351 CHECK( IORegistryEntry, registry_entry, entry );
2352
2353 strncpy( name, entry->getName(), sizeof( io_name_t));
2354
2355 return( kIOReturnSuccess );
2356 }
2357
2358 /* Routine io_registry_entry_get_name_in_plane */
2359 kern_return_t is_io_registry_entry_get_name_in_plane(
2360 io_object_t registry_entry,
2361 io_name_t planeName,
2362 io_name_t name )
2363 {
2364 const IORegistryPlane * plane;
2365 CHECK( IORegistryEntry, registry_entry, entry );
2366
2367 if( planeName[0])
2368 plane = IORegistryEntry::getPlane( planeName );
2369 else
2370 plane = 0;
2371
2372 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2373
2374 return( kIOReturnSuccess );
2375 }
2376
2377 /* Routine io_registry_entry_get_location_in_plane */
2378 kern_return_t is_io_registry_entry_get_location_in_plane(
2379 io_object_t registry_entry,
2380 io_name_t planeName,
2381 io_name_t location )
2382 {
2383 const IORegistryPlane * plane;
2384 CHECK( IORegistryEntry, registry_entry, entry );
2385
2386 if( planeName[0])
2387 plane = IORegistryEntry::getPlane( planeName );
2388 else
2389 plane = 0;
2390
2391 const char * cstr = entry->getLocation( plane );
2392
2393 if( cstr) {
2394 strncpy( location, cstr, sizeof( io_name_t));
2395 return( kIOReturnSuccess );
2396 } else
2397 return( kIOReturnNotFound );
2398 }
2399
2400 /* Routine io_registry_entry_get_registry_entry_id */
2401 kern_return_t is_io_registry_entry_get_registry_entry_id(
2402 io_object_t registry_entry,
2403 uint64_t *entry_id )
2404 {
2405 CHECK( IORegistryEntry, registry_entry, entry );
2406
2407 *entry_id = entry->getRegistryEntryID();
2408
2409 return (kIOReturnSuccess);
2410 }
2411
2412 // Create a vm_map_copy_t or kalloc'ed data for memory
2413 // to be copied out. IPC will free it after the copyout.
2414
2415 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
2416 io_buf_ptr_t * buf )
2417 {
2418 kern_return_t err;
2419 vm_map_copy_t copy;
2420
2421 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2422 false /* src_destroy */, &copy);
2423
2424 assert( err == KERN_SUCCESS );
2425 if( err == KERN_SUCCESS )
2426 *buf = (char *) copy;
2427
2428 return( err );
2429 }
2430
2431 /* Routine io_registry_entry_get_property_bytes */
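// Returns small property values inline in the MIG reply buffer: raw bytes for
// OSData, the C string (including terminator) for OSString, "Yes"/"No" for
// OSBoolean, and the significant bytes of the 64-bit value for OSNumber
// (adjusted under __BIG_ENDIAN__). Fails with kIOReturnIPCError when the
// caller's buffer (*dataCnt) is too small.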
2432 kern_return_t is_io_registry_entry_get_property_bytes(
2433 io_object_t registry_entry,
2434 io_name_t property_name,
2435 io_struct_inband_t buf,
2436 mach_msg_type_number_t *dataCnt )
2437 {
2438 OSObject * obj;
2439 OSData * data;
2440 OSString * str;
2441 OSBoolean * boo;
2442 OSNumber * off;
2443 UInt64 offsetBytes;
2444 unsigned int len = 0;
2445 const void * bytes = 0;
2446 IOReturn ret = kIOReturnSuccess;
2447
2448 CHECK( IORegistryEntry, registry_entry, entry );
2449
2450 #if CONFIG_MACF
2451 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2452 return kIOReturnNotPermitted;
2453 #endif
2454
2455 obj = entry->copyProperty(property_name);
2456 if( !obj)
2457 return( kIOReturnNoResources );
2458
2459 // One day OSData will be a common container base class
2460 // until then...
2461 if( (data = OSDynamicCast( OSData, obj ))) {
2462 len = data->getLength();
2463 bytes = data->getBytesNoCopy();
2464
2465 } else if( (str = OSDynamicCast( OSString, obj ))) {
2466 len = str->getLength() + 1;
2467 bytes = str->getCStringNoCopy();
2468
2469 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2470 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2471 bytes = boo->isTrue() ? "Yes" : "No";
2472
2473 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2474 offsetBytes = off->unsigned64BitValue();
2475 len = off->numberOfBytes();
2476 bytes = &offsetBytes;
2477 #ifdef __BIG_ENDIAN__
2478 bytes = (const void *)
2479 (((UInt32) bytes) + (sizeof( UInt64) - len));
2480 #endif
2481
2482 } else
2483 ret = kIOReturnBadArgument;
2484
2485 if( bytes) {
2486 if( *dataCnt < len)
2487 ret = kIOReturnIPCError;
2488 else {
2489 *dataCnt = len;
2490 bcopy( bytes, buf, len );
2491 }
2492 }
2493 obj->release();
2494
2495 return( ret );
2496 }
2497
2498
2499 /* Routine io_registry_entry_get_property */
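// Serializes the named property as XML via OSSerialize and returns it to the
// caller as out-of-line data through copyoutkdata(). User space typically
// reaches this routine through IOKit.framework wrappers such as
// IORegistryEntryCreateCFProperty(); those entry points live outside this file.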
2500 kern_return_t is_io_registry_entry_get_property(
2501 io_object_t registry_entry,
2502 io_name_t property_name,
2503 io_buf_ptr_t *properties,
2504 mach_msg_type_number_t *propertiesCnt )
2505 {
2506 kern_return_t err;
2507 vm_size_t len;
2508 OSObject * obj;
2509
2510 CHECK( IORegistryEntry, registry_entry, entry );
2511
2512 #if CONFIG_MACF
2513 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2514 return kIOReturnNotPermitted;
2515 #endif
2516
2517 obj = entry->copyProperty(property_name);
2518 if( !obj)
2519 return( kIOReturnNotFound );
2520
2521 OSSerialize * s = OSSerialize::withCapacity(4096);
2522 if( !s) {
2523 obj->release();
2524 return( kIOReturnNoMemory );
2525 }
2526
2527 if( obj->serialize( s )) {
2528 len = s->getLength();
2529 *propertiesCnt = len;
2530 err = copyoutkdata( s->text(), len, properties );
2531
2532 } else
2533 err = kIOReturnUnsupported;
2534
2535 s->release();
2536 obj->release();
2537
2538 return( err );
2539 }
2540
2541 /* Routine io_registry_entry_get_property_recursively */
2542 kern_return_t is_io_registry_entry_get_property_recursively(
2543 io_object_t registry_entry,
2544 io_name_t plane,
2545 io_name_t property_name,
2546 uint32_t options,
2547 io_buf_ptr_t *properties,
2548 mach_msg_type_number_t *propertiesCnt )
2549 {
2550 kern_return_t err;
2551 vm_size_t len;
2552 OSObject * obj;
2553
2554 CHECK( IORegistryEntry, registry_entry, entry );
2555
2556 #if CONFIG_MACF
2557 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2558 return kIOReturnNotPermitted;
2559 #endif
2560
2561 obj = entry->copyProperty( property_name,
2562 IORegistryEntry::getPlane( plane ), options);
2563 if( !obj)
2564 return( kIOReturnNotFound );
2565
2566 OSSerialize * s = OSSerialize::withCapacity(4096);
2567 if( !s) {
2568 obj->release();
2569 return( kIOReturnNoMemory );
2570 }
2571
2572 if( obj->serialize( s )) {
2573 len = s->getLength();
2574 *propertiesCnt = len;
2575 err = copyoutkdata( s->text(), len, properties );
2576
2577 } else
2578 err = kIOReturnUnsupported;
2579
2580 s->release();
2581 obj->release();
2582
2583 return( err );
2584 }
2585
2586 #if CONFIG_MACF
2587
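// Builds a copy of 'properties' containing only the keys the caller's
// credential is permitted to read according to mac_iokit_check_get_property().
// Used by is_io_registry_entry_get_properties() below when the MAC policy asks
// for property filtering.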
2588 static kern_return_t
2589 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp)
2590 {
2591 kern_return_t err = 0;
2592 OSDictionary *filtered = NULL;
2593 OSCollectionIterator *iter = NULL;
2594 OSSymbol *key;
2595 OSObject *p;
2596 kauth_cred_t cred = kauth_cred_get();
2597
2598 if (properties == NULL)
2599 return kIOReturnUnsupported;
2600
2601 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL ||
2602 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) {
2603 err = kIOReturnNoMemory;
2604 goto out;
2605 }
2606
2607 while ((p = iter->getNextObject()) != NULL) {
2608 if ((key = OSDynamicCast(OSSymbol, p)) == NULL ||
2609 mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0)
2610 continue;
2611 filtered->setObject(key, properties->getObject(key));
2612 }
2613
2614 out:
2615 if (iter != NULL)
2616 iter->release();
2617 *filteredp = filtered;
2618 return err;
2619 }
2620
2621 #endif
2622
2623 /* Routine io_registry_entry_get_properties */
2624 kern_return_t is_io_registry_entry_get_properties(
2625 io_object_t registry_entry,
2626 io_buf_ptr_t *properties,
2627 mach_msg_type_number_t *propertiesCnt )
2628 {
2629 kern_return_t err = 0;
2630 vm_size_t len;
2631
2632 CHECK( IORegistryEntry, registry_entry, entry );
2633
2634 OSSerialize * s = OSSerialize::withCapacity(4096);
2635 if( !s)
2636 return( kIOReturnNoMemory );
2637
2638 if (!entry->serializeProperties(s))
2639 err = kIOReturnUnsupported;
2640
2641 #if CONFIG_MACF
2642 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
2643 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength());
2644 OSDictionary *filteredprops = NULL;
2645 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops);
2646 if (propobj) propobj->release();
2647
2648 if (!err) {
2649 s->clearText();
2650 if (!filteredprops->serialize(s))
2651 err = kIOReturnUnsupported;
2652 }
2653 if (filteredprops != NULL)
2654 filteredprops->release();
2655 }
2656 #endif /* CONFIG_MACF */
2657
2658 if (!err) {
2659 len = s->getLength();
2660 *propertiesCnt = len;
2661 err = copyoutkdata( s->text(), len, properties );
2662 }
2663
2664 s->release();
2665 return( err );
2666 }
2667
2668 #if CONFIG_MACF
2669
2670 struct GetPropertiesEditorRef
2671 {
2672 kauth_cred_t cred;
2673 IORegistryEntry * entry;
2674 OSCollection * root;
2675 };
2676
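// OSSerialize editor callback for the binary properties path below: for keys
// at the root of the serialized collection it consults
// mac_iokit_check_get_property() and withholds any value the caller is not
// permitted to read.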
2677 static const OSMetaClassBase *
2678 GetPropertiesEditor(void * reference,
2679 OSSerialize * s,
2680 OSCollection * container,
2681 const OSSymbol * name,
2682 const OSMetaClassBase * value)
2683 {
2684 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
2685
2686 if (!ref->root) ref->root = container;
2687 if (ref->root == container)
2688 {
2689 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
2690 {
2691 value = 0;
2692 }
2693 }
2694 if (value) value->retain();
2695 return (value);
2696 }
2697
2698 #endif /* CONFIG_MACF */
2699
2700 /* Routine io_registry_entry_get_properties_bin */
2701 kern_return_t is_io_registry_entry_get_properties_bin(
2702 io_object_t registry_entry,
2703 io_buf_ptr_t *properties,
2704 mach_msg_type_number_t *propertiesCnt)
2705 {
2706 kern_return_t err = kIOReturnSuccess;
2707 vm_size_t len;
2708 OSSerialize * s;
2709 OSSerialize::Editor editor = 0;
2710 void * editRef = 0;
2711
2712 CHECK(IORegistryEntry, registry_entry, entry);
2713
2714 #if CONFIG_MACF
2715 GetPropertiesEditorRef ref;
2716 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
2717 {
2718 editor = &GetPropertiesEditor;
2719 editRef = &ref;
2720 ref.cred = kauth_cred_get();
2721 ref.entry = entry;
2722 ref.root = 0;
2723 }
2724 #endif
2725
2726 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
2727 if (!s) return (kIOReturnNoMemory);
2728
2729 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
2730
2731 if (kIOReturnSuccess == err)
2732 {
2733 len = s->getLength();
2734 *propertiesCnt = len;
2735 err = copyoutkdata(s->text(), len, properties);
2736 }
2737 s->release();
2738
2739 return (err);
2740 }
2741
2742 /* Routine io_registry_entry_get_property_bin */
2743 kern_return_t is_io_registry_entry_get_property_bin(
2744 io_object_t registry_entry,
2745 io_name_t plane,
2746 io_name_t property_name,
2747 uint32_t options,
2748 io_buf_ptr_t *properties,
2749 mach_msg_type_number_t *propertiesCnt )
2750 {
2751 kern_return_t err;
2752 vm_size_t len;
2753 OSObject * obj;
2754 const OSSymbol * sym;
2755
2756 CHECK( IORegistryEntry, registry_entry, entry );
2757
2758 #if CONFIG_MACF
2759 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2760 return kIOReturnNotPermitted;
2761 #endif
2762
2763 if ((kIORegistryIterateRecursively & options) && plane[0])
2764 {
2765 obj = entry->copyProperty(property_name,
2766 IORegistryEntry::getPlane(plane), options);
2767 }
2768 else
2769 {
2770 obj = entry->copyProperty(property_name);
2771 }
2772
2773 if( !obj)
2774 return( kIOReturnNotFound );
2775
2776 sym = OSSymbol::withCString(property_name);
2777 if (sym)
2778 {
2779 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
2780 sym->release();
2781 }
2782
2783 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
2784 if( !s) {
2785 obj->release();
2786 return( kIOReturnNoMemory );
2787 }
2788
2789 if( obj->serialize( s )) {
2790 len = s->getLength();
2791 *propertiesCnt = len;
2792 err = copyoutkdata( s->text(), len, properties );
2793
2794 } else err = kIOReturnUnsupported;
2795
2796 s->release();
2797 obj->release();
2798
2799 return( err );
2800 }
2801
2802 /* Routine io_registry_entry_set_properties */
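// Copies in the out-of-line XML property data, unserializes it, runs the MACF
// set-properties check, and applies the unserialized object with
// setProperties(). As with the other OOL routines, the MIG return value is the
// vm_map_copyout() status and the IOKit status is passed back in *result.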
2803 kern_return_t is_io_registry_entry_set_properties
2804 (
2805 io_object_t registry_entry,
2806 io_buf_ptr_t properties,
2807 mach_msg_type_number_t propertiesCnt,
2808 kern_return_t * result)
2809 {
2810 OSObject * obj;
2811 kern_return_t err;
2812 IOReturn res;
2813 vm_offset_t data;
2814 vm_map_offset_t map_data;
2815
2816 CHECK( IORegistryEntry, registry_entry, entry );
2817
2818 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
2819 return( kIOReturnMessageTooLarge);
2820
2821 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2822 data = CAST_DOWN(vm_offset_t, map_data);
2823
2824 if( KERN_SUCCESS == err) {
2825
2826 // must return success after vm_map_copyout() succeeds
2827 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
2828 vm_deallocate( kernel_map, data, propertiesCnt );
2829
2830 if (!obj)
2831 res = kIOReturnBadArgument;
2832 #if CONFIG_MACF
2833 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
2834 registry_entry, obj))
2835 {
2836 res = kIOReturnNotPermitted;
2837 }
2838 #endif
2839 else
2840 {
2841 res = entry->setProperties( obj );
2842 }
2843
2844 if (obj)
2845 obj->release();
2846 } else
2847 res = err;
2848
2849 *result = res;
2850 return( err );
2851 }
2852
2853 /* Routine io_registry_entry_get_child_iterator */
2854 kern_return_t is_io_registry_entry_get_child_iterator(
2855 io_object_t registry_entry,
2856 io_name_t plane,
2857 io_object_t *iterator )
2858 {
2859 CHECK( IORegistryEntry, registry_entry, entry );
2860
2861 *iterator = entry->getChildIterator(
2862 IORegistryEntry::getPlane( plane ));
2863
2864 return( kIOReturnSuccess );
2865 }
2866
2867 /* Routine io_registry_entry_get_parent_iterator */
2868 kern_return_t is_io_registry_entry_get_parent_iterator(
2869 io_object_t registry_entry,
2870 io_name_t plane,
2871 io_object_t *iterator)
2872 {
2873 CHECK( IORegistryEntry, registry_entry, entry );
2874
2875 *iterator = entry->getParentIterator(
2876 IORegistryEntry::getPlane( plane ));
2877
2878 return( kIOReturnSuccess );
2879 }
2880
2881 /* Routine io_service_get_busy_state */
2882 kern_return_t is_io_service_get_busy_state(
2883 io_object_t _service,
2884 uint32_t *busyState )
2885 {
2886 CHECK( IOService, _service, service );
2887
2888 *busyState = service->getBusyState();
2889
2890 return( kIOReturnSuccess );
2891 }
2892
2893 /* Routine io_service_get_state */
2894 kern_return_t is_io_service_get_state(
2895 io_object_t _service,
2896 uint64_t *state,
2897 uint32_t *busy_state,
2898 uint64_t *accumulated_busy_time )
2899 {
2900 CHECK( IOService, _service, service );
2901
2902 *state = service->getState();
2903 *busy_state = service->getBusyState();
2904 *accumulated_busy_time = service->getAccumulatedBusyTime();
2905
2906 return( kIOReturnSuccess );
2907 }
2908
2909 /* Routine io_service_wait_quiet */
2910 kern_return_t is_io_service_wait_quiet(
2911 io_object_t _service,
2912 mach_timespec_t wait_time )
2913 {
2914 uint64_t timeoutNS;
2915
2916 CHECK( IOService, _service, service );
2917
2918 timeoutNS = wait_time.tv_sec;
2919 timeoutNS *= kSecondScale;
2920 timeoutNS += wait_time.tv_nsec;
2921
2922 return( service->waitQuiet(timeoutNS) );
2923 }
2924
2925 /* Routine io_service_request_probe */
2926 kern_return_t is_io_service_request_probe(
2927 io_object_t _service,
2928 uint32_t options )
2929 {
2930 CHECK( IOService, _service, service );
2931
2932 return( service->requestProbe( options ));
2933 }
2934
2935 /* Routine io_service_get_authorization_id */
2936 kern_return_t is_io_service_get_authorization_id(
2937 io_object_t _service,
2938 uint64_t *authorization_id )
2939 {
2940 kern_return_t kr;
2941
2942 CHECK( IOService, _service, service );
2943
2944 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
2945 kIOClientPrivilegeAdministrator );
2946 if( kIOReturnSuccess != kr)
2947 return( kr );
2948
2949 *authorization_id = service->getAuthorizationID();
2950
2951 return( kr );
2952 }
2953
2954 /* Routine io_service_set_authorization_id */
2955 kern_return_t is_io_service_set_authorization_id(
2956 io_object_t _service,
2957 uint64_t authorization_id )
2958 {
2959 CHECK( IOService, _service, service );
2960
2961 return( service->setAuthorizationID( authorization_id ) );
2962 }
2963
2964 /* Routine io_service_open_extended */
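// Opens a connection to the service: unserializes the optional properties
// dictionary, records the caller's NDR record under kIOUserClientCrossEndianKey
// when byte orders differ, creates the user client with newUserClient(), and
// then applies the cross-endian and MACF open checks before returning the
// connection to the caller.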
2965 kern_return_t is_io_service_open_extended(
2966 io_object_t _service,
2967 task_t owningTask,
2968 uint32_t connect_type,
2969 NDR_record_t ndr,
2970 io_buf_ptr_t properties,
2971 mach_msg_type_number_t propertiesCnt,
2972 kern_return_t * result,
2973 io_object_t *connection )
2974 {
2975 IOUserClient * client = 0;
2976 kern_return_t err = KERN_SUCCESS;
2977 IOReturn res = kIOReturnSuccess;
2978 OSDictionary * propertiesDict = 0;
2979 bool crossEndian;
2980 bool disallowAccess;
2981
2982 CHECK( IOService, _service, service );
2983
2984 do
2985 {
2986 if (properties)
2987 {
2988 OSObject * obj;
2989 vm_offset_t data;
2990 vm_map_offset_t map_data;
2991
2992 if( propertiesCnt > sizeof(io_struct_inband_t))
2993 return( kIOReturnMessageTooLarge);
2994
2995 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2996 res = err;
2997 data = CAST_DOWN(vm_offset_t, map_data);
2998 if (KERN_SUCCESS == err)
2999 {
3000 // must return success after vm_map_copyout() succeeds
3001 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3002 vm_deallocate( kernel_map, data, propertiesCnt );
3003 propertiesDict = OSDynamicCast(OSDictionary, obj);
3004 if (!propertiesDict)
3005 {
3006 res = kIOReturnBadArgument;
3007 if (obj)
3008 obj->release();
3009 }
3010 }
3011 if (kIOReturnSuccess != res)
3012 break;
3013 }
3014
3015 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3016 if (crossEndian)
3017 {
3018 if (!propertiesDict)
3019 propertiesDict = OSDictionary::withCapacity(4);
3020 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3021 if (data)
3022 {
3023 if (propertiesDict)
3024 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3025 data->release();
3026 }
3027 }
3028
3029 res = service->newUserClient( owningTask, (void *) owningTask,
3030 connect_type, propertiesDict, &client );
3031
3032 if (propertiesDict)
3033 propertiesDict->release();
3034
3035 if (res == kIOReturnSuccess)
3036 {
3037 assert( OSDynamicCast(IOUserClient, client) );
3038
3039 disallowAccess = (crossEndian
3040 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3041 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3042 if (disallowAccess) res = kIOReturnUnsupported;
3043 #if CONFIG_MACF
3044 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3045 res = kIOReturnNotPermitted;
3046 #endif
3047 if (kIOReturnSuccess != res)
3048 {
3049 IOStatisticsClientCall();
3050 client->clientClose();
3051 client->release();
3052 client = 0;
3053 break;
3054 }
3055 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3056 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3057 if (creatorName)
3058 {
3059 client->setProperty(kIOUserClientCreatorKey, creatorName);
3060 creatorName->release();
3061 }
3062 client->setTerminateDefer(service, false);
3063 }
3064 }
3065 while (false);
3066
3067 *connection = client;
3068 *result = res;
3069
3070 return (err);
3071 }
3072
3073 /* Routine io_service_close */
3074 kern_return_t is_io_service_close(
3075 io_object_t connection )
3076 {
3077 OSSet * mappings;
3078 if ((mappings = OSDynamicCast(OSSet, connection)))
3079 return( kIOReturnSuccess );
3080
3081 CHECK( IOUserClient, connection, client );
3082
3083 IOStatisticsClientCall();
3084 client->clientClose();
3085
3086 return( kIOReturnSuccess );
3087 }
3088
3089 /* Routine io_connect_get_service */
3090 kern_return_t is_io_connect_get_service(
3091 io_object_t connection,
3092 io_object_t *service )
3093 {
3094 IOService * theService;
3095
3096 CHECK( IOUserClient, connection, client );
3097
3098 theService = client->getService();
3099 if( theService)
3100 theService->retain();
3101
3102 *service = theService;
3103
3104 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3105 }
3106
3107 /* Routine io_connect_set_notification_port */
3108 kern_return_t is_io_connect_set_notification_port(
3109 io_object_t connection,
3110 uint32_t notification_type,
3111 mach_port_t port,
3112 uint32_t reference)
3113 {
3114 CHECK( IOUserClient, connection, client );
3115
3116 IOStatisticsClientCall();
3117 return( client->registerNotificationPort( port, notification_type,
3118 (io_user_reference_t) reference ));
3119 }
3120
3121 /* Routine io_connect_set_notification_port_64 */
3122 kern_return_t is_io_connect_set_notification_port_64(
3123 io_object_t connection,
3124 uint32_t notification_type,
3125 mach_port_t port,
3126 io_user_reference_t reference)
3127 {
3128 CHECK( IOUserClient, connection, client );
3129
3130 IOStatisticsClientCall();
3131 return( client->registerNotificationPort( port, notification_type,
3132 reference ));
3133 }
3134
3135 /* Routine io_connect_map_memory_into_task */
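// Maps the user client's memory of 'memory_type' into 'into_task'. For shared
// user clients, or when mapping into a task other than the caller, a send
// right naming the IOMemoryMap (IKOT_IOKIT_OBJECT) is pushed to that task so
// the mapping can be cleaned up later; otherwise the map is tracked in the
// client's 'mappings' set.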
3136 kern_return_t is_io_connect_map_memory_into_task
3137 (
3138 io_connect_t connection,
3139 uint32_t memory_type,
3140 task_t into_task,
3141 mach_vm_address_t *address,
3142 mach_vm_size_t *size,
3143 uint32_t flags
3144 )
3145 {
3146 IOReturn err;
3147 IOMemoryMap * map;
3148
3149 CHECK( IOUserClient, connection, client );
3150
3151 IOStatisticsClientCall();
3152 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3153
3154 if( map) {
3155 *address = map->getAddress();
3156 if( size)
3157 *size = map->getSize();
3158
3159 if( client->sharedInstance
3160 || (into_task != current_task())) {
3161 // push a name out to the task owning the map,
3162 // so we can clean up maps
3163 mach_port_name_t name __unused =
3164 IOMachPort::makeSendRightForTask(
3165 into_task, map, IKOT_IOKIT_OBJECT );
3166
3167 } else {
3168 // keep it with the user client
3169 IOLockLock( gIOObjectPortLock);
3170 if( 0 == client->mappings)
3171 client->mappings = OSSet::withCapacity(2);
3172 if( client->mappings)
3173 client->mappings->setObject( map);
3174 IOLockUnlock( gIOObjectPortLock);
3175 map->release();
3176 }
3177 err = kIOReturnSuccess;
3178
3179 } else
3180 err = kIOReturnBadArgument;
3181
3182 return( err );
3183 }
3184
3185 /* Routine io_connect_map_memory */
3186 kern_return_t is_io_connect_map_memory(
3187 io_object_t connect,
3188 uint32_t type,
3189 task_t task,
3190 uint32_t * mapAddr,
3191 uint32_t * mapSize,
3192 uint32_t flags )
3193 {
3194 IOReturn err;
3195 mach_vm_address_t address;
3196 mach_vm_size_t size;
3197
3198 address = SCALAR64(*mapAddr);
3199 size = SCALAR64(*mapSize);
3200
3201 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3202
3203 *mapAddr = SCALAR32(address);
3204 *mapSize = SCALAR32(size);
3205
3206 return (err);
3207 }
3208
3209 } /* extern "C" */
3210
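// Removes the IOMemoryMap backed by 'mem' from this client's 'mappings' set,
// if present, and returns it retained (0 if none is found). Runs under
// gIOObjectPortLock; the caller is responsible for releasing the returned map.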
3211 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3212 {
3213 OSIterator * iter;
3214 IOMemoryMap * map = 0;
3215
3216 IOLockLock(gIOObjectPortLock);
3217
3218 iter = OSCollectionIterator::withCollection(mappings);
3219 if(iter)
3220 {
3221 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3222 {
3223 if(mem == map->getMemoryDescriptor())
3224 {
3225 map->retain();
3226 mappings->removeObject(map);
3227 break;
3228 }
3229 }
3230 iter->release();
3231 }
3232
3233 IOLockUnlock(gIOObjectPortLock);
3234
3235 return (map);
3236 }
3237
3238 extern "C" {
3239
3240 /* Routine io_connect_unmap_memory_from_task */
3241 kern_return_t is_io_connect_unmap_memory_from_task
3242 (
3243 io_connect_t connection,
3244 uint32_t memory_type,
3245 task_t from_task,
3246 mach_vm_address_t address)
3247 {
3248 IOReturn err;
3249 IOOptionBits options = 0;
3250 IOMemoryDescriptor * memory;
3251 IOMemoryMap * map;
3252
3253 CHECK( IOUserClient, connection, client );
3254
3255 IOStatisticsClientCall();
3256 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3257
3258 if( memory && (kIOReturnSuccess == err)) {
3259
3260 options = (options & ~kIOMapUserOptionsMask)
3261 | kIOMapAnywhere | kIOMapReference;
3262
3263 map = memory->createMappingInTask( from_task, address, options );
3264 memory->release();
3265 if( map)
3266 {
3267 IOLockLock( gIOObjectPortLock);
3268 if( client->mappings)
3269 client->mappings->removeObject( map);
3270 IOLockUnlock( gIOObjectPortLock);
3271
3272 mach_port_name_t name = 0;
3273 if (from_task != current_task())
3274 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3275 if (name)
3276 {
3277 map->userClientUnmap();
3278 err = iokit_mod_send_right( from_task, name, -2 );
3279 err = kIOReturnSuccess;
3280 }
3281 else
3282 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3283 if (from_task == current_task())
3284 map->release();
3285 }
3286 else
3287 err = kIOReturnBadArgument;
3288 }
3289
3290 return( err );
3291 }
3292
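/* Routine io_connect_unmap_memory */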
3293 kern_return_t is_io_connect_unmap_memory(
3294 io_object_t connect,
3295 uint32_t type,
3296 task_t task,
3297 uint32_t mapAddr )
3298 {
3299 IOReturn err;
3300 mach_vm_address_t address;
3301
3302 address = SCALAR64(mapAddr);
3303
3304 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3305
3306 return (err);
3307 }
3308
3309
3310 /* Routine io_connect_add_client */
3311 kern_return_t is_io_connect_add_client(
3312 io_object_t connection,
3313 io_object_t connect_to)
3314 {
3315 CHECK( IOUserClient, connection, client );
3316 CHECK( IOUserClient, connect_to, to );
3317
3318 IOStatisticsClientCall();
3319 return( client->connectClient( to ) );
3320 }
3321
3322
3323 /* Routine io_connect_set_properties */
3324 kern_return_t is_io_connect_set_properties(
3325 io_object_t connection,
3326 io_buf_ptr_t properties,
3327 mach_msg_type_number_t propertiesCnt,
3328 kern_return_t * result)
3329 {
3330 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3331 }
3332
3333 /* Routine io_connect_method_var_output */
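// Like io_connect_method, but for methods that produce variable-length
// structure output: the external method hands back an OSSerialize or OSData
// object through args.structureVariableOutputData, and its contents are copied
// out to the caller as out-of-line data.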
3334 kern_return_t is_io_connect_method_var_output
3335 (
3336 io_connect_t connection,
3337 uint32_t selector,
3338 io_scalar_inband64_t scalar_input,
3339 mach_msg_type_number_t scalar_inputCnt,
3340 io_struct_inband_t inband_input,
3341 mach_msg_type_number_t inband_inputCnt,
3342 mach_vm_address_t ool_input,
3343 mach_vm_size_t ool_input_size,
3344 io_struct_inband_t inband_output,
3345 mach_msg_type_number_t *inband_outputCnt,
3346 io_scalar_inband64_t scalar_output,
3347 mach_msg_type_number_t *scalar_outputCnt,
3348 io_buf_ptr_t *var_output,
3349 mach_msg_type_number_t *var_outputCnt
3350 )
3351 {
3352 CHECK( IOUserClient, connection, client );
3353
3354 IOExternalMethodArguments args;
3355 IOReturn ret;
3356 IOMemoryDescriptor * inputMD = 0;
3357 OSObject * structureVariableOutputData = 0;
3358
3359 bzero(&args.__reserved[0], sizeof(args.__reserved));
3360 args.version = kIOExternalMethodArgumentsCurrentVersion;
3361
3362 args.selector = selector;
3363
3364 args.asyncWakePort = MACH_PORT_NULL;
3365 args.asyncReference = 0;
3366 args.asyncReferenceCount = 0;
3367 args.structureVariableOutputData = &structureVariableOutputData;
3368
3369 args.scalarInput = scalar_input;
3370 args.scalarInputCount = scalar_inputCnt;
3371 args.structureInput = inband_input;
3372 args.structureInputSize = inband_inputCnt;
3373
3374 if (ool_input)
3375 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3376 kIODirectionOut, current_task());
3377
3378 args.structureInputDescriptor = inputMD;
3379
3380 args.scalarOutput = scalar_output;
3381 args.scalarOutputCount = *scalar_outputCnt;
3382 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3383 args.structureOutput = inband_output;
3384 args.structureOutputSize = *inband_outputCnt;
3385 args.structureOutputDescriptor = NULL;
3386 args.structureOutputDescriptorSize = 0;
3387
3388 IOStatisticsClientCall();
3389 ret = client->externalMethod( selector, &args );
3390
3391 *scalar_outputCnt = args.scalarOutputCount;
3392 *inband_outputCnt = args.structureOutputSize;
3393
3394 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3395 {
3396 OSSerialize * serialize;
3397 OSData * data;
3398 vm_size_t len;
3399
3400 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3401 {
3402 len = serialize->getLength();
3403 *var_outputCnt = len;
3404 ret = copyoutkdata(serialize->text(), len, var_output);
3405 }
3406 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3407 {
3408 len = data->getLength();
3409 *var_outputCnt = len;
3410 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3411 }
3412 else
3413 {
3414 ret = kIOReturnUnderrun;
3415 }
3416 }
3417
3418 if (inputMD)
3419 inputMD->release();
3420 if (structureVariableOutputData)
3421 structureVariableOutputData->release();
3422
3423 return (ret);
3424 }
3425
3426 /* Routine io_connect_method */
3427 kern_return_t is_io_connect_method
3428 (
3429 io_connect_t connection,
3430 uint32_t selector,
3431 io_scalar_inband64_t scalar_input,
3432 mach_msg_type_number_t scalar_inputCnt,
3433 io_struct_inband_t inband_input,
3434 mach_msg_type_number_t inband_inputCnt,
3435 mach_vm_address_t ool_input,
3436 mach_vm_size_t ool_input_size,
3437 io_struct_inband_t inband_output,
3438 mach_msg_type_number_t *inband_outputCnt,
3439 io_scalar_inband64_t scalar_output,
3440 mach_msg_type_number_t *scalar_outputCnt,
3441 mach_vm_address_t ool_output,
3442 mach_vm_size_t *ool_output_size
3443 )
3444 {
3445 CHECK( IOUserClient, connection, client );
3446
3447 IOExternalMethodArguments args;
3448 IOReturn ret;
3449 IOMemoryDescriptor * inputMD = 0;
3450 IOMemoryDescriptor * outputMD = 0;
3451
3452 bzero(&args.__reserved[0], sizeof(args.__reserved));
3453 args.version = kIOExternalMethodArgumentsCurrentVersion;
3454
3455 args.selector = selector;
3456
3457 args.asyncWakePort = MACH_PORT_NULL;
3458 args.asyncReference = 0;
3459 args.asyncReferenceCount = 0;
3460 args.structureVariableOutputData = 0;
3461
3462 args.scalarInput = scalar_input;
3463 args.scalarInputCount = scalar_inputCnt;
3464 args.structureInput = inband_input;
3465 args.structureInputSize = inband_inputCnt;
3466
3467 if (ool_input)
3468 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3469 kIODirectionOut, current_task());
3470
3471 args.structureInputDescriptor = inputMD;
3472
3473 args.scalarOutput = scalar_output;
3474 args.scalarOutputCount = *scalar_outputCnt;
3475 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3476 args.structureOutput = inband_output;
3477 args.structureOutputSize = *inband_outputCnt;
3478
3479 if (ool_output && ool_output_size)
3480 {
3481 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3482 kIODirectionIn, current_task());
3483 }
3484
3485 args.structureOutputDescriptor = outputMD;
3486 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3487
3488 IOStatisticsClientCall();
3489 ret = client->externalMethod( selector, &args );
3490
3491 *scalar_outputCnt = args.scalarOutputCount;
3492 *inband_outputCnt = args.structureOutputSize;
3493 *ool_output_size = args.structureOutputDescriptorSize;
3494
3495 if (inputMD)
3496 inputMD->release();
3497 if (outputMD)
3498 outputMD->release();
3499
3500 return (ret);
3501 }
3502
3503 /* Routine io_connect_async_method */
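// Asynchronous variant of io_connect_method: the wake port is packed into
// reference[0] (tagged with kIOUCAsync64Flag for 64-bit tasks) so the async
// completion path can recover it, then the request is dispatched through
// externalMethod() as usual.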
3504 kern_return_t is_io_connect_async_method
3505 (
3506 io_connect_t connection,
3507 mach_port_t wake_port,
3508 io_async_ref64_t reference,
3509 mach_msg_type_number_t referenceCnt,
3510 uint32_t selector,
3511 io_scalar_inband64_t scalar_input,
3512 mach_msg_type_number_t scalar_inputCnt,
3513 io_struct_inband_t inband_input,
3514 mach_msg_type_number_t inband_inputCnt,
3515 mach_vm_address_t ool_input,
3516 mach_vm_size_t ool_input_size,
3517 io_struct_inband_t inband_output,
3518 mach_msg_type_number_t *inband_outputCnt,
3519 io_scalar_inband64_t scalar_output,
3520 mach_msg_type_number_t *scalar_outputCnt,
3521 mach_vm_address_t ool_output,
3522 mach_vm_size_t * ool_output_size
3523 )
3524 {
3525 CHECK( IOUserClient, connection, client );
3526
3527 IOExternalMethodArguments args;
3528 IOReturn ret;
3529 IOMemoryDescriptor * inputMD = 0;
3530 IOMemoryDescriptor * outputMD = 0;
3531
3532 bzero(&args.__reserved[0], sizeof(args.__reserved));
3533 args.version = kIOExternalMethodArgumentsCurrentVersion;
3534
3535 reference[0] = (io_user_reference_t) wake_port;
3536 if (vm_map_is_64bit(get_task_map(current_task())))
3537 reference[0] |= kIOUCAsync64Flag;
3538
3539 args.selector = selector;
3540
3541 args.asyncWakePort = wake_port;
3542 args.asyncReference = reference;
3543 args.asyncReferenceCount = referenceCnt;
3544
3545 args.scalarInput = scalar_input;
3546 args.scalarInputCount = scalar_inputCnt;
3547 args.structureInput = inband_input;
3548 args.structureInputSize = inband_inputCnt;
3549
3550 if (ool_input)
3551 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3552 kIODirectionOut, current_task());
3553
3554 args.structureInputDescriptor = inputMD;
3555
3556 args.scalarOutput = scalar_output;
3557 args.scalarOutputCount = *scalar_outputCnt;
3558 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3559 args.structureOutput = inband_output;
3560 args.structureOutputSize = *inband_outputCnt;
3561
3562 if (ool_output)
3563 {
3564 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3565 kIODirectionIn, current_task());
3566 }
3567
3568 args.structureOutputDescriptor = outputMD;
3569 args.structureOutputDescriptorSize = *ool_output_size;
3570
3571 IOStatisticsClientCall();
3572 ret = client->externalMethod( selector, &args );
3573
3574 *inband_outputCnt = args.structureOutputSize;
3575 *ool_output_size = args.structureOutputDescriptorSize;
3576
3577 if (inputMD)
3578 inputMD->release();
3579 if (outputMD)
3580 outputMD->release();
3581
3582 return (ret);
3583 }
3584
3585 /* Routine io_connect_method_scalarI_scalarO */
3586 kern_return_t is_io_connect_method_scalarI_scalarO(
3587 io_object_t connect,
3588 uint32_t index,
3589 io_scalar_inband_t input,
3590 mach_msg_type_number_t inputCount,
3591 io_scalar_inband_t output,
3592 mach_msg_type_number_t * outputCount )
3593 {
3594 IOReturn err;
3595 uint32_t i;
3596 io_scalar_inband64_t _input;
3597 io_scalar_inband64_t _output;
3598
3599 mach_msg_type_number_t struct_outputCnt = 0;
3600 mach_vm_size_t ool_output_size = 0;
3601
3602 bzero(&_output[0], sizeof(_output));
3603 for (i = 0; i < inputCount; i++)
3604 _input[i] = SCALAR64(input[i]);
3605
3606 err = is_io_connect_method(connect, index,
3607 _input, inputCount,
3608 NULL, 0,
3609 0, 0,
3610 NULL, &struct_outputCnt,
3611 _output, outputCount,
3612 0, &ool_output_size);
3613
3614 for (i = 0; i < *outputCount; i++)
3615 output[i] = SCALAR32(_output[i]);
3616
3617 return (err);
3618 }
3619
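// Compatibility shim dispatching a legacy IOExternalMethod that takes up to
// six 32-bit scalars. The call signature is chosen by inputCount, with the
// remaining argument slots carrying scalar outputs; the input and output
// counts are validated against the method table entry before dispatch.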
3620 kern_return_t shim_io_connect_method_scalarI_scalarO(
3621 IOExternalMethod * method,
3622 IOService * object,
3623 const io_user_scalar_t * input,
3624 mach_msg_type_number_t inputCount,
3625 io_user_scalar_t * output,
3626 mach_msg_type_number_t * outputCount )
3627 {
3628 IOMethod func;
3629 io_scalar_inband_t _output;
3630 IOReturn err;
3631 err = kIOReturnBadArgument;
3632
3633 bzero(&_output[0], sizeof(_output));
3634 do {
3635
3636 if( inputCount != method->count0)
3637 {
3638 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3639 continue;
3640 }
3641 if( *outputCount != method->count1)
3642 {
3643 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3644 continue;
3645 }
3646
3647 func = method->func;
3648
3649 switch( inputCount) {
3650
3651 case 6:
3652 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3653 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3654 break;
3655 case 5:
3656 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3657 ARG32(input[3]), ARG32(input[4]),
3658 &_output[0] );
3659 break;
3660 case 4:
3661 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3662 ARG32(input[3]),
3663 &_output[0], &_output[1] );
3664 break;
3665 case 3:
3666 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3667 &_output[0], &_output[1], &_output[2] );
3668 break;
3669 case 2:
3670 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3671 &_output[0], &_output[1], &_output[2],
3672 &_output[3] );
3673 break;
3674 case 1:
3675 err = (object->*func)( ARG32(input[0]),
3676 &_output[0], &_output[1], &_output[2],
3677 &_output[3], &_output[4] );
3678 break;
3679 case 0:
3680 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3681 &_output[3], &_output[4], &_output[5] );
3682 break;
3683
3684 default:
3685 IOLog("%s: Bad method table\n", object->getName());
3686 }
3687 }
3688 while( false);
3689
3690 uint32_t i;
3691 for (i = 0; i < *outputCount; i++)
3692 output[i] = SCALAR32(_output[i]);
3693
3694 return( err);
3695 }
3696
3697 /* Routine io_async_method_scalarI_scalarO */
3698 kern_return_t is_io_async_method_scalarI_scalarO(
3699 io_object_t connect,
3700 mach_port_t wake_port,
3701 io_async_ref_t reference,
3702 mach_msg_type_number_t referenceCnt,
3703 uint32_t index,
3704 io_scalar_inband_t input,
3705 mach_msg_type_number_t inputCount,
3706 io_scalar_inband_t output,
3707 mach_msg_type_number_t * outputCount )
3708 {
3709 IOReturn err;
3710 uint32_t i;
3711 io_scalar_inband64_t _input;
3712 io_scalar_inband64_t _output;
3713 io_async_ref64_t _reference;
3714
3715 bzero(&_output[0], sizeof(_output));
3716 for (i = 0; i < referenceCnt; i++)
3717 _reference[i] = REF64(reference[i]);
3718
3719 mach_msg_type_number_t struct_outputCnt = 0;
3720 mach_vm_size_t ool_output_size = 0;
3721
3722 for (i = 0; i < inputCount; i++)
3723 _input[i] = SCALAR64(input[i]);
3724
3725 err = is_io_connect_async_method(connect,
3726 wake_port, _reference, referenceCnt,
3727 index,
3728 _input, inputCount,
3729 NULL, 0,
3730 0, 0,
3731 NULL, &struct_outputCnt,
3732 _output, outputCount,
3733 0, &ool_output_size);
3734
3735 for (i = 0; i < *outputCount; i++)
3736 output[i] = SCALAR32(_output[i]);
3737
3738 return (err);
3739 }
3740 /* Routine io_async_method_scalarI_structureO */
3741 kern_return_t is_io_async_method_scalarI_structureO(
3742 io_object_t connect,
3743 mach_port_t wake_port,
3744 io_async_ref_t reference,
3745 mach_msg_type_number_t referenceCnt,
3746 uint32_t index,
3747 io_scalar_inband_t input,
3748 mach_msg_type_number_t inputCount,
3749 io_struct_inband_t output,
3750 mach_msg_type_number_t * outputCount )
3751 {
3752 uint32_t i;
3753 io_scalar_inband64_t _input;
3754 io_async_ref64_t _reference;
3755
3756 for (i = 0; i < referenceCnt; i++)
3757 _reference[i] = REF64(reference[i]);
3758
3759 mach_msg_type_number_t scalar_outputCnt = 0;
3760 mach_vm_size_t ool_output_size = 0;
3761
3762 for (i = 0; i < inputCount; i++)
3763 _input[i] = SCALAR64(input[i]);
3764
3765 return (is_io_connect_async_method(connect,
3766 wake_port, _reference, referenceCnt,
3767 index,
3768 _input, inputCount,
3769 NULL, 0,
3770 0, 0,
3771 output, outputCount,
3772 NULL, &scalar_outputCnt,
3773 0, &ool_output_size));
3774 }
3775
3776 /* Routine io_async_method_scalarI_structureI */
3777 kern_return_t is_io_async_method_scalarI_structureI(
3778 io_connect_t connect,
3779 mach_port_t wake_port,
3780 io_async_ref_t reference,
3781 mach_msg_type_number_t referenceCnt,
3782 uint32_t index,
3783 io_scalar_inband_t input,
3784 mach_msg_type_number_t inputCount,
3785 io_struct_inband_t inputStruct,
3786 mach_msg_type_number_t inputStructCount )
3787 {
3788 uint32_t i;
3789 io_scalar_inband64_t _input;
3790 io_async_ref64_t _reference;
3791
3792 for (i = 0; i < referenceCnt; i++)
3793 _reference[i] = REF64(reference[i]);
3794
3795 mach_msg_type_number_t scalar_outputCnt = 0;
3796 mach_msg_type_number_t inband_outputCnt = 0;
3797 mach_vm_size_t ool_output_size = 0;
3798
3799 for (i = 0; i < inputCount; i++)
3800 _input[i] = SCALAR64(input[i]);
3801
3802 return (is_io_connect_async_method(connect,
3803 wake_port, _reference, referenceCnt,
3804 index,
3805 _input, inputCount,
3806 inputStruct, inputStructCount,
3807 0, 0,
3808 NULL, &inband_outputCnt,
3809 NULL, &scalar_outputCnt,
3810 0, &ool_output_size));
3811 }
3812
3813 /* Routine io_async_method_structureI_structureO */
3814 kern_return_t is_io_async_method_structureI_structureO(
3815 io_object_t connect,
3816 mach_port_t wake_port,
3817 io_async_ref_t reference,
3818 mach_msg_type_number_t referenceCnt,
3819 uint32_t index,
3820 io_struct_inband_t input,
3821 mach_msg_type_number_t inputCount,
3822 io_struct_inband_t output,
3823 mach_msg_type_number_t * outputCount )
3824 {
3825 uint32_t i;
3826 mach_msg_type_number_t scalar_outputCnt = 0;
3827 mach_vm_size_t ool_output_size = 0;
3828 io_async_ref64_t _reference;
3829
3830 for (i = 0; i < referenceCnt; i++)
3831 _reference[i] = REF64(reference[i]);
3832
3833 return (is_io_connect_async_method(connect,
3834 wake_port, _reference, referenceCnt,
3835 index,
3836 NULL, 0,
3837 input, inputCount,
3838 0, 0,
3839 output, outputCount,
3840 NULL, &scalar_outputCnt,
3841 0, &ool_output_size));
3842 }
3843
3844
3845 kern_return_t shim_io_async_method_scalarI_scalarO(
3846 IOExternalAsyncMethod * method,
3847 IOService * object,
3848 mach_port_t asyncWakePort,
3849 io_user_reference_t * asyncReference,
3850 uint32_t asyncReferenceCount,
3851 const io_user_scalar_t * input,
3852 mach_msg_type_number_t inputCount,
3853 io_user_scalar_t * output,
3854 mach_msg_type_number_t * outputCount )
3855 {
3856 IOAsyncMethod func;
3857 uint32_t i;
3858 io_scalar_inband_t _output;
3859 IOReturn err;
3860 io_async_ref_t reference;
3861
3862 bzero(&_output[0], sizeof(_output));
3863 for (i = 0; i < asyncReferenceCount; i++)
3864 reference[i] = REF32(asyncReference[i]);
3865
3866 err = kIOReturnBadArgument;
3867
3868 do {
3869
3870 if( inputCount != method->count0)
3871 {
3872 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3873 continue;
3874 }
3875 if( *outputCount != method->count1)
3876 {
3877 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3878 continue;
3879 }
3880
3881 func = method->func;
3882
3883 switch( inputCount) {
3884
3885 case 6:
3886 err = (object->*func)( reference,
3887 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3888 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3889 break;
3890 case 5:
3891 err = (object->*func)( reference,
3892 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3893 ARG32(input[3]), ARG32(input[4]),
3894 &_output[0] );
3895 break;
3896 case 4:
3897 err = (object->*func)( reference,
3898 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3899 ARG32(input[3]),
3900 &_output[0], &_output[1] );
3901 break;
3902 case 3:
3903 err = (object->*func)( reference,
3904 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3905 &_output[0], &_output[1], &_output[2] );
3906 break;
3907 case 2:
3908 err = (object->*func)( reference,
3909 ARG32(input[0]), ARG32(input[1]),
3910 &_output[0], &_output[1], &_output[2],
3911 &_output[3] );
3912 break;
3913 case 1:
3914 err = (object->*func)( reference,
3915 ARG32(input[0]),
3916 &_output[0], &_output[1], &_output[2],
3917 &_output[3], &_output[4] );
3918 break;
3919 case 0:
3920 err = (object->*func)( reference,
3921 &_output[0], &_output[1], &_output[2],
3922 &_output[3], &_output[4], &_output[5] );
3923 break;
3924
3925 default:
3926 IOLog("%s: Bad method table\n", object->getName());
3927 }
3928 }
3929 while( false);
3930
3931 for (i = 0; i < *outputCount; i++)
3932 output[i] = SCALAR32(_output[i]);
3933
3934 return( err);
3935 }
3936
3937
3938 /* Routine io_connect_method_scalarI_structureO */
3939 kern_return_t is_io_connect_method_scalarI_structureO(
3940 io_object_t connect,
3941 uint32_t index,
3942 io_scalar_inband_t input,
3943 mach_msg_type_number_t inputCount,
3944 io_struct_inband_t output,
3945 mach_msg_type_number_t * outputCount )
3946 {
3947 uint32_t i;
3948 io_scalar_inband64_t _input;
3949
3950 mach_msg_type_number_t scalar_outputCnt = 0;
3951 mach_vm_size_t ool_output_size = 0;
3952
3953 for (i = 0; i < inputCount; i++)
3954 _input[i] = SCALAR64(input[i]);
3955
3956 return (is_io_connect_method(connect, index,
3957 _input, inputCount,
3958 NULL, 0,
3959 0, 0,
3960 output, outputCount,
3961 NULL, &scalar_outputCnt,
3962 0, &ool_output_size));
3963 }
3964
3965 kern_return_t shim_io_connect_method_scalarI_structureO(
3966
3967 IOExternalMethod * method,
3968 IOService * object,
3969 const io_user_scalar_t * input,
3970 mach_msg_type_number_t inputCount,
3971 io_struct_inband_t output,
3972 IOByteCount * outputCount )
3973 {
3974 IOMethod func;
3975 IOReturn err;
3976
3977 err = kIOReturnBadArgument;
3978
3979 do {
3980 if( inputCount != method->count0)
3981 {
3982 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3983 continue;
3984 }
3985 if( (kIOUCVariableStructureSize != method->count1)
3986 && (*outputCount != method->count1))
3987 {
3988 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3989 continue;
3990 }
3991
3992 func = method->func;
3993
3994 switch( inputCount) {
3995
3996 case 5:
3997 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3998 ARG32(input[3]), ARG32(input[4]),
3999 output );
4000 break;
4001 case 4:
4002 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4003 ARG32(input[3]),
4004 output, (void *)outputCount );
4005 break;
4006 case 3:
4007 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4008 output, (void *)outputCount, 0 );
4009 break;
4010 case 2:
4011 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4012 output, (void *)outputCount, 0, 0 );
4013 break;
4014 case 1:
4015 err = (object->*func)( ARG32(input[0]),
4016 output, (void *)outputCount, 0, 0, 0 );
4017 break;
4018 case 0:
4019 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4020 break;
4021
4022 default:
4023 IOLog("%s: Bad method table\n", object->getName());
4024 }
4025 }
4026 while( false);
4027
4028 return( err);
4029 }
4030
4031
4032 kern_return_t shim_io_async_method_scalarI_structureO(
4033 IOExternalAsyncMethod * method,
4034 IOService * object,
4035 mach_port_t asyncWakePort,
4036 io_user_reference_t * asyncReference,
4037 uint32_t asyncReferenceCount,
4038 const io_user_scalar_t * input,
4039 mach_msg_type_number_t inputCount,
4040 io_struct_inband_t output,
4041 mach_msg_type_number_t * outputCount )
4042 {
4043 IOAsyncMethod func;
4044 uint32_t i;
4045 IOReturn err;
4046 io_async_ref_t reference;
4047
4048 for (i = 0; i < asyncReferenceCount; i++)
4049 reference[i] = REF32(asyncReference[i]);
4050
4051 err = kIOReturnBadArgument;
4052 do {
4053 if( inputCount != method->count0)
4054 {
4055 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4056 continue;
4057 }
4058 if( (kIOUCVariableStructureSize != method->count1)
4059 && (*outputCount != method->count1))
4060 {
4061 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4062 continue;
4063 }
4064
4065 func = method->func;
4066
4067 switch( inputCount) {
4068
4069 case 5:
4070 err = (object->*func)( reference,
4071 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4072 ARG32(input[3]), ARG32(input[4]),
4073 output );
4074 break;
4075 case 4:
4076 err = (object->*func)( reference,
4077 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4078 ARG32(input[3]),
4079 output, (void *)outputCount );
4080 break;
4081 case 3:
4082 err = (object->*func)( reference,
4083 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4084 output, (void *)outputCount, 0 );
4085 break;
4086 case 2:
4087 err = (object->*func)( reference,
4088 ARG32(input[0]), ARG32(input[1]),
4089 output, (void *)outputCount, 0, 0 );
4090 break;
4091 case 1:
4092 err = (object->*func)( reference,
4093 ARG32(input[0]),
4094 output, (void *)outputCount, 0, 0, 0 );
4095 break;
4096 case 0:
4097 err = (object->*func)( reference,
4098 output, (void *)outputCount, 0, 0, 0, 0 );
4099 break;
4100
4101 default:
4102 IOLog("%s: Bad method table\n", object->getName());
4103 }
4104 }
4105 while( false);
4106
4107 return( err);
4108 }
4109
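/*
 * The is_io_connect_method_* routines below are the legacy MIG entry points
 * kept for older user-space clients; each forwards to is_io_connect_method(),
 * widening any 32-bit scalars with SCALAR64 and zeroing the unused output
 * parameters.
 */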
4110 /* Routine io_connect_method_scalarI_structureI */
4111 kern_return_t is_io_connect_method_scalarI_structureI(
4112 io_connect_t connect,
4113 uint32_t index,
4114 io_scalar_inband_t input,
4115 mach_msg_type_number_t inputCount,
4116 io_struct_inband_t inputStruct,
4117 mach_msg_type_number_t inputStructCount )
4118 {
4119 uint32_t i;
4120 io_scalar_inband64_t _input;
4121
4122 mach_msg_type_number_t scalar_outputCnt = 0;
4123 mach_msg_type_number_t inband_outputCnt = 0;
4124 mach_vm_size_t ool_output_size = 0;
4125
4126 for (i = 0; i < inputCount; i++)
4127 _input[i] = SCALAR64(input[i]);
4128
4129 return (is_io_connect_method(connect, index,
4130 _input, inputCount,
4131 inputStruct, inputStructCount,
4132 0, 0,
4133 NULL, &inband_outputCnt,
4134 NULL, &scalar_outputCnt,
4135 0, &ool_output_size));
4136 }
4137
4138 kern_return_t shim_io_connect_method_scalarI_structureI(
4139 IOExternalMethod * method,
4140 IOService * object,
4141 const io_user_scalar_t * input,
4142 mach_msg_type_number_t inputCount,
4143 io_struct_inband_t inputStruct,
4144 mach_msg_type_number_t inputStructCount )
4145 {
4146 IOMethod func;
4147 IOReturn err = kIOReturnBadArgument;
4148
4149 do
4150 {
4151 if( (kIOUCVariableStructureSize != method->count0)
4152 && (inputCount != method->count0))
4153 {
4154 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4155 continue;
4156 }
4157 if( (kIOUCVariableStructureSize != method->count1)
4158 && (inputStructCount != method->count1))
4159 {
4160                 IOLog("%s: IOUserClient inputStructCount mismatch\n", object->getName());
4161 continue;
4162 }
4163
4164 func = method->func;
4165
4166 switch( inputCount) {
4167
4168 case 5:
4169 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4170 ARG32(input[3]), ARG32(input[4]),
4171 inputStruct );
4172 break;
4173 case 4:
4174             err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4175 ARG32(input[3]),
4176 inputStruct, (void *)(uintptr_t)inputStructCount );
4177 break;
4178 case 3:
4179 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4180 inputStruct, (void *)(uintptr_t)inputStructCount,
4181 0 );
4182 break;
4183 case 2:
4184 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4185 inputStruct, (void *)(uintptr_t)inputStructCount,
4186 0, 0 );
4187 break;
4188 case 1:
4189 err = (object->*func)( ARG32(input[0]),
4190 inputStruct, (void *)(uintptr_t)inputStructCount,
4191 0, 0, 0 );
4192 break;
4193 case 0:
4194 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4195 0, 0, 0, 0 );
4196 break;
4197
4198 default:
4199 IOLog("%s: Bad method table\n", object->getName());
4200 }
4201 }
4202 while (false);
4203
4204 return( err);
4205 }
4206
4207 kern_return_t shim_io_async_method_scalarI_structureI(
4208 IOExternalAsyncMethod * method,
4209 IOService * object,
4210 mach_port_t asyncWakePort,
4211 io_user_reference_t * asyncReference,
4212 uint32_t asyncReferenceCount,
4213 const io_user_scalar_t * input,
4214 mach_msg_type_number_t inputCount,
4215 io_struct_inband_t inputStruct,
4216 mach_msg_type_number_t inputStructCount )
4217 {
4218 IOAsyncMethod func;
4219 uint32_t i;
4220 IOReturn err = kIOReturnBadArgument;
4221 io_async_ref_t reference;
4222
4223 for (i = 0; i < asyncReferenceCount; i++)
4224 reference[i] = REF32(asyncReference[i]);
4225
4226 do
4227 {
4228 if( (kIOUCVariableStructureSize != method->count0)
4229 && (inputCount != method->count0))
4230 {
4231 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4232 continue;
4233 }
4234 if( (kIOUCVariableStructureSize != method->count1)
4235 && (inputStructCount != method->count1))
4236 {
4237                 IOLog("%s: IOUserClient inputStructCount mismatch\n", object->getName());
4238 continue;
4239 }
4240
4241 func = method->func;
4242
4243 switch( inputCount) {
4244
4245 case 5:
4246 err = (object->*func)( reference,
4247 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4248 ARG32(input[3]), ARG32(input[4]),
4249 inputStruct );
4250 break;
4251 case 4:
4252 err = (object->*func)( reference,
4253 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4254 ARG32(input[3]),
4255 inputStruct, (void *)(uintptr_t)inputStructCount );
4256 break;
4257 case 3:
4258 err = (object->*func)( reference,
4259 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4260 inputStruct, (void *)(uintptr_t)inputStructCount,
4261 0 );
4262 break;
4263 case 2:
4264 err = (object->*func)( reference,
4265 ARG32(input[0]), ARG32(input[1]),
4266 inputStruct, (void *)(uintptr_t)inputStructCount,
4267 0, 0 );
4268 break;
4269 case 1:
4270 err = (object->*func)( reference,
4271 ARG32(input[0]),
4272 inputStruct, (void *)(uintptr_t)inputStructCount,
4273 0, 0, 0 );
4274 break;
4275 case 0:
4276 err = (object->*func)( reference,
4277 inputStruct, (void *)(uintptr_t)inputStructCount,
4278 0, 0, 0, 0 );
4279 break;
4280
4281 default:
4282 IOLog("%s: Bad method table\n", object->getName());
4283 }
4284 }
4285 while (false);
4286
4287 return( err);
4288 }
4289
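/*
 * Struct-in/struct-out flavor: no scalars at all, so the inband input and
 * output buffers are passed straight through to is_io_connect_method().
 */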
4290 /* Routine io_connect_method_structureI_structureO */
4291 kern_return_t is_io_connect_method_structureI_structureO(
4292 io_object_t connect,
4293 uint32_t index,
4294 io_struct_inband_t input,
4295 mach_msg_type_number_t inputCount,
4296 io_struct_inband_t output,
4297 mach_msg_type_number_t * outputCount )
4298 {
4299 mach_msg_type_number_t scalar_outputCnt = 0;
4300 mach_vm_size_t ool_output_size = 0;
4301
4302 return (is_io_connect_method(connect, index,
4303 NULL, 0,
4304 input, inputCount,
4305 0, 0,
4306 output, outputCount,
4307 NULL, &scalar_outputCnt,
4308 0, &ool_output_size));
4309 }
4310
4311 kern_return_t shim_io_connect_method_structureI_structureO(
4312 IOExternalMethod * method,
4313 IOService * object,
4314 io_struct_inband_t input,
4315 mach_msg_type_number_t inputCount,
4316 io_struct_inband_t output,
4317 IOByteCount * outputCount )
4318 {
4319 IOMethod func;
4320 IOReturn err = kIOReturnBadArgument;
4321
4322 do
4323 {
4324 if( (kIOUCVariableStructureSize != method->count0)
4325 && (inputCount != method->count0))
4326 {
4327 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4328 continue;
4329 }
4330 if( (kIOUCVariableStructureSize != method->count1)
4331 && (*outputCount != method->count1))
4332 {
4333 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4334 continue;
4335 }
4336
4337 func = method->func;
4338
4339 if( method->count1) {
4340 if( method->count0) {
4341 err = (object->*func)( input, output,
4342 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4343 } else {
4344 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4345 }
4346 } else {
4347 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4348 }
4349 }
4350 while( false);
4351
4352
4353 return( err);
4354 }
4355
4356 kern_return_t shim_io_async_method_structureI_structureO(
4357 IOExternalAsyncMethod * method,
4358 IOService * object,
4359 mach_port_t asyncWakePort,
4360 io_user_reference_t * asyncReference,
4361 uint32_t asyncReferenceCount,
4362 io_struct_inband_t input,
4363 mach_msg_type_number_t inputCount,
4364 io_struct_inband_t output,
4365 mach_msg_type_number_t * outputCount )
4366 {
4367 IOAsyncMethod func;
4368 uint32_t i;
4369 IOReturn err;
4370 io_async_ref_t reference;
4371
4372 for (i = 0; i < asyncReferenceCount; i++)
4373 reference[i] = REF32(asyncReference[i]);
4374
4375 err = kIOReturnBadArgument;
4376 do
4377 {
4378 if( (kIOUCVariableStructureSize != method->count0)
4379 && (inputCount != method->count0))
4380 {
4381 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4382 continue;
4383 }
4384 if( (kIOUCVariableStructureSize != method->count1)
4385 && (*outputCount != method->count1))
4386 {
4387 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4388 continue;
4389 }
4390
4391 func = method->func;
4392
4393 if( method->count1) {
4394 if( method->count0) {
4395 err = (object->*func)( reference,
4396 input, output,
4397 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4398 } else {
4399 err = (object->*func)( reference,
4400 output, outputCount, 0, 0, 0, 0 );
4401 }
4402 } else {
4403 err = (object->*func)( reference,
4404 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4405 }
4406 }
4407 while( false);
4408
4409 return( err);
4410 }
4411
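/*
 * The is_io_catalog_* routines service the IOCatalogue side of IOKitLib
 * (kextd and kextcache are the usual callers). io_catalog_send_data receives
 * an out-of-line vm_map_copy_t of serialized driver personalities,
 * unserializes it, and applies the requested operation to gIOCatalogue.
 *
 * Hedged sketch of the user-space side (personalitiesXML/xmlLength are
 * placeholders; the call is the IOKitLib wrapper for this MIG routine):
 *
 *   kern_return_t kr = IOCatalogueSendData(kIOMasterPortDefault,
 *                                          kIOCatalogAddDrivers,
 *                                          personalitiesXML, xmlLength);
 */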
4412 /* Routine io_catalog_send_data */
4413 kern_return_t is_io_catalog_send_data(
4414 mach_port_t master_port,
4415 uint32_t flag,
4416 io_buf_ptr_t inData,
4417 mach_msg_type_number_t inDataCount,
4418 kern_return_t * result)
4419 {
4420 OSObject * obj = 0;
4421 vm_offset_t data;
4422 kern_return_t kr = kIOReturnError;
4423
4424 //printf("io_catalog_send_data called. flag: %d\n", flag);
4425
4426 if( master_port != master_device_port)
4427 return kIOReturnNotPrivileged;
4428
4429 if( (flag != kIOCatalogRemoveKernelLinker &&
4430 flag != kIOCatalogKextdActive &&
4431 flag != kIOCatalogKextdFinishedLaunching) &&
4432 ( !inData || !inDataCount) )
4433 {
4434 return kIOReturnBadArgument;
4435 }
4436
4437 if (inData) {
4438 vm_map_offset_t map_data;
4439
4440 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4441 return( kIOReturnMessageTooLarge);
4442
4443 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4444 data = CAST_DOWN(vm_offset_t, map_data);
4445
4446 if( kr != KERN_SUCCESS)
4447 return kr;
4448
4449 // must return success after vm_map_copyout() succeeds
4450
4451 if( inDataCount ) {
4452 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4453 vm_deallocate( kernel_map, data, inDataCount );
4454 if( !obj) {
4455 *result = kIOReturnNoMemory;
4456 return( KERN_SUCCESS);
4457 }
4458 }
4459 }
4460
4461 switch ( flag ) {
4462 case kIOCatalogResetDrivers:
4463 case kIOCatalogResetDriversNoMatch: {
4464 OSArray * array;
4465
4466 array = OSDynamicCast(OSArray, obj);
4467 if (array) {
4468 if ( !gIOCatalogue->resetAndAddDrivers(array,
4469 flag == kIOCatalogResetDrivers) ) {
4470
4471 kr = kIOReturnError;
4472 }
4473 } else {
4474 kr = kIOReturnBadArgument;
4475 }
4476 }
4477 break;
4478
4479 case kIOCatalogAddDrivers:
4480 case kIOCatalogAddDriversNoMatch: {
4481 OSArray * array;
4482
4483 array = OSDynamicCast(OSArray, obj);
4484 if ( array ) {
4485 if ( !gIOCatalogue->addDrivers( array ,
4486 flag == kIOCatalogAddDrivers) ) {
4487 kr = kIOReturnError;
4488 }
4489 }
4490 else {
4491 kr = kIOReturnBadArgument;
4492 }
4493 }
4494 break;
4495
4496 case kIOCatalogRemoveDrivers:
4497 case kIOCatalogRemoveDriversNoMatch: {
4498 OSDictionary * dict;
4499
4500 dict = OSDynamicCast(OSDictionary, obj);
4501 if ( dict ) {
4502 if ( !gIOCatalogue->removeDrivers( dict,
4503 flag == kIOCatalogRemoveDrivers ) ) {
4504 kr = kIOReturnError;
4505 }
4506 }
4507 else {
4508 kr = kIOReturnBadArgument;
4509 }
4510 }
4511 break;
4512
4513 case kIOCatalogStartMatching: {
4514 OSDictionary * dict;
4515
4516 dict = OSDynamicCast(OSDictionary, obj);
4517 if ( dict ) {
4518 if ( !gIOCatalogue->startMatching( dict ) ) {
4519 kr = kIOReturnError;
4520 }
4521 }
4522 else {
4523 kr = kIOReturnBadArgument;
4524 }
4525 }
4526 break;
4527
4528 case kIOCatalogRemoveKernelLinker:
4529 kr = KERN_NOT_SUPPORTED;
4530 break;
4531
4532 case kIOCatalogKextdActive:
4533 #if !NO_KEXTD
4534 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4535 OSKext::setKextdActive();
4536
4537 /* Dump all nonloaded startup extensions; kextd will now send them
4538 * down on request.
4539 */
4540 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4541 #endif
4542 kr = kIOReturnSuccess;
4543 break;
4544
4545 case kIOCatalogKextdFinishedLaunching: {
4546 #if !NO_KEXTD
4547 static bool clearedBusy = false;
4548
4549 if (!clearedBusy) {
4550 IOService * serviceRoot = IOService::getServiceRoot();
4551 if (serviceRoot) {
4552 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4553 serviceRoot->adjustBusy(-1);
4554 clearedBusy = true;
4555 }
4556 }
4557 #endif
4558 kr = kIOReturnSuccess;
4559 }
4560 break;
4561
4562 default:
4563 kr = kIOReturnBadArgument;
4564 break;
4565 }
4566
4567 if (obj) obj->release();
4568
4569 *result = kr;
4570 return( KERN_SUCCESS);
4571 }
4572
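/*
 * io_catalog_terminate requires the administrator privilege in addition to
 * the master device port; it either terminates every registered service of
 * the named class or asks the catalogue to terminate (and optionally unload)
 * a module's drivers.
 */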
4573 /* Routine io_catalog_terminate */
4574 kern_return_t is_io_catalog_terminate(
4575 mach_port_t master_port,
4576 uint32_t flag,
4577 io_name_t name )
4578 {
4579 kern_return_t kr;
4580
4581 if( master_port != master_device_port )
4582 return kIOReturnNotPrivileged;
4583
4584 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4585 kIOClientPrivilegeAdministrator );
4586 if( kIOReturnSuccess != kr)
4587 return( kr );
4588
4589 switch ( flag ) {
4590 #if !defined(SECURE_KERNEL)
4591 case kIOCatalogServiceTerminate:
4592 OSIterator * iter;
4593 IOService * service;
4594
4595 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4596 kIORegistryIterateRecursively);
4597 if ( !iter )
4598 return kIOReturnNoMemory;
4599
4600 do {
4601 iter->reset();
4602 while( (service = (IOService *)iter->getNextObject()) ) {
4603 if( service->metaCast(name)) {
4604 if ( !service->terminate( kIOServiceRequired
4605 | kIOServiceSynchronous) ) {
4606 kr = kIOReturnUnsupported;
4607 break;
4608 }
4609 }
4610 }
4611 } while( !service && !iter->isValid());
4612 iter->release();
4613 break;
4614
4615 case kIOCatalogModuleUnload:
4616 case kIOCatalogModuleTerminate:
4617 kr = gIOCatalogue->terminateDriversForModule(name,
4618 flag == kIOCatalogModuleUnload);
4619 break;
4620 #endif
4621
4622 default:
4623 kr = kIOReturnBadArgument;
4624 break;
4625 }
4626
4627 return( kr );
4628 }
4629
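/*
 * io_catalog_get_data serializes the catalogue (filtered by 'flag') into a
 * kernel buffer and hands it back to the caller as out-of-line data via
 * vm_map_copyin().
 */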
4630 /* Routine io_catalog_get_data */
4631 kern_return_t is_io_catalog_get_data(
4632 mach_port_t master_port,
4633 uint32_t flag,
4634 io_buf_ptr_t *outData,
4635 mach_msg_type_number_t *outDataCount)
4636 {
4637 kern_return_t kr = kIOReturnSuccess;
4638 OSSerialize * s;
4639
4640 if( master_port != master_device_port)
4641 return kIOReturnNotPrivileged;
4642
4643 //printf("io_catalog_get_data called. flag: %d\n", flag);
4644
4645 s = OSSerialize::withCapacity(4096);
4646 if ( !s )
4647 return kIOReturnNoMemory;
4648
4649 kr = gIOCatalogue->serializeData(flag, s);
4650
4651 if ( kr == kIOReturnSuccess ) {
4652 vm_offset_t data;
4653 vm_map_copy_t copy;
4654 vm_size_t size;
4655
4656 size = s->getLength();
4657 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4658 if ( kr == kIOReturnSuccess ) {
4659 bcopy(s->text(), (void *)data, size);
4660 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4661 (vm_map_size_t)size, true, &copy);
4662 *outData = (char *)copy;
4663 *outDataCount = size;
4664 }
4665 }
4666
4667 s->release();
4668
4669 return kr;
4670 }
4671
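/*
 * Returns gIOCatalogue's current generation count, which user space can
 * compare against a cached value to detect catalogue changes.
 */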
4672 /* Routine io_catalog_get_gen_count */
4673 kern_return_t is_io_catalog_get_gen_count(
4674 mach_port_t master_port,
4675 uint32_t *genCount)
4676 {
4677 if( master_port != master_device_port)
4678 return kIOReturnNotPrivileged;
4679
4680 //printf("io_catalog_get_gen_count called.\n");
4681
4682 if ( !genCount )
4683 return kIOReturnBadArgument;
4684
4685 *genCount = gIOCatalogue->getGenerationCount();
4686
4687 return kIOReturnSuccess;
4688 }
4689
4690 /* Routine io_catalog_module_loaded.
4691  * Invoked from IOKitLib's IOCatalogueModuleLoaded(); appears to be unused in practice.
4692 */
4693 kern_return_t is_io_catalog_module_loaded(
4694 mach_port_t master_port,
4695 io_name_t name)
4696 {
4697 if( master_port != master_device_port)
4698 return kIOReturnNotPrivileged;
4699
4700 //printf("io_catalog_module_loaded called. name %s\n", name);
4701
4702 if ( !name )
4703 return kIOReturnBadArgument;
4704
4705 gIOCatalogue->moduleHasLoaded(name);
4706
4707 return kIOReturnSuccess;
4708 }
4709
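/*
 * is_io_catalog_reset restores the catalogue to its initial personality set;
 * only kIOCatalogResetDefault is accepted.
 */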
4710 kern_return_t is_io_catalog_reset(
4711 mach_port_t master_port,
4712 uint32_t flag)
4713 {
4714 if( master_port != master_device_port)
4715 return kIOReturnNotPrivileged;
4716
4717 switch ( flag ) {
4718 case kIOCatalogResetDefault:
4719 gIOCatalogue->reset();
4720 break;
4721
4722 default:
4723 return kIOReturnBadArgument;
4724 }
4725
4726 return kIOReturnSuccess;
4727 }
4728
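/*
 * iokit_user_client_trap is the fast path: user space reaches it through the
 * iokit_user_client_trap Mach trap (IOConnectTrap0..6 in IOKitLib) rather
 * than a MIG message. The user client's getTargetAndTrapForIndex() supplies
 * the target/function pair, which is then invoked with up to six untyped
 * words.
 *
 * Hedged usage sketch from user space (index 0 is an assumption; a real
 * client must actually expose a trap at that index):
 *
 *   kern_return_t kr = IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6);
 */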
4729 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4730 {
4731 kern_return_t result = kIOReturnBadArgument;
4732 IOUserClient *userClient;
4733
4734 if ((userClient = OSDynamicCast(IOUserClient,
4735 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4736 IOExternalTrap *trap;
4737 IOService *target = NULL;
4738
4739 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4740
4741 if (trap && target) {
4742 IOTrap func;
4743
4744 func = trap->func;
4745
4746 if (func) {
4747 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4748 }
4749 }
4750
4751 userClient->release();
4752 }
4753
4754 return result;
4755 }
4756
4757 } /* extern "C" */
4758
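/*
 * IOUserClient::externalMethod() is the modern dispatch point reached from
 * is_io_connect_method(). When the subclass supplies an
 * IOExternalMethodDispatch entry, the four check* fields are validated
 * against the caller's argument counts/sizes before the function is invoked;
 * otherwise the call falls back to the pre-Leopard IOExternalMethod /
 * IOExternalAsyncMethod tables via the shim_* routines above.
 *
 * Hedged illustration only (MyUserClient, sSetValue and the counts are
 * hypothetical): a subclass typically overrides externalMethod() and indexes
 * a static dispatch table, e.g.
 *
 *   static const IOExternalMethodDispatch sMethods[] = {
 *       // function, scalarIn, structIn, scalarOut, structOut
 *       { &MyUserClient::sSetValue, 1, 0, 1, 0 },
 *   };
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference)
 *   {
 *       if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *           dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */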
4759 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4760 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4761 {
4762 IOReturn err;
4763 IOService * object;
4764 IOByteCount structureOutputSize;
4765
4766 if (dispatch)
4767 {
4768 uint32_t count;
4769 count = dispatch->checkScalarInputCount;
4770 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4771 {
4772 return (kIOReturnBadArgument);
4773 }
4774
4775 count = dispatch->checkStructureInputSize;
4776 if ((kIOUCVariableStructureSize != count)
4777 && (count != ((args->structureInputDescriptor)
4778 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4779 {
4780 return (kIOReturnBadArgument);
4781 }
4782
4783 count = dispatch->checkScalarOutputCount;
4784 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4785 {
4786 return (kIOReturnBadArgument);
4787 }
4788
4789 count = dispatch->checkStructureOutputSize;
4790 if ((kIOUCVariableStructureSize != count)
4791 && (count != ((args->structureOutputDescriptor)
4792 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4793 {
4794 return (kIOReturnBadArgument);
4795 }
4796
4797 if (dispatch->function)
4798 err = (*dispatch->function)(target, reference, args);
4799 else
4800             err = kIOReturnNoCompletion;        /* implementor can dispatch */
4801
4802 return (err);
4803 }
4804
4805
4806     // pre-Leopard APIs don't support out-of-line (OOL) structs
4807 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4808 {
4809 err = kIOReturnIPCError;
4810 return (err);
4811 }
4812
4813 structureOutputSize = args->structureOutputSize;
4814
4815 if (args->asyncWakePort)
4816 {
4817 IOExternalAsyncMethod * method;
4818
4819 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
4820 return (kIOReturnUnsupported);
4821
4822 if (kIOUCForegroundOnly & method->flags)
4823 {
4824 if (task_is_gpu_denied(current_task()))
4825 return (kIOReturnNotPermitted);
4826 }
4827
4828 switch (method->flags & kIOUCTypeMask)
4829 {
4830 case kIOUCScalarIStructI:
4831 err = shim_io_async_method_scalarI_structureI( method, object,
4832 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4833 args->scalarInput, args->scalarInputCount,
4834 (char *)args->structureInput, args->structureInputSize );
4835 break;
4836
4837 case kIOUCScalarIScalarO:
4838 err = shim_io_async_method_scalarI_scalarO( method, object,
4839 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4840 args->scalarInput, args->scalarInputCount,
4841 args->scalarOutput, &args->scalarOutputCount );
4842 break;
4843
4844 case kIOUCScalarIStructO:
4845 err = shim_io_async_method_scalarI_structureO( method, object,
4846 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4847 args->scalarInput, args->scalarInputCount,
4848 (char *) args->structureOutput, &args->structureOutputSize );
4849 break;
4850
4851
4852 case kIOUCStructIStructO:
4853 err = shim_io_async_method_structureI_structureO( method, object,
4854 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4855 (char *)args->structureInput, args->structureInputSize,
4856 (char *) args->structureOutput, &args->structureOutputSize );
4857 break;
4858
4859 default:
4860 err = kIOReturnBadArgument;
4861 break;
4862 }
4863 }
4864 else
4865 {
4866 IOExternalMethod * method;
4867
4868 if( !(method = getTargetAndMethodForIndex(&object, selector)) )
4869 return (kIOReturnUnsupported);
4870
4871 if (kIOUCForegroundOnly & method->flags)
4872 {
4873 if (task_is_gpu_denied(current_task()))
4874 return (kIOReturnNotPermitted);
4875 }
4876
4877 switch (method->flags & kIOUCTypeMask)
4878 {
4879 case kIOUCScalarIStructI:
4880 err = shim_io_connect_method_scalarI_structureI( method, object,
4881 args->scalarInput, args->scalarInputCount,
4882 (char *) args->structureInput, args->structureInputSize );
4883 break;
4884
4885 case kIOUCScalarIScalarO:
4886 err = shim_io_connect_method_scalarI_scalarO( method, object,
4887 args->scalarInput, args->scalarInputCount,
4888 args->scalarOutput, &args->scalarOutputCount );
4889 break;
4890
4891 case kIOUCScalarIStructO:
4892 err = shim_io_connect_method_scalarI_structureO( method, object,
4893 args->scalarInput, args->scalarInputCount,
4894 (char *) args->structureOutput, &structureOutputSize );
4895 break;
4896
4897
4898 case kIOUCStructIStructO:
4899 err = shim_io_connect_method_structureI_structureO( method, object,
4900 (char *) args->structureInput, args->structureInputSize,
4901 (char *) args->structureOutput, &structureOutputSize );
4902 break;
4903
4904 default:
4905 err = kIOReturnBadArgument;
4906 break;
4907 }
4908 }
4909
4910 args->structureOutputSize = structureOutputSize;
4911
4912 return (err);
4913 }
4914
4915
4916 #if __LP64__
4917 OSMetaClassDefineReservedUnused(IOUserClient, 0);
4918 OSMetaClassDefineReservedUnused(IOUserClient, 1);
4919 #else
4920 OSMetaClassDefineReservedUsed(IOUserClient, 0);
4921 OSMetaClassDefineReservedUsed(IOUserClient, 1);
4922 #endif
4923 OSMetaClassDefineReservedUnused(IOUserClient, 2);
4924 OSMetaClassDefineReservedUnused(IOUserClient, 3);
4925 OSMetaClassDefineReservedUnused(IOUserClient, 4);
4926 OSMetaClassDefineReservedUnused(IOUserClient, 5);
4927 OSMetaClassDefineReservedUnused(IOUserClient, 6);
4928 OSMetaClassDefineReservedUnused(IOUserClient, 7);
4929 OSMetaClassDefineReservedUnused(IOUserClient, 8);
4930 OSMetaClassDefineReservedUnused(IOUserClient, 9);
4931 OSMetaClassDefineReservedUnused(IOUserClient, 10);
4932 OSMetaClassDefineReservedUnused(IOUserClient, 11);
4933 OSMetaClassDefineReservedUnused(IOUserClient, 12);
4934 OSMetaClassDefineReservedUnused(IOUserClient, 13);
4935 OSMetaClassDefineReservedUnused(IOUserClient, 14);
4936 OSMetaClassDefineReservedUnused(IOUserClient, 15);
4937