apple/xnu.git (xnu-1699.24.8) / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <libkern/OSDebug.h>
43 #include <sys/proc.h>
44 #include <sys/kauth.h>
45
46 #if CONFIG_MACF
47
48 extern "C" {
49 #include <security/mac_framework.h>
50 };
51 #include <sys/kauth.h>
52
53 #define IOMACF_LOG 0
54
55 #endif /* CONFIG_MACF */
56
57 #include <IOKit/assert.h>
58
59 #include "IOServicePrivate.h"
60 #include "IOKitKernelInternal.h"
61
62 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
63 #define SCALAR32(x) ((uint32_t )x)
64 #define ARG32(x) ((void *)SCALAR32(x))
65 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
66 #define REF32(x) ((int)(x))
67
68 enum
69 {
70 kIOUCAsync0Flags = 3ULL,
71 kIOUCAsync64Flag = 1ULL
72 };
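// asyncRef[kIOAsyncReservedIndex] carries the client's wake port with its two
// low-order bits reserved for flags: kIOUCAsync0Flags masks both bits, and
// kIOUCAsync64Flag within that mask marks a reference built for a 64-bit
// client. sendAsyncResult64() below recovers them with, for example:
//
//     replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
//     if (kIOUCAsync64Flag & reference[0])
//         ; // use the 64-bit reply layout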
73
74 #if IOKITSTATS
75
76 #define IOStatisticsRegisterCounter() \
77 do { \
78 reserved->counter = IOStatistics::registerUserClient(this); \
79 } while (0)
80
81 #define IOStatisticsUnregisterCounter() \
82 do { \
83 if (reserved) \
84 IOStatistics::unregisterUserClient(reserved->counter); \
85 } while (0)
86
87 #define IOStatisticsClientCall() \
88 do { \
89 IOStatistics::countUserClientCall(client); \
90 } while (0)
91
92 #else
93
94 #define IOStatisticsRegisterCounter()
95 #define IOStatisticsUnregisterCounter()
96 #define IOStatisticsClientCall()
97
98 #endif /* IOKITSTATS */
99
100 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
101
102 // definitions we should get from osfmk
103
104 //typedef struct ipc_port * ipc_port_t;
105 typedef natural_t ipc_kobject_type_t;
106
107 #define IKOT_IOKIT_SPARE 27
108 #define IKOT_IOKIT_CONNECT 29
109 #define IKOT_IOKIT_OBJECT 30
110
111 extern "C" {
112
113 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
114 ipc_kobject_type_t type );
115
116 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
117
118 extern mach_port_name_t iokit_make_send_right( task_t task,
119 io_object_t obj, ipc_kobject_type_t type );
120
121 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
122
123 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
124
125 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
126
127 extern ipc_port_t master_device_port;
128
129 extern void iokit_retain_port( ipc_port_t port );
130 extern void iokit_release_port( ipc_port_t port );
131 extern void iokit_release_port_send( ipc_port_t port );
132
133 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
134
135 #include <mach/mach_traps.h>
136 #include <vm/vm_map.h>
137
138 } /* extern "C" */
139
140
141 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
142
143 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
144
145 class IOMachPort : public OSObject
146 {
147 OSDeclareDefaultStructors(IOMachPort)
148 public:
149 OSObject * object;
150 ipc_port_t port;
151 UInt32 mscount;
152 UInt8 holdDestroy;
153
154 static IOMachPort * portForObject( OSObject * obj,
155 ipc_kobject_type_t type );
156 static bool noMoreSendersForObject( OSObject * obj,
157 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
158 static void releasePortForObject( OSObject * obj,
159 ipc_kobject_type_t type );
160 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
161
162 static OSDictionary * dictForType( ipc_kobject_type_t type );
163
164 static mach_port_name_t makeSendRightForTask( task_t task,
165 io_object_t obj, ipc_kobject_type_t type );
166
167 virtual void free();
168 };
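// Rough sketch of the lifecycle: portForObject() keys an IOMachPort by the raw
// object pointer (cast to OSSymbol) in gIOObjectPorts or gIOConnectPorts under
// gIOObjectPortLock, allocating the port on first use and bumping mscount so
// noMoreSendersForObject() can later tell whether every vended send right is
// gone. iokit_port_for_object() below is the usual caller:
//
//     if( (machPort = IOMachPort::portForObject( obj, type ))) {
//         port = machPort->port;      // take an extra port reference before use
//         machPort->release();
//     }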
169
170 #define super OSObject
171 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
172
173 static IOLock * gIOObjectPortLock;
174
175 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
176
177 // not in dictForType() for debugging ease
178 static OSDictionary * gIOObjectPorts;
179 static OSDictionary * gIOConnectPorts;
180
181 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
182 {
183 OSDictionary ** dict;
184
185 if( IKOT_IOKIT_OBJECT == type )
186 dict = &gIOObjectPorts;
187 else if( IKOT_IOKIT_CONNECT == type )
188 dict = &gIOConnectPorts;
189 else
190 return( 0 );
191
192 if( 0 == *dict)
193 *dict = OSDictionary::withCapacity( 1 );
194
195 return( *dict );
196 }
197
198 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
199 ipc_kobject_type_t type )
200 {
201 IOMachPort * inst = 0;
202 OSDictionary * dict;
203
204 IOTakeLock( gIOObjectPortLock);
205
206 do {
207
208 dict = dictForType( type );
209 if( !dict)
210 continue;
211
212 if( (inst = (IOMachPort *)
213 dict->getObject( (const OSSymbol *) obj ))) {
214 inst->mscount++;
215 inst->retain();
216 continue;
217 }
218
219 inst = new IOMachPort;
220 if( inst && !inst->init()) {
221 inst = 0;
222 continue;
223 }
224
225 inst->port = iokit_alloc_object_port( obj, type );
226 if( inst->port) {
227 // retains obj
228 dict->setObject( (const OSSymbol *) obj, inst );
229 inst->mscount++;
230
231 } else {
232 inst->release();
233 inst = 0;
234 }
235
236 } while( false );
237
238 IOUnlock( gIOObjectPortLock);
239
240 return( inst );
241 }
242
243 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
244 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
245 {
246 OSDictionary * dict;
247 IOMachPort * machPort;
248 bool destroyed = true;
249
250 IOTakeLock( gIOObjectPortLock);
251
252 if( (dict = dictForType( type ))) {
253 obj->retain();
254
255 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
256 if( machPort) {
257 destroyed = (machPort->mscount <= *mscount);
258 if( destroyed)
259 dict->removeObject( (const OSSymbol *) obj );
260 else
261 *mscount = machPort->mscount;
262 }
263 obj->release();
264 }
265
266 IOUnlock( gIOObjectPortLock);
267
268 return( destroyed );
269 }
270
271 void IOMachPort::releasePortForObject( OSObject * obj,
272 ipc_kobject_type_t type )
273 {
274 OSDictionary * dict;
275 IOMachPort * machPort;
276
277 IOTakeLock( gIOObjectPortLock);
278
279 if( (dict = dictForType( type ))) {
280 obj->retain();
281 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
282 if( machPort && !machPort->holdDestroy)
283 dict->removeObject( (const OSSymbol *) obj );
284 obj->release();
285 }
286
287 IOUnlock( gIOObjectPortLock);
288 }
289
290 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
291 {
292 OSDictionary * dict;
293 IOMachPort * machPort;
294
295 IOLockLock( gIOObjectPortLock );
296
297 if( (dict = dictForType( type ))) {
298 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
299 if( machPort)
300 machPort->holdDestroy = true;
301 }
302
303 IOLockUnlock( gIOObjectPortLock );
304 }
305
306 void IOUserClient::destroyUserReferences( OSObject * obj )
307 {
308 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
309
310 // panther, 3160200
311 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
312
313 OSDictionary * dict;
314
315 IOTakeLock( gIOObjectPortLock);
316 obj->retain();
317
318 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
319 {
320 IOMachPort * port;
321 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
322 if (port)
323 {
324 IOUserClient * uc;
325 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
326 {
327 dict->setObject((const OSSymbol *) uc->mappings, port);
328 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
329
330 uc->mappings->release();
331 uc->mappings = 0;
332 }
333 dict->removeObject( (const OSSymbol *) obj );
334 }
335 }
336 obj->release();
337 IOUnlock( gIOObjectPortLock);
338 }
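// Note that the connect port of a dying user client is not simply destroyed
// (see the "panther, 3160200" comment above): when the client still has live
// mappings, the port is re-keyed under uc->mappings and switched to that
// object with iokit_switch_object_port(), presumably so outstanding send
// rights keep a valid target until the mappings themselves are released.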
339
340 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
341 io_object_t obj, ipc_kobject_type_t type )
342 {
343 return( iokit_make_send_right( task, obj, type ));
344 }
345
346 void IOMachPort::free( void )
347 {
348 if( port)
349 iokit_destroy_object_port( port );
350 super::free();
351 }
352
353 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
354
355 class IOUserNotification : public OSIterator
356 {
357 OSDeclareDefaultStructors(IOUserNotification)
358
359 IONotifier * holdNotify;
360 IOLock * lock;
361
362 public:
363
364 virtual bool init( void );
365 virtual void free();
366
367 virtual void setNotification( IONotifier * obj );
368
369 virtual void reset();
370 virtual bool isValid();
371 };
372
373 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
374
375 extern "C" {
376
377 // functions called from osfmk/device/iokit_rpc.c
378
379 void
380 iokit_add_reference( io_object_t obj )
381 {
382 if( obj)
383 obj->retain();
384 }
385
386 void
387 iokit_remove_reference( io_object_t obj )
388 {
389 if( obj)
390 obj->release();
391 }
392
393 ipc_port_t
394 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
395 {
396 IOMachPort * machPort;
397 ipc_port_t port;
398
399 if( (machPort = IOMachPort::portForObject( obj, type ))) {
400
401 port = machPort->port;
402 if( port)
403 iokit_retain_port( port );
404
405 machPort->release();
406
407 } else
408 port = NULL;
409
410 return( port );
411 }
412
413 kern_return_t
414 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
415 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
416 {
417 IOUserClient * client;
418 IOMemoryMap * map;
419 IOUserNotification * notify;
420
421 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
422 return( kIOReturnNotReady );
423
424 if( IKOT_IOKIT_CONNECT == type)
425 {
426 if( (client = OSDynamicCast( IOUserClient, obj ))) {
427 IOStatisticsClientCall();
428 client->clientDied();
429 }
430 }
431 else if( IKOT_IOKIT_OBJECT == type)
432 {
433 if( (map = OSDynamicCast( IOMemoryMap, obj )))
434 map->taskDied();
435 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
436 notify->setNotification( 0 );
437 }
438
439 return( kIOReturnSuccess );
440 }
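// iokit_client_died() is the no-senders path: once noMoreSendersForObject()
// agrees the make-send count has been consumed, a connect port runs
// clientDied() (which by default just calls clientClose()), while an object
// port tears down an IOMemoryMap or clears the notifier held by an
// IOUserNotification.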
441
442 }; /* extern "C" */
443
444 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
445
446 class IOServiceUserNotification : public IOUserNotification
447 {
448 OSDeclareDefaultStructors(IOServiceUserNotification)
449
450 struct PingMsg {
451 mach_msg_header_t msgHdr;
452 OSNotificationHeader64 notifyHeader;
453 };
454
455 enum { kMaxOutstanding = 1024 };
456
457 PingMsg * pingMsg;
458 vm_size_t msgSize;
459 OSArray * newSet;
460 OSObject * lastEntry;
461 bool armed;
462
463 public:
464
465 virtual bool init( mach_port_t port, natural_t type,
466 void * reference, vm_size_t referenceSize,
467 bool clientIs64 );
468 virtual void free();
469
470 static bool _handler( void * target,
471 void * ref, IOService * newService, IONotifier * notifier );
472 virtual bool handler( void * ref, IOService * newService );
473
474 virtual OSObject * getNextObject();
475 };
476
477 class IOServiceMessageUserNotification : public IOUserNotification
478 {
479 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
480
481 struct PingMsg {
482 mach_msg_header_t msgHdr;
483 mach_msg_body_t msgBody;
484 mach_msg_port_descriptor_t ports[1];
485 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
486 };
487
488 PingMsg * pingMsg;
489 vm_size_t msgSize;
490 uint8_t clientIs64;
491 int owningPID;
492
493 public:
494
495 virtual bool init( mach_port_t port, natural_t type,
496 void * reference, vm_size_t referenceSize,
497 vm_size_t extraSize,
498 bool clientIs64 );
499
500 virtual void free();
501
502 static IOReturn _handler( void * target, void * ref,
503 UInt32 messageType, IOService * provider,
504 void * messageArgument, vm_size_t argSize );
505 virtual IOReturn handler( void * ref,
506 UInt32 messageType, IOService * provider,
507 void * messageArgument, vm_size_t argSize );
508
509 virtual OSObject * getNextObject();
510 };
511
512 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
513
514 #undef super
515 #define super OSIterator
516 OSDefineMetaClass( IOUserNotification, OSIterator )
517 OSDefineAbstractStructors( IOUserNotification, OSIterator )
518
519 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
520
521 bool IOUserNotification::init( void )
522 {
523 if( !super::init())
524 return( false );
525
526 lock = IOLockAlloc();
527 if( !lock)
528 return( false );
529
530 return( true );
531 }
532
533 void IOUserNotification::free( void )
534 {
535 if( holdNotify)
536 holdNotify->remove();
537 // can't be in handler now
538
539 if( lock)
540 IOLockFree( lock );
541
542 super::free();
543 }
544
545
546 void IOUserNotification::setNotification( IONotifier * notify )
547 {
548 IONotifier * previousNotify;
549
550 IOLockLock( gIOObjectPortLock);
551
552 previousNotify = holdNotify;
553 holdNotify = notify;
554
555 IOLockUnlock( gIOObjectPortLock);
556
557 if( previousNotify)
558 previousNotify->remove();
559 }
560
561 void IOUserNotification::reset()
562 {
563 // ?
564 }
565
566 bool IOUserNotification::isValid()
567 {
568 return( true );
569 }
570
571 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
572
573 #undef super
574 #define super IOUserNotification
575 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
576
577 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
578
579 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
580 void * reference, vm_size_t referenceSize,
581 bool clientIs64 )
582 {
583 newSet = OSArray::withCapacity( 1 );
584 if( !newSet)
585 return( false );
586
587 if (referenceSize > sizeof(OSAsyncReference64))
588 return( false );
589
590 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
591 pingMsg = (PingMsg *) IOMalloc( msgSize);
592 if( !pingMsg)
593 return( false );
594
595 bzero( pingMsg, msgSize);
596
597 pingMsg->msgHdr.msgh_remote_port = port;
598 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
599 MACH_MSG_TYPE_COPY_SEND /*remote*/,
600 MACH_MSG_TYPE_MAKE_SEND /*local*/);
601 pingMsg->msgHdr.msgh_size = msgSize;
602 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
603
604 pingMsg->notifyHeader.size = 0;
605 pingMsg->notifyHeader.type = type;
606 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
607
608 return( super::init() );
609 }
610
611 void IOServiceUserNotification::free( void )
612 {
613 PingMsg * _pingMsg;
614 vm_size_t _msgSize;
615 OSArray * _newSet;
616 OSObject * _lastEntry;
617
618 _pingMsg = pingMsg;
619 _msgSize = msgSize;
620 _lastEntry = lastEntry;
621 _newSet = newSet;
622
623 super::free();
624
625 if( _pingMsg && _msgSize)
626 IOFree( _pingMsg, _msgSize);
627
628 if( _lastEntry)
629 _lastEntry->release();
630
631 if( _newSet)
632 _newSet->release();
633 }
634
635 bool IOServiceUserNotification::_handler( void * target,
636 void * ref, IOService * newService, IONotifier * notifier )
637 {
638 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
639 }
640
641 bool IOServiceUserNotification::handler( void * ref,
642 IOService * newService )
643 {
644 unsigned int count;
645 kern_return_t kr;
646 ipc_port_t port = NULL;
647 bool sendPing = false;
648
649 IOTakeLock( lock );
650
651 count = newSet->getCount();
652 if( count < kMaxOutstanding) {
653
654 newSet->setObject( newService );
655 if( (sendPing = (armed && (0 == count))))
656 armed = false;
657 }
658
659 IOUnlock( lock );
660
661 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
662 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
663
664 if( sendPing) {
665 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
666 pingMsg->msgHdr.msgh_local_port = port;
667 else
668 pingMsg->msgHdr.msgh_local_port = NULL;
669
670 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
671 pingMsg->msgHdr.msgh_size);
672 if( port)
673 iokit_release_port( port );
674
675 if( KERN_SUCCESS != kr)
676 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
677 }
678
679 return( true );
680 }
681
682 OSObject * IOServiceUserNotification::getNextObject()
683 {
684 unsigned int count;
685 OSObject * result;
686
687 IOTakeLock( lock );
688
689 if( lastEntry)
690 lastEntry->release();
691
692 count = newSet->getCount();
693 if( count ) {
694 result = newSet->getObject( count - 1 );
695 result->retain();
696 newSet->removeObject( count - 1);
697 } else {
698 result = 0;
699 armed = true;
700 }
701 lastEntry = result;
702
703 IOUnlock( lock );
704
705 return( result );
706 }
707
708 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
709
710 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
711
712 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
713
714 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
715 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
716 bool client64 )
717 {
718
719 if (referenceSize > sizeof(OSAsyncReference64))
720 return( false );
721
722 clientIs64 = client64;
723
724 owningPID = proc_selfpid();
725
726 extraSize += sizeof(IOServiceInterestContent64);
727 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
728 pingMsg = (PingMsg *) IOMalloc( msgSize);
729 if( !pingMsg)
730 return( false );
731
732 bzero( pingMsg, msgSize);
733
734 pingMsg->msgHdr.msgh_remote_port = port;
735 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
736 | MACH_MSGH_BITS(
737 MACH_MSG_TYPE_COPY_SEND /*remote*/,
738 MACH_MSG_TYPE_MAKE_SEND /*local*/);
739 pingMsg->msgHdr.msgh_size = msgSize;
740 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
741
742 pingMsg->msgBody.msgh_descriptor_count = 1;
743
744 pingMsg->ports[0].name = 0;
745 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
746 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
747
748 pingMsg->notifyHeader.size = extraSize;
749 pingMsg->notifyHeader.type = type;
750 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
751
752 return( super::init() );
753 }
754
755 void IOServiceMessageUserNotification::free( void )
756 {
757 PingMsg * _pingMsg;
758 vm_size_t _msgSize;
759
760 _pingMsg = pingMsg;
761 _msgSize = msgSize;
762
763 super::free();
764
765 if( _pingMsg && _msgSize)
766 IOFree( _pingMsg, _msgSize);
767 }
768
769 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
770 UInt32 messageType, IOService * provider,
771 void * argument, vm_size_t argSize )
772 {
773 return( ((IOServiceMessageUserNotification *) target)->handler(
774 ref, messageType, provider, argument, argSize));
775 }
776
777 IOReturn IOServiceMessageUserNotification::handler( void * ref,
778 UInt32 messageType, IOService * provider,
779 void * messageArgument, vm_size_t argSize )
780 {
781 kern_return_t kr;
782 ipc_port_t thisPort, providerPort;
783 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
784 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
785 // == pingMsg->notifyHeader.content;
786
787 if (kIOMessageCopyClientID == messageType)
788 {
789 *((void **) messageArgument) = IOCopyLogNameForPID(owningPID);
790 return (kIOReturnSuccess);
791 }
792
793 data->messageType = messageType;
794
795 if( argSize == 0)
796 {
797 data->messageArgument[0] = (io_user_reference_t) messageArgument;
798 if (clientIs64)
799 argSize = sizeof(data->messageArgument[0]);
800 else
801 {
802 data->messageArgument[0] |= (data->messageArgument[0] << 32);
803 argSize = sizeof(uint32_t);
804 }
805 }
806 else
807 {
808 if( argSize > kIOUserNotifyMaxMessageSize)
809 argSize = kIOUserNotifyMaxMessageSize;
810 bcopy( messageArgument, data->messageArgument, argSize );
811 }
812 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
813 + sizeof( IOServiceInterestContent64 )
814 - sizeof( data->messageArgument)
815 + argSize;
816
817 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
818 pingMsg->ports[0].name = providerPort;
819 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
820 pingMsg->msgHdr.msgh_local_port = thisPort;
821 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
822 pingMsg->msgHdr.msgh_size);
823 if( thisPort)
824 iokit_release_port( thisPort );
825 if( providerPort)
826 iokit_release_port( providerPort );
827
828 if( KERN_SUCCESS != kr)
829 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
830
831 return( kIOReturnSuccess );
832 }
833
834 OSObject * IOServiceMessageUserNotification::getNextObject()
835 {
836 return( 0 );
837 }
838
839 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
840
841 #undef super
842 #define super IOService
843 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
844
845 void IOUserClient::initialize( void )
846 {
847 gIOObjectPortLock = IOLockAlloc();
848
849 assert( gIOObjectPortLock );
850 }
851
852 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
853 mach_port_t wakePort,
854 void *callback, void *refcon)
855 {
856 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
857 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
858 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
859 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
860 }
861
862 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
863 mach_port_t wakePort,
864 mach_vm_address_t callback, io_user_reference_t refcon)
865 {
866 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
867 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
868 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
869 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
870 }
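// Minimal sketch of the intended use from a driver's external method handler
// (hypothetical names; assumes the asyncWakePort / asyncReference fields of
// IOExternalMethodArguments):
//
//     OSAsyncReference64 ref;
//     bcopy(args->asyncReference, ref, sizeof(OSAsyncReference64));
//     IOUserClient::setAsyncReference64(ref, args->asyncWakePort, callback, refcon);
//     // ... later, when the operation completes:
//     io_user_reference_t result = status;
//     sendAsyncResult64(ref, kIOReturnSuccess, &result, 1);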
871
872 static OSDictionary * CopyConsoleUser(UInt32 uid)
873 {
874 OSArray * array;
875 OSDictionary * user = 0;
876
877 if ((array = OSDynamicCast(OSArray,
878 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
879 {
880 for (unsigned int idx = 0;
881 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
882 idx++) {
883 OSNumber * num;
884
885 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
886 && (uid == num->unsigned32BitValue())) {
887 user->retain();
888 break;
889 }
890 }
891 array->release();
892 }
893 return user;
894 }
895
896 static OSDictionary * CopyUserOnConsole(void)
897 {
898 OSArray * array;
899 OSDictionary * user = 0;
900
901 if ((array = OSDynamicCast(OSArray,
902 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
903 {
904 for (unsigned int idx = 0;
905 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
906 idx++)
907 {
908 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
909 {
910 user->retain();
911 break;
912 }
913 }
914 array->release();
915 }
916 return (user);
917 }
918
919 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
920 const char * privilegeName )
921 {
922 kern_return_t kr;
923 security_token_t token;
924 mach_msg_type_number_t count;
925 task_t task;
926 OSDictionary * user;
927 bool secureConsole;
928
929
930 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
931 sizeof(kIOClientPrivilegeForeground)))
932 {
933 /* is graphics access denied for current task? */
934 if (proc_get_task_selfgpuacc_deny() != 0)
935 return (kIOReturnNotPrivileged);
936 else
937 return (kIOReturnSuccess);
938 }
939
940 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
941 sizeof(kIOClientPrivilegeConsoleSession)))
942 {
943 kauth_cred_t cred;
944 proc_t p;
945
946 task = (task_t) securityToken;
947 if (!task)
948 task = current_task();
949 p = (proc_t) get_bsdtask_info(task);
950 kr = kIOReturnNotPrivileged;
951
952 if (p && (cred = kauth_cred_proc_ref(p)))
953 {
954 user = CopyUserOnConsole();
955 if (user)
956 {
957 OSNumber * num;
958 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
959 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
960 {
961 kr = kIOReturnSuccess;
962 }
963 user->release();
964 }
965 kauth_cred_unref(&cred);
966 }
967 return (kr);
968 }
969
970 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
971 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
972 task = (task_t)((IOUCProcessToken *)securityToken)->token;
973 else
974 task = (task_t)securityToken;
975
976 count = TASK_SECURITY_TOKEN_COUNT;
977 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
978
979 if (KERN_SUCCESS != kr)
980 {}
981 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
982 sizeof(kIOClientPrivilegeAdministrator))) {
983 if (0 != token.val[0])
984 kr = kIOReturnNotPrivileged;
985 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
986 sizeof(kIOClientPrivilegeLocalUser))) {
987 user = CopyConsoleUser(token.val[0]);
988 if ( user )
989 user->release();
990 else
991 kr = kIOReturnNotPrivileged;
992 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
993 sizeof(kIOClientPrivilegeConsoleUser))) {
994 user = CopyConsoleUser(token.val[0]);
995 if ( user ) {
996 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
997 kr = kIOReturnNotPrivileged;
998 else if ( secureConsole ) {
999 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1000 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1001 kr = kIOReturnNotPrivileged;
1002 }
1003 user->release();
1004 }
1005 else
1006 kr = kIOReturnNotPrivileged;
1007 } else
1008 kr = kIOReturnUnsupported;
1009
1010 return (kr);
1011 }
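// Illustrative call site (assuming, as is conventional, that the owning task
// is passed as the security token): a user client that should only serve root
// can check for the administrator privilege in initWithTask():
//
//     if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(
//             (void *) owningTask, kIOClientPrivilegeAdministrator))
//         return false;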
1012
1013 bool IOUserClient::init()
1014 {
1015 if (getPropertyTable() || super::init())
1016 return reserve();
1017
1018 return false;
1019 }
1020
1021 bool IOUserClient::init(OSDictionary * dictionary)
1022 {
1023 if (getPropertyTable() || super::init(dictionary))
1024 return reserve();
1025
1026 return false;
1027 }
1028
1029 bool IOUserClient::initWithTask(task_t owningTask,
1030 void * securityID,
1031 UInt32 type )
1032 {
1033 if (getPropertyTable() || super::init())
1034 return reserve();
1035
1036 return false;
1037 }
1038
1039 bool IOUserClient::initWithTask(task_t owningTask,
1040 void * securityID,
1041 UInt32 type,
1042 OSDictionary * properties )
1043 {
1044 bool ok;
1045
1046 ok = super::init( properties );
1047 ok &= initWithTask( owningTask, securityID, type );
1048
1049 return( ok );
1050 }
1051
1052 bool IOUserClient::reserve()
1053 {
1054 if(!reserved) {
1055 reserved = IONew(ExpansionData, 1);
1056 if (!reserved) {
1057 return false;
1058 }
1059 }
1060
1061 IOStatisticsRegisterCounter();
1062
1063 return true;
1064 }
1065
1066 void IOUserClient::free()
1067 {
1068 if( mappings)
1069 mappings->release();
1070
1071 IOStatisticsUnregisterCounter();
1072
1073 if (reserved)
1074 IODelete(reserved, ExpansionData, 1);
1075
1076 super::free();
1077 }
1078
1079 IOReturn IOUserClient::clientDied( void )
1080 {
1081 return( clientClose());
1082 }
1083
1084 IOReturn IOUserClient::clientClose( void )
1085 {
1086 return( kIOReturnUnsupported );
1087 }
1088
1089 IOService * IOUserClient::getService( void )
1090 {
1091 return( 0 );
1092 }
1093
1094 IOReturn IOUserClient::registerNotificationPort(
1095 mach_port_t /* port */,
1096 UInt32 /* type */,
1097 UInt32 /* refCon */)
1098 {
1099 return( kIOReturnUnsupported);
1100 }
1101
1102 IOReturn IOUserClient::registerNotificationPort(
1103 mach_port_t port,
1104 UInt32 type,
1105 io_user_reference_t refCon)
1106 {
1107 return (registerNotificationPort(port, type, (UInt32) refCon));
1108 }
1109
1110 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1111 semaphore_t * semaphore )
1112 {
1113 return( kIOReturnUnsupported);
1114 }
1115
1116 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1117 {
1118 return( kIOReturnUnsupported);
1119 }
1120
1121 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1122 IOOptionBits * options,
1123 IOMemoryDescriptor ** memory )
1124 {
1125 return( kIOReturnUnsupported);
1126 }
1127
1128 #if !__LP64__
1129 IOMemoryMap * IOUserClient::mapClientMemory(
1130 IOOptionBits type,
1131 task_t task,
1132 IOOptionBits mapFlags,
1133 IOVirtualAddress atAddress )
1134 {
1135 return (NULL);
1136 }
1137 #endif
1138
1139 IOMemoryMap * IOUserClient::mapClientMemory64(
1140 IOOptionBits type,
1141 task_t task,
1142 IOOptionBits mapFlags,
1143 mach_vm_address_t atAddress )
1144 {
1145 IOReturn err;
1146 IOOptionBits options = 0;
1147 IOMemoryDescriptor * memory;
1148 IOMemoryMap * map = 0;
1149
1150 err = clientMemoryForType( (UInt32) type, &options, &memory );
1151
1152 if( memory && (kIOReturnSuccess == err)) {
1153
1154 options = (options & ~kIOMapUserOptionsMask)
1155 | (mapFlags & kIOMapUserOptionsMask);
1156 map = memory->createMappingInTask( task, atAddress, options );
1157 memory->release();
1158 }
1159
1160 return( map );
1161 }
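// clientMemoryForType() is expected to hand back a retained descriptor;
// mapClientMemory64() consumes that reference with the release() above. An
// illustrative override (hypothetical subclass and ivar):
//
//     IOReturn MyUserClient::clientMemoryForType( UInt32 type,
//         IOOptionBits * options, IOMemoryDescriptor ** memory )
//     {
//         if( type) return( kIOReturnBadArgument );
//         fSharedBuffer->retain();    // e.g. an IOBufferMemoryDescriptor ivar
//         *memory = fSharedBuffer;
//         return( kIOReturnSuccess );
//     }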
1162
1163 IOReturn IOUserClient::exportObjectToClient(task_t task,
1164 OSObject *obj, io_object_t *clientObj)
1165 {
1166 mach_port_name_t name;
1167
1168 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1169 assert( name );
1170
1171 *(mach_port_name_t *)clientObj = name;
1172 return kIOReturnSuccess;
1173 }
1174
1175 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1176 {
1177 return( 0 );
1178 }
1179
1180 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1181 {
1182 return( 0 );
1183 }
1184
1185 IOExternalMethod * IOUserClient::
1186 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1187 {
1188 IOExternalMethod *method = getExternalMethodForIndex(index);
1189
1190 if (method)
1191 *targetP = (IOService *) method->object;
1192
1193 return method;
1194 }
1195
1196 IOExternalAsyncMethod * IOUserClient::
1197 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1198 {
1199 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1200
1201 if (method)
1202 *targetP = (IOService *) method->object;
1203
1204 return method;
1205 }
1206
1207 IOExternalTrap * IOUserClient::
1208 getExternalTrapForIndex(UInt32 index)
1209 {
1210 return NULL;
1211 }
1212
1213 IOExternalTrap * IOUserClient::
1214 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1215 {
1216 IOExternalTrap *trap = getExternalTrapForIndex(index);
1217
1218 if (trap) {
1219 *targetP = trap->object;
1220 }
1221
1222 return trap;
1223 }
1224
1225 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1226 {
1227 mach_port_t port;
1228 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1229
1230 if (MACH_PORT_NULL != port)
1231 iokit_release_port_send(port);
1232
1233 return (kIOReturnSuccess);
1234 }
1235
1236 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1237 {
1238 if (MACH_PORT_NULL != port)
1239 iokit_release_port_send(port);
1240
1241 return (kIOReturnSuccess);
1242 }
1243
1244 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1245 IOReturn result, void *args[], UInt32 numArgs)
1246 {
1247 OSAsyncReference64 reference64;
1248 io_user_reference_t args64[kMaxAsyncArgs];
1249 unsigned int idx;
1250
1251 if (numArgs > kMaxAsyncArgs)
1252 return kIOReturnMessageTooLarge;
1253
1254 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1255 reference64[idx] = REF64(reference[idx]);
1256
1257 for (idx = 0; idx < numArgs; idx++)
1258 args64[idx] = REF64(args[idx]);
1259
1260 return (sendAsyncResult64(reference64, result, args64, numArgs));
1261 }
1262
1263 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1264 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1265 {
1266 struct ReplyMsg
1267 {
1268 mach_msg_header_t msgHdr;
1269 union
1270 {
1271 struct
1272 {
1273 OSNotificationHeader notifyHdr;
1274 IOAsyncCompletionContent asyncContent;
1275 uint32_t args[kMaxAsyncArgs];
1276 } msg32;
1277 struct
1278 {
1279 OSNotificationHeader64 notifyHdr;
1280 IOAsyncCompletionContent asyncContent;
1281 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1282 } msg64;
1283 } m;
1284 };
1285 ReplyMsg replyMsg;
1286 mach_port_t replyPort;
1287 kern_return_t kr;
1288
1289 // If no reply port, do nothing.
1290 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1291 if (replyPort == MACH_PORT_NULL)
1292 return kIOReturnSuccess;
1293
1294 if (numArgs > kMaxAsyncArgs)
1295 return kIOReturnMessageTooLarge;
1296
1297 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1298 0 /*local*/);
1299 replyMsg.msgHdr.msgh_remote_port = replyPort;
1300 replyMsg.msgHdr.msgh_local_port = 0;
1301 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1302 if (kIOUCAsync64Flag & reference[0])
1303 {
1304 replyMsg.msgHdr.msgh_size =
1305 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1306 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1307 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1308 + numArgs * sizeof(io_user_reference_t);
1309 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1310 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1311
1312 replyMsg.m.msg64.asyncContent.result = result;
1313 if (numArgs)
1314 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1315 }
1316 else
1317 {
1318 unsigned int idx;
1319
1320 replyMsg.msgHdr.msgh_size =
1321 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1322 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1323
1324 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1325 + numArgs * sizeof(uint32_t);
1326 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1327
1328 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1329 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1330
1331 replyMsg.m.msg32.asyncContent.result = result;
1332
1333 for (idx = 0; idx < numArgs; idx++)
1334 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1335 }
1336
1337 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1338 replyMsg.msgHdr.msgh_size);
1339 if( KERN_SUCCESS != kr)
1340 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1341 return kr;
1342 }
1343
1344
1345 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1346
1347 extern "C" {
1348
1349 #define CHECK(cls,obj,out) \
1350 cls * out; \
1351 if( !(out = OSDynamicCast( cls, obj))) \
1352 return( kIOReturnBadArgument )
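// CHECK() both declares the checked variable and bails out of the calling MIG
// routine when the dynamic cast fails; for instance
//
//     CHECK( OSIterator, iterator, iter );
//
// expands to
//
//     OSIterator * iter;
//     if( !(iter = OSDynamicCast( OSIterator, iterator)))
//         return( kIOReturnBadArgument );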
1353
1354 /* Routine io_object_get_class */
1355 kern_return_t is_io_object_get_class(
1356 io_object_t object,
1357 io_name_t className )
1358 {
1359 const OSMetaClass* my_obj = NULL;
1360
1361 if( !object)
1362 return( kIOReturnBadArgument );
1363
1364 my_obj = object->getMetaClass();
1365 if (!my_obj) {
1366 return (kIOReturnNotFound);
1367 }
1368
1369 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1370 return( kIOReturnSuccess );
1371 }
1372
1373 /* Routine io_object_get_superclass */
1374 kern_return_t is_io_object_get_superclass(
1375 mach_port_t master_port,
1376 io_name_t obj_name,
1377 io_name_t class_name)
1378 {
1379 const OSMetaClass* my_obj = NULL;
1380 const OSMetaClass* superclass = NULL;
1381 const OSSymbol *my_name = NULL;
1382 const char *my_cstr = NULL;
1383
1384 if (!obj_name || !class_name)
1385 return (kIOReturnBadArgument);
1386
1387 if( master_port != master_device_port)
1388 return( kIOReturnNotPrivileged);
1389
1390 my_name = OSSymbol::withCString(obj_name);
1391
1392 if (my_name) {
1393 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1394 my_name->release();
1395 }
1396 if (my_obj) {
1397 superclass = my_obj->getSuperClass();
1398 }
1399
1400 if (!superclass) {
1401 return( kIOReturnNotFound );
1402 }
1403
1404 my_cstr = superclass->getClassName();
1405
1406 if (my_cstr) {
1407 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1408 return( kIOReturnSuccess );
1409 }
1410 return (kIOReturnNotFound);
1411 }
1412
1413 /* Routine io_object_get_bundle_identifier */
1414 kern_return_t is_io_object_get_bundle_identifier(
1415 mach_port_t master_port,
1416 io_name_t obj_name,
1417 io_name_t bundle_name)
1418 {
1419 const OSMetaClass* my_obj = NULL;
1420 const OSSymbol *my_name = NULL;
1421 const OSSymbol *identifier = NULL;
1422 const char *my_cstr = NULL;
1423
1424 if (!obj_name || !bundle_name)
1425 return (kIOReturnBadArgument);
1426
1427 if( master_port != master_device_port)
1428 return( kIOReturnNotPrivileged);
1429
1430 my_name = OSSymbol::withCString(obj_name);
1431
1432 if (my_name) {
1433 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1434 my_name->release();
1435 }
1436
1437 if (my_obj) {
1438 identifier = my_obj->getKmodName();
1439 }
1440 if (!identifier) {
1441 return( kIOReturnNotFound );
1442 }
1443
1444 my_cstr = identifier->getCStringNoCopy();
1445 if (my_cstr) {
1446 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1447 return( kIOReturnSuccess );
1448 }
1449
1450 return (kIOReturnBadArgument);
1451 }
1452
1453 /* Routine io_object_conforms_to */
1454 kern_return_t is_io_object_conforms_to(
1455 io_object_t object,
1456 io_name_t className,
1457 boolean_t *conforms )
1458 {
1459 if( !object)
1460 return( kIOReturnBadArgument );
1461
1462 *conforms = (0 != object->metaCast( className ));
1463 return( kIOReturnSuccess );
1464 }
1465
1466 /* Routine io_object_get_retain_count */
1467 kern_return_t is_io_object_get_retain_count(
1468 io_object_t object,
1469 uint32_t *retainCount )
1470 {
1471 if( !object)
1472 return( kIOReturnBadArgument );
1473
1474 *retainCount = object->getRetainCount();
1475 return( kIOReturnSuccess );
1476 }
1477
1478 /* Routine io_iterator_next */
1479 kern_return_t is_io_iterator_next(
1480 io_object_t iterator,
1481 io_object_t *object )
1482 {
1483 OSObject * obj;
1484
1485 CHECK( OSIterator, iterator, iter );
1486
1487 obj = iter->getNextObject();
1488 if( obj) {
1489 obj->retain();
1490 *object = obj;
1491 return( kIOReturnSuccess );
1492 } else
1493 return( kIOReturnNoDevice );
1494 }
1495
1496 /* Routine io_iterator_reset */
1497 kern_return_t is_io_iterator_reset(
1498 io_object_t iterator )
1499 {
1500 CHECK( OSIterator, iterator, iter );
1501
1502 iter->reset();
1503
1504 return( kIOReturnSuccess );
1505 }
1506
1507 /* Routine io_iterator_is_valid */
1508 kern_return_t is_io_iterator_is_valid(
1509 io_object_t iterator,
1510 boolean_t *is_valid )
1511 {
1512 CHECK( OSIterator, iterator, iter );
1513
1514 *is_valid = iter->isValid();
1515
1516 return( kIOReturnSuccess );
1517 }
1518
1519 /* Routine io_service_match_property_table */
1520 kern_return_t is_io_service_match_property_table(
1521 io_service_t _service,
1522 io_string_t matching,
1523 boolean_t *matches )
1524 {
1525 CHECK( IOService, _service, service );
1526
1527 kern_return_t kr;
1528 OSObject * obj;
1529 OSDictionary * dict;
1530
1531 obj = OSUnserializeXML( matching );
1532
1533 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1534 *matches = service->passiveMatch( dict );
1535 kr = kIOReturnSuccess;
1536 } else
1537 kr = kIOReturnBadArgument;
1538
1539 if( obj)
1540 obj->release();
1541
1542 return( kr );
1543 }
1544
1545 /* Routine io_service_match_property_table_ool */
1546 kern_return_t is_io_service_match_property_table_ool(
1547 io_object_t service,
1548 io_buf_ptr_t matching,
1549 mach_msg_type_number_t matchingCnt,
1550 kern_return_t *result,
1551 boolean_t *matches )
1552 {
1553 kern_return_t kr;
1554 vm_offset_t data;
1555 vm_map_offset_t map_data;
1556
1557 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1558 data = CAST_DOWN(vm_offset_t, map_data);
1559
1560 if( KERN_SUCCESS == kr) {
1561 // must return success after vm_map_copyout() succeeds
1562 *result = is_io_service_match_property_table( service,
1563 (char *) data, matches );
1564 vm_deallocate( kernel_map, data, matchingCnt );
1565 }
1566
1567 return( kr );
1568 }
1569
1570 /* Routine io_service_get_matching_services */
1571 kern_return_t is_io_service_get_matching_services(
1572 mach_port_t master_port,
1573 io_string_t matching,
1574 io_iterator_t *existing )
1575 {
1576 kern_return_t kr;
1577 OSObject * obj;
1578 OSDictionary * dict;
1579
1580 if( master_port != master_device_port)
1581 return( kIOReturnNotPrivileged);
1582
1583 obj = OSUnserializeXML( matching );
1584
1585 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1586 *existing = IOService::getMatchingServices( dict );
1587 kr = kIOReturnSuccess;
1588 } else
1589 kr = kIOReturnBadArgument;
1590
1591 if( obj)
1592 obj->release();
1593
1594 return( kr );
1595 }
1596
1597 /* Routine io_service_get_matching_services_ool */
1598 kern_return_t is_io_service_get_matching_services_ool(
1599 mach_port_t master_port,
1600 io_buf_ptr_t matching,
1601 mach_msg_type_number_t matchingCnt,
1602 kern_return_t *result,
1603 io_object_t *existing )
1604 {
1605 kern_return_t kr;
1606 vm_offset_t data;
1607 vm_map_offset_t map_data;
1608
1609 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1610 data = CAST_DOWN(vm_offset_t, map_data);
1611
1612 if( KERN_SUCCESS == kr) {
1613 // must return success after vm_map_copyout() succeeds
1614 *result = is_io_service_get_matching_services( master_port,
1615 (char *) data, existing );
1616 vm_deallocate( kernel_map, data, matchingCnt );
1617 }
1618
1619 return( kr );
1620 }
1621
1622 static kern_return_t internal_io_service_add_notification(
1623 mach_port_t master_port,
1624 io_name_t notification_type,
1625 io_string_t matching,
1626 mach_port_t port,
1627 void * reference,
1628 vm_size_t referenceSize,
1629 bool client64,
1630 io_object_t * notification )
1631 {
1632 IOServiceUserNotification * userNotify = 0;
1633 IONotifier * notify = 0;
1634 const OSSymbol * sym;
1635 OSDictionary * dict;
1636 IOReturn err;
1637 unsigned long int userMsgType;
1638
1639
1640 if( master_port != master_device_port)
1641 return( kIOReturnNotPrivileged);
1642
1643 do {
1644 err = kIOReturnNoResources;
1645
1646 if( !(sym = OSSymbol::withCString( notification_type )))
1647 err = kIOReturnNoResources;
1648
1649 if( !(dict = OSDynamicCast( OSDictionary,
1650 OSUnserializeXML( matching )))) {
1651 err = kIOReturnBadArgument;
1652 continue;
1653 }
1654
1655 if( (sym == gIOPublishNotification)
1656 || (sym == gIOFirstPublishNotification))
1657 userMsgType = kIOServicePublishNotificationType;
1658 else if( (sym == gIOMatchedNotification)
1659 || (sym == gIOFirstMatchNotification))
1660 userMsgType = kIOServiceMatchedNotificationType;
1661 else if( sym == gIOTerminatedNotification)
1662 userMsgType = kIOServiceTerminatedNotificationType;
1663 else
1664 userMsgType = kLastIOKitNotificationType;
1665
1666 userNotify = new IOServiceUserNotification;
1667
1668 if( userNotify && !userNotify->init( port, userMsgType,
1669 reference, referenceSize, client64)) {
1670 userNotify->release();
1671 userNotify = 0;
1672 }
1673 if( !userNotify)
1674 continue;
1675
1676 notify = IOService::addMatchingNotification( sym, dict,
1677 &userNotify->_handler, userNotify );
1678 if( notify) {
1679 *notification = userNotify;
1680 userNotify->setNotification( notify );
1681 err = kIOReturnSuccess;
1682 } else
1683 err = kIOReturnUnsupported;
1684
1685 } while( false );
1686
1687 if( sym)
1688 sym->release();
1689 if( dict)
1690 dict->release();
1691
1692 return( err );
1693 }
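// The MIG entry points below differ only in how the matching dictionary and
// async reference arrive: the plain and _ool variants pass the narrow
// io_async_ref_t with client64 == false, the _64 variants pass
// io_async_ref64_t with client64 == true, and the _ool variants first copy the
// matching string out of the caller-supplied vm_map_copy_t before delegating
// here.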
1694
1695
1696 /* Routine io_service_add_notification */
1697 kern_return_t is_io_service_add_notification(
1698 mach_port_t master_port,
1699 io_name_t notification_type,
1700 io_string_t matching,
1701 mach_port_t port,
1702 io_async_ref_t reference,
1703 mach_msg_type_number_t referenceCnt,
1704 io_object_t * notification )
1705 {
1706 return (internal_io_service_add_notification(master_port, notification_type,
1707 matching, port, &reference[0], sizeof(io_async_ref_t),
1708 false, notification));
1709 }
1710
1711 /* Routine io_service_add_notification_64 */
1712 kern_return_t is_io_service_add_notification_64(
1713 mach_port_t master_port,
1714 io_name_t notification_type,
1715 io_string_t matching,
1716 mach_port_t wake_port,
1717 io_async_ref64_t reference,
1718 mach_msg_type_number_t referenceCnt,
1719 io_object_t *notification )
1720 {
1721 return (internal_io_service_add_notification(master_port, notification_type,
1722 matching, wake_port, &reference[0], sizeof(io_async_ref64_t),
1723 true, notification));
1724 }
1725
1726
1727 static kern_return_t internal_io_service_add_notification_ool(
1728 mach_port_t master_port,
1729 io_name_t notification_type,
1730 io_buf_ptr_t matching,
1731 mach_msg_type_number_t matchingCnt,
1732 mach_port_t wake_port,
1733 void * reference,
1734 vm_size_t referenceSize,
1735 bool client64,
1736 kern_return_t *result,
1737 io_object_t *notification )
1738 {
1739 kern_return_t kr;
1740 vm_offset_t data;
1741 vm_map_offset_t map_data;
1742
1743 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1744 data = CAST_DOWN(vm_offset_t, map_data);
1745
1746 if( KERN_SUCCESS == kr) {
1747 // must return success after vm_map_copyout() succeeds
1748 *result = internal_io_service_add_notification( master_port, notification_type,
1749 (char *) data, wake_port, reference, referenceSize, client64, notification );
1750 vm_deallocate( kernel_map, data, matchingCnt );
1751 }
1752
1753 return( kr );
1754 }
1755
1756 /* Routine io_service_add_notification_ool */
1757 kern_return_t is_io_service_add_notification_ool(
1758 mach_port_t master_port,
1759 io_name_t notification_type,
1760 io_buf_ptr_t matching,
1761 mach_msg_type_number_t matchingCnt,
1762 mach_port_t wake_port,
1763 io_async_ref_t reference,
1764 mach_msg_type_number_t referenceCnt,
1765 kern_return_t *result,
1766 io_object_t *notification )
1767 {
1768 return (internal_io_service_add_notification_ool(master_port, notification_type,
1769 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
1770 false, result, notification));
1771 }
1772
1773 /* Routine io_service_add_notification_ool_64 */
1774 kern_return_t is_io_service_add_notification_ool_64(
1775 mach_port_t master_port,
1776 io_name_t notification_type,
1777 io_buf_ptr_t matching,
1778 mach_msg_type_number_t matchingCnt,
1779 mach_port_t wake_port,
1780 io_async_ref64_t reference,
1781 mach_msg_type_number_t referenceCnt,
1782 kern_return_t *result,
1783 io_object_t *notification )
1784 {
1785 return (internal_io_service_add_notification_ool(master_port, notification_type,
1786 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
1787 true, result, notification));
1788 }
1789
1790 /* Routine io_service_add_notification_old */
1791 kern_return_t is_io_service_add_notification_old(
1792 mach_port_t master_port,
1793 io_name_t notification_type,
1794 io_string_t matching,
1795 mach_port_t port,
1796 // for binary compatibility reasons, this must be natural_t for ILP32
1797 natural_t ref,
1798 io_object_t * notification )
1799 {
1800 return( is_io_service_add_notification( master_port, notification_type,
1801 matching, port, &ref, 1, notification ));
1802 }
1803
1804
1805 static kern_return_t internal_io_service_add_interest_notification(
1806 io_object_t _service,
1807 io_name_t type_of_interest,
1808 mach_port_t port,
1809 void * reference,
1810 vm_size_t referenceSize,
1811 bool client64,
1812 io_object_t * notification )
1813 {
1814
1815 IOServiceMessageUserNotification * userNotify = 0;
1816 IONotifier * notify = 0;
1817 const OSSymbol * sym;
1818 IOReturn err;
1819
1820 CHECK( IOService, _service, service );
1821
1822 err = kIOReturnNoResources;
1823 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
1824
1825 userNotify = new IOServiceMessageUserNotification;
1826
1827 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
1828 reference, referenceSize,
1829 kIOUserNotifyMaxMessageSize,
1830 client64 )) {
1831 userNotify->release();
1832 userNotify = 0;
1833 }
1834 if( !userNotify)
1835 continue;
1836
1837 notify = service->registerInterest( sym,
1838 &userNotify->_handler, userNotify );
1839 if( notify) {
1840 *notification = userNotify;
1841 userNotify->setNotification( notify );
1842 err = kIOReturnSuccess;
1843 } else
1844 err = kIOReturnUnsupported;
1845
1846 sym->release();
1847
1848 } while( false );
1849
1850 return( err );
1851 }
1852
1853 /* Routine io_service_add_message_notification */
1854 kern_return_t is_io_service_add_interest_notification(
1855 io_object_t service,
1856 io_name_t type_of_interest,
1857 mach_port_t port,
1858 io_async_ref_t reference,
1859 mach_msg_type_number_t referenceCnt,
1860 io_object_t * notification )
1861 {
1862 return (internal_io_service_add_interest_notification(service, type_of_interest,
1863 port, &reference[0], sizeof(io_async_ref_t), false, notification));
1864 }
1865
1866 /* Routine io_service_add_interest_notification_64 */
1867 kern_return_t is_io_service_add_interest_notification_64(
1868 io_object_t service,
1869 io_name_t type_of_interest,
1870 mach_port_t wake_port,
1871 io_async_ref64_t reference,
1872 mach_msg_type_number_t referenceCnt,
1873 io_object_t *notification )
1874 {
1875 return (internal_io_service_add_interest_notification(service, type_of_interest,
1876 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
1877 }
1878
1879
1880 /* Routine io_service_acknowledge_notification */
1881 kern_return_t is_io_service_acknowledge_notification(
1882 io_object_t _service,
1883 natural_t notify_ref,
1884 natural_t response )
1885 {
1886 CHECK( IOService, _service, service );
1887
1888 return( service->acknowledgeNotification( (IONotificationRef) notify_ref,
1889 (IOOptionBits) response ));
1890
1891 }
1892
1893 /* Routine io_connect_get_semaphore */
1894 kern_return_t is_io_connect_get_notification_semaphore(
1895 io_connect_t connection,
1896 natural_t notification_type,
1897 semaphore_t *semaphore )
1898 {
1899 CHECK( IOUserClient, connection, client );
1900
1901 IOStatisticsClientCall();
1902 return( client->getNotificationSemaphore( (UInt32) notification_type,
1903 semaphore ));
1904 }
1905
1906 /* Routine io_registry_get_root_entry */
1907 kern_return_t is_io_registry_get_root_entry(
1908 mach_port_t master_port,
1909 io_object_t *root )
1910 {
1911 IORegistryEntry * entry;
1912
1913 if( master_port != master_device_port)
1914 return( kIOReturnNotPrivileged);
1915
1916 entry = IORegistryEntry::getRegistryRoot();
1917 if( entry)
1918 entry->retain();
1919 *root = entry;
1920
1921 return( kIOReturnSuccess );
1922 }
1923
1924 /* Routine io_registry_create_iterator */
1925 kern_return_t is_io_registry_create_iterator(
1926 mach_port_t master_port,
1927 io_name_t plane,
1928 uint32_t options,
1929 io_object_t *iterator )
1930 {
1931 if( master_port != master_device_port)
1932 return( kIOReturnNotPrivileged);
1933
1934 *iterator = IORegistryIterator::iterateOver(
1935 IORegistryEntry::getPlane( plane ), options );
1936
1937 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1938 }
1939
1940 /* Routine io_registry_entry_create_iterator */
1941 kern_return_t is_io_registry_entry_create_iterator(
1942 io_object_t registry_entry,
1943 io_name_t plane,
1944 uint32_t options,
1945 io_object_t *iterator )
1946 {
1947 CHECK( IORegistryEntry, registry_entry, entry );
1948
1949 *iterator = IORegistryIterator::iterateOver( entry,
1950 IORegistryEntry::getPlane( plane ), options );
1951
1952 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1953 }
1954
1955 /* Routine io_registry_iterator_enter */
1956 kern_return_t is_io_registry_iterator_enter_entry(
1957 io_object_t iterator )
1958 {
1959 CHECK( IORegistryIterator, iterator, iter );
1960
1961 iter->enterEntry();
1962
1963 return( kIOReturnSuccess );
1964 }
1965
1966 /* Routine io_registry_iterator_exit */
1967 kern_return_t is_io_registry_iterator_exit_entry(
1968 io_object_t iterator )
1969 {
1970 bool didIt;
1971
1972 CHECK( IORegistryIterator, iterator, iter );
1973
1974 didIt = iter->exitEntry();
1975
1976 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
1977 }
1978
1979 /* Routine io_registry_entry_from_path */
1980 kern_return_t is_io_registry_entry_from_path(
1981 mach_port_t master_port,
1982 io_string_t path,
1983 io_object_t *registry_entry )
1984 {
1985 IORegistryEntry * entry;
1986
1987 if( master_port != master_device_port)
1988 return( kIOReturnNotPrivileged);
1989
1990 entry = IORegistryEntry::fromPath( path );
1991
1992 *registry_entry = entry;
1993
1994 return( kIOReturnSuccess );
1995 }
1996
1997 /* Routine io_registry_entry_in_plane */
1998 kern_return_t is_io_registry_entry_in_plane(
1999 io_object_t registry_entry,
2000 io_name_t plane,
2001 boolean_t *inPlane )
2002 {
2003 CHECK( IORegistryEntry, registry_entry, entry );
2004
2005 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2006
2007 return( kIOReturnSuccess );
2008 }
2009
2010
2011 /* Routine io_registry_entry_get_path */
2012 kern_return_t is_io_registry_entry_get_path(
2013 io_object_t registry_entry,
2014 io_name_t plane,
2015 io_string_t path )
2016 {
2017 int length;
2018 CHECK( IORegistryEntry, registry_entry, entry );
2019
2020 length = sizeof( io_string_t);
2021 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2022 return( kIOReturnSuccess );
2023 else
2024 return( kIOReturnBadArgument );
2025 }
2026
2027
2028 /* Routine io_registry_entry_get_name */
2029 kern_return_t is_io_registry_entry_get_name(
2030 io_object_t registry_entry,
2031 io_name_t name )
2032 {
2033 CHECK( IORegistryEntry, registry_entry, entry );
2034
2035 strncpy( name, entry->getName(), sizeof( io_name_t));
2036
2037 return( kIOReturnSuccess );
2038 }
2039
2040 /* Routine io_registry_entry_get_name_in_plane */
2041 kern_return_t is_io_registry_entry_get_name_in_plane(
2042 io_object_t registry_entry,
2043 io_name_t planeName,
2044 io_name_t name )
2045 {
2046 const IORegistryPlane * plane;
2047 CHECK( IORegistryEntry, registry_entry, entry );
2048
2049 if( planeName[0])
2050 plane = IORegistryEntry::getPlane( planeName );
2051 else
2052 plane = 0;
2053
2054 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2055
2056 return( kIOReturnSuccess );
2057 }
2058
2059 /* Routine io_registry_entry_get_location_in_plane */
2060 kern_return_t is_io_registry_entry_get_location_in_plane(
2061 io_object_t registry_entry,
2062 io_name_t planeName,
2063 io_name_t location )
2064 {
2065 const IORegistryPlane * plane;
2066 CHECK( IORegistryEntry, registry_entry, entry );
2067
2068 if( planeName[0])
2069 plane = IORegistryEntry::getPlane( planeName );
2070 else
2071 plane = 0;
2072
2073 const char * cstr = entry->getLocation( plane );
2074
2075 if( cstr) {
2076 strncpy( location, cstr, sizeof( io_name_t));
2077 return( kIOReturnSuccess );
2078 } else
2079 return( kIOReturnNotFound );
2080 }
2081
2082 /* Routine io_registry_entry_get_registry_entry_id */
2083 kern_return_t is_io_registry_entry_get_registry_entry_id(
2084 io_object_t registry_entry,
2085 uint64_t *entry_id )
2086 {
2087 CHECK( IORegistryEntry, registry_entry, entry );
2088
2089 *entry_id = entry->getRegistryEntryID();
2090
2091 return (kIOReturnSuccess);
2092 }
2093
2094 // Create a vm_map_copy_t or kalloc'ed data for memory
2095 // to be copied out. The IPC layer will free it after the copyout.
2096
2097 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
2098 io_buf_ptr_t * buf )
2099 {
2100 kern_return_t err;
2101 vm_map_copy_t copy;
2102
2103 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2104 false /* src_destroy */, &copy);
2105
2106 assert( err == KERN_SUCCESS );
2107 if( err == KERN_SUCCESS )
2108 *buf = (char *) copy;
2109
2110 return( err );
2111 }
2112
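/*
 * Illustrative sketch only: buffers produced by copyoutkdata() reach user space
 * as out-of-line Mach memory that the caller owns and must release. Using the
 * raw MIG routine directly (IOKitLib normally hides this), and assuming 'entry'
 * is a valid io_registry_entry_t:
 *
 *   io_buf_ptr_t props = NULL;
 *   mach_msg_type_number_t propsCnt = 0;
 *   if (KERN_SUCCESS == io_registry_entry_get_properties(entry, &props, &propsCnt)) {
 *       // props[0..propsCnt) holds a serialized (XML) property list
 *       vm_deallocate(mach_task_self(), (vm_address_t) props, propsCnt);
 *   }
 */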
2113 /* Routine io_registry_entry_get_property */
2114 kern_return_t is_io_registry_entry_get_property_bytes(
2115 io_object_t registry_entry,
2116 io_name_t property_name,
2117 io_struct_inband_t buf,
2118 mach_msg_type_number_t *dataCnt )
2119 {
2120 OSObject * obj;
2121 OSData * data;
2122 OSString * str;
2123 OSBoolean * boo;
2124 OSNumber * off;
2125 UInt64 offsetBytes;
2126 unsigned int len = 0;
2127 const void * bytes = 0;
2128 IOReturn ret = kIOReturnSuccess;
2129
2130 CHECK( IORegistryEntry, registry_entry, entry );
2131
2132 obj = entry->copyProperty(property_name);
2133 if( !obj)
2134 return( kIOReturnNoResources );
2135
2136 // One day OSData will be a common container base class
2137 // until then...
2138 if( (data = OSDynamicCast( OSData, obj ))) {
2139 len = data->getLength();
2140 bytes = data->getBytesNoCopy();
2141
2142 } else if( (str = OSDynamicCast( OSString, obj ))) {
2143 len = str->getLength() + 1;
2144 bytes = str->getCStringNoCopy();
2145
2146 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2147 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2148 bytes = boo->isTrue() ? "Yes" : "No";
2149
2150 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2151 offsetBytes = off->unsigned64BitValue();
2152 len = off->numberOfBytes();
2153 bytes = &offsetBytes;
2154 #ifdef __BIG_ENDIAN__
2155 bytes = (const void *)
2156 (((UInt32) bytes) + (sizeof( UInt64) - len));
2157 #endif
2158
2159 } else
2160 ret = kIOReturnBadArgument;
2161
2162 if( bytes) {
2163 if( *dataCnt < len)
2164 ret = kIOReturnIPCError;
2165 else {
2166 *dataCnt = len;
2167 bcopy( bytes, buf, len );
2168 }
2169 }
2170 obj->release();
2171
2172 return( ret );
2173 }
2174
2175
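/*
 * Illustrative sketch only: this inband path is what IOKitLib's
 * IORegistryEntryGetProperty() uses for small properties. "model" is a
 * hypothetical property name:
 *
 *   io_struct_inband_t buf;
 *   uint32_t size = sizeof(buf);
 *   if (KERN_SUCCESS == IORegistryEntryGetProperty(entry, "model", buf, &size)) {
 *       // 'size' now holds the flattened property length
 *   }
 */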
2176 /* Routine io_registry_entry_get_property */
2177 kern_return_t is_io_registry_entry_get_property(
2178 io_object_t registry_entry,
2179 io_name_t property_name,
2180 io_buf_ptr_t *properties,
2181 mach_msg_type_number_t *propertiesCnt )
2182 {
2183 kern_return_t err;
2184 vm_size_t len;
2185 OSObject * obj;
2186
2187 CHECK( IORegistryEntry, registry_entry, entry );
2188
2189 obj = entry->copyProperty(property_name);
2190 if( !obj)
2191 return( kIOReturnNotFound );
2192
2193 OSSerialize * s = OSSerialize::withCapacity(4096);
2194 if( !s) {
2195 obj->release();
2196 return( kIOReturnNoMemory );
2197 }
2198 s->clearText();
2199
2200 if( obj->serialize( s )) {
2201 len = s->getLength();
2202 *propertiesCnt = len;
2203 err = copyoutkdata( s->text(), len, properties );
2204
2205 } else
2206 err = kIOReturnUnsupported;
2207
2208 s->release();
2209 obj->release();
2210
2211 return( err );
2212 }
2213
2214 /* Routine io_registry_entry_get_property_recursively */
2215 kern_return_t is_io_registry_entry_get_property_recursively(
2216 io_object_t registry_entry,
2217 io_name_t plane,
2218 io_name_t property_name,
2219 uint32_t options,
2220 io_buf_ptr_t *properties,
2221 mach_msg_type_number_t *propertiesCnt )
2222 {
2223 kern_return_t err;
2224 vm_size_t len;
2225 OSObject * obj;
2226
2227 CHECK( IORegistryEntry, registry_entry, entry );
2228
2229 obj = entry->copyProperty( property_name,
2230 IORegistryEntry::getPlane( plane ), options);
2231 if( !obj)
2232 return( kIOReturnNotFound );
2233
2234 OSSerialize * s = OSSerialize::withCapacity(4096);
2235 if( !s) {
2236 obj->release();
2237 return( kIOReturnNoMemory );
2238 }
2239
2240 s->clearText();
2241
2242 if( obj->serialize( s )) {
2243 len = s->getLength();
2244 *propertiesCnt = len;
2245 err = copyoutkdata( s->text(), len, properties );
2246
2247 } else
2248 err = kIOReturnUnsupported;
2249
2250 s->release();
2251 obj->release();
2252
2253 return( err );
2254 }
2255
2256 /* Routine io_registry_entry_get_properties */
2257 kern_return_t is_io_registry_entry_get_properties(
2258 io_object_t registry_entry,
2259 io_buf_ptr_t *properties,
2260 mach_msg_type_number_t *propertiesCnt )
2261 {
2262 kern_return_t err;
2263 vm_size_t len;
2264
2265 CHECK( IORegistryEntry, registry_entry, entry );
2266
2267 OSSerialize * s = OSSerialize::withCapacity(4096);
2268 if( !s)
2269 return( kIOReturnNoMemory );
2270
2271 s->clearText();
2272
2273 if( entry->serializeProperties( s )) {
2274 len = s->getLength();
2275 *propertiesCnt = len;
2276 err = copyoutkdata( s->text(), len, properties );
2277
2278 } else
2279 err = kIOReturnUnsupported;
2280
2281 s->release();
2282
2283 return( err );
2284 }
2285
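/*
 * Illustrative sketch only: the serialized-dictionary path above backs
 * IOKitLib's CF conveniences, e.g.:
 *
 *   CFMutableDictionaryRef dict = NULL;
 *   if (KERN_SUCCESS == IORegistryEntryCreateCFProperties(entry, &dict,
 *           kCFAllocatorDefault, 0)) {
 *       // use 'dict'
 *       CFRelease(dict);
 *   }
 */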
2286 /* Routine io_registry_entry_set_properties */
2287 kern_return_t is_io_registry_entry_set_properties
2288 (
2289 io_object_t registry_entry,
2290 io_buf_ptr_t properties,
2291 mach_msg_type_number_t propertiesCnt,
2292 kern_return_t * result)
2293 {
2294 OSObject * obj;
2295 kern_return_t err;
2296 IOReturn res;
2297 vm_offset_t data;
2298 vm_map_offset_t map_data;
2299
2300 CHECK( IORegistryEntry, registry_entry, entry );
2301
2302 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2303 data = CAST_DOWN(vm_offset_t, map_data);
2304
2305 if( KERN_SUCCESS == err) {
2306
2307 // must return success after vm_map_copyout() succeeds
2308 obj = OSUnserializeXML( (const char *) data );
2309 vm_deallocate( kernel_map, data, propertiesCnt );
2310
2311 if (!obj)
2312 res = kIOReturnBadArgument;
2313 #if CONFIG_MACF
2314 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
2315 registry_entry, obj))
2316 res = kIOReturnNotPermitted;
2317 #endif
2318 else
2319 res = entry->setProperties( obj );
2320 if (obj)
2321 obj->release();
2322 } else
2323 res = err;
2324
2325 *result = res;
2326 return( err );
2327 }
2328
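/*
 * Illustrative sketch only: the inverse direction from user space is usually
 * IORegistryEntrySetCFProperties(), which serializes a CF object and lands in
 * is_io_registry_entry_set_properties() above (subject to the MAC check when
 * CONFIG_MACF is enabled):
 *
 *   CFDictionaryRef props = ...;   // caller-built dictionary
 *   kern_return_t kr = IORegistryEntrySetCFProperties(entry, props);
 */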
2329 /* Routine io_registry_entry_get_child_iterator */
2330 kern_return_t is_io_registry_entry_get_child_iterator(
2331 io_object_t registry_entry,
2332 io_name_t plane,
2333 io_object_t *iterator )
2334 {
2335 CHECK( IORegistryEntry, registry_entry, entry );
2336
2337 *iterator = entry->getChildIterator(
2338 IORegistryEntry::getPlane( plane ));
2339
2340 return( kIOReturnSuccess );
2341 }
2342
2343 /* Routine io_registry_entry_get_parent_iterator */
2344 kern_return_t is_io_registry_entry_get_parent_iterator(
2345 io_object_t registry_entry,
2346 io_name_t plane,
2347 io_object_t *iterator)
2348 {
2349 CHECK( IORegistryEntry, registry_entry, entry );
2350
2351 *iterator = entry->getParentIterator(
2352 IORegistryEntry::getPlane( plane ));
2353
2354 return( kIOReturnSuccess );
2355 }
2356
2357 /* Routine io_service_get_busy_state */
2358 kern_return_t is_io_service_get_busy_state(
2359 io_object_t _service,
2360 uint32_t *busyState )
2361 {
2362 CHECK( IOService, _service, service );
2363
2364 *busyState = service->getBusyState();
2365
2366 return( kIOReturnSuccess );
2367 }
2368
2369 /* Routine io_service_get_state */
2370 kern_return_t is_io_service_get_state(
2371 io_object_t _service,
2372 uint64_t *state,
2373 uint32_t *busy_state,
2374 uint64_t *accumulated_busy_time )
2375 {
2376 CHECK( IOService, _service, service );
2377
2378 *state = service->getState();
2379 *busy_state = service->getBusyState();
2380 *accumulated_busy_time = service->getAccumulatedBusyTime();
2381
2382 return( kIOReturnSuccess );
2383 }
2384
2385 /* Routine io_service_wait_quiet */
2386 kern_return_t is_io_service_wait_quiet(
2387 io_object_t _service,
2388 mach_timespec_t wait_time )
2389 {
2390 uint64_t timeoutNS;
2391
2392 CHECK( IOService, _service, service );
2393
2394 timeoutNS = wait_time.tv_sec;
2395 timeoutNS *= kSecondScale;
2396 timeoutNS += wait_time.tv_nsec;
2397
2398 return( service->waitQuiet(timeoutNS) );
2399 }
2400
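/*
 * Illustrative sketch only: callers typically reach this through
 * IOServiceWaitQuiet(), passing a mach_timespec_t timeout that the routine
 * above converts to nanoseconds:
 *
 *   mach_timespec_t t = { 10, 0 };              // wait up to ~10 seconds
 *   kern_return_t kr = IOServiceWaitQuiet(service, &t);
 */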
2401 /* Routine io_service_request_probe */
2402 kern_return_t is_io_service_request_probe(
2403 io_object_t _service,
2404 uint32_t options )
2405 {
2406 CHECK( IOService, _service, service );
2407
2408 return( service->requestProbe( options ));
2409 }
2410
2411 /* Routine io_service_open_ndr */
2412 kern_return_t is_io_service_open_extended(
2413 io_object_t _service,
2414 task_t owningTask,
2415 uint32_t connect_type,
2416 NDR_record_t ndr,
2417 io_buf_ptr_t properties,
2418 mach_msg_type_number_t propertiesCnt,
2419 kern_return_t * result,
2420 io_object_t *connection )
2421 {
2422 IOUserClient * client = 0;
2423 kern_return_t err = KERN_SUCCESS;
2424 IOReturn res = kIOReturnSuccess;
2425 OSDictionary * propertiesDict = 0;
2426 bool crossEndian;
2427 bool disallowAccess;
2428
2429 CHECK( IOService, _service, service );
2430
2431 do
2432 {
2433 if (properties)
2434 {
2435 OSObject * obj;
2436 vm_offset_t data;
2437 vm_map_offset_t map_data;
2438
2439 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2440 res = err;
2441 data = CAST_DOWN(vm_offset_t, map_data);
2442 if (KERN_SUCCESS == err)
2443 {
2444 // must return success after vm_map_copyout() succeeds
2445 obj = OSUnserializeXML( (const char *) data );
2446 vm_deallocate( kernel_map, data, propertiesCnt );
2447 propertiesDict = OSDynamicCast(OSDictionary, obj);
2448 if (!propertiesDict)
2449 {
2450 res = kIOReturnBadArgument;
2451 if (obj)
2452 obj->release();
2453 }
2454 }
2455 if (kIOReturnSuccess != res)
2456 break;
2457 }
2458
2459 crossEndian = (ndr.int_rep != NDR_record.int_rep);
2460 if (crossEndian)
2461 {
2462 if (!propertiesDict)
2463 propertiesDict = OSDictionary::withCapacity(4);
2464 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
2465 if (data)
2466 {
2467 if (propertiesDict)
2468 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
2469 data->release();
2470 }
2471 }
2472
2473 res = service->newUserClient( owningTask, (void *) owningTask,
2474 connect_type, propertiesDict, &client );
2475
2476 if (propertiesDict)
2477 propertiesDict->release();
2478
2479 if (res == kIOReturnSuccess)
2480 {
2481 assert( OSDynamicCast(IOUserClient, client) );
2482
2483 disallowAccess = (crossEndian
2484 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
2485 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
2486 if (disallowAccess) res = kIOReturnUnsupported;
2487 #if CONFIG_MACF
2488 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
2489 res = kIOReturnNotPermitted;
2490 #endif
2491 if (kIOReturnSuccess != res)
2492 {
2493 IOStatisticsClientCall();
2494 client->clientClose();
2495 client->release();
2496 client = 0;
2497 break;
2498 }
2499 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
2500 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
2501 if (creatorName)
2502 {
2503 client->setProperty(kIOUserClientCreatorKey, creatorName);
2504 creatorName->release();
2505 }
2506 }
2507 }
2508 while (false);
2509
2510 *connection = client;
2511 *result = res;
2512
2513 return (err);
2514 }
2515
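/*
 * Illustrative sketch only: the usual user-space entry point for the routine
 * above is IOServiceOpen(), with a driver-defined connect type (0 here is just
 * a placeholder):
 *
 *   io_connect_t connect = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &connect);
 *   if (kr == KERN_SUCCESS) {
 *       // ... use the connection ...
 *       IOServiceClose(connect);            // ends up in is_io_service_close()
 *   }
 */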
2516 /* Routine io_service_close */
2517 kern_return_t is_io_service_close(
2518 io_object_t connection )
2519 {
2520 OSSet * mappings;
2521 if ((mappings = OSDynamicCast(OSSet, connection)))
2522 return( kIOReturnSuccess );
2523
2524 CHECK( IOUserClient, connection, client );
2525
2526 IOStatisticsClientCall();
2527 client->clientClose();
2528
2529 return( kIOReturnSuccess );
2530 }
2531
2532 /* Routine io_connect_get_service */
2533 kern_return_t is_io_connect_get_service(
2534 io_object_t connection,
2535 io_object_t *service )
2536 {
2537 IOService * theService;
2538
2539 CHECK( IOUserClient, connection, client );
2540
2541 theService = client->getService();
2542 if( theService)
2543 theService->retain();
2544
2545 *service = theService;
2546
2547 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
2548 }
2549
2550 /* Routine io_connect_set_notification_port */
2551 kern_return_t is_io_connect_set_notification_port(
2552 io_object_t connection,
2553 uint32_t notification_type,
2554 mach_port_t port,
2555 uint32_t reference)
2556 {
2557 CHECK( IOUserClient, connection, client );
2558
2559 IOStatisticsClientCall();
2560 return( client->registerNotificationPort( port, notification_type,
2561 (io_user_reference_t) reference ));
2562 }
2563
2564 /* Routine io_connect_set_notification_port */
2565 kern_return_t is_io_connect_set_notification_port_64(
2566 io_object_t connection,
2567 uint32_t notification_type,
2568 mach_port_t port,
2569 io_user_reference_t reference)
2570 {
2571 CHECK( IOUserClient, connection, client );
2572
2573 IOStatisticsClientCall();
2574 return( client->registerNotificationPort( port, notification_type,
2575 reference ));
2576 }
2577
2578 /* Routine io_connect_map_memory_into_task */
2579 kern_return_t is_io_connect_map_memory_into_task
2580 (
2581 io_connect_t connection,
2582 uint32_t memory_type,
2583 task_t into_task,
2584 mach_vm_address_t *address,
2585 mach_vm_size_t *size,
2586 uint32_t flags
2587 )
2588 {
2589 IOReturn err;
2590 IOMemoryMap * map;
2591
2592 CHECK( IOUserClient, connection, client );
2593
2594 IOStatisticsClientCall();
2595 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
2596
2597 if( map) {
2598 *address = map->getAddress();
2599 if( size)
2600 *size = map->getSize();
2601
2602 if( client->sharedInstance
2603 || (into_task != current_task())) {
2604 // push a name out to the task owning the map,
2605 // so we can clean up maps
2606 mach_port_name_t name __unused =
2607 IOMachPort::makeSendRightForTask(
2608 into_task, map, IKOT_IOKIT_OBJECT );
2609 assert( name );
2610
2611 } else {
2612 // keep it with the user client
2613 IOLockLock( gIOObjectPortLock);
2614 if( 0 == client->mappings)
2615 client->mappings = OSSet::withCapacity(2);
2616 if( client->mappings)
2617 client->mappings->setObject( map);
2618 IOLockUnlock( gIOObjectPortLock);
2619 map->release();
2620 }
2621 err = kIOReturnSuccess;
2622
2623 } else
2624 err = kIOReturnBadArgument;
2625
2626 return( err );
2627 }
2628
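/*
 * Illustrative sketch only: user space normally drives this through
 * IOConnectMapMemory64(); the memory type (0 below) is driver-defined and used
 * here only as a placeholder:
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(connect, 0, mach_task_self(),
 *           &addr, &size, kIOMapAnywhere);
 *   // ... later ...
 *   if (kr == KERN_SUCCESS)
 *       IOConnectUnmapMemory64(connect, 0, mach_task_self(), addr);
 */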
2629 /* Routine is_io_connect_map_memory */
2630 kern_return_t is_io_connect_map_memory(
2631 io_object_t connect,
2632 uint32_t type,
2633 task_t task,
2634 vm_address_t * mapAddr,
2635 vm_size_t * mapSize,
2636 uint32_t flags )
2637 {
2638 IOReturn err;
2639 mach_vm_address_t address;
2640 mach_vm_size_t size;
2641
2642 address = SCALAR64(*mapAddr);
2643 size = SCALAR64(*mapSize);
2644
2645 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
2646
2647 *mapAddr = SCALAR32(address);
2648 *mapSize = SCALAR32(size);
2649
2650 return (err);
2651 }
2652
2653 } /* extern "C" */
2654
2655 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
2656 {
2657 OSIterator * iter;
2658 IOMemoryMap * map = 0;
2659
2660 IOLockLock(gIOObjectPortLock);
2661
2662 iter = OSCollectionIterator::withCollection(mappings);
2663 if(iter)
2664 {
2665 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
2666 {
2667 if(mem == map->getMemoryDescriptor())
2668 {
2669 map->retain();
2670 mappings->removeObject(map);
2671 break;
2672 }
2673 }
2674 iter->release();
2675 }
2676
2677 IOLockUnlock(gIOObjectPortLock);
2678
2679 return (map);
2680 }
2681
2682 extern "C" {
2683
2684 /* Routine io_connect_unmap_memory_from_task */
2685 kern_return_t is_io_connect_unmap_memory_from_task
2686 (
2687 io_connect_t connection,
2688 uint32_t memory_type,
2689 task_t from_task,
2690 mach_vm_address_t address)
2691 {
2692 IOReturn err;
2693 IOOptionBits options = 0;
2694 IOMemoryDescriptor * memory;
2695 IOMemoryMap * map;
2696
2697 CHECK( IOUserClient, connection, client );
2698
2699 IOStatisticsClientCall();
2700 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
2701
2702 if( memory && (kIOReturnSuccess == err)) {
2703
2704 options = (options & ~kIOMapUserOptionsMask)
2705 | kIOMapAnywhere | kIOMapReference;
2706
2707 map = memory->createMappingInTask( from_task, address, options );
2708 memory->release();
2709 if( map)
2710 {
2711 IOLockLock( gIOObjectPortLock);
2712 if( client->mappings)
2713 client->mappings->removeObject( map);
2714 IOLockUnlock( gIOObjectPortLock);
2715
2716 mach_port_name_t name = 0;
2717 if (from_task != current_task())
2718 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
2719 if (name)
2720 {
2721 map->userClientUnmap();
2722 err = iokit_mod_send_right( from_task, name, -2 );
2723 err = kIOReturnSuccess;
2724 }
2725 else
2726 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
2727 if (from_task == current_task())
2728 map->release();
2729 }
2730 else
2731 err = kIOReturnBadArgument;
2732 }
2733
2734 return( err );
2735 }
2736
2737 kern_return_t is_io_connect_unmap_memory(
2738 io_object_t connect,
2739 uint32_t type,
2740 task_t task,
2741 vm_address_t mapAddr )
2742 {
2743 IOReturn err;
2744 mach_vm_address_t address;
2745
2746 address = SCALAR64(mapAddr);
2747
2748 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
2749
2750 return (err);
2751 }
2752
2753
2754 /* Routine io_connect_add_client */
2755 kern_return_t is_io_connect_add_client(
2756 io_object_t connection,
2757 io_object_t connect_to)
2758 {
2759 CHECK( IOUserClient, connection, client );
2760 CHECK( IOUserClient, connect_to, to );
2761
2762 IOStatisticsClientCall();
2763 return( client->connectClient( to ) );
2764 }
2765
2766
2767 /* Routine io_connect_set_properties */
2768 kern_return_t is_io_connect_set_properties(
2769 io_object_t connection,
2770 io_buf_ptr_t properties,
2771 mach_msg_type_number_t propertiesCnt,
2772 kern_return_t * result)
2773 {
2774 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
2775 }
2776
2777 /* Routine io_user_client_method */
2778 kern_return_t is_io_connect_method_var_output
2779 (
2780 io_connect_t connection,
2781 uint32_t selector,
2782 io_scalar_inband64_t scalar_input,
2783 mach_msg_type_number_t scalar_inputCnt,
2784 io_struct_inband_t inband_input,
2785 mach_msg_type_number_t inband_inputCnt,
2786 mach_vm_address_t ool_input,
2787 mach_vm_size_t ool_input_size,
2788 io_struct_inband_t inband_output,
2789 mach_msg_type_number_t *inband_outputCnt,
2790 io_scalar_inband64_t scalar_output,
2791 mach_msg_type_number_t *scalar_outputCnt,
2792 io_buf_ptr_t *var_output,
2793 mach_msg_type_number_t *var_outputCnt
2794 )
2795 {
2796 CHECK( IOUserClient, connection, client );
2797
2798 IOExternalMethodArguments args;
2799 IOReturn ret;
2800 IOMemoryDescriptor * inputMD = 0;
2801 OSObject * structureVariableOutputData = 0;
2802
2803 bzero(&args.__reserved[0], sizeof(args.__reserved));
2804 args.version = kIOExternalMethodArgumentsCurrentVersion;
2805
2806 args.selector = selector;
2807
2808 args.asyncWakePort = MACH_PORT_NULL;
2809 args.asyncReference = 0;
2810 args.asyncReferenceCount = 0;
2811 args.structureVariableOutputData = &structureVariableOutputData;
2812
2813 args.scalarInput = scalar_input;
2814 args.scalarInputCount = scalar_inputCnt;
2815 args.structureInput = inband_input;
2816 args.structureInputSize = inband_inputCnt;
2817
2818 if (ool_input)
2819 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2820 kIODirectionOut, current_task());
2821
2822 args.structureInputDescriptor = inputMD;
2823
2824 args.scalarOutput = scalar_output;
2825 args.scalarOutputCount = *scalar_outputCnt;
2826 args.structureOutput = inband_output;
2827 args.structureOutputSize = *inband_outputCnt;
2828 args.structureOutputDescriptor = NULL;
2829 args.structureOutputDescriptorSize = 0;
2830
2831 IOStatisticsClientCall();
2832 ret = client->externalMethod( selector, &args );
2833
2834 *scalar_outputCnt = args.scalarOutputCount;
2835 *inband_outputCnt = args.structureOutputSize;
2836
2837 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
2838 {
2839 OSSerialize * serialize;
2840 OSData * data;
2841 vm_size_t len;
2842
2843 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
2844 {
2845 len = serialize->getLength();
2846 *var_outputCnt = len;
2847 ret = copyoutkdata(serialize->text(), len, var_output);
2848 }
2849 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
2850 {
2851 len = data->getLength();
2852 *var_outputCnt = len;
2853 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
2854 }
2855 else
2856 {
2857 ret = kIOReturnUnderrun;
2858 }
2859 }
2860
2861 if (inputMD)
2862 inputMD->release();
2863 if (structureVariableOutputData)
2864 structureVariableOutputData->release();
2865
2866 return (ret);
2867 }
2868
2869 /* Routine io_user_client_method */
2870 kern_return_t is_io_connect_method
2871 (
2872 io_connect_t connection,
2873 uint32_t selector,
2874 io_scalar_inband64_t scalar_input,
2875 mach_msg_type_number_t scalar_inputCnt,
2876 io_struct_inband_t inband_input,
2877 mach_msg_type_number_t inband_inputCnt,
2878 mach_vm_address_t ool_input,
2879 mach_vm_size_t ool_input_size,
2880 io_struct_inband_t inband_output,
2881 mach_msg_type_number_t *inband_outputCnt,
2882 io_scalar_inband64_t scalar_output,
2883 mach_msg_type_number_t *scalar_outputCnt,
2884 mach_vm_address_t ool_output,
2885 mach_vm_size_t *ool_output_size
2886 )
2887 {
2888 CHECK( IOUserClient, connection, client );
2889
2890 IOExternalMethodArguments args;
2891 IOReturn ret;
2892 IOMemoryDescriptor * inputMD = 0;
2893 IOMemoryDescriptor * outputMD = 0;
2894
2895 bzero(&args.__reserved[0], sizeof(args.__reserved));
2896 args.version = kIOExternalMethodArgumentsCurrentVersion;
2897
2898 args.selector = selector;
2899
2900 args.asyncWakePort = MACH_PORT_NULL;
2901 args.asyncReference = 0;
2902 args.asyncReferenceCount = 0;
2903 args.structureVariableOutputData = 0;
2904
2905 args.scalarInput = scalar_input;
2906 args.scalarInputCount = scalar_inputCnt;
2907 args.structureInput = inband_input;
2908 args.structureInputSize = inband_inputCnt;
2909
2910 if (ool_input)
2911 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2912 kIODirectionOut, current_task());
2913
2914 args.structureInputDescriptor = inputMD;
2915
2916 args.scalarOutput = scalar_output;
2917 args.scalarOutputCount = *scalar_outputCnt;
2918 args.structureOutput = inband_output;
2919 args.structureOutputSize = *inband_outputCnt;
2920
2921 if (ool_output && ool_output_size)
2922 {
2923 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
2924 kIODirectionIn, current_task());
2925 }
2926
2927 args.structureOutputDescriptor = outputMD;
2928 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
2929
2930 IOStatisticsClientCall();
2931 ret = client->externalMethod( selector, &args );
2932
2933 *scalar_outputCnt = args.scalarOutputCount;
2934 *inband_outputCnt = args.structureOutputSize;
2935 *ool_output_size = args.structureOutputDescriptorSize;
2936
2937 if (inputMD)
2938 inputMD->release();
2939 if (outputMD)
2940 outputMD->release();
2941
2942 return (ret);
2943 }
2944
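/*
 * Illustrative sketch only: IOKitLib's IOConnectCallMethod() maps directly onto
 * the arguments above -- scalars, inband structs, and (for large buffers)
 * out-of-line descriptors. Selector 0 and the counts below are placeholders:
 *
 *   uint64_t in[2]  = { 1, 2 };
 *   uint64_t out[1] = { 0 };
 *   uint32_t outCnt = 1;
 *   size_t   structOutCnt = 0;
 *   kern_return_t kr = IOConnectCallMethod(connect, 0 /* selector */,
 *           in, 2,                       // scalar input
 *           NULL, 0,                     // struct input
 *           out, &outCnt,                // scalar output
 *           NULL, &structOutCnt);        // struct output
 */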
2945 /* Routine io_async_user_client_method */
2946 kern_return_t is_io_connect_async_method
2947 (
2948 io_connect_t connection,
2949 mach_port_t wake_port,
2950 io_async_ref64_t reference,
2951 mach_msg_type_number_t referenceCnt,
2952 uint32_t selector,
2953 io_scalar_inband64_t scalar_input,
2954 mach_msg_type_number_t scalar_inputCnt,
2955 io_struct_inband_t inband_input,
2956 mach_msg_type_number_t inband_inputCnt,
2957 mach_vm_address_t ool_input,
2958 mach_vm_size_t ool_input_size,
2959 io_struct_inband_t inband_output,
2960 mach_msg_type_number_t *inband_outputCnt,
2961 io_scalar_inband64_t scalar_output,
2962 mach_msg_type_number_t *scalar_outputCnt,
2963 mach_vm_address_t ool_output,
2964 mach_vm_size_t * ool_output_size
2965 )
2966 {
2967 CHECK( IOUserClient, connection, client );
2968
2969 IOExternalMethodArguments args;
2970 IOReturn ret;
2971 IOMemoryDescriptor * inputMD = 0;
2972 IOMemoryDescriptor * outputMD = 0;
2973
2974 bzero(&args.__reserved[0], sizeof(args.__reserved));
2975 args.version = kIOExternalMethodArgumentsCurrentVersion;
2976
2977 reference[0] = (io_user_reference_t) wake_port;
2978 if (vm_map_is_64bit(get_task_map(current_task())))
2979 reference[0] |= kIOUCAsync64Flag;
2980
2981 args.selector = selector;
2982
2983 args.asyncWakePort = wake_port;
2984 args.asyncReference = reference;
2985 args.asyncReferenceCount = referenceCnt;
2986
2987 args.scalarInput = scalar_input;
2988 args.scalarInputCount = scalar_inputCnt;
2989 args.structureInput = inband_input;
2990 args.structureInputSize = inband_inputCnt;
2991
2992 if (ool_input)
2993 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2994 kIODirectionOut, current_task());
2995
2996 args.structureInputDescriptor = inputMD;
2997
2998 args.scalarOutput = scalar_output;
2999 args.scalarOutputCount = *scalar_outputCnt;
3000 args.structureOutput = inband_output;
3001 args.structureOutputSize = *inband_outputCnt;
3002
3003 if (ool_output)
3004 {
3005 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3006 kIODirectionIn, current_task());
3007 }
3008
3009 args.structureOutputDescriptor = outputMD;
3010 args.structureOutputDescriptorSize = *ool_output_size;
3011
3012 IOStatisticsClientCall();
3013 ret = client->externalMethod( selector, &args );
3014
3015 *inband_outputCnt = args.structureOutputSize;
3016 *ool_output_size = args.structureOutputDescriptorSize;
3017
3018 if (inputMD)
3019 inputMD->release();
3020 if (outputMD)
3021 outputMD->release();
3022
3023 return (ret);
3024 }
3025
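/*
 * Illustrative sketch only: the async variant is reached through IOKitLib's
 * IOConnectCallAsyncMethod(). 'wakePort' is assumed to come from
 * IONotificationPortGetMachPort(), and the reference array becomes
 * asyncReference above (slot 0 is overwritten with the wake port):
 *
 *   uint64_t ref[kOSAsyncRef64Count] = { 0 };
 *   uint32_t scalarOutCnt = 0;
 *   size_t   structOutCnt = 0;
 *   kern_return_t kr = IOConnectCallAsyncMethod(connect, 0 /* selector */,
 *           wakePort, ref, kOSAsyncRef64Count,
 *           NULL, 0,                          // scalar input
 *           NULL, 0,                          // struct input
 *           NULL, &scalarOutCnt,              // scalar output
 *           NULL, &structOutCnt);             // struct output
 */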
3026 /* Routine io_connect_method_scalarI_scalarO */
3027 kern_return_t is_io_connect_method_scalarI_scalarO(
3028 io_object_t connect,
3029 uint32_t index,
3030 io_scalar_inband_t input,
3031 mach_msg_type_number_t inputCount,
3032 io_scalar_inband_t output,
3033 mach_msg_type_number_t * outputCount )
3034 {
3035 IOReturn err;
3036 uint32_t i;
3037 io_scalar_inband64_t _input;
3038 io_scalar_inband64_t _output;
3039
3040 mach_msg_type_number_t struct_outputCnt = 0;
3041 mach_vm_size_t ool_output_size = 0;
3042
3043 for (i = 0; i < inputCount; i++)
3044 _input[i] = SCALAR64(input[i]);
3045
3046 err = is_io_connect_method(connect, index,
3047 _input, inputCount,
3048 NULL, 0,
3049 0, 0,
3050 NULL, &struct_outputCnt,
3051 _output, outputCount,
3052 0, &ool_output_size);
3053
3054 for (i = 0; i < *outputCount; i++)
3055 output[i] = SCALAR32(_output[i]);
3056
3057 return (err);
3058 }
3059
3060 kern_return_t shim_io_connect_method_scalarI_scalarO(
3061 IOExternalMethod * method,
3062 IOService * object,
3063 const io_user_scalar_t * input,
3064 mach_msg_type_number_t inputCount,
3065 io_user_scalar_t * output,
3066 mach_msg_type_number_t * outputCount )
3067 {
3068 IOMethod func;
3069 io_scalar_inband_t _output;
3070 IOReturn err;
3071 err = kIOReturnBadArgument;
3072
3073 do {
3074
3075 if( inputCount != method->count0)
3076 {
3077 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3078 continue;
3079 }
3080 if( *outputCount != method->count1)
3081 {
3082 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3083 continue;
3084 }
3085
3086 func = method->func;
3087
3088 switch( inputCount) {
3089
3090 case 6:
3091 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3092 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3093 break;
3094 case 5:
3095 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3096 ARG32(input[3]), ARG32(input[4]),
3097 &_output[0] );
3098 break;
3099 case 4:
3100 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3101 ARG32(input[3]),
3102 &_output[0], &_output[1] );
3103 break;
3104 case 3:
3105 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3106 &_output[0], &_output[1], &_output[2] );
3107 break;
3108 case 2:
3109 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3110 &_output[0], &_output[1], &_output[2],
3111 &_output[3] );
3112 break;
3113 case 1:
3114 err = (object->*func)( ARG32(input[0]),
3115 &_output[0], &_output[1], &_output[2],
3116 &_output[3], &_output[4] );
3117 break;
3118 case 0:
3119 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3120 &_output[3], &_output[4], &_output[5] );
3121 break;
3122
3123 default:
3124 IOLog("%s: Bad method table\n", object->getName());
3125 }
3126 }
3127 while( false);
3128
3129 uint32_t i;
3130 for (i = 0; i < *outputCount; i++)
3131 output[i] = SCALAR32(_output[i]);
3132
3133 return( err);
3134 }
3135
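/*
 * Illustrative sketch only (hypothetical driver code, not part of this file):
 * the shim above services pre-Leopard drivers that still publish
 * IOExternalMethod tables via getTargetAndMethodForIndex(). A matching legacy
 * entry would look like:
 *
 *   static const IOExternalMethod sLegacyMethods[] = {
 *       {   NULL,                                   // object (often NULL; target supplied by getTargetAndMethodForIndex)
 *           (IOMethod) &MyLegacyUserClient::doIt,   // hypothetical handler
 *           kIOUCScalarIScalarO,                    // flags: scalar in, scalar out
 *           2,                                      // count0: scalar input count
 *           1 },                                    // count1: scalar output count
 *   };
 */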
3136 /* Routine io_async_method_scalarI_scalarO */
3137 kern_return_t is_io_async_method_scalarI_scalarO(
3138 io_object_t connect,
3139 mach_port_t wake_port,
3140 io_async_ref_t reference,
3141 mach_msg_type_number_t referenceCnt,
3142 uint32_t index,
3143 io_scalar_inband_t input,
3144 mach_msg_type_number_t inputCount,
3145 io_scalar_inband_t output,
3146 mach_msg_type_number_t * outputCount )
3147 {
3148 IOReturn err;
3149 uint32_t i;
3150 io_scalar_inband64_t _input;
3151 io_scalar_inband64_t _output;
3152 io_async_ref64_t _reference;
3153
3154 for (i = 0; i < referenceCnt; i++)
3155 _reference[i] = REF64(reference[i]);
3156
3157 mach_msg_type_number_t struct_outputCnt = 0;
3158 mach_vm_size_t ool_output_size = 0;
3159
3160 for (i = 0; i < inputCount; i++)
3161 _input[i] = SCALAR64(input[i]);
3162
3163 err = is_io_connect_async_method(connect,
3164 wake_port, _reference, referenceCnt,
3165 index,
3166 _input, inputCount,
3167 NULL, 0,
3168 0, 0,
3169 NULL, &struct_outputCnt,
3170 _output, outputCount,
3171 0, &ool_output_size);
3172
3173 for (i = 0; i < *outputCount; i++)
3174 output[i] = SCALAR32(_output[i]);
3175
3176 return (err);
3177 }
3178 /* Routine io_async_method_scalarI_structureO */
3179 kern_return_t is_io_async_method_scalarI_structureO(
3180 io_object_t connect,
3181 mach_port_t wake_port,
3182 io_async_ref_t reference,
3183 mach_msg_type_number_t referenceCnt,
3184 uint32_t index,
3185 io_scalar_inband_t input,
3186 mach_msg_type_number_t inputCount,
3187 io_struct_inband_t output,
3188 mach_msg_type_number_t * outputCount )
3189 {
3190 uint32_t i;
3191 io_scalar_inband64_t _input;
3192 io_async_ref64_t _reference;
3193
3194 for (i = 0; i < referenceCnt; i++)
3195 _reference[i] = REF64(reference[i]);
3196
3197 mach_msg_type_number_t scalar_outputCnt = 0;
3198 mach_vm_size_t ool_output_size = 0;
3199
3200 for (i = 0; i < inputCount; i++)
3201 _input[i] = SCALAR64(input[i]);
3202
3203 return (is_io_connect_async_method(connect,
3204 wake_port, _reference, referenceCnt,
3205 index,
3206 _input, inputCount,
3207 NULL, 0,
3208 0, 0,
3209 output, outputCount,
3210 NULL, &scalar_outputCnt,
3211 0, &ool_output_size));
3212 }
3213
3214 /* Routine io_async_method_scalarI_structureI */
3215 kern_return_t is_io_async_method_scalarI_structureI(
3216 io_connect_t connect,
3217 mach_port_t wake_port,
3218 io_async_ref_t reference,
3219 mach_msg_type_number_t referenceCnt,
3220 uint32_t index,
3221 io_scalar_inband_t input,
3222 mach_msg_type_number_t inputCount,
3223 io_struct_inband_t inputStruct,
3224 mach_msg_type_number_t inputStructCount )
3225 {
3226 uint32_t i;
3227 io_scalar_inband64_t _input;
3228 io_async_ref64_t _reference;
3229
3230 for (i = 0; i < referenceCnt; i++)
3231 _reference[i] = REF64(reference[i]);
3232
3233 mach_msg_type_number_t scalar_outputCnt = 0;
3234 mach_msg_type_number_t inband_outputCnt = 0;
3235 mach_vm_size_t ool_output_size = 0;
3236
3237 for (i = 0; i < inputCount; i++)
3238 _input[i] = SCALAR64(input[i]);
3239
3240 return (is_io_connect_async_method(connect,
3241 wake_port, _reference, referenceCnt,
3242 index,
3243 _input, inputCount,
3244 inputStruct, inputStructCount,
3245 0, 0,
3246 NULL, &inband_outputCnt,
3247 NULL, &scalar_outputCnt,
3248 0, &ool_output_size));
3249 }
3250
3251 /* Routine io_async_method_structureI_structureO */
3252 kern_return_t is_io_async_method_structureI_structureO(
3253 io_object_t connect,
3254 mach_port_t wake_port,
3255 io_async_ref_t reference,
3256 mach_msg_type_number_t referenceCnt,
3257 uint32_t index,
3258 io_struct_inband_t input,
3259 mach_msg_type_number_t inputCount,
3260 io_struct_inband_t output,
3261 mach_msg_type_number_t * outputCount )
3262 {
3263 uint32_t i;
3264 mach_msg_type_number_t scalar_outputCnt = 0;
3265 mach_vm_size_t ool_output_size = 0;
3266 io_async_ref64_t _reference;
3267
3268 for (i = 0; i < referenceCnt; i++)
3269 _reference[i] = REF64(reference[i]);
3270
3271 return (is_io_connect_async_method(connect,
3272 wake_port, _reference, referenceCnt,
3273 index,
3274 NULL, 0,
3275 input, inputCount,
3276 0, 0,
3277 output, outputCount,
3278 NULL, &scalar_outputCnt,
3279 0, &ool_output_size));
3280 }
3281
3282
3283 kern_return_t shim_io_async_method_scalarI_scalarO(
3284 IOExternalAsyncMethod * method,
3285 IOService * object,
3286 mach_port_t asyncWakePort,
3287 io_user_reference_t * asyncReference,
3288 uint32_t asyncReferenceCount,
3289 const io_user_scalar_t * input,
3290 mach_msg_type_number_t inputCount,
3291 io_user_scalar_t * output,
3292 mach_msg_type_number_t * outputCount )
3293 {
3294 IOAsyncMethod func;
3295 uint32_t i;
3296 io_scalar_inband_t _output;
3297 IOReturn err;
3298 io_async_ref_t reference;
3299
3300 for (i = 0; i < asyncReferenceCount; i++)
3301 reference[i] = REF32(asyncReference[i]);
3302
3303 err = kIOReturnBadArgument;
3304
3305 do {
3306
3307 if( inputCount != method->count0)
3308 {
3309 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3310 continue;
3311 }
3312 if( *outputCount != method->count1)
3313 {
3314 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3315 continue;
3316 }
3317
3318 func = method->func;
3319
3320 switch( inputCount) {
3321
3322 case 6:
3323 err = (object->*func)( reference,
3324 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3325 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3326 break;
3327 case 5:
3328 err = (object->*func)( reference,
3329 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3330 ARG32(input[3]), ARG32(input[4]),
3331 &_output[0] );
3332 break;
3333 case 4:
3334 err = (object->*func)( reference,
3335 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3336 ARG32(input[3]),
3337 &_output[0], &_output[1] );
3338 break;
3339 case 3:
3340 err = (object->*func)( reference,
3341 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3342 &_output[0], &_output[1], &_output[2] );
3343 break;
3344 case 2:
3345 err = (object->*func)( reference,
3346 ARG32(input[0]), ARG32(input[1]),
3347 &_output[0], &_output[1], &_output[2],
3348 &_output[3] );
3349 break;
3350 case 1:
3351 err = (object->*func)( reference,
3352 ARG32(input[0]),
3353 &_output[0], &_output[1], &_output[2],
3354 &_output[3], &_output[4] );
3355 break;
3356 case 0:
3357 err = (object->*func)( reference,
3358 &_output[0], &_output[1], &_output[2],
3359 &_output[3], &_output[4], &_output[5] );
3360 break;
3361
3362 default:
3363 IOLog("%s: Bad method table\n", object->getName());
3364 }
3365 }
3366 while( false);
3367
3368 for (i = 0; i < *outputCount; i++)
3369 output[i] = SCALAR32(_output[i]);
3370
3371 return( err);
3372 }
3373
3374
3375 /* Routine io_connect_method_scalarI_structureO */
3376 kern_return_t is_io_connect_method_scalarI_structureO(
3377 io_object_t connect,
3378 uint32_t index,
3379 io_scalar_inband_t input,
3380 mach_msg_type_number_t inputCount,
3381 io_struct_inband_t output,
3382 mach_msg_type_number_t * outputCount )
3383 {
3384 uint32_t i;
3385 io_scalar_inband64_t _input;
3386
3387 mach_msg_type_number_t scalar_outputCnt = 0;
3388 mach_vm_size_t ool_output_size = 0;
3389
3390 for (i = 0; i < inputCount; i++)
3391 _input[i] = SCALAR64(input[i]);
3392
3393 return (is_io_connect_method(connect, index,
3394 _input, inputCount,
3395 NULL, 0,
3396 0, 0,
3397 output, outputCount,
3398 NULL, &scalar_outputCnt,
3399 0, &ool_output_size));
3400 }
3401
3402 kern_return_t shim_io_connect_method_scalarI_structureO(
3403
3404 IOExternalMethod * method,
3405 IOService * object,
3406 const io_user_scalar_t * input,
3407 mach_msg_type_number_t inputCount,
3408 io_struct_inband_t output,
3409 IOByteCount * outputCount )
3410 {
3411 IOMethod func;
3412 IOReturn err;
3413
3414 err = kIOReturnBadArgument;
3415
3416 do {
3417 if( inputCount != method->count0)
3418 {
3419 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3420 continue;
3421 }
3422 if( (kIOUCVariableStructureSize != method->count1)
3423 && (*outputCount != method->count1))
3424 {
3425 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3426 continue;
3427 }
3428
3429 func = method->func;
3430
3431 switch( inputCount) {
3432
3433 case 5:
3434 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3435 ARG32(input[3]), ARG32(input[4]),
3436 output );
3437 break;
3438 case 4:
3439 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3440 ARG32(input[3]),
3441 output, (void *)outputCount );
3442 break;
3443 case 3:
3444 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3445 output, (void *)outputCount, 0 );
3446 break;
3447 case 2:
3448 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3449 output, (void *)outputCount, 0, 0 );
3450 break;
3451 case 1:
3452 err = (object->*func)( ARG32(input[0]),
3453 output, (void *)outputCount, 0, 0, 0 );
3454 break;
3455 case 0:
3456 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
3457 break;
3458
3459 default:
3460 IOLog("%s: Bad method table\n", object->getName());
3461 }
3462 }
3463 while( false);
3464
3465 return( err);
3466 }
3467
3468
3469 kern_return_t shim_io_async_method_scalarI_structureO(
3470 IOExternalAsyncMethod * method,
3471 IOService * object,
3472 mach_port_t asyncWakePort,
3473 io_user_reference_t * asyncReference,
3474 uint32_t asyncReferenceCount,
3475 const io_user_scalar_t * input,
3476 mach_msg_type_number_t inputCount,
3477 io_struct_inband_t output,
3478 mach_msg_type_number_t * outputCount )
3479 {
3480 IOAsyncMethod func;
3481 uint32_t i;
3482 IOReturn err;
3483 io_async_ref_t reference;
3484
3485 for (i = 0; i < asyncReferenceCount; i++)
3486 reference[i] = REF32(asyncReference[i]);
3487
3488 err = kIOReturnBadArgument;
3489 do {
3490 if( inputCount != method->count0)
3491 {
3492 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3493 continue;
3494 }
3495 if( (kIOUCVariableStructureSize != method->count1)
3496 && (*outputCount != method->count1))
3497 {
3498 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3499 continue;
3500 }
3501
3502 func = method->func;
3503
3504 switch( inputCount) {
3505
3506 case 5:
3507 err = (object->*func)( reference,
3508 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3509 ARG32(input[3]), ARG32(input[4]),
3510 output );
3511 break;
3512 case 4:
3513 err = (object->*func)( reference,
3514 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3515 ARG32(input[3]),
3516 output, (void *)outputCount );
3517 break;
3518 case 3:
3519 err = (object->*func)( reference,
3520 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3521 output, (void *)outputCount, 0 );
3522 break;
3523 case 2:
3524 err = (object->*func)( reference,
3525 ARG32(input[0]), ARG32(input[1]),
3526 output, (void *)outputCount, 0, 0 );
3527 break;
3528 case 1:
3529 err = (object->*func)( reference,
3530 ARG32(input[0]),
3531 output, (void *)outputCount, 0, 0, 0 );
3532 break;
3533 case 0:
3534 err = (object->*func)( reference,
3535 output, (void *)outputCount, 0, 0, 0, 0 );
3536 break;
3537
3538 default:
3539 IOLog("%s: Bad method table\n", object->getName());
3540 }
3541 }
3542 while( false);
3543
3544 return( err);
3545 }
3546
3547 /* Routine io_connect_method_scalarI_structureI */
3548 kern_return_t is_io_connect_method_scalarI_structureI(
3549 io_connect_t connect,
3550 uint32_t index,
3551 io_scalar_inband_t input,
3552 mach_msg_type_number_t inputCount,
3553 io_struct_inband_t inputStruct,
3554 mach_msg_type_number_t inputStructCount )
3555 {
3556 uint32_t i;
3557 io_scalar_inband64_t _input;
3558
3559 mach_msg_type_number_t scalar_outputCnt = 0;
3560 mach_msg_type_number_t inband_outputCnt = 0;
3561 mach_vm_size_t ool_output_size = 0;
3562
3563 for (i = 0; i < inputCount; i++)
3564 _input[i] = SCALAR64(input[i]);
3565
3566 return (is_io_connect_method(connect, index,
3567 _input, inputCount,
3568 inputStruct, inputStructCount,
3569 0, 0,
3570 NULL, &inband_outputCnt,
3571 NULL, &scalar_outputCnt,
3572 0, &ool_output_size));
3573 }
3574
3575 kern_return_t shim_io_connect_method_scalarI_structureI(
3576 IOExternalMethod * method,
3577 IOService * object,
3578 const io_user_scalar_t * input,
3579 mach_msg_type_number_t inputCount,
3580 io_struct_inband_t inputStruct,
3581 mach_msg_type_number_t inputStructCount )
3582 {
3583 IOMethod func;
3584 IOReturn err = kIOReturnBadArgument;
3585
3586 do
3587 {
3588 if( (kIOUCVariableStructureSize != method->count0)
3589 && (inputCount != method->count0))
3590 {
3591 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3592 continue;
3593 }
3594 if( (kIOUCVariableStructureSize != method->count1)
3595 && (inputStructCount != method->count1))
3596 {
3597 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3598 continue;
3599 }
3600
3601 func = method->func;
3602
3603 switch( inputCount) {
3604
3605 case 5:
3606 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3607 ARG32(input[3]), ARG32(input[4]),
3608 inputStruct );
3609 break;
3610 case 4:
3611 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
3612 ARG32(input[3]),
3613 inputStruct, (void *)inputStructCount );
3614 break;
3615 case 3:
3616 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3617 inputStruct, (void *)inputStructCount,
3618 0 );
3619 break;
3620 case 2:
3621 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3622 inputStruct, (void *)inputStructCount,
3623 0, 0 );
3624 break;
3625 case 1:
3626 err = (object->*func)( ARG32(input[0]),
3627 inputStruct, (void *)inputStructCount,
3628 0, 0, 0 );
3629 break;
3630 case 0:
3631 err = (object->*func)( inputStruct, (void *)inputStructCount,
3632 0, 0, 0, 0 );
3633 break;
3634
3635 default:
3636 IOLog("%s: Bad method table\n", object->getName());
3637 }
3638 }
3639 while (false);
3640
3641 return( err);
3642 }
3643
3644 kern_return_t shim_io_async_method_scalarI_structureI(
3645 IOExternalAsyncMethod * method,
3646 IOService * object,
3647 mach_port_t asyncWakePort,
3648 io_user_reference_t * asyncReference,
3649 uint32_t asyncReferenceCount,
3650 const io_user_scalar_t * input,
3651 mach_msg_type_number_t inputCount,
3652 io_struct_inband_t inputStruct,
3653 mach_msg_type_number_t inputStructCount )
3654 {
3655 IOAsyncMethod func;
3656 uint32_t i;
3657 IOReturn err = kIOReturnBadArgument;
3658 io_async_ref_t reference;
3659
3660 for (i = 0; i < asyncReferenceCount; i++)
3661 reference[i] = REF32(asyncReference[i]);
3662
3663 do
3664 {
3665 if( (kIOUCVariableStructureSize != method->count0)
3666 && (inputCount != method->count0))
3667 {
3668 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3669 continue;
3670 }
3671 if( (kIOUCVariableStructureSize != method->count1)
3672 && (inputStructCount != method->count1))
3673 {
3674 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3675 continue;
3676 }
3677
3678 func = method->func;
3679
3680 switch( inputCount) {
3681
3682 case 5:
3683 err = (object->*func)( reference,
3684 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3685 ARG32(input[3]), ARG32(input[4]),
3686 inputStruct );
3687 break;
3688 case 4:
3689 err = (object->*func)( reference,
3690 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3691 ARG32(input[3]),
3692 inputStruct, (void *)inputStructCount );
3693 break;
3694 case 3:
3695 err = (object->*func)( reference,
3696 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3697 inputStruct, (void *)inputStructCount,
3698 0 );
3699 break;
3700 case 2:
3701 err = (object->*func)( reference,
3702 ARG32(input[0]), ARG32(input[1]),
3703 inputStruct, (void *)inputStructCount,
3704 0, 0 );
3705 break;
3706 case 1:
3707 err = (object->*func)( reference,
3708 ARG32(input[0]),
3709 inputStruct, (void *)inputStructCount,
3710 0, 0, 0 );
3711 break;
3712 case 0:
3713 err = (object->*func)( reference,
3714 inputStruct, (void *)inputStructCount,
3715 0, 0, 0, 0 );
3716 break;
3717
3718 default:
3719 IOLog("%s: Bad method table\n", object->getName());
3720 }
3721 }
3722 while (false);
3723
3724 return( err);
3725 }
3726
3727 /* Routine io_connect_method_structureI_structureO */
3728 kern_return_t is_io_connect_method_structureI_structureO(
3729 io_object_t connect,
3730 uint32_t index,
3731 io_struct_inband_t input,
3732 mach_msg_type_number_t inputCount,
3733 io_struct_inband_t output,
3734 mach_msg_type_number_t * outputCount )
3735 {
3736 mach_msg_type_number_t scalar_outputCnt = 0;
3737 mach_vm_size_t ool_output_size = 0;
3738
3739 return (is_io_connect_method(connect, index,
3740 NULL, 0,
3741 input, inputCount,
3742 0, 0,
3743 output, outputCount,
3744 NULL, &scalar_outputCnt,
3745 0, &ool_output_size));
3746 }
3747
3748 kern_return_t shim_io_connect_method_structureI_structureO(
3749 IOExternalMethod * method,
3750 IOService * object,
3751 io_struct_inband_t input,
3752 mach_msg_type_number_t inputCount,
3753 io_struct_inband_t output,
3754 IOByteCount * outputCount )
3755 {
3756 IOMethod func;
3757 IOReturn err = kIOReturnBadArgument;
3758
3759 do
3760 {
3761 if( (kIOUCVariableStructureSize != method->count0)
3762 && (inputCount != method->count0))
3763 {
3764 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3765 continue;
3766 }
3767 if( (kIOUCVariableStructureSize != method->count1)
3768 && (*outputCount != method->count1))
3769 {
3770 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3771 continue;
3772 }
3773
3774 func = method->func;
3775
3776 if( method->count1) {
3777 if( method->count0) {
3778 err = (object->*func)( input, output,
3779 (void *)inputCount, outputCount, 0, 0 );
3780 } else {
3781 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
3782 }
3783 } else {
3784 err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 );
3785 }
3786 }
3787 while( false);
3788
3789
3790 return( err);
3791 }
3792
3793 kern_return_t shim_io_async_method_structureI_structureO(
3794 IOExternalAsyncMethod * method,
3795 IOService * object,
3796 mach_port_t asyncWakePort,
3797 io_user_reference_t * asyncReference,
3798 uint32_t asyncReferenceCount,
3799 io_struct_inband_t input,
3800 mach_msg_type_number_t inputCount,
3801 io_struct_inband_t output,
3802 mach_msg_type_number_t * outputCount )
3803 {
3804 IOAsyncMethod func;
3805 uint32_t i;
3806 IOReturn err;
3807 io_async_ref_t reference;
3808
3809 for (i = 0; i < asyncReferenceCount; i++)
3810 reference[i] = REF32(asyncReference[i]);
3811
3812 err = kIOReturnBadArgument;
3813 do
3814 {
3815 if( (kIOUCVariableStructureSize != method->count0)
3816 && (inputCount != method->count0))
3817 {
3818 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3819 continue;
3820 }
3821 if( (kIOUCVariableStructureSize != method->count1)
3822 && (*outputCount != method->count1))
3823 {
3824 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3825 continue;
3826 }
3827
3828 func = method->func;
3829
3830 if( method->count1) {
3831 if( method->count0) {
3832 err = (object->*func)( reference,
3833 input, output,
3834 (void *)inputCount, outputCount, 0, 0 );
3835 } else {
3836 err = (object->*func)( reference,
3837 output, outputCount, 0, 0, 0, 0 );
3838 }
3839 } else {
3840 err = (object->*func)( reference,
3841 input, (void *)inputCount, 0, 0, 0, 0 );
3842 }
3843 }
3844 while( false);
3845
3846 return( err);
3847 }
3848
3849 /* Routine io_make_matching */
3850 kern_return_t is_io_make_matching(
3851 mach_port_t master_port,
3852 uint32_t type,
3853 uint32_t options,
3854 io_struct_inband_t input,
3855 mach_msg_type_number_t inputCount,
3856 io_string_t matching )
3857 {
3858 OSSerialize * s;
3859 IOReturn err = kIOReturnSuccess;
3860 OSDictionary * dict;
3861
3862 if( master_port != master_device_port)
3863 return( kIOReturnNotPrivileged);
3864
3865 switch( type) {
3866
3867 case kIOServiceMatching:
3868 dict = IOService::serviceMatching( gIOServiceKey );
3869 break;
3870
3871 case kIOBSDNameMatching:
3872 dict = IOBSDNameMatching( (const char *) input );
3873 break;
3874
3875 case kIOOFPathMatching:
3876 dict = IOOFPathMatching( (const char *) input,
3877 matching, sizeof( io_string_t));
3878 break;
3879
3880 default:
3881 dict = 0;
3882 }
3883
3884 if( !dict)
3885 return( kIOReturnUnsupported);
3886
3887 do {
3888 s = OSSerialize::withCapacity(4096);
3889 if( !s) {
3890 err = kIOReturnNoMemory;
3891 continue;
3892 }
3893 s->clearText();
3894 if( !dict->serialize( s )) {
3895 err = kIOReturnUnsupported;
3896 continue;
3897 }
3898
3899 if( s->getLength() > sizeof( io_string_t)) {
3900 err = kIOReturnNoMemory;
3901 continue;
3902 } else
3903 strlcpy(matching, s->text(), sizeof(io_string_t));
3904 }
3905 while( false);
3906
3907 if( s)
3908 s->release();
3909 if( dict)
3910 dict->release();
3911
3912 return( err);
3913 }
3914
3915 /* Routine io_catalog_send_data */
3916 kern_return_t is_io_catalog_send_data(
3917 mach_port_t master_port,
3918 uint32_t flag,
3919 io_buf_ptr_t inData,
3920 mach_msg_type_number_t inDataCount,
3921 kern_return_t * result)
3922 {
3923 OSObject * obj = 0;
3924 vm_offset_t data;
3925 kern_return_t kr = kIOReturnError;
3926
3927 //printf("io_catalog_send_data called. flag: %d\n", flag);
3928
3929 if( master_port != master_device_port)
3930 return kIOReturnNotPrivileged;
3931
3932 if( (flag != kIOCatalogRemoveKernelLinker &&
3933 flag != kIOCatalogKextdActive &&
3934 flag != kIOCatalogKextdFinishedLaunching) &&
3935 ( !inData || !inDataCount) )
3936 {
3937 return kIOReturnBadArgument;
3938 }
3939
3940 if (inData) {
3941 vm_map_offset_t map_data;
3942
3943 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
3944 data = CAST_DOWN(vm_offset_t, map_data);
3945
3946 if( kr != KERN_SUCCESS)
3947 return kr;
3948
3949 // must return success after vm_map_copyout() succeeds
3950
3951 if( inDataCount ) {
3952 obj = (OSObject *)OSUnserializeXML((const char *)data);
3953 vm_deallocate( kernel_map, data, inDataCount );
3954 if( !obj) {
3955 *result = kIOReturnNoMemory;
3956 return( KERN_SUCCESS);
3957 }
3958 }
3959 }
3960
3961 switch ( flag ) {
3962 case kIOCatalogResetDrivers:
3963 case kIOCatalogResetDriversNoMatch: {
3964 OSArray * array;
3965
3966 array = OSDynamicCast(OSArray, obj);
3967 if (array) {
3968 if ( !gIOCatalogue->resetAndAddDrivers(array,
3969 flag == kIOCatalogResetDrivers) ) {
3970
3971 kr = kIOReturnError;
3972 }
3973 } else {
3974 kr = kIOReturnBadArgument;
3975 }
3976 }
3977 break;
3978
3979 case kIOCatalogAddDrivers:
3980 case kIOCatalogAddDriversNoMatch: {
3981 OSArray * array;
3982
3983 array = OSDynamicCast(OSArray, obj);
3984 if ( array ) {
3985 if ( !gIOCatalogue->addDrivers( array ,
3986 flag == kIOCatalogAddDrivers) ) {
3987 kr = kIOReturnError;
3988 }
3989 }
3990 else {
3991 kr = kIOReturnBadArgument;
3992 }
3993 }
3994 break;
3995
3996 case kIOCatalogRemoveDrivers:
3997 case kIOCatalogRemoveDriversNoMatch: {
3998 OSDictionary * dict;
3999
4000 dict = OSDynamicCast(OSDictionary, obj);
4001 if ( dict ) {
4002 if ( !gIOCatalogue->removeDrivers( dict,
4003 flag == kIOCatalogRemoveDrivers ) ) {
4004 kr = kIOReturnError;
4005 }
4006 }
4007 else {
4008 kr = kIOReturnBadArgument;
4009 }
4010 }
4011 break;
4012
4013 case kIOCatalogStartMatching: {
4014 OSDictionary * dict;
4015
4016 dict = OSDynamicCast(OSDictionary, obj);
4017 if ( dict ) {
4018 if ( !gIOCatalogue->startMatching( dict ) ) {
4019 kr = kIOReturnError;
4020 }
4021 }
4022 else {
4023 kr = kIOReturnBadArgument;
4024 }
4025 }
4026 break;
4027
4028 case kIOCatalogRemoveKernelLinker:
4029 kr = KERN_NOT_SUPPORTED;
4030 break;
4031
4032 case kIOCatalogKextdActive:
4033 #if !NO_KEXTD
4034 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4035 OSKext::setKextdActive();
4036
4037 /* Dump all nonloaded startup extensions; kextd will now send them
4038 * down on request.
4039 */
4040 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4041 #endif
4042 kr = kIOReturnSuccess;
4043 break;
4044
4045 case kIOCatalogKextdFinishedLaunching: {
4046 #if !NO_KEXTD
4047 static bool clearedBusy = false;
4048
4049 if (!clearedBusy) {
4050 IOService * serviceRoot = IOService::getServiceRoot();
4051 if (serviceRoot) {
4052 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4053 serviceRoot->adjustBusy(-1);
4054 clearedBusy = true;
4055 }
4056 }
4057 #endif
4058 kr = kIOReturnSuccess;
4059 }
4060 break;
4061
4062 default:
4063 kr = kIOReturnBadArgument;
4064 break;
4065 }
4066
4067 if (obj) obj->release();
4068
4069 *result = kr;
4070 return( KERN_SUCCESS);
4071 }
4072
4073 /* Routine io_catalog_terminate */
4074 kern_return_t is_io_catalog_terminate(
4075 mach_port_t master_port,
4076 uint32_t flag,
4077 io_name_t name )
4078 {
4079 kern_return_t kr;
4080
4081 if( master_port != master_device_port )
4082 return kIOReturnNotPrivileged;
4083
4084 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4085 kIOClientPrivilegeAdministrator );
4086 if( kIOReturnSuccess != kr)
4087 return( kr );
4088
4089 switch ( flag ) {
4090 #if !defined(SECURE_KERNEL)
4091 case kIOCatalogServiceTerminate:
4092 OSIterator * iter;
4093 IOService * service;
4094
4095 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4096 kIORegistryIterateRecursively);
4097 if ( !iter )
4098 return kIOReturnNoMemory;
4099
4100 do {
4101 iter->reset();
4102 while( (service = (IOService *)iter->getNextObject()) ) {
4103 if( service->metaCast(name)) {
4104 if ( !service->terminate( kIOServiceRequired
4105 | kIOServiceSynchronous) ) {
4106 kr = kIOReturnUnsupported;
4107 break;
4108 }
4109 }
4110 }
4111 } while( !service && !iter->isValid());
4112 iter->release();
4113 break;
4114
4115 case kIOCatalogModuleUnload:
4116 case kIOCatalogModuleTerminate:
4117 kr = gIOCatalogue->terminateDriversForModule(name,
4118 flag == kIOCatalogModuleUnload);
4119 break;
4120 #endif
4121
4122 default:
4123 kr = kIOReturnBadArgument;
4124 break;
4125 }
4126
4127 return( kr );
4128 }
4129
4130 /* Routine io_catalog_get_data */
4131 kern_return_t is_io_catalog_get_data(
4132 mach_port_t master_port,
4133 uint32_t flag,
4134 io_buf_ptr_t *outData,
4135 mach_msg_type_number_t *outDataCount)
4136 {
4137 kern_return_t kr = kIOReturnSuccess;
4138 OSSerialize * s;
4139
4140 if( master_port != master_device_port)
4141 return kIOReturnNotPrivileged;
4142
4143 //printf("io_catalog_get_data called. flag: %d\n", flag);
4144
4145 s = OSSerialize::withCapacity(4096);
4146 if ( !s )
4147 return kIOReturnNoMemory;
4148
4149 s->clearText();
4150
4151 kr = gIOCatalogue->serializeData(flag, s);
4152
4153 if ( kr == kIOReturnSuccess ) {
4154 vm_offset_t data;
4155 vm_map_copy_t copy;
4156 vm_size_t size;
4157
4158 size = s->getLength();
4159 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4160 if ( kr == kIOReturnSuccess ) {
4161 bcopy(s->text(), (void *)data, size);
4162 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4163 (vm_map_size_t)size, true, &copy);
4164 *outData = (char *)copy;
4165 *outDataCount = size;
4166 }
4167 }
4168
4169 s->release();
4170
4171 return kr;
4172 }
4173
4174 /* Routine io_catalog_get_gen_count */
4175 kern_return_t is_io_catalog_get_gen_count(
4176 mach_port_t master_port,
4177 uint32_t *genCount)
4178 {
4179 if( master_port != master_device_port)
4180 return kIOReturnNotPrivileged;
4181
4182 //printf("io_catalog_get_gen_count called.\n");
4183
4184 if ( !genCount )
4185 return kIOReturnBadArgument;
4186
4187 *genCount = gIOCatalogue->getGenerationCount();
4188
4189 return kIOReturnSuccess;
4190 }
4191
4192 /* Routine io_catalog_module_loaded.
4193 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); appears to be unused.
4194 */
4195 kern_return_t is_io_catalog_module_loaded(
4196 mach_port_t master_port,
4197 io_name_t name)
4198 {
4199 if( master_port != master_device_port)
4200 return kIOReturnNotPrivileged;
4201
4202 //printf("io_catalog_module_loaded called. name %s\n", name);
4203
4204 if ( !name )
4205 return kIOReturnBadArgument;
4206
4207 gIOCatalogue->moduleHasLoaded(name);
4208
4209 return kIOReturnSuccess;
4210 }
4211
4212 kern_return_t is_io_catalog_reset(
4213 mach_port_t master_port,
4214 uint32_t flag)
4215 {
4216 if( master_port != master_device_port)
4217 return kIOReturnNotPrivileged;
4218
4219 switch ( flag ) {
4220 case kIOCatalogResetDefault:
4221 gIOCatalogue->reset();
4222 break;
4223
4224 default:
4225 return kIOReturnBadArgument;
4226 }
4227
4228 return kIOReturnSuccess;
4229 }
4230
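// Brief user-space sketches for the two routines above: IOKitLib's
// IOCatalogueModuleLoaded() and IOCatalogueReset() wrappers (both declared in
// <IOKit/IOKitLib.h>) are thin shims over is_io_catalog_module_loaded() and
// is_io_catalog_reset(). Illustrative only, not compiled here.
#if 0
#include <IOKit/IOKitLib.h>

static void
catalogue_housekeeping(const char *bundleIdentifier)
{
    // Tell the catalogue a kext has finished loading so any personalities
    // waiting on it can be matched (mirrors gIOCatalogue->moduleHasLoaded()).
    IOCatalogueModuleLoaded(kIOMasterPortDefault, (char *) bundleIdentifier);

    // Drop back to the default personality set; only kIOCatalogResetDefault
    // is accepted, anything else returns kIOReturnBadArgument.
    IOCatalogueReset(kIOMasterPortDefault, kIOCatalogResetDefault);
}
#endif
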
4231 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4232 {
4233 kern_return_t result = kIOReturnBadArgument;
4234 IOUserClient *userClient;
4235
4236 if ((userClient = OSDynamicCast(IOUserClient,
4237 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4238 IOExternalTrap *trap;
4239 IOService *target = NULL;
4240
4241 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4242
4243 if (trap && target) {
4244 IOTrap func;
4245
4246 func = trap->func;
4247
4248 if (func) {
4249 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4250 }
4251 }
4252
4253 userClient->release();
4254 }
4255
4256 return result;
4257 }
4258
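// A sketch of how iokit_user_client_trap() is exercised end to end. A driver's
// IOUserClient subclass publishes traps by overriding getTargetAndTrapForIndex()
// (or getExternalTrapForIndex()); user space enters this handler through
// IOKitLib's IOConnectTrap0..6() calls, bypassing the MIG external-method path.
// Everything prefixed "My" below is hypothetical; illustrative only, not
// compiled here.
#if 0
// --- kernel side: inside a hypothetical MyUserClient : public IOUserClient ---
IOExternalTrap *
MyUserClient::getTargetAndTrapForIndex(IOService **targetP, UInt32 index)
{
    // fastPath must have the IOTrap shape:
    //   IOReturn fastPath(void *p1, void *p2, void *p3,
    //                     void *p4, void *p5, void *p6);
    static const IOExternalTrap sTraps[] = {
        { NULL, (IOTrap) &MyUserClient::fastPath },   // trap index 0
    };

    if (index >= (sizeof(sTraps) / sizeof(sTraps[0])))
        return NULL;

    *targetP = this;        // receiver for the (target->*func)(...) call above
    return (IOExternalTrap *) &sTraps[index];
}

// --- user side ----------------------------------------------------------------
kern_return_t
call_fast_path(io_connect_t connect, uintptr_t arg)
{
    // Unused trailing arguments are simply passed as zero.
    return IOConnectTrap6(connect, 0 /* index */, arg, 0, 0, 0, 0, 0);
}
#endif
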
4259 } /* extern "C" */
4260
4261 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4262 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4263 {
4264 IOReturn err;
4265 IOService * object;
4266 IOByteCount structureOutputSize;
4267
4268 if (dispatch)
4269 {
4270 uint32_t count;
4271 count = dispatch->checkScalarInputCount;
4272 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4273 {
4274 return (kIOReturnBadArgument);
4275 }
4276
4277 count = dispatch->checkStructureInputSize;
4278 if ((kIOUCVariableStructureSize != count)
4279 && (count != ((args->structureInputDescriptor)
4280 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4281 {
4282 return (kIOReturnBadArgument);
4283 }
4284
4285 count = dispatch->checkScalarOutputCount;
4286 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4287 {
4288 return (kIOReturnBadArgument);
4289 }
4290
4291 count = dispatch->checkStructureOutputSize;
4292 if ((kIOUCVariableStructureSize != count)
4293 && (count != ((args->structureOutputDescriptor)
4294 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4295 {
4296 return (kIOReturnBadArgument);
4297 }
4298
4299 if (dispatch->function)
4300 err = (*dispatch->function)(target, reference, args);
4301 else
4302 err = kIOReturnNoCompletion; /* implementor can dispatch */
4303
4304 return (err);
4305 }
4306
4307
4308 // pre-Leopard APIs don't support out-of-line (OOL) structs
4309 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4310 {
4311 err = kIOReturnIPCError;
4312 return (err);
4313 }
4314
4315 structureOutputSize = args->structureOutputSize;
4316
4317 if (args->asyncWakePort)
4318 {
4319 IOExternalAsyncMethod * method;
4320
4321 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
4322 return (kIOReturnUnsupported);
4323
4324 if (kIOUCForegroundOnly & method->flags)
4325 {
4326 /* is graphics access denied for current task? */
4327 if (proc_get_task_selfgpuacc_deny() != 0)
4328 return (kIOReturnNotPermitted);
4329 }
4330
4331 switch (method->flags & kIOUCTypeMask)
4332 {
4333 case kIOUCScalarIStructI:
4334 err = shim_io_async_method_scalarI_structureI( method, object,
4335 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4336 args->scalarInput, args->scalarInputCount,
4337 (char *)args->structureInput, args->structureInputSize );
4338 break;
4339
4340 case kIOUCScalarIScalarO:
4341 err = shim_io_async_method_scalarI_scalarO( method, object,
4342 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4343 args->scalarInput, args->scalarInputCount,
4344 args->scalarOutput, &args->scalarOutputCount );
4345 break;
4346
4347 case kIOUCScalarIStructO:
4348 err = shim_io_async_method_scalarI_structureO( method, object,
4349 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4350 args->scalarInput, args->scalarInputCount,
4351 (char *) args->structureOutput, &args->structureOutputSize );
4352 break;
4353
4354
4355 case kIOUCStructIStructO:
4356 err = shim_io_async_method_structureI_structureO( method, object,
4357 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4358 (char *)args->structureInput, args->structureInputSize,
4359 (char *) args->structureOutput, &args->structureOutputSize );
4360 break;
4361
4362 default:
4363 err = kIOReturnBadArgument;
4364 break;
4365 }
4366 }
4367 else
4368 {
4369 IOExternalMethod * method;
4370
4371 if( !(method = getTargetAndMethodForIndex(&object, selector)) )
4372 return (kIOReturnUnsupported);
4373
4374 if (kIOUCForegroundOnly & method->flags)
4375 {
4376 /* is graphics access denied for current task? */
4377 if (proc_get_task_selfgpuacc_deny() != 0)
4378 return (kIOReturnNotPermitted);
4379
4380 }
4381
4382 switch (method->flags & kIOUCTypeMask)
4383 {
4384 case kIOUCScalarIStructI:
4385 err = shim_io_connect_method_scalarI_structureI( method, object,
4386 args->scalarInput, args->scalarInputCount,
4387 (char *) args->structureInput, args->structureInputSize );
4388 break;
4389
4390 case kIOUCScalarIScalarO:
4391 err = shim_io_connect_method_scalarI_scalarO( method, object,
4392 args->scalarInput, args->scalarInputCount,
4393 args->scalarOutput, &args->scalarOutputCount );
4394 break;
4395
4396 case kIOUCScalarIStructO:
4397 err = shim_io_connect_method_scalarI_structureO( method, object,
4398 args->scalarInput, args->scalarInputCount,
4399 (char *) args->structureOutput, &structureOutputSize );
4400 break;
4401
4402
4403 case kIOUCStructIStructO:
4404 err = shim_io_connect_method_structureI_structureO( method, object,
4405 (char *) args->structureInput, args->structureInputSize,
4406 (char *) args->structureOutput, &structureOutputSize );
4407 break;
4408
4409 default:
4410 err = kIOReturnBadArgument;
4411 break;
4412 }
4413 }
4414
4415 args->structureOutputSize = structureOutputSize;
4416
4417 return (err);
4418 }
4419
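// externalMethod() above is the modern (Leopard and later) dispatch path: when
// a subclass supplies an IOExternalMethodDispatch, the four check* fields are
// validated against the caller-supplied counts before the function pointer is
// invoked; otherwise it falls back to the legacy getTargetAndMethodForIndex()
// shims. A minimal sketch of the usual override pattern follows; MyUserClient
// and its selectors are hypothetical. Illustrative only, not compiled here.
#if 0
// Static dispatch table indexed by selector. Field order matches
// IOExternalMethodDispatch: function, checkScalarInputCount,
// checkStructureInputSize, checkScalarOutputCount, checkStructureOutputSize.
static const IOExternalMethodDispatch sMethods[] = {
    // selector 0: one scalar in, one scalar out
    { (IOExternalMethodAction) &MyUserClient::sOpen,  1, 0, 1, 0 },
    // selector 1: variable-size structure in, nothing out
    { (IOExternalMethodAction) &MyUserClient::sWrite, 0, kIOUCVariableStructureSize, 0, 0 },
};

IOReturn
MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments *args,
                             IOExternalMethodDispatch *dispatch,
                             OSObject *target, void *reference)
{
    if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
        dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
        if (!target)
            target = this;
    }
    // Hand off to the argument checking and dispatch implemented above.
    return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
}
#endif
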
4420
4421 #if __LP64__
4422 OSMetaClassDefineReservedUnused(IOUserClient, 0);
4423 OSMetaClassDefineReservedUnused(IOUserClient, 1);
4424 #else
4425 OSMetaClassDefineReservedUsed(IOUserClient, 0);
4426 OSMetaClassDefineReservedUsed(IOUserClient, 1);
4427 #endif
4428 OSMetaClassDefineReservedUnused(IOUserClient, 2);
4429 OSMetaClassDefineReservedUnused(IOUserClient, 3);
4430 OSMetaClassDefineReservedUnused(IOUserClient, 4);
4431 OSMetaClassDefineReservedUnused(IOUserClient, 5);
4432 OSMetaClassDefineReservedUnused(IOUserClient, 6);
4433 OSMetaClassDefineReservedUnused(IOUserClient, 7);
4434 OSMetaClassDefineReservedUnused(IOUserClient, 8);
4435 OSMetaClassDefineReservedUnused(IOUserClient, 9);
4436 OSMetaClassDefineReservedUnused(IOUserClient, 10);
4437 OSMetaClassDefineReservedUnused(IOUserClient, 11);
4438 OSMetaClassDefineReservedUnused(IOUserClient, 12);
4439 OSMetaClassDefineReservedUnused(IOUserClient, 13);
4440 OSMetaClassDefineReservedUnused(IOUserClient, 14);
4441 OSMetaClassDefineReservedUnused(IOUserClient, 15);
4442