[apple/xnu.git] iokit/Kernel/IOUserClient.cpp (xnu-1504.15.3)
1 /*
2 * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <libkern/OSDebug.h>
41 #include <sys/proc.h>
42
43 #include <IOKit/assert.h>
44
45 #include "IOServicePrivate.h"
46 #include "IOKitKernelInternal.h"
47
48 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
49 #define SCALAR32(x) ((uint32_t )x)
50 #define ARG32(x) ((void *)SCALAR32(x))
51 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
52 #define REF32(x) ((int)(x))
53
54 enum
55 {
56 kIOUCAsync0Flags = 3ULL,
57 kIOUCAsync64Flag = 1ULL
58 };
59
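// Editorial note (not part of the original source): kIOUCAsync0Flags masks the two
// low bits of asyncRef[kIOAsyncReservedIndex], which otherwise holds the wake (reply)
// port; kIOUCAsync64Flag within that mask records whether the client supplied a 64-bit
// async reference. A minimal sketch of the layout, as used by setAsyncReference64()
// and sendAsyncResult64() later in this file:
//
//   port     = (mach_port_t) (reference[kIOAsyncReservedIndex] & ~kIOUCAsync0Flags);
//   is64bit  = (0 != (reference[kIOAsyncReservedIndex] & kIOUCAsync64Flag));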
60 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
61
62 // definitions we should get from osfmk
63
64 //typedef struct ipc_port * ipc_port_t;
65 typedef natural_t ipc_kobject_type_t;
66
67 #define IKOT_IOKIT_SPARE 27
68 #define IKOT_IOKIT_CONNECT 29
69 #define IKOT_IOKIT_OBJECT 30
70
71 extern "C" {
72
73 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
74 ipc_kobject_type_t type );
75
76 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
77
78 extern mach_port_name_t iokit_make_send_right( task_t task,
79 io_object_t obj, ipc_kobject_type_t type );
80
81 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
82
83 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
84
85 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
86
87 extern ipc_port_t master_device_port;
88
89 extern void iokit_retain_port( ipc_port_t port );
90 extern void iokit_release_port( ipc_port_t port );
91 extern void iokit_release_port_send( ipc_port_t port );
92
93 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
94
95 #include <mach/mach_traps.h>
96 #include <vm/vm_map.h>
97
98 } /* extern "C" */
99
100
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
104
105 class IOMachPort : public OSObject
106 {
107 OSDeclareDefaultStructors(IOMachPort)
108 public:
109 OSObject * object;
110 ipc_port_t port;
111 UInt32 mscount;
112 UInt8 holdDestroy;
113
114 static IOMachPort * portForObject( OSObject * obj,
115 ipc_kobject_type_t type );
116 static bool noMoreSendersForObject( OSObject * obj,
117 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
118 static void releasePortForObject( OSObject * obj,
119 ipc_kobject_type_t type );
120 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
121
122 static OSDictionary * dictForType( ipc_kobject_type_t type );
123
124 static mach_port_name_t makeSendRightForTask( task_t task,
125 io_object_t obj, ipc_kobject_type_t type );
126
127 virtual void free();
128 };
129
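// Editorial note: IOMachPort is the kernel-side record pairing an OSObject with the
// ipc_port_t that user space holds send rights to, so OSObject itself needs no port
// ivar. Entries are keyed by the object pointer in the two global dictionaries below.
// A hedged usage sketch, mirroring iokit_port_for_object() later in this file:
//
//   IOMachPort * mp = IOMachPort::portForObject(obj, IKOT_IOKIT_OBJECT);
//   if (mp) {
//       ipc_port_t port = mp->port;  // allocated on first lookup
//       mp->release();               // the per-type dictionary keeps its own reference
//   }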
130 #define super OSObject
131 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
132
133 static IOLock * gIOObjectPortLock;
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 // not in dictForType() for debugging ease
138 static OSDictionary * gIOObjectPorts;
139 static OSDictionary * gIOConnectPorts;
140
141 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
142 {
143 OSDictionary ** dict;
144
145 if( IKOT_IOKIT_OBJECT == type )
146 dict = &gIOObjectPorts;
147 else if( IKOT_IOKIT_CONNECT == type )
148 dict = &gIOConnectPorts;
149 else
150 return( 0 );
151
152 if( 0 == *dict)
153 *dict = OSDictionary::withCapacity( 1 );
154
155 return( *dict );
156 }
157
158 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
159 ipc_kobject_type_t type )
160 {
161 IOMachPort * inst = 0;
162 OSDictionary * dict;
163
164 IOTakeLock( gIOObjectPortLock);
165
166 do {
167
168 dict = dictForType( type );
169 if( !dict)
170 continue;
171
172 if( (inst = (IOMachPort *)
173 dict->getObject( (const OSSymbol *) obj ))) {
174 inst->mscount++;
175 inst->retain();
176 continue;
177 }
178
179 inst = new IOMachPort;
180 if( inst && !inst->init()) {
181 inst = 0;
182 continue;
183 }
184
185 inst->port = iokit_alloc_object_port( obj, type );
186 if( inst->port) {
187 // retains obj
188 dict->setObject( (const OSSymbol *) obj, inst );
189 inst->mscount++;
190
191 } else {
192 inst->release();
193 inst = 0;
194 }
195
196 } while( false );
197
198 IOUnlock( gIOObjectPortLock);
199
200 return( inst );
201 }
202
203 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
204 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
205 {
206 OSDictionary * dict;
207 IOMachPort * machPort;
208 bool destroyed = true;
209
210 IOTakeLock( gIOObjectPortLock);
211
212 if( (dict = dictForType( type ))) {
213 obj->retain();
214
215 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
216 if( machPort) {
217 destroyed = (machPort->mscount <= *mscount);
218 if( destroyed)
219 dict->removeObject( (const OSSymbol *) obj );
220 else
221 *mscount = machPort->mscount;
222 }
223 obj->release();
224 }
225
226 IOUnlock( gIOObjectPortLock);
227
228 return( destroyed );
229 }
230
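// Editorial note: noMoreSendersForObject() is called from the no-senders path in
// iokit_client_died(). It compares the make-send count recorded here (incremented each
// time portForObject() hands the port out) against the mscount carried by the
// no-senders notification. If the port was handed out again after the notification was
// generated, the counts differ, the entry is kept, and *mscount is updated so the
// caller can re-arm the notification; otherwise the entry is removed and the port may
// be destroyed.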
231 void IOMachPort::releasePortForObject( OSObject * obj,
232 ipc_kobject_type_t type )
233 {
234 OSDictionary * dict;
235 IOMachPort * machPort;
236
237 IOTakeLock( gIOObjectPortLock);
238
239 if( (dict = dictForType( type ))) {
240 obj->retain();
241 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
242 if( machPort && !machPort->holdDestroy)
243 dict->removeObject( (const OSSymbol *) obj );
244 obj->release();
245 }
246
247 IOUnlock( gIOObjectPortLock);
248 }
249
250 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
251 {
252 OSDictionary * dict;
253 IOMachPort * machPort;
254
255 IOLockLock( gIOObjectPortLock );
256
257 if( (dict = dictForType( type ))) {
258 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
259 if( machPort)
260 machPort->holdDestroy = true;
261 }
262
263 IOLockUnlock( gIOObjectPortLock );
264 }
265
266 void IOUserClient::destroyUserReferences( OSObject * obj )
267 {
268 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
269
270 // panther, 3160200
271 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
272
273 OSDictionary * dict;
274
275 IOTakeLock( gIOObjectPortLock);
276 obj->retain();
277
278 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
279 {
280 IOMachPort * port;
281 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
282 if (port)
283 {
284 IOUserClient * uc;
285 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
286 {
287 dict->setObject((const OSSymbol *) uc->mappings, port);
288 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
289
290 uc->mappings->release();
291 uc->mappings = 0;
292 }
293 dict->removeObject( (const OSSymbol *) obj );
294 }
295 }
296 obj->release();
297 IOUnlock( gIOObjectPortLock);
298 }
299
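// Editorial note: for IKOT_IOKIT_CONNECT ports, destroyUserReferences() cannot simply
// drop the port while client mappings may still be live (see the Panther-era comment
// and radar 3160200 above). When the dying IOUserClient still has a 'mappings' object,
// the existing port is re-keyed to that object via iokit_switch_object_port(), so
// outstanding send rights keep referring to a valid kobject until the mappings go away.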
300 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
301 io_object_t obj, ipc_kobject_type_t type )
302 {
303 return( iokit_make_send_right( task, obj, type ));
304 }
305
306 void IOMachPort::free( void )
307 {
308 if( port)
309 iokit_destroy_object_port( port );
310 super::free();
311 }
312
313 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
314
315 class IOUserNotification : public OSIterator
316 {
317 OSDeclareDefaultStructors(IOUserNotification)
318
319 IONotifier * holdNotify;
320 IOLock * lock;
321
322 public:
323
324 virtual bool init( void );
325 virtual void free();
326
327 virtual void setNotification( IONotifier * obj );
328
329 virtual void reset();
330 virtual bool isValid();
331 };
332
333 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
334
335 extern "C" {
336
337 // functions called from osfmk/device/iokit_rpc.c
338
339 void
340 iokit_add_reference( io_object_t obj )
341 {
342 if( obj)
343 obj->retain();
344 }
345
346 void
347 iokit_remove_reference( io_object_t obj )
348 {
349 if( obj)
350 obj->release();
351 }
352
353 ipc_port_t
354 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
355 {
356 IOMachPort * machPort;
357 ipc_port_t port;
358
359 if( (machPort = IOMachPort::portForObject( obj, type ))) {
360
361 port = machPort->port;
362 if( port)
363 iokit_retain_port( port );
364
365 machPort->release();
366
367 } else
368 port = NULL;
369
370 return( port );
371 }
372
373 kern_return_t
374 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
375 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
376 {
377 IOUserClient * client;
378 IOMemoryMap * map;
379 IOUserNotification * notify;
380
381 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
382 return( kIOReturnNotReady );
383
384 if( IKOT_IOKIT_CONNECT == type)
385 {
386 if( (client = OSDynamicCast( IOUserClient, obj )))
387 client->clientDied();
388 }
389 else if( IKOT_IOKIT_OBJECT == type)
390 {
391 if( (map = OSDynamicCast( IOMemoryMap, obj )))
392 map->taskDied();
393 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
394 notify->setNotification( 0 );
395 }
396
397 return( kIOReturnSuccess );
398 }
399
400 }; /* extern "C" */
401
402 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
403
404 class IOServiceUserNotification : public IOUserNotification
405 {
406 OSDeclareDefaultStructors(IOServiceUserNotification)
407
408 struct PingMsg {
409 mach_msg_header_t msgHdr;
410 OSNotificationHeader64 notifyHeader;
411 };
412
413 enum { kMaxOutstanding = 1024 };
414
415 PingMsg * pingMsg;
416 vm_size_t msgSize;
417 OSArray * newSet;
418 OSObject * lastEntry;
419 bool armed;
420
421 public:
422
423 virtual bool init( mach_port_t port, natural_t type,
424 void * reference, vm_size_t referenceSize,
425 bool clientIs64 );
426 virtual void free();
427
428 static bool _handler( void * target,
429 void * ref, IOService * newService, IONotifier * notifier );
430 virtual bool handler( void * ref, IOService * newService );
431
432 virtual OSObject * getNextObject();
433 };
434
435 class IOServiceMessageUserNotification : public IOUserNotification
436 {
437 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
438
439 struct PingMsg {
440 mach_msg_header_t msgHdr;
441 mach_msg_body_t msgBody;
442 mach_msg_port_descriptor_t ports[1];
443 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
444 };
445
446 PingMsg * pingMsg;
447 vm_size_t msgSize;
448 uint8_t clientIs64;
449 int owningPID;
450
451 public:
452
453 virtual bool init( mach_port_t port, natural_t type,
454 void * reference, vm_size_t referenceSize,
455 vm_size_t extraSize,
456 bool clientIs64 );
457
458 virtual void free();
459
460 static IOReturn _handler( void * target, void * ref,
461 UInt32 messageType, IOService * provider,
462 void * messageArgument, vm_size_t argSize );
463 virtual IOReturn handler( void * ref,
464 UInt32 messageType, IOService * provider,
465 void * messageArgument, vm_size_t argSize );
466
467 virtual OSObject * getNextObject();
468 };
469
470 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
471
472 #undef super
473 #define super OSIterator
474 OSDefineMetaClass( IOUserNotification, OSIterator )
475 OSDefineAbstractStructors( IOUserNotification, OSIterator )
476
477 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
478
479 bool IOUserNotification::init( void )
480 {
481 if( !super::init())
482 return( false );
483
484 lock = IOLockAlloc();
485 if( !lock)
486 return( false );
487
488 return( true );
489 }
490
491 void IOUserNotification::free( void )
492 {
493 if( holdNotify)
494 holdNotify->remove();
495 // can't be in handler now
496
497 if( lock)
498 IOLockFree( lock );
499
500 super::free();
501 }
502
503
504 void IOUserNotification::setNotification( IONotifier * notify )
505 {
506 IONotifier * previousNotify;
507
508 IOLockLock( gIOObjectPortLock);
509
510 previousNotify = holdNotify;
511 holdNotify = notify;
512
513 IOLockUnlock( gIOObjectPortLock);
514
515 if( previousNotify)
516 previousNotify->remove();
517 }
518
519 void IOUserNotification::reset()
520 {
521 // ?
522 }
523
524 bool IOUserNotification::isValid()
525 {
526 return( true );
527 }
528
529 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
530
531 #undef super
532 #define super IOUserNotification
533 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
534
535 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
536
537 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
538 void * reference, vm_size_t referenceSize,
539 bool clientIs64 )
540 {
541 newSet = OSArray::withCapacity( 1 );
542 if( !newSet)
543 return( false );
544
545 if (referenceSize > sizeof(OSAsyncReference64))
546 return( false );
547
548 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
549 pingMsg = (PingMsg *) IOMalloc( msgSize);
550 if( !pingMsg)
551 return( false );
552
553 bzero( pingMsg, msgSize);
554
555 pingMsg->msgHdr.msgh_remote_port = port;
556 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
557 MACH_MSG_TYPE_COPY_SEND /*remote*/,
558 MACH_MSG_TYPE_MAKE_SEND /*local*/);
559 pingMsg->msgHdr.msgh_size = msgSize;
560 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
561
562 pingMsg->notifyHeader.size = 0;
563 pingMsg->notifyHeader.type = type;
564 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
565
566 return( super::init() );
567 }
568
569 void IOServiceUserNotification::free( void )
570 {
571 PingMsg * _pingMsg;
572 vm_size_t _msgSize;
573 OSArray * _newSet;
574 OSObject * _lastEntry;
575
576 _pingMsg = pingMsg;
577 _msgSize = msgSize;
578 _lastEntry = lastEntry;
579 _newSet = newSet;
580
581 super::free();
582
583 if( _pingMsg && _msgSize)
584 IOFree( _pingMsg, _msgSize);
585
586 if( _lastEntry)
587 _lastEntry->release();
588
589 if( _newSet)
590 _newSet->release();
591 }
592
593 bool IOServiceUserNotification::_handler( void * target,
594 void * ref, IOService * newService, IONotifier * notifier )
595 {
596 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
597 }
598
599 bool IOServiceUserNotification::handler( void * ref,
600 IOService * newService )
601 {
602 unsigned int count;
603 kern_return_t kr;
604 ipc_port_t port = NULL;
605 bool sendPing = false;
606
607 IOTakeLock( lock );
608
609 count = newSet->getCount();
610 if( count < kMaxOutstanding) {
611
612 newSet->setObject( newService );
613 if( (sendPing = (armed && (0 == count))))
614 armed = false;
615 }
616
617 IOUnlock( lock );
618
619 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
620 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
621
622 if( sendPing) {
623 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
624 pingMsg->msgHdr.msgh_local_port = port;
625 else
626 pingMsg->msgHdr.msgh_local_port = NULL;
627
628 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
629 pingMsg->msgHdr.msgh_size);
630 if( port)
631 iokit_release_port( port );
632
633 if( KERN_SUCCESS != kr)
634 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
635 }
636
637 return( true );
638 }
639
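// Editorial note: the 'armed' flag batches wakeups. A ping message is sent only when a
// service arrives while the set is empty and armed; further arrivals are just queued in
// newSet (bounded by kMaxOutstanding). getNextObject() re-arms the notification once
// user space has drained the set, so the client sees one Mach message per burst of
// discovered services rather than one message per service.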
640 OSObject * IOServiceUserNotification::getNextObject()
641 {
642 unsigned int count;
643 OSObject * result;
644
645 IOTakeLock( lock );
646
647 if( lastEntry)
648 lastEntry->release();
649
650 count = newSet->getCount();
651 if( count ) {
652 result = newSet->getObject( count - 1 );
653 result->retain();
654 newSet->removeObject( count - 1);
655 } else {
656 result = 0;
657 armed = true;
658 }
659 lastEntry = result;
660
661 IOUnlock( lock );
662
663 return( result );
664 }
665
666 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
667
668 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
669
670 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
671
672 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
673 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
674 bool client64 )
675 {
676
677 if (referenceSize > sizeof(OSAsyncReference64))
678 return( false );
679
680 clientIs64 = client64;
681
682 owningPID = proc_selfpid();
683
684 extraSize += sizeof(IOServiceInterestContent64);
685 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
686 pingMsg = (PingMsg *) IOMalloc( msgSize);
687 if( !pingMsg)
688 return( false );
689
690 bzero( pingMsg, msgSize);
691
692 pingMsg->msgHdr.msgh_remote_port = port;
693 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
694 | MACH_MSGH_BITS(
695 MACH_MSG_TYPE_COPY_SEND /*remote*/,
696 MACH_MSG_TYPE_MAKE_SEND /*local*/);
697 pingMsg->msgHdr.msgh_size = msgSize;
698 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
699
700 pingMsg->msgBody.msgh_descriptor_count = 1;
701
702 pingMsg->ports[0].name = 0;
703 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
704 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
705
706 pingMsg->notifyHeader.size = extraSize;
707 pingMsg->notifyHeader.type = type;
708 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
709
710 return( super::init() );
711 }
712
713 void IOServiceMessageUserNotification::free( void )
714 {
715 PingMsg * _pingMsg;
716 vm_size_t _msgSize;
717
718 _pingMsg = pingMsg;
719 _msgSize = msgSize;
720
721 super::free();
722
723 if( _pingMsg && _msgSize)
724 IOFree( _pingMsg, _msgSize);
725 }
726
727 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
728 UInt32 messageType, IOService * provider,
729 void * argument, vm_size_t argSize )
730 {
731 return( ((IOServiceMessageUserNotification *) target)->handler(
732 ref, messageType, provider, argument, argSize));
733 }
734
735 IOReturn IOServiceMessageUserNotification::handler( void * ref,
736 UInt32 messageType, IOService * provider,
737 void * messageArgument, vm_size_t argSize )
738 {
739 kern_return_t kr;
740 ipc_port_t thisPort, providerPort;
741 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
742 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
743 // == pingMsg->notifyHeader.content;
744
745 if (kIOMessageCopyClientID == messageType)
746 {
747 *((void **) messageArgument) = IOCopyLogNameForPID(owningPID);
748 return (kIOReturnSuccess);
749 }
750
751 data->messageType = messageType;
752
753 if( argSize == 0)
754 {
755 data->messageArgument[0] = (io_user_reference_t) messageArgument;
756 if (clientIs64)
757 argSize = sizeof(data->messageArgument[0]);
758 else
759 {
760 data->messageArgument[0] |= (data->messageArgument[0] << 32);
761 argSize = sizeof(uint32_t);
762 }
763 }
764 else
765 {
766 if( argSize > kIOUserNotifyMaxMessageSize)
767 argSize = kIOUserNotifyMaxMessageSize;
768 bcopy( messageArgument, data->messageArgument, argSize );
769 }
770 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
771 + sizeof( IOServiceInterestContent64 )
772 - sizeof( data->messageArgument)
773 + argSize;
774
775 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
776 pingMsg->ports[0].name = providerPort;
777 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
778 pingMsg->msgHdr.msgh_local_port = thisPort;
779 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
780 pingMsg->msgHdr.msgh_size);
781 if( thisPort)
782 iokit_release_port( thisPort );
783 if( providerPort)
784 iokit_release_port( providerPort );
785
786 if( KERN_SUCCESS != kr)
787 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
788
789 return( kIOReturnSuccess );
790 }
791
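// Editorial note on the msgh_size arithmetic above: init() preallocates msgSize for the
// worst-case extra payload and records that extra area in notifyHeader.size. The send
// size therefore trims the unused tail down to the reference, one
// IOServiceInterestContent64 header, and the argSize bytes actually copied this time:
//
//   msgh_size = msgSize - notifyHeader.size
//             + sizeof(IOServiceInterestContent64)
//             - sizeof(data->messageArgument)   // the in-struct argument array
//             + argSize;                        // bytes copied for this message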
792 OSObject * IOServiceMessageUserNotification::getNextObject()
793 {
794 return( 0 );
795 }
796
797 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
798
799 #undef super
800 #define super IOService
801 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
802
803 void IOUserClient::initialize( void )
804 {
805 gIOObjectPortLock = IOLockAlloc();
806
807 assert( gIOObjectPortLock );
808 }
809
810 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
811 mach_port_t wakePort,
812 void *callback, void *refcon)
813 {
814 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
815 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
816 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
817 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
818 }
819
820 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
821 mach_port_t wakePort,
822 mach_vm_address_t callback, io_user_reference_t refcon)
823 {
824 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
825 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
826 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
827 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
828 }
829
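// Editorial note: setAsyncReference64() deliberately preserves the kIOUCAsync0Flags
// bits already present in asyncRef[kIOAsyncReservedIndex] (recording a 64-bit client)
// while installing the wake port, callback and refcon. A hedged, simplified sketch of
// the completion side a driver would perform with sendAsyncResult64() further below
// (names and argument contents are hypothetical):
//
//   io_user_reference_t args[1] = { (io_user_reference_t) byteCount };
//   sendAsyncResult64(savedAsyncRef64, kIOReturnSuccess, args, 1);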
830 static OSDictionary * CopyConsoleUser(UInt32 uid)
831 {
832 OSArray * array;
833 OSDictionary * user = 0;
834
835 if ((array = OSDynamicCast(OSArray,
836 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
837 {
838 for (unsigned int idx = 0;
839 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
840 idx++) {
841 OSNumber * num;
842
843 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
844 && (uid == num->unsigned32BitValue())) {
845 user->retain();
846 break;
847 }
848 }
849 array->release();
850 }
851 return user;
852 }
853
854 static bool IOUCIsBackgroundTask(task_t task, bool * isBg)
855 {
856 kern_return_t kr;
857 task_category_policy_data_t info;
858 mach_msg_type_number_t count = TASK_CATEGORY_POLICY_COUNT;
859 boolean_t get_default = false;
860
861 kr = task_policy_get(current_task(),
862 TASK_CATEGORY_POLICY,
863 (task_policy_t) &info,
864 &count,
865 &get_default);
866
867 *isBg = ((KERN_SUCCESS == kr) && (info.role == TASK_THROTTLE_APPLICATION));
868 return (kr);
869 }
870
871 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
872 const char * privilegeName )
873 {
874 kern_return_t kr;
875 security_token_t token;
876 mach_msg_type_number_t count;
877 task_t task;
878 OSDictionary * user;
879 bool secureConsole;
880
881
882 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
883 sizeof(kIOClientPrivilegeForeground)))
884 {
885 bool isBg;
886 kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
887
888 if (KERN_SUCCESS != kr)
889 return (kr);
890 return (isBg ? kIOReturnNotPrivileged : kIOReturnSuccess);
891 }
892
893 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
894 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
895 task = (task_t)((IOUCProcessToken *)securityToken)->token;
896 else
897 task = (task_t)securityToken;
898
899 count = TASK_SECURITY_TOKEN_COUNT;
900 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
901
902 if (KERN_SUCCESS != kr)
903 {}
904 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
905 sizeof(kIOClientPrivilegeAdministrator))) {
906 if (0 != token.val[0])
907 kr = kIOReturnNotPrivileged;
908 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
909 sizeof(kIOClientPrivilegeLocalUser))) {
910 user = CopyConsoleUser(token.val[0]);
911 if ( user )
912 user->release();
913 else
914 kr = kIOReturnNotPrivileged;
915 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
916 sizeof(kIOClientPrivilegeConsoleUser))) {
917 user = CopyConsoleUser(token.val[0]);
918 if ( user ) {
919 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
920 kr = kIOReturnNotPrivileged;
921 else if ( secureConsole ) {
922 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
923 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
924 kr = kIOReturnNotPrivileged;
925 }
926 user->release();
927 }
928 else
929 kr = kIOReturnNotPrivileged;
930 } else
931 kr = kIOReturnUnsupported;
932
933 return (kr);
934 }
935
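// Editorial note: a hedged example of how a user client subclass would typically gate a
// privileged operation on its owning task (the task pointer is the securityToken for
// all privileges except kIOClientPrivilegeSecureConsoleProcess, which takes an
// IOUCProcessToken). 'owningTask' is an illustrative name:
//
//   if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(owningTask,
//                                          kIOClientPrivilegeAdministrator))
//       return (kIOReturnNotPrivileged);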
936 bool IOUserClient::init()
937 {
938 if( getPropertyTable())
939 return true;
940 else
941 return super::init();
942 }
943
944 bool IOUserClient::init(OSDictionary * dictionary)
945 {
946 if( getPropertyTable())
947 return true;
948 else
949 return super::init(dictionary);
950 }
951
952 bool IOUserClient::initWithTask(task_t owningTask,
953 void * securityID,
954 UInt32 type )
955 {
956 if( getPropertyTable())
957 return true;
958 else
959 return super::init();
960 }
961
962 bool IOUserClient::initWithTask(task_t owningTask,
963 void * securityID,
964 UInt32 type,
965 OSDictionary * properties )
966 {
967 bool ok;
968
969 ok = super::init( properties );
970 ok &= initWithTask( owningTask, securityID, type );
971
972 return( ok );
973 }
974
975 void IOUserClient::free()
976 {
977 if( mappings)
978 mappings->release();
979
980 super::free();
981 }
982
983 IOReturn IOUserClient::clientDied( void )
984 {
985 return( clientClose());
986 }
987
988 IOReturn IOUserClient::clientClose( void )
989 {
990 return( kIOReturnUnsupported );
991 }
992
993 IOService * IOUserClient::getService( void )
994 {
995 return( 0 );
996 }
997
998 IOReturn IOUserClient::registerNotificationPort(
999 mach_port_t /* port */,
1000 UInt32 /* type */,
1001 UInt32 /* refCon */)
1002 {
1003 return( kIOReturnUnsupported);
1004 }
1005
1006 IOReturn IOUserClient::registerNotificationPort(
1007 mach_port_t port,
1008 UInt32 type,
1009 io_user_reference_t refCon)
1010 {
1011 return (registerNotificationPort(port, type, (UInt32) refCon));
1012 }
1013
1014 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1015 semaphore_t * semaphore )
1016 {
1017 return( kIOReturnUnsupported);
1018 }
1019
1020 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1021 {
1022 return( kIOReturnUnsupported);
1023 }
1024
1025 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1026 IOOptionBits * options,
1027 IOMemoryDescriptor ** memory )
1028 {
1029 return( kIOReturnUnsupported);
1030 }
1031
1032 #if !__LP64__
1033 IOMemoryMap * IOUserClient::mapClientMemory(
1034 IOOptionBits type,
1035 task_t task,
1036 IOOptionBits mapFlags,
1037 IOVirtualAddress atAddress )
1038 {
1039 return (NULL);
1040 }
1041 #endif
1042
1043 IOMemoryMap * IOUserClient::mapClientMemory64(
1044 IOOptionBits type,
1045 task_t task,
1046 IOOptionBits mapFlags,
1047 mach_vm_address_t atAddress )
1048 {
1049 IOReturn err;
1050 IOOptionBits options = 0;
1051 IOMemoryDescriptor * memory;
1052 IOMemoryMap * map = 0;
1053
1054 err = clientMemoryForType( (UInt32) type, &options, &memory );
1055
1056 if( memory && (kIOReturnSuccess == err)) {
1057
1058 options = (options & ~kIOMapUserOptionsMask)
1059 | (mapFlags & kIOMapUserOptionsMask);
1060 map = memory->createMappingInTask( task, atAddress, options );
1061 memory->release();
1062 }
1063
1064 return( map );
1065 }
1066
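// Editorial note: mapClientMemory64() is driven entirely by the subclass's
// clientMemoryForType() override; the base class only merges the caller-supplied map
// flags into the option bits and builds the mapping in the requesting task. A hedged
// sketch of the subclass side ('MyUserClient' and 'fSharedBuffer' are hypothetical):
//
//   IOReturn MyUserClient::clientMemoryForType(UInt32 type, IOOptionBits * options,
//                                              IOMemoryDescriptor ** memory)
//   {
//       fSharedBuffer->retain();      // mapClientMemory64() releases it after mapping
//       *options = 0;
//       *memory  = fSharedBuffer;     // e.g. an IOBufferMemoryDescriptor
//       return kIOReturnSuccess;
//   }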
1067 IOReturn IOUserClient::exportObjectToClient(task_t task,
1068 OSObject *obj, io_object_t *clientObj)
1069 {
1070 mach_port_name_t name;
1071
1072 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1073 assert( name );
1074
1075 *(mach_port_name_t *)clientObj = name;
1076 return kIOReturnSuccess;
1077 }
1078
1079 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1080 {
1081 return( 0 );
1082 }
1083
1084 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1085 {
1086 return( 0 );
1087 }
1088
1089 IOExternalMethod * IOUserClient::
1090 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1091 {
1092 IOExternalMethod *method = getExternalMethodForIndex(index);
1093
1094 if (method)
1095 *targetP = (IOService *) method->object;
1096
1097 return method;
1098 }
1099
1100 IOExternalAsyncMethod * IOUserClient::
1101 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1102 {
1103 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1104
1105 if (method)
1106 *targetP = (IOService *) method->object;
1107
1108 return method;
1109 }
1110
1111 IOExternalTrap * IOUserClient::
1112 getExternalTrapForIndex(UInt32 index)
1113 {
1114 return NULL;
1115 }
1116
1117 IOExternalTrap * IOUserClient::
1118 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1119 {
1120 IOExternalTrap *trap = getExternalTrapForIndex(index);
1121
1122 if (trap) {
1123 *targetP = trap->object;
1124 }
1125
1126 return trap;
1127 }
1128
1129 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1130 {
1131 mach_port_t port;
1132 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1133
1134 if (MACH_PORT_NULL != port)
1135 iokit_release_port_send(port);
1136
1137 return (kIOReturnSuccess);
1138 }
1139
1140 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1141 {
1142 if (MACH_PORT_NULL != port)
1143 iokit_release_port_send(port);
1144
1145 return (kIOReturnSuccess);
1146 }
1147
1148 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1149 IOReturn result, void *args[], UInt32 numArgs)
1150 {
1151 OSAsyncReference64 reference64;
1152 io_user_reference_t args64[kMaxAsyncArgs];
1153 unsigned int idx;
1154
1155 if (numArgs > kMaxAsyncArgs)
1156 return kIOReturnMessageTooLarge;
1157
1158 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1159 reference64[idx] = REF64(reference[idx]);
1160
1161 for (idx = 0; idx < numArgs; idx++)
1162 args64[idx] = REF64(args[idx]);
1163
1164 return (sendAsyncResult64(reference64, result, args64, numArgs));
1165 }
1166
1167 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1168 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1169 {
1170 struct ReplyMsg
1171 {
1172 mach_msg_header_t msgHdr;
1173 union
1174 {
1175 struct
1176 {
1177 OSNotificationHeader notifyHdr;
1178 IOAsyncCompletionContent asyncContent;
1179 uint32_t args[kMaxAsyncArgs];
1180 } msg32;
1181 struct
1182 {
1183 OSNotificationHeader64 notifyHdr;
1184 IOAsyncCompletionContent asyncContent;
1185 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1186 } msg64;
1187 } m;
1188 };
1189 ReplyMsg replyMsg;
1190 mach_port_t replyPort;
1191 kern_return_t kr;
1192
1193 // If no reply port, do nothing.
1194 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1195 if (replyPort == MACH_PORT_NULL)
1196 return kIOReturnSuccess;
1197
1198 if (numArgs > kMaxAsyncArgs)
1199 return kIOReturnMessageTooLarge;
1200
1201 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1202 0 /*local*/);
1203 replyMsg.msgHdr.msgh_remote_port = replyPort;
1204 replyMsg.msgHdr.msgh_local_port = 0;
1205 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1206 if (kIOUCAsync64Flag & reference[0])
1207 {
1208 replyMsg.msgHdr.msgh_size =
1209 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1210 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1211 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1212 + numArgs * sizeof(io_user_reference_t);
1213 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1214 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1215
1216 replyMsg.m.msg64.asyncContent.result = result;
1217 if (numArgs)
1218 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1219 }
1220 else
1221 {
1222 unsigned int idx;
1223
1224 replyMsg.msgHdr.msgh_size =
1225 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1226 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1227
1228 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1229 + numArgs * sizeof(uint32_t);
1230 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1231
1232 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1233 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1234
1235 replyMsg.m.msg32.asyncContent.result = result;
1236
1237 for (idx = 0; idx < numArgs; idx++)
1238 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1239 }
1240
1241 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1242 replyMsg.msgHdr.msgh_size);
1243 if( KERN_SUCCESS != kr)
1244 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1245 return kr;
1246 }
1247
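// Editorial note: the reply layout is selected by the kIOUCAsync64Flag bit stashed in
// reference[0] (see setAsyncReference64() above), so a 32-bit client receives uint32_t
// reference and argument slots while a 64-bit client receives io_user_reference_t
// slots. In both cases msgh_size is trimmed by the (kMaxAsyncArgs - numArgs) unused
// argument slots, so only the arguments actually supplied are copied to the reply port.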
1248
1249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1250
1251 extern "C" {
1252
1253 #define CHECK(cls,obj,out) \
1254 cls * out; \
1255 if( !(out = OSDynamicCast( cls, obj))) \
1256 return( kIOReturnBadArgument )
1257
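// Editorial note: CHECK() both declares the narrowed local and bails out on a type
// mismatch. For example, CHECK( IOService, _service, service ) expands to
//
//   IOService * service;
//   if( !(service = OSDynamicCast( IOService, _service)))
//       return( kIOReturnBadArgument );
//
// which is why the MIG handlers below can use the narrowed name unconditionally.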
1258 /* Routine io_object_get_class */
1259 kern_return_t is_io_object_get_class(
1260 io_object_t object,
1261 io_name_t className )
1262 {
1263 const OSMetaClass* my_obj = NULL;
1264
1265 if( !object)
1266 return( kIOReturnBadArgument );
1267
1268 my_obj = object->getMetaClass();
1269 if (!my_obj) {
1270 return (kIOReturnNotFound);
1271 }
1272
1273 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1274 return( kIOReturnSuccess );
1275 }
1276
1277 /* Routine io_object_get_superclass */
1278 kern_return_t is_io_object_get_superclass(
1279 mach_port_t master_port,
1280 io_name_t obj_name,
1281 io_name_t class_name)
1282 {
1283 const OSMetaClass* my_obj = NULL;
1284 const OSMetaClass* superclass = NULL;
1285 const OSSymbol *my_name = NULL;
1286 const char *my_cstr = NULL;
1287
1288 if (!obj_name || !class_name)
1289 return (kIOReturnBadArgument);
1290
1291 if( master_port != master_device_port)
1292 return( kIOReturnNotPrivileged);
1293
1294 my_name = OSSymbol::withCString(obj_name);
1295
1296 if (my_name) {
1297 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1298 my_name->release();
1299 }
1300 if (my_obj) {
1301 superclass = my_obj->getSuperClass();
1302 }
1303
1304 if (!superclass) {
1305 return( kIOReturnNotFound );
1306 }
1307
1308 my_cstr = superclass->getClassName();
1309
1310 if (my_cstr) {
1311 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1312 return( kIOReturnSuccess );
1313 }
1314 return (kIOReturnNotFound);
1315 }
1316
1317 /* Routine io_object_get_bundle_identifier */
1318 kern_return_t is_io_object_get_bundle_identifier(
1319 mach_port_t master_port,
1320 io_name_t obj_name,
1321 io_name_t bundle_name)
1322 {
1323 const OSMetaClass* my_obj = NULL;
1324 const OSSymbol *my_name = NULL;
1325 const OSSymbol *identifier = NULL;
1326 const char *my_cstr = NULL;
1327
1328 if (!obj_name || !bundle_name)
1329 return (kIOReturnBadArgument);
1330
1331 if( master_port != master_device_port)
1332 return( kIOReturnNotPrivileged);
1333
1334 my_name = OSSymbol::withCString(obj_name);
1335
1336 if (my_name) {
1337 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1338 my_name->release();
1339 }
1340
1341 if (my_obj) {
1342 identifier = my_obj->getKmodName();
1343 }
1344 if (!identifier) {
1345 return( kIOReturnNotFound );
1346 }
1347
1348 my_cstr = identifier->getCStringNoCopy();
1349 if (my_cstr) {
1350 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1351 return( kIOReturnSuccess );
1352 }
1353
1354 return (kIOReturnBadArgument);
1355 }
1356
1357 /* Routine io_object_conforms_to */
1358 kern_return_t is_io_object_conforms_to(
1359 io_object_t object,
1360 io_name_t className,
1361 boolean_t *conforms )
1362 {
1363 if( !object)
1364 return( kIOReturnBadArgument );
1365
1366 *conforms = (0 != object->metaCast( className ));
1367 return( kIOReturnSuccess );
1368 }
1369
1370 /* Routine io_object_get_retain_count */
1371 kern_return_t is_io_object_get_retain_count(
1372 io_object_t object,
1373 uint32_t *retainCount )
1374 {
1375 if( !object)
1376 return( kIOReturnBadArgument );
1377
1378 *retainCount = object->getRetainCount();
1379 return( kIOReturnSuccess );
1380 }
1381
1382 /* Routine io_iterator_next */
1383 kern_return_t is_io_iterator_next(
1384 io_object_t iterator,
1385 io_object_t *object )
1386 {
1387 OSObject * obj;
1388
1389 CHECK( OSIterator, iterator, iter );
1390
1391 obj = iter->getNextObject();
1392 if( obj) {
1393 obj->retain();
1394 *object = obj;
1395 return( kIOReturnSuccess );
1396 } else
1397 return( kIOReturnNoDevice );
1398 }
1399
1400 /* Routine io_iterator_reset */
1401 kern_return_t is_io_iterator_reset(
1402 io_object_t iterator )
1403 {
1404 CHECK( OSIterator, iterator, iter );
1405
1406 iter->reset();
1407
1408 return( kIOReturnSuccess );
1409 }
1410
1411 /* Routine io_iterator_is_valid */
1412 kern_return_t is_io_iterator_is_valid(
1413 io_object_t iterator,
1414 boolean_t *is_valid )
1415 {
1416 CHECK( OSIterator, iterator, iter );
1417
1418 *is_valid = iter->isValid();
1419
1420 return( kIOReturnSuccess );
1421 }
1422
1423 /* Routine io_service_match_property_table */
1424 kern_return_t is_io_service_match_property_table(
1425 io_service_t _service,
1426 io_string_t matching,
1427 boolean_t *matches )
1428 {
1429 CHECK( IOService, _service, service );
1430
1431 kern_return_t kr;
1432 OSObject * obj;
1433 OSDictionary * dict;
1434
1435 obj = OSUnserializeXML( matching );
1436
1437 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1438 *matches = service->passiveMatch( dict );
1439 kr = kIOReturnSuccess;
1440 } else
1441 kr = kIOReturnBadArgument;
1442
1443 if( obj)
1444 obj->release();
1445
1446 return( kr );
1447 }
1448
1449 /* Routine io_service_match_property_table_ool */
1450 kern_return_t is_io_service_match_property_table_ool(
1451 io_object_t service,
1452 io_buf_ptr_t matching,
1453 mach_msg_type_number_t matchingCnt,
1454 kern_return_t *result,
1455 boolean_t *matches )
1456 {
1457 kern_return_t kr;
1458 vm_offset_t data;
1459 vm_map_offset_t map_data;
1460
1461 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1462 data = CAST_DOWN(vm_offset_t, map_data);
1463
1464 if( KERN_SUCCESS == kr) {
1465 // must return success after vm_map_copyout() succeeds
1466 *result = is_io_service_match_property_table( service,
1467 (char *) data, matches );
1468 vm_deallocate( kernel_map, data, matchingCnt );
1469 }
1470
1471 return( kr );
1472 }
1473
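// Editorial note: the *_ool variants receive the matching string as an out-of-line
// vm_map_copy_t. Once vm_map_copyout() has consumed that copy object the routine must
// report KERN_SUCCESS to MIG (otherwise the copy would be freed twice); the real
// outcome of the inline routine is returned separately through *result, and the copied
// pages are released here with vm_deallocate().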
1474 /* Routine io_service_get_matching_services */
1475 kern_return_t is_io_service_get_matching_services(
1476 mach_port_t master_port,
1477 io_string_t matching,
1478 io_iterator_t *existing )
1479 {
1480 kern_return_t kr;
1481 OSObject * obj;
1482 OSDictionary * dict;
1483
1484 if( master_port != master_device_port)
1485 return( kIOReturnNotPrivileged);
1486
1487 obj = OSUnserializeXML( matching );
1488
1489 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1490 *existing = IOService::getMatchingServices( dict );
1491 kr = kIOReturnSuccess;
1492 } else
1493 kr = kIOReturnBadArgument;
1494
1495 if( obj)
1496 obj->release();
1497
1498 return( kr );
1499 }
1500
1501 /* Routine io_service_get_matching_services_ool */
1502 kern_return_t is_io_service_get_matching_services_ool(
1503 mach_port_t master_port,
1504 io_buf_ptr_t matching,
1505 mach_msg_type_number_t matchingCnt,
1506 kern_return_t *result,
1507 io_object_t *existing )
1508 {
1509 kern_return_t kr;
1510 vm_offset_t data;
1511 vm_map_offset_t map_data;
1512
1513 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1514 data = CAST_DOWN(vm_offset_t, map_data);
1515
1516 if( KERN_SUCCESS == kr) {
1517 // must return success after vm_map_copyout() succeeds
1518 *result = is_io_service_get_matching_services( master_port,
1519 (char *) data, existing );
1520 vm_deallocate( kernel_map, data, matchingCnt );
1521 }
1522
1523 return( kr );
1524 }
1525
1526 static kern_return_t internal_io_service_add_notification(
1527 mach_port_t master_port,
1528 io_name_t notification_type,
1529 io_string_t matching,
1530 mach_port_t port,
1531 void * reference,
1532 vm_size_t referenceSize,
1533 bool client64,
1534 io_object_t * notification )
1535 {
1536 IOServiceUserNotification * userNotify = 0;
1537 IONotifier * notify = 0;
1538 const OSSymbol * sym;
1539 OSDictionary * dict;
1540 IOReturn err;
1541 unsigned long int userMsgType;
1542
1543
1544 if( master_port != master_device_port)
1545 return( kIOReturnNotPrivileged);
1546
1547 do {
1548 err = kIOReturnNoResources;
1549
1550 if( !(sym = OSSymbol::withCString( notification_type )))
1551 err = kIOReturnNoResources;
1552
1553 if( !(dict = OSDynamicCast( OSDictionary,
1554 OSUnserializeXML( matching )))) {
1555 err = kIOReturnBadArgument;
1556 continue;
1557 }
1558
1559 if( (sym == gIOPublishNotification)
1560 || (sym == gIOFirstPublishNotification))
1561 userMsgType = kIOServicePublishNotificationType;
1562 else if( (sym == gIOMatchedNotification)
1563 || (sym == gIOFirstMatchNotification))
1564 userMsgType = kIOServiceMatchedNotificationType;
1565 else if( sym == gIOTerminatedNotification)
1566 userMsgType = kIOServiceTerminatedNotificationType;
1567 else
1568 userMsgType = kLastIOKitNotificationType;
1569
1570 userNotify = new IOServiceUserNotification;
1571
1572 if( userNotify && !userNotify->init( port, userMsgType,
1573 reference, referenceSize, client64)) {
1574 userNotify->release();
1575 userNotify = 0;
1576 }
1577 if( !userNotify)
1578 continue;
1579
1580 notify = IOService::addMatchingNotification( sym, dict,
1581 &userNotify->_handler, userNotify );
1582 if( notify) {
1583 *notification = userNotify;
1584 userNotify->setNotification( notify );
1585 err = kIOReturnSuccess;
1586 } else
1587 err = kIOReturnUnsupported;
1588
1589 } while( false );
1590
1591 if( sym)
1592 sym->release();
1593 if( dict)
1594 dict->release();
1595
1596 return( err );
1597 }
1598
1599
1600 /* Routine io_service_add_notification */
1601 kern_return_t is_io_service_add_notification(
1602 mach_port_t master_port,
1603 io_name_t notification_type,
1604 io_string_t matching,
1605 mach_port_t port,
1606 io_async_ref_t reference,
1607 mach_msg_type_number_t referenceCnt,
1608 io_object_t * notification )
1609 {
1610 return (internal_io_service_add_notification(master_port, notification_type,
1611 matching, port, &reference[0], sizeof(io_async_ref_t),
1612 false, notification));
1613 }
1614
1615 /* Routine io_service_add_notification_64 */
1616 kern_return_t is_io_service_add_notification_64(
1617 mach_port_t master_port,
1618 io_name_t notification_type,
1619 io_string_t matching,
1620 mach_port_t wake_port,
1621 io_async_ref64_t reference,
1622 mach_msg_type_number_t referenceCnt,
1623 io_object_t *notification )
1624 {
1625 return (internal_io_service_add_notification(master_port, notification_type,
1626 matching, wake_port, &reference[0], sizeof(io_async_ref64_t),
1627 true, notification));
1628 }
1629
1630
1631 static kern_return_t internal_io_service_add_notification_ool(
1632 mach_port_t master_port,
1633 io_name_t notification_type,
1634 io_buf_ptr_t matching,
1635 mach_msg_type_number_t matchingCnt,
1636 mach_port_t wake_port,
1637 void * reference,
1638 vm_size_t referenceSize,
1639 bool client64,
1640 kern_return_t *result,
1641 io_object_t *notification )
1642 {
1643 kern_return_t kr;
1644 vm_offset_t data;
1645 vm_map_offset_t map_data;
1646
1647 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1648 data = CAST_DOWN(vm_offset_t, map_data);
1649
1650 if( KERN_SUCCESS == kr) {
1651 // must return success after vm_map_copyout() succeeds
1652 *result = internal_io_service_add_notification( master_port, notification_type,
1653 (char *) data, wake_port, reference, referenceSize, client64, notification );
1654 vm_deallocate( kernel_map, data, matchingCnt );
1655 }
1656
1657 return( kr );
1658 }
1659
1660 /* Routine io_service_add_notification_ool */
1661 kern_return_t is_io_service_add_notification_ool(
1662 mach_port_t master_port,
1663 io_name_t notification_type,
1664 io_buf_ptr_t matching,
1665 mach_msg_type_number_t matchingCnt,
1666 mach_port_t wake_port,
1667 io_async_ref_t reference,
1668 mach_msg_type_number_t referenceCnt,
1669 kern_return_t *result,
1670 io_object_t *notification )
1671 {
1672 return (internal_io_service_add_notification_ool(master_port, notification_type,
1673 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
1674 false, result, notification));
1675 }
1676
1677 /* Routine io_service_add_notification_ool_64 */
1678 kern_return_t is_io_service_add_notification_ool_64(
1679 mach_port_t master_port,
1680 io_name_t notification_type,
1681 io_buf_ptr_t matching,
1682 mach_msg_type_number_t matchingCnt,
1683 mach_port_t wake_port,
1684 io_async_ref64_t reference,
1685 mach_msg_type_number_t referenceCnt,
1686 kern_return_t *result,
1687 io_object_t *notification )
1688 {
1689 return (internal_io_service_add_notification_ool(master_port, notification_type,
1690 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
1691 true, result, notification));
1692 }
1693
1694 /* Routine io_service_add_notification_old */
1695 kern_return_t is_io_service_add_notification_old(
1696 mach_port_t master_port,
1697 io_name_t notification_type,
1698 io_string_t matching,
1699 mach_port_t port,
1700 // for binary compatibility reasons, this must be natural_t for ILP32
1701 natural_t ref,
1702 io_object_t * notification )
1703 {
1704 return( is_io_service_add_notification( master_port, notification_type,
1705 matching, port, &ref, 1, notification ));
1706 }
1707
1708
1709 static kern_return_t internal_io_service_add_interest_notification(
1710 io_object_t _service,
1711 io_name_t type_of_interest,
1712 mach_port_t port,
1713 void * reference,
1714 vm_size_t referenceSize,
1715 bool client64,
1716 io_object_t * notification )
1717 {
1718
1719 IOServiceMessageUserNotification * userNotify = 0;
1720 IONotifier * notify = 0;
1721 const OSSymbol * sym;
1722 IOReturn err;
1723
1724 CHECK( IOService, _service, service );
1725
1726 err = kIOReturnNoResources;
1727 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
1728
1729 userNotify = new IOServiceMessageUserNotification;
1730
1731 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
1732 reference, referenceSize,
1733 kIOUserNotifyMaxMessageSize,
1734 client64 )) {
1735 userNotify->release();
1736 userNotify = 0;
1737 }
1738 if( !userNotify)
1739 continue;
1740
1741 notify = service->registerInterest( sym,
1742 &userNotify->_handler, userNotify );
1743 if( notify) {
1744 *notification = userNotify;
1745 userNotify->setNotification( notify );
1746 err = kIOReturnSuccess;
1747 } else
1748 err = kIOReturnUnsupported;
1749
1750 sym->release();
1751
1752 } while( false );
1753
1754 return( err );
1755 }
1756
1757 /* Routine io_service_add_interest_notification */
1758 kern_return_t is_io_service_add_interest_notification(
1759 io_object_t service,
1760 io_name_t type_of_interest,
1761 mach_port_t port,
1762 io_async_ref_t reference,
1763 mach_msg_type_number_t referenceCnt,
1764 io_object_t * notification )
1765 {
1766 return (internal_io_service_add_interest_notification(service, type_of_interest,
1767 port, &reference[0], sizeof(io_async_ref_t), false, notification));
1768 }
1769
1770 /* Routine io_service_add_interest_notification_64 */
1771 kern_return_t is_io_service_add_interest_notification_64(
1772 io_object_t service,
1773 io_name_t type_of_interest,
1774 mach_port_t wake_port,
1775 io_async_ref64_t reference,
1776 mach_msg_type_number_t referenceCnt,
1777 io_object_t *notification )
1778 {
1779 return (internal_io_service_add_interest_notification(service, type_of_interest,
1780 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
1781 }
1782
1783
1784 /* Routine io_service_acknowledge_notification */
1785 kern_return_t is_io_service_acknowledge_notification(
1786 io_object_t _service,
1787 natural_t notify_ref,
1788 natural_t response )
1789 {
1790 CHECK( IOService, _service, service );
1791
1792 return( service->acknowledgeNotification( (IONotificationRef) notify_ref,
1793 (IOOptionBits) response ));
1794
1795 }
1796
1797 /* Routine io_connect_get_notification_semaphore */
1798 kern_return_t is_io_connect_get_notification_semaphore(
1799 io_connect_t connection,
1800 natural_t notification_type,
1801 semaphore_t *semaphore )
1802 {
1803 CHECK( IOUserClient, connection, client );
1804
1805 return( client->getNotificationSemaphore( (UInt32) notification_type,
1806 semaphore ));
1807 }
1808
1809 /* Routine io_registry_get_root_entry */
1810 kern_return_t is_io_registry_get_root_entry(
1811 mach_port_t master_port,
1812 io_object_t *root )
1813 {
1814 IORegistryEntry * entry;
1815
1816 if( master_port != master_device_port)
1817 return( kIOReturnNotPrivileged);
1818
1819 entry = IORegistryEntry::getRegistryRoot();
1820 if( entry)
1821 entry->retain();
1822 *root = entry;
1823
1824 return( kIOReturnSuccess );
1825 }
1826
1827 /* Routine io_registry_create_iterator */
1828 kern_return_t is_io_registry_create_iterator(
1829 mach_port_t master_port,
1830 io_name_t plane,
1831 uint32_t options,
1832 io_object_t *iterator )
1833 {
1834 if( master_port != master_device_port)
1835 return( kIOReturnNotPrivileged);
1836
1837 *iterator = IORegistryIterator::iterateOver(
1838 IORegistryEntry::getPlane( plane ), options );
1839
1840 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1841 }
1842
1843 /* Routine io_registry_entry_create_iterator */
1844 kern_return_t is_io_registry_entry_create_iterator(
1845 io_object_t registry_entry,
1846 io_name_t plane,
1847 uint32_t options,
1848 io_object_t *iterator )
1849 {
1850 CHECK( IORegistryEntry, registry_entry, entry );
1851
1852 *iterator = IORegistryIterator::iterateOver( entry,
1853 IORegistryEntry::getPlane( plane ), options );
1854
1855 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1856 }
1857
1858 /* Routine io_registry_iterator_enter_entry */
1859 kern_return_t is_io_registry_iterator_enter_entry(
1860 io_object_t iterator )
1861 {
1862 CHECK( IORegistryIterator, iterator, iter );
1863
1864 iter->enterEntry();
1865
1866 return( kIOReturnSuccess );
1867 }
1868
1869 /* Routine io_registry_iterator_exit_entry */
1870 kern_return_t is_io_registry_iterator_exit_entry(
1871 io_object_t iterator )
1872 {
1873 bool didIt;
1874
1875 CHECK( IORegistryIterator, iterator, iter );
1876
1877 didIt = iter->exitEntry();
1878
1879 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
1880 }
1881
1882 /* Routine io_registry_entry_from_path */
1883 kern_return_t is_io_registry_entry_from_path(
1884 mach_port_t master_port,
1885 io_string_t path,
1886 io_object_t *registry_entry )
1887 {
1888 IORegistryEntry * entry;
1889
1890 if( master_port != master_device_port)
1891 return( kIOReturnNotPrivileged);
1892
1893 entry = IORegistryEntry::fromPath( path );
1894
1895 *registry_entry = entry;
1896
1897 return( kIOReturnSuccess );
1898 }
1899
1900 /* Routine io_registry_entry_in_plane */
1901 kern_return_t is_io_registry_entry_in_plane(
1902 io_object_t registry_entry,
1903 io_name_t plane,
1904 boolean_t *inPlane )
1905 {
1906 CHECK( IORegistryEntry, registry_entry, entry );
1907
1908 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
1909
1910 return( kIOReturnSuccess );
1911 }
1912
1913
1914 /* Routine io_registry_entry_get_path */
1915 kern_return_t is_io_registry_entry_get_path(
1916 io_object_t registry_entry,
1917 io_name_t plane,
1918 io_string_t path )
1919 {
1920 int length;
1921 CHECK( IORegistryEntry, registry_entry, entry );
1922
1923 length = sizeof( io_string_t);
1924 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
1925 return( kIOReturnSuccess );
1926 else
1927 return( kIOReturnBadArgument );
1928 }
1929
1930
1931 /* Routine io_registry_entry_get_name */
1932 kern_return_t is_io_registry_entry_get_name(
1933 io_object_t registry_entry,
1934 io_name_t name )
1935 {
1936 CHECK( IORegistryEntry, registry_entry, entry );
1937
1938 strncpy( name, entry->getName(), sizeof( io_name_t));
1939
1940 return( kIOReturnSuccess );
1941 }
1942
1943 /* Routine io_registry_entry_get_name_in_plane */
1944 kern_return_t is_io_registry_entry_get_name_in_plane(
1945 io_object_t registry_entry,
1946 io_name_t planeName,
1947 io_name_t name )
1948 {
1949 const IORegistryPlane * plane;
1950 CHECK( IORegistryEntry, registry_entry, entry );
1951
1952 if( planeName[0])
1953 plane = IORegistryEntry::getPlane( planeName );
1954 else
1955 plane = 0;
1956
1957 strncpy( name, entry->getName( plane), sizeof( io_name_t));
1958
1959 return( kIOReturnSuccess );
1960 }
1961
1962 /* Routine io_registry_entry_get_location_in_plane */
1963 kern_return_t is_io_registry_entry_get_location_in_plane(
1964 io_object_t registry_entry,
1965 io_name_t planeName,
1966 io_name_t location )
1967 {
1968 const IORegistryPlane * plane;
1969 CHECK( IORegistryEntry, registry_entry, entry );
1970
1971 if( planeName[0])
1972 plane = IORegistryEntry::getPlane( planeName );
1973 else
1974 plane = 0;
1975
1976 const char * cstr = entry->getLocation( plane );
1977
1978 if( cstr) {
1979 strncpy( location, cstr, sizeof( io_name_t));
1980 return( kIOReturnSuccess );
1981 } else
1982 return( kIOReturnNotFound );
1983 }
1984
1985 /* Routine io_registry_entry_get_registry_entry_id */
1986 kern_return_t is_io_registry_entry_get_registry_entry_id(
1987 io_object_t registry_entry,
1988 uint64_t *entry_id )
1989 {
1990 CHECK( IORegistryEntry, registry_entry, entry );
1991
1992 *entry_id = entry->getRegistryEntryID();
1993
1994 return (kIOReturnSuccess);
1995 }
1996
1997 // Create a vm_map_copy_t or kalloc'ed data for memory
1998 // to be copied out. ipc will free after the copyout.
1999
2000 static kern_return_t copyoutkdata( void * data, vm_size_t len,
2001 io_buf_ptr_t * buf )
2002 {
2003 kern_return_t err;
2004 vm_map_copy_t copy;
2005
2006 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2007 false /* src_destroy */, &copy);
2008
2009 assert( err == KERN_SUCCESS );
2010 if( err == KERN_SUCCESS )
2011 *buf = (char *) copy;
2012
2013 return( err );
2014 }
2015
2016 /* Routine io_registry_entry_get_property_bytes */
2017 kern_return_t is_io_registry_entry_get_property_bytes(
2018 io_object_t registry_entry,
2019 io_name_t property_name,
2020 io_struct_inband_t buf,
2021 mach_msg_type_number_t *dataCnt )
2022 {
2023 OSObject * obj;
2024 OSData * data;
2025 OSString * str;
2026 OSBoolean * boo;
2027 OSNumber * off;
2028 UInt64 offsetBytes;
2029 unsigned int len = 0;
2030 const void * bytes = 0;
2031 IOReturn ret = kIOReturnSuccess;
2032
2033 CHECK( IORegistryEntry, registry_entry, entry );
2034
2035 obj = entry->copyProperty(property_name);
2036 if( !obj)
2037 return( kIOReturnNoResources );
2038
2039 // One day OSData will be a common container base class
2040 // until then...
2041 if( (data = OSDynamicCast( OSData, obj ))) {
2042 len = data->getLength();
2043 bytes = data->getBytesNoCopy();
2044
2045 } else if( (str = OSDynamicCast( OSString, obj ))) {
2046 len = str->getLength() + 1;
2047 bytes = str->getCStringNoCopy();
2048
2049 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2050 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2051 bytes = boo->isTrue() ? "Yes" : "No";
2052
2053 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2054 offsetBytes = off->unsigned64BitValue();
2055 len = off->numberOfBytes();
2056 bytes = &offsetBytes;
2057 #ifdef __BIG_ENDIAN__
2058 bytes = (const void *)
2059 (((UInt32) bytes) + (sizeof( UInt64) - len));
2060 #endif
2061
2062 } else
2063 ret = kIOReturnBadArgument;
2064
2065 if( bytes) {
2066 if( *dataCnt < len)
2067 ret = kIOReturnIPCError;
2068 else {
2069 *dataCnt = len;
2070 bcopy( bytes, buf, len );
2071 }
2072 }
2073 obj->release();
2074
2075 return( ret );
2076 }
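/*
 * The routine above serves small, fixed-size property reads over the inband
 * MIG path. A minimal user-space sketch of the usual caller (assuming
 * IOKitLib's IORegistryEntryGetProperty wrapper; not part of this file):
 *
 *   io_struct_inband_t buf;
 *   uint32_t size = sizeof(buf);
 *   kern_return_t kr = IORegistryEntryGetProperty(entry, "IOClass", buf, &size);
 *
 * On success, buf holds up to 'size' bytes: raw OSData bytes, a C string for
 * an OSString, "Yes"/"No" for an OSBoolean, or the value bytes of an OSNumber,
 * matching the cases handled above.
 */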
2077
2078
2079 /* Routine io_registry_entry_get_property */
2080 kern_return_t is_io_registry_entry_get_property(
2081 io_object_t registry_entry,
2082 io_name_t property_name,
2083 io_buf_ptr_t *properties,
2084 mach_msg_type_number_t *propertiesCnt )
2085 {
2086 kern_return_t err;
2087 vm_size_t len;
2088 OSObject * obj;
2089
2090 CHECK( IORegistryEntry, registry_entry, entry );
2091
2092 obj = entry->copyProperty(property_name);
2093 if( !obj)
2094 return( kIOReturnNotFound );
2095
2096 OSSerialize * s = OSSerialize::withCapacity(4096);
2097 if( !s) {
2098 obj->release();
2099 return( kIOReturnNoMemory );
2100 }
2101 s->clearText();
2102
2103 if( obj->serialize( s )) {
2104 len = s->getLength();
2105 *propertiesCnt = len;
2106 err = copyoutkdata( s->text(), len, properties );
2107
2108 } else
2109 err = kIOReturnUnsupported;
2110
2111 s->release();
2112 obj->release();
2113
2114 return( err );
2115 }
2116
2117 /* Routine io_registry_entry_get_property_recursively */
2118 kern_return_t is_io_registry_entry_get_property_recursively(
2119 io_object_t registry_entry,
2120 io_name_t plane,
2121 io_name_t property_name,
2122 uint32_t options,
2123 io_buf_ptr_t *properties,
2124 mach_msg_type_number_t *propertiesCnt )
2125 {
2126 kern_return_t err;
2127 vm_size_t len;
2128 OSObject * obj;
2129
2130 CHECK( IORegistryEntry, registry_entry, entry );
2131
2132 obj = entry->copyProperty( property_name,
2133 IORegistryEntry::getPlane( plane ), options);
2134 if( !obj)
2135 return( kIOReturnNotFound );
2136
2137 OSSerialize * s = OSSerialize::withCapacity(4096);
2138 if( !s) {
2139 obj->release();
2140 return( kIOReturnNoMemory );
2141 }
2142
2143 s->clearText();
2144
2145 if( obj->serialize( s )) {
2146 len = s->getLength();
2147 *propertiesCnt = len;
2148 err = copyoutkdata( s->text(), len, properties );
2149
2150 } else
2151 err = kIOReturnUnsupported;
2152
2153 s->release();
2154 obj->release();
2155
2156 return( err );
2157 }
2158
2159 /* Routine io_registry_entry_get_properties */
2160 kern_return_t is_io_registry_entry_get_properties(
2161 io_object_t registry_entry,
2162 io_buf_ptr_t *properties,
2163 mach_msg_type_number_t *propertiesCnt )
2164 {
2165 kern_return_t err;
2166 vm_size_t len;
2167
2168 CHECK( IORegistryEntry, registry_entry, entry );
2169
2170 OSSerialize * s = OSSerialize::withCapacity(4096);
2171 if( !s)
2172 return( kIOReturnNoMemory );
2173
2174 s->clearText();
2175
2176 if( entry->serializeProperties( s )) {
2177 len = s->getLength();
2178 *propertiesCnt = len;
2179 err = copyoutkdata( s->text(), len, properties );
2180
2181 } else
2182 err = kIOReturnUnsupported;
2183
2184 s->release();
2185
2186 return( err );
2187 }
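/*
 * The three getters above share one pattern: serialize the property (or the
 * whole property table) into an OSSerialize buffer, then hand the text to
 * copyoutkdata(), which wraps it in a vm_map_copy_t that the IPC layer
 * releases after the copyout. A sketch of the typical user-space consumer
 * (assuming IOKitLib's IORegistryEntryCreateCFProperties; not part of this
 * file), which unserializes the reply back into CF objects:
 *
 *   CFMutableDictionaryRef props = NULL;
 *   kern_return_t kr = IORegistryEntryCreateCFProperties(entry, &props,
 *                                                        kCFAllocatorDefault, 0);
 *   if ((kr == KERN_SUCCESS) && props) {
 *       // ... look up values ...
 *       CFRelease(props);
 *   }
 */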
2188
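/*
 * Routines that accept a vm_map_copy_t input and report status through a
 * separate kern_return_t *result (set_properties below, io_service_open_extended,
 * io_catalog_send_data) follow one convention: once vm_map_copyout() has
 * succeeded, the MIG-level return must be KERN_SUCCESS (presumably so the
 * already-consumed copy object is not destroyed a second time); the real
 * outcome travels back in *result instead.
 */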
2189 /* Routine io_registry_entry_set_properties */
2190 kern_return_t is_io_registry_entry_set_properties
2191 (
2192 io_object_t registry_entry,
2193 io_buf_ptr_t properties,
2194 mach_msg_type_number_t propertiesCnt,
2195 kern_return_t * result)
2196 {
2197 OSObject * obj;
2198 kern_return_t err;
2199 IOReturn res;
2200 vm_offset_t data;
2201 vm_map_offset_t map_data;
2202
2203 CHECK( IORegistryEntry, registry_entry, entry );
2204
2205 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2206 data = CAST_DOWN(vm_offset_t, map_data);
2207
2208 if( KERN_SUCCESS == err) {
2209
2210 // must return success after vm_map_copyout() succeeds
2211 obj = OSUnserializeXML( (const char *) data );
2212 vm_deallocate( kernel_map, data, propertiesCnt );
2213
2214 if( obj) {
2215 res = entry->setProperties( obj );
2216 obj->release();
2217 } else
2218 res = kIOReturnBadArgument;
2219 } else
2220 res = err;
2221
2222 *result = res;
2223 return( err );
2224 }
2225
2226 /* Routine io_registry_entry_get_child_iterator */
2227 kern_return_t is_io_registry_entry_get_child_iterator(
2228 io_object_t registry_entry,
2229 io_name_t plane,
2230 io_object_t *iterator )
2231 {
2232 CHECK( IORegistryEntry, registry_entry, entry );
2233
2234 *iterator = entry->getChildIterator(
2235 IORegistryEntry::getPlane( plane ));
2236
2237 return( kIOReturnSuccess );
2238 }
2239
2240 /* Routine io_registry_entry_get_parent_iterator */
2241 kern_return_t is_io_registry_entry_get_parent_iterator(
2242 io_object_t registry_entry,
2243 io_name_t plane,
2244 io_object_t *iterator)
2245 {
2246 CHECK( IORegistryEntry, registry_entry, entry );
2247
2248 *iterator = entry->getParentIterator(
2249 IORegistryEntry::getPlane( plane ));
2250
2251 return( kIOReturnSuccess );
2252 }
2253
2254 /* Routine io_service_get_busy_state */
2255 kern_return_t is_io_service_get_busy_state(
2256 io_object_t _service,
2257 uint32_t *busyState )
2258 {
2259 CHECK( IOService, _service, service );
2260
2261 *busyState = service->getBusyState();
2262
2263 return( kIOReturnSuccess );
2264 }
2265
2266 /* Routine io_service_get_state */
2267 kern_return_t is_io_service_get_state(
2268 io_object_t _service,
2269 uint64_t *state,
2270 uint32_t *busy_state,
2271 uint64_t *accumulated_busy_time )
2272 {
2273 CHECK( IOService, _service, service );
2274
2275 *state = service->getState();
2276 *busy_state = service->getBusyState();
2277 *accumulated_busy_time = service->getAccumulatedBusyTime();
2278
2279 return( kIOReturnSuccess );
2280 }
2281
2282 /* Routine io_service_wait_quiet */
2283 kern_return_t is_io_service_wait_quiet(
2284 io_object_t _service,
2285 mach_timespec_t wait_time )
2286 {
2287 uint64_t timeoutNS;
2288
2289 CHECK( IOService, _service, service );
2290
2291 timeoutNS = wait_time.tv_sec;
2292 timeoutNS *= kSecondScale;
2293 timeoutNS += wait_time.tv_nsec;
2294
2295 return( service->waitQuiet(timeoutNS) );
2296 }
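// kSecondScale is 1,000,000,000, so the conversion above is simply
// timeoutNS = tv_sec * 1e9 + tv_nsec; for example { tv_sec = 5,
// tv_nsec = 500000000 } yields a 5.5 second timeout for waitQuiet().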
2297
2298 /* Routine io_service_request_probe */
2299 kern_return_t is_io_service_request_probe(
2300 io_object_t _service,
2301 uint32_t options )
2302 {
2303 CHECK( IOService, _service, service );
2304
2305 return( service->requestProbe( options ));
2306 }
2307
2308
2309 /* Routine io_service_open */
2310 kern_return_t is_io_service_open(
2311 io_object_t _service,
2312 task_t owningTask,
2313 uint32_t connect_type,
2314 io_object_t *connection )
2315 {
2316 IOUserClient * client;
2317 IOReturn err;
2318
2319 CHECK( IOService, _service, service );
2320
2321 err = service->newUserClient( owningTask, (void *) owningTask,
2322 connect_type, 0, &client );
2323
2324 if( err == kIOReturnSuccess) {
2325 assert( OSDynamicCast(IOUserClient, client) );
2326 *connection = client;
2327 }
2328
2329 return( err);
2330 }
2331
2332 /* Routine io_service_open_extended */
2333 kern_return_t is_io_service_open_extended(
2334 io_object_t _service,
2335 task_t owningTask,
2336 uint32_t connect_type,
2337 NDR_record_t ndr,
2338 io_buf_ptr_t properties,
2339 mach_msg_type_number_t propertiesCnt,
2340 kern_return_t * result,
2341 io_object_t *connection )
2342 {
2343 IOUserClient * client = 0;
2344 kern_return_t err = KERN_SUCCESS;
2345 IOReturn res = kIOReturnSuccess;
2346 OSDictionary * propertiesDict = 0;
2347 bool crossEndian;
2348 bool disallowAccess;
2349
2350 CHECK( IOService, _service, service );
2351
2352 do
2353 {
2354 if (properties)
2355 {
2356 OSObject * obj;
2357 vm_offset_t data;
2358 vm_map_offset_t map_data;
2359
2360 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2361 res = err;
2362 data = CAST_DOWN(vm_offset_t, map_data);
2363 if (KERN_SUCCESS == err)
2364 {
2365 // must return success after vm_map_copyout() succeeds
2366 obj = OSUnserializeXML( (const char *) data );
2367 vm_deallocate( kernel_map, data, propertiesCnt );
2368 propertiesDict = OSDynamicCast(OSDictionary, obj);
2369 if (!propertiesDict)
2370 {
2371 res = kIOReturnBadArgument;
2372 if (obj)
2373 obj->release();
2374 }
2375 }
2376 if (kIOReturnSuccess != res)
2377 break;
2378 }
2379
2380 crossEndian = (ndr.int_rep != NDR_record.int_rep);
2381 if (crossEndian)
2382 {
2383 if (!propertiesDict)
2384 propertiesDict = OSDictionary::withCapacity(4);
2385 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
2386 if (data)
2387 {
2388 if (propertiesDict)
2389 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
2390 data->release();
2391 }
2392 }
2393
2394 res = service->newUserClient( owningTask, (void *) owningTask,
2395 connect_type, propertiesDict, &client );
2396
2397 if (propertiesDict)
2398 propertiesDict->release();
2399
2400 if (res == kIOReturnSuccess)
2401 {
2402 assert( OSDynamicCast(IOUserClient, client) );
2403
2404 disallowAccess = (crossEndian
2405 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
2406 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
2407
2408 if (disallowAccess)
2409 {
2410 client->clientClose();
2411 client->release();
2412 client = 0;
2413 res = kIOReturnUnsupported;
2414 break;
2415 }
2416 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
2417 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
2418 if (creatorName)
2419 {
2420 client->setProperty(kIOUserClientCreatorKey, creatorName);
2421 creatorName->release();
2422 }
2423 }
2424 }
2425 while (false);
2426
2427 *connection = client;
2428 *result = res;
2429
2430 return (err);
2431 }
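/*
 * User space normally reaches this path through IOKitLib's IOServiceOpen()
 * (not part of this file). A minimal sketch, assuming an io_service_t already
 * located with IOServiceGetMatchingService():
 *
 *   io_connect_t connect = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &connect);
 *   // connect_type 0 is hypothetical; drivers define their own types.
 *   // On success, 'connect' names the IOUserClient created by newUserClient()
 *   // above; release it with IOServiceClose(connect).
 */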
2432
2433 /* Routine io_service_close */
2434 kern_return_t is_io_service_close(
2435 io_object_t connection )
2436 {
2437 OSSet * mappings;
2438 if ((mappings = OSDynamicCast(OSSet, connection)))
2439 return( kIOReturnSuccess );
2440
2441 CHECK( IOUserClient, connection, client );
2442
2443 client->clientClose();
2444
2445 return( kIOReturnSuccess );
2446 }
2447
2448 /* Routine io_connect_get_service */
2449 kern_return_t is_io_connect_get_service(
2450 io_object_t connection,
2451 io_object_t *service )
2452 {
2453 IOService * theService;
2454
2455 CHECK( IOUserClient, connection, client );
2456
2457 theService = client->getService();
2458 if( theService)
2459 theService->retain();
2460
2461 *service = theService;
2462
2463 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
2464 }
2465
2466 /* Routine io_connect_set_notification_port */
2467 kern_return_t is_io_connect_set_notification_port(
2468 io_object_t connection,
2469 uint32_t notification_type,
2470 mach_port_t port,
2471 uint32_t reference)
2472 {
2473 CHECK( IOUserClient, connection, client );
2474
2475 return( client->registerNotificationPort( port, notification_type,
2476 (io_user_reference_t) reference ));
2477 }
2478
2479 /* Routine io_connect_set_notification_port */
2480 kern_return_t is_io_connect_set_notification_port_64(
2481 io_object_t connection,
2482 uint32_t notification_type,
2483 mach_port_t port,
2484 io_user_reference_t reference)
2485 {
2486 CHECK( IOUserClient, connection, client );
2487
2488 return( client->registerNotificationPort( port, notification_type,
2489 reference ));
2490 }
2491
2492 /* Routine io_connect_map_memory_into_task */
2493 kern_return_t is_io_connect_map_memory_into_task
2494 (
2495 io_connect_t connection,
2496 uint32_t memory_type,
2497 task_t into_task,
2498 mach_vm_address_t *address,
2499 mach_vm_size_t *size,
2500 uint32_t flags
2501 )
2502 {
2503 IOReturn err;
2504 IOMemoryMap * map;
2505
2506 CHECK( IOUserClient, connection, client );
2507
2508 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
2509
2510 if( map) {
2511 *address = map->getAddress();
2512 if( size)
2513 *size = map->getSize();
2514
2515 if( client->sharedInstance
2516 || (into_task != current_task())) {
2517 // push a name out to the task owning the map,
2518 // so we can clean up maps
2519 mach_port_name_t name __unused =
2520 IOMachPort::makeSendRightForTask(
2521 into_task, map, IKOT_IOKIT_OBJECT );
2522 assert( name );
2523
2524 } else {
2525 // keep it with the user client
2526 IOLockLock( gIOObjectPortLock);
2527 if( 0 == client->mappings)
2528 client->mappings = OSSet::withCapacity(2);
2529 if( client->mappings)
2530 client->mappings->setObject( map);
2531 IOLockUnlock( gIOObjectPortLock);
2532 map->release();
2533 }
2534 err = kIOReturnSuccess;
2535
2536 } else
2537 err = kIOReturnBadArgument;
2538
2539 return( err );
2540 }
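/*
 * The usual user-space entry point for the routine above is IOKitLib's
 * IOConnectMapMemory64() (a sketch under that assumption; the wrapper itself
 * is not part of this file):
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(connect, 0, mach_task_self(),
 *                                           &addr, &size, kIOMapAnywhere);
 *   // memoryType 0 is hypothetical; the driver's clientMemoryForType()
 *   // decides what each type exposes. Undo with IOConnectUnmapMemory64().
 */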
2541
2542 /* Routine io_connect_map_memory */
2543 kern_return_t is_io_connect_map_memory(
2544 io_object_t connect,
2545 uint32_t type,
2546 task_t task,
2547 vm_address_t * mapAddr,
2548 vm_size_t * mapSize,
2549 uint32_t flags )
2550 {
2551 IOReturn err;
2552 mach_vm_address_t address;
2553 mach_vm_size_t size;
2554
2555 address = SCALAR64(*mapAddr);
2556 size = SCALAR64(*mapSize);
2557
2558 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
2559
2560 *mapAddr = SCALAR32(address);
2561 *mapSize = SCALAR32(size);
2562
2563 return (err);
2564 }
2565
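// removeMappingForDescriptor() walks the user client's 'mappings' set under
// gIOObjectPortLock, detaches the IOMemoryMap whose backing descriptor matches
// 'mem', and returns it holding an extra retain for the caller to release.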
2566 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
2567 {
2568 OSIterator * iter;
2569 IOMemoryMap * map = 0;
2570
2571 IOLockLock(gIOObjectPortLock);
2572
2573 iter = OSCollectionIterator::withCollection(mappings);
2574 if(iter)
2575 {
2576 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
2577 {
2578 if(mem == map->getMemoryDescriptor())
2579 {
2580 map->retain();
2581 mappings->removeObject(map);
2582 break;
2583 }
2584 }
2585 iter->release();
2586 }
2587
2588 IOLockUnlock(gIOObjectPortLock);
2589
2590 return (map);
2591 }
2592
2593 /* Routine io_connect_unmap_memory_from_task */
2594 kern_return_t is_io_connect_unmap_memory_from_task
2595 (
2596 io_connect_t connection,
2597 uint32_t memory_type,
2598 task_t from_task,
2599 mach_vm_address_t address)
2600 {
2601 IOReturn err;
2602 IOOptionBits options = 0;
2603 IOMemoryDescriptor * memory;
2604 IOMemoryMap * map;
2605
2606 CHECK( IOUserClient, connection, client );
2607
2608 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
2609
2610 if( memory && (kIOReturnSuccess == err)) {
2611
2612 options = (options & ~kIOMapUserOptionsMask)
2613 | kIOMapAnywhere | kIOMapReference;
2614
2615 map = memory->createMappingInTask( from_task, address, options );
2616 memory->release();
2617 if( map)
2618 {
2619 IOLockLock( gIOObjectPortLock);
2620 if( client->mappings)
2621 client->mappings->removeObject( map);
2622 IOLockUnlock( gIOObjectPortLock);
2623
2624 mach_port_name_t name = 0;
2625 if (from_task != current_task())
2626 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
2627 if (name)
2628 {
2629 map->userClientUnmap();
2630 err = iokit_mod_send_right( from_task, name, -2 );
2631 err = kIOReturnSuccess;
2632 }
2633 else
2634 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
2635 if (from_task == current_task())
2636 map->release();
2637 }
2638 else
2639 err = kIOReturnBadArgument;
2640 }
2641
2642 return( err );
2643 }
2644
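/* Routine io_connect_unmap_memory */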
2645 kern_return_t is_io_connect_unmap_memory(
2646 io_object_t connect,
2647 uint32_t type,
2648 task_t task,
2649 vm_address_t mapAddr )
2650 {
2651 IOReturn err;
2652 mach_vm_address_t address;
2653
2654 address = SCALAR64(mapAddr);
2655
2656 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
2657
2658 return (err);
2659 }
2660
2661
2662 /* Routine io_connect_add_client */
2663 kern_return_t is_io_connect_add_client(
2664 io_object_t connection,
2665 io_object_t connect_to)
2666 {
2667 CHECK( IOUserClient, connection, client );
2668 CHECK( IOUserClient, connect_to, to );
2669
2670 return( client->connectClient( to ) );
2671 }
2672
2673
2674 /* Routine io_connect_set_properties */
2675 kern_return_t is_io_connect_set_properties(
2676 io_object_t connection,
2677 io_buf_ptr_t properties,
2678 mach_msg_type_number_t propertiesCnt,
2679 kern_return_t * result)
2680 {
2681 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
2682 }
2683
2684
2685 /* Routine io_connect_method */
2686 kern_return_t is_io_connect_method
2687 (
2688 io_connect_t connection,
2689 uint32_t selector,
2690 io_scalar_inband64_t scalar_input,
2691 mach_msg_type_number_t scalar_inputCnt,
2692 io_struct_inband_t inband_input,
2693 mach_msg_type_number_t inband_inputCnt,
2694 mach_vm_address_t ool_input,
2695 mach_vm_size_t ool_input_size,
2696 io_scalar_inband64_t scalar_output,
2697 mach_msg_type_number_t *scalar_outputCnt,
2698 io_struct_inband_t inband_output,
2699 mach_msg_type_number_t *inband_outputCnt,
2700 mach_vm_address_t ool_output,
2701 mach_vm_size_t * ool_output_size
2702 )
2703 {
2704 CHECK( IOUserClient, connection, client );
2705
2706 IOExternalMethodArguments args;
2707 IOReturn ret;
2708 IOMemoryDescriptor * inputMD = 0;
2709 IOMemoryDescriptor * outputMD = 0;
2710
2711 bzero(&args.__reserved[0], sizeof(args.__reserved));
2712 args.version = kIOExternalMethodArgumentsCurrentVersion;
2713
2714 args.selector = selector;
2715
2716 args.asyncWakePort = MACH_PORT_NULL;
2717 args.asyncReference = 0;
2718 args.asyncReferenceCount = 0;
2719
2720 args.scalarInput = scalar_input;
2721 args.scalarInputCount = scalar_inputCnt;
2722 args.structureInput = inband_input;
2723 args.structureInputSize = inband_inputCnt;
2724
2725 if (ool_input)
2726 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2727 kIODirectionOut, current_task());
2728
2729 args.structureInputDescriptor = inputMD;
2730
2731 args.scalarOutput = scalar_output;
2732 args.scalarOutputCount = *scalar_outputCnt;
2733 args.structureOutput = inband_output;
2734 args.structureOutputSize = *inband_outputCnt;
2735
2736 if (ool_output)
2737 {
2738 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
2739 kIODirectionIn, current_task());
2740 }
2741
2742 args.structureOutputDescriptor = outputMD;
2743 args.structureOutputDescriptorSize = *ool_output_size;
2744
2745 ret = client->externalMethod( selector, &args );
2746
2747 *scalar_outputCnt = args.scalarOutputCount;
2748 *inband_outputCnt = args.structureOutputSize;
2749 *ool_output_size = args.structureOutputDescriptorSize;
2750
2751 if (inputMD)
2752 inputMD->release();
2753 if (outputMD)
2754 outputMD->release();
2755
2756 return (ret);
2757 }
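/*
 * is_io_connect_method() is the single kernel-side funnel for the Leopard and
 * later external method interface: scalars and small structs arrive inband in
 * the message, while large buffers arrive as out-of-line regions that are
 * wrapped in IOMemoryDescriptors before externalMethod() runs. A sketch of the
 * usual user-space caller (assuming IOKitLib's IOConnectCallMethod; not part
 * of this file):
 *
 *   uint64_t in[2] = { 1, 2 };
 *   uint64_t out[1];
 *   uint32_t outCnt = 1;
 *   size_t   structOutCnt = 0;
 *   kern_return_t kr = IOConnectCallMethod(connect, 0,  // selector 0 (hypothetical)
 *                          in, 2,                       // scalar input
 *                          NULL, 0,                     // struct input
 *                          out, &outCnt,                // scalar output
 *                          NULL, &structOutCnt);        // struct output
 */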
2758
2759 /* Routine io_connect_async_method */
2760 kern_return_t is_io_connect_async_method
2761 (
2762 io_connect_t connection,
2763 mach_port_t wake_port,
2764 io_async_ref64_t reference,
2765 mach_msg_type_number_t referenceCnt,
2766 uint32_t selector,
2767 io_scalar_inband64_t scalar_input,
2768 mach_msg_type_number_t scalar_inputCnt,
2769 io_struct_inband_t inband_input,
2770 mach_msg_type_number_t inband_inputCnt,
2771 mach_vm_address_t ool_input,
2772 mach_vm_size_t ool_input_size,
2773 io_scalar_inband64_t scalar_output,
2774 mach_msg_type_number_t *scalar_outputCnt,
2775 io_struct_inband_t inband_output,
2776 mach_msg_type_number_t *inband_outputCnt,
2777 mach_vm_address_t ool_output,
2778 mach_vm_size_t * ool_output_size
2779 )
2780 {
2781 CHECK( IOUserClient, connection, client );
2782
2783 IOExternalMethodArguments args;
2784 IOReturn ret;
2785 IOMemoryDescriptor * inputMD = 0;
2786 IOMemoryDescriptor * outputMD = 0;
2787
2788 bzero(&args.__reserved[0], sizeof(args.__reserved));
2789 args.version = kIOExternalMethodArgumentsCurrentVersion;
2790
2791 reference[0] = (io_user_reference_t) wake_port;
2792 if (vm_map_is_64bit(get_task_map(current_task())))
2793 reference[0] |= kIOUCAsync64Flag;
2794
2795 args.selector = selector;
2796
2797 args.asyncWakePort = wake_port;
2798 args.asyncReference = reference;
2799 args.asyncReferenceCount = referenceCnt;
2800
2801 args.scalarInput = scalar_input;
2802 args.scalarInputCount = scalar_inputCnt;
2803 args.structureInput = inband_input;
2804 args.structureInputSize = inband_inputCnt;
2805
2806 if (ool_input)
2807 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2808 kIODirectionOut, current_task());
2809
2810 args.structureInputDescriptor = inputMD;
2811
2812 args.scalarOutput = scalar_output;
2813 args.scalarOutputCount = *scalar_outputCnt;
2814 args.structureOutput = inband_output;
2815 args.structureOutputSize = *inband_outputCnt;
2816
2817 if (ool_output)
2818 {
2819 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
2820 kIODirectionIn, current_task());
2821 }
2822
2823 args.structureOutputDescriptor = outputMD;
2824 args.structureOutputDescriptorSize = *ool_output_size;
2825
2826 ret = client->externalMethod( selector, &args );
2827
2828 *inband_outputCnt = args.structureOutputSize;
2829 *ool_output_size = args.structureOutputDescriptorSize;
2830
2831 if (inputMD)
2832 inputMD->release();
2833 if (outputMD)
2834 outputMD->release();
2835
2836 return (ret);
2837 }
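/*
 * The async variant differs from is_io_connect_method() only in carrying a
 * wake port plus an async reference array; reference[0] is overwritten above
 * with the wake port (tagged with kIOUCAsync64Flag for 64-bit tasks) so the
 * completion path can later find the port to message. User space typically
 * enters through IOKitLib's IOConnectCallAsyncMethod(), passing a wake port
 * obtained from IONotificationPortGetMachPort().
 */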
2838
2839 /* Routine io_connect_method_scalarI_scalarO */
2840 kern_return_t is_io_connect_method_scalarI_scalarO(
2841 io_object_t connect,
2842 uint32_t index,
2843 io_scalar_inband_t input,
2844 mach_msg_type_number_t inputCount,
2845 io_scalar_inband_t output,
2846 mach_msg_type_number_t * outputCount )
2847 {
2848 IOReturn err;
2849 uint32_t i;
2850 io_scalar_inband64_t _input;
2851 io_scalar_inband64_t _output;
2852
2853 mach_msg_type_number_t struct_outputCnt = 0;
2854 mach_vm_size_t ool_output_size = 0;
2855
2856 for (i = 0; i < inputCount; i++)
2857 _input[i] = SCALAR64(input[i]);
2858
2859 err = is_io_connect_method(connect, index,
2860 _input, inputCount,
2861 NULL, 0,
2862 0, 0,
2863 _output, outputCount,
2864 NULL, &struct_outputCnt,
2865 0, &ool_output_size);
2866
2867 for (i = 0; i < *outputCount; i++)
2868 output[i] = SCALAR32(_output[i]);
2869
2870 return (err);
2871 }
2872
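// The shim_* functions below bridge pre-Leopard IOExternalMethod and
// IOExternalAsyncMethod tables onto the unified path above: each shim checks
// the caller's counts against the table's count0/count1, then spreads the
// arguments across the legacy six-parameter IOMethod signature, padding the
// unused slots with output pointers or zeros.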
2873 kern_return_t shim_io_connect_method_scalarI_scalarO(
2874 IOExternalMethod * method,
2875 IOService * object,
2876 const io_user_scalar_t * input,
2877 mach_msg_type_number_t inputCount,
2878 io_user_scalar_t * output,
2879 mach_msg_type_number_t * outputCount )
2880 {
2881 IOMethod func;
2882 io_scalar_inband_t _output;
2883 IOReturn err;
2884 err = kIOReturnBadArgument;
2885
2886 do {
2887
2888 if( inputCount != method->count0)
2889 {
2890 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
2891 continue;
2892 }
2893 if( *outputCount != method->count1)
2894 {
2895 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
2896 continue;
2897 }
2898
2899 func = method->func;
2900
2901 switch( inputCount) {
2902
2903 case 6:
2904 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2905 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
2906 break;
2907 case 5:
2908 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2909 ARG32(input[3]), ARG32(input[4]),
2910 &_output[0] );
2911 break;
2912 case 4:
2913 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2914 ARG32(input[3]),
2915 &_output[0], &_output[1] );
2916 break;
2917 case 3:
2918 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2919 &_output[0], &_output[1], &_output[2] );
2920 break;
2921 case 2:
2922 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
2923 &_output[0], &_output[1], &_output[2],
2924 &_output[3] );
2925 break;
2926 case 1:
2927 err = (object->*func)( ARG32(input[0]),
2928 &_output[0], &_output[1], &_output[2],
2929 &_output[3], &_output[4] );
2930 break;
2931 case 0:
2932 err = (object->*func)( &_output[0], &_output[1], &_output[2],
2933 &_output[3], &_output[4], &_output[5] );
2934 break;
2935
2936 default:
2937 IOLog("%s: Bad method table\n", object->getName());
2938 }
2939 }
2940 while( false);
2941
2942 uint32_t i;
2943 for (i = 0; i < *outputCount; i++)
2944 output[i] = SCALAR32(_output[i]);
2945
2946 return( err);
2947 }
2948
2949 /* Routine io_async_method_scalarI_scalarO */
2950 kern_return_t is_io_async_method_scalarI_scalarO(
2951 io_object_t connect,
2952 mach_port_t wake_port,
2953 io_async_ref_t reference,
2954 mach_msg_type_number_t referenceCnt,
2955 uint32_t index,
2956 io_scalar_inband_t input,
2957 mach_msg_type_number_t inputCount,
2958 io_scalar_inband_t output,
2959 mach_msg_type_number_t * outputCount )
2960 {
2961 IOReturn err;
2962 uint32_t i;
2963 io_scalar_inband64_t _input;
2964 io_scalar_inband64_t _output;
2965 io_async_ref64_t _reference;
2966
2967 for (i = 0; i < referenceCnt; i++)
2968 _reference[i] = REF64(reference[i]);
2969
2970 mach_msg_type_number_t struct_outputCnt = 0;
2971 mach_vm_size_t ool_output_size = 0;
2972
2973 for (i = 0; i < inputCount; i++)
2974 _input[i] = SCALAR64(input[i]);
2975
2976 err = is_io_connect_async_method(connect,
2977 wake_port, _reference, referenceCnt,
2978 index,
2979 _input, inputCount,
2980 NULL, 0,
2981 0, 0,
2982 _output, outputCount,
2983 NULL, &struct_outputCnt,
2984 0, &ool_output_size);
2985
2986 for (i = 0; i < *outputCount; i++)
2987 output[i] = SCALAR32(_output[i]);
2988
2989 return (err);
2990 }
2991 /* Routine io_async_method_scalarI_structureO */
2992 kern_return_t is_io_async_method_scalarI_structureO(
2993 io_object_t connect,
2994 mach_port_t wake_port,
2995 io_async_ref_t reference,
2996 mach_msg_type_number_t referenceCnt,
2997 uint32_t index,
2998 io_scalar_inband_t input,
2999 mach_msg_type_number_t inputCount,
3000 io_struct_inband_t output,
3001 mach_msg_type_number_t * outputCount )
3002 {
3003 uint32_t i;
3004 io_scalar_inband64_t _input;
3005 io_async_ref64_t _reference;
3006
3007 for (i = 0; i < referenceCnt; i++)
3008 _reference[i] = REF64(reference[i]);
3009
3010 mach_msg_type_number_t scalar_outputCnt = 0;
3011 mach_vm_size_t ool_output_size = 0;
3012
3013 for (i = 0; i < inputCount; i++)
3014 _input[i] = SCALAR64(input[i]);
3015
3016 return (is_io_connect_async_method(connect,
3017 wake_port, _reference, referenceCnt,
3018 index,
3019 _input, inputCount,
3020 NULL, 0,
3021 0, 0,
3022 NULL, &scalar_outputCnt,
3023 output, outputCount,
3024 0, &ool_output_size));
3025 }
3026
3027 /* Routine io_async_method_scalarI_structureI */
3028 kern_return_t is_io_async_method_scalarI_structureI(
3029 io_connect_t connect,
3030 mach_port_t wake_port,
3031 io_async_ref_t reference,
3032 mach_msg_type_number_t referenceCnt,
3033 uint32_t index,
3034 io_scalar_inband_t input,
3035 mach_msg_type_number_t inputCount,
3036 io_struct_inband_t inputStruct,
3037 mach_msg_type_number_t inputStructCount )
3038 {
3039 uint32_t i;
3040 io_scalar_inband64_t _input;
3041 io_async_ref64_t _reference;
3042
3043 for (i = 0; i < referenceCnt; i++)
3044 _reference[i] = REF64(reference[i]);
3045
3046 mach_msg_type_number_t scalar_outputCnt = 0;
3047 mach_msg_type_number_t inband_outputCnt = 0;
3048 mach_vm_size_t ool_output_size = 0;
3049
3050 for (i = 0; i < inputCount; i++)
3051 _input[i] = SCALAR64(input[i]);
3052
3053 return (is_io_connect_async_method(connect,
3054 wake_port, _reference, referenceCnt,
3055 index,
3056 _input, inputCount,
3057 inputStruct, inputStructCount,
3058 0, 0,
3059 NULL, &scalar_outputCnt,
3060 NULL, &inband_outputCnt,
3061 0, &ool_output_size));
3062 }
3063
3064 /* Routine io_async_method_structureI_structureO */
3065 kern_return_t is_io_async_method_structureI_structureO(
3066 io_object_t connect,
3067 mach_port_t wake_port,
3068 io_async_ref_t reference,
3069 mach_msg_type_number_t referenceCnt,
3070 uint32_t index,
3071 io_struct_inband_t input,
3072 mach_msg_type_number_t inputCount,
3073 io_struct_inband_t output,
3074 mach_msg_type_number_t * outputCount )
3075 {
3076 uint32_t i;
3077 mach_msg_type_number_t scalar_outputCnt = 0;
3078 mach_vm_size_t ool_output_size = 0;
3079 io_async_ref64_t _reference;
3080
3081 for (i = 0; i < referenceCnt; i++)
3082 _reference[i] = REF64(reference[i]);
3083
3084 return (is_io_connect_async_method(connect,
3085 wake_port, _reference, referenceCnt,
3086 index,
3087 NULL, 0,
3088 input, inputCount,
3089 0, 0,
3090 NULL, &scalar_outputCnt,
3091 output, outputCount,
3092 0, &ool_output_size));
3093 }
3094
3095
3096 kern_return_t shim_io_async_method_scalarI_scalarO(
3097 IOExternalAsyncMethod * method,
3098 IOService * object,
3099 mach_port_t asyncWakePort,
3100 io_user_reference_t * asyncReference,
3101 uint32_t asyncReferenceCount,
3102 const io_user_scalar_t * input,
3103 mach_msg_type_number_t inputCount,
3104 io_user_scalar_t * output,
3105 mach_msg_type_number_t * outputCount )
3106 {
3107 IOAsyncMethod func;
3108 uint32_t i;
3109 io_scalar_inband_t _output;
3110 IOReturn err;
3111 io_async_ref_t reference;
3112
3113 for (i = 0; i < asyncReferenceCount; i++)
3114 reference[i] = REF32(asyncReference[i]);
3115
3116 err = kIOReturnBadArgument;
3117
3118 do {
3119
3120 if( inputCount != method->count0)
3121 {
3122 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3123 continue;
3124 }
3125 if( *outputCount != method->count1)
3126 {
3127 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3128 continue;
3129 }
3130
3131 func = method->func;
3132
3133 switch( inputCount) {
3134
3135 case 6:
3136 err = (object->*func)( reference,
3137 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3138 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3139 break;
3140 case 5:
3141 err = (object->*func)( reference,
3142 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3143 ARG32(input[3]), ARG32(input[4]),
3144 &_output[0] );
3145 break;
3146 case 4:
3147 err = (object->*func)( reference,
3148 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3149 ARG32(input[3]),
3150 &_output[0], &_output[1] );
3151 break;
3152 case 3:
3153 err = (object->*func)( reference,
3154 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3155 &_output[0], &_output[1], &_output[2] );
3156 break;
3157 case 2:
3158 err = (object->*func)( reference,
3159 ARG32(input[0]), ARG32(input[1]),
3160 &_output[0], &_output[1], &_output[2],
3161 &_output[3] );
3162 break;
3163 case 1:
3164 err = (object->*func)( reference,
3165 ARG32(input[0]),
3166 &_output[0], &_output[1], &_output[2],
3167 &_output[3], &_output[4] );
3168 break;
3169 case 0:
3170 err = (object->*func)( reference,
3171 &_output[0], &_output[1], &_output[2],
3172 &_output[3], &_output[4], &_output[5] );
3173 break;
3174
3175 default:
3176 IOLog("%s: Bad method table\n", object->getName());
3177 }
3178 }
3179 while( false);
3180
3181 for (i = 0; i < *outputCount; i++)
3182 output[i] = SCALAR32(_output[i]);
3183
3184 return( err);
3185 }
3186
3187
3188 /* Routine io_connect_method_scalarI_structureO */
3189 kern_return_t is_io_connect_method_scalarI_structureO(
3190 io_object_t connect,
3191 uint32_t index,
3192 io_scalar_inband_t input,
3193 mach_msg_type_number_t inputCount,
3194 io_struct_inband_t output,
3195 mach_msg_type_number_t * outputCount )
3196 {
3197 uint32_t i;
3198 io_scalar_inband64_t _input;
3199
3200 mach_msg_type_number_t scalar_outputCnt = 0;
3201 mach_vm_size_t ool_output_size = 0;
3202
3203 for (i = 0; i < inputCount; i++)
3204 _input[i] = SCALAR64(input[i]);
3205
3206 return (is_io_connect_method(connect, index,
3207 _input, inputCount,
3208 NULL, 0,
3209 0, 0,
3210 NULL, &scalar_outputCnt,
3211 output, outputCount,
3212 0, &ool_output_size));
3213 }
3214
3215 kern_return_t shim_io_connect_method_scalarI_structureO(
3216
3217 IOExternalMethod * method,
3218 IOService * object,
3219 const io_user_scalar_t * input,
3220 mach_msg_type_number_t inputCount,
3221 io_struct_inband_t output,
3222 IOByteCount * outputCount )
3223 {
3224 IOMethod func;
3225 IOReturn err;
3226
3227 err = kIOReturnBadArgument;
3228
3229 do {
3230 if( inputCount != method->count0)
3231 {
3232 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3233 continue;
3234 }
3235 if( (kIOUCVariableStructureSize != method->count1)
3236 && (*outputCount != method->count1))
3237 {
3238 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3239 continue;
3240 }
3241
3242 func = method->func;
3243
3244 switch( inputCount) {
3245
3246 case 5:
3247 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3248 ARG32(input[3]), ARG32(input[4]),
3249 output );
3250 break;
3251 case 4:
3252 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3253 ARG32(input[3]),
3254 output, (void *)outputCount );
3255 break;
3256 case 3:
3257 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3258 output, (void *)outputCount, 0 );
3259 break;
3260 case 2:
3261 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3262 output, (void *)outputCount, 0, 0 );
3263 break;
3264 case 1:
3265 err = (object->*func)( ARG32(input[0]),
3266 output, (void *)outputCount, 0, 0, 0 );
3267 break;
3268 case 0:
3269 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
3270 break;
3271
3272 default:
3273 IOLog("%s: Bad method table\n", object->getName());
3274 }
3275 }
3276 while( false);
3277
3278 return( err);
3279 }
3280
3281
3282 kern_return_t shim_io_async_method_scalarI_structureO(
3283 IOExternalAsyncMethod * method,
3284 IOService * object,
3285 mach_port_t asyncWakePort,
3286 io_user_reference_t * asyncReference,
3287 uint32_t asyncReferenceCount,
3288 const io_user_scalar_t * input,
3289 mach_msg_type_number_t inputCount,
3290 io_struct_inband_t output,
3291 mach_msg_type_number_t * outputCount )
3292 {
3293 IOAsyncMethod func;
3294 uint32_t i;
3295 IOReturn err;
3296 io_async_ref_t reference;
3297
3298 for (i = 0; i < asyncReferenceCount; i++)
3299 reference[i] = REF32(asyncReference[i]);
3300
3301 err = kIOReturnBadArgument;
3302 do {
3303 if( inputCount != method->count0)
3304 {
3305 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3306 continue;
3307 }
3308 if( (kIOUCVariableStructureSize != method->count1)
3309 && (*outputCount != method->count1))
3310 {
3311 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3312 continue;
3313 }
3314
3315 func = method->func;
3316
3317 switch( inputCount) {
3318
3319 case 5:
3320 err = (object->*func)( reference,
3321 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3322 ARG32(input[3]), ARG32(input[4]),
3323 output );
3324 break;
3325 case 4:
3326 err = (object->*func)( reference,
3327 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3328 ARG32(input[3]),
3329 output, (void *)outputCount );
3330 break;
3331 case 3:
3332 err = (object->*func)( reference,
3333 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3334 output, (void *)outputCount, 0 );
3335 break;
3336 case 2:
3337 err = (object->*func)( reference,
3338 ARG32(input[0]), ARG32(input[1]),
3339 output, (void *)outputCount, 0, 0 );
3340 break;
3341 case 1:
3342 err = (object->*func)( reference,
3343 ARG32(input[0]),
3344 output, (void *)outputCount, 0, 0, 0 );
3345 break;
3346 case 0:
3347 err = (object->*func)( reference,
3348 output, (void *)outputCount, 0, 0, 0, 0 );
3349 break;
3350
3351 default:
3352 IOLog("%s: Bad method table\n", object->getName());
3353 }
3354 }
3355 while( false);
3356
3357 return( err);
3358 }
3359
3360 /* Routine io_connect_method_scalarI_structureI */
3361 kern_return_t is_io_connect_method_scalarI_structureI(
3362 io_connect_t connect,
3363 uint32_t index,
3364 io_scalar_inband_t input,
3365 mach_msg_type_number_t inputCount,
3366 io_struct_inband_t inputStruct,
3367 mach_msg_type_number_t inputStructCount )
3368 {
3369 uint32_t i;
3370 io_scalar_inband64_t _input;
3371
3372 mach_msg_type_number_t scalar_outputCnt = 0;
3373 mach_msg_type_number_t inband_outputCnt = 0;
3374 mach_vm_size_t ool_output_size = 0;
3375
3376 for (i = 0; i < inputCount; i++)
3377 _input[i] = SCALAR64(input[i]);
3378
3379 return (is_io_connect_method(connect, index,
3380 _input, inputCount,
3381 inputStruct, inputStructCount,
3382 0, 0,
3383 NULL, &scalar_outputCnt,
3384 NULL, &inband_outputCnt,
3385 0, &ool_output_size));
3386 }
3387
3388 kern_return_t shim_io_connect_method_scalarI_structureI(
3389 IOExternalMethod * method,
3390 IOService * object,
3391 const io_user_scalar_t * input,
3392 mach_msg_type_number_t inputCount,
3393 io_struct_inband_t inputStruct,
3394 mach_msg_type_number_t inputStructCount )
3395 {
3396 IOMethod func;
3397 IOReturn err = kIOReturnBadArgument;
3398
3399 do
3400 {
3401 if( (kIOUCVariableStructureSize != method->count0)
3402 && (inputCount != method->count0))
3403 {
3404 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3405 continue;
3406 }
3407 if( (kIOUCVariableStructureSize != method->count1)
3408 && (inputStructCount != method->count1))
3409 {
3410 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3411 continue;
3412 }
3413
3414 func = method->func;
3415
3416 switch( inputCount) {
3417
3418 case 5:
3419 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3420 ARG32(input[3]), ARG32(input[4]),
3421 inputStruct );
3422 break;
3423 case 4:
3424 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
3425 ARG32(input[3]),
3426 inputStruct, (void *)inputStructCount );
3427 break;
3428 case 3:
3429 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3430 inputStruct, (void *)inputStructCount,
3431 0 );
3432 break;
3433 case 2:
3434 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3435 inputStruct, (void *)inputStructCount,
3436 0, 0 );
3437 break;
3438 case 1:
3439 err = (object->*func)( ARG32(input[0]),
3440 inputStruct, (void *)inputStructCount,
3441 0, 0, 0 );
3442 break;
3443 case 0:
3444 err = (object->*func)( inputStruct, (void *)inputStructCount,
3445 0, 0, 0, 0 );
3446 break;
3447
3448 default:
3449 IOLog("%s: Bad method table\n", object->getName());
3450 }
3451 }
3452 while (false);
3453
3454 return( err);
3455 }
3456
3457 kern_return_t shim_io_async_method_scalarI_structureI(
3458 IOExternalAsyncMethod * method,
3459 IOService * object,
3460 mach_port_t asyncWakePort,
3461 io_user_reference_t * asyncReference,
3462 uint32_t asyncReferenceCount,
3463 const io_user_scalar_t * input,
3464 mach_msg_type_number_t inputCount,
3465 io_struct_inband_t inputStruct,
3466 mach_msg_type_number_t inputStructCount )
3467 {
3468 IOAsyncMethod func;
3469 uint32_t i;
3470 IOReturn err = kIOReturnBadArgument;
3471 io_async_ref_t reference;
3472
3473 for (i = 0; i < asyncReferenceCount; i++)
3474 reference[i] = REF32(asyncReference[i]);
3475
3476 do
3477 {
3478 if( (kIOUCVariableStructureSize != method->count0)
3479 && (inputCount != method->count0))
3480 {
3481 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3482 continue;
3483 }
3484 if( (kIOUCVariableStructureSize != method->count1)
3485 && (inputStructCount != method->count1))
3486 {
3487 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3488 continue;
3489 }
3490
3491 func = method->func;
3492
3493 switch( inputCount) {
3494
3495 case 5:
3496 err = (object->*func)( reference,
3497 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3498 ARG32(input[3]), ARG32(input[4]),
3499 inputStruct );
3500 break;
3501 case 4:
3502 err = (object->*func)( reference,
3503 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3504 ARG32(input[3]),
3505 inputStruct, (void *)inputStructCount );
3506 break;
3507 case 3:
3508 err = (object->*func)( reference,
3509 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3510 inputStruct, (void *)inputStructCount,
3511 0 );
3512 break;
3513 case 2:
3514 err = (object->*func)( reference,
3515 ARG32(input[0]), ARG32(input[1]),
3516 inputStruct, (void *)inputStructCount,
3517 0, 0 );
3518 break;
3519 case 1:
3520 err = (object->*func)( reference,
3521 ARG32(input[0]),
3522 inputStruct, (void *)inputStructCount,
3523 0, 0, 0 );
3524 break;
3525 case 0:
3526 err = (object->*func)( reference,
3527 inputStruct, (void *)inputStructCount,
3528 0, 0, 0, 0 );
3529 break;
3530
3531 default:
3532 IOLog("%s: Bad method table\n", object->getName());
3533 }
3534 }
3535 while (false);
3536
3537 return( err);
3538 }
3539
3540 /* Routine io_connect_method_structureI_structureO */
3541 kern_return_t is_io_connect_method_structureI_structureO(
3542 io_object_t connect,
3543 uint32_t index,
3544 io_struct_inband_t input,
3545 mach_msg_type_number_t inputCount,
3546 io_struct_inband_t output,
3547 mach_msg_type_number_t * outputCount )
3548 {
3549 mach_msg_type_number_t scalar_outputCnt = 0;
3550 mach_vm_size_t ool_output_size = 0;
3551
3552 return (is_io_connect_method(connect, index,
3553 NULL, 0,
3554 input, inputCount,
3555 0, 0,
3556 NULL, &scalar_outputCnt,
3557 output, outputCount,
3558 0, &ool_output_size));
3559 }
3560
3561 kern_return_t shim_io_connect_method_structureI_structureO(
3562 IOExternalMethod * method,
3563 IOService * object,
3564 io_struct_inband_t input,
3565 mach_msg_type_number_t inputCount,
3566 io_struct_inband_t output,
3567 IOByteCount * outputCount )
3568 {
3569 IOMethod func;
3570 IOReturn err = kIOReturnBadArgument;
3571
3572 do
3573 {
3574 if( (kIOUCVariableStructureSize != method->count0)
3575 && (inputCount != method->count0))
3576 {
3577 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3578 continue;
3579 }
3580 if( (kIOUCVariableStructureSize != method->count1)
3581 && (*outputCount != method->count1))
3582 {
3583 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3584 continue;
3585 }
3586
3587 func = method->func;
3588
3589 if( method->count1) {
3590 if( method->count0) {
3591 err = (object->*func)( input, output,
3592 (void *)inputCount, outputCount, 0, 0 );
3593 } else {
3594 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
3595 }
3596 } else {
3597 err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 );
3598 }
3599 }
3600 while( false);
3601
3602
3603 return( err);
3604 }
3605
3606 kern_return_t shim_io_async_method_structureI_structureO(
3607 IOExternalAsyncMethod * method,
3608 IOService * object,
3609 mach_port_t asyncWakePort,
3610 io_user_reference_t * asyncReference,
3611 uint32_t asyncReferenceCount,
3612 io_struct_inband_t input,
3613 mach_msg_type_number_t inputCount,
3614 io_struct_inband_t output,
3615 mach_msg_type_number_t * outputCount )
3616 {
3617 IOAsyncMethod func;
3618 uint32_t i;
3619 IOReturn err;
3620 io_async_ref_t reference;
3621
3622 for (i = 0; i < asyncReferenceCount; i++)
3623 reference[i] = REF32(asyncReference[i]);
3624
3625 err = kIOReturnBadArgument;
3626 do
3627 {
3628 if( (kIOUCVariableStructureSize != method->count0)
3629 && (inputCount != method->count0))
3630 {
3631 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3632 continue;
3633 }
3634 if( (kIOUCVariableStructureSize != method->count1)
3635 && (*outputCount != method->count1))
3636 {
3637 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3638 continue;
3639 }
3640
3641 func = method->func;
3642
3643 if( method->count1) {
3644 if( method->count0) {
3645 err = (object->*func)( reference,
3646 input, output,
3647 (void *)inputCount, outputCount, 0, 0 );
3648 } else {
3649 err = (object->*func)( reference,
3650 output, outputCount, 0, 0, 0, 0 );
3651 }
3652 } else {
3653 err = (object->*func)( reference,
3654 input, (void *)inputCount, 0, 0, 0, 0 );
3655 }
3656 }
3657 while( false);
3658
3659 return( err);
3660 }
3661
3662 /* Routine io_make_matching */
3663 kern_return_t is_io_make_matching(
3664 mach_port_t master_port,
3665 uint32_t type,
3666 uint32_t options,
3667 io_struct_inband_t input,
3668 mach_msg_type_number_t inputCount,
3669 io_string_t matching )
3670 {
3671 OSSerialize * s;
3672 IOReturn err = kIOReturnSuccess;
3673 OSDictionary * dict;
3674
3675 if( master_port != master_device_port)
3676 return( kIOReturnNotPrivileged);
3677
3678 switch( type) {
3679
3680 case kIOServiceMatching:
3681 dict = IOService::serviceMatching( gIOServiceKey );
3682 break;
3683
3684 case kIOBSDNameMatching:
3685 dict = IOBSDNameMatching( (const char *) input );
3686 break;
3687
3688 case kIOOFPathMatching:
3689 dict = IOOFPathMatching( (const char *) input,
3690 matching, sizeof( io_string_t));
3691 break;
3692
3693 default:
3694 dict = 0;
3695 }
3696
3697 if( !dict)
3698 return( kIOReturnUnsupported);
3699
3700 do {
3701 s = OSSerialize::withCapacity(4096);
3702 if( !s) {
3703 err = kIOReturnNoMemory;
3704 continue;
3705 }
3706 s->clearText();
3707 if( !dict->serialize( s )) {
3708 err = kIOReturnUnsupported;
3709 continue;
3710 }
3711
3712 if( s->getLength() > sizeof( io_string_t)) {
3713 err = kIOReturnNoMemory;
3714 continue;
3715 } else
3716 strlcpy(matching, s->text(), sizeof(io_string_t));
3717 }
3718 while( false);
3719
3720 if( s)
3721 s->release();
3722 if( dict)
3723 dict->release();
3724
3725 return( err);
3726 }
3727
3728 /* Routine io_catalog_send_data */
3729 kern_return_t is_io_catalog_send_data(
3730 mach_port_t master_port,
3731 uint32_t flag,
3732 io_buf_ptr_t inData,
3733 mach_msg_type_number_t inDataCount,
3734 kern_return_t * result)
3735 {
3736 OSObject * obj = 0;
3737 vm_offset_t data;
3738 kern_return_t kr = kIOReturnError;
3739
3740 //printf("io_catalog_send_data called. flag: %d\n", flag);
3741
3742 if( master_port != master_device_port)
3743 return kIOReturnNotPrivileged;
3744
3745 if( (flag != kIOCatalogRemoveKernelLinker &&
3746 flag != kIOCatalogKextdActive &&
3747 flag != kIOCatalogKextdFinishedLaunching) &&
3748 ( !inData || !inDataCount) )
3749 {
3750 return kIOReturnBadArgument;
3751 }
3752
3753 if (inData) {
3754 vm_map_offset_t map_data;
3755
3756 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
3757 data = CAST_DOWN(vm_offset_t, map_data);
3758
3759 if( kr != KERN_SUCCESS)
3760 return kr;
3761
3762 // must return success after vm_map_copyout() succeeds
3763
3764 if( inDataCount ) {
3765 obj = (OSObject *)OSUnserializeXML((const char *)data);
3766 vm_deallocate( kernel_map, data, inDataCount );
3767 if( !obj) {
3768 *result = kIOReturnNoMemory;
3769 return( KERN_SUCCESS);
3770 }
3771 }
3772 }
3773
3774 switch ( flag ) {
3775 case kIOCatalogAddDrivers:
3776 case kIOCatalogAddDriversNoMatch: {
3777 OSArray * array;
3778
3779 array = OSDynamicCast(OSArray, obj);
3780 if ( array ) {
3781 if ( !gIOCatalogue->addDrivers( array ,
3782 flag == kIOCatalogAddDrivers) ) {
3783 kr = kIOReturnError;
3784 }
3785 }
3786 else {
3787 kr = kIOReturnBadArgument;
3788 }
3789 }
3790 break;
3791
3792 case kIOCatalogRemoveDrivers:
3793 case kIOCatalogRemoveDriversNoMatch: {
3794 OSDictionary * dict;
3795
3796 dict = OSDynamicCast(OSDictionary, obj);
3797 if ( dict ) {
3798 if ( !gIOCatalogue->removeDrivers( dict,
3799 flag == kIOCatalogRemoveDrivers ) ) {
3800 kr = kIOReturnError;
3801 }
3802 }
3803 else {
3804 kr = kIOReturnBadArgument;
3805 }
3806 }
3807 break;
3808
3809 case kIOCatalogStartMatching: {
3810 OSDictionary * dict;
3811
3812 dict = OSDynamicCast(OSDictionary, obj);
3813 if ( dict ) {
3814 if ( !gIOCatalogue->startMatching( dict ) ) {
3815 kr = kIOReturnError;
3816 }
3817 }
3818 else {
3819 kr = kIOReturnBadArgument;
3820 }
3821 }
3822 break;
3823
3824 case kIOCatalogRemoveKernelLinker:
3825 kr = KERN_NOT_SUPPORTED;
3826 break;
3827
3828 case kIOCatalogKextdActive:
3829 #if !NO_KEXTD
3830 OSKext::setKextdActive();
3831
3832 /* Dump all nonloaded startup extensions; kextd will now send them
3833 * down on request.
3834 */
3835 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
3836 #endif
3837 kr = kIOReturnSuccess;
3838 break;
3839
3840 case kIOCatalogKextdFinishedLaunching: {
3841 #if !NO_KEXTD
3842 static bool clearedBusy = false;
3843
3844 if (!clearedBusy) {
3845 IOService * serviceRoot = IOService::getServiceRoot();
3846 if (serviceRoot) {
3847 serviceRoot->adjustBusy(-1);
3848 clearedBusy = true;
3849 }
3850 }
3851 #endif
3852 kr = kIOReturnSuccess;
3853 }
3854 break;
3855
3856 default:
3857 kr = kIOReturnBadArgument;
3858 break;
3859 }
3860
3861 if (obj) obj->release();
3862
3863 *result = kr;
3864 return( KERN_SUCCESS);
3865 }
3866
3867 /* Routine io_catalog_terminate */
3868 kern_return_t is_io_catalog_terminate(
3869 mach_port_t master_port,
3870 uint32_t flag,
3871 io_name_t name )
3872 {
3873 kern_return_t kr;
3874
3875 if( master_port != master_device_port )
3876 return kIOReturnNotPrivileged;
3877
3878 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3879 kIOClientPrivilegeAdministrator );
3880 if( kIOReturnSuccess != kr)
3881 return( kr );
3882
3883 switch ( flag ) {
3884 case kIOCatalogServiceTerminate:
3885 OSIterator * iter;
3886 IOService * service;
3887
3888 iter = IORegistryIterator::iterateOver(gIOServicePlane,
3889 kIORegistryIterateRecursively);
3890 if ( !iter )
3891 return kIOReturnNoMemory;
3892
3893 do {
3894 iter->reset();
3895 while( (service = (IOService *)iter->getNextObject()) ) {
3896 if( service->metaCast(name)) {
3897 if ( !service->terminate( kIOServiceRequired
3898 | kIOServiceSynchronous) ) {
3899 kr = kIOReturnUnsupported;
3900 break;
3901 }
3902 }
3903 }
3904 } while( !service && !iter->isValid());
3905 iter->release();
3906 break;
3907
3908 case kIOCatalogModuleUnload:
3909 case kIOCatalogModuleTerminate:
3910 kr = gIOCatalogue->terminateDriversForModule(name,
3911 flag == kIOCatalogModuleUnload);
3912 break;
3913
3914 default:
3915 kr = kIOReturnBadArgument;
3916 break;
3917 }
3918
3919 return( kr );
3920 }
3921
3922 /* Routine io_catalog_get_data */
3923 kern_return_t is_io_catalog_get_data(
3924 mach_port_t master_port,
3925 uint32_t flag,
3926 io_buf_ptr_t *outData,
3927 mach_msg_type_number_t *outDataCount)
3928 {
3929 kern_return_t kr = kIOReturnSuccess;
3930 OSSerialize * s;
3931
3932 if( master_port != master_device_port)
3933 return kIOReturnNotPrivileged;
3934
3935 //printf("io_catalog_get_data called. flag: %d\n", flag);
3936
3937 s = OSSerialize::withCapacity(4096);
3938 if ( !s )
3939 return kIOReturnNoMemory;
3940
3941 s->clearText();
3942
3943 kr = gIOCatalogue->serializeData(flag, s);
3944
3945 if ( kr == kIOReturnSuccess ) {
3946 vm_offset_t data;
3947 vm_map_copy_t copy;
3948 vm_size_t size;
3949
3950 size = s->getLength();
3951 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
3952 if ( kr == kIOReturnSuccess ) {
3953 bcopy(s->text(), (void *)data, size);
3954 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
3955 (vm_map_size_t)size, true, &copy);
3956 *outData = (char *)copy;
3957 *outDataCount = size;
3958 }
3959 }
3960
3961 s->release();
3962
3963 return kr;
3964 }
3965
3966 /* Routine io_catalog_get_gen_count */
3967 kern_return_t is_io_catalog_get_gen_count(
3968 mach_port_t master_port,
3969 uint32_t *genCount)
3970 {
3971 if( master_port != master_device_port)
3972 return kIOReturnNotPrivileged;
3973
3974 //printf("io_catalog_get_gen_count called.\n");
3975
3976 if ( !genCount )
3977 return kIOReturnBadArgument;
3978
3979 *genCount = gIOCatalogue->getGenerationCount();
3980
3981 return kIOReturnSuccess;
3982 }
3983
3984 /* Routine io_catalog_module_loaded.
3985 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
3986 */
3987 kern_return_t is_io_catalog_module_loaded(
3988 mach_port_t master_port,
3989 io_name_t name)
3990 {
3991 if( master_port != master_device_port)
3992 return kIOReturnNotPrivileged;
3993
3994 //printf("io_catalog_module_loaded called. name %s\n", name);
3995
3996 if ( !name )
3997 return kIOReturnBadArgument;
3998
3999 gIOCatalogue->moduleHasLoaded(name);
4000
4001 return kIOReturnSuccess;
4002 }
4003
4004 kern_return_t is_io_catalog_reset(
4005 mach_port_t master_port,
4006 uint32_t flag)
4007 {
4008 if( master_port != master_device_port)
4009 return kIOReturnNotPrivileged;
4010
4011 switch ( flag ) {
4012 case kIOCatalogResetDefault:
4013 gIOCatalogue->reset();
4014 break;
4015
4016 default:
4017 return kIOReturnBadArgument;
4018 }
4019
4020 return kIOReturnSuccess;
4021 }
4022
4023 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4024 {
4025 kern_return_t result = kIOReturnBadArgument;
4026 IOUserClient *userClient;
4027
4028 if ((userClient = OSDynamicCast(IOUserClient,
4029 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4030 IOExternalTrap *trap;
4031 IOService *target = NULL;
4032
4033 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4034
4035 if (trap && target) {
4036 IOTrap func;
4037
4038 func = trap->func;
4039
4040 if (func) {
4041 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4042 }
4043 }
4044
4045 userClient->release();
4046 }
4047
4048 return result;
4049 }
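/*
 * iokit_user_client_trap() is reached through a Mach trap rather than MIG, so
 * it avoids message construction entirely; the user client supplies the target
 * and function via getTargetAndTrapForIndex(). A sketch of the usual caller
 * (assuming IOKitLib's IOConnectTrap6 wrapper; not part of this file):
 *
 *   kern_return_t kr = IOConnectTrap6(connect, 0,   // trap index 0 (hypothetical)
 *                                     p1, p2, p3, p4, p5, p6);
 *
 * The kernel dispatches (target->*func)(p1 .. p6) only when both a trap entry
 * and a target exist for that index; otherwise kIOReturnBadArgument comes back.
 */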
4050
4051 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4052 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4053 {
4054 IOReturn err;
4055 IOService * object;
4056 IOByteCount structureOutputSize;
4057
4058 if (dispatch)
4059 {
4060 uint32_t count;
4061 count = dispatch->checkScalarInputCount;
4062 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4063 {
4064 return (kIOReturnBadArgument);
4065 }
4066
4067 count = dispatch->checkStructureInputSize;
4068 if ((kIOUCVariableStructureSize != count)
4069 && (count != ((args->structureInputDescriptor)
4070 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4071 {
4072 return (kIOReturnBadArgument);
4073 }
4074
4075 count = dispatch->checkScalarOutputCount;
4076 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4077 {
4078 return (kIOReturnBadArgument);
4079 }
4080
4081 count = dispatch->checkStructureOutputSize;
4082 if ((kIOUCVariableStructureSize != count)
4083 && (count != ((args->structureOutputDescriptor)
4084 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4085 {
4086 return (kIOReturnBadArgument);
4087 }
4088
4089 if (dispatch->function)
4090 err = (*dispatch->function)(target, reference, args);
4091 else
4092 err = kIOReturnNoCompletion; /* implementor can dispatch */
4093
4094 return (err);
4095 }
4096
4097
4098 // pre-Leopard APIs don't do ool structs
4099 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4100 {
4101 err = kIOReturnIPCError;
4102 return (err);
4103 }
4104
4105 structureOutputSize = args->structureOutputSize;
4106
4107 if (args->asyncWakePort)
4108 {
4109 IOExternalAsyncMethod * method;
4110
4111 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
4112 return (kIOReturnUnsupported);
4113
4114 if (kIOUCForegroundOnly & method->flags)
4115 {
4116 bool isBg;
4117 kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
4118
4119 if ((KERN_SUCCESS == kr) && isBg)
4120 return (kIOReturnNotPermitted);
4121 }
4122
4123 switch (method->flags & kIOUCTypeMask)
4124 {
4125 case kIOUCScalarIStructI:
4126 err = shim_io_async_method_scalarI_structureI( method, object,
4127 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4128 args->scalarInput, args->scalarInputCount,
4129 (char *)args->structureInput, args->structureInputSize );
4130 break;
4131
4132 case kIOUCScalarIScalarO:
4133 err = shim_io_async_method_scalarI_scalarO( method, object,
4134 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4135 args->scalarInput, args->scalarInputCount,
4136 args->scalarOutput, &args->scalarOutputCount );
4137 break;
4138
4139 case kIOUCScalarIStructO:
4140 err = shim_io_async_method_scalarI_structureO( method, object,
4141 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4142 args->scalarInput, args->scalarInputCount,
4143 (char *) args->structureOutput, &args->structureOutputSize );
4144 break;
4145
4146
4147 case kIOUCStructIStructO:
4148 err = shim_io_async_method_structureI_structureO( method, object,
4149 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4150 (char *)args->structureInput, args->structureInputSize,
4151 (char *) args->structureOutput, &args->structureOutputSize );
4152 break;
4153
4154 default:
4155 err = kIOReturnBadArgument;
4156 break;
4157 }
4158 }
4159 else
4160 {
4161 IOExternalMethod * method;
4162
4163 if( !(method = getTargetAndMethodForIndex(&object, selector)) )
4164 return (kIOReturnUnsupported);
4165
4166 if (kIOUCForegroundOnly & method->flags)
4167 {
4168 bool isBg;
4169 kern_return_t kr = IOUCIsBackgroundTask(current_task(), &isBg);
4170
4171 if ((KERN_SUCCESS == kr) && isBg)
4172 return (kIOReturnNotPermitted);
4173 }
4174
4175 switch (method->flags & kIOUCTypeMask)
4176 {
4177 case kIOUCScalarIStructI:
4178 err = shim_io_connect_method_scalarI_structureI( method, object,
4179 args->scalarInput, args->scalarInputCount,
4180 (char *) args->structureInput, args->structureInputSize );
4181 break;
4182
4183 case kIOUCScalarIScalarO:
4184 err = shim_io_connect_method_scalarI_scalarO( method, object,
4185 args->scalarInput, args->scalarInputCount,
4186 args->scalarOutput, &args->scalarOutputCount );
4187 break;
4188
4189 case kIOUCScalarIStructO:
4190 err = shim_io_connect_method_scalarI_structureO( method, object,
4191 args->scalarInput, args->scalarInputCount,
4192 (char *) args->structureOutput, &structureOutputSize );
4193 break;
4194
4195
4196 case kIOUCStructIStructO:
4197 err = shim_io_connect_method_structureI_structureO( method, object,
4198 (char *) args->structureInput, args->structureInputSize,
4199 (char *) args->structureOutput, &structureOutputSize );
4200 break;
4201
4202 default:
4203 err = kIOReturnBadArgument;
4204 break;
4205 }
4206 }
4207
4208 args->structureOutputSize = structureOutputSize;
4209
4210 return (err);
4211 }
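/*
 * externalMethod() first honors an IOExternalMethodDispatch entry when the
 * subclass supplies one (strictly checking the four count/size fields), and
 * otherwise falls back to the legacy getTargetAndMethodForIndex() /
 * getAsyncTargetAndMethodForIndex() shims above. A minimal sketch of the
 * dispatch-table pattern a subclass might use (all names hypothetical):
 *
 *   static IOReturn myMethod(OSObject * target, void * ref,
 *                            IOExternalMethodArguments * args)
 *   {
 *       args->scalarOutput[0] = args->scalarInput[0] + 1;
 *       return kIOReturnSuccess;
 *   }
 *
 *   static const IOExternalMethodDispatch sMethods[1] = {
 *       { &myMethod, 1, 0, 1, 0 },  // 1 scalar in, no struct in, 1 scalar out, no struct out
 *   };
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference)
 *   {
 *       if (selector < 1) {
 *           dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */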
4212
4213
4214 }; /* extern "C" */
4215
4216 #if __LP64__
4217 OSMetaClassDefineReservedUnused(IOUserClient, 0);
4218 OSMetaClassDefineReservedUnused(IOUserClient, 1);
4219 #else
4220 OSMetaClassDefineReservedUsed(IOUserClient, 0);
4221 OSMetaClassDefineReservedUsed(IOUserClient, 1);
4222 #endif
4223 OSMetaClassDefineReservedUnused(IOUserClient, 2);
4224 OSMetaClassDefineReservedUnused(IOUserClient, 3);
4225 OSMetaClassDefineReservedUnused(IOUserClient, 4);
4226 OSMetaClassDefineReservedUnused(IOUserClient, 5);
4227 OSMetaClassDefineReservedUnused(IOUserClient, 6);
4228 OSMetaClassDefineReservedUnused(IOUserClient, 7);
4229 OSMetaClassDefineReservedUnused(IOUserClient, 8);
4230 OSMetaClassDefineReservedUnused(IOUserClient, 9);
4231 OSMetaClassDefineReservedUnused(IOUserClient, 10);
4232 OSMetaClassDefineReservedUnused(IOUserClient, 11);
4233 OSMetaClassDefineReservedUnused(IOUserClient, 12);
4234 OSMetaClassDefineReservedUnused(IOUserClient, 13);
4235 OSMetaClassDefineReservedUnused(IOUserClient, 14);
4236 OSMetaClassDefineReservedUnused(IOUserClient, 15);
4237