[apple/xnu.git] xnu-1456.1.26 / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <libkern/OSDebug.h>
41 #include <sys/proc.h>
42
43 #include <IOKit/assert.h>
44
45 #include "IOServicePrivate.h"
46 #include "IOKitKernelInternal.h"
47
48 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
49 #define SCALAR32(x) ((uint32_t )x)
50 #define ARG32(x) ((void *)SCALAR32(x))
51 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
52 #define REF32(x) ((int)(x))
53
54 enum
55 {
56 kIOUCAsync0Flags = 3ULL,
57 kIOUCAsync64Flag = 1ULL
58 };
59
60 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
61
62 // definitions we should get from osfmk
63
64 //typedef struct ipc_port * ipc_port_t;
65 typedef natural_t ipc_kobject_type_t;
66
67 #define IKOT_IOKIT_SPARE 27
68 #define IKOT_IOKIT_CONNECT 29
69 #define IKOT_IOKIT_OBJECT 30
70
71 extern "C" {
72
73 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
74 ipc_kobject_type_t type );
75
76 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
77
78 extern mach_port_name_t iokit_make_send_right( task_t task,
79 io_object_t obj, ipc_kobject_type_t type );
80
81 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
82
83 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
84
85 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
86
87 extern ipc_port_t master_device_port;
88
89 extern void iokit_retain_port( ipc_port_t port );
90 extern void iokit_release_port( ipc_port_t port );
91 extern void iokit_release_port_send( ipc_port_t port );
92
93 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
94
95 #include <mach/mach_traps.h>
96 #include <vm/vm_map.h>
97
98 } /* extern "C" */
99
100
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
104
105 class IOMachPort : public OSObject
106 {
107 OSDeclareDefaultStructors(IOMachPort)
108 public:
109 OSObject * object;
110 ipc_port_t port;
111 UInt32 mscount;
112 UInt8 holdDestroy;
113
114 static IOMachPort * portForObject( OSObject * obj,
115 ipc_kobject_type_t type );
116 static bool noMoreSendersForObject( OSObject * obj,
117 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
118 static void releasePortForObject( OSObject * obj,
119 ipc_kobject_type_t type );
120 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
121
122 static OSDictionary * dictForType( ipc_kobject_type_t type );
123
124 static mach_port_name_t makeSendRightForTask( task_t task,
125 io_object_t obj, ipc_kobject_type_t type );
126
127 virtual void free();
128 };
129
130 #define super OSObject
131 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
132
133 static IOLock * gIOObjectPortLock;
134
135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
136
137 // not in dictForType() for debugging ease
138 static OSDictionary * gIOObjectPorts;
139 static OSDictionary * gIOConnectPorts;
140
141 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
142 {
143 OSDictionary ** dict;
144
145 if( IKOT_IOKIT_OBJECT == type )
146 dict = &gIOObjectPorts;
147 else if( IKOT_IOKIT_CONNECT == type )
148 dict = &gIOConnectPorts;
149 else
150 return( 0 );
151
152 if( 0 == *dict)
153 *dict = OSDictionary::withCapacity( 1 );
154
155 return( *dict );
156 }
157
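// Look up or create the IOMachPort wrapping an OSObject for the given kobject
// type, bumping its make-send count; returns an instance the caller owns a
// reference to, or NULL if allocation or port creation fails.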
158 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
159 ipc_kobject_type_t type )
160 {
161 IOMachPort * inst = 0;
162 OSDictionary * dict;
163
164 IOTakeLock( gIOObjectPortLock);
165
166 do {
167
168 dict = dictForType( type );
169 if( !dict)
170 continue;
171
172 if( (inst = (IOMachPort *)
173 dict->getObject( (const OSSymbol *) obj ))) {
174 inst->mscount++;
175 inst->retain();
176 continue;
177 }
178
179 inst = new IOMachPort;
180 if( inst && !inst->init()) {
181 inst = 0;
182 continue;
183 }
184
185 inst->port = iokit_alloc_object_port( obj, type );
186 if( inst->port) {
187 // retains obj
188 dict->setObject( (const OSSymbol *) obj, inst );
189 inst->mscount++;
190
191 } else {
192 inst->release();
193 inst = 0;
194 }
195
196 } while( false );
197
198 IOUnlock( gIOObjectPortLock);
199
200 return( inst );
201 }
202
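// Handle a Mach no-senders notification for an object's port: if no new send
// rights were made since the notification was generated, remove the port entry
// and report it destroyed; otherwise hand back the current make-send count so
// the caller can re-request the notification.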
203 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
204 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
205 {
206 OSDictionary * dict;
207 IOMachPort * machPort;
208 bool destroyed = true;
209
210 IOTakeLock( gIOObjectPortLock);
211
212 if( (dict = dictForType( type ))) {
213 obj->retain();
214
215 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
216 if( machPort) {
217 destroyed = (machPort->mscount <= *mscount);
218 if( destroyed)
219 dict->removeObject( (const OSSymbol *) obj );
220 else
221 *mscount = machPort->mscount;
222 }
223 obj->release();
224 }
225
226 IOUnlock( gIOObjectPortLock);
227
228 return( destroyed );
229 }
230
231 void IOMachPort::releasePortForObject( OSObject * obj,
232 ipc_kobject_type_t type )
233 {
234 OSDictionary * dict;
235 IOMachPort * machPort;
236
237 IOTakeLock( gIOObjectPortLock);
238
239 if( (dict = dictForType( type ))) {
240 obj->retain();
241 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
242 if( machPort && !machPort->holdDestroy)
243 dict->removeObject( (const OSSymbol *) obj );
244 obj->release();
245 }
246
247 IOUnlock( gIOObjectPortLock);
248 }
249
250 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
251 {
252 OSDictionary * dict;
253 IOMachPort * machPort;
254
255 IOLockLock( gIOObjectPortLock );
256
257 if( (dict = dictForType( type ))) {
258 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
259 if( machPort)
260 machPort->holdDestroy = true;
261 }
262
263 IOLockUnlock( gIOObjectPortLock );
264 }
265
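// Tear down the user-visible ports for an object. For an IOUserClient that
// still has live memory mappings, the connect port is re-keyed to the mappings
// array (via iokit_switch_object_port) before the client's own entry is
// removed.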
266 void IOUserClient::destroyUserReferences( OSObject * obj )
267 {
268 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
269
270 // panther, 3160200
271 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
272
273 OSDictionary * dict;
274
275 IOTakeLock( gIOObjectPortLock);
276 obj->retain();
277
278 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
279 {
280 IOMachPort * port;
281 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
282 if (port)
283 {
284 IOUserClient * uc;
285 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
286 {
287 dict->setObject((const OSSymbol *) uc->mappings, port);
288 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
289
290 uc->mappings->release();
291 uc->mappings = 0;
292 }
293 dict->removeObject( (const OSSymbol *) obj );
294 }
295 }
296 obj->release();
297 IOUnlock( gIOObjectPortLock);
298 }
299
300 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
301 io_object_t obj, ipc_kobject_type_t type )
302 {
303 return( iokit_make_send_right( task, obj, type ));
304 }
305
306 void IOMachPort::free( void )
307 {
308 if( port)
309 iokit_destroy_object_port( port );
310 super::free();
311 }
312
313 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
314
315 class IOUserNotification : public OSIterator
316 {
317 OSDeclareDefaultStructors(IOUserNotification)
318
319 IONotifier * holdNotify;
320 IOLock * lock;
321
322 public:
323
324 virtual bool init( void );
325 virtual void free();
326
327 virtual void setNotification( IONotifier * obj );
328
329 virtual void reset();
330 virtual bool isValid();
331 };
332
333 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
334
335 extern "C" {
336
337 // functions called from osfmk/device/iokit_rpc.c
338
339 void
340 iokit_add_reference( io_object_t obj )
341 {
342 if( obj)
343 obj->retain();
344 }
345
346 void
347 iokit_remove_reference( io_object_t obj )
348 {
349 if( obj)
350 obj->release();
351 }
352
353 ipc_port_t
354 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
355 {
356 IOMachPort * machPort;
357 ipc_port_t port;
358
359 if( (machPort = IOMachPort::portForObject( obj, type ))) {
360
361 port = machPort->port;
362 if( port)
363 iokit_retain_port( port );
364
365 machPort->release();
366
367 } else
368 port = NULL;
369
370 return( port );
371 }
372
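// No-more-senders handling dispatched from osfmk: user clients are told the
// client died, memory maps are told the owning task died, and user
// notifications are disarmed; returns kIOReturnNotReady while senders remain.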
373 kern_return_t
374 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
375 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
376 {
377 IOUserClient * client;
378 IOMemoryMap * map;
379 IOUserNotification * notify;
380
381 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
382 return( kIOReturnNotReady );
383
384 if( IKOT_IOKIT_CONNECT == type)
385 {
386 if( (client = OSDynamicCast( IOUserClient, obj )))
387 client->clientDied();
388 }
389 else if( IKOT_IOKIT_OBJECT == type)
390 {
391 if( (map = OSDynamicCast( IOMemoryMap, obj )))
392 map->taskDied();
393 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
394 notify->setNotification( 0 );
395 }
396
397 return( kIOReturnSuccess );
398 }
399
400 }; /* extern "C" */
401
402 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
403
404 class IOServiceUserNotification : public IOUserNotification
405 {
406 OSDeclareDefaultStructors(IOServiceUserNotification)
407
408 struct PingMsg {
409 mach_msg_header_t msgHdr;
410 OSNotificationHeader64 notifyHeader;
411 };
412
413 enum { kMaxOutstanding = 1024 };
414
415 PingMsg * pingMsg;
416 vm_size_t msgSize;
417 OSArray * newSet;
418 OSObject * lastEntry;
419 bool armed;
420
421 public:
422
423 virtual bool init( mach_port_t port, natural_t type,
424 void * reference, vm_size_t referenceSize,
425 bool clientIs64 );
426 virtual void free();
427
428 static bool _handler( void * target,
429 void * ref, IOService * newService, IONotifier * notifier );
430 virtual bool handler( void * ref, IOService * newService );
431
432 virtual OSObject * getNextObject();
433 };
434
435 class IOServiceMessageUserNotification : public IOUserNotification
436 {
437 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
438
439 struct PingMsg {
440 mach_msg_header_t msgHdr;
441 mach_msg_body_t msgBody;
442 mach_msg_port_descriptor_t ports[1];
443 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
444 };
445
446 PingMsg * pingMsg;
447 vm_size_t msgSize;
448 uint8_t clientIs64;
449 int owningPID;
450
451 public:
452
453 virtual bool init( mach_port_t port, natural_t type,
454 void * reference, vm_size_t referenceSize,
455 vm_size_t extraSize,
456 bool clientIs64 );
457
458 virtual void free();
459
460 static IOReturn _handler( void * target, void * ref,
461 UInt32 messageType, IOService * provider,
462 void * messageArgument, vm_size_t argSize );
463 virtual IOReturn handler( void * ref,
464 UInt32 messageType, IOService * provider,
465 void * messageArgument, vm_size_t argSize );
466
467 virtual OSObject * getNextObject();
468 };
469
470 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
471
472 #undef super
473 #define super OSIterator
474 OSDefineMetaClass( IOUserNotification, OSIterator )
475 OSDefineAbstractStructors( IOUserNotification, OSIterator )
476
477 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
478
479 bool IOUserNotification::init( void )
480 {
481 if( !super::init())
482 return( false );
483
484 lock = IOLockAlloc();
485 if( !lock)
486 return( false );
487
488 return( true );
489 }
490
491 void IOUserNotification::free( void )
492 {
493 if( holdNotify)
494 holdNotify->remove();
495 // can't be in handler now
496
497 if( lock)
498 IOLockFree( lock );
499
500 super::free();
501 }
502
503
504 void IOUserNotification::setNotification( IONotifier * notify )
505 {
506 IONotifier * previousNotify;
507
508 IOLockLock( gIOObjectPortLock);
509
510 previousNotify = holdNotify;
511 holdNotify = notify;
512
513 IOLockUnlock( gIOObjectPortLock);
514
515 if( previousNotify)
516 previousNotify->remove();
517 }
518
519 void IOUserNotification::reset()
520 {
521 // ?
522 }
523
524 bool IOUserNotification::isValid()
525 {
526 return( true );
527 }
528
529 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
530
531 #undef super
532 #define super IOUserNotification
533 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
534
535 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
536
537 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
538 void * reference, vm_size_t referenceSize,
539 bool clientIs64 )
540 {
541 newSet = OSArray::withCapacity( 1 );
542 if( !newSet)
543 return( false );
544
545 if (referenceSize > sizeof(OSAsyncReference64))
546 return( false );
547
548 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
549 pingMsg = (PingMsg *) IOMalloc( msgSize);
550 if( !pingMsg)
551 return( false );
552
553 bzero( pingMsg, msgSize);
554
555 pingMsg->msgHdr.msgh_remote_port = port;
556 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
557 MACH_MSG_TYPE_COPY_SEND /*remote*/,
558 MACH_MSG_TYPE_MAKE_SEND /*local*/);
559 pingMsg->msgHdr.msgh_size = msgSize;
560 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
561
562 pingMsg->notifyHeader.size = 0;
563 pingMsg->notifyHeader.type = type;
564 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
565
566 return( super::init() );
567 }
568
569 void IOServiceUserNotification::free( void )
570 {
571 PingMsg * _pingMsg;
572 vm_size_t _msgSize;
573 OSArray * _newSet;
574 OSObject * _lastEntry;
575
576 _pingMsg = pingMsg;
577 _msgSize = msgSize;
578 _lastEntry = lastEntry;
579 _newSet = newSet;
580
581 super::free();
582
583 if( _pingMsg && _msgSize)
584 IOFree( _pingMsg, _msgSize);
585
586 if( _lastEntry)
587 _lastEntry->release();
588
589 if( _newSet)
590 _newSet->release();
591 }
592
593 bool IOServiceUserNotification::_handler( void * target,
594 void * ref, IOService * newService, IONotifier * notifier )
595 {
596 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
597 }
598
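// Queue a newly matched or terminated service (bounded by kMaxOutstanding)
// and, if the notification is armed and the queue was empty, send a single
// ping message to the registered port; terminated services have their object
// port marked hold-destroy so it survives until user space collects it.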
599 bool IOServiceUserNotification::handler( void * ref,
600 IOService * newService )
601 {
602 unsigned int count;
603 kern_return_t kr;
604 ipc_port_t port = NULL;
605 bool sendPing = false;
606
607 IOTakeLock( lock );
608
609 count = newSet->getCount();
610 if( count < kMaxOutstanding) {
611
612 newSet->setObject( newService );
613 if( (sendPing = (armed && (0 == count))))
614 armed = false;
615 }
616
617 IOUnlock( lock );
618
619 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
620 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
621
622 if( sendPing) {
623 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
624 pingMsg->msgHdr.msgh_local_port = port;
625 else
626 pingMsg->msgHdr.msgh_local_port = NULL;
627
628 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
629 pingMsg->msgHdr.msgh_size);
630 if( port)
631 iokit_release_port( port );
632
633 if( KERN_SUCCESS != kr)
634 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
635 }
636
637 return( true );
638 }
639
640 OSObject * IOServiceUserNotification::getNextObject()
641 {
642 unsigned int count;
643 OSObject * result;
644
645 IOTakeLock( lock );
646
647 if( lastEntry)
648 lastEntry->release();
649
650 count = newSet->getCount();
651 if( count ) {
652 result = newSet->getObject( count - 1 );
653 result->retain();
654 newSet->removeObject( count - 1);
655 } else {
656 result = 0;
657 armed = true;
658 }
659 lastEntry = result;
660
661 IOUnlock( lock );
662
663 return( result );
664 }
665
666 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
667
668 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
669
670 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
671
672 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
673 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
674 bool client64 )
675 {
676
677 if (referenceSize > sizeof(OSAsyncReference64))
678 return( false );
679
680 clientIs64 = client64;
681
682 owningPID = proc_selfpid();
683
684 extraSize += sizeof(IOServiceInterestContent64);
685 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
686 pingMsg = (PingMsg *) IOMalloc( msgSize);
687 if( !pingMsg)
688 return( false );
689
690 bzero( pingMsg, msgSize);
691
692 pingMsg->msgHdr.msgh_remote_port = port;
693 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
694 | MACH_MSGH_BITS(
695 MACH_MSG_TYPE_COPY_SEND /*remote*/,
696 MACH_MSG_TYPE_MAKE_SEND /*local*/);
697 pingMsg->msgHdr.msgh_size = msgSize;
698 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
699
700 pingMsg->msgBody.msgh_descriptor_count = 1;
701
702 pingMsg->ports[0].name = 0;
703 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
704 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
705
706 pingMsg->notifyHeader.size = extraSize;
707 pingMsg->notifyHeader.type = type;
708 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
709
710 return( super::init() );
711 }
712
713 void IOServiceMessageUserNotification::free( void )
714 {
715 PingMsg * _pingMsg;
716 vm_size_t _msgSize;
717
718 _pingMsg = pingMsg;
719 _msgSize = msgSize;
720
721 super::free();
722
723 if( _pingMsg && _msgSize)
724 IOFree( _pingMsg, _msgSize);
725 }
726
727 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
728 UInt32 messageType, IOService * provider,
729 void * argument, vm_size_t argSize )
730 {
731 return( ((IOServiceMessageUserNotification *) target)->handler(
732 ref, messageType, provider, argument, argSize));
733 }
734
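// Deliver an interest notification: the message argument is copied inline into
// the preallocated pingMsg (a zero argSize passes the argument pointer itself
// by value; larger payloads are clamped to kIOUserNotifyMaxMessageSize) and
// the provider's object port is attached as a port descriptor.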
735 IOReturn IOServiceMessageUserNotification::handler( void * ref,
736 UInt32 messageType, IOService * provider,
737 void * messageArgument, vm_size_t argSize )
738 {
739 kern_return_t kr;
740 ipc_port_t thisPort, providerPort;
741 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
742 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
743 // == pingMsg->notifyHeader.content;
744
745 if (kIOMessageCopyClientID == messageType)
746 {
747 *((void **) messageArgument) = IOCopyLogNameForPID(owningPID);
748 return (kIOReturnSuccess);
749 }
750
751 data->messageType = messageType;
752
753 if( argSize == 0)
754 {
755 data->messageArgument[0] = (io_user_reference_t) messageArgument;
756 if (clientIs64)
757 argSize = sizeof(data->messageArgument[0]);
758 else
759 {
760 data->messageArgument[0] |= (data->messageArgument[0] << 32);
761 argSize = sizeof(uint32_t);
762 }
763 }
764 else
765 {
766 if( argSize > kIOUserNotifyMaxMessageSize)
767 argSize = kIOUserNotifyMaxMessageSize;
768 bcopy( messageArgument, data->messageArgument, argSize );
769 }
770 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
771 + sizeof( IOServiceInterestContent64 )
772 - sizeof( data->messageArgument)
773 + argSize;
774
775 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
776 pingMsg->ports[0].name = providerPort;
777 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
778 pingMsg->msgHdr.msgh_local_port = thisPort;
779 kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr,
780 pingMsg->msgHdr.msgh_size);
781 if( thisPort)
782 iokit_release_port( thisPort );
783 if( providerPort)
784 iokit_release_port( providerPort );
785
786 if( KERN_SUCCESS != kr)
787 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
788
789 return( kIOReturnSuccess );
790 }
791
792 OSObject * IOServiceMessageUserNotification::getNextObject()
793 {
794 return( 0 );
795 }
796
797 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
798
799 #undef super
800 #define super IOService
801 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
802
803 void IOUserClient::initialize( void )
804 {
805 gIOObjectPortLock = IOLockAlloc();
806
807 assert( gIOObjectPortLock );
808 }
809
810 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
811 mach_port_t wakePort,
812 void *callback, void *refcon)
813 {
814 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
815 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
816 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
817 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
818 }
819
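// OSAsyncReference64 layout: the reserved slot carries the wake port combined
// with any low flag bits (kIOUCAsync0Flags) already present in the reference,
// followed by the user callback address and its refcon.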
820 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
821 mach_port_t wakePort,
822 mach_vm_address_t callback, io_user_reference_t refcon)
823 {
824 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
825 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
826 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
827 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
828 }
829
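// Return a retained dictionary for the console user session matching 'uid'
// from the registry root's IOConsoleUsers array, or NULL if none matches.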
830 static OSDictionary * CopyConsoleUser(UInt32 uid)
831 {
832 OSArray * array;
833 OSDictionary * user = 0;
834
835 if ((array = OSDynamicCast(OSArray,
836 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
837 {
838 for (unsigned int idx = 0;
839 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
840 idx++) {
841 OSNumber * num;
842
843 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
844 && (uid == num->unsigned32BitValue())) {
845 user->retain();
846 break;
847 }
848 }
849 array->release();
850 }
851 return user;
852 }
853
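// Privilege check against the caller's task security token: administrator
// means uid 0; local/console user is validated against the IOConsoleUsers
// registry property; the secure-console variant takes an IOUCProcessToken and
// additionally matches its pid against the session's secure input pid.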
854 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
855 const char * privilegeName )
856 {
857 kern_return_t kr;
858 security_token_t token;
859 mach_msg_type_number_t count;
860 task_t task;
861 OSDictionary * user;
862 bool secureConsole;
863
864 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
865 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
866 task = (task_t)((IOUCProcessToken *)securityToken)->token;
867 else
868 task = (task_t)securityToken;
869
870 count = TASK_SECURITY_TOKEN_COUNT;
871 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
872
873 if (KERN_SUCCESS != kr)
874 {}
875 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
876 sizeof(kIOClientPrivilegeAdministrator))) {
877 if (0 != token.val[0])
878 kr = kIOReturnNotPrivileged;
879 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
880 sizeof(kIOClientPrivilegeLocalUser))) {
881 user = CopyConsoleUser(token.val[0]);
882 if ( user )
883 user->release();
884 else
885 kr = kIOReturnNotPrivileged;
886 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
887 sizeof(kIOClientPrivilegeConsoleUser))) {
888 user = CopyConsoleUser(token.val[0]);
889 if ( user ) {
890 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
891 kr = kIOReturnNotPrivileged;
892 else if ( secureConsole ) {
893 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
894 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
895 kr = kIOReturnNotPrivileged;
896 }
897 user->release();
898 }
899 else
900 kr = kIOReturnNotPrivileged;
901 } else
902 kr = kIOReturnUnsupported;
903
904 return (kr);
905 }
906
907 bool IOUserClient::init()
908 {
909 if( getPropertyTable())
910 return true;
911 else
912 return super::init();
913 }
914
915 bool IOUserClient::init(OSDictionary * dictionary)
916 {
917 if( getPropertyTable())
918 return true;
919 else
920 return super::init(dictionary);
921 }
922
923 bool IOUserClient::initWithTask(task_t owningTask,
924 void * securityID,
925 UInt32 type )
926 {
927 if( getPropertyTable())
928 return true;
929 else
930 return super::init();
931 }
932
933 bool IOUserClient::initWithTask(task_t owningTask,
934 void * securityID,
935 UInt32 type,
936 OSDictionary * properties )
937 {
938 bool ok;
939
940 ok = super::init( properties );
941 ok &= initWithTask( owningTask, securityID, type );
942
943 return( ok );
944 }
945
946 void IOUserClient::free()
947 {
948 if( mappings)
949 mappings->release();
950
951 super::free();
952 }
953
954 IOReturn IOUserClient::clientDied( void )
955 {
956 return( clientClose());
957 }
958
959 IOReturn IOUserClient::clientClose( void )
960 {
961 return( kIOReturnUnsupported );
962 }
963
964 IOService * IOUserClient::getService( void )
965 {
966 return( 0 );
967 }
968
969 IOReturn IOUserClient::registerNotificationPort(
970 mach_port_t /* port */,
971 UInt32 /* type */,
972 UInt32 /* refCon */)
973 {
974 return( kIOReturnUnsupported);
975 }
976
977 IOReturn IOUserClient::registerNotificationPort(
978 mach_port_t port,
979 UInt32 type,
980 io_user_reference_t refCon)
981 {
982 return (registerNotificationPort(port, type, (UInt32) refCon));
983 }
984
985 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
986 semaphore_t * semaphore )
987 {
988 return( kIOReturnUnsupported);
989 }
990
991 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
992 {
993 return( kIOReturnUnsupported);
994 }
995
996 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
997 IOOptionBits * options,
998 IOMemoryDescriptor ** memory )
999 {
1000 return( kIOReturnUnsupported);
1001 }
1002
1003 #if !__LP64__
1004 IOMemoryMap * IOUserClient::mapClientMemory(
1005 IOOptionBits type,
1006 task_t task,
1007 IOOptionBits mapFlags,
1008 IOVirtualAddress atAddress )
1009 {
1010 return (NULL);
1011 }
1012 #endif
1013
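// Map the memory a subclass exposes via clientMemoryForType() into the given
// task; caller-supplied map flags replace the user-option bits of the options
// returned by the subclass.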
1014 IOMemoryMap * IOUserClient::mapClientMemory64(
1015 IOOptionBits type,
1016 task_t task,
1017 IOOptionBits mapFlags,
1018 mach_vm_address_t atAddress )
1019 {
1020 IOReturn err;
1021 IOOptionBits options = 0;
1022 IOMemoryDescriptor * memory;
1023 IOMemoryMap * map = 0;
1024
1025 err = clientMemoryForType( (UInt32) type, &options, &memory );
1026
1027 if( memory && (kIOReturnSuccess == err)) {
1028
1029 options = (options & ~kIOMapUserOptionsMask)
1030 | (mapFlags & kIOMapUserOptionsMask);
1031 map = memory->createMappingInTask( task, atAddress, options );
1032 memory->release();
1033 }
1034
1035 return( map );
1036 }
1037
1038 IOReturn IOUserClient::exportObjectToClient(task_t task,
1039 OSObject *obj, io_object_t *clientObj)
1040 {
1041 mach_port_name_t name;
1042
1043 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1044 assert( name );
1045
1046 *(mach_port_name_t *)clientObj = name;
1047 return kIOReturnSuccess;
1048 }
1049
1050 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1051 {
1052 return( 0 );
1053 }
1054
1055 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1056 {
1057 return( 0 );
1058 }
1059
1060 IOExternalMethod * IOUserClient::
1061 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1062 {
1063 IOExternalMethod *method = getExternalMethodForIndex(index);
1064
1065 if (method)
1066 *targetP = (IOService *) method->object;
1067
1068 return method;
1069 }
1070
1071 IOExternalAsyncMethod * IOUserClient::
1072 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1073 {
1074 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1075
1076 if (method)
1077 *targetP = (IOService *) method->object;
1078
1079 return method;
1080 }
1081
1082 IOExternalTrap * IOUserClient::
1083 getExternalTrapForIndex(UInt32 index)
1084 {
1085 return NULL;
1086 }
1087
1088 IOExternalTrap * IOUserClient::
1089 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1090 {
1091 IOExternalTrap *trap = getExternalTrapForIndex(index);
1092
1093 if (trap) {
1094 *targetP = trap->object;
1095 }
1096
1097 return trap;
1098 }
1099
1100 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1101 {
1102 mach_port_t port;
1103 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1104
1105 if (MACH_PORT_NULL != port)
1106 iokit_release_port_send(port);
1107
1108 return (kIOReturnSuccess);
1109 }
1110
1111 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1112 {
1113 if (MACH_PORT_NULL != port)
1114 iokit_release_port_send(port);
1115
1116 return (kIOReturnSuccess);
1117 }
1118
1119 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1120 IOReturn result, void *args[], UInt32 numArgs)
1121 {
1122 OSAsyncReference64 reference64;
1123 io_user_reference_t args64[kMaxAsyncArgs];
1124 unsigned int idx;
1125
1126 if (numArgs > kMaxAsyncArgs)
1127 return kIOReturnMessageTooLarge;
1128
1129 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1130 reference64[idx] = REF64(reference[idx]);
1131
1132 for (idx = 0; idx < numArgs; idx++)
1133 args64[idx] = REF64(args[idx]);
1134
1135 return (sendAsyncResult64(reference64, result, args64, numArgs));
1136 }
1137
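// Send an async completion to the reply port recorded by setAsyncReference64
// (a null port is a no-op). kIOUCAsync64Flag in the reserved slot selects the
// 64-bit message layout; otherwise the reference and arguments are narrowed to
// 32 bits.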
1138 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1139 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1140 {
1141 struct ReplyMsg
1142 {
1143 mach_msg_header_t msgHdr;
1144 union
1145 {
1146 struct
1147 {
1148 OSNotificationHeader notifyHdr;
1149 IOAsyncCompletionContent asyncContent;
1150 uint32_t args[kMaxAsyncArgs];
1151 } msg32;
1152 struct
1153 {
1154 OSNotificationHeader64 notifyHdr;
1155 IOAsyncCompletionContent asyncContent;
1156 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1157 } msg64;
1158 } m;
1159 };
1160 ReplyMsg replyMsg;
1161 mach_port_t replyPort;
1162 kern_return_t kr;
1163
1164 // If no reply port, do nothing.
1165 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1166 if (replyPort == MACH_PORT_NULL)
1167 return kIOReturnSuccess;
1168
1169 if (numArgs > kMaxAsyncArgs)
1170 return kIOReturnMessageTooLarge;
1171
1172 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1173 0 /*local*/);
1174 replyMsg.msgHdr.msgh_remote_port = replyPort;
1175 replyMsg.msgHdr.msgh_local_port = 0;
1176 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1177 if (kIOUCAsync64Flag & reference[0])
1178 {
1179 replyMsg.msgHdr.msgh_size =
1180 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1181 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1182 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1183 + numArgs * sizeof(io_user_reference_t);
1184 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1185 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1186
1187 replyMsg.m.msg64.asyncContent.result = result;
1188 if (numArgs)
1189 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1190 }
1191 else
1192 {
1193 unsigned int idx;
1194
1195 replyMsg.msgHdr.msgh_size =
1196 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1197 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1198
1199 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1200 + numArgs * sizeof(uint32_t);
1201 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1202
1203 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1204 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1205
1206 replyMsg.m.msg32.asyncContent.result = result;
1207
1208 for (idx = 0; idx < numArgs; idx++)
1209 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1210 }
1211
1212 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1213 replyMsg.msgHdr.msgh_size);
1214 if( KERN_SUCCESS != kr)
1215 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1216 return kr;
1217 }
1218
1219
1220 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1221
1222 extern "C" {
1223
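// CHECK declares a local of the requested class and type-checks the incoming
// MIG object with OSDynamicCast, failing the call with kIOReturnBadArgument on
// a mismatch.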
1224 #define CHECK(cls,obj,out) \
1225 cls * out; \
1226 if( !(out = OSDynamicCast( cls, obj))) \
1227 return( kIOReturnBadArgument )
1228
1229 /* Routine io_object_get_class */
1230 kern_return_t is_io_object_get_class(
1231 io_object_t object,
1232 io_name_t className )
1233 {
1234 const OSMetaClass* my_obj = NULL;
1235
1236 if( !object)
1237 return( kIOReturnBadArgument );
1238
1239 my_obj = object->getMetaClass();
1240 if (!my_obj) {
1241 return (kIOReturnNotFound);
1242 }
1243
1244 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1245 return( kIOReturnSuccess );
1246 }
1247
1248 /* Routine io_object_get_superclass */
1249 kern_return_t is_io_object_get_superclass(
1250 mach_port_t master_port,
1251 io_name_t obj_name,
1252 io_name_t class_name)
1253 {
1254 const OSMetaClass* my_obj = NULL;
1255 const OSMetaClass* superclass = NULL;
1256 const OSSymbol *my_name = NULL;
1257 const char *my_cstr = NULL;
1258
1259 if (!obj_name || !class_name)
1260 return (kIOReturnBadArgument);
1261
1262 if( master_port != master_device_port)
1263 return( kIOReturnNotPrivileged);
1264
1265 my_name = OSSymbol::withCString(obj_name);
1266
1267 if (my_name) {
1268 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1269 my_name->release();
1270 }
1271 if (my_obj) {
1272 superclass = my_obj->getSuperClass();
1273 }
1274
1275 if (!superclass) {
1276 return( kIOReturnNotFound );
1277 }
1278
1279 my_cstr = superclass->getClassName();
1280
1281 if (my_cstr) {
1282 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1283 return( kIOReturnSuccess );
1284 }
1285 return (kIOReturnNotFound);
1286 }
1287
1288 /* Routine io_object_get_bundle_identifier */
1289 kern_return_t is_io_object_get_bundle_identifier(
1290 mach_port_t master_port,
1291 io_name_t obj_name,
1292 io_name_t bundle_name)
1293 {
1294 const OSMetaClass* my_obj = NULL;
1295 const OSSymbol *my_name = NULL;
1296 const OSSymbol *identifier = NULL;
1297 const char *my_cstr = NULL;
1298
1299 if (!obj_name || !bundle_name)
1300 return (kIOReturnBadArgument);
1301
1302 if( master_port != master_device_port)
1303 return( kIOReturnNotPrivileged);
1304
1305 my_name = OSSymbol::withCString(obj_name);
1306
1307 if (my_name) {
1308 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1309 my_name->release();
1310 }
1311
1312 if (my_obj) {
1313 identifier = my_obj->getKmodName();
1314 }
1315 if (!identifier) {
1316 return( kIOReturnNotFound );
1317 }
1318
1319 my_cstr = identifier->getCStringNoCopy();
1320 if (my_cstr) {
1321 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1322 return( kIOReturnSuccess );
1323 }
1324
1325 return (kIOReturnBadArgument);
1326 }
1327
1328 /* Routine io_object_conforms_to */
1329 kern_return_t is_io_object_conforms_to(
1330 io_object_t object,
1331 io_name_t className,
1332 boolean_t *conforms )
1333 {
1334 if( !object)
1335 return( kIOReturnBadArgument );
1336
1337 *conforms = (0 != object->metaCast( className ));
1338 return( kIOReturnSuccess );
1339 }
1340
1341 /* Routine io_object_get_retain_count */
1342 kern_return_t is_io_object_get_retain_count(
1343 io_object_t object,
1344 uint32_t *retainCount )
1345 {
1346 if( !object)
1347 return( kIOReturnBadArgument );
1348
1349 *retainCount = object->getRetainCount();
1350 return( kIOReturnSuccess );
1351 }
1352
1353 /* Routine io_iterator_next */
1354 kern_return_t is_io_iterator_next(
1355 io_object_t iterator,
1356 io_object_t *object )
1357 {
1358 OSObject * obj;
1359
1360 CHECK( OSIterator, iterator, iter );
1361
1362 obj = iter->getNextObject();
1363 if( obj) {
1364 obj->retain();
1365 *object = obj;
1366 return( kIOReturnSuccess );
1367 } else
1368 return( kIOReturnNoDevice );
1369 }
1370
1371 /* Routine io_iterator_reset */
1372 kern_return_t is_io_iterator_reset(
1373 io_object_t iterator )
1374 {
1375 CHECK( OSIterator, iterator, iter );
1376
1377 iter->reset();
1378
1379 return( kIOReturnSuccess );
1380 }
1381
1382 /* Routine io_iterator_is_valid */
1383 kern_return_t is_io_iterator_is_valid(
1384 io_object_t iterator,
1385 boolean_t *is_valid )
1386 {
1387 CHECK( OSIterator, iterator, iter );
1388
1389 *is_valid = iter->isValid();
1390
1391 return( kIOReturnSuccess );
1392 }
1393
1394 /* Routine io_service_match_property_table */
1395 kern_return_t is_io_service_match_property_table(
1396 io_service_t _service,
1397 io_string_t matching,
1398 boolean_t *matches )
1399 {
1400 CHECK( IOService, _service, service );
1401
1402 kern_return_t kr;
1403 OSObject * obj;
1404 OSDictionary * dict;
1405
1406 obj = OSUnserializeXML( matching );
1407
1408 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1409 *matches = service->passiveMatch( dict );
1410 kr = kIOReturnSuccess;
1411 } else
1412 kr = kIOReturnBadArgument;
1413
1414 if( obj)
1415 obj->release();
1416
1417 return( kr );
1418 }
1419
1420 /* Routine io_service_match_property_table_ool */
1421 kern_return_t is_io_service_match_property_table_ool(
1422 io_object_t service,
1423 io_buf_ptr_t matching,
1424 mach_msg_type_number_t matchingCnt,
1425 kern_return_t *result,
1426 boolean_t *matches )
1427 {
1428 kern_return_t kr;
1429 vm_offset_t data;
1430 vm_map_offset_t map_data;
1431
1432 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1433 data = CAST_DOWN(vm_offset_t, map_data);
1434
1435 if( KERN_SUCCESS == kr) {
1436 // must return success after vm_map_copyout() succeeds
1437 *result = is_io_service_match_property_table( service,
1438 (char *) data, matches );
1439 vm_deallocate( kernel_map, data, matchingCnt );
1440 }
1441
1442 return( kr );
1443 }
1444
1445 /* Routine io_service_get_matching_services */
1446 kern_return_t is_io_service_get_matching_services(
1447 mach_port_t master_port,
1448 io_string_t matching,
1449 io_iterator_t *existing )
1450 {
1451 kern_return_t kr;
1452 OSObject * obj;
1453 OSDictionary * dict;
1454
1455 if( master_port != master_device_port)
1456 return( kIOReturnNotPrivileged);
1457
1458 obj = OSUnserializeXML( matching );
1459
1460 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1461 *existing = IOService::getMatchingServices( dict );
1462 kr = kIOReturnSuccess;
1463 } else
1464 kr = kIOReturnBadArgument;
1465
1466 if( obj)
1467 obj->release();
1468
1469 return( kr );
1470 }
1471
1472 /* Routine io_service_get_matching_services_ool */
1473 kern_return_t is_io_service_get_matching_services_ool(
1474 mach_port_t master_port,
1475 io_buf_ptr_t matching,
1476 mach_msg_type_number_t matchingCnt,
1477 kern_return_t *result,
1478 io_object_t *existing )
1479 {
1480 kern_return_t kr;
1481 vm_offset_t data;
1482 vm_map_offset_t map_data;
1483
1484 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1485 data = CAST_DOWN(vm_offset_t, map_data);
1486
1487 if( KERN_SUCCESS == kr) {
1488 // must return success after vm_map_copyout() succeeds
1489 *result = is_io_service_get_matching_services( master_port,
1490 (char *) data, existing );
1491 vm_deallocate( kernel_map, data, matchingCnt );
1492 }
1493
1494 return( kr );
1495 }
1496
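// Common backend for the io_service_add_notification variants: maps the
// notification type symbol to a user message type, wraps the caller's port and
// async reference in an IOServiceUserNotification, and registers it with
// IOService::addMatchingNotification.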
1497 static kern_return_t internal_io_service_add_notification(
1498 mach_port_t master_port,
1499 io_name_t notification_type,
1500 io_string_t matching,
1501 mach_port_t port,
1502 void * reference,
1503 vm_size_t referenceSize,
1504 bool client64,
1505 io_object_t * notification )
1506 {
1507 IOServiceUserNotification * userNotify = 0;
1508 IONotifier * notify = 0;
1509 const OSSymbol * sym;
1510 OSDictionary * dict;
1511 IOReturn err;
1512 unsigned long int userMsgType;
1513
1514
1515 if( master_port != master_device_port)
1516 return( kIOReturnNotPrivileged);
1517
1518 do {
1519 err = kIOReturnNoResources;
1520
1521 if( !(sym = OSSymbol::withCString( notification_type )))
1522 err = kIOReturnNoResources;
1523
1524 if( !(dict = OSDynamicCast( OSDictionary,
1525 OSUnserializeXML( matching )))) {
1526 err = kIOReturnBadArgument;
1527 continue;
1528 }
1529
1530 if( (sym == gIOPublishNotification)
1531 || (sym == gIOFirstPublishNotification))
1532 userMsgType = kIOServicePublishNotificationType;
1533 else if( (sym == gIOMatchedNotification)
1534 || (sym == gIOFirstMatchNotification))
1535 userMsgType = kIOServiceMatchedNotificationType;
1536 else if( sym == gIOTerminatedNotification)
1537 userMsgType = kIOServiceTerminatedNotificationType;
1538 else
1539 userMsgType = kLastIOKitNotificationType;
1540
1541 userNotify = new IOServiceUserNotification;
1542
1543 if( userNotify && !userNotify->init( port, userMsgType,
1544 reference, referenceSize, client64)) {
1545 userNotify->release();
1546 userNotify = 0;
1547 }
1548 if( !userNotify)
1549 continue;
1550
1551 notify = IOService::addMatchingNotification( sym, dict,
1552 &userNotify->_handler, userNotify );
1553 if( notify) {
1554 *notification = userNotify;
1555 userNotify->setNotification( notify );
1556 err = kIOReturnSuccess;
1557 } else
1558 err = kIOReturnUnsupported;
1559
1560 } while( false );
1561
1562 if( sym)
1563 sym->release();
1564 if( dict)
1565 dict->release();
1566
1567 return( err );
1568 }
1569
1570
1571 /* Routine io_service_add_notification */
1572 kern_return_t is_io_service_add_notification(
1573 mach_port_t master_port,
1574 io_name_t notification_type,
1575 io_string_t matching,
1576 mach_port_t port,
1577 io_async_ref_t reference,
1578 mach_msg_type_number_t referenceCnt,
1579 io_object_t * notification )
1580 {
1581 return (internal_io_service_add_notification(master_port, notification_type,
1582 matching, port, &reference[0], sizeof(io_async_ref_t),
1583 false, notification));
1584 }
1585
1586 /* Routine io_service_add_notification_64 */
1587 kern_return_t is_io_service_add_notification_64(
1588 mach_port_t master_port,
1589 io_name_t notification_type,
1590 io_string_t matching,
1591 mach_port_t wake_port,
1592 io_async_ref64_t reference,
1593 mach_msg_type_number_t referenceCnt,
1594 io_object_t *notification )
1595 {
1596 return (internal_io_service_add_notification(master_port, notification_type,
1597 matching, wake_port, &reference[0], sizeof(io_async_ref64_t),
1598 true, notification));
1599 }
1600
1601
1602 static kern_return_t internal_io_service_add_notification_ool(
1603 mach_port_t master_port,
1604 io_name_t notification_type,
1605 io_buf_ptr_t matching,
1606 mach_msg_type_number_t matchingCnt,
1607 mach_port_t wake_port,
1608 void * reference,
1609 vm_size_t referenceSize,
1610 bool client64,
1611 kern_return_t *result,
1612 io_object_t *notification )
1613 {
1614 kern_return_t kr;
1615 vm_offset_t data;
1616 vm_map_offset_t map_data;
1617
1618 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1619 data = CAST_DOWN(vm_offset_t, map_data);
1620
1621 if( KERN_SUCCESS == kr) {
1622 // must return success after vm_map_copyout() succeeds
1623 *result = internal_io_service_add_notification( master_port, notification_type,
1624 (char *) data, wake_port, reference, referenceSize, client64, notification );
1625 vm_deallocate( kernel_map, data, matchingCnt );
1626 }
1627
1628 return( kr );
1629 }
1630
1631 /* Routine io_service_add_notification_ool */
1632 kern_return_t is_io_service_add_notification_ool(
1633 mach_port_t master_port,
1634 io_name_t notification_type,
1635 io_buf_ptr_t matching,
1636 mach_msg_type_number_t matchingCnt,
1637 mach_port_t wake_port,
1638 io_async_ref_t reference,
1639 mach_msg_type_number_t referenceCnt,
1640 kern_return_t *result,
1641 io_object_t *notification )
1642 {
1643 return (internal_io_service_add_notification_ool(master_port, notification_type,
1644 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
1645 false, result, notification));
1646 }
1647
1648 /* Routine io_service_add_notification_ool_64 */
1649 kern_return_t is_io_service_add_notification_ool_64(
1650 mach_port_t master_port,
1651 io_name_t notification_type,
1652 io_buf_ptr_t matching,
1653 mach_msg_type_number_t matchingCnt,
1654 mach_port_t wake_port,
1655 io_async_ref64_t reference,
1656 mach_msg_type_number_t referenceCnt,
1657 kern_return_t *result,
1658 io_object_t *notification )
1659 {
1660 return (internal_io_service_add_notification_ool(master_port, notification_type,
1661 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
1662 true, result, notification));
1663 }
1664
1665 /* Routine io_service_add_notification_old */
1666 kern_return_t is_io_service_add_notification_old(
1667 mach_port_t master_port,
1668 io_name_t notification_type,
1669 io_string_t matching,
1670 mach_port_t port,
1671 // for binary compatibility reasons, this must be natural_t for ILP32
1672 natural_t ref,
1673 io_object_t * notification )
1674 {
1675 return( is_io_service_add_notification( master_port, notification_type,
1676 matching, port, &ref, 1, notification ));
1677 }
1678
1679
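// Common backend for the interest-notification variants: wraps the caller's
// port and async reference in an IOServiceMessageUserNotification and
// registers it with IOService::registerInterest for the given interest type.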
1680 static kern_return_t internal_io_service_add_interest_notification(
1681 io_object_t _service,
1682 io_name_t type_of_interest,
1683 mach_port_t port,
1684 void * reference,
1685 vm_size_t referenceSize,
1686 bool client64,
1687 io_object_t * notification )
1688 {
1689
1690 IOServiceMessageUserNotification * userNotify = 0;
1691 IONotifier * notify = 0;
1692 const OSSymbol * sym;
1693 IOReturn err;
1694
1695 CHECK( IOService, _service, service );
1696
1697 err = kIOReturnNoResources;
1698 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
1699
1700 userNotify = new IOServiceMessageUserNotification;
1701
1702 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
1703 reference, referenceSize,
1704 kIOUserNotifyMaxMessageSize,
1705 client64 )) {
1706 userNotify->release();
1707 userNotify = 0;
1708 }
1709 if( !userNotify)
1710 continue;
1711
1712 notify = service->registerInterest( sym,
1713 &userNotify->_handler, userNotify );
1714 if( notify) {
1715 *notification = userNotify;
1716 userNotify->setNotification( notify );
1717 err = kIOReturnSuccess;
1718 } else
1719 err = kIOReturnUnsupported;
1720
1721 sym->release();
1722
1723 } while( false );
1724
1725 return( err );
1726 }
1727
1728 /* Routine io_service_add_interest_notification */
1729 kern_return_t is_io_service_add_interest_notification(
1730 io_object_t service,
1731 io_name_t type_of_interest,
1732 mach_port_t port,
1733 io_async_ref_t reference,
1734 mach_msg_type_number_t referenceCnt,
1735 io_object_t * notification )
1736 {
1737 return (internal_io_service_add_interest_notification(service, type_of_interest,
1738 port, &reference[0], sizeof(io_async_ref_t), false, notification));
1739 }
1740
1741 /* Routine io_service_add_interest_notification_64 */
1742 kern_return_t is_io_service_add_interest_notification_64(
1743 io_object_t service,
1744 io_name_t type_of_interest,
1745 mach_port_t wake_port,
1746 io_async_ref64_t reference,
1747 mach_msg_type_number_t referenceCnt,
1748 io_object_t *notification )
1749 {
1750 return (internal_io_service_add_interest_notification(service, type_of_interest,
1751 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
1752 }
1753
1754
1755 /* Routine io_service_acknowledge_notification */
1756 kern_return_t is_io_service_acknowledge_notification(
1757 io_object_t _service,
1758 natural_t notify_ref,
1759 natural_t response )
1760 {
1761 CHECK( IOService, _service, service );
1762
1763 return( service->acknowledgeNotification( (IONotificationRef) notify_ref,
1764 (IOOptionBits) response ));
1765
1766 }
1767
1768 /* Routine io_connect_get_notification_semaphore */
1769 kern_return_t is_io_connect_get_notification_semaphore(
1770 io_connect_t connection,
1771 natural_t notification_type,
1772 semaphore_t *semaphore )
1773 {
1774 CHECK( IOUserClient, connection, client );
1775
1776 return( client->getNotificationSemaphore( (UInt32) notification_type,
1777 semaphore ));
1778 }
1779
1780 /* Routine io_registry_get_root_entry */
1781 kern_return_t is_io_registry_get_root_entry(
1782 mach_port_t master_port,
1783 io_object_t *root )
1784 {
1785 IORegistryEntry * entry;
1786
1787 if( master_port != master_device_port)
1788 return( kIOReturnNotPrivileged);
1789
1790 entry = IORegistryEntry::getRegistryRoot();
1791 if( entry)
1792 entry->retain();
1793 *root = entry;
1794
1795 return( kIOReturnSuccess );
1796 }
1797
1798 /* Routine io_registry_create_iterator */
1799 kern_return_t is_io_registry_create_iterator(
1800 mach_port_t master_port,
1801 io_name_t plane,
1802 uint32_t options,
1803 io_object_t *iterator )
1804 {
1805 if( master_port != master_device_port)
1806 return( kIOReturnNotPrivileged);
1807
1808 *iterator = IORegistryIterator::iterateOver(
1809 IORegistryEntry::getPlane( plane ), options );
1810
1811 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1812 }
1813
1814 /* Routine io_registry_entry_create_iterator */
1815 kern_return_t is_io_registry_entry_create_iterator(
1816 io_object_t registry_entry,
1817 io_name_t plane,
1818 uint32_t options,
1819 io_object_t *iterator )
1820 {
1821 CHECK( IORegistryEntry, registry_entry, entry );
1822
1823 *iterator = IORegistryIterator::iterateOver( entry,
1824 IORegistryEntry::getPlane( plane ), options );
1825
1826 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
1827 }
1828
1829 /* Routine io_registry_iterator_enter */
1830 kern_return_t is_io_registry_iterator_enter_entry(
1831 io_object_t iterator )
1832 {
1833 CHECK( IORegistryIterator, iterator, iter );
1834
1835 iter->enterEntry();
1836
1837 return( kIOReturnSuccess );
1838 }
1839
1840 /* Routine io_registry_iterator_exit */
1841 kern_return_t is_io_registry_iterator_exit_entry(
1842 io_object_t iterator )
1843 {
1844 bool didIt;
1845
1846 CHECK( IORegistryIterator, iterator, iter );
1847
1848 didIt = iter->exitEntry();
1849
1850 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
1851 }
1852
1853 /* Routine io_registry_entry_from_path */
1854 kern_return_t is_io_registry_entry_from_path(
1855 mach_port_t master_port,
1856 io_string_t path,
1857 io_object_t *registry_entry )
1858 {
1859 IORegistryEntry * entry;
1860
1861 if( master_port != master_device_port)
1862 return( kIOReturnNotPrivileged);
1863
1864 entry = IORegistryEntry::fromPath( path );
1865
1866 *registry_entry = entry;
1867
1868 return( kIOReturnSuccess );
1869 }
1870
1871 /* Routine io_registry_entry_in_plane */
1872 kern_return_t is_io_registry_entry_in_plane(
1873 io_object_t registry_entry,
1874 io_name_t plane,
1875 boolean_t *inPlane )
1876 {
1877 CHECK( IORegistryEntry, registry_entry, entry );
1878
1879 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
1880
1881 return( kIOReturnSuccess );
1882 }
1883
1884
1885 /* Routine io_registry_entry_get_path */
1886 kern_return_t is_io_registry_entry_get_path(
1887 io_object_t registry_entry,
1888 io_name_t plane,
1889 io_string_t path )
1890 {
1891 int length;
1892 CHECK( IORegistryEntry, registry_entry, entry );
1893
1894 length = sizeof( io_string_t);
1895 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
1896 return( kIOReturnSuccess );
1897 else
1898 return( kIOReturnBadArgument );
1899 }
1900
1901
1902 /* Routine io_registry_entry_get_name */
1903 kern_return_t is_io_registry_entry_get_name(
1904 io_object_t registry_entry,
1905 io_name_t name )
1906 {
1907 CHECK( IORegistryEntry, registry_entry, entry );
1908
1909 strncpy( name, entry->getName(), sizeof( io_name_t));
1910
1911 return( kIOReturnSuccess );
1912 }
1913
1914 /* Routine io_registry_entry_get_name_in_plane */
1915 kern_return_t is_io_registry_entry_get_name_in_plane(
1916 io_object_t registry_entry,
1917 io_name_t planeName,
1918 io_name_t name )
1919 {
1920 const IORegistryPlane * plane;
1921 CHECK( IORegistryEntry, registry_entry, entry );
1922
1923 if( planeName[0])
1924 plane = IORegistryEntry::getPlane( planeName );
1925 else
1926 plane = 0;
1927
1928 strncpy( name, entry->getName( plane), sizeof( io_name_t));
1929
1930 return( kIOReturnSuccess );
1931 }
1932
1933 /* Routine io_registry_entry_get_location_in_plane */
1934 kern_return_t is_io_registry_entry_get_location_in_plane(
1935 io_object_t registry_entry,
1936 io_name_t planeName,
1937 io_name_t location )
1938 {
1939 const IORegistryPlane * plane;
1940 CHECK( IORegistryEntry, registry_entry, entry );
1941
1942 if( planeName[0])
1943 plane = IORegistryEntry::getPlane( planeName );
1944 else
1945 plane = 0;
1946
1947 const char * cstr = entry->getLocation( plane );
1948
1949 if( cstr) {
1950 strncpy( location, cstr, sizeof( io_name_t));
1951 return( kIOReturnSuccess );
1952 } else
1953 return( kIOReturnNotFound );
1954 }
1955
1956 /* Routine io_registry_entry_get_registry_entry_id */
1957 kern_return_t is_io_registry_entry_get_registry_entry_id(
1958 io_object_t registry_entry,
1959 uint64_t *entry_id )
1960 {
1961 CHECK( IORegistryEntry, registry_entry, entry );
1962
1963 *entry_id = entry->getRegistryEntryID();
1964
1965 return (kIOReturnSuccess);
1966 }
1967
1968 // Create a vm_map_copy_t or kalloc'ed data for memory
1969 // to be copied out. ipc will free after the copyout.
1970
1971 static kern_return_t copyoutkdata( void * data, vm_size_t len,
1972 io_buf_ptr_t * buf )
1973 {
1974 kern_return_t err;
1975 vm_map_copy_t copy;
1976
1977 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1978 false /* src_destroy */, &copy);
1979
1980 assert( err == KERN_SUCCESS );
1981 if( err == KERN_SUCCESS )
1982 *buf = (char *) copy;
1983
1984 return( err );
1985 }
1986
1987 /* Routine io_registry_entry_get_property */
1988 kern_return_t is_io_registry_entry_get_property_bytes(
1989 io_object_t registry_entry,
1990 io_name_t property_name,
1991 io_struct_inband_t buf,
1992 mach_msg_type_number_t *dataCnt )
1993 {
1994 OSObject * obj;
1995 OSData * data;
1996 OSString * str;
1997 OSBoolean * boo;
1998 OSNumber * off;
1999 UInt64 offsetBytes;
2000 unsigned int len = 0;
2001 const void * bytes = 0;
2002 IOReturn ret = kIOReturnSuccess;
2003
2004 CHECK( IORegistryEntry, registry_entry, entry );
2005
2006 obj = entry->copyProperty(property_name);
2007 if( !obj)
2008 return( kIOReturnNoResources );
2009
2010 // One day OSData will be a common container base class
2011 // until then...
2012 if( (data = OSDynamicCast( OSData, obj ))) {
2013 len = data->getLength();
2014 bytes = data->getBytesNoCopy();
2015
2016 } else if( (str = OSDynamicCast( OSString, obj ))) {
2017 len = str->getLength() + 1;
2018 bytes = str->getCStringNoCopy();
2019
2020 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2021 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2022 bytes = boo->isTrue() ? "Yes" : "No";
2023
2024 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2025 offsetBytes = off->unsigned64BitValue();
2026 len = off->numberOfBytes();
2027 bytes = &offsetBytes;
2028 #ifdef __BIG_ENDIAN__
2029 bytes = (const void *)
2030 (((UInt32) bytes) + (sizeof( UInt64) - len));
2031 #endif
2032
2033 } else
2034 ret = kIOReturnBadArgument;
2035
2036 if( bytes) {
2037 if( *dataCnt < len)
2038 ret = kIOReturnIPCError;
2039 else {
2040 *dataCnt = len;
2041 bcopy( bytes, buf, len );
2042 }
2043 }
2044 obj->release();
2045
2046 return( ret );
2047 }
2048
2049
2050 /* Routine io_registry_entry_get_property */
2051 kern_return_t is_io_registry_entry_get_property(
2052 io_object_t registry_entry,
2053 io_name_t property_name,
2054 io_buf_ptr_t *properties,
2055 mach_msg_type_number_t *propertiesCnt )
2056 {
2057 kern_return_t err;
2058 vm_size_t len;
2059 OSObject * obj;
2060
2061 CHECK( IORegistryEntry, registry_entry, entry );
2062
2063 obj = entry->copyProperty(property_name);
2064 if( !obj)
2065 return( kIOReturnNotFound );
2066
2067 OSSerialize * s = OSSerialize::withCapacity(4096);
2068 if( !s) {
2069 obj->release();
2070 return( kIOReturnNoMemory );
2071 }
2072 s->clearText();
2073
2074 if( obj->serialize( s )) {
2075 len = s->getLength();
2076 *propertiesCnt = len;
2077 err = copyoutkdata( s->text(), len, properties );
2078
2079 } else
2080 err = kIOReturnUnsupported;
2081
2082 s->release();
2083 obj->release();
2084
2085 return( err );
2086 }
2087
2088 /* Routine io_registry_entry_get_property_recursively */
2089 kern_return_t is_io_registry_entry_get_property_recursively(
2090 io_object_t registry_entry,
2091 io_name_t plane,
2092 io_name_t property_name,
2093 uint32_t options,
2094 io_buf_ptr_t *properties,
2095 mach_msg_type_number_t *propertiesCnt )
2096 {
2097 kern_return_t err;
2098 vm_size_t len;
2099 OSObject * obj;
2100
2101 CHECK( IORegistryEntry, registry_entry, entry );
2102
2103 obj = entry->copyProperty( property_name,
2104 IORegistryEntry::getPlane( plane ), options);
2105 if( !obj)
2106 return( kIOReturnNotFound );
2107
2108 OSSerialize * s = OSSerialize::withCapacity(4096);
2109 if( !s) {
2110 obj->release();
2111 return( kIOReturnNoMemory );
2112 }
2113
2114 s->clearText();
2115
2116 if( obj->serialize( s )) {
2117 len = s->getLength();
2118 *propertiesCnt = len;
2119 err = copyoutkdata( s->text(), len, properties );
2120
2121 } else
2122 err = kIOReturnUnsupported;
2123
2124 s->release();
2125 obj->release();
2126
2127 return( err );
2128 }
2129
2130 /* Routine io_registry_entry_get_properties */
2131 kern_return_t is_io_registry_entry_get_properties(
2132 io_object_t registry_entry,
2133 io_buf_ptr_t *properties,
2134 mach_msg_type_number_t *propertiesCnt )
2135 {
2136 kern_return_t err;
2137 vm_size_t len;
2138
2139 CHECK( IORegistryEntry, registry_entry, entry );
2140
2141 OSSerialize * s = OSSerialize::withCapacity(4096);
2142 if( !s)
2143 return( kIOReturnNoMemory );
2144
2145 s->clearText();
2146
2147 if( entry->serializeProperties( s )) {
2148 len = s->getLength();
2149 *propertiesCnt = len;
2150 err = copyoutkdata( s->text(), len, properties );
2151
2152 } else
2153 err = kIOReturnUnsupported;
2154
2155 s->release();
2156
2157 return( err );
2158 }
2159
2160 /* Routine io_registry_entry_set_properties */
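// Accepts an out-of-line XML-serialized dictionary from the caller,
// unserializes it and forwards it to the entry's setProperties().  Note that
// the MIG return value must stay KERN_SUCCESS once vm_map_copyout() has
// succeeded; the real status is passed back through *result.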
2161 kern_return_t is_io_registry_entry_set_properties
2162 (
2163 io_object_t registry_entry,
2164 io_buf_ptr_t properties,
2165 mach_msg_type_number_t propertiesCnt,
2166 kern_return_t * result)
2167 {
2168 OSObject * obj;
2169 kern_return_t err;
2170 IOReturn res;
2171 vm_offset_t data;
2172 vm_map_offset_t map_data;
2173
2174 CHECK( IORegistryEntry, registry_entry, entry );
2175
2176 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2177 data = CAST_DOWN(vm_offset_t, map_data);
2178
2179 if( KERN_SUCCESS == err) {
2180
2181 // must return success after vm_map_copyout() succeeds
2182 obj = OSUnserializeXML( (const char *) data );
2183 vm_deallocate( kernel_map, data, propertiesCnt );
2184
2185 if( obj) {
2186 res = entry->setProperties( obj );
2187 obj->release();
2188 } else
2189 res = kIOReturnBadArgument;
2190 } else
2191 res = err;
2192
2193 *result = res;
2194 return( err );
2195 }
2196
2197 /* Routine io_registry_entry_get_child_iterator */
2198 kern_return_t is_io_registry_entry_get_child_iterator(
2199 io_object_t registry_entry,
2200 io_name_t plane,
2201 io_object_t *iterator )
2202 {
2203 CHECK( IORegistryEntry, registry_entry, entry );
2204
2205 *iterator = entry->getChildIterator(
2206 IORegistryEntry::getPlane( plane ));
2207
2208 return( kIOReturnSuccess );
2209 }
2210
2211 /* Routine io_registry_entry_get_parent_iterator */
2212 kern_return_t is_io_registry_entry_get_parent_iterator(
2213 io_object_t registry_entry,
2214 io_name_t plane,
2215 io_object_t *iterator)
2216 {
2217 CHECK( IORegistryEntry, registry_entry, entry );
2218
2219 *iterator = entry->getParentIterator(
2220 IORegistryEntry::getPlane( plane ));
2221
2222 return( kIOReturnSuccess );
2223 }
2224
2225 /* Routine io_service_get_busy_state */
2226 kern_return_t is_io_service_get_busy_state(
2227 io_object_t _service,
2228 uint32_t *busyState )
2229 {
2230 CHECK( IOService, _service, service );
2231
2232 *busyState = service->getBusyState();
2233
2234 return( kIOReturnSuccess );
2235 }
2236
2237 /* Routine io_service_get_state */
2238 kern_return_t is_io_service_get_state(
2239 io_object_t _service,
2240 uint64_t *state,
2241 uint32_t *busy_state,
2242 uint64_t *accumulated_busy_time )
2243 {
2244 CHECK( IOService, _service, service );
2245
2246 *state = service->getState();
2247 *busy_state = service->getBusyState();
2248 *accumulated_busy_time = service->getAccumulatedBusyTime();
2249
2250 return( kIOReturnSuccess );
2251 }
2252
2253 /* Routine io_service_wait_quiet */
2254 kern_return_t is_io_service_wait_quiet(
2255 io_object_t _service,
2256 mach_timespec_t wait_time )
2257 {
2258 uint64_t timeoutNS;
2259
2260 CHECK( IOService, _service, service );
2261
2262 timeoutNS = wait_time.tv_sec;
2263 timeoutNS *= kSecondScale;
2264 timeoutNS += wait_time.tv_nsec;
2265
2266 return( service->waitQuiet(timeoutNS) );
2267 }
2268
2269 /* Routine io_service_request_probe */
2270 kern_return_t is_io_service_request_probe(
2271 io_object_t _service,
2272 uint32_t options )
2273 {
2274 CHECK( IOService, _service, service );
2275
2276 return( service->requestProbe( options ));
2277 }
2278
2279
2280 /* Routine io_service_open */
2281 kern_return_t is_io_service_open(
2282 io_object_t _service,
2283 task_t owningTask,
2284 uint32_t connect_type,
2285 io_object_t *connection )
2286 {
2287 IOUserClient * client;
2288 IOReturn err;
2289
2290 CHECK( IOService, _service, service );
2291
2292 err = service->newUserClient( owningTask, (void *) owningTask,
2293 connect_type, 0, &client );
2294
2295 if( err == kIOReturnSuccess) {
2296 assert( OSDynamicCast(IOUserClient, client) );
2297 *connection = client;
2298 }
2299
2300 return( err);
2301 }
2302
2303 /* Routine io_service_open_extended */
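// Extended open: optionally carries an out-of-line serialized properties
// dictionary for newUserClient(), records the caller's NDR so cross-endian
// clients are refused unless the service or client is marked with
// kIOUserClientCrossEndianCompatibleKey, and tags the new user client with
// the creating process via kIOUserClientCreatorKey.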
2304 kern_return_t is_io_service_open_extended(
2305 io_object_t _service,
2306 task_t owningTask,
2307 uint32_t connect_type,
2308 NDR_record_t ndr,
2309 io_buf_ptr_t properties,
2310 mach_msg_type_number_t propertiesCnt,
2311 kern_return_t * result,
2312 io_object_t *connection )
2313 {
2314 IOUserClient * client = 0;
2315 kern_return_t err = KERN_SUCCESS;
2316 IOReturn res = kIOReturnSuccess;
2317 OSDictionary * propertiesDict = 0;
2318 bool crossEndian;
2319 bool disallowAccess;
2320
2321 CHECK( IOService, _service, service );
2322
2323 do
2324 {
2325 if (properties)
2326 {
2327 OSObject * obj;
2328 vm_offset_t data;
2329 vm_map_offset_t map_data;
2330
2331 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2332 res = err;
2333 data = CAST_DOWN(vm_offset_t, map_data);
2334 if (KERN_SUCCESS == err)
2335 {
2336 // must return success after vm_map_copyout() succeeds
2337 obj = OSUnserializeXML( (const char *) data );
2338 vm_deallocate( kernel_map, data, propertiesCnt );
2339 propertiesDict = OSDynamicCast(OSDictionary, obj);
2340 if (!propertiesDict)
2341 {
2342 res = kIOReturnBadArgument;
2343 if (obj)
2344 obj->release();
2345 }
2346 }
2347 if (kIOReturnSuccess != res)
2348 break;
2349 }
2350
2351 crossEndian = (ndr.int_rep != NDR_record.int_rep);
2352 if (crossEndian)
2353 {
2354 if (!propertiesDict)
2355 propertiesDict = OSDictionary::withCapacity(4);
2356 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
2357 if (data)
2358 {
2359 if (propertiesDict)
2360 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
2361 data->release();
2362 }
2363 }
2364
2365 res = service->newUserClient( owningTask, (void *) owningTask,
2366 connect_type, propertiesDict, &client );
2367
2368 if (propertiesDict)
2369 propertiesDict->release();
2370
2371 if (res == kIOReturnSuccess)
2372 {
2373 assert( OSDynamicCast(IOUserClient, client) );
2374
2375 disallowAccess = (crossEndian
2376 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
2377 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
2378
2379 if (disallowAccess)
2380 {
2381 client->clientClose();
2382 client->release();
2383 client = 0;
2384 res = kIOReturnUnsupported;
2385 break;
2386 }
2387 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
2388 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
2389 if (creatorName)
2390 {
2391 client->setProperty(kIOUserClientCreatorKey, creatorName);
2392 creatorName->release();
2393 }
2394 }
2395 }
2396 while (false);
2397
2398 *connection = client;
2399 *result = res;
2400
2401 return (err);
2402 }
2403
2404 /* Routine io_service_close */
2405 kern_return_t is_io_service_close(
2406 io_object_t connection )
2407 {
2408 OSSet * mappings;
2409 if ((mappings = OSDynamicCast(OSSet, connection)))
2410 return( kIOReturnSuccess );
2411
2412 CHECK( IOUserClient, connection, client );
2413
2414 client->clientClose();
2415
2416 return( kIOReturnSuccess );
2417 }
2418
2419 /* Routine io_connect_get_service */
2420 kern_return_t is_io_connect_get_service(
2421 io_object_t connection,
2422 io_object_t *service )
2423 {
2424 IOService * theService;
2425
2426 CHECK( IOUserClient, connection, client );
2427
2428 theService = client->getService();
2429 if( theService)
2430 theService->retain();
2431
2432 *service = theService;
2433
2434 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
2435 }
2436
2437 /* Routine io_connect_set_notification_port */
2438 kern_return_t is_io_connect_set_notification_port(
2439 io_object_t connection,
2440 uint32_t notification_type,
2441 mach_port_t port,
2442 uint32_t reference)
2443 {
2444 CHECK( IOUserClient, connection, client );
2445
2446 return( client->registerNotificationPort( port, notification_type,
2447 (io_user_reference_t) reference ));
2448 }
2449
2450 /* Routine io_connect_set_notification_port */
2451 kern_return_t is_io_connect_set_notification_port_64(
2452 io_object_t connection,
2453 uint32_t notification_type,
2454 mach_port_t port,
2455 io_user_reference_t reference)
2456 {
2457 CHECK( IOUserClient, connection, client );
2458
2459 return( client->registerNotificationPort( port, notification_type,
2460 reference ));
2461 }
2462
2463 /* Routine io_connect_map_memory_into_task */
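// Maps the client memory identified by memory_type into into_task.  For a
// shared-instance client or a foreign task the map is published as a port
// send right in that task (so it can be cleaned up when the port dies);
// otherwise the map is parked on the user client's mappings set.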
2464 kern_return_t is_io_connect_map_memory_into_task
2465 (
2466 io_connect_t connection,
2467 uint32_t memory_type,
2468 task_t into_task,
2469 mach_vm_address_t *address,
2470 mach_vm_size_t *size,
2471 uint32_t flags
2472 )
2473 {
2474 IOReturn err;
2475 IOMemoryMap * map;
2476
2477 CHECK( IOUserClient, connection, client );
2478
2479 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
2480
2481 if( map) {
2482 *address = map->getAddress();
2483 if( size)
2484 *size = map->getSize();
2485
2486 if( client->sharedInstance
2487 || (into_task != current_task())) {
2488 // push a name out to the task owning the map,
2489 // so we can clean up maps
2490 mach_port_name_t name __unused =
2491 IOMachPort::makeSendRightForTask(
2492 into_task, map, IKOT_IOKIT_OBJECT );
2493 assert( name );
2494
2495 } else {
2496 // keep it with the user client
2497 IOLockLock( gIOObjectPortLock);
2498 if( 0 == client->mappings)
2499 client->mappings = OSSet::withCapacity(2);
2500 if( client->mappings)
2501 client->mappings->setObject( map);
2502 IOLockUnlock( gIOObjectPortLock);
2503 map->release();
2504 }
2505 err = kIOReturnSuccess;
2506
2507 } else
2508 err = kIOReturnBadArgument;
2509
2510 return( err );
2511 }
2512
2513 /* Routine io_connect_map_memory */
2514 kern_return_t is_io_connect_map_memory(
2515 io_object_t connect,
2516 uint32_t type,
2517 task_t task,
2518 vm_address_t * mapAddr,
2519 vm_size_t * mapSize,
2520 uint32_t flags )
2521 {
2522 IOReturn err;
2523 mach_vm_address_t address;
2524 mach_vm_size_t size;
2525
2526 address = SCALAR64(*mapAddr);
2527 size = SCALAR64(*mapSize);
2528
2529 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
2530
2531 *mapAddr = SCALAR32(address);
2532 *mapSize = SCALAR32(size);
2533
2534 return (err);
2535 }
2536
2537 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
2538 {
2539 OSIterator * iter;
2540 IOMemoryMap * map = 0;
2541
2542 IOLockLock(gIOObjectPortLock);
2543
2544 iter = OSCollectionIterator::withCollection(mappings);
2545 if(iter)
2546 {
2547 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
2548 {
2549 if(mem == map->getMemoryDescriptor())
2550 {
2551 map->retain();
2552 mappings->removeObject(map);
2553 break;
2554 }
2555 }
2556 iter->release();
2557 }
2558
2559 IOLockUnlock(gIOObjectPortLock);
2560
2561 return (map);
2562 }
2563
2564 /* Routine io_connect_unmap_memory_from_task */
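// Tears down a mapping made by io_connect_map_memory_into_task: rebuilds a
// kIOMapReference mapping to locate the existing map at 'address', removes it
// from the client's mappings set, then unmaps it via userClientUnmap() and
// drops the port references held by a foreign task.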
2565 kern_return_t is_io_connect_unmap_memory_from_task
2566 (
2567 io_connect_t connection,
2568 uint32_t memory_type,
2569 task_t from_task,
2570 mach_vm_address_t address)
2571 {
2572 IOReturn err;
2573 IOOptionBits options = 0;
2574 IOMemoryDescriptor * memory;
2575 IOMemoryMap * map;
2576
2577 CHECK( IOUserClient, connection, client );
2578
2579 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
2580
2581 if( memory && (kIOReturnSuccess == err)) {
2582
2583 options = (options & ~kIOMapUserOptionsMask)
2584 | kIOMapAnywhere | kIOMapReference;
2585
2586 map = memory->createMappingInTask( from_task, address, options );
2587 memory->release();
2588 if( map)
2589 {
2590 IOLockLock( gIOObjectPortLock);
2591 if( client->mappings)
2592 client->mappings->removeObject( map);
2593 IOLockUnlock( gIOObjectPortLock);
2594
2595 mach_port_name_t name = 0;
2596 if (from_task != current_task())
2597 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
2598 if (name)
2599 {
2600 map->userClientUnmap();
2601 (void) iokit_mod_send_right( from_task, name, -2 );
2602 err = kIOReturnSuccess;
2603 }
2604 else
2605 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
2606 if (from_task == current_task())
2607 map->release();
2608 }
2609 else
2610 err = kIOReturnBadArgument;
2611 }
2612
2613 return( err );
2614 }
2615
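/* Routine io_connect_unmap_memory */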
2616 kern_return_t is_io_connect_unmap_memory(
2617 io_object_t connect,
2618 uint32_t type,
2619 task_t task,
2620 vm_address_t mapAddr )
2621 {
2622 IOReturn err;
2623 mach_vm_address_t address;
2624
2625 address = SCALAR64(mapAddr);
2626
2627 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
2628
2629 return (err);
2630 }
2631
2632
2633 /* Routine io_connect_add_client */
2634 kern_return_t is_io_connect_add_client(
2635 io_object_t connection,
2636 io_object_t connect_to)
2637 {
2638 CHECK( IOUserClient, connection, client );
2639 CHECK( IOUserClient, connect_to, to );
2640
2641 return( client->connectClient( to ) );
2642 }
2643
2644
2645 /* Routine io_connect_set_properties */
2646 kern_return_t is_io_connect_set_properties(
2647 io_object_t connection,
2648 io_buf_ptr_t properties,
2649 mach_msg_type_number_t propertiesCnt,
2650 kern_return_t * result)
2651 {
2652 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
2653 }
2654
2655
2656 /* Routine io_connect_method */
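// Unified external-method entry point for the Leopard-and-later interface:
// packages the scalar, inband-struct and out-of-line buffers into an
// IOExternalMethodArguments structure and hands it to the client's
// externalMethod().  Out-of-line buffers are wrapped in IOMemoryDescriptors
// created against the calling task.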
2657 kern_return_t is_io_connect_method
2658 (
2659 io_connect_t connection,
2660 uint32_t selector,
2661 io_scalar_inband64_t scalar_input,
2662 mach_msg_type_number_t scalar_inputCnt,
2663 io_struct_inband_t inband_input,
2664 mach_msg_type_number_t inband_inputCnt,
2665 mach_vm_address_t ool_input,
2666 mach_vm_size_t ool_input_size,
2667 io_scalar_inband64_t scalar_output,
2668 mach_msg_type_number_t *scalar_outputCnt,
2669 io_struct_inband_t inband_output,
2670 mach_msg_type_number_t *inband_outputCnt,
2671 mach_vm_address_t ool_output,
2672 mach_vm_size_t * ool_output_size
2673 )
2674 {
2675 CHECK( IOUserClient, connection, client );
2676
2677 IOExternalMethodArguments args;
2678 IOReturn ret;
2679 IOMemoryDescriptor * inputMD = 0;
2680 IOMemoryDescriptor * outputMD = 0;
2681
2682 bzero(&args.__reserved[0], sizeof(args.__reserved));
2683 args.version = kIOExternalMethodArgumentsCurrentVersion;
2684
2685 args.selector = selector;
2686
2687 args.asyncWakePort = MACH_PORT_NULL;
2688 args.asyncReference = 0;
2689 args.asyncReferenceCount = 0;
2690
2691 args.scalarInput = scalar_input;
2692 args.scalarInputCount = scalar_inputCnt;
2693 args.structureInput = inband_input;
2694 args.structureInputSize = inband_inputCnt;
2695
2696 if (ool_input)
2697 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2698 kIODirectionOut, current_task());
2699
2700 args.structureInputDescriptor = inputMD;
2701
2702 args.scalarOutput = scalar_output;
2703 args.scalarOutputCount = *scalar_outputCnt;
2704 args.structureOutput = inband_output;
2705 args.structureOutputSize = *inband_outputCnt;
2706
2707 if (ool_output)
2708 {
2709 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
2710 kIODirectionIn, current_task());
2711 }
2712
2713 args.structureOutputDescriptor = outputMD;
2714 args.structureOutputDescriptorSize = *ool_output_size;
2715
2716 ret = client->externalMethod( selector, &args );
2717
2718 *scalar_outputCnt = args.scalarOutputCount;
2719 *inband_outputCnt = args.structureOutputSize;
2720 *ool_output_size = args.structureOutputDescriptorSize;
2721
2722 if (inputMD)
2723 inputMD->release();
2724 if (outputMD)
2725 outputMD->release();
2726
2727 return (ret);
2728 }
2729
2730 /* Routine io_connect_async_method */
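// Async flavor of io_connect_method: additionally carries the wake port and
// async reference.  reference[0] is overwritten with the wake port and tagged
// with kIOUCAsync64Flag when the calling task has a 64-bit map, so the
// completion path knows how wide the reference values are.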
2731 kern_return_t is_io_connect_async_method
2732 (
2733 io_connect_t connection,
2734 mach_port_t wake_port,
2735 io_async_ref64_t reference,
2736 mach_msg_type_number_t referenceCnt,
2737 uint32_t selector,
2738 io_scalar_inband64_t scalar_input,
2739 mach_msg_type_number_t scalar_inputCnt,
2740 io_struct_inband_t inband_input,
2741 mach_msg_type_number_t inband_inputCnt,
2742 mach_vm_address_t ool_input,
2743 mach_vm_size_t ool_input_size,
2744 io_scalar_inband64_t scalar_output,
2745 mach_msg_type_number_t *scalar_outputCnt,
2746 io_struct_inband_t inband_output,
2747 mach_msg_type_number_t *inband_outputCnt,
2748 mach_vm_address_t ool_output,
2749 mach_vm_size_t * ool_output_size
2750 )
2751 {
2752 CHECK( IOUserClient, connection, client );
2753
2754 IOExternalMethodArguments args;
2755 IOReturn ret;
2756 IOMemoryDescriptor * inputMD = 0;
2757 IOMemoryDescriptor * outputMD = 0;
2758
2759 bzero(&args.__reserved[0], sizeof(args.__reserved));
2760 args.version = kIOExternalMethodArgumentsCurrentVersion;
2761
2762 reference[0] = (io_user_reference_t) wake_port;
2763 if (vm_map_is_64bit(get_task_map(current_task())))
2764 reference[0] |= kIOUCAsync64Flag;
2765
2766 args.selector = selector;
2767
2768 args.asyncWakePort = wake_port;
2769 args.asyncReference = reference;
2770 args.asyncReferenceCount = referenceCnt;
2771
2772 args.scalarInput = scalar_input;
2773 args.scalarInputCount = scalar_inputCnt;
2774 args.structureInput = inband_input;
2775 args.structureInputSize = inband_inputCnt;
2776
2777 if (ool_input)
2778 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2779 kIODirectionOut, current_task());
2780
2781 args.structureInputDescriptor = inputMD;
2782
2783 args.scalarOutput = scalar_output;
2784 args.scalarOutputCount = *scalar_outputCnt;
2785 args.structureOutput = inband_output;
2786 args.structureOutputSize = *inband_outputCnt;
2787
2788 if (ool_output)
2789 {
2790 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
2791 kIODirectionIn, current_task());
2792 }
2793
2794 args.structureOutputDescriptor = outputMD;
2795 args.structureOutputDescriptorSize = *ool_output_size;
2796
2797 ret = client->externalMethod( selector, &args );
2798
2799 *inband_outputCnt = args.structureOutputSize;
2800 *ool_output_size = args.structureOutputDescriptorSize;
2801
2802 if (inputMD)
2803 inputMD->release();
2804 if (outputMD)
2805 outputMD->release();
2806
2807 return (ret);
2808 }
2809
2810 /* Routine io_connect_method_scalarI_scalarO */
2811 kern_return_t is_io_connect_method_scalarI_scalarO(
2812 io_object_t connect,
2813 uint32_t index,
2814 io_scalar_inband_t input,
2815 mach_msg_type_number_t inputCount,
2816 io_scalar_inband_t output,
2817 mach_msg_type_number_t * outputCount )
2818 {
2819 IOReturn err;
2820 uint32_t i;
2821 io_scalar_inband64_t _input;
2822 io_scalar_inband64_t _output;
2823
2824 mach_msg_type_number_t struct_outputCnt = 0;
2825 mach_vm_size_t ool_output_size = 0;
2826
2827 for (i = 0; i < inputCount; i++)
2828 _input[i] = SCALAR64(input[i]);
2829
2830 err = is_io_connect_method(connect, index,
2831 _input, inputCount,
2832 NULL, 0,
2833 0, 0,
2834 _output, outputCount,
2835 NULL, &struct_outputCnt,
2836 0, &ool_output_size);
2837
2838 for (i = 0; i < *outputCount; i++)
2839 output[i] = SCALAR32(_output[i]);
2840
2841 return (err);
2842 }
2843
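// Legacy (pre-Leopard) shim: expands up to six 32-bit scalars into the
// variadic IOMethod calling convention recorded in the IOExternalMethod
// table.  The remaining shim_* functions below follow the same pattern for
// the other argument shapes.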
2844 kern_return_t shim_io_connect_method_scalarI_scalarO(
2845 IOExternalMethod * method,
2846 IOService * object,
2847 const io_user_scalar_t * input,
2848 mach_msg_type_number_t inputCount,
2849 io_user_scalar_t * output,
2850 mach_msg_type_number_t * outputCount )
2851 {
2852 IOMethod func;
2853 io_scalar_inband_t _output;
2854 IOReturn err;
2855 err = kIOReturnBadArgument;
2856
2857 do {
2858
2859 if( inputCount != method->count0)
2860 {
2861 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
2862 continue;
2863 }
2864 if( *outputCount != method->count1)
2865 {
2866 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
2867 continue;
2868 }
2869
2870 func = method->func;
2871
2872 switch( inputCount) {
2873
2874 case 6:
2875 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2876 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
2877 break;
2878 case 5:
2879 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2880 ARG32(input[3]), ARG32(input[4]),
2881 &_output[0] );
2882 break;
2883 case 4:
2884 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2885 ARG32(input[3]),
2886 &_output[0], &_output[1] );
2887 break;
2888 case 3:
2889 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
2890 &_output[0], &_output[1], &_output[2] );
2891 break;
2892 case 2:
2893 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
2894 &_output[0], &_output[1], &_output[2],
2895 &_output[3] );
2896 break;
2897 case 1:
2898 err = (object->*func)( ARG32(input[0]),
2899 &_output[0], &_output[1], &_output[2],
2900 &_output[3], &_output[4] );
2901 break;
2902 case 0:
2903 err = (object->*func)( &_output[0], &_output[1], &_output[2],
2904 &_output[3], &_output[4], &_output[5] );
2905 break;
2906
2907 default:
2908 IOLog("%s: Bad method table\n", object->getName());
2909 }
2910 }
2911 while( false);
2912
2913 uint32_t i;
2914 for (i = 0; i < *outputCount; i++)
2915 output[i] = SCALAR32(_output[i]);
2916
2917 return( err);
2918 }
2919
2920 /* Routine io_async_method_scalarI_scalarO */
2921 kern_return_t is_io_async_method_scalarI_scalarO(
2922 io_object_t connect,
2923 mach_port_t wake_port,
2924 io_async_ref_t reference,
2925 mach_msg_type_number_t referenceCnt,
2926 uint32_t index,
2927 io_scalar_inband_t input,
2928 mach_msg_type_number_t inputCount,
2929 io_scalar_inband_t output,
2930 mach_msg_type_number_t * outputCount )
2931 {
2932 IOReturn err;
2933 uint32_t i;
2934 io_scalar_inband64_t _input;
2935 io_scalar_inband64_t _output;
2936 io_async_ref64_t _reference;
2937
2938 for (i = 0; i < referenceCnt; i++)
2939 _reference[i] = REF64(reference[i]);
2940
2941 mach_msg_type_number_t struct_outputCnt = 0;
2942 mach_vm_size_t ool_output_size = 0;
2943
2944 for (i = 0; i < inputCount; i++)
2945 _input[i] = SCALAR64(input[i]);
2946
2947 err = is_io_connect_async_method(connect,
2948 wake_port, _reference, referenceCnt,
2949 index,
2950 _input, inputCount,
2951 NULL, 0,
2952 0, 0,
2953 _output, outputCount,
2954 NULL, &struct_outputCnt,
2955 0, &ool_output_size);
2956
2957 for (i = 0; i < *outputCount; i++)
2958 output[i] = SCALAR32(_output[i]);
2959
2960 return (err);
2961 }

2962 /* Routine io_async_method_scalarI_structureO */
2963 kern_return_t is_io_async_method_scalarI_structureO(
2964 io_object_t connect,
2965 mach_port_t wake_port,
2966 io_async_ref_t reference,
2967 mach_msg_type_number_t referenceCnt,
2968 uint32_t index,
2969 io_scalar_inband_t input,
2970 mach_msg_type_number_t inputCount,
2971 io_struct_inband_t output,
2972 mach_msg_type_number_t * outputCount )
2973 {
2974 uint32_t i;
2975 io_scalar_inband64_t _input;
2976 io_async_ref64_t _reference;
2977
2978 for (i = 0; i < referenceCnt; i++)
2979 _reference[i] = REF64(reference[i]);
2980
2981 mach_msg_type_number_t scalar_outputCnt = 0;
2982 mach_vm_size_t ool_output_size = 0;
2983
2984 for (i = 0; i < inputCount; i++)
2985 _input[i] = SCALAR64(input[i]);
2986
2987 return (is_io_connect_async_method(connect,
2988 wake_port, _reference, referenceCnt,
2989 index,
2990 _input, inputCount,
2991 NULL, 0,
2992 0, 0,
2993 NULL, &scalar_outputCnt,
2994 output, outputCount,
2995 0, &ool_output_size));
2996 }
2997
2998 /* Routine io_async_method_scalarI_structureI */
2999 kern_return_t is_io_async_method_scalarI_structureI(
3000 io_connect_t connect,
3001 mach_port_t wake_port,
3002 io_async_ref_t reference,
3003 mach_msg_type_number_t referenceCnt,
3004 uint32_t index,
3005 io_scalar_inband_t input,
3006 mach_msg_type_number_t inputCount,
3007 io_struct_inband_t inputStruct,
3008 mach_msg_type_number_t inputStructCount )
3009 {
3010 uint32_t i;
3011 io_scalar_inband64_t _input;
3012 io_async_ref64_t _reference;
3013
3014 for (i = 0; i < referenceCnt; i++)
3015 _reference[i] = REF64(reference[i]);
3016
3017 mach_msg_type_number_t scalar_outputCnt = 0;
3018 mach_msg_type_number_t inband_outputCnt = 0;
3019 mach_vm_size_t ool_output_size = 0;
3020
3021 for (i = 0; i < inputCount; i++)
3022 _input[i] = SCALAR64(input[i]);
3023
3024 return (is_io_connect_async_method(connect,
3025 wake_port, _reference, referenceCnt,
3026 index,
3027 _input, inputCount,
3028 inputStruct, inputStructCount,
3029 0, 0,
3030 NULL, &scalar_outputCnt,
3031 NULL, &inband_outputCnt,
3032 0, &ool_output_size));
3033 }
3034
3035 /* Routine io_async_method_structureI_structureO */
3036 kern_return_t is_io_async_method_structureI_structureO(
3037 io_object_t connect,
3038 mach_port_t wake_port,
3039 io_async_ref_t reference,
3040 mach_msg_type_number_t referenceCnt,
3041 uint32_t index,
3042 io_struct_inband_t input,
3043 mach_msg_type_number_t inputCount,
3044 io_struct_inband_t output,
3045 mach_msg_type_number_t * outputCount )
3046 {
3047 uint32_t i;
3048 mach_msg_type_number_t scalar_outputCnt = 0;
3049 mach_vm_size_t ool_output_size = 0;
3050 io_async_ref64_t _reference;
3051
3052 for (i = 0; i < referenceCnt; i++)
3053 _reference[i] = REF64(reference[i]);
3054
3055 return (is_io_connect_async_method(connect,
3056 wake_port, _reference, referenceCnt,
3057 index,
3058 NULL, 0,
3059 input, inputCount,
3060 0, 0,
3061 NULL, &scalar_outputCnt,
3062 output, outputCount,
3063 0, &ool_output_size));
3064 }
3065
3066
3067 kern_return_t shim_io_async_method_scalarI_scalarO(
3068 IOExternalAsyncMethod * method,
3069 IOService * object,
3070 mach_port_t asyncWakePort,
3071 io_user_reference_t * asyncReference,
3072 uint32_t asyncReferenceCount,
3073 const io_user_scalar_t * input,
3074 mach_msg_type_number_t inputCount,
3075 io_user_scalar_t * output,
3076 mach_msg_type_number_t * outputCount )
3077 {
3078 IOAsyncMethod func;
3079 uint32_t i;
3080 io_scalar_inband_t _output;
3081 IOReturn err;
3082 io_async_ref_t reference;
3083
3084 for (i = 0; i < asyncReferenceCount; i++)
3085 reference[i] = REF32(asyncReference[i]);
3086
3087 err = kIOReturnBadArgument;
3088
3089 do {
3090
3091 if( inputCount != method->count0)
3092 {
3093 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3094 continue;
3095 }
3096 if( *outputCount != method->count1)
3097 {
3098 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3099 continue;
3100 }
3101
3102 func = method->func;
3103
3104 switch( inputCount) {
3105
3106 case 6:
3107 err = (object->*func)( reference,
3108 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3109 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3110 break;
3111 case 5:
3112 err = (object->*func)( reference,
3113 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3114 ARG32(input[3]), ARG32(input[4]),
3115 &_output[0] );
3116 break;
3117 case 4:
3118 err = (object->*func)( reference,
3119 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3120 ARG32(input[3]),
3121 &_output[0], &_output[1] );
3122 break;
3123 case 3:
3124 err = (object->*func)( reference,
3125 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3126 &_output[0], &_output[1], &_output[2] );
3127 break;
3128 case 2:
3129 err = (object->*func)( reference,
3130 ARG32(input[0]), ARG32(input[1]),
3131 &_output[0], &_output[1], &_output[2],
3132 &_output[3] );
3133 break;
3134 case 1:
3135 err = (object->*func)( reference,
3136 ARG32(input[0]),
3137 &_output[0], &_output[1], &_output[2],
3138 &_output[3], &_output[4] );
3139 break;
3140 case 0:
3141 err = (object->*func)( reference,
3142 &_output[0], &_output[1], &_output[2],
3143 &_output[3], &_output[4], &_output[5] );
3144 break;
3145
3146 default:
3147 IOLog("%s: Bad method table\n", object->getName());
3148 }
3149 }
3150 while( false);
3151
3152 for (i = 0; i < *outputCount; i++)
3153 output[i] = SCALAR32(_output[i]);
3154
3155 return( err);
3156 }
3157
3158
3159 /* Routine io_connect_method_scalarI_structureO */
3160 kern_return_t is_io_connect_method_scalarI_structureO(
3161 io_object_t connect,
3162 uint32_t index,
3163 io_scalar_inband_t input,
3164 mach_msg_type_number_t inputCount,
3165 io_struct_inband_t output,
3166 mach_msg_type_number_t * outputCount )
3167 {
3168 uint32_t i;
3169 io_scalar_inband64_t _input;
3170
3171 mach_msg_type_number_t scalar_outputCnt = 0;
3172 mach_vm_size_t ool_output_size = 0;
3173
3174 for (i = 0; i < inputCount; i++)
3175 _input[i] = SCALAR64(input[i]);
3176
3177 return (is_io_connect_method(connect, index,
3178 _input, inputCount,
3179 NULL, 0,
3180 0, 0,
3181 NULL, &scalar_outputCnt,
3182 output, outputCount,
3183 0, &ool_output_size));
3184 }
3185
3186 kern_return_t shim_io_connect_method_scalarI_structureO(
3188 IOExternalMethod * method,
3189 IOService * object,
3190 const io_user_scalar_t * input,
3191 mach_msg_type_number_t inputCount,
3192 io_struct_inband_t output,
3193 IOByteCount * outputCount )
3194 {
3195 IOMethod func;
3196 IOReturn err;
3197
3198 err = kIOReturnBadArgument;
3199
3200 do {
3201 if( inputCount != method->count0)
3202 {
3203 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3204 continue;
3205 }
3206 if( (kIOUCVariableStructureSize != method->count1)
3207 && (*outputCount != method->count1))
3208 {
3209 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3210 continue;
3211 }
3212
3213 func = method->func;
3214
3215 switch( inputCount) {
3216
3217 case 5:
3218 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3219 ARG32(input[3]), ARG32(input[4]),
3220 output );
3221 break;
3222 case 4:
3223 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3224 ARG32(input[3]),
3225 output, (void *)outputCount );
3226 break;
3227 case 3:
3228 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3229 output, (void *)outputCount, 0 );
3230 break;
3231 case 2:
3232 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3233 output, (void *)outputCount, 0, 0 );
3234 break;
3235 case 1:
3236 err = (object->*func)( ARG32(input[0]),
3237 output, (void *)outputCount, 0, 0, 0 );
3238 break;
3239 case 0:
3240 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
3241 break;
3242
3243 default:
3244 IOLog("%s: Bad method table\n", object->getName());
3245 }
3246 }
3247 while( false);
3248
3249 return( err);
3250 }
3251
3252
3253 kern_return_t shim_io_async_method_scalarI_structureO(
3254 IOExternalAsyncMethod * method,
3255 IOService * object,
3256 mach_port_t asyncWakePort,
3257 io_user_reference_t * asyncReference,
3258 uint32_t asyncReferenceCount,
3259 const io_user_scalar_t * input,
3260 mach_msg_type_number_t inputCount,
3261 io_struct_inband_t output,
3262 mach_msg_type_number_t * outputCount )
3263 {
3264 IOAsyncMethod func;
3265 uint32_t i;
3266 IOReturn err;
3267 io_async_ref_t reference;
3268
3269 for (i = 0; i < asyncReferenceCount; i++)
3270 reference[i] = REF32(asyncReference[i]);
3271
3272 err = kIOReturnBadArgument;
3273 do {
3274 if( inputCount != method->count0)
3275 {
3276 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3277 continue;
3278 }
3279 if( (kIOUCVariableStructureSize != method->count1)
3280 && (*outputCount != method->count1))
3281 {
3282 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3283 continue;
3284 }
3285
3286 func = method->func;
3287
3288 switch( inputCount) {
3289
3290 case 5:
3291 err = (object->*func)( reference,
3292 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3293 ARG32(input[3]), ARG32(input[4]),
3294 output );
3295 break;
3296 case 4:
3297 err = (object->*func)( reference,
3298 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3299 ARG32(input[3]),
3300 output, (void *)outputCount );
3301 break;
3302 case 3:
3303 err = (object->*func)( reference,
3304 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3305 output, (void *)outputCount, 0 );
3306 break;
3307 case 2:
3308 err = (object->*func)( reference,
3309 ARG32(input[0]), ARG32(input[1]),
3310 output, (void *)outputCount, 0, 0 );
3311 break;
3312 case 1:
3313 err = (object->*func)( reference,
3314 ARG32(input[0]),
3315 output, (void *)outputCount, 0, 0, 0 );
3316 break;
3317 case 0:
3318 err = (object->*func)( reference,
3319 output, (void *)outputCount, 0, 0, 0, 0 );
3320 break;
3321
3322 default:
3323 IOLog("%s: Bad method table\n", object->getName());
3324 }
3325 }
3326 while( false);
3327
3328 return( err);
3329 }
3330
3331 /* Routine io_connect_method_scalarI_structureI */
3332 kern_return_t is_io_connect_method_scalarI_structureI(
3333 io_connect_t connect,
3334 uint32_t index,
3335 io_scalar_inband_t input,
3336 mach_msg_type_number_t inputCount,
3337 io_struct_inband_t inputStruct,
3338 mach_msg_type_number_t inputStructCount )
3339 {
3340 uint32_t i;
3341 io_scalar_inband64_t _input;
3342
3343 mach_msg_type_number_t scalar_outputCnt = 0;
3344 mach_msg_type_number_t inband_outputCnt = 0;
3345 mach_vm_size_t ool_output_size = 0;
3346
3347 for (i = 0; i < inputCount; i++)
3348 _input[i] = SCALAR64(input[i]);
3349
3350 return (is_io_connect_method(connect, index,
3351 _input, inputCount,
3352 inputStruct, inputStructCount,
3353 0, 0,
3354 NULL, &scalar_outputCnt,
3355 NULL, &inband_outputCnt,
3356 0, &ool_output_size));
3357 }
3358
3359 kern_return_t shim_io_connect_method_scalarI_structureI(
3360 IOExternalMethod * method,
3361 IOService * object,
3362 const io_user_scalar_t * input,
3363 mach_msg_type_number_t inputCount,
3364 io_struct_inband_t inputStruct,
3365 mach_msg_type_number_t inputStructCount )
3366 {
3367 IOMethod func;
3368 IOReturn err = kIOReturnBadArgument;
3369
3370 do
3371 {
3372 if( (kIOUCVariableStructureSize != method->count0)
3373 && (inputCount != method->count0))
3374 {
3375 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3376 continue;
3377 }
3378 if( (kIOUCVariableStructureSize != method->count1)
3379 && (inputStructCount != method->count1))
3380 {
3381 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3382 continue;
3383 }
3384
3385 func = method->func;
3386
3387 switch( inputCount) {
3388
3389 case 5:
3390 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3391 ARG32(input[3]), ARG32(input[4]),
3392 inputStruct );
3393 break;
3394 case 4:
3395 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3396 ARG32(input[3]),
3397 inputStruct, (void *)inputStructCount );
3398 break;
3399 case 3:
3400 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3401 inputStruct, (void *)inputStructCount,
3402 0 );
3403 break;
3404 case 2:
3405 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3406 inputStruct, (void *)inputStructCount,
3407 0, 0 );
3408 break;
3409 case 1:
3410 err = (object->*func)( ARG32(input[0]),
3411 inputStruct, (void *)inputStructCount,
3412 0, 0, 0 );
3413 break;
3414 case 0:
3415 err = (object->*func)( inputStruct, (void *)inputStructCount,
3416 0, 0, 0, 0 );
3417 break;
3418
3419 default:
3420 IOLog("%s: Bad method table\n", object->getName());
3421 }
3422 }
3423 while (false);
3424
3425 return( err);
3426 }
3427
3428 kern_return_t shim_io_async_method_scalarI_structureI(
3429 IOExternalAsyncMethod * method,
3430 IOService * object,
3431 mach_port_t asyncWakePort,
3432 io_user_reference_t * asyncReference,
3433 uint32_t asyncReferenceCount,
3434 const io_user_scalar_t * input,
3435 mach_msg_type_number_t inputCount,
3436 io_struct_inband_t inputStruct,
3437 mach_msg_type_number_t inputStructCount )
3438 {
3439 IOAsyncMethod func;
3440 uint32_t i;
3441 IOReturn err = kIOReturnBadArgument;
3442 io_async_ref_t reference;
3443
3444 for (i = 0; i < asyncReferenceCount; i++)
3445 reference[i] = REF32(asyncReference[i]);
3446
3447 do
3448 {
3449 if( (kIOUCVariableStructureSize != method->count0)
3450 && (inputCount != method->count0))
3451 {
3452 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3453 continue;
3454 }
3455 if( (kIOUCVariableStructureSize != method->count1)
3456 && (inputStructCount != method->count1))
3457 {
3458 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3459 continue;
3460 }
3461
3462 func = method->func;
3463
3464 switch( inputCount) {
3465
3466 case 5:
3467 err = (object->*func)( reference,
3468 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3469 ARG32(input[3]), ARG32(input[4]),
3470 inputStruct );
3471 break;
3472 case 4:
3473 err = (object->*func)( reference,
3474 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3475 ARG32(input[3]),
3476 inputStruct, (void *)inputStructCount );
3477 break;
3478 case 3:
3479 err = (object->*func)( reference,
3480 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3481 inputStruct, (void *)inputStructCount,
3482 0 );
3483 break;
3484 case 2:
3485 err = (object->*func)( reference,
3486 ARG32(input[0]), ARG32(input[1]),
3487 inputStruct, (void *)inputStructCount,
3488 0, 0 );
3489 break;
3490 case 1:
3491 err = (object->*func)( reference,
3492 ARG32(input[0]),
3493 inputStruct, (void *)inputStructCount,
3494 0, 0, 0 );
3495 break;
3496 case 0:
3497 err = (object->*func)( reference,
3498 inputStruct, (void *)inputStructCount,
3499 0, 0, 0, 0 );
3500 break;
3501
3502 default:
3503 IOLog("%s: Bad method table\n", object->getName());
3504 }
3505 }
3506 while (false);
3507
3508 return( err);
3509 }
3510
3511 /* Routine io_connect_method_structureI_structureO */
3512 kern_return_t is_io_connect_method_structureI_structureO(
3513 io_object_t connect,
3514 uint32_t index,
3515 io_struct_inband_t input,
3516 mach_msg_type_number_t inputCount,
3517 io_struct_inband_t output,
3518 mach_msg_type_number_t * outputCount )
3519 {
3520 mach_msg_type_number_t scalar_outputCnt = 0;
3521 mach_vm_size_t ool_output_size = 0;
3522
3523 return (is_io_connect_method(connect, index,
3524 NULL, 0,
3525 input, inputCount,
3526 0, 0,
3527 NULL, &scalar_outputCnt,
3528 output, outputCount,
3529 0, &ool_output_size));
3530 }
3531
3532 kern_return_t shim_io_connect_method_structureI_structureO(
3533 IOExternalMethod * method,
3534 IOService * object,
3535 io_struct_inband_t input,
3536 mach_msg_type_number_t inputCount,
3537 io_struct_inband_t output,
3538 IOByteCount * outputCount )
3539 {
3540 IOMethod func;
3541 IOReturn err = kIOReturnBadArgument;
3542
3543 do
3544 {
3545 if( (kIOUCVariableStructureSize != method->count0)
3546 && (inputCount != method->count0))
3547 {
3548 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3549 continue;
3550 }
3551 if( (kIOUCVariableStructureSize != method->count1)
3552 && (*outputCount != method->count1))
3553 {
3554 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3555 continue;
3556 }
3557
3558 func = method->func;
3559
3560 if( method->count1) {
3561 if( method->count0) {
3562 err = (object->*func)( input, output,
3563 (void *)inputCount, outputCount, 0, 0 );
3564 } else {
3565 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
3566 }
3567 } else {
3568 err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 );
3569 }
3570 }
3571 while( false);
3572
3573
3574 return( err);
3575 }
3576
3577 kern_return_t shim_io_async_method_structureI_structureO(
3578 IOExternalAsyncMethod * method,
3579 IOService * object,
3580 mach_port_t asyncWakePort,
3581 io_user_reference_t * asyncReference,
3582 uint32_t asyncReferenceCount,
3583 io_struct_inband_t input,
3584 mach_msg_type_number_t inputCount,
3585 io_struct_inband_t output,
3586 mach_msg_type_number_t * outputCount )
3587 {
3588 IOAsyncMethod func;
3589 uint32_t i;
3590 IOReturn err;
3591 io_async_ref_t reference;
3592
3593 for (i = 0; i < asyncReferenceCount; i++)
3594 reference[i] = REF32(asyncReference[i]);
3595
3596 err = kIOReturnBadArgument;
3597 do
3598 {
3599 if( (kIOUCVariableStructureSize != method->count0)
3600 && (inputCount != method->count0))
3601 {
3602 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3603 continue;
3604 }
3605 if( (kIOUCVariableStructureSize != method->count1)
3606 && (*outputCount != method->count1))
3607 {
3608 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3609 continue;
3610 }
3611
3612 func = method->func;
3613
3614 if( method->count1) {
3615 if( method->count0) {
3616 err = (object->*func)( reference,
3617 input, output,
3618 (void *)inputCount, outputCount, 0, 0 );
3619 } else {
3620 err = (object->*func)( reference,
3621 output, outputCount, 0, 0, 0, 0 );
3622 }
3623 } else {
3624 err = (object->*func)( reference,
3625 input, (void *)inputCount, 0, 0, 0, 0 );
3626 }
3627 }
3628 while( false);
3629
3630 return( err);
3631 }
3632
3633 /* Routine io_make_matching */
3634 kern_return_t is_io_make_matching(
3635 mach_port_t master_port,
3636 uint32_t type,
3637 uint32_t options,
3638 io_struct_inband_t input,
3639 mach_msg_type_number_t inputCount,
3640 io_string_t matching )
3641 {
3642 OSSerialize * s;
3643 IOReturn err = kIOReturnSuccess;
3644 OSDictionary * dict;
3645
3646 if( master_port != master_device_port)
3647 return( kIOReturnNotPrivileged);
3648
3649 switch( type) {
3650
3651 case kIOServiceMatching:
3652 dict = IOService::serviceMatching( gIOServiceKey );
3653 break;
3654
3655 case kIOBSDNameMatching:
3656 dict = IOBSDNameMatching( (const char *) input );
3657 break;
3658
3659 case kIOOFPathMatching:
3660 dict = IOOFPathMatching( (const char *) input,
3661 matching, sizeof( io_string_t));
3662 break;
3663
3664 default:
3665 dict = 0;
3666 }
3667
3668 if( !dict)
3669 return( kIOReturnUnsupported);
3670
3671 do {
3672 s = OSSerialize::withCapacity(4096);
3673 if( !s) {
3674 err = kIOReturnNoMemory;
3675 continue;
3676 }
3677 s->clearText();
3678 if( !dict->serialize( s )) {
3679 err = kIOReturnUnsupported;
3680 continue;
3681 }
3682
3683 if( s->getLength() > sizeof( io_string_t)) {
3684 err = kIOReturnNoMemory;
3685 continue;
3686 } else
3687 strlcpy(matching, s->text(), sizeof(io_string_t));
3688 }
3689 while( false);
3690
3691 if( s)
3692 s->release();
3693 if( dict)
3694 dict->release();
3695
3696 return( err);
3697 }
3698
3699 /* Routine io_catalog_send_data */
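// Privileged catalogue control from user space (kextd): unserializes the
// optional out-of-line XML payload and dispatches on 'flag' to add or remove
// driver personalities, kick off matching, or note kextd state changes.
// Returns KERN_SUCCESS once the copyout has succeeded; the IOReturn status
// goes in *result.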
3700 kern_return_t is_io_catalog_send_data(
3701 mach_port_t master_port,
3702 uint32_t flag,
3703 io_buf_ptr_t inData,
3704 mach_msg_type_number_t inDataCount,
3705 kern_return_t * result)
3706 {
3707 OSObject * obj = 0;
3708 vm_offset_t data;
3709 kern_return_t kr = kIOReturnError;
3710
3711 //printf("io_catalog_send_data called. flag: %d\n", flag);
3712
3713 if( master_port != master_device_port)
3714 return kIOReturnNotPrivileged;
3715
3716 if( (flag != kIOCatalogRemoveKernelLinker &&
3717 flag != kIOCatalogKextdActive &&
3718 flag != kIOCatalogKextdFinishedLaunching) &&
3719 ( !inData || !inDataCount) )
3720 {
3721 return kIOReturnBadArgument;
3722 }
3723
3724 if (inData) {
3725 vm_map_offset_t map_data;
3726
3727 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
3728 data = CAST_DOWN(vm_offset_t, map_data);
3729
3730 if( kr != KERN_SUCCESS)
3731 return kr;
3732
3733 // must return success after vm_map_copyout() succeeds
3734
3735 if( inDataCount ) {
3736 obj = (OSObject *)OSUnserializeXML((const char *)data);
3737 vm_deallocate( kernel_map, data, inDataCount );
3738 if( !obj) {
3739 *result = kIOReturnNoMemory;
3740 return( KERN_SUCCESS);
3741 }
3742 }
3743 }
3744
3745 switch ( flag ) {
3746 case kIOCatalogAddDrivers:
3747 case kIOCatalogAddDriversNoMatch: {
3748 OSArray * array;
3749
3750 array = OSDynamicCast(OSArray, obj);
3751 if ( array ) {
3752 if ( !gIOCatalogue->addDrivers( array ,
3753 flag == kIOCatalogAddDrivers) ) {
3754 kr = kIOReturnError;
3755 }
3756 }
3757 else {
3758 kr = kIOReturnBadArgument;
3759 }
3760 }
3761 break;
3762
3763 case kIOCatalogRemoveDrivers:
3764 case kIOCatalogRemoveDriversNoMatch: {
3765 OSDictionary * dict;
3766
3767 dict = OSDynamicCast(OSDictionary, obj);
3768 if ( dict ) {
3769 if ( !gIOCatalogue->removeDrivers( dict,
3770 flag == kIOCatalogRemoveDrivers ) ) {
3771 kr = kIOReturnError;
3772 }
3773 }
3774 else {
3775 kr = kIOReturnBadArgument;
3776 }
3777 }
3778 break;
3779
3780 case kIOCatalogStartMatching: {
3781 OSDictionary * dict;
3782
3783 dict = OSDynamicCast(OSDictionary, obj);
3784 if ( dict ) {
3785 if ( !gIOCatalogue->startMatching( dict ) ) {
3786 kr = kIOReturnError;
3787 }
3788 }
3789 else {
3790 kr = kIOReturnBadArgument;
3791 }
3792 }
3793 break;
3794
3795 case kIOCatalogRemoveKernelLinker:
3796 kr = KERN_NOT_SUPPORTED;
3797 break;
3798
3799 case kIOCatalogKextdActive:
3800 #if !NO_KEXTD
3801 OSKext::setKextdActive();
3802
3803 /* Dump all nonloaded startup extensions; kextd will now send them
3804 * down on request.
3805 */
3806 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
3807 #endif
3808 kr = kIOReturnSuccess;
3809 break;
3810
3811 case kIOCatalogKextdFinishedLaunching: {
3812 #if !NO_KEXTD
3813 static bool clearedBusy = false;
3814
3815 if (!clearedBusy) {
3816 IOService * serviceRoot = IOService::getServiceRoot();
3817 if (serviceRoot) {
3818 serviceRoot->adjustBusy(-1);
3819 clearedBusy = true;
3820 }
3821 }
3822 #endif
3823 kr = kIOReturnSuccess;
3824 }
3825 break;
3826
3827 default:
3828 kr = kIOReturnBadArgument;
3829 break;
3830 }
3831
3832 if (obj) obj->release();
3833
3834 *result = kr;
3835 return( KERN_SUCCESS);
3836 }
3837
3838 /* Routine io_catalog_terminate */
3839 kern_return_t is_io_catalog_terminate(
3840 mach_port_t master_port,
3841 uint32_t flag,
3842 io_name_t name )
3843 {
3844 kern_return_t kr;
3845
3846 if( master_port != master_device_port )
3847 return kIOReturnNotPrivileged;
3848
3849 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3850 kIOClientPrivilegeAdministrator );
3851 if( kIOReturnSuccess != kr)
3852 return( kr );
3853
3854 switch ( flag ) {
3855 case kIOCatalogServiceTerminate:
3856 OSIterator * iter;
3857 IOService * service;
3858
3859 iter = IORegistryIterator::iterateOver(gIOServicePlane,
3860 kIORegistryIterateRecursively);
3861 if ( !iter )
3862 return kIOReturnNoMemory;
3863
3864 do {
3865 iter->reset();
3866 while( (service = (IOService *)iter->getNextObject()) ) {
3867 if( service->metaCast(name)) {
3868 if ( !service->terminate( kIOServiceRequired
3869 | kIOServiceSynchronous) ) {
3870 kr = kIOReturnUnsupported;
3871 break;
3872 }
3873 }
3874 }
3875 } while( !service && !iter->isValid());
3876 iter->release();
3877 break;
3878
3879 case kIOCatalogModuleUnload:
3880 case kIOCatalogModuleTerminate:
3881 kr = gIOCatalogue->terminateDriversForModule(name,
3882 flag == kIOCatalogModuleUnload);
3883 break;
3884
3885 default:
3886 kr = kIOReturnBadArgument;
3887 break;
3888 }
3889
3890 return( kr );
3891 }
3892
3893 /* Routine io_catalog_get_data */
3894 kern_return_t is_io_catalog_get_data(
3895 mach_port_t master_port,
3896 uint32_t flag,
3897 io_buf_ptr_t *outData,
3898 mach_msg_type_number_t *outDataCount)
3899 {
3900 kern_return_t kr = kIOReturnSuccess;
3901 OSSerialize * s;
3902
3903 if( master_port != master_device_port)
3904 return kIOReturnNotPrivileged;
3905
3906 //printf("io_catalog_get_data called. flag: %d\n", flag);
3907
3908 s = OSSerialize::withCapacity(4096);
3909 if ( !s )
3910 return kIOReturnNoMemory;
3911
3912 s->clearText();
3913
3914 kr = gIOCatalogue->serializeData(flag, s);
3915
3916 if ( kr == kIOReturnSuccess ) {
3917 vm_offset_t data;
3918 vm_map_copy_t copy;
3919 vm_size_t size;
3920
3921 size = s->getLength();
3922 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
3923 if ( kr == kIOReturnSuccess ) {
3924 bcopy(s->text(), (void *)data, size);
3925 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
3926 (vm_map_size_t)size, true, &copy);
3927 *outData = (char *)copy;
3928 *outDataCount = size;
3929 }
3930 }
3931
3932 s->release();
3933
3934 return kr;
3935 }
3936
3937 /* Routine io_catalog_get_gen_count */
3938 kern_return_t is_io_catalog_get_gen_count(
3939 mach_port_t master_port,
3940 uint32_t *genCount)
3941 {
3942 if( master_port != master_device_port)
3943 return kIOReturnNotPrivileged;
3944
3945 //printf("io_catalog_get_gen_count called.\n");
3946
3947 if ( !genCount )
3948 return kIOReturnBadArgument;
3949
3950 *genCount = gIOCatalogue->getGenerationCount();
3951
3952 return kIOReturnSuccess;
3953 }
3954
3955 /* Routine io_catalog_module_loaded.
3956 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
3957 */
3958 kern_return_t is_io_catalog_module_loaded(
3959 mach_port_t master_port,
3960 io_name_t name)
3961 {
3962 if( master_port != master_device_port)
3963 return kIOReturnNotPrivileged;
3964
3965 //printf("io_catalog_module_loaded called. name %s\n", name);
3966
3967 if ( !name )
3968 return kIOReturnBadArgument;
3969
3970 gIOCatalogue->moduleHasLoaded(name);
3971
3972 return kIOReturnSuccess;
3973 }
3974
3975 kern_return_t is_io_catalog_reset(
3976 mach_port_t master_port,
3977 uint32_t flag)
3978 {
3979 if( master_port != master_device_port)
3980 return kIOReturnNotPrivileged;
3981
3982 switch ( flag ) {
3983 case kIOCatalogResetDefault:
3984 gIOCatalogue->reset();
3985 break;
3986
3987 default:
3988 return kIOReturnBadArgument;
3989 }
3990
3991 return kIOReturnSuccess;
3992 }
3993
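// Fast trap entry: resolves the caller's connect ref to an IOUserClient and,
// if the client publishes an IOExternalTrap for args->index, invokes it
// directly with the six trap arguments.  Unknown refs or indexes return
// kIOReturnBadArgument.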
3994 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
3995 {
3996 kern_return_t result = kIOReturnBadArgument;
3997 IOUserClient *userClient;
3998
3999 if ((userClient = OSDynamicCast(IOUserClient,
4000 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4001 IOExternalTrap *trap;
4002 IOService *target = NULL;
4003
4004 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4005
4006 if (trap && target) {
4007 IOTrap func;
4008
4009 func = trap->func;
4010
4011 if (func) {
4012 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4013 }
4014 }
4015
4016 userClient->release();
4017 }
4018
4019 return result;
4020 }
4021
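// Default IOUserClient::externalMethod(): when a dispatch entry is supplied,
// its check* counts are validated against the incoming arguments and the
// dispatch function is invoked; with no dispatch table the call falls back to
// the pre-Leopard IOExternalMethod / IOExternalAsyncMethod shims above.
//
// Illustrative sketch (the names MyUserClient, sMyMethod and sMethods are
// made up for illustration, not part of this file): a subclass commonly
// overrides externalMethod() to select a dispatch entry and then defers to
// this implementation, e.g.
//
//    static IOReturn sMyMethod(OSObject * target, void * reference,
//                              IOExternalMethodArguments * args)
//    {
//        // one scalar in, one scalar out
//        args->scalarOutput[0] = args->scalarInput[0] + 1;
//        return kIOReturnSuccess;
//    }
//
//    static const IOExternalMethodDispatch sMethods[] = {
//        { &sMyMethod, 1, 0, 1, 0 },   // 1 scalar in, 0 struct in, 1 scalar out, 0 struct out
//    };
//
//    IOReturn MyUserClient::externalMethod(uint32_t selector,
//        IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
//        OSObject * target, void * reference)
//    {
//        if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
//            dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
//            if (!target) target = this;
//        }
//        return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
//    }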
4022 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4023 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4024 {
4025 IOReturn err;
4026 IOService * object;
4027 IOByteCount structureOutputSize;
4028
4029 if (dispatch)
4030 {
4031 uint32_t count;
4032 count = dispatch->checkScalarInputCount;
4033 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4034 {
4035 return (kIOReturnBadArgument);
4036 }
4037
4038 count = dispatch->checkStructureInputSize;
4039 if ((kIOUCVariableStructureSize != count)
4040 && (count != ((args->structureInputDescriptor)
4041 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4042 {
4043 return (kIOReturnBadArgument);
4044 }
4045
4046 count = dispatch->checkScalarOutputCount;
4047 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4048 {
4049 return (kIOReturnBadArgument);
4050 }
4051
4052 count = dispatch->checkStructureOutputSize;
4053 if ((kIOUCVariableStructureSize != count)
4054 && (count != ((args->structureOutputDescriptor)
4055 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4056 {
4057 return (kIOReturnBadArgument);
4058 }
4059
4060 if (dispatch->function)
4061 err = (*dispatch->function)(target, reference, args);
4062 else
4063 err = kIOReturnNoCompletion; /* implementor can dispatch */
4064
4065 return (err);
4066 }
4067
4068
4069 // pre-Leopard APIs don't support out-of-line (ool) structs
4070 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4071 {
4072 err = kIOReturnIPCError;
4073 return (err);
4074 }
4075
4076 structureOutputSize = args->structureOutputSize;
4077
4078 if (args->asyncWakePort)
4079 {
4080 IOExternalAsyncMethod * method;
4081
4082 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
4083 return (kIOReturnUnsupported);
4084
4085 switch (method->flags & kIOUCTypeMask)
4086 {
4087 case kIOUCScalarIStructI:
4088 err = shim_io_async_method_scalarI_structureI( method, object,
4089 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4090 args->scalarInput, args->scalarInputCount,
4091 (char *)args->structureInput, args->structureInputSize );
4092 break;
4093
4094 case kIOUCScalarIScalarO:
4095 err = shim_io_async_method_scalarI_scalarO( method, object,
4096 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4097 args->scalarInput, args->scalarInputCount,
4098 args->scalarOutput, &args->scalarOutputCount );
4099 break;
4100
4101 case kIOUCScalarIStructO:
4102 err = shim_io_async_method_scalarI_structureO( method, object,
4103 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4104 args->scalarInput, args->scalarInputCount,
4105 (char *) args->structureOutput, &args->structureOutputSize );
4106 break;
4107
4108
4109 case kIOUCStructIStructO:
4110 err = shim_io_async_method_structureI_structureO( method, object,
4111 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4112 (char *)args->structureInput, args->structureInputSize,
4113 (char *) args->structureOutput, &args->structureOutputSize );
4114 break;
4115
4116 default:
4117 err = kIOReturnBadArgument;
4118 break;
4119 }
4120 }
4121 else
4122 {
4123 IOExternalMethod * method;
4124
4125 if( !(method = getTargetAndMethodForIndex(&object, selector)) )
4126 return (kIOReturnUnsupported);
4127
4128 switch (method->flags & kIOUCTypeMask)
4129 {
4130 case kIOUCScalarIStructI:
4131 err = shim_io_connect_method_scalarI_structureI( method, object,
4132 args->scalarInput, args->scalarInputCount,
4133 (char *) args->structureInput, args->structureInputSize );
4134 break;
4135
4136 case kIOUCScalarIScalarO:
4137 err = shim_io_connect_method_scalarI_scalarO( method, object,
4138 args->scalarInput, args->scalarInputCount,
4139 args->scalarOutput, &args->scalarOutputCount );
4140 break;
4141
4142 case kIOUCScalarIStructO:
4143 err = shim_io_connect_method_scalarI_structureO( method, object,
4144 args->scalarInput, args->scalarInputCount,
4145 (char *) args->structureOutput, &structureOutputSize );
4146 break;
4147
4148
4149 case kIOUCStructIStructO:
4150 err = shim_io_connect_method_structureI_structureO( method, object,
4151 (char *) args->structureInput, args->structureInputSize,
4152 (char *) args->structureOutput, &structureOutputSize );
4153 break;
4154
4155 default:
4156 err = kIOReturnBadArgument;
4157 break;
4158 }
4159 }
4160
4161 args->structureOutputSize = structureOutputSize;
4162
4163 return (err);
4164 }
4165
4166
4167 }; /* extern "C" */
4168
4169 #if __LP64__
4170 OSMetaClassDefineReservedUnused(IOUserClient, 0);
4171 OSMetaClassDefineReservedUnused(IOUserClient, 1);
4172 #else
4173 OSMetaClassDefineReservedUsed(IOUserClient, 0);
4174 OSMetaClassDefineReservedUsed(IOUserClient, 1);
4175 #endif
4176 OSMetaClassDefineReservedUnused(IOUserClient, 2);
4177 OSMetaClassDefineReservedUnused(IOUserClient, 3);
4178 OSMetaClassDefineReservedUnused(IOUserClient, 4);
4179 OSMetaClassDefineReservedUnused(IOUserClient, 5);
4180 OSMetaClassDefineReservedUnused(IOUserClient, 6);
4181 OSMetaClassDefineReservedUnused(IOUserClient, 7);
4182 OSMetaClassDefineReservedUnused(IOUserClient, 8);
4183 OSMetaClassDefineReservedUnused(IOUserClient, 9);
4184 OSMetaClassDefineReservedUnused(IOUserClient, 10);
4185 OSMetaClassDefineReservedUnused(IOUserClient, 11);
4186 OSMetaClassDefineReservedUnused(IOUserClient, 12);
4187 OSMetaClassDefineReservedUnused(IOUserClient, 13);
4188 OSMetaClassDefineReservedUnused(IOUserClient, 14);
4189 OSMetaClassDefineReservedUnused(IOUserClient, 15);
4190