[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #if CONFIG_MACF
49
50 extern "C" {
51 #include <security/mac_framework.h>
52 };
53 #include <sys/kauth.h>
54
55 #define IOMACF_LOG 0
56
57 #endif /* CONFIG_MACF */
58
59 #include <IOKit/assert.h>
60
61 #include "IOServicePrivate.h"
62 #include "IOKitKernelInternal.h"
63
64 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
65 #define SCALAR32(x) ((uint32_t )x)
66 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
67 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
68 #define REF32(x) ((int)(x))
69
70 enum
71 {
72 kIOUCAsync0Flags = 3ULL,
73 kIOUCAsync64Flag = 1ULL
74 };
75
76 #if IOKITSTATS
77
78 #define IOStatisticsRegisterCounter() \
79 do { \
80 reserved->counter = IOStatistics::registerUserClient(this); \
81 } while (0)
82
83 #define IOStatisticsUnregisterCounter() \
84 do { \
85 if (reserved) \
86 IOStatistics::unregisterUserClient(reserved->counter); \
87 } while (0)
88
89 #define IOStatisticsClientCall() \
90 do { \
91 IOStatistics::countUserClientCall(client); \
92 } while (0)
93
94 #else
95
96 #define IOStatisticsRegisterCounter()
97 #define IOStatisticsUnregisterCounter()
98 #define IOStatisticsClientCall()
99
100 #endif /* IOKITSTATS */
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 // definitions we should get from osfmk
105
106 //typedef struct ipc_port * ipc_port_t;
107 typedef natural_t ipc_kobject_type_t;
108
109 #define IKOT_IOKIT_SPARE 27
110 #define IKOT_IOKIT_CONNECT 29
111 #define IKOT_IOKIT_OBJECT 30
112
113 extern "C" {
114
115 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
116 ipc_kobject_type_t type );
117
118 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
119
120 extern mach_port_name_t iokit_make_send_right( task_t task,
121 io_object_t obj, ipc_kobject_type_t type );
122
123 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
124
125 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
126
127 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
128
129 extern ipc_port_t master_device_port;
130
131 extern void iokit_retain_port( ipc_port_t port );
132 extern void iokit_release_port( ipc_port_t port );
133 extern void iokit_release_port_send( ipc_port_t port );
134
135 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
136
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139
140 } /* extern "C" */
141
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146
147 class IOMachPort : public OSObject
148 {
149 OSDeclareDefaultStructors(IOMachPort)
150 public:
151 OSObject * object;
152 ipc_port_t port;
153 UInt32 mscount;
154 UInt8 holdDestroy;
155
156 static IOMachPort * portForObject( OSObject * obj,
157 ipc_kobject_type_t type );
158 static bool noMoreSendersForObject( OSObject * obj,
159 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
160 static void releasePortForObject( OSObject * obj,
161 ipc_kobject_type_t type );
162 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
163
164 static OSDictionary * dictForType( ipc_kobject_type_t type );
165
166 static mach_port_name_t makeSendRightForTask( task_t task,
167 io_object_t obj, ipc_kobject_type_t type );
168
169 virtual void free() APPLE_KEXT_OVERRIDE;
170 };
171
172 #define super OSObject
173 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
174
175 static IOLock * gIOObjectPortLock;
176
177 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
178
179 // not in dictForType() for debugging ease
180 static OSDictionary * gIOObjectPorts;
181 static OSDictionary * gIOConnectPorts;
182
183 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
184 {
185 OSDictionary ** dict;
186
187 if( IKOT_IOKIT_OBJECT == type )
188 dict = &gIOObjectPorts;
189 else if( IKOT_IOKIT_CONNECT == type )
190 dict = &gIOConnectPorts;
191 else
192 return( 0 );
193
194 if( 0 == *dict)
195 *dict = OSDictionary::withCapacity( 1 );
196
197 return( *dict );
198 }
199
200 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
201 ipc_kobject_type_t type )
202 {
203 IOMachPort * inst = 0;
204 OSDictionary * dict;
205
206 IOTakeLock( gIOObjectPortLock);
207
208 do {
209
210 dict = dictForType( type );
211 if( !dict)
212 continue;
213
214 if( (inst = (IOMachPort *)
215 dict->getObject( (const OSSymbol *) obj ))) {
216 inst->mscount++;
217 inst->retain();
218 continue;
219 }
220
221 inst = new IOMachPort;
222 if( inst && !inst->init()) {
223 inst = 0;
224 continue;
225 }
226
227 inst->port = iokit_alloc_object_port( obj, type );
228 if( inst->port) {
229 // retains obj
230 dict->setObject( (const OSSymbol *) obj, inst );
231 inst->mscount++;
232
233 } else {
234 inst->release();
235 inst = 0;
236 }
237
238 } while( false );
239
240 IOUnlock( gIOObjectPortLock);
241
242 return( inst );
243 }
244
245 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
246 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
247 {
248 OSDictionary * dict;
249 IOMachPort * machPort;
250 bool destroyed = true;
251
252 IOTakeLock( gIOObjectPortLock);
253
254 if( (dict = dictForType( type ))) {
255 obj->retain();
256
257 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
258 if( machPort) {
259 destroyed = (machPort->mscount <= *mscount);
260 if( destroyed)
261 dict->removeObject( (const OSSymbol *) obj );
262 else
263 *mscount = machPort->mscount;
264 }
265 obj->release();
266 }
267
268 IOUnlock( gIOObjectPortLock);
269
270 return( destroyed );
271 }
272
273 void IOMachPort::releasePortForObject( OSObject * obj,
274 ipc_kobject_type_t type )
275 {
276 OSDictionary * dict;
277 IOMachPort * machPort;
278
279 IOTakeLock( gIOObjectPortLock);
280
281 if( (dict = dictForType( type ))) {
282 obj->retain();
283 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
284 if( machPort && !machPort->holdDestroy)
285 dict->removeObject( (const OSSymbol *) obj );
286 obj->release();
287 }
288
289 IOUnlock( gIOObjectPortLock);
290 }
291
292 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
293 {
294 OSDictionary * dict;
295 IOMachPort * machPort;
296
297 IOLockLock( gIOObjectPortLock );
298
299 if( (dict = dictForType( type ))) {
300 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
301 if( machPort)
302 machPort->holdDestroy = true;
303 }
304
305 IOLockUnlock( gIOObjectPortLock );
306 }
307
308 void IOUserClient::destroyUserReferences( OSObject * obj )
309 {
310 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
311
312 // panther, 3160200
313 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
314
315 OSDictionary * dict;
316
317 IOTakeLock( gIOObjectPortLock);
318 obj->retain();
319
320 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
321 {
322 IOMachPort * port;
323 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
324 if (port)
325 {
326 IOUserClient * uc;
327 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
328 {
329 dict->setObject((const OSSymbol *) uc->mappings, port);
330 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
331
332 uc->mappings->release();
333 uc->mappings = 0;
334 }
335 dict->removeObject( (const OSSymbol *) obj );
336 }
337 }
338 obj->release();
339 IOUnlock( gIOObjectPortLock);
340 }
341
342 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
343 io_object_t obj, ipc_kobject_type_t type )
344 {
345 return( iokit_make_send_right( task, obj, type ));
346 }
347
348 void IOMachPort::free( void )
349 {
350 if( port)
351 iokit_destroy_object_port( port );
352 super::free();
353 }
354
355 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
356
357 class IOUserNotification : public OSIterator
358 {
359 OSDeclareDefaultStructors(IOUserNotification)
360
361 IONotifier * holdNotify;
362 IOLock * lock;
363
364 public:
365
366 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
367 virtual void free() APPLE_KEXT_OVERRIDE;
368
369 virtual void setNotification( IONotifier * obj );
370
371 virtual void reset() APPLE_KEXT_OVERRIDE;
372 virtual bool isValid() APPLE_KEXT_OVERRIDE;
373 };
374
375 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
376 extern "C" {
377
378 // functions called from osfmk/device/iokit_rpc.c
379
380 void
381 iokit_add_reference( io_object_t obj )
382 {
383 if( obj)
384 obj->retain();
385 }
386
387 void
388 iokit_remove_reference( io_object_t obj )
389 {
390 if( obj)
391 obj->release();
392 }
393
394 ipc_port_t
395 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
396 {
397 IOMachPort * machPort;
398 ipc_port_t port;
399
400 if( (machPort = IOMachPort::portForObject( obj, type ))) {
401
402 port = machPort->port;
403 if( port)
404 iokit_retain_port( port );
405
406 machPort->release();
407
408 } else
409 port = NULL;
410
411 return( port );
412 }
413
414 kern_return_t
415 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
416 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
417 {
418 IOUserClient * client;
419 IOMemoryMap * map;
420 IOUserNotification * notify;
421
422 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
423 return( kIOReturnNotReady );
424
425 if( IKOT_IOKIT_CONNECT == type)
426 {
427 if( (client = OSDynamicCast( IOUserClient, obj ))) {
428 IOStatisticsClientCall();
429 client->clientDied();
430 }
431 }
432 else if( IKOT_IOKIT_OBJECT == type)
433 {
434 if( (map = OSDynamicCast( IOMemoryMap, obj )))
435 map->taskDied();
436 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
437 notify->setNotification( 0 );
438 }
439
440 return( kIOReturnSuccess );
441 }
442
443 }; /* extern "C" */
444
445 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
446
447 class IOServiceUserNotification : public IOUserNotification
448 {
449 OSDeclareDefaultStructors(IOServiceUserNotification)
450
451 struct PingMsg {
452 mach_msg_header_t msgHdr;
453 OSNotificationHeader64 notifyHeader;
454 };
455
456 enum { kMaxOutstanding = 1024 };
457
458 PingMsg * pingMsg;
459 vm_size_t msgSize;
460 OSArray * newSet;
461 OSObject * lastEntry;
462 bool armed;
463
464 public:
465
466 virtual bool init( mach_port_t port, natural_t type,
467 void * reference, vm_size_t referenceSize,
468 bool clientIs64 );
469 virtual void free() APPLE_KEXT_OVERRIDE;
470
471 static bool _handler( void * target,
472 void * ref, IOService * newService, IONotifier * notifier );
473 virtual bool handler( void * ref, IOService * newService );
474
475 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
476 };
477
478 class IOServiceMessageUserNotification : public IOUserNotification
479 {
480 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
481
482 struct PingMsg {
483 mach_msg_header_t msgHdr;
484 mach_msg_body_t msgBody;
485 mach_msg_port_descriptor_t ports[1];
486 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
487 };
488
489 PingMsg * pingMsg;
490 vm_size_t msgSize;
491 uint8_t clientIs64;
492 int owningPID;
493
494 public:
495
496 virtual bool init( mach_port_t port, natural_t type,
497 void * reference, vm_size_t referenceSize,
498 vm_size_t extraSize,
499 bool clientIs64 );
500
501 virtual void free() APPLE_KEXT_OVERRIDE;
502
503 static IOReturn _handler( void * target, void * ref,
504 UInt32 messageType, IOService * provider,
505 void * messageArgument, vm_size_t argSize );
506 virtual IOReturn handler( void * ref,
507 UInt32 messageType, IOService * provider,
508 void * messageArgument, vm_size_t argSize );
509
510 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
511 };
512
513 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
514
515 #undef super
516 #define super OSIterator
517 OSDefineMetaClass( IOUserNotification, OSIterator )
518 OSDefineAbstractStructors( IOUserNotification, OSIterator )
519
520 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
521
522 bool IOUserNotification::init( void )
523 {
524 if( !super::init())
525 return( false );
526
527 lock = IOLockAlloc();
528 if( !lock)
529 return( false );
530
531 return( true );
532 }
533
534 void IOUserNotification::free( void )
535 {
536 if( holdNotify)
537 holdNotify->remove();
538 // can't be in handler now
539
540 if( lock)
541 IOLockFree( lock );
542
543 super::free();
544 }
545
546
547 void IOUserNotification::setNotification( IONotifier * notify )
548 {
549 IONotifier * previousNotify;
550
551 IOLockLock( gIOObjectPortLock);
552
553 previousNotify = holdNotify;
554 holdNotify = notify;
555
556 IOLockUnlock( gIOObjectPortLock);
557
558 if( previousNotify)
559 previousNotify->remove();
560 }
561
562 void IOUserNotification::reset()
563 {
564 // ?
565 }
566
567 bool IOUserNotification::isValid()
568 {
569 return( true );
570 }
571
572 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
573
574 #undef super
575 #define super IOUserNotification
576 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
577
578 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
579
580 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
581 void * reference, vm_size_t referenceSize,
582 bool clientIs64 )
583 {
584 if( !super::init())
585 return( false );
586
587 newSet = OSArray::withCapacity( 1 );
588 if( !newSet)
589 return( false );
590
591 if (referenceSize > sizeof(OSAsyncReference64))
592 return( false );
593
594 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
595 pingMsg = (PingMsg *) IOMalloc( msgSize);
596 if( !pingMsg)
597 return( false );
598
599 bzero( pingMsg, msgSize);
600
601 pingMsg->msgHdr.msgh_remote_port = port;
602 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
603 MACH_MSG_TYPE_COPY_SEND /*remote*/,
604 MACH_MSG_TYPE_MAKE_SEND /*local*/);
605 pingMsg->msgHdr.msgh_size = msgSize;
606 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
607
608 pingMsg->notifyHeader.size = 0;
609 pingMsg->notifyHeader.type = type;
610 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
611
612 return( true );
613 }
614
615 void IOServiceUserNotification::free( void )
616 {
617 PingMsg * _pingMsg;
618 vm_size_t _msgSize;
619 OSArray * _newSet;
620 OSObject * _lastEntry;
621
622 _pingMsg = pingMsg;
623 _msgSize = msgSize;
624 _lastEntry = lastEntry;
625 _newSet = newSet;
626
627 super::free();
628
629 if( _pingMsg && _msgSize) {
630 if (_pingMsg->msgHdr.msgh_remote_port) {
631 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
632 }
633 IOFree(_pingMsg, _msgSize);
634 }
635
636 if( _lastEntry)
637 _lastEntry->release();
638
639 if( _newSet)
640 _newSet->release();
641 }
642
643 bool IOServiceUserNotification::_handler( void * target,
644 void * ref, IOService * newService, IONotifier * notifier )
645 {
646 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
647 }
648
649 bool IOServiceUserNotification::handler( void * ref,
650 IOService * newService )
651 {
652 unsigned int count;
653 kern_return_t kr;
654 ipc_port_t port = NULL;
655 bool sendPing = false;
656
657 IOTakeLock( lock );
658
659 count = newSet->getCount();
660 if( count < kMaxOutstanding) {
661
662 newSet->setObject( newService );
663 if( (sendPing = (armed && (0 == count))))
664 armed = false;
665 }
666
667 IOUnlock( lock );
668
669 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
670 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
671
672 if( sendPing) {
673 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
674 pingMsg->msgHdr.msgh_local_port = port;
675 else
676 pingMsg->msgHdr.msgh_local_port = NULL;
677
678 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
679 pingMsg->msgHdr.msgh_size,
680 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
681 0);
682 if( port)
683 iokit_release_port( port );
684
685 if( KERN_SUCCESS != kr)
686 IOLog("%s: mach_msg_send_from_kernel_with_options {%x}\n", __FILE__, kr );
687 }
688
689 return( true );
690 }
691
692 OSObject * IOServiceUserNotification::getNextObject()
693 {
694 unsigned int count;
695 OSObject * result;
696
697 IOTakeLock( lock );
698
699 if( lastEntry)
700 lastEntry->release();
701
702 count = newSet->getCount();
703 if( count ) {
704 result = newSet->getObject( count - 1 );
705 result->retain();
706 newSet->removeObject( count - 1);
707 } else {
708 result = 0;
709 armed = true;
710 }
711 lastEntry = result;
712
713 IOUnlock( lock );
714
715 return( result );
716 }
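
/*
 * Editor's note on the flow above: handler() queues each newly matched
 * service in newSet (capped at kMaxOutstanding) and sends a single ping
 * message only when the set goes from empty to non-empty while "armed".
 * User space then drains the queue through getNextObject() (reached via
 * IOIteratorNext), which re-arms the notification once the set is empty,
 * so at most one ping is outstanding per drain cycle.
 */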
717
718 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
719
720 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
721
722 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
723
724 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
725 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
726 bool client64 )
727 {
728 if( !super::init())
729 return( false );
730
731 if (referenceSize > sizeof(OSAsyncReference64))
732 return( false );
733
734 clientIs64 = client64;
735
736 owningPID = proc_selfpid();
737
738 extraSize += sizeof(IOServiceInterestContent64);
739 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
740 pingMsg = (PingMsg *) IOMalloc( msgSize);
741 if( !pingMsg)
742 return( false );
743
744 bzero( pingMsg, msgSize);
745
746 pingMsg->msgHdr.msgh_remote_port = port;
747 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
748 | MACH_MSGH_BITS(
749 MACH_MSG_TYPE_COPY_SEND /*remote*/,
750 MACH_MSG_TYPE_MAKE_SEND /*local*/);
751 pingMsg->msgHdr.msgh_size = msgSize;
752 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
753
754 pingMsg->msgBody.msgh_descriptor_count = 1;
755
756 pingMsg->ports[0].name = 0;
757 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
758 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
759
760 pingMsg->notifyHeader.size = extraSize;
761 pingMsg->notifyHeader.type = type;
762 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
763
764 return( true );
765 }
766
767 void IOServiceMessageUserNotification::free( void )
768 {
769 PingMsg * _pingMsg;
770 vm_size_t _msgSize;
771
772 _pingMsg = pingMsg;
773 _msgSize = msgSize;
774
775 super::free();
776
777 if( _pingMsg && _msgSize) {
778 if (_pingMsg->msgHdr.msgh_remote_port) {
779 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
780 }
781 IOFree( _pingMsg, _msgSize);
782 }
783 }
784
785 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
786 UInt32 messageType, IOService * provider,
787 void * argument, vm_size_t argSize )
788 {
789 return( ((IOServiceMessageUserNotification *) target)->handler(
790 ref, messageType, provider, argument, argSize));
791 }
792
793 IOReturn IOServiceMessageUserNotification::handler( void * ref,
794 UInt32 messageType, IOService * provider,
795 void * messageArgument, vm_size_t argSize )
796 {
797 kern_return_t kr;
798 ipc_port_t thisPort, providerPort;
799 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
800 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
801 // == pingMsg->notifyHeader.content;
802
803 if (kIOMessageCopyClientID == messageType)
804 {
805 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
806 return (kIOReturnSuccess);
807 }
808
809 data->messageType = messageType;
810
811 if( argSize == 0)
812 {
813 data->messageArgument[0] = (io_user_reference_t) messageArgument;
814 if (clientIs64)
815 argSize = sizeof(data->messageArgument[0]);
816 else
817 {
818 data->messageArgument[0] |= (data->messageArgument[0] << 32);
819 argSize = sizeof(uint32_t);
820 }
821 }
822 else
823 {
824 if( argSize > kIOUserNotifyMaxMessageSize)
825 argSize = kIOUserNotifyMaxMessageSize;
826 bcopy( messageArgument, data->messageArgument, argSize );
827 }
828
829 // adjust message size for ipc restrictions
830 natural_t type;
831 type = pingMsg->notifyHeader.type;
832 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
833 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
834 pingMsg->notifyHeader.type = type;
835 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
836
837 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
838 + sizeof( IOServiceInterestContent64 )
839 - sizeof( data->messageArgument)
840 + argSize;
841
842 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
843 pingMsg->ports[0].name = providerPort;
844 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
845 pingMsg->msgHdr.msgh_local_port = thisPort;
846 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
847 pingMsg->msgHdr.msgh_size,
848 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
849 0);
850 if( thisPort)
851 iokit_release_port( thisPort );
852 if( providerPort)
853 iokit_release_port( providerPort );
854
855 if( KERN_SUCCESS != kr)
856 IOLog("%s: mach_msg_send_from_kernel_with_options {%x}\n", __FILE__, kr );
857
858 return( kIOReturnSuccess );
859 }
860
861 OSObject * IOServiceMessageUserNotification::getNextObject()
862 {
863 return( 0 );
864 }
865
866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
867
868 #undef super
869 #define super IOService
870 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
871
872 void IOUserClient::initialize( void )
873 {
874 gIOObjectPortLock = IOLockAlloc();
875
876 assert( gIOObjectPortLock );
877 }
878
879 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
880 mach_port_t wakePort,
881 void *callback, void *refcon)
882 {
883 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
884 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
885 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
886 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
887 }
888
889 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
890 mach_port_t wakePort,
891 mach_vm_address_t callback, io_user_reference_t refcon)
892 {
893 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
894 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
895 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
896 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
897 }
898
899 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
900 mach_port_t wakePort,
901 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
902 {
903 setAsyncReference64(asyncRef, wakePort, callback, refcon);
904 if (vm_map_is_64bit(get_task_map(task))) {
905 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
906 }
907 }
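
/*
 * Illustrative sketch (editor addition, not part of xnu): how a driver-side
 * IOUserClient subclass typically consumes the async reference that the code
 * above packs for it. "MyUserClient", fAsyncRef and fHaveAsyncRef are
 * hypothetical members; sendAsyncResult64() is the real call defined later
 * in this file.
 *
 *     // External method handler: stash the caller's async reference.
 *     IOReturn MyUserClient::startOperation(IOExternalMethodArguments * args)
 *     {
 *         if (!args->asyncReference)
 *             return kIOReturnBadArgument;
 *         bcopy(args->asyncReference, fAsyncRef, sizeof(OSAsyncReference64));
 *         fHaveAsyncRef = true;
 *         return kIOReturnSuccess;            // completion is delivered later
 *     }
 *
 *     // Hardware completion path: post the result back to user space.
 *     void MyUserClient::operationDone(IOReturn status, uint64_t byteCount)
 *     {
 *         if (!fHaveAsyncRef)
 *             return;
 *         io_user_reference_t results[1] = { byteCount };
 *         sendAsyncResult64(fAsyncRef, status, results, 1);
 *         fHaveAsyncRef = false;
 *     }
 */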
908
909 static OSDictionary * CopyConsoleUser(UInt32 uid)
910 {
911 OSArray * array;
912 OSDictionary * user = 0;
913
914 if ((array = OSDynamicCast(OSArray,
915 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
916 {
917 for (unsigned int idx = 0;
918 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
919 idx++) {
920 OSNumber * num;
921
922 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
923 && (uid == num->unsigned32BitValue())) {
924 user->retain();
925 break;
926 }
927 }
928 array->release();
929 }
930 return user;
931 }
932
933 static OSDictionary * CopyUserOnConsole(void)
934 {
935 OSArray * array;
936 OSDictionary * user = 0;
937
938 if ((array = OSDynamicCast(OSArray,
939 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
940 {
941 for (unsigned int idx = 0;
942 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
943 idx++)
944 {
945 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
946 {
947 user->retain();
948 break;
949 }
950 }
951 array->release();
952 }
953 return (user);
954 }
955
956 IOReturn IOUserClient::clientHasAuthorization( task_t task,
957 IOService * service )
958 {
959 proc_t p;
960
961 p = (proc_t) get_bsdtask_info(task);
962 if (p)
963 {
964 uint64_t authorizationID;
965
966 authorizationID = proc_uniqueid(p);
967 if (authorizationID)
968 {
969 if (service->getAuthorizationID() == authorizationID)
970 {
971 return (kIOReturnSuccess);
972 }
973 }
974 }
975
976 return (kIOReturnNotPermitted);
977 }
978
979 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
980 const char * privilegeName )
981 {
982 kern_return_t kr;
983 security_token_t token;
984 mach_msg_type_number_t count;
985 task_t task;
986 OSDictionary * user;
987 bool secureConsole;
988
989
990 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
991 sizeof(kIOClientPrivilegeForeground)))
992 {
993 if (task_is_gpu_denied(current_task()))
994 return (kIOReturnNotPrivileged);
995 else
996 return (kIOReturnSuccess);
997 }
998
999 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1000 sizeof(kIOClientPrivilegeConsoleSession)))
1001 {
1002 kauth_cred_t cred;
1003 proc_t p;
1004
1005 task = (task_t) securityToken;
1006 if (!task)
1007 task = current_task();
1008 p = (proc_t) get_bsdtask_info(task);
1009 kr = kIOReturnNotPrivileged;
1010
1011 if (p && (cred = kauth_cred_proc_ref(p)))
1012 {
1013 user = CopyUserOnConsole();
1014 if (user)
1015 {
1016 OSNumber * num;
1017 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1018 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1019 {
1020 kr = kIOReturnSuccess;
1021 }
1022 user->release();
1023 }
1024 kauth_cred_unref(&cred);
1025 }
1026 return (kr);
1027 }
1028
1029 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1030 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1031 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1032 else
1033 task = (task_t)securityToken;
1034
1035 count = TASK_SECURITY_TOKEN_COUNT;
1036 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1037
1038 if (KERN_SUCCESS != kr)
1039 {}
1040 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1041 sizeof(kIOClientPrivilegeAdministrator))) {
1042 if (0 != token.val[0])
1043 kr = kIOReturnNotPrivileged;
1044 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1045 sizeof(kIOClientPrivilegeLocalUser))) {
1046 user = CopyConsoleUser(token.val[0]);
1047 if ( user )
1048 user->release();
1049 else
1050 kr = kIOReturnNotPrivileged;
1051 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1052 sizeof(kIOClientPrivilegeConsoleUser))) {
1053 user = CopyConsoleUser(token.val[0]);
1054 if ( user ) {
1055 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1056 kr = kIOReturnNotPrivileged;
1057 else if ( secureConsole ) {
1058 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1059 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1060 kr = kIOReturnNotPrivileged;
1061 }
1062 user->release();
1063 }
1064 else
1065 kr = kIOReturnNotPrivileged;
1066 } else
1067 kr = kIOReturnUnsupported;
1068
1069 return (kr);
1070 }
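
/*
 * Illustrative sketch (editor addition): the usual way a driver's user client
 * gates a privileged selector with clientHasPrivilege(). "MyUserClient" and
 * kMyPrivilegedSelector are hypothetical; kIOClientPrivilegeAdministrator is
 * one of the real keys handled above.
 *
 *     IOReturn MyUserClient::externalMethod(uint32_t selector,
 *         IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *         OSObject * target, void * reference)
 *     {
 *         if (kMyPrivilegedSelector == selector) {
 *             IOReturn ret = IOUserClient::clientHasPrivilege(current_task(),
 *                                           kIOClientPrivilegeAdministrator);
 *             if (kIOReturnSuccess != ret)
 *                 return ret;
 *         }
 *         return IOUserClient::externalMethod(selector, args, dispatch,
 *                                             target, reference);
 *     }
 */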
1071
1072 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1073 const char * entitlement )
1074 {
1075 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1076
1077 proc_t p = NULL;
1078 pid_t pid = 0;
1079 char procname[MAXCOMLEN + 1] = "";
1080 size_t len = 0;
1081 void *entitlements_blob = NULL;
1082 char *entitlements_data = NULL;
1083 OSObject *entitlements_obj = NULL;
1084 OSDictionary *entitlements = NULL;
1085 OSString *errorString = NULL;
1086 OSObject *value = NULL;
1087
1088 p = (proc_t)get_bsdtask_info(task);
1089 if (p == NULL)
1090 goto fail;
1091 pid = proc_pid(p);
1092 proc_name(pid, procname, (int)sizeof(procname));
1093
1094 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1095 goto fail;
1096
1097 if (len <= offsetof(CS_GenericBlob, data))
1098 goto fail;
1099
1100 /*
1101 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1102 * we'll try to parse in the kernel.
1103 */
1104 len -= offsetof(CS_GenericBlob, data);
1105 if (len > MAX_ENTITLEMENTS_LEN) {
1106 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1107 goto fail;
1108 }
1109
1110 /*
1111 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1112 * what is stored in the entitlements blob. Copy the string and
1113 * terminate it.
1114 */
1115 entitlements_data = (char *)IOMalloc(len + 1);
1116 if (entitlements_data == NULL)
1117 goto fail;
1118 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1119 entitlements_data[len] = '\0';
1120
1121 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1122 if (errorString != NULL) {
1123 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1124 goto fail;
1125 }
1126 if (entitlements_obj == NULL)
1127 goto fail;
1128
1129 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1130 if (entitlements == NULL)
1131 goto fail;
1132
1133 /* Fetch the entitlement value from the dictionary. */
1134 value = entitlements->getObject(entitlement);
1135 if (value != NULL)
1136 value->retain();
1137
1138 fail:
1139 if (entitlements_data != NULL)
1140 IOFree(entitlements_data, len + 1);
1141 if (entitlements_obj != NULL)
1142 entitlements_obj->release();
1143 if (errorString != NULL)
1144 errorString->release();
1145 return value;
1146 }
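
/*
 * Illustrative sketch (editor addition): checking a boolean entitlement with
 * copyClientEntitlement() when a connection is opened. The entitlement string
 * and "MyUserClient" are hypothetical placeholders.
 *
 *     bool MyUserClient::initWithTask(task_t owningTask, void * securityID,
 *                                     UInt32 type, OSDictionary * properties)
 *     {
 *         OSObject * entitled = IOUserClient::copyClientEntitlement(
 *             owningTask, "com.example.driver.allow-access");
 *         bool ok = (kOSBooleanTrue == entitled);
 *         if (entitled)
 *             entitled->release();
 *         if (!ok)
 *             return false;
 *         return IOUserClient::initWithTask(owningTask, securityID, type, properties);
 *     }
 */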
1147
1148 bool IOUserClient::init()
1149 {
1150 if (getPropertyTable() || super::init())
1151 return reserve();
1152
1153 return false;
1154 }
1155
1156 bool IOUserClient::init(OSDictionary * dictionary)
1157 {
1158 if (getPropertyTable() || super::init(dictionary))
1159 return reserve();
1160
1161 return false;
1162 }
1163
1164 bool IOUserClient::initWithTask(task_t owningTask,
1165 void * securityID,
1166 UInt32 type )
1167 {
1168 if (getPropertyTable() || super::init())
1169 return reserve();
1170
1171 return false;
1172 }
1173
1174 bool IOUserClient::initWithTask(task_t owningTask,
1175 void * securityID,
1176 UInt32 type,
1177 OSDictionary * properties )
1178 {
1179 bool ok;
1180
1181 ok = super::init( properties );
1182 ok &= initWithTask( owningTask, securityID, type );
1183
1184 return( ok );
1185 }
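
/*
 * Illustrative sketch (editor addition): the user-space call that ultimately
 * lands in initWithTask() above. The third argument to IOServiceOpen() arrives
 * here as the UInt32 "type" parameter. The service class name is hypothetical.
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
 *                                IOServiceMatching("com_example_Driver"));
 *     io_connect_t connect = IO_OBJECT_NULL;
 *     if (service != IO_OBJECT_NULL) {
 *         kern_return_t kr = IOServiceOpen(service, mach_task_self(),
 *                                          0,          // connection "type"
 *                                          &connect);
 *         // ... IOConnectCall*Method(connect, ...) ...
 *         if (kr == KERN_SUCCESS)
 *             IOServiceClose(connect);
 *         IOObjectRelease(service);
 *     }
 */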
1186
1187 bool IOUserClient::reserve()
1188 {
1189 if(!reserved) {
1190 reserved = IONew(ExpansionData, 1);
1191 if (!reserved) {
1192 return false;
1193 }
1194 }
1195 setTerminateDefer(NULL, true);
1196 IOStatisticsRegisterCounter();
1197
1198 return true;
1199 }
1200
1201 void IOUserClient::free()
1202 {
1203 if( mappings)
1204 mappings->release();
1205
1206 IOStatisticsUnregisterCounter();
1207
1208 if (reserved)
1209 IODelete(reserved, ExpansionData, 1);
1210
1211 super::free();
1212 }
1213
1214 IOReturn IOUserClient::clientDied( void )
1215 {
1216 return( clientClose());
1217 }
1218
1219 IOReturn IOUserClient::clientClose( void )
1220 {
1221 return( kIOReturnUnsupported );
1222 }
1223
1224 IOService * IOUserClient::getService( void )
1225 {
1226 return( 0 );
1227 }
1228
1229 IOReturn IOUserClient::registerNotificationPort(
1230 mach_port_t /* port */,
1231 UInt32 /* type */,
1232 UInt32 /* refCon */)
1233 {
1234 return( kIOReturnUnsupported);
1235 }
1236
1237 IOReturn IOUserClient::registerNotificationPort(
1238 mach_port_t port,
1239 UInt32 type,
1240 io_user_reference_t refCon)
1241 {
1242 return (registerNotificationPort(port, type, (UInt32) refCon));
1243 }
1244
1245 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1246 semaphore_t * semaphore )
1247 {
1248 return( kIOReturnUnsupported);
1249 }
1250
1251 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1252 {
1253 return( kIOReturnUnsupported);
1254 }
1255
1256 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1257 IOOptionBits * options,
1258 IOMemoryDescriptor ** memory )
1259 {
1260 return( kIOReturnUnsupported);
1261 }
1262
1263 #if !__LP64__
1264 IOMemoryMap * IOUserClient::mapClientMemory(
1265 IOOptionBits type,
1266 task_t task,
1267 IOOptionBits mapFlags,
1268 IOVirtualAddress atAddress )
1269 {
1270 return (NULL);
1271 }
1272 #endif
1273
1274 IOMemoryMap * IOUserClient::mapClientMemory64(
1275 IOOptionBits type,
1276 task_t task,
1277 IOOptionBits mapFlags,
1278 mach_vm_address_t atAddress )
1279 {
1280 IOReturn err;
1281 IOOptionBits options = 0;
1282 IOMemoryDescriptor * memory;
1283 IOMemoryMap * map = 0;
1284
1285 err = clientMemoryForType( (UInt32) type, &options, &memory );
1286
1287 if( memory && (kIOReturnSuccess == err)) {
1288
1289 options = (options & ~kIOMapUserOptionsMask)
1290 | (mapFlags & kIOMapUserOptionsMask);
1291 map = memory->createMappingInTask( task, atAddress, options );
1292 memory->release();
1293 }
1294
1295 return( map );
1296 }
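
/*
 * Illustrative sketch (editor addition): the driver override that feeds
 * mapClientMemory64() above, together with the matching user-space call.
 * "MyUserClient" and fSharedBuffer are hypothetical.
 *
 *     IOReturn MyUserClient::clientMemoryForType(UInt32 type,
 *         IOOptionBits * options, IOMemoryDescriptor ** memory)
 *     {
 *         if ((type != 0) || !fSharedBuffer)
 *             return kIOReturnBadArgument;
 *         fSharedBuffer->retain();       // caller releases after mapping
 *         *memory = fSharedBuffer;       // e.g. an IOBufferMemoryDescriptor
 *         return kIOReturnSuccess;
 *     }
 *
 *     // User space (IOKit.framework):
 *     //     mach_vm_address_t addr = 0;
 *     //     mach_vm_size_t    size = 0;
 *     //     IOConnectMapMemory64(connect, 0, mach_task_self(),
 *     //                          &addr, &size, kIOMapAnywhere);
 */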
1297
1298 IOReturn IOUserClient::exportObjectToClient(task_t task,
1299 OSObject *obj, io_object_t *clientObj)
1300 {
1301 mach_port_name_t name;
1302
1303 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1304
1305 *(mach_port_name_t *)clientObj = name;
1306 return kIOReturnSuccess;
1307 }
1308
1309 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1310 {
1311 return( 0 );
1312 }
1313
1314 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1315 {
1316 return( 0 );
1317 }
1318
1319 IOExternalMethod * IOUserClient::
1320 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1321 {
1322 IOExternalMethod *method = getExternalMethodForIndex(index);
1323
1324 if (method)
1325 *targetP = (IOService *) method->object;
1326
1327 return method;
1328 }
1329
1330 IOExternalAsyncMethod * IOUserClient::
1331 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1332 {
1333 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1334
1335 if (method)
1336 *targetP = (IOService *) method->object;
1337
1338 return method;
1339 }
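
/*
 * Illustrative sketch (editor addition): the legacy dispatch these hooks
 * serve. An older driver publishes a static IOExternalMethod table and hands
 * entries back from getTargetAndMethodForIndex(). Names are hypothetical;
 * newer drivers override externalMethod() with IOExternalMethodDispatch
 * tables instead.
 *
 *     static IOExternalMethod sMethods[] = {
 *         // object, func, flags, count0 (scalars in), count1 (scalars out)
 *         { NULL, (IOMethod) &MyUserClient::doOpen, kIOUCScalarIScalarO, 1, 0 },
 *         { NULL, (IOMethod) &MyUserClient::doRead, kIOUCScalarIScalarO, 2, 1 },
 *     };
 *
 *     IOExternalMethod * MyUserClient::getTargetAndMethodForIndex(
 *         IOService ** targetP, UInt32 index)
 *     {
 *         if (index >= (sizeof(sMethods) / sizeof(sMethods[0])))
 *             return NULL;
 *         *targetP = this;               // the method runs on the user client
 *         return &sMethods[index];
 *     }
 */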
1340
1341 IOExternalTrap * IOUserClient::
1342 getExternalTrapForIndex(UInt32 index)
1343 {
1344 return NULL;
1345 }
1346
1347 IOExternalTrap * IOUserClient::
1348 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1349 {
1350 IOExternalTrap *trap = getExternalTrapForIndex(index);
1351
1352 if (trap) {
1353 *targetP = trap->object;
1354 }
1355
1356 return trap;
1357 }
1358
1359 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1360 {
1361 mach_port_t port;
1362 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1363
1364 if (MACH_PORT_NULL != port)
1365 iokit_release_port_send(port);
1366
1367 return (kIOReturnSuccess);
1368 }
1369
1370 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1371 {
1372 if (MACH_PORT_NULL != port)
1373 iokit_release_port_send(port);
1374
1375 return (kIOReturnSuccess);
1376 }
1377
1378 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1379 IOReturn result, void *args[], UInt32 numArgs)
1380 {
1381 OSAsyncReference64 reference64;
1382 io_user_reference_t args64[kMaxAsyncArgs];
1383 unsigned int idx;
1384
1385 if (numArgs > kMaxAsyncArgs)
1386 return kIOReturnMessageTooLarge;
1387
1388 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1389 reference64[idx] = REF64(reference[idx]);
1390
1391 for (idx = 0; idx < numArgs; idx++)
1392 args64[idx] = REF64(args[idx]);
1393
1394 return (sendAsyncResult64(reference64, result, args64, numArgs));
1395 }
1396
1397 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1398 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1399 {
1400 return _sendAsyncResult64(reference, result, args, numArgs, options);
1401 }
1402
1403 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1404 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1405 {
1406 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1407 }
1408
1409 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1410 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1411 {
1412 struct ReplyMsg
1413 {
1414 mach_msg_header_t msgHdr;
1415 union
1416 {
1417 struct
1418 {
1419 OSNotificationHeader notifyHdr;
1420 IOAsyncCompletionContent asyncContent;
1421 uint32_t args[kMaxAsyncArgs];
1422 } msg32;
1423 struct
1424 {
1425 OSNotificationHeader64 notifyHdr;
1426 IOAsyncCompletionContent asyncContent;
1427 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1428 } msg64;
1429 } m;
1430 };
1431 ReplyMsg replyMsg;
1432 mach_port_t replyPort;
1433 kern_return_t kr;
1434
1435 // If no reply port, do nothing.
1436 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1437 if (replyPort == MACH_PORT_NULL)
1438 return kIOReturnSuccess;
1439
1440 if (numArgs > kMaxAsyncArgs)
1441 return kIOReturnMessageTooLarge;
1442
1443 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1444 0 /*local*/);
1445 replyMsg.msgHdr.msgh_remote_port = replyPort;
1446 replyMsg.msgHdr.msgh_local_port = 0;
1447 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1448 if (kIOUCAsync64Flag & reference[0])
1449 {
1450 replyMsg.msgHdr.msgh_size =
1451 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1452 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1453 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1454 + numArgs * sizeof(io_user_reference_t);
1455 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1456 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1457
1458 replyMsg.m.msg64.asyncContent.result = result;
1459 if (numArgs)
1460 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1461 }
1462 else
1463 {
1464 unsigned int idx;
1465
1466 replyMsg.msgHdr.msgh_size =
1467 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1468 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1469
1470 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1471 + numArgs * sizeof(uint32_t);
1472 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1473
1474 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1475 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1476
1477 replyMsg.m.msg32.asyncContent.result = result;
1478
1479 for (idx = 0; idx < numArgs; idx++)
1480 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1481 }
1482
1483 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1484 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1485 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1486 } else {
1487 /* Fail on full queue. */
1488 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1489 replyMsg.msgHdr.msgh_size);
1490 }
1491 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1492 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1493 return kr;
1494 }
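
/*
 * Illustrative sketch (editor addition): the user-space setup that receives
 * the reply messages built by _sendAsyncResult64() above. kMySelector, the
 * callback and myContext are hypothetical; the IOKit.framework calls are real.
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     static void MyCallback(void * refcon, IOReturn result,
 *                            void ** args, uint32_t numArgs)
 *     {
 *         // handle completion; args[] carries the kernel-supplied values
 *     }
 *
 *     IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
 *     CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *                        IONotificationPortGetRunLoopSource(port),
 *                        kCFRunLoopDefaultMode);
 *
 *     io_async_ref64_t asyncRef = {0};
 *     asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t)(uintptr_t) &MyCallback;
 *     asyncRef[kIOAsyncCalloutRefconIndex] = (io_user_reference_t)(uintptr_t) myContext;
 *
 *     uint32_t outputCnt = 0;
 *     IOConnectCallAsyncScalarMethod(connect, kMySelector,
 *         IONotificationPortGetMachPort(port), asyncRef, kOSAsyncRef64Count,
 *         NULL, 0, NULL, &outputCnt);
 */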
1495
1496
1497 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1498
1499 extern "C" {
1500
1501 #define CHECK(cls,obj,out) \
1502 cls * out; \
1503 if( !(out = OSDynamicCast( cls, obj))) \
1504 return( kIOReturnBadArgument )
1505
1506 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1507
1508 // Create a vm_map_copy_t or kalloc'ed data for memory
1509 // to be copied out. ipc will free after the copyout.
1510
1511 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1512 io_buf_ptr_t * buf )
1513 {
1514 kern_return_t err;
1515 vm_map_copy_t copy;
1516
1517 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1518 false /* src_destroy */, &copy);
1519
1520 assert( err == KERN_SUCCESS );
1521 if( err == KERN_SUCCESS )
1522 *buf = (char *) copy;
1523
1524 return( err );
1525 }
1526
1527 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1528
1529 /* Routine io_server_version */
1530 kern_return_t is_io_server_version(
1531 mach_port_t master_port,
1532 uint64_t *version)
1533 {
1534 *version = IOKIT_SERVER_VERSION;
1535 return (kIOReturnSuccess);
1536 }
1537
1538 /* Routine io_object_get_class */
1539 kern_return_t is_io_object_get_class(
1540 io_object_t object,
1541 io_name_t className )
1542 {
1543 const OSMetaClass* my_obj = NULL;
1544 const char * my_class_name = NULL;
1545
1546 if( !object)
1547 return( kIOReturnBadArgument );
1548
1549 if ( !my_class_name ) {
1550 my_obj = object->getMetaClass();
1551 if (!my_obj) {
1552 return (kIOReturnNotFound);
1553 }
1554
1555 my_class_name = my_obj->getClassName();
1556 }
1557
1558 strlcpy( className, my_class_name, sizeof(io_name_t));
1559
1560 return( kIOReturnSuccess );
1561 }
1562
1563 /* Routine io_object_get_superclass */
1564 kern_return_t is_io_object_get_superclass(
1565 mach_port_t master_port,
1566 io_name_t obj_name,
1567 io_name_t class_name)
1568 {
1569 const OSMetaClass* my_obj = NULL;
1570 const OSMetaClass* superclass = NULL;
1571 const OSSymbol *my_name = NULL;
1572 const char *my_cstr = NULL;
1573
1574 if (!obj_name || !class_name)
1575 return (kIOReturnBadArgument);
1576
1577 if( master_port != master_device_port)
1578 return( kIOReturnNotPrivileged);
1579
1580 my_name = OSSymbol::withCString(obj_name);
1581
1582 if (my_name) {
1583 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1584 my_name->release();
1585 }
1586 if (my_obj) {
1587 superclass = my_obj->getSuperClass();
1588 }
1589
1590 if (!superclass) {
1591 return( kIOReturnNotFound );
1592 }
1593
1594 my_cstr = superclass->getClassName();
1595
1596 if (my_cstr) {
1597 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1598 return( kIOReturnSuccess );
1599 }
1600 return (kIOReturnNotFound);
1601 }
1602
1603 /* Routine io_object_get_bundle_identifier */
1604 kern_return_t is_io_object_get_bundle_identifier(
1605 mach_port_t master_port,
1606 io_name_t obj_name,
1607 io_name_t bundle_name)
1608 {
1609 const OSMetaClass* my_obj = NULL;
1610 const OSSymbol *my_name = NULL;
1611 const OSSymbol *identifier = NULL;
1612 const char *my_cstr = NULL;
1613
1614 if (!obj_name || !bundle_name)
1615 return (kIOReturnBadArgument);
1616
1617 if( master_port != master_device_port)
1618 return( kIOReturnNotPrivileged);
1619
1620 my_name = OSSymbol::withCString(obj_name);
1621
1622 if (my_name) {
1623 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1624 my_name->release();
1625 }
1626
1627 if (my_obj) {
1628 identifier = my_obj->getKmodName();
1629 }
1630 if (!identifier) {
1631 return( kIOReturnNotFound );
1632 }
1633
1634 my_cstr = identifier->getCStringNoCopy();
1635 if (my_cstr) {
1636 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1637 return( kIOReturnSuccess );
1638 }
1639
1640 return (kIOReturnBadArgument);
1641 }
1642
1643 /* Routine io_object_conforms_to */
1644 kern_return_t is_io_object_conforms_to(
1645 io_object_t object,
1646 io_name_t className,
1647 boolean_t *conforms )
1648 {
1649 if( !object)
1650 return( kIOReturnBadArgument );
1651
1652 *conforms = (0 != object->metaCast( className ));
1653
1654 return( kIOReturnSuccess );
1655 }
1656
1657 /* Routine io_object_get_retain_count */
1658 kern_return_t is_io_object_get_retain_count(
1659 io_object_t object,
1660 uint32_t *retainCount )
1661 {
1662 if( !object)
1663 return( kIOReturnBadArgument );
1664
1665 *retainCount = object->getRetainCount();
1666 return( kIOReturnSuccess );
1667 }
1668
1669 /* Routine io_iterator_next */
1670 kern_return_t is_io_iterator_next(
1671 io_object_t iterator,
1672 io_object_t *object )
1673 {
1674 OSObject * obj;
1675
1676 CHECK( OSIterator, iterator, iter );
1677
1678 obj = iter->getNextObject();
1679 if( obj) {
1680 obj->retain();
1681 *object = obj;
1682 return( kIOReturnSuccess );
1683 } else
1684 return( kIOReturnNoDevice );
1685 }
1686
1687 /* Routine io_iterator_reset */
1688 kern_return_t is_io_iterator_reset(
1689 io_object_t iterator )
1690 {
1691 CHECK( OSIterator, iterator, iter );
1692
1693 iter->reset();
1694
1695 return( kIOReturnSuccess );
1696 }
1697
1698 /* Routine io_iterator_is_valid */
1699 kern_return_t is_io_iterator_is_valid(
1700 io_object_t iterator,
1701 boolean_t *is_valid )
1702 {
1703 CHECK( OSIterator, iterator, iter );
1704
1705 *is_valid = iter->isValid();
1706
1707 return( kIOReturnSuccess );
1708 }
1709
1710
1711 static kern_return_t internal_io_service_match_property_table(
1712 io_service_t _service,
1713 const char * matching,
1714 mach_msg_type_number_t matching_size,
1715 boolean_t *matches)
1716 {
1717 CHECK( IOService, _service, service );
1718
1719 kern_return_t kr;
1720 OSObject * obj;
1721 OSDictionary * dict;
1722
1723 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1724 : OSUnserializeXML(matching);
1725 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1726 *matches = service->passiveMatch( dict );
1727 kr = kIOReturnSuccess;
1728 } else
1729 kr = kIOReturnBadArgument;
1730
1731 if( obj)
1732 obj->release();
1733
1734 return( kr );
1735 }
1736
1737 /* Routine io_service_match_property_table */
1738 kern_return_t is_io_service_match_property_table(
1739 io_service_t service,
1740 io_string_t matching,
1741 boolean_t *matches )
1742 {
1743 return (internal_io_service_match_property_table(service, matching, 0, matches));
1744 }
1745
1746
1747 /* Routine io_service_match_property_table_ool */
1748 kern_return_t is_io_service_match_property_table_ool(
1749 io_object_t service,
1750 io_buf_ptr_t matching,
1751 mach_msg_type_number_t matchingCnt,
1752 kern_return_t *result,
1753 boolean_t *matches )
1754 {
1755 kern_return_t kr;
1756 vm_offset_t data;
1757 vm_map_offset_t map_data;
1758
1759 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1760 data = CAST_DOWN(vm_offset_t, map_data);
1761
1762 if( KERN_SUCCESS == kr) {
1763 // must return success after vm_map_copyout() succeeds
1764 *result = internal_io_service_match_property_table(service,
1765 (const char *)data, matchingCnt, matches );
1766 vm_deallocate( kernel_map, data, matchingCnt );
1767 }
1768
1769 return( kr );
1770 }
1771
1772 /* Routine io_service_match_property_table_bin */
1773 kern_return_t is_io_service_match_property_table_bin(
1774 io_object_t service,
1775 io_struct_inband_t matching,
1776 mach_msg_type_number_t matchingCnt,
1777 boolean_t *matches)
1778 {
1779 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
1780 }
1781
1782 static kern_return_t internal_io_service_get_matching_services(
1783 mach_port_t master_port,
1784 const char * matching,
1785 mach_msg_type_number_t matching_size,
1786 io_iterator_t *existing )
1787 {
1788 kern_return_t kr;
1789 OSObject * obj;
1790 OSDictionary * dict;
1791
1792 if( master_port != master_device_port)
1793 return( kIOReturnNotPrivileged);
1794
1795 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1796 : OSUnserializeXML(matching);
1797 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1798 *existing = IOService::getMatchingServices( dict );
1799 kr = kIOReturnSuccess;
1800 } else
1801 kr = kIOReturnBadArgument;
1802
1803 if( obj)
1804 obj->release();
1805
1806 return( kr );
1807 }
1808
1809 /* Routine io_service_get_matching_services */
1810 kern_return_t is_io_service_get_matching_services(
1811 mach_port_t master_port,
1812 io_string_t matching,
1813 io_iterator_t *existing )
1814 {
1815 return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
1816 }
1817
1818 /* Routine io_service_get_matching_services_ool */
1819 kern_return_t is_io_service_get_matching_services_ool(
1820 mach_port_t master_port,
1821 io_buf_ptr_t matching,
1822 mach_msg_type_number_t matchingCnt,
1823 kern_return_t *result,
1824 io_object_t *existing )
1825 {
1826 kern_return_t kr;
1827 vm_offset_t data;
1828 vm_map_offset_t map_data;
1829
1830 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1831 data = CAST_DOWN(vm_offset_t, map_data);
1832
1833 if( KERN_SUCCESS == kr) {
1834 // must return success after vm_map_copyout() succeeds
1835 *result = internal_io_service_get_matching_services(master_port,
1836 (const char *) data, matchingCnt, existing);
1837 vm_deallocate( kernel_map, data, matchingCnt );
1838 }
1839
1840 return( kr );
1841 }
1842
1843 /* Routine io_service_get_matching_services_bin */
1844 kern_return_t is_io_service_get_matching_services_bin(
1845 mach_port_t master_port,
1846 io_struct_inband_t matching,
1847 mach_msg_type_number_t matchingCnt,
1848 io_object_t *existing)
1849 {
1850 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
1851 }
1852
1853
1854 static kern_return_t internal_io_service_get_matching_service(
1855 mach_port_t master_port,
1856 const char * matching,
1857 mach_msg_type_number_t matching_size,
1858 io_service_t *service )
1859 {
1860 kern_return_t kr;
1861 OSObject * obj;
1862 OSDictionary * dict;
1863
1864 if( master_port != master_device_port)
1865 return( kIOReturnNotPrivileged);
1866
1867 obj = matching_size ? OSUnserializeXML(matching, matching_size)
1868 : OSUnserializeXML(matching);
1869 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1870 *service = IOService::copyMatchingService( dict );
1871 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
1872 } else
1873 kr = kIOReturnBadArgument;
1874
1875 if( obj)
1876 obj->release();
1877
1878 return( kr );
1879 }
1880
1881 /* Routine io_service_get_matching_service */
1882 kern_return_t is_io_service_get_matching_service(
1883 mach_port_t master_port,
1884 io_string_t matching,
1885 io_service_t *service )
1886 {
1887 return (internal_io_service_get_matching_service(master_port, matching, 0, service));
1888 }
1889
1890 /* Routine io_service_get_matching_services_ool */
1891 kern_return_t is_io_service_get_matching_service_ool(
1892 mach_port_t master_port,
1893 io_buf_ptr_t matching,
1894 mach_msg_type_number_t matchingCnt,
1895 kern_return_t *result,
1896 io_object_t *service )
1897 {
1898 kern_return_t kr;
1899 vm_offset_t data;
1900 vm_map_offset_t map_data;
1901
1902 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1903 data = CAST_DOWN(vm_offset_t, map_data);
1904
1905 if( KERN_SUCCESS == kr) {
1906 // must return success after vm_map_copyout() succeeds
1907 *result = internal_io_service_get_matching_service(master_port,
1908 (const char *) data, matchingCnt, service );
1909 vm_deallocate( kernel_map, data, matchingCnt );
1910 }
1911
1912 return( kr );
1913 }
1914
1915 /* Routine io_service_get_matching_service_bin */
1916 kern_return_t is_io_service_get_matching_service_bin(
1917 mach_port_t master_port,
1918 io_struct_inband_t matching,
1919 mach_msg_type_number_t matchingCnt,
1920 io_object_t *service)
1921 {
1922 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
1923 }
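
/*
 * Illustrative sketch (editor addition): the IOKit.framework calls that end up
 * in the matching routines above. "IOUSBHostDevice" is only an example class.
 *
 *     #include <IOKit/IOKitLib.h>
 *
 *     io_iterator_t iter = IO_OBJECT_NULL;
 *     kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                            IOServiceMatching("IOUSBHostDevice"), &iter);
 *     if (kr == KERN_SUCCESS) {
 *         io_service_t svc;
 *         while ((svc = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
 *             // ... inspect or IOServiceOpen() the service ...
 *             IOObjectRelease(svc);
 *         }
 *         IOObjectRelease(iter);
 *     }
 */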
1924
1925 static kern_return_t internal_io_service_add_notification(
1926 mach_port_t master_port,
1927 io_name_t notification_type,
1928 const char * matching,
1929 size_t matching_size,
1930 mach_port_t port,
1931 void * reference,
1932 vm_size_t referenceSize,
1933 bool client64,
1934 io_object_t * notification )
1935 {
1936 IOServiceUserNotification * userNotify = 0;
1937 IONotifier * notify = 0;
1938 const OSSymbol * sym;
1939 OSDictionary * dict;
1940 IOReturn err;
1941 unsigned long int userMsgType;
1942
1943 if( master_port != master_device_port)
1944 return( kIOReturnNotPrivileged);
1945
1946 do {
1947 err = kIOReturnNoResources;
1948
1949 if( !(sym = OSSymbol::withCString( notification_type )))
1950 err = kIOReturnNoResources;
1951
1952 if (matching_size)
1953 {
1954 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
1955 }
1956 else
1957 {
1958 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
1959 }
1960
1961 if (!dict) {
1962 err = kIOReturnBadArgument;
1963 continue;
1964 }
1965
1966 if( (sym == gIOPublishNotification)
1967 || (sym == gIOFirstPublishNotification))
1968 userMsgType = kIOServicePublishNotificationType;
1969 else if( (sym == gIOMatchedNotification)
1970 || (sym == gIOFirstMatchNotification))
1971 userMsgType = kIOServiceMatchedNotificationType;
1972 else if( sym == gIOTerminatedNotification)
1973 userMsgType = kIOServiceTerminatedNotificationType;
1974 else
1975 userMsgType = kLastIOKitNotificationType;
1976
1977 userNotify = new IOServiceUserNotification;
1978
1979 if( userNotify && !userNotify->init( port, userMsgType,
1980 reference, referenceSize, client64)) {
1981 iokit_release_port_send(port);
1982 userNotify->release();
1983 userNotify = 0;
1984 }
1985 if( !userNotify)
1986 continue;
1987
1988 notify = IOService::addMatchingNotification( sym, dict,
1989 &userNotify->_handler, userNotify );
1990 if( notify) {
1991 *notification = userNotify;
1992 userNotify->setNotification( notify );
1993 err = kIOReturnSuccess;
1994 } else
1995 err = kIOReturnUnsupported;
1996
1997 } while( false );
1998
1999 if( sym)
2000 sym->release();
2001 if( dict)
2002 dict->release();
2003
2004 return( err );
2005 }
2006
2007
2008 /* Routine io_service_add_notification */
2009 kern_return_t is_io_service_add_notification(
2010 mach_port_t master_port,
2011 io_name_t notification_type,
2012 io_string_t matching,
2013 mach_port_t port,
2014 io_async_ref_t reference,
2015 mach_msg_type_number_t referenceCnt,
2016 io_object_t * notification )
2017 {
2018 return (internal_io_service_add_notification(master_port, notification_type,
2019 matching, 0, port, &reference[0], sizeof(io_async_ref_t),
2020 false, notification));
2021 }
2022
2023 /* Routine io_service_add_notification_64 */
2024 kern_return_t is_io_service_add_notification_64(
2025 mach_port_t master_port,
2026 io_name_t notification_type,
2027 io_string_t matching,
2028 mach_port_t wake_port,
2029 io_async_ref64_t reference,
2030 mach_msg_type_number_t referenceCnt,
2031 io_object_t *notification )
2032 {
2033 return (internal_io_service_add_notification(master_port, notification_type,
2034 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t),
2035 true, notification));
2036 }
2037
2038 /* Routine io_service_add_notification_bin */
2039 kern_return_t is_io_service_add_notification_bin
2040 (
2041 mach_port_t master_port,
2042 io_name_t notification_type,
2043 io_struct_inband_t matching,
2044 mach_msg_type_number_t matchingCnt,
2045 mach_port_t wake_port,
2046 io_async_ref_t reference,
2047 mach_msg_type_number_t referenceCnt,
2048 io_object_t *notification)
2049 {
2050 return (internal_io_service_add_notification(master_port, notification_type,
2051 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2052 false, notification));
2053 }
2054
2055 /* Routine io_service_add_notification_bin_64 */
2056 kern_return_t is_io_service_add_notification_bin_64
2057 (
2058 mach_port_t master_port,
2059 io_name_t notification_type,
2060 io_struct_inband_t matching,
2061 mach_msg_type_number_t matchingCnt,
2062 mach_port_t wake_port,
2063 io_async_ref64_t reference,
2064 mach_msg_type_number_t referenceCnt,
2065 io_object_t *notification)
2066 {
2067 return (internal_io_service_add_notification(master_port, notification_type,
2068 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2069 true, notification));
2070 }
2071
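// Out-of-line variant: copy the OOL matching blob into the kernel map, run the inline
// worker above, then free the copy. Once vm_map_copyout() has consumed the copy object
// the MIG return must stay KERN_SUCCESS; the real status is passed back in *result.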
2072 static kern_return_t internal_io_service_add_notification_ool(
2073 mach_port_t master_port,
2074 io_name_t notification_type,
2075 io_buf_ptr_t matching,
2076 mach_msg_type_number_t matchingCnt,
2077 mach_port_t wake_port,
2078 void * reference,
2079 vm_size_t referenceSize,
2080 bool client64,
2081 kern_return_t *result,
2082 io_object_t *notification )
2083 {
2084 kern_return_t kr;
2085 vm_offset_t data;
2086 vm_map_offset_t map_data;
2087
2088 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2089 data = CAST_DOWN(vm_offset_t, map_data);
2090
2091 if( KERN_SUCCESS == kr) {
2092 // must return success after vm_map_copyout() succeeds
2093 *result = internal_io_service_add_notification( master_port, notification_type,
2094 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2095 vm_deallocate( kernel_map, data, matchingCnt );
2096 }
2097
2098 return( kr );
2099 }
2100
2101 /* Routine io_service_add_notification_ool */
2102 kern_return_t is_io_service_add_notification_ool(
2103 mach_port_t master_port,
2104 io_name_t notification_type,
2105 io_buf_ptr_t matching,
2106 mach_msg_type_number_t matchingCnt,
2107 mach_port_t wake_port,
2108 io_async_ref_t reference,
2109 mach_msg_type_number_t referenceCnt,
2110 kern_return_t *result,
2111 io_object_t *notification )
2112 {
2113 return (internal_io_service_add_notification_ool(master_port, notification_type,
2114 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2115 false, result, notification));
2116 }
2117
2118 /* Routine io_service_add_notification_ool_64 */
2119 kern_return_t is_io_service_add_notification_ool_64(
2120 mach_port_t master_port,
2121 io_name_t notification_type,
2122 io_buf_ptr_t matching,
2123 mach_msg_type_number_t matchingCnt,
2124 mach_port_t wake_port,
2125 io_async_ref64_t reference,
2126 mach_msg_type_number_t referenceCnt,
2127 kern_return_t *result,
2128 io_object_t *notification )
2129 {
2130 return (internal_io_service_add_notification_ool(master_port, notification_type,
2131 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2132 true, result, notification));
2133 }
2134
2135 /* Routine io_service_add_notification_old */
2136 kern_return_t is_io_service_add_notification_old(
2137 mach_port_t master_port,
2138 io_name_t notification_type,
2139 io_string_t matching,
2140 mach_port_t port,
2141 // for binary compatibility reasons, this must be natural_t for ILP32
2142 natural_t ref,
2143 io_object_t * notification )
2144 {
2145 return( is_io_service_add_notification( master_port, notification_type,
2146 matching, port, &ref, 1, notification ));
2147 }
2148
2149
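// Common worker for the io_service_add_interest_notification* routines: wrap the caller's
// port in an IOServiceMessageUserNotification and register it for the requested interest
// type with IOService::registerInterest().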
2150 static kern_return_t internal_io_service_add_interest_notification(
2151 io_object_t _service,
2152 io_name_t type_of_interest,
2153 mach_port_t port,
2154 void * reference,
2155 vm_size_t referenceSize,
2156 bool client64,
2157 io_object_t * notification )
2158 {
2159
2160 IOServiceMessageUserNotification * userNotify = 0;
2161 IONotifier * notify = 0;
2162 const OSSymbol * sym;
2163 IOReturn err;
2164
2165 CHECK( IOService, _service, service );
2166
2167 err = kIOReturnNoResources;
2168 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2169
2170 userNotify = new IOServiceMessageUserNotification;
2171
2172 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2173 reference, referenceSize,
2174 kIOUserNotifyMaxMessageSize,
2175 client64 )) {
2176 iokit_release_port_send(port);
2177 userNotify->release();
2178 userNotify = 0;
2179 }
2180 if( !userNotify)
2181 { sym->release(); continue; } // release the type symbol on this early-exit path too
2182
2183 notify = service->registerInterest( sym,
2184 &userNotify->_handler, userNotify );
2185 if( notify) {
2186 *notification = userNotify;
2187 userNotify->setNotification( notify );
2188 err = kIOReturnSuccess;
2189 } else
2190 err = kIOReturnUnsupported;
2191
2192 sym->release();
2193
2194 } while( false );
2195
2196 return( err );
2197 }
2198
2199 /* Routine io_service_add_interest_notification */
2200 kern_return_t is_io_service_add_interest_notification(
2201 io_object_t service,
2202 io_name_t type_of_interest,
2203 mach_port_t port,
2204 io_async_ref_t reference,
2205 mach_msg_type_number_t referenceCnt,
2206 io_object_t * notification )
2207 {
2208 return (internal_io_service_add_interest_notification(service, type_of_interest,
2209 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2210 }
2211
2212 /* Routine io_service_add_interest_notification_64 */
2213 kern_return_t is_io_service_add_interest_notification_64(
2214 io_object_t service,
2215 io_name_t type_of_interest,
2216 mach_port_t wake_port,
2217 io_async_ref64_t reference,
2218 mach_msg_type_number_t referenceCnt,
2219 io_object_t *notification )
2220 {
2221 return (internal_io_service_add_interest_notification(service, type_of_interest,
2222 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2223 }
2224
2225
2226 /* Routine io_service_acknowledge_notification */
2227 kern_return_t is_io_service_acknowledge_notification(
2228 io_object_t _service,
2229 natural_t notify_ref,
2230 natural_t response )
2231 {
2232 CHECK( IOService, _service, service );
2233
2234 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2235 (IOOptionBits) response ));
2236
2237 }
2238
2239 /* Routine io_connect_get_notification_semaphore */
2240 kern_return_t is_io_connect_get_notification_semaphore(
2241 io_connect_t connection,
2242 natural_t notification_type,
2243 semaphore_t *semaphore )
2244 {
2245 CHECK( IOUserClient, connection, client );
2246
2247 IOStatisticsClientCall();
2248 return( client->getNotificationSemaphore( (UInt32) notification_type,
2249 semaphore ));
2250 }
2251
2252 /* Routine io_registry_get_root_entry */
2253 kern_return_t is_io_registry_get_root_entry(
2254 mach_port_t master_port,
2255 io_object_t *root )
2256 {
2257 IORegistryEntry * entry;
2258
2259 if( master_port != master_device_port)
2260 return( kIOReturnNotPrivileged);
2261
2262 entry = IORegistryEntry::getRegistryRoot();
2263 if( entry)
2264 entry->retain();
2265 *root = entry;
2266
2267 return( kIOReturnSuccess );
2268 }
2269
2270 /* Routine io_registry_create_iterator */
2271 kern_return_t is_io_registry_create_iterator(
2272 mach_port_t master_port,
2273 io_name_t plane,
2274 uint32_t options,
2275 io_object_t *iterator )
2276 {
2277 if( master_port != master_device_port)
2278 return( kIOReturnNotPrivileged);
2279
2280 *iterator = IORegistryIterator::iterateOver(
2281 IORegistryEntry::getPlane( plane ), options );
2282
2283 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2284 }
2285
2286 /* Routine io_registry_entry_create_iterator */
2287 kern_return_t is_io_registry_entry_create_iterator(
2288 io_object_t registry_entry,
2289 io_name_t plane,
2290 uint32_t options,
2291 io_object_t *iterator )
2292 {
2293 CHECK( IORegistryEntry, registry_entry, entry );
2294
2295 *iterator = IORegistryIterator::iterateOver( entry,
2296 IORegistryEntry::getPlane( plane ), options );
2297
2298 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2299 }
2300
2301 /* Routine io_registry_iterator_enter_entry */
2302 kern_return_t is_io_registry_iterator_enter_entry(
2303 io_object_t iterator )
2304 {
2305 CHECK( IORegistryIterator, iterator, iter );
2306
2307 iter->enterEntry();
2308
2309 return( kIOReturnSuccess );
2310 }
2311
2312 /* Routine io_registry_iterator_exit_entry */
2313 kern_return_t is_io_registry_iterator_exit_entry(
2314 io_object_t iterator )
2315 {
2316 bool didIt;
2317
2318 CHECK( IORegistryIterator, iterator, iter );
2319
2320 didIt = iter->exitEntry();
2321
2322 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2323 }
2324
2325 /* Routine io_registry_entry_from_path */
2326 kern_return_t is_io_registry_entry_from_path(
2327 mach_port_t master_port,
2328 io_string_t path,
2329 io_object_t *registry_entry )
2330 {
2331 IORegistryEntry * entry;
2332
2333 if( master_port != master_device_port)
2334 return( kIOReturnNotPrivileged);
2335
2336 entry = IORegistryEntry::fromPath( path );
2337
2338 *registry_entry = entry;
2339
2340 return( kIOReturnSuccess );
2341 }
2342
2343
2344 /* Routine io_registry_entry_from_path_ool */
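// The path may arrive inband (path[0] != 0) or as an OOL buffer, which must be
// NUL-terminated. As with the other OOL routines, once vm_map_copyout() succeeds the MIG
// return stays KERN_SUCCESS and the lookup status is reported via *result.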
2345 kern_return_t is_io_registry_entry_from_path_ool(
2346 mach_port_t master_port,
2347 io_string_inband_t path,
2348 io_buf_ptr_t path_ool,
2349 mach_msg_type_number_t path_oolCnt,
2350 kern_return_t *result,
2351 io_object_t *registry_entry)
2352 {
2353 IORegistryEntry * entry;
2354 vm_map_offset_t map_data;
2355 const char * cpath;
2356 IOReturn res;
2357 kern_return_t err;
2358
2359 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2360
2361 map_data = 0;
2362 entry = 0;
2363 res = err = KERN_SUCCESS;
2364 if (path[0]) cpath = path;
2365 else
2366 {
2367 if (!path_oolCnt) return(kIOReturnBadArgument);
2368 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2369
2370 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2371 if (KERN_SUCCESS == err)
2372 {
2373 // must return KERN_SUCCESS to MIG once vm_map_copyout() has succeeded; the actual lookup status is reported via *result
2374 cpath = CAST_DOWN(const char *, map_data);
2375 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2376 }
2377 }
2378
2379 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2380 {
2381 entry = IORegistryEntry::fromPath(cpath);
2382 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2383 }
2384
2385 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2386
2387 if (KERN_SUCCESS != err) res = err;
2388 *registry_entry = entry;
2389 *result = res;
2390
2391 return (err);
2392 }
2393
2394
2395 /* Routine io_registry_entry_in_plane */
2396 kern_return_t is_io_registry_entry_in_plane(
2397 io_object_t registry_entry,
2398 io_name_t plane,
2399 boolean_t *inPlane )
2400 {
2401 CHECK( IORegistryEntry, registry_entry, entry );
2402
2403 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2404
2405 return( kIOReturnSuccess );
2406 }
2407
2408
2409 /* Routine io_registry_entry_get_path */
2410 kern_return_t is_io_registry_entry_get_path(
2411 io_object_t registry_entry,
2412 io_name_t plane,
2413 io_string_t path )
2414 {
2415 int length;
2416 CHECK( IORegistryEntry, registry_entry, entry );
2417
2418 length = sizeof( io_string_t);
2419 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2420 return( kIOReturnSuccess );
2421 else
2422 return( kIOReturnBadArgument );
2423 }
2424
2425 /* Routine io_registry_entry_get_path_ool */
2426 kern_return_t is_io_registry_entry_get_path_ool(
2427 io_object_t registry_entry,
2428 io_name_t plane,
2429 io_string_inband_t path,
2430 io_buf_ptr_t *path_ool,
2431 mach_msg_type_number_t *path_oolCnt)
2432 {
2433 enum { kMaxPath = 16384 };
2434 IOReturn err;
2435 int length;
2436 char * buf;
2437
2438 CHECK( IORegistryEntry, registry_entry, entry );
2439
2440 *path_ool = NULL;
2441 *path_oolCnt = 0;
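// Try to fit the path into the inband reply first; if it does not fit, build it in a
// temporary kMaxPath buffer and hand it back out of line via copyoutkdata().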
2442 length = sizeof(io_string_inband_t);
2443 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2444 else
2445 {
2446 length = kMaxPath;
2447 buf = IONew(char, length);
2448 if (!buf) err = kIOReturnNoMemory;
2449 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2450 else
2451 {
2452 *path_oolCnt = length;
2453 err = copyoutkdata(buf, length, path_ool);
2454 }
2455 if (buf) IODelete(buf, char, kMaxPath);
2456 }
2457
2458 return (err);
2459 }
2460
2461
2462 /* Routine io_registry_entry_get_name */
2463 kern_return_t is_io_registry_entry_get_name(
2464 io_object_t registry_entry,
2465 io_name_t name )
2466 {
2467 CHECK( IORegistryEntry, registry_entry, entry );
2468
2469 strncpy( name, entry->getName(), sizeof( io_name_t));
2470
2471 return( kIOReturnSuccess );
2472 }
2473
2474 /* Routine io_registry_entry_get_name_in_plane */
2475 kern_return_t is_io_registry_entry_get_name_in_plane(
2476 io_object_t registry_entry,
2477 io_name_t planeName,
2478 io_name_t name )
2479 {
2480 const IORegistryPlane * plane;
2481 CHECK( IORegistryEntry, registry_entry, entry );
2482
2483 if( planeName[0])
2484 plane = IORegistryEntry::getPlane( planeName );
2485 else
2486 plane = 0;
2487
2488 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2489
2490 return( kIOReturnSuccess );
2491 }
2492
2493 /* Routine io_registry_entry_get_location_in_plane */
2494 kern_return_t is_io_registry_entry_get_location_in_plane(
2495 io_object_t registry_entry,
2496 io_name_t planeName,
2497 io_name_t location )
2498 {
2499 const IORegistryPlane * plane;
2500 CHECK( IORegistryEntry, registry_entry, entry );
2501
2502 if( planeName[0])
2503 plane = IORegistryEntry::getPlane( planeName );
2504 else
2505 plane = 0;
2506
2507 const char * cstr = entry->getLocation( plane );
2508
2509 if( cstr) {
2510 strncpy( location, cstr, sizeof( io_name_t));
2511 return( kIOReturnSuccess );
2512 } else
2513 return( kIOReturnNotFound );
2514 }
2515
2516 /* Routine io_registry_entry_get_registry_entry_id */
2517 kern_return_t is_io_registry_entry_get_registry_entry_id(
2518 io_object_t registry_entry,
2519 uint64_t *entry_id )
2520 {
2521 CHECK( IORegistryEntry, registry_entry, entry );
2522
2523 *entry_id = entry->getRegistryEntryID();
2524
2525 return (kIOReturnSuccess);
2526 }
2527
2528 /* Routine io_registry_entry_get_property_bytes */
2529 kern_return_t is_io_registry_entry_get_property_bytes(
2530 io_object_t registry_entry,
2531 io_name_t property_name,
2532 io_struct_inband_t buf,
2533 mach_msg_type_number_t *dataCnt )
2534 {
2535 OSObject * obj;
2536 OSData * data;
2537 OSString * str;
2538 OSBoolean * boo;
2539 OSNumber * off;
2540 UInt64 offsetBytes;
2541 unsigned int len = 0;
2542 const void * bytes = 0;
2543 IOReturn ret = kIOReturnSuccess;
2544
2545 CHECK( IORegistryEntry, registry_entry, entry );
2546
2547 #if CONFIG_MACF
2548 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2549 return kIOReturnNotPermitted;
2550 #endif
2551
2552 obj = entry->copyProperty(property_name);
2553 if( !obj)
2554 return( kIOReturnNoResources );
2555
2556 // One day OSData will be a common container base class
2557 // until then...
2558 if( (data = OSDynamicCast( OSData, obj ))) {
2559 len = data->getLength();
2560 bytes = data->getBytesNoCopy();
2561
2562 } else if( (str = OSDynamicCast( OSString, obj ))) {
2563 len = str->getLength() + 1;
2564 bytes = str->getCStringNoCopy();
2565
2566 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2567 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2568 bytes = boo->isTrue() ? "Yes" : "No";
2569
2570 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2571 offsetBytes = off->unsigned64BitValue();
2572 len = off->numberOfBytes();
2573 bytes = &offsetBytes;
2574 #ifdef __BIG_ENDIAN__
2575 bytes = (const void *)
2576 (((UInt32) bytes) + (sizeof( UInt64) - len));
2577 #endif
2578
2579 } else
2580 ret = kIOReturnBadArgument;
2581
2582 if( bytes) {
2583 if( *dataCnt < len)
2584 ret = kIOReturnIPCError;
2585 else {
2586 *dataCnt = len;
2587 bcopy( bytes, buf, len );
2588 }
2589 }
2590 obj->release();
2591
2592 return( ret );
2593 }
2594
2595
2596 /* Routine io_registry_entry_get_property */
2597 kern_return_t is_io_registry_entry_get_property(
2598 io_object_t registry_entry,
2599 io_name_t property_name,
2600 io_buf_ptr_t *properties,
2601 mach_msg_type_number_t *propertiesCnt )
2602 {
2603 kern_return_t err;
2604 vm_size_t len;
2605 OSObject * obj;
2606
2607 CHECK( IORegistryEntry, registry_entry, entry );
2608
2609 #if CONFIG_MACF
2610 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2611 return kIOReturnNotPermitted;
2612 #endif
2613
2614 obj = entry->copyProperty(property_name);
2615 if( !obj)
2616 return( kIOReturnNotFound );
2617
2618 OSSerialize * s = OSSerialize::withCapacity(4096);
2619 if( !s) {
2620 obj->release();
2621 return( kIOReturnNoMemory );
2622 }
2623
2624 if( obj->serialize( s )) {
2625 len = s->getLength();
2626 *propertiesCnt = len;
2627 err = copyoutkdata( s->text(), len, properties );
2628
2629 } else
2630 err = kIOReturnUnsupported;
2631
2632 s->release();
2633 obj->release();
2634
2635 return( err );
2636 }
2637
2638 /* Routine io_registry_entry_get_property_recursively */
2639 kern_return_t is_io_registry_entry_get_property_recursively(
2640 io_object_t registry_entry,
2641 io_name_t plane,
2642 io_name_t property_name,
2643 uint32_t options,
2644 io_buf_ptr_t *properties,
2645 mach_msg_type_number_t *propertiesCnt )
2646 {
2647 kern_return_t err;
2648 vm_size_t len;
2649 OSObject * obj;
2650
2651 CHECK( IORegistryEntry, registry_entry, entry );
2652
2653 #if CONFIG_MACF
2654 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2655 return kIOReturnNotPermitted;
2656 #endif
2657
2658 obj = entry->copyProperty( property_name,
2659 IORegistryEntry::getPlane( plane ), options);
2660 if( !obj)
2661 return( kIOReturnNotFound );
2662
2663 OSSerialize * s = OSSerialize::withCapacity(4096);
2664 if( !s) {
2665 obj->release();
2666 return( kIOReturnNoMemory );
2667 }
2668
2669 if( obj->serialize( s )) {
2670 len = s->getLength();
2671 *propertiesCnt = len;
2672 err = copyoutkdata( s->text(), len, properties );
2673
2674 } else
2675 err = kIOReturnUnsupported;
2676
2677 s->release();
2678 obj->release();
2679
2680 return( err );
2681 }
2682
2683 #if CONFIG_MACF
2684
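// Build a copy of 'properties' containing only the keys the calling credential may read;
// anything rejected by mac_iokit_check_get_property() is dropped.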
2685 static kern_return_t
2686 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp)
2687 {
2688 kern_return_t err = 0;
2689 OSDictionary *filtered = NULL;
2690 OSCollectionIterator *iter = NULL;
2691 OSSymbol *key;
2692 OSObject *p;
2693 kauth_cred_t cred = kauth_cred_get();
2694
2695 if (properties == NULL)
2696 return kIOReturnUnsupported;
2697
2698 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL ||
2699 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) {
2700 err = kIOReturnNoMemory;
2701 goto out;
2702 }
2703
2704 while ((p = iter->getNextObject()) != NULL) {
2705 if ((key = OSDynamicCast(OSSymbol, p)) == NULL ||
2706 mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0)
2707 continue;
2708 filtered->setObject(key, properties->getObject(key));
2709 }
2710
2711 out:
2712 if (iter != NULL)
2713 iter->release();
2714 *filteredp = filtered;
2715 return err;
2716 }
2717
2718 #endif
2719
2720 /* Routine io_registry_entry_get_properties */
2721 kern_return_t is_io_registry_entry_get_properties(
2722 io_object_t registry_entry,
2723 io_buf_ptr_t *properties,
2724 mach_msg_type_number_t *propertiesCnt )
2725 {
2726 kern_return_t err = 0;
2727 vm_size_t len;
2728
2729 CHECK( IORegistryEntry, registry_entry, entry );
2730
2731 OSSerialize * s = OSSerialize::withCapacity(4096);
2732 if( !s)
2733 return( kIOReturnNoMemory );
2734
2735 if (!entry->serializeProperties(s))
2736 err = kIOReturnUnsupported;
2737
2738 #if CONFIG_MACF
2739 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
2740 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength());
2741 OSDictionary *filteredprops = NULL;
2742 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops);
2743 if (propobj) propobj->release();
2744
2745 if (!err) {
2746 s->clearText();
2747 if (!filteredprops->serialize(s))
2748 err = kIOReturnUnsupported;
2749 }
2750 if (filteredprops != NULL)
2751 filteredprops->release();
2752 }
2753 #endif /* CONFIG_MACF */
2754
2755 if (!err) {
2756 len = s->getLength();
2757 *propertiesCnt = len;
2758 err = copyoutkdata( s->text(), len, properties );
2759 }
2760
2761 s->release();
2762 return( err );
2763 }
2764
2765 #if CONFIG_MACF
2766
2767 struct GetPropertiesEditorRef
2768 {
2769 kauth_cred_t cred;
2770 IORegistryEntry * entry;
2771 OSCollection * root;
2772 };
2773
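// OSSerialize editor callback used by the binary properties path below: for keys in the
// top-level (root) collection it consults mac_iokit_check_get_property() and returns NULL
// for any value the MAC policy refuses to expose to the caller.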
2774 static const OSMetaClassBase *
2775 GetPropertiesEditor(void * reference,
2776 OSSerialize * s,
2777 OSCollection * container,
2778 const OSSymbol * name,
2779 const OSMetaClassBase * value)
2780 {
2781 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
2782
2783 if (!ref->root) ref->root = container;
2784 if (ref->root == container)
2785 {
2786 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
2787 {
2788 value = 0;
2789 }
2790 }
2791 if (value) value->retain();
2792 return (value);
2793 }
2794
2795 #endif /* CONFIG_MACF */
2796
2797 /* Routine io_registry_entry_get_properties_bin */
2798 kern_return_t is_io_registry_entry_get_properties_bin(
2799 io_object_t registry_entry,
2800 io_buf_ptr_t *properties,
2801 mach_msg_type_number_t *propertiesCnt)
2802 {
2803 kern_return_t err = kIOReturnSuccess;
2804 vm_size_t len;
2805 OSSerialize * s;
2806 OSSerialize::Editor editor = 0;
2807 void * editRef = 0;
2808
2809 CHECK(IORegistryEntry, registry_entry, entry);
2810
2811 #if CONFIG_MACF
2812 GetPropertiesEditorRef ref;
2813 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
2814 {
2815 editor = &GetPropertiesEditor;
2816 editRef = &ref;
2817 ref.cred = kauth_cred_get();
2818 ref.entry = entry;
2819 ref.root = 0;
2820 }
2821 #endif
2822
2823 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
2824 if (!s) return (kIOReturnNoMemory);
2825
2826 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
2827
2828 if (kIOReturnSuccess == err)
2829 {
2830 len = s->getLength();
2831 *propertiesCnt = len;
2832 err = copyoutkdata(s->text(), len, properties);
2833 }
2834 s->release();
2835
2836 return (err);
2837 }
2838
2839 /* Routine io_registry_entry_get_property_bin */
2840 kern_return_t is_io_registry_entry_get_property_bin(
2841 io_object_t registry_entry,
2842 io_name_t plane,
2843 io_name_t property_name,
2844 uint32_t options,
2845 io_buf_ptr_t *properties,
2846 mach_msg_type_number_t *propertiesCnt )
2847 {
2848 kern_return_t err;
2849 vm_size_t len;
2850 OSObject * obj;
2851 const OSSymbol * sym;
2852
2853 CHECK( IORegistryEntry, registry_entry, entry );
2854
2855 #if CONFIG_MACF
2856 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2857 return kIOReturnNotPermitted;
2858 #endif
2859
2860 if ((kIORegistryIterateRecursively & options) && plane[0])
2861 {
2862 obj = entry->copyProperty(property_name,
2863 IORegistryEntry::getPlane(plane), options);
2864 }
2865 else
2866 {
2867 obj = entry->copyProperty(property_name);
2868 }
2869
2870 if( !obj)
2871 return( kIOReturnNotFound );
2872
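// Honor remove-on-read: a property listed in gIORemoveOnReadProperties is deleted from
// the entry once it has been copied for the caller.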
2873 sym = OSSymbol::withCString(property_name);
2874 if (sym)
2875 {
2876 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
2877 sym->release();
2878 }
2879
2880 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
2881 if( !s) {
2882 obj->release();
2883 return( kIOReturnNoMemory );
2884 }
2885
2886 if( obj->serialize( s )) {
2887 len = s->getLength();
2888 *propertiesCnt = len;
2889 err = copyoutkdata( s->text(), len, properties );
2890
2891 } else err = kIOReturnUnsupported;
2892
2893 s->release();
2894 obj->release();
2895
2896 return( err );
2897 }
2898
2899
2900 /* Routine io_registry_entry_set_properties */
2901 kern_return_t is_io_registry_entry_set_properties
2902 (
2903 io_object_t registry_entry,
2904 io_buf_ptr_t properties,
2905 mach_msg_type_number_t propertiesCnt,
2906 kern_return_t * result)
2907 {
2908 OSObject * obj;
2909 kern_return_t err;
2910 IOReturn res;
2911 vm_offset_t data;
2912 vm_map_offset_t map_data;
2913
2914 CHECK( IORegistryEntry, registry_entry, entry );
2915
2916 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
2917 return( kIOReturnMessageTooLarge);
2918
2919 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2920 data = CAST_DOWN(vm_offset_t, map_data);
2921
2922 if( KERN_SUCCESS == err) {
2923
2924 // must return success after vm_map_copyout() succeeds
2925 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
2926 vm_deallocate( kernel_map, data, propertiesCnt );
2927
2928 if (!obj)
2929 res = kIOReturnBadArgument;
2930 #if CONFIG_MACF
2931 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
2932 registry_entry, obj))
2933 {
2934 res = kIOReturnNotPermitted;
2935 }
2936 #endif
2937 else
2938 {
2939 res = entry->setProperties( obj );
2940 }
2941
2942 if (obj)
2943 obj->release();
2944 } else
2945 res = err;
2946
2947 *result = res;
2948 return( err );
2949 }
2950
2951 /* Routine io_registry_entry_get_child_iterator */
2952 kern_return_t is_io_registry_entry_get_child_iterator(
2953 io_object_t registry_entry,
2954 io_name_t plane,
2955 io_object_t *iterator )
2956 {
2957 CHECK( IORegistryEntry, registry_entry, entry );
2958
2959 *iterator = entry->getChildIterator(
2960 IORegistryEntry::getPlane( plane ));
2961
2962 return( kIOReturnSuccess );
2963 }
2964
2965 /* Routine io_registry_entry_get_parent_iterator */
2966 kern_return_t is_io_registry_entry_get_parent_iterator(
2967 io_object_t registry_entry,
2968 io_name_t plane,
2969 io_object_t *iterator)
2970 {
2971 CHECK( IORegistryEntry, registry_entry, entry );
2972
2973 *iterator = entry->getParentIterator(
2974 IORegistryEntry::getPlane( plane ));
2975
2976 return( kIOReturnSuccess );
2977 }
2978
2979 /* Routine io_service_get_busy_state */
2980 kern_return_t is_io_service_get_busy_state(
2981 io_object_t _service,
2982 uint32_t *busyState )
2983 {
2984 CHECK( IOService, _service, service );
2985
2986 *busyState = service->getBusyState();
2987
2988 return( kIOReturnSuccess );
2989 }
2990
2991 /* Routine io_service_get_state */
2992 kern_return_t is_io_service_get_state(
2993 io_object_t _service,
2994 uint64_t *state,
2995 uint32_t *busy_state,
2996 uint64_t *accumulated_busy_time )
2997 {
2998 CHECK( IOService, _service, service );
2999
3000 *state = service->getState();
3001 *busy_state = service->getBusyState();
3002 *accumulated_busy_time = service->getAccumulatedBusyTime();
3003
3004 return( kIOReturnSuccess );
3005 }
3006
3007 /* Routine io_service_wait_quiet */
3008 kern_return_t is_io_service_wait_quiet(
3009 io_object_t _service,
3010 mach_timespec_t wait_time )
3011 {
3012 uint64_t timeoutNS;
3013
3014 CHECK( IOService, _service, service );
3015
3016 timeoutNS = wait_time.tv_sec;
3017 timeoutNS *= kSecondScale;
3018 timeoutNS += wait_time.tv_nsec;
3019
3020 return( service->waitQuiet(timeoutNS) );
3021 }
3022
3023 /* Routine io_service_request_probe */
3024 kern_return_t is_io_service_request_probe(
3025 io_object_t _service,
3026 uint32_t options )
3027 {
3028 CHECK( IOService, _service, service );
3029
3030 return( service->requestProbe( options ));
3031 }
3032
3033 /* Routine io_service_get_authorization_id */
3034 kern_return_t is_io_service_get_authorization_id(
3035 io_object_t _service,
3036 uint64_t *authorization_id )
3037 {
3038 kern_return_t kr;
3039
3040 CHECK( IOService, _service, service );
3041
3042 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3043 kIOClientPrivilegeAdministrator );
3044 if( kIOReturnSuccess != kr)
3045 return( kr );
3046
3047 *authorization_id = service->getAuthorizationID();
3048
3049 return( kr );
3050 }
3051
3052 /* Routine io_service_set_authorization_id */
3053 kern_return_t is_io_service_set_authorization_id(
3054 io_object_t _service,
3055 uint64_t authorization_id )
3056 {
3057 CHECK( IOService, _service, service );
3058
3059 return( service->setAuthorizationID( authorization_id ) );
3060 }
3061
3062 /* Routine io_service_open_extended */
3063 kern_return_t is_io_service_open_extended(
3064 io_object_t _service,
3065 task_t owningTask,
3066 uint32_t connect_type,
3067 NDR_record_t ndr,
3068 io_buf_ptr_t properties,
3069 mach_msg_type_number_t propertiesCnt,
3070 kern_return_t * result,
3071 io_object_t *connection )
3072 {
3073 IOUserClient * client = 0;
3074 kern_return_t err = KERN_SUCCESS;
3075 IOReturn res = kIOReturnSuccess;
3076 OSDictionary * propertiesDict = 0;
3077 bool crossEndian;
3078 bool disallowAccess;
3079
3080 CHECK( IOService, _service, service );
3081
3082 if (!owningTask) return (kIOReturnBadArgument);
3083
3084 do
3085 {
3086 if (properties)
3087 {
3088 OSObject * obj;
3089 vm_offset_t data;
3090 vm_map_offset_t map_data;
3091
3092 if( propertiesCnt > sizeof(io_struct_inband_t))
3093 return( kIOReturnMessageTooLarge);
3094
3095 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3096 res = err;
3097 data = CAST_DOWN(vm_offset_t, map_data);
3098 if (KERN_SUCCESS == err)
3099 {
3100 // must return success after vm_map_copyout() succeeds
3101 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3102 vm_deallocate( kernel_map, data, propertiesCnt );
3103 propertiesDict = OSDynamicCast(OSDictionary, obj);
3104 if (!propertiesDict)
3105 {
3106 res = kIOReturnBadArgument;
3107 if (obj)
3108 obj->release();
3109 }
3110 }
3111 if (kIOReturnSuccess != res)
3112 break;
3113 }
3114
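// Cross-endian clients are detected via the NDR record in the MIG request. The client's
// NDR is stashed in the creation properties, and the connection is refused later unless
// the service or the new client sets kIOUserClientCrossEndianCompatibleKey.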
3115 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3116 if (crossEndian)
3117 {
3118 if (!propertiesDict)
3119 propertiesDict = OSDictionary::withCapacity(4);
3120 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3121 if (data)
3122 {
3123 if (propertiesDict)
3124 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3125 data->release();
3126 }
3127 }
3128
3129 res = service->newUserClient( owningTask, (void *) owningTask,
3130 connect_type, propertiesDict, &client );
3131
3132 if (propertiesDict)
3133 propertiesDict->release();
3134
3135 if (res == kIOReturnSuccess)
3136 {
3137 assert( OSDynamicCast(IOUserClient, client) );
3138
3139 disallowAccess = (crossEndian
3140 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3141 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3142 if (disallowAccess) res = kIOReturnUnsupported;
3143 #if CONFIG_MACF
3144 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3145 res = kIOReturnNotPermitted;
3146 #endif
3147 if (kIOReturnSuccess != res)
3148 {
3149 IOStatisticsClientCall();
3150 client->clientClose();
3151 client->release();
3152 client = 0;
3153 break;
3154 }
3155 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3156 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3157 if (creatorName)
3158 {
3159 client->setProperty(kIOUserClientCreatorKey, creatorName);
3160 creatorName->release();
3161 }
3162 client->setTerminateDefer(service, false);
3163 }
3164 }
3165 while (false);
3166
3167 *connection = client;
3168 *result = res;
3169
3170 return (err);
3171 }
3172
3173 /* Routine io_service_close */
3174 kern_return_t is_io_service_close(
3175 io_object_t connection )
3176 {
3177 OSSet * mappings;
3178 if ((mappings = OSDynamicCast(OSSet, connection)))
3179 return( kIOReturnSuccess );
3180
3181 CHECK( IOUserClient, connection, client );
3182
3183 IOStatisticsClientCall();
3184 client->clientClose();
3185
3186 return( kIOReturnSuccess );
3187 }
3188
3189 /* Routine io_connect_get_service */
3190 kern_return_t is_io_connect_get_service(
3191 io_object_t connection,
3192 io_object_t *service )
3193 {
3194 IOService * theService;
3195
3196 CHECK( IOUserClient, connection, client );
3197
3198 theService = client->getService();
3199 if( theService)
3200 theService->retain();
3201
3202 *service = theService;
3203
3204 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3205 }
3206
3207 /* Routine io_connect_set_notification_port */
3208 kern_return_t is_io_connect_set_notification_port(
3209 io_object_t connection,
3210 uint32_t notification_type,
3211 mach_port_t port,
3212 uint32_t reference)
3213 {
3214 CHECK( IOUserClient, connection, client );
3215
3216 IOStatisticsClientCall();
3217 return( client->registerNotificationPort( port, notification_type,
3218 (io_user_reference_t) reference ));
3219 }
3220
3221 /* Routine io_connect_set_notification_port_64 */
3222 kern_return_t is_io_connect_set_notification_port_64(
3223 io_object_t connection,
3224 uint32_t notification_type,
3225 mach_port_t port,
3226 io_user_reference_t reference)
3227 {
3228 CHECK( IOUserClient, connection, client );
3229
3230 IOStatisticsClientCall();
3231 return( client->registerNotificationPort( port, notification_type,
3232 reference ));
3233 }
3234
3235 /* Routine io_connect_map_memory_into_task */
3236 kern_return_t is_io_connect_map_memory_into_task
3237 (
3238 io_connect_t connection,
3239 uint32_t memory_type,
3240 task_t into_task,
3241 mach_vm_address_t *address,
3242 mach_vm_size_t *size,
3243 uint32_t flags
3244 )
3245 {
3246 IOReturn err;
3247 IOMemoryMap * map;
3248
3249 CHECK( IOUserClient, connection, client );
3250
3251 if (!into_task) return (kIOReturnBadArgument);
3252
3253 IOStatisticsClientCall();
3254 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3255
3256 if( map) {
3257 *address = map->getAddress();
3258 if( size)
3259 *size = map->getSize();
3260
3261 if( client->sharedInstance
3262 || (into_task != current_task())) {
3263 // push a name out to the task owning the map,
3264 // so we can clean up maps
3265 mach_port_name_t name __unused =
3266 IOMachPort::makeSendRightForTask(
3267 into_task, map, IKOT_IOKIT_OBJECT );
3268
3269 } else {
3270 // keep it with the user client
3271 IOLockLock( gIOObjectPortLock);
3272 if( 0 == client->mappings)
3273 client->mappings = OSSet::withCapacity(2);
3274 if( client->mappings)
3275 client->mappings->setObject( map);
3276 IOLockUnlock( gIOObjectPortLock);
3277 map->release();
3278 }
3279 err = kIOReturnSuccess;
3280
3281 } else
3282 err = kIOReturnBadArgument;
3283
3284 return( err );
3285 }
3286
3287 /* Routine io_connect_map_memory */
3288 kern_return_t is_io_connect_map_memory(
3289 io_object_t connect,
3290 uint32_t type,
3291 task_t task,
3292 uint32_t * mapAddr,
3293 uint32_t * mapSize,
3294 uint32_t flags )
3295 {
3296 IOReturn err;
3297 mach_vm_address_t address;
3298 mach_vm_size_t size;
3299
3300 address = SCALAR64(*mapAddr);
3301 size = SCALAR64(*mapSize);
3302
3303 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3304
3305 *mapAddr = SCALAR32(address);
3306 *mapSize = SCALAR32(size);
3307
3308 return (err);
3309 }
3310
3311 } /* extern "C" */
3312
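// Remove and return (retained) the tracked IOMemoryMap backed by 'mem', searching this
// client's mappings set under gIOObjectPortLock.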
3313 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3314 {
3315 OSIterator * iter;
3316 IOMemoryMap * map = 0;
3317
3318 IOLockLock(gIOObjectPortLock);
3319
3320 iter = OSCollectionIterator::withCollection(mappings);
3321 if(iter)
3322 {
3323 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3324 {
3325 if(mem == map->getMemoryDescriptor())
3326 {
3327 map->retain();
3328 mappings->removeObject(map);
3329 break;
3330 }
3331 }
3332 iter->release();
3333 }
3334
3335 IOLockUnlock(gIOObjectPortLock);
3336
3337 return (map);
3338 }
3339
3340 extern "C" {
3341
3342 /* Routine io_connect_unmap_memory_from_task */
3343 kern_return_t is_io_connect_unmap_memory_from_task
3344 (
3345 io_connect_t connection,
3346 uint32_t memory_type,
3347 task_t from_task,
3348 mach_vm_address_t address)
3349 {
3350 IOReturn err;
3351 IOOptionBits options = 0;
3352 IOMemoryDescriptor * memory;
3353 IOMemoryMap * map;
3354
3355 CHECK( IOUserClient, connection, client );
3356
3357 if (!from_task) return (kIOReturnBadArgument);
3358
3359 IOStatisticsClientCall();
3360 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3361
3362 if( memory && (kIOReturnSuccess == err)) {
3363
3364 options = (options & ~kIOMapUserOptionsMask)
3365 | kIOMapAnywhere | kIOMapReference;
3366
3367 map = memory->createMappingInTask( from_task, address, options );
3368 memory->release();
3369 if( map)
3370 {
3371 IOLockLock( gIOObjectPortLock);
3372 if( client->mappings)
3373 client->mappings->removeObject( map);
3374 IOLockUnlock( gIOObjectPortLock);
3375
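// The mapping was published to the owning task as a port name at map time; for a foreign
// task, recreate that name and drop two send rights on it (apparently the one just made
// plus the original) before tearing the mapping down.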
3376 mach_port_name_t name = 0;
3377 if (from_task != current_task())
3378 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3379 if (name)
3380 {
3381 map->userClientUnmap();
3382 err = iokit_mod_send_right( from_task, name, -2 );
3383 err = kIOReturnSuccess;
3384 }
3385 else
3386 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3387 if (from_task == current_task())
3388 map->release();
3389 }
3390 else
3391 err = kIOReturnBadArgument;
3392 }
3393
3394 return( err );
3395 }
3396
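/* Routine io_connect_unmap_memory */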
3397 kern_return_t is_io_connect_unmap_memory(
3398 io_object_t connect,
3399 uint32_t type,
3400 task_t task,
3401 uint32_t mapAddr )
3402 {
3403 IOReturn err;
3404 mach_vm_address_t address;
3405
3406 address = SCALAR64(mapAddr);
3407
3408 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3409
3410 return (err);
3411 }
3412
3413
3414 /* Routine io_connect_add_client */
3415 kern_return_t is_io_connect_add_client(
3416 io_object_t connection,
3417 io_object_t connect_to)
3418 {
3419 CHECK( IOUserClient, connection, client );
3420 CHECK( IOUserClient, connect_to, to );
3421
3422 IOStatisticsClientCall();
3423 return( client->connectClient( to ) );
3424 }
3425
3426
3427 /* Routine io_connect_set_properties */
3428 kern_return_t is_io_connect_set_properties(
3429 io_object_t connection,
3430 io_buf_ptr_t properties,
3431 mach_msg_type_number_t propertiesCnt,
3432 kern_return_t * result)
3433 {
3434 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3435 }
3436
3437 /* Routine io_connect_method_var_output */
3438 kern_return_t is_io_connect_method_var_output
3439 (
3440 io_connect_t connection,
3441 uint32_t selector,
3442 io_scalar_inband64_t scalar_input,
3443 mach_msg_type_number_t scalar_inputCnt,
3444 io_struct_inband_t inband_input,
3445 mach_msg_type_number_t inband_inputCnt,
3446 mach_vm_address_t ool_input,
3447 mach_vm_size_t ool_input_size,
3448 io_struct_inband_t inband_output,
3449 mach_msg_type_number_t *inband_outputCnt,
3450 io_scalar_inband64_t scalar_output,
3451 mach_msg_type_number_t *scalar_outputCnt,
3452 io_buf_ptr_t *var_output,
3453 mach_msg_type_number_t *var_outputCnt
3454 )
3455 {
3456 CHECK( IOUserClient, connection, client );
3457
3458 IOExternalMethodArguments args;
3459 IOReturn ret;
3460 IOMemoryDescriptor * inputMD = 0;
3461 OSObject * structureVariableOutputData = 0;
3462
3463 bzero(&args.__reserved[0], sizeof(args.__reserved));
3464 args.version = kIOExternalMethodArgumentsCurrentVersion;
3465
3466 args.selector = selector;
3467
3468 args.asyncWakePort = MACH_PORT_NULL;
3469 args.asyncReference = 0;
3470 args.asyncReferenceCount = 0;
3471 args.structureVariableOutputData = &structureVariableOutputData;
3472
3473 args.scalarInput = scalar_input;
3474 args.scalarInputCount = scalar_inputCnt;
3475 args.structureInput = inband_input;
3476 args.structureInputSize = inband_inputCnt;
3477
3478 if (ool_input)
3479 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3480 kIODirectionOut, current_task());
3481
3482 args.structureInputDescriptor = inputMD;
3483
3484 args.scalarOutput = scalar_output;
3485 args.scalarOutputCount = *scalar_outputCnt;
3486 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3487 args.structureOutput = inband_output;
3488 args.structureOutputSize = *inband_outputCnt;
3489 args.structureOutputDescriptor = NULL;
3490 args.structureOutputDescriptorSize = 0;
3491
3492 IOStatisticsClientCall();
3493 ret = client->externalMethod( selector, &args );
3494
3495 *scalar_outputCnt = args.scalarOutputCount;
3496 *inband_outputCnt = args.structureOutputSize;
3497
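// Variable-length structure output comes back from externalMethod() as either an
// OSSerialize or an OSData; copy its bytes out to the caller as an OOL buffer.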
3498 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3499 {
3500 OSSerialize * serialize;
3501 OSData * data;
3502 vm_size_t len;
3503
3504 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3505 {
3506 len = serialize->getLength();
3507 *var_outputCnt = len;
3508 ret = copyoutkdata(serialize->text(), len, var_output);
3509 }
3510 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3511 {
3512 len = data->getLength();
3513 *var_outputCnt = len;
3514 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3515 }
3516 else
3517 {
3518 ret = kIOReturnUnderrun;
3519 }
3520 }
3521
3522 if (inputMD)
3523 inputMD->release();
3524 if (structureVariableOutputData)
3525 structureVariableOutputData->release();
3526
3527 return (ret);
3528 }
3529
3530 /* Routine io_connect_method */
3531 kern_return_t is_io_connect_method
3532 (
3533 io_connect_t connection,
3534 uint32_t selector,
3535 io_scalar_inband64_t scalar_input,
3536 mach_msg_type_number_t scalar_inputCnt,
3537 io_struct_inband_t inband_input,
3538 mach_msg_type_number_t inband_inputCnt,
3539 mach_vm_address_t ool_input,
3540 mach_vm_size_t ool_input_size,
3541 io_struct_inband_t inband_output,
3542 mach_msg_type_number_t *inband_outputCnt,
3543 io_scalar_inband64_t scalar_output,
3544 mach_msg_type_number_t *scalar_outputCnt,
3545 mach_vm_address_t ool_output,
3546 mach_vm_size_t *ool_output_size
3547 )
3548 {
3549 CHECK( IOUserClient, connection, client );
3550
3551 IOExternalMethodArguments args;
3552 IOReturn ret;
3553 IOMemoryDescriptor * inputMD = 0;
3554 IOMemoryDescriptor * outputMD = 0;
3555
3556 bzero(&args.__reserved[0], sizeof(args.__reserved));
3557 args.version = kIOExternalMethodArgumentsCurrentVersion;
3558
3559 args.selector = selector;
3560
3561 args.asyncWakePort = MACH_PORT_NULL;
3562 args.asyncReference = 0;
3563 args.asyncReferenceCount = 0;
3564 args.structureVariableOutputData = 0;
3565
3566 args.scalarInput = scalar_input;
3567 args.scalarInputCount = scalar_inputCnt;
3568 args.structureInput = inband_input;
3569 args.structureInputSize = inband_inputCnt;
3570
3571 if (ool_input)
3572 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3573 kIODirectionOut, current_task());
3574
3575 args.structureInputDescriptor = inputMD;
3576
3577 args.scalarOutput = scalar_output;
3578 args.scalarOutputCount = *scalar_outputCnt;
3579 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3580 args.structureOutput = inband_output;
3581 args.structureOutputSize = *inband_outputCnt;
3582
3583 if (ool_output && ool_output_size)
3584 {
3585 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3586 kIODirectionIn, current_task());
3587 }
3588
3589 args.structureOutputDescriptor = outputMD;
3590 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3591
3592 IOStatisticsClientCall();
3593 ret = client->externalMethod( selector, &args );
3594
3595 *scalar_outputCnt = args.scalarOutputCount;
3596 *inband_outputCnt = args.structureOutputSize;
3597 *ool_output_size = args.structureOutputDescriptorSize;
3598
3599 if (inputMD)
3600 inputMD->release();
3601 if (outputMD)
3602 outputMD->release();
3603
3604 return (ret);
3605 }
3606
3607 /* Routine io_connect_async_method */
3608 kern_return_t is_io_connect_async_method
3609 (
3610 io_connect_t connection,
3611 mach_port_t wake_port,
3612 io_async_ref64_t reference,
3613 mach_msg_type_number_t referenceCnt,
3614 uint32_t selector,
3615 io_scalar_inband64_t scalar_input,
3616 mach_msg_type_number_t scalar_inputCnt,
3617 io_struct_inband_t inband_input,
3618 mach_msg_type_number_t inband_inputCnt,
3619 mach_vm_address_t ool_input,
3620 mach_vm_size_t ool_input_size,
3621 io_struct_inband_t inband_output,
3622 mach_msg_type_number_t *inband_outputCnt,
3623 io_scalar_inband64_t scalar_output,
3624 mach_msg_type_number_t *scalar_outputCnt,
3625 mach_vm_address_t ool_output,
3626 mach_vm_size_t * ool_output_size
3627 )
3628 {
3629 CHECK( IOUserClient, connection, client );
3630
3631 IOExternalMethodArguments args;
3632 IOReturn ret;
3633 IOMemoryDescriptor * inputMD = 0;
3634 IOMemoryDescriptor * outputMD = 0;
3635
3636 bzero(&args.__reserved[0], sizeof(args.__reserved));
3637 args.version = kIOExternalMethodArgumentsCurrentVersion;
3638
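// reference[0] carries the wake port name to the completion path; the low bit
// (kIOUCAsync64Flag) records whether the sender is a 64-bit task so the async reply can
// use the matching reference width.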
3639 reference[0] = (io_user_reference_t) wake_port;
3640 if (vm_map_is_64bit(get_task_map(current_task())))
3641 reference[0] |= kIOUCAsync64Flag;
3642
3643 args.selector = selector;
3644
3645 args.asyncWakePort = wake_port;
3646 args.asyncReference = reference;
3647 args.asyncReferenceCount = referenceCnt;
3648
3649 args.scalarInput = scalar_input;
3650 args.scalarInputCount = scalar_inputCnt;
3651 args.structureInput = inband_input;
3652 args.structureInputSize = inband_inputCnt;
3653
3654 if (ool_input)
3655 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3656 kIODirectionOut, current_task());
3657
3658 args.structureInputDescriptor = inputMD;
3659
3660 args.scalarOutput = scalar_output;
3661 args.scalarOutputCount = *scalar_outputCnt;
3662 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3663 args.structureOutput = inband_output;
3664 args.structureOutputSize = *inband_outputCnt;
3665
3666 if (ool_output)
3667 {
3668 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3669 kIODirectionIn, current_task());
3670 }
3671
3672 args.structureOutputDescriptor = outputMD;
3673 args.structureOutputDescriptorSize = *ool_output_size;
3674
3675 IOStatisticsClientCall();
3676 ret = client->externalMethod( selector, &args );
3677
3678 *inband_outputCnt = args.structureOutputSize;
3679 *ool_output_size = args.structureOutputDescriptorSize;
3680
3681 if (inputMD)
3682 inputMD->release();
3683 if (outputMD)
3684 outputMD->release();
3685
3686 return (ret);
3687 }
3688
3689 /* Routine io_connect_method_scalarI_scalarO */
3690 kern_return_t is_io_connect_method_scalarI_scalarO(
3691 io_object_t connect,
3692 uint32_t index,
3693 io_scalar_inband_t input,
3694 mach_msg_type_number_t inputCount,
3695 io_scalar_inband_t output,
3696 mach_msg_type_number_t * outputCount )
3697 {
3698 IOReturn err;
3699 uint32_t i;
3700 io_scalar_inband64_t _input;
3701 io_scalar_inband64_t _output;
3702
3703 mach_msg_type_number_t struct_outputCnt = 0;
3704 mach_vm_size_t ool_output_size = 0;
3705
3706 bzero(&_output[0], sizeof(_output));
3707 for (i = 0; i < inputCount; i++)
3708 _input[i] = SCALAR64(input[i]);
3709
3710 err = is_io_connect_method(connect, index,
3711 _input, inputCount,
3712 NULL, 0,
3713 0, 0,
3714 NULL, &struct_outputCnt,
3715 _output, outputCount,
3716 0, &ool_output_size);
3717
3718 for (i = 0; i < *outputCount; i++)
3719 output[i] = SCALAR32(_output[i]);
3720
3721 return (err);
3722 }
3723
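// Shim for the pre-IOExternalMethodArguments dispatch: the old IOExternalMethod table
// encodes the scalar input count in count0 and the scalar output count in count1, and the
// IOMethod is invoked with the inputs first followed by pointers to the output slots.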
3724 kern_return_t shim_io_connect_method_scalarI_scalarO(
3725 IOExternalMethod * method,
3726 IOService * object,
3727 const io_user_scalar_t * input,
3728 mach_msg_type_number_t inputCount,
3729 io_user_scalar_t * output,
3730 mach_msg_type_number_t * outputCount )
3731 {
3732 IOMethod func;
3733 io_scalar_inband_t _output;
3734 IOReturn err;
3735 err = kIOReturnBadArgument;
3736
3737 bzero(&_output[0], sizeof(_output));
3738 do {
3739
3740 if( inputCount != method->count0)
3741 {
3742 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3743 continue;
3744 }
3745 if( *outputCount != method->count1)
3746 {
3747 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3748 continue;
3749 }
3750
3751 func = method->func;
3752
3753 switch( inputCount) {
3754
3755 case 6:
3756 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3757 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3758 break;
3759 case 5:
3760 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3761 ARG32(input[3]), ARG32(input[4]),
3762 &_output[0] );
3763 break;
3764 case 4:
3765 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3766 ARG32(input[3]),
3767 &_output[0], &_output[1] );
3768 break;
3769 case 3:
3770 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3771 &_output[0], &_output[1], &_output[2] );
3772 break;
3773 case 2:
3774 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3775 &_output[0], &_output[1], &_output[2],
3776 &_output[3] );
3777 break;
3778 case 1:
3779 err = (object->*func)( ARG32(input[0]),
3780 &_output[0], &_output[1], &_output[2],
3781 &_output[3], &_output[4] );
3782 break;
3783 case 0:
3784 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3785 &_output[3], &_output[4], &_output[5] );
3786 break;
3787
3788 default:
3789 IOLog("%s: Bad method table\n", object->getName());
3790 }
3791 }
3792 while( false);
3793
3794 uint32_t i;
3795 for (i = 0; i < *outputCount; i++)
3796 output[i] = SCALAR32(_output[i]);
3797
3798 return( err);
3799 }
3800
3801 /* Routine io_async_method_scalarI_scalarO */
3802 kern_return_t is_io_async_method_scalarI_scalarO(
3803 io_object_t connect,
3804 mach_port_t wake_port,
3805 io_async_ref_t reference,
3806 mach_msg_type_number_t referenceCnt,
3807 uint32_t index,
3808 io_scalar_inband_t input,
3809 mach_msg_type_number_t inputCount,
3810 io_scalar_inband_t output,
3811 mach_msg_type_number_t * outputCount )
3812 {
3813 IOReturn err;
3814 uint32_t i;
3815 io_scalar_inband64_t _input;
3816 io_scalar_inband64_t _output;
3817 io_async_ref64_t _reference;
3818
3819 bzero(&_output[0], sizeof(_output));
3820 for (i = 0; i < referenceCnt; i++)
3821 _reference[i] = REF64(reference[i]);
3822
3823 mach_msg_type_number_t struct_outputCnt = 0;
3824 mach_vm_size_t ool_output_size = 0;
3825
3826 for (i = 0; i < inputCount; i++)
3827 _input[i] = SCALAR64(input[i]);
3828
3829 err = is_io_connect_async_method(connect,
3830 wake_port, _reference, referenceCnt,
3831 index,
3832 _input, inputCount,
3833 NULL, 0,
3834 0, 0,
3835 NULL, &struct_outputCnt,
3836 _output, outputCount,
3837 0, &ool_output_size);
3838
3839 for (i = 0; i < *outputCount; i++)
3840 output[i] = SCALAR32(_output[i]);
3841
3842 return (err);
3843 }
3844 /* Routine io_async_method_scalarI_structureO */
3845 kern_return_t is_io_async_method_scalarI_structureO(
3846 io_object_t connect,
3847 mach_port_t wake_port,
3848 io_async_ref_t reference,
3849 mach_msg_type_number_t referenceCnt,
3850 uint32_t index,
3851 io_scalar_inband_t input,
3852 mach_msg_type_number_t inputCount,
3853 io_struct_inband_t output,
3854 mach_msg_type_number_t * outputCount )
3855 {
3856 uint32_t i;
3857 io_scalar_inband64_t _input;
3858 io_async_ref64_t _reference;
3859
3860 for (i = 0; i < referenceCnt; i++)
3861 _reference[i] = REF64(reference[i]);
3862
3863 mach_msg_type_number_t scalar_outputCnt = 0;
3864 mach_vm_size_t ool_output_size = 0;
3865
3866 for (i = 0; i < inputCount; i++)
3867 _input[i] = SCALAR64(input[i]);
3868
3869 return (is_io_connect_async_method(connect,
3870 wake_port, _reference, referenceCnt,
3871 index,
3872 _input, inputCount,
3873 NULL, 0,
3874 0, 0,
3875 output, outputCount,
3876 NULL, &scalar_outputCnt,
3877 0, &ool_output_size));
3878 }
3879
3880 /* Routine io_async_method_scalarI_structureI */
3881 kern_return_t is_io_async_method_scalarI_structureI(
3882 io_connect_t connect,
3883 mach_port_t wake_port,
3884 io_async_ref_t reference,
3885 mach_msg_type_number_t referenceCnt,
3886 uint32_t index,
3887 io_scalar_inband_t input,
3888 mach_msg_type_number_t inputCount,
3889 io_struct_inband_t inputStruct,
3890 mach_msg_type_number_t inputStructCount )
3891 {
3892 uint32_t i;
3893 io_scalar_inband64_t _input;
3894 io_async_ref64_t _reference;
3895
3896 for (i = 0; i < referenceCnt; i++)
3897 _reference[i] = REF64(reference[i]);
3898
3899 mach_msg_type_number_t scalar_outputCnt = 0;
3900 mach_msg_type_number_t inband_outputCnt = 0;
3901 mach_vm_size_t ool_output_size = 0;
3902
3903 for (i = 0; i < inputCount; i++)
3904 _input[i] = SCALAR64(input[i]);
3905
3906 return (is_io_connect_async_method(connect,
3907 wake_port, _reference, referenceCnt,
3908 index,
3909 _input, inputCount,
3910 inputStruct, inputStructCount,
3911 0, 0,
3912 NULL, &inband_outputCnt,
3913 NULL, &scalar_outputCnt,
3914 0, &ool_output_size));
3915 }
3916
3917 /* Routine io_async_method_structureI_structureO */
3918 kern_return_t is_io_async_method_structureI_structureO(
3919 io_object_t connect,
3920 mach_port_t wake_port,
3921 io_async_ref_t reference,
3922 mach_msg_type_number_t referenceCnt,
3923 uint32_t index,
3924 io_struct_inband_t input,
3925 mach_msg_type_number_t inputCount,
3926 io_struct_inband_t output,
3927 mach_msg_type_number_t * outputCount )
3928 {
3929 uint32_t i;
3930 mach_msg_type_number_t scalar_outputCnt = 0;
3931 mach_vm_size_t ool_output_size = 0;
3932 io_async_ref64_t _reference;
3933
3934 for (i = 0; i < referenceCnt; i++)
3935 _reference[i] = REF64(reference[i]);
3936
3937 return (is_io_connect_async_method(connect,
3938 wake_port, _reference, referenceCnt,
3939 index,
3940 NULL, 0,
3941 input, inputCount,
3942 0, 0,
3943 output, outputCount,
3944 NULL, &scalar_outputCnt,
3945 0, &ool_output_size));
3946 }
3947
3948
3949 kern_return_t shim_io_async_method_scalarI_scalarO(
3950 IOExternalAsyncMethod * method,
3951 IOService * object,
3952 mach_port_t asyncWakePort,
3953 io_user_reference_t * asyncReference,
3954 uint32_t asyncReferenceCount,
3955 const io_user_scalar_t * input,
3956 mach_msg_type_number_t inputCount,
3957 io_user_scalar_t * output,
3958 mach_msg_type_number_t * outputCount )
3959 {
3960 IOAsyncMethod func;
3961 uint32_t i;
3962 io_scalar_inband_t _output;
3963 IOReturn err;
3964 io_async_ref_t reference;
3965
3966 bzero(&_output[0], sizeof(_output));
3967 for (i = 0; i < asyncReferenceCount; i++)
3968 reference[i] = REF32(asyncReference[i]);
3969
3970 err = kIOReturnBadArgument;
3971
3972 do {
3973
3974 if( inputCount != method->count0)
3975 {
3976 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3977 continue;
3978 }
3979 if( *outputCount != method->count1)
3980 {
3981 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3982 continue;
3983 }
3984
3985 func = method->func;
3986
3987 switch( inputCount) {
3988
3989 case 6:
3990 err = (object->*func)( reference,
3991 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3992 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3993 break;
3994 case 5:
3995 err = (object->*func)( reference,
3996 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3997 ARG32(input[3]), ARG32(input[4]),
3998 &_output[0] );
3999 break;
4000 case 4:
4001 err = (object->*func)( reference,
4002 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4003 ARG32(input[3]),
4004 &_output[0], &_output[1] );
4005 break;
4006 case 3:
4007 err = (object->*func)( reference,
4008 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4009 &_output[0], &_output[1], &_output[2] );
4010 break;
4011 case 2:
4012 err = (object->*func)( reference,
4013 ARG32(input[0]), ARG32(input[1]),
4014 &_output[0], &_output[1], &_output[2],
4015 &_output[3] );
4016 break;
4017 case 1:
4018 err = (object->*func)( reference,
4019 ARG32(input[0]),
4020 &_output[0], &_output[1], &_output[2],
4021 &_output[3], &_output[4] );
4022 break;
4023 case 0:
4024 err = (object->*func)( reference,
4025 &_output[0], &_output[1], &_output[2],
4026 &_output[3], &_output[4], &_output[5] );
4027 break;
4028
4029 default:
4030 IOLog("%s: Bad method table\n", object->getName());
4031 }
4032 }
4033 while( false);
4034
4035 for (i = 0; i < *outputCount; i++)
4036 output[i] = SCALAR32(_output[i]);
4037
4038 return( err);
4039 }
4040
4041
4042 /* Routine io_connect_method_scalarI_structureO */
4043 kern_return_t is_io_connect_method_scalarI_structureO(
4044 io_object_t connect,
4045 uint32_t index,
4046 io_scalar_inband_t input,
4047 mach_msg_type_number_t inputCount,
4048 io_struct_inband_t output,
4049 mach_msg_type_number_t * outputCount )
4050 {
4051 uint32_t i;
4052 io_scalar_inband64_t _input;
4053
4054 mach_msg_type_number_t scalar_outputCnt = 0;
4055 mach_vm_size_t ool_output_size = 0;
4056
4057 for (i = 0; i < inputCount; i++)
4058 _input[i] = SCALAR64(input[i]);
4059
4060 return (is_io_connect_method(connect, index,
4061 _input, inputCount,
4062 NULL, 0,
4063 0, 0,
4064 output, outputCount,
4065 NULL, &scalar_outputCnt,
4066 0, &ool_output_size));
4067 }
4068
4069 kern_return_t shim_io_connect_method_scalarI_structureO(
4071 IOExternalMethod * method,
4072 IOService * object,
4073 const io_user_scalar_t * input,
4074 mach_msg_type_number_t inputCount,
4075 io_struct_inband_t output,
4076 IOByteCount * outputCount )
4077 {
4078 IOMethod func;
4079 IOReturn err;
4080
4081 err = kIOReturnBadArgument;
4082
4083 do {
4084 if( inputCount != method->count0)
4085 {
4086 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4087 continue;
4088 }
4089 if( (kIOUCVariableStructureSize != method->count1)
4090 && (*outputCount != method->count1))
4091 {
4092 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4093 continue;
4094 }
4095
4096 func = method->func;
4097
4098 switch( inputCount) {
4099
4100 case 5:
4101 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4102 ARG32(input[3]), ARG32(input[4]),
4103 output );
4104 break;
4105 case 4:
4106 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4107 ARG32(input[3]),
4108 output, (void *)outputCount );
4109 break;
4110 case 3:
4111 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4112 output, (void *)outputCount, 0 );
4113 break;
4114 case 2:
4115 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4116 output, (void *)outputCount, 0, 0 );
4117 break;
4118 case 1:
4119 err = (object->*func)( ARG32(input[0]),
4120 output, (void *)outputCount, 0, 0, 0 );
4121 break;
4122 case 0:
4123 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4124 break;
4125
4126 default:
4127 IOLog("%s: Bad method table\n", object->getName());
4128 }
4129 }
4130 while( false);
4131
4132 return( err);
4133 }
4134
4135
4136 kern_return_t shim_io_async_method_scalarI_structureO(
4137 IOExternalAsyncMethod * method,
4138 IOService * object,
4139 mach_port_t asyncWakePort,
4140 io_user_reference_t * asyncReference,
4141 uint32_t asyncReferenceCount,
4142 const io_user_scalar_t * input,
4143 mach_msg_type_number_t inputCount,
4144 io_struct_inband_t output,
4145 mach_msg_type_number_t * outputCount )
4146 {
4147 IOAsyncMethod func;
4148 uint32_t i;
4149 IOReturn err;
4150 io_async_ref_t reference;
4151
4152 for (i = 0; i < asyncReferenceCount; i++)
4153 reference[i] = REF32(asyncReference[i]);
4154
4155 err = kIOReturnBadArgument;
4156 do {
4157 if( inputCount != method->count0)
4158 {
4159 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4160 continue;
4161 }
4162 if( (kIOUCVariableStructureSize != method->count1)
4163 && (*outputCount != method->count1))
4164 {
4165 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4166 continue;
4167 }
4168
4169 func = method->func;
4170
4171 switch( inputCount) {
4172
4173 case 5:
4174 err = (object->*func)( reference,
4175 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4176 ARG32(input[3]), ARG32(input[4]),
4177 output );
4178 break;
4179 case 4:
4180 err = (object->*func)( reference,
4181 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4182 ARG32(input[3]),
4183 output, (void *)outputCount );
4184 break;
4185 case 3:
4186 err = (object->*func)( reference,
4187 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4188 output, (void *)outputCount, 0 );
4189 break;
4190 case 2:
4191 err = (object->*func)( reference,
4192 ARG32(input[0]), ARG32(input[1]),
4193 output, (void *)outputCount, 0, 0 );
4194 break;
4195 case 1:
4196 err = (object->*func)( reference,
4197 ARG32(input[0]),
4198 output, (void *)outputCount, 0, 0, 0 );
4199 break;
4200 case 0:
4201 err = (object->*func)( reference,
4202 output, (void *)outputCount, 0, 0, 0, 0 );
4203 break;
4204
4205 default:
4206 IOLog("%s: Bad method table\n", object->getName());
4207 }
4208 }
4209 while( false);
4210
4211 return( err);
4212 }
4213
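/*
 * The "scalar in, structure in" shape follows. In the shim below, a count1 of
 * kIOUCVariableStructureSize lets the driver accept an inbound structure of
 * any size; the actual byte count is passed in the argument slot after the
 * structure pointer.
 *
 * Illustrative sketch of the legacy handler signature such a table entry would
 * point at (hypothetical names, one scalar input):
 *
 *   // IOReturn MyDriverUserClient::write(void * channel,       // ARG32(input[0])
 *   //                                    void * inStruct,      // inputStruct
 *   //                                    void * inStructSize,  // inputStructCount
 *   //                                    void *, void *, void *);
 */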
4214 /* Routine io_connect_method_scalarI_structureI */
4215 kern_return_t is_io_connect_method_scalarI_structureI(
4216 io_connect_t connect,
4217 uint32_t index,
4218 io_scalar_inband_t input,
4219 mach_msg_type_number_t inputCount,
4220 io_struct_inband_t inputStruct,
4221 mach_msg_type_number_t inputStructCount )
4222 {
4223 uint32_t i;
4224 io_scalar_inband64_t _input;
4225
4226 mach_msg_type_number_t scalar_outputCnt = 0;
4227 mach_msg_type_number_t inband_outputCnt = 0;
4228 mach_vm_size_t ool_output_size = 0;
4229
4230 for (i = 0; i < inputCount; i++)
4231 _input[i] = SCALAR64(input[i]);
4232
4233 return (is_io_connect_method(connect, index,
4234 _input, inputCount,
4235 inputStruct, inputStructCount,
4236 0, 0,
4237 NULL, &inband_outputCnt,
4238 NULL, &scalar_outputCnt,
4239 0, &ool_output_size));
4240 }
4241
4242 kern_return_t shim_io_connect_method_scalarI_structureI(
4243 IOExternalMethod * method,
4244 IOService * object,
4245 const io_user_scalar_t * input,
4246 mach_msg_type_number_t inputCount,
4247 io_struct_inband_t inputStruct,
4248 mach_msg_type_number_t inputStructCount )
4249 {
4250 IOMethod func;
4251 IOReturn err = kIOReturnBadArgument;
4252
4253 do
4254 {
4255 if (inputCount != method->count0)
4256 {
4257 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4258 continue;
4259 }
4260 if( (kIOUCVariableStructureSize != method->count1)
4261 && (inputStructCount != method->count1))
4262 {
4263 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4264 continue;
4265 }
4266
4267 func = method->func;
4268
4269 switch( inputCount) {
4270
4271 case 5:
4272 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4273 ARG32(input[3]), ARG32(input[4]),
4274 inputStruct );
4275 break;
4276 case 4:
4277 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4278 ARG32(input[3]),
4279 inputStruct, (void *)(uintptr_t)inputStructCount );
4280 break;
4281 case 3:
4282 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4283 inputStruct, (void *)(uintptr_t)inputStructCount,
4284 0 );
4285 break;
4286 case 2:
4287 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4288 inputStruct, (void *)(uintptr_t)inputStructCount,
4289 0, 0 );
4290 break;
4291 case 1:
4292 err = (object->*func)( ARG32(input[0]),
4293 inputStruct, (void *)(uintptr_t)inputStructCount,
4294 0, 0, 0 );
4295 break;
4296 case 0:
4297 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4298 0, 0, 0, 0 );
4299 break;
4300
4301 default:
4302 IOLog("%s: Bad method table\n", object->getName());
4303 }
4304 }
4305 while (false);
4306
4307 return( err);
4308 }
4309
4310 kern_return_t shim_io_async_method_scalarI_structureI(
4311 IOExternalAsyncMethod * method,
4312 IOService * object,
4313 mach_port_t asyncWakePort,
4314 io_user_reference_t * asyncReference,
4315 uint32_t asyncReferenceCount,
4316 const io_user_scalar_t * input,
4317 mach_msg_type_number_t inputCount,
4318 io_struct_inband_t inputStruct,
4319 mach_msg_type_number_t inputStructCount )
4320 {
4321 IOAsyncMethod func;
4322 uint32_t i;
4323 IOReturn err = kIOReturnBadArgument;
4324 io_async_ref_t reference;
4325
4326 for (i = 0; i < asyncReferenceCount; i++)
4327 reference[i] = REF32(asyncReference[i]);
4328
4329 do
4330 {
4331 if (inputCount != method->count0)
4332 {
4333 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4334 continue;
4335 }
4336 if( (kIOUCVariableStructureSize != method->count1)
4337 && (inputStructCount != method->count1))
4338 {
4339 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4340 continue;
4341 }
4342
4343 func = method->func;
4344
4345 switch( inputCount) {
4346
4347 case 5:
4348 err = (object->*func)( reference,
4349 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4350 ARG32(input[3]), ARG32(input[4]),
4351 inputStruct );
4352 break;
4353 case 4:
4354 err = (object->*func)( reference,
4355 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4356 ARG32(input[3]),
4357 inputStruct, (void *)(uintptr_t)inputStructCount );
4358 break;
4359 case 3:
4360 err = (object->*func)( reference,
4361 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4362 inputStruct, (void *)(uintptr_t)inputStructCount,
4363 0 );
4364 break;
4365 case 2:
4366 err = (object->*func)( reference,
4367 ARG32(input[0]), ARG32(input[1]),
4368 inputStruct, (void *)(uintptr_t)inputStructCount,
4369 0, 0 );
4370 break;
4371 case 1:
4372 err = (object->*func)( reference,
4373 ARG32(input[0]),
4374 inputStruct, (void *)(uintptr_t)inputStructCount,
4375 0, 0, 0 );
4376 break;
4377 case 0:
4378 err = (object->*func)( reference,
4379 inputStruct, (void *)(uintptr_t)inputStructCount,
4380 0, 0, 0, 0 );
4381 break;
4382
4383 default:
4384 IOLog("%s: Bad method table\n", object->getName());
4385 }
4386 }
4387 while (false);
4388
4389 return( err);
4390 }
4391
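/*
 * The "structure in, structure out" shape follows. The shim below picks one of
 * three legacy calling conventions from the declared counts: structure in and
 * out when both count0 and count1 are set, structure out only when count0 is
 * zero, and structure in only when count1 is zero. Modern user space reaches
 * this path through IOKitLib's IOConnectCallStructMethod().
 *
 * Illustrative sketch only; MyRequest, MyReply and kMySelector are hypothetical:
 *
 *   MyRequest     req     = { 0 };
 *   MyReply       rep;
 *   size_t        repSize = sizeof(rep);
 *   kern_return_t kr = IOConnectCallStructMethod(connect, kMySelector,
 *                                                &req, sizeof(req),
 *                                                &rep, &repSize);
 */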
4392 /* Routine io_connect_method_structureI_structureO */
4393 kern_return_t is_io_connect_method_structureI_structureO(
4394 io_object_t connect,
4395 uint32_t index,
4396 io_struct_inband_t input,
4397 mach_msg_type_number_t inputCount,
4398 io_struct_inband_t output,
4399 mach_msg_type_number_t * outputCount )
4400 {
4401 mach_msg_type_number_t scalar_outputCnt = 0;
4402 mach_vm_size_t ool_output_size = 0;
4403
4404 return (is_io_connect_method(connect, index,
4405 NULL, 0,
4406 input, inputCount,
4407 0, 0,
4408 output, outputCount,
4409 NULL, &scalar_outputCnt,
4410 0, &ool_output_size));
4411 }
4412
4413 kern_return_t shim_io_connect_method_structureI_structureO(
4414 IOExternalMethod * method,
4415 IOService * object,
4416 io_struct_inband_t input,
4417 mach_msg_type_number_t inputCount,
4418 io_struct_inband_t output,
4419 IOByteCount * outputCount )
4420 {
4421 IOMethod func;
4422 IOReturn err = kIOReturnBadArgument;
4423
4424 do
4425 {
4426 if( (kIOUCVariableStructureSize != method->count0)
4427 && (inputCount != method->count0))
4428 {
4429 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4430 continue;
4431 }
4432 if( (kIOUCVariableStructureSize != method->count1)
4433 && (*outputCount != method->count1))
4434 {
4435 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4436 continue;
4437 }
4438
4439 func = method->func;
4440
4441 if( method->count1) {
4442 if( method->count0) {
4443 err = (object->*func)( input, output,
4444 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4445 } else {
4446 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4447 }
4448 } else {
4449 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4450 }
4451 }
4452 while( false);
4453
4454
4455 return( err);
4456 }
4457
4458 kern_return_t shim_io_async_method_structureI_structureO(
4459 IOExternalAsyncMethod * method,
4460 IOService * object,
4461 mach_port_t asyncWakePort,
4462 io_user_reference_t * asyncReference,
4463 uint32_t asyncReferenceCount,
4464 io_struct_inband_t input,
4465 mach_msg_type_number_t inputCount,
4466 io_struct_inband_t output,
4467 mach_msg_type_number_t * outputCount )
4468 {
4469 IOAsyncMethod func;
4470 uint32_t i;
4471 IOReturn err;
4472 io_async_ref_t reference;
4473
4474 for (i = 0; i < asyncReferenceCount; i++)
4475 reference[i] = REF32(asyncReference[i]);
4476
4477 err = kIOReturnBadArgument;
4478 do
4479 {
4480 if( (kIOUCVariableStructureSize != method->count0)
4481 && (inputCount != method->count0))
4482 {
4483 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4484 continue;
4485 }
4486 if( (kIOUCVariableStructureSize != method->count1)
4487 && (*outputCount != method->count1))
4488 {
4489 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4490 continue;
4491 }
4492
4493 func = method->func;
4494
4495 if( method->count1) {
4496 if( method->count0) {
4497 err = (object->*func)( reference,
4498 input, output,
4499 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4500 } else {
4501 err = (object->*func)( reference,
4502 output, outputCount, 0, 0, 0, 0 );
4503 }
4504 } else {
4505 err = (object->*func)( reference,
4506 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4507 }
4508 }
4509 while( false);
4510
4511 return( err);
4512 }
4513
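/*
 * is_io_catalog_send_data is the kernel side of catalogue updates from user
 * space (kextd / IOKitLib's IOCatalogueSendData()). The payload arrives as
 * out-of-line serialized XML, is copied out of the message with
 * vm_map_copyout(), rebuilt with OSUnserializeXML(), and then applied to
 * gIOCatalogue according to `flag`.
 *
 * Illustrative sketch of the kind of payload an add-drivers request carries,
 * an array of driver personalities (bundle and class names are hypothetical):
 *
 *   <array>
 *     <dict>
 *       <key>CFBundleIdentifier</key> <string>com.example.driver.MyDriver</string>
 *       <key>IOClass</key>            <string>MyDriver</string>
 *       <key>IOProviderClass</key>    <string>IOResources</string>
 *       <key>IOMatchCategory</key>    <string>MyDriver</string>
 *     </dict>
 *   </array>
 */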
4514 /* Routine io_catalog_send_data */
4515 kern_return_t is_io_catalog_send_data(
4516 mach_port_t master_port,
4517 uint32_t flag,
4518 io_buf_ptr_t inData,
4519 mach_msg_type_number_t inDataCount,
4520 kern_return_t * result)
4521 {
4522 OSObject * obj = 0;
4523 vm_offset_t data;
4524 kern_return_t kr = kIOReturnError;
4525
4526 //printf("io_catalog_send_data called. flag: %d\n", flag);
4527
4528 if( master_port != master_device_port)
4529 return kIOReturnNotPrivileged;
4530
4531 if( (flag != kIOCatalogRemoveKernelLinker &&
4532 flag != kIOCatalogKextdActive &&
4533 flag != kIOCatalogKextdFinishedLaunching) &&
4534 ( !inData || !inDataCount) )
4535 {
4536 return kIOReturnBadArgument;
4537 }
4538
4539 if (inData) {
4540 vm_map_offset_t map_data;
4541
4542 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4543 return( kIOReturnMessageTooLarge);
4544
4545 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4546 data = CAST_DOWN(vm_offset_t, map_data);
4547
4548 if( kr != KERN_SUCCESS)
4549 return kr;
4550
4551 // the copy object is consumed once vm_map_copyout() succeeds, so from here on
4551 // this routine must return KERN_SUCCESS and report failures through *result
4552
4553 if( inDataCount ) {
4554 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4555 vm_deallocate( kernel_map, data, inDataCount );
4556 if( !obj) {
4557 *result = kIOReturnNoMemory;
4558 return( KERN_SUCCESS);
4559 }
4560 }
4561 }
4562
4563 switch ( flag ) {
4564 case kIOCatalogResetDrivers:
4565 case kIOCatalogResetDriversNoMatch: {
4566 OSArray * array;
4567
4568 array = OSDynamicCast(OSArray, obj);
4569 if (array) {
4570 if ( !gIOCatalogue->resetAndAddDrivers(array,
4571 flag == kIOCatalogResetDrivers) ) {
4572
4573 kr = kIOReturnError;
4574 }
4575 } else {
4576 kr = kIOReturnBadArgument;
4577 }
4578 }
4579 break;
4580
4581 case kIOCatalogAddDrivers:
4582 case kIOCatalogAddDriversNoMatch: {
4583 OSArray * array;
4584
4585 array = OSDynamicCast(OSArray, obj);
4586 if ( array ) {
4587 if ( !gIOCatalogue->addDrivers( array ,
4588 flag == kIOCatalogAddDrivers) ) {
4589 kr = kIOReturnError;
4590 }
4591 }
4592 else {
4593 kr = kIOReturnBadArgument;
4594 }
4595 }
4596 break;
4597
4598 case kIOCatalogRemoveDrivers:
4599 case kIOCatalogRemoveDriversNoMatch: {
4600 OSDictionary * dict;
4601
4602 dict = OSDynamicCast(OSDictionary, obj);
4603 if ( dict ) {
4604 if ( !gIOCatalogue->removeDrivers( dict,
4605 flag == kIOCatalogRemoveDrivers ) ) {
4606 kr = kIOReturnError;
4607 }
4608 }
4609 else {
4610 kr = kIOReturnBadArgument;
4611 }
4612 }
4613 break;
4614
4615 case kIOCatalogStartMatching: {
4616 OSDictionary * dict;
4617
4618 dict = OSDynamicCast(OSDictionary, obj);
4619 if ( dict ) {
4620 if ( !gIOCatalogue->startMatching( dict ) ) {
4621 kr = kIOReturnError;
4622 }
4623 }
4624 else {
4625 kr = kIOReturnBadArgument;
4626 }
4627 }
4628 break;
4629
4630 case kIOCatalogRemoveKernelLinker:
4631 kr = KERN_NOT_SUPPORTED;
4632 break;
4633
4634 case kIOCatalogKextdActive:
4635 #if !NO_KEXTD
4636 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4637 OSKext::setKextdActive();
4638
4639 /* Dump all nonloaded startup extensions; kextd will now send them
4640 * down on request.
4641 */
4642 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4643 #endif
4644 kr = kIOReturnSuccess;
4645 break;
4646
4647 case kIOCatalogKextdFinishedLaunching: {
4648 #if !NO_KEXTD
4649 static bool clearedBusy = false;
4650
4651 if (!clearedBusy) {
4652 IOService * serviceRoot = IOService::getServiceRoot();
4653 if (serviceRoot) {
4654 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4655 serviceRoot->adjustBusy(-1);
4656 clearedBusy = true;
4657 }
4658 }
4659 #endif
4660 kr = kIOReturnSuccess;
4661 }
4662 break;
4663
4664 default:
4665 kr = kIOReturnBadArgument;
4666 break;
4667 }
4668
4669 if (obj) obj->release();
4670
4671 *result = kr;
4672 return( KERN_SUCCESS);
4673 }
4674
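/*
 * is_io_catalog_terminate requires both the master device port and
 * administrator privilege. kIOCatalogServiceTerminate synchronously terminates
 * every registry service whose class matches `name` (via metaCast), while the
 * module flags delegate to gIOCatalogue->terminateDriversForModule(). User
 * space reaches it through IOKitLib's IOCatalogueTerminate(), e.g.
 * (hypothetical bundle identifier):
 *
 *   IOCatalogueTerminate(kIOMasterPortDefault, kIOCatalogModuleUnload,
 *                        "com.example.driver.MyDriver");
 */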
4675 /* Routine io_catalog_terminate */
4676 kern_return_t is_io_catalog_terminate(
4677 mach_port_t master_port,
4678 uint32_t flag,
4679 io_name_t name )
4680 {
4681 kern_return_t kr;
4682
4683 if( master_port != master_device_port )
4684 return kIOReturnNotPrivileged;
4685
4686 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4687 kIOClientPrivilegeAdministrator );
4688 if( kIOReturnSuccess != kr)
4689 return( kr );
4690
4691 switch ( flag ) {
4692 #if !defined(SECURE_KERNEL)
4693 case kIOCatalogServiceTerminate:
4694 OSIterator * iter;
4695 IOService * service;
4696
4697 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4698 kIORegistryIterateRecursively);
4699 if ( !iter )
4700 return kIOReturnNoMemory;
4701
4702 do {
4703 iter->reset();
4704 while( (service = (IOService *)iter->getNextObject()) ) {
4705 if( service->metaCast(name)) {
4706 if ( !service->terminate( kIOServiceRequired
4707 | kIOServiceSynchronous) ) {
4708 kr = kIOReturnUnsupported;
4709 break;
4710 }
4711 }
4712 }
4713 } while( !service && !iter->isValid());
4714 iter->release();
4715 break;
4716
4717 case kIOCatalogModuleUnload:
4718 case kIOCatalogModuleTerminate:
4719 kr = gIOCatalogue->terminateDriversForModule(name,
4720 flag == kIOCatalogModuleUnload);
4721 break;
4722 #endif
4723
4724 default:
4725 kr = kIOReturnBadArgument;
4726 break;
4727 }
4728
4729 return( kr );
4730 }
4731
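/*
 * is_io_catalog_get_data serializes the requested view of the catalogue into
 * an OSSerialize buffer, copies it into a fresh kernel_map allocation and
 * hands it back as out-of-line data via vm_map_copyin(); MIG then moves the
 * copy object to the caller, which owns the pages afterwards.
 *
 * Rough user-space sketch, assuming IOKitLib's IOCatalogueGetData() wrapper
 * and the kIOCatalogGetContents flag; check IOKitLib for the exact constants
 * and ownership rules:
 *
 *   char *        buf  = NULL;
 *   uint32_t      size = 0;
 *   kern_return_t kr   = IOCatalogueGetData(kIOMasterPortDefault,
 *                                           kIOCatalogGetContents, &buf, &size);
 *   if (kr == KERN_SUCCESS) {
 *       // buf holds serialized catalogue XML
 *       vm_deallocate(mach_task_self(), (vm_address_t) buf, size);
 *   }
 */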
4732 /* Routine io_catalog_get_data */
4733 kern_return_t is_io_catalog_get_data(
4734 mach_port_t master_port,
4735 uint32_t flag,
4736 io_buf_ptr_t *outData,
4737 mach_msg_type_number_t *outDataCount)
4738 {
4739 kern_return_t kr = kIOReturnSuccess;
4740 OSSerialize * s;
4741
4742 if( master_port != master_device_port)
4743 return kIOReturnNotPrivileged;
4744
4745 //printf("io_catalog_get_data called. flag: %d\n", flag);
4746
4747 s = OSSerialize::withCapacity(4096);
4748 if ( !s )
4749 return kIOReturnNoMemory;
4750
4751 kr = gIOCatalogue->serializeData(flag, s);
4752
4753 if ( kr == kIOReturnSuccess ) {
4754 vm_offset_t data;
4755 vm_map_copy_t copy;
4756 vm_size_t size;
4757
4758 size = s->getLength();
4759 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4760 if ( kr == kIOReturnSuccess ) {
4761 bcopy(s->text(), (void *)data, size);
4762 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4763 (vm_map_size_t)size, true, &copy);
4764 *outData = (char *)copy;
4765 *outDataCount = size;
4766 }
4767 }
4768
4769 s->release();
4770
4771 return kr;
4772 }
4773
4774 /* Routine io_catalog_get_gen_count */
4775 kern_return_t is_io_catalog_get_gen_count(
4776 mach_port_t master_port,
4777 uint32_t *genCount)
4778 {
4779 if( master_port != master_device_port)
4780 return kIOReturnNotPrivileged;
4781
4782 //printf("io_catalog_get_gen_count called.\n");
4783
4784 if ( !genCount )
4785 return kIOReturnBadArgument;
4786
4787 *genCount = gIOCatalogue->getGenerationCount();
4788
4789 return kIOReturnSuccess;
4790 }
4791
4792 /* Routine io_catalog_module_loaded.
4793 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); appears to be unused.
4794 */
4795 kern_return_t is_io_catalog_module_loaded(
4796 mach_port_t master_port,
4797 io_name_t name)
4798 {
4799 if( master_port != master_device_port)
4800 return kIOReturnNotPrivileged;
4801
4802 //printf("io_catalog_module_loaded called. name %s\n", name);
4803
4804 if ( !name )
4805 return kIOReturnBadArgument;
4806
4807 gIOCatalogue->moduleHasLoaded(name);
4808
4809 return kIOReturnSuccess;
4810 }
4811
4812 kern_return_t is_io_catalog_reset(
4813 mach_port_t master_port,
4814 uint32_t flag)
4815 {
4816 if( master_port != master_device_port)
4817 return kIOReturnNotPrivileged;
4818
4819 switch ( flag ) {
4820 case kIOCatalogResetDefault:
4821 gIOCatalogue->reset();
4822 break;
4823
4824 default:
4825 return kIOReturnBadArgument;
4826 }
4827
4828 return kIOReturnSuccess;
4829 }
4830
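/*
 * iokit_user_client_trap is the lightweight, MIG-free entry into a user
 * client: user space issues the trap (wrapped by IOConnectTrap0..6 in
 * IOKitLib), the connect reference is resolved to the IOUserClient, and the
 * IOExternalTrap returned by getTargetAndTrapForIndex() is invoked with up to
 * six untyped word arguments, with no copyin/copyout of structures.
 *
 * Illustrative sketch only; the subclass, table and index are hypothetical:
 *
 *   // Kernel side:
 *   //   IOExternalTrap * MyDriverUserClient::getTargetAndTrapForIndex(
 *   //           IOService ** targetP, UInt32 index)
 *   //   {
 *   //       if (index >= kNumTraps) return NULL;
 *   //       *targetP = this;
 *   //       return &sTraps[index];
 *   //   }
 *   // User side:
 *   //   kern_return_t kr = IOConnectTrap6(connect, kMyTrapIndex,
 *   //                                     p1, p2, p3, p4, p5, p6);
 */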
4831 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4832 {
4833 kern_return_t result = kIOReturnBadArgument;
4834 IOUserClient *userClient;
4835
4836 if ((userClient = OSDynamicCast(IOUserClient,
4837 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4838 IOExternalTrap *trap;
4839 IOService *target = NULL;
4840
4841 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4842
4843 if (trap && target) {
4844 IOTrap func;
4845
4846 func = trap->func;
4847
4848 if (func) {
4849 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4850 }
4851 }
4852
4853 userClient->release();
4854 }
4855
4856 return result;
4857 }
4858
4859 } /* extern "C" */
4860
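/*
 * externalMethod() is the modern dispatch point. When the subclass supplies an
 * IOExternalMethodDispatch, the four declared counts/sizes are validated first
 * (kIOUCVariableStructureSize meaning "any") and the dispatch function is
 * called; without a dispatch entry it falls back to the legacy
 * IOExternalMethod / IOExternalAsyncMethod tables through the shim_* adapters
 * above.
 *
 * Illustrative sketch of the usual subclass override (class, selector count
 * and method names are hypothetical; sGetStatus is a static member with the
 * IOExternalMethodAction signature):
 *
 *   static const IOExternalMethodDispatch sMethods[kNumMethods] = {
 *       // function,                      scalar-in, struct-in, scalar-out, struct-out
 *       { MyDriverUserClient::sGetStatus, 1,         0,         2,          0 },
 *   };
 *
 *   IOReturn MyDriverUserClient::externalMethod(uint32_t selector,
 *           IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *           OSObject * target, void * reference)
 *   {
 *       if (selector < kNumMethods) {
 *           dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return super::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */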
4861 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4862 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4863 {
4864 IOReturn err;
4865 IOService * object;
4866 IOByteCount structureOutputSize;
4867
4868 if (dispatch)
4869 {
4870 uint32_t count;
4871 count = dispatch->checkScalarInputCount;
4872 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4873 {
4874 return (kIOReturnBadArgument);
4875 }
4876
4877 count = dispatch->checkStructureInputSize;
4878 if ((kIOUCVariableStructureSize != count)
4879 && (count != ((args->structureInputDescriptor)
4880 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4881 {
4882 return (kIOReturnBadArgument);
4883 }
4884
4885 count = dispatch->checkScalarOutputCount;
4886 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4887 {
4888 return (kIOReturnBadArgument);
4889 }
4890
4891 count = dispatch->checkStructureOutputSize;
4892 if ((kIOUCVariableStructureSize != count)
4893 && (count != ((args->structureOutputDescriptor)
4894 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4895 {
4896 return (kIOReturnBadArgument);
4897 }
4898
4899 if (dispatch->function)
4900 err = (*dispatch->function)(target, reference, args);
4901 else
4902 err = kIOReturnNoCompletion; /* implementor can dispatch */
4903
4904 return (err);
4905 }
4906
4907
4908 // pre-Leopard APIs don't support out-of-line (ool) structs
4909 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4910 {
4911 err = kIOReturnIPCError;
4912 return (err);
4913 }
4914
4915 structureOutputSize = args->structureOutputSize;
4916
4917 if (args->asyncWakePort)
4918 {
4919 IOExternalAsyncMethod * method;
4920 object = 0;
4921 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
4922 return (kIOReturnUnsupported);
4923
4924 if (kIOUCForegroundOnly & method->flags)
4925 {
4926 if (task_is_gpu_denied(current_task()))
4927 return (kIOReturnNotPermitted);
4928 }
4929
4930 switch (method->flags & kIOUCTypeMask)
4931 {
4932 case kIOUCScalarIStructI:
4933 err = shim_io_async_method_scalarI_structureI( method, object,
4934 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4935 args->scalarInput, args->scalarInputCount,
4936 (char *)args->structureInput, args->structureInputSize );
4937 break;
4938
4939 case kIOUCScalarIScalarO:
4940 err = shim_io_async_method_scalarI_scalarO( method, object,
4941 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4942 args->scalarInput, args->scalarInputCount,
4943 args->scalarOutput, &args->scalarOutputCount );
4944 break;
4945
4946 case kIOUCScalarIStructO:
4947 err = shim_io_async_method_scalarI_structureO( method, object,
4948 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4949 args->scalarInput, args->scalarInputCount,
4950 (char *) args->structureOutput, &args->structureOutputSize );
4951 break;
4952
4953
4954 case kIOUCStructIStructO:
4955 err = shim_io_async_method_structureI_structureO( method, object,
4956 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4957 (char *)args->structureInput, args->structureInputSize,
4958 (char *) args->structureOutput, &args->structureOutputSize );
4959 break;
4960
4961 default:
4962 err = kIOReturnBadArgument;
4963 break;
4964 }
4965 }
4966 else
4967 {
4968 IOExternalMethod * method;
4969 object = 0;
4970 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
4971 return (kIOReturnUnsupported);
4972
4973 if (kIOUCForegroundOnly & method->flags)
4974 {
4975 if (task_is_gpu_denied(current_task()))
4976 return (kIOReturnNotPermitted);
4977 }
4978
4979 switch (method->flags & kIOUCTypeMask)
4980 {
4981 case kIOUCScalarIStructI:
4982 err = shim_io_connect_method_scalarI_structureI( method, object,
4983 args->scalarInput, args->scalarInputCount,
4984 (char *) args->structureInput, args->structureInputSize );
4985 break;
4986
4987 case kIOUCScalarIScalarO:
4988 err = shim_io_connect_method_scalarI_scalarO( method, object,
4989 args->scalarInput, args->scalarInputCount,
4990 args->scalarOutput, &args->scalarOutputCount );
4991 break;
4992
4993 case kIOUCScalarIStructO:
4994 err = shim_io_connect_method_scalarI_structureO( method, object,
4995 args->scalarInput, args->scalarInputCount,
4996 (char *) args->structureOutput, &structureOutputSize );
4997 break;
4998
4999
5000 case kIOUCStructIStructO:
5001 err = shim_io_connect_method_structureI_structureO( method, object,
5002 (char *) args->structureInput, args->structureInputSize,
5003 (char *) args->structureOutput, &structureOutputSize );
5004 break;
5005
5006 default:
5007 err = kIOReturnBadArgument;
5008 break;
5009 }
5010 }
5011
5012 args->structureOutputSize = structureOutputSize;
5013
5014 return (err);
5015 }
5016
5017
5018 #if __LP64__
5019 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5020 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5021 #else
5022 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5023 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5024 #endif
5025 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5026 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5027 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5028 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5029 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5030 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5031 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5032 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5033 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5034 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5035 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5036 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5037 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5038 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5039