[apple/xnu.git] xnu-2422.90.20 / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2012 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46
47 #if CONFIG_MACF
48
49 extern "C" {
50 #include <security/mac_framework.h>
51 };
52 #include <sys/kauth.h>
53
54 #define IOMACF_LOG 0
55
56 #endif /* CONFIG_MACF */
57
58 #include <IOKit/assert.h>
59
60 #include "IOServicePrivate.h"
61 #include "IOKitKernelInternal.h"
62
63 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
64 #define SCALAR32(x) ((uint32_t )x)
65 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
66 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
67 #define REF32(x) ((int)(x))
68
69 enum
70 {
71 kIOUCAsync0Flags = 3ULL,
72 kIOUCAsync64Flag = 1ULL
73 };
74
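// The low bits of asyncRef[kIOAsyncReservedIndex] carry flags alongside the
// wake port: kIOUCAsync0Flags is the mask used to strip them when the port is
// recovered, and kIOUCAsync64Flag records that the owning task is 64-bit so
// async replies use the 64-bit message layout (see _sendAsyncResult64 below).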
75 #if IOKITSTATS
76
77 #define IOStatisticsRegisterCounter() \
78 do { \
79 reserved->counter = IOStatistics::registerUserClient(this); \
80 } while (0)
81
82 #define IOStatisticsUnregisterCounter() \
83 do { \
84 if (reserved) \
85 IOStatistics::unregisterUserClient(reserved->counter); \
86 } while (0)
87
88 #define IOStatisticsClientCall() \
89 do { \
90 IOStatistics::countUserClientCall(client); \
91 } while (0)
92
93 #else
94
95 #define IOStatisticsRegisterCounter()
96 #define IOStatisticsUnregisterCounter()
97 #define IOStatisticsClientCall()
98
99 #endif /* IOKITSTATS */
100
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103 // definitions we should get from osfmk
104
105 //typedef struct ipc_port * ipc_port_t;
106 typedef natural_t ipc_kobject_type_t;
107
108 #define IKOT_IOKIT_SPARE 27
109 #define IKOT_IOKIT_CONNECT 29
110 #define IKOT_IOKIT_OBJECT 30
111
112 extern "C" {
113
114 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
115 ipc_kobject_type_t type );
116
117 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
118
119 extern mach_port_name_t iokit_make_send_right( task_t task,
120 io_object_t obj, ipc_kobject_type_t type );
121
122 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
123
124 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
125
126 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
127
128 extern ipc_port_t master_device_port;
129
130 extern void iokit_retain_port( ipc_port_t port );
131 extern void iokit_release_port( ipc_port_t port );
132 extern void iokit_release_port_send( ipc_port_t port );
133
134 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
135
136 #include <mach/mach_traps.h>
137 #include <vm/vm_map.h>
138
139 } /* extern "C" */
140
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
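// Two global dictionaries (gIOObjectPorts, gIOConnectPorts), one per kobject
// type, key an IOMachPort by the raw object pointer and are guarded by
// gIOObjectPortLock; mscount tracks make-send counts so a no-more-senders
// notification can be matched against rights created in the meantime.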
145
146 class IOMachPort : public OSObject
147 {
148 OSDeclareDefaultStructors(IOMachPort)
149 public:
150 OSObject * object;
151 ipc_port_t port;
152 UInt32 mscount;
153 UInt8 holdDestroy;
154
155 static IOMachPort * portForObject( OSObject * obj,
156 ipc_kobject_type_t type );
157 static bool noMoreSendersForObject( OSObject * obj,
158 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
159 static void releasePortForObject( OSObject * obj,
160 ipc_kobject_type_t type );
161 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
162
163 static OSDictionary * dictForType( ipc_kobject_type_t type );
164
165 static mach_port_name_t makeSendRightForTask( task_t task,
166 io_object_t obj, ipc_kobject_type_t type );
167
168 virtual void free();
169 };
170
171 #define super OSObject
172 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
173
174 static IOLock * gIOObjectPortLock;
175
176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
178 // not in dictForType() for debugging ease
179 static OSDictionary * gIOObjectPorts;
180 static OSDictionary * gIOConnectPorts;
181
182 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
183 {
184 OSDictionary ** dict;
185
186 if( IKOT_IOKIT_OBJECT == type )
187 dict = &gIOObjectPorts;
188 else if( IKOT_IOKIT_CONNECT == type )
189 dict = &gIOConnectPorts;
190 else
191 return( 0 );
192
193 if( 0 == *dict)
194 *dict = OSDictionary::withCapacity( 1 );
195
196 return( *dict );
197 }
198
199 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
200 ipc_kobject_type_t type )
201 {
202 IOMachPort * inst = 0;
203 OSDictionary * dict;
204
205 IOTakeLock( gIOObjectPortLock);
206
207 do {
208
209 dict = dictForType( type );
210 if( !dict)
211 continue;
212
213 if( (inst = (IOMachPort *)
214 dict->getObject( (const OSSymbol *) obj ))) {
215 inst->mscount++;
216 inst->retain();
217 continue;
218 }
219
220 inst = new IOMachPort;
221 if( inst && !inst->init()) {
222 inst = 0;
223 continue;
224 }
225
226 inst->port = iokit_alloc_object_port( obj, type );
227 if( inst->port) {
228 // retains obj
229 dict->setObject( (const OSSymbol *) obj, inst );
230 inst->mscount++;
231
232 } else {
233 inst->release();
234 inst = 0;
235 }
236
237 } while( false );
238
239 IOUnlock( gIOObjectPortLock);
240
241 return( inst );
242 }
243
244 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
245 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
246 {
247 OSDictionary * dict;
248 IOMachPort * machPort;
249 bool destroyed = true;
250
251 IOTakeLock( gIOObjectPortLock);
252
253 if( (dict = dictForType( type ))) {
254 obj->retain();
255
256 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
257 if( machPort) {
258 destroyed = (machPort->mscount <= *mscount);
259 if( destroyed)
260 dict->removeObject( (const OSSymbol *) obj );
261 else
262 *mscount = machPort->mscount;
263 }
264 obj->release();
265 }
266
267 IOUnlock( gIOObjectPortLock);
268
269 return( destroyed );
270 }
271
272 void IOMachPort::releasePortForObject( OSObject * obj,
273 ipc_kobject_type_t type )
274 {
275 OSDictionary * dict;
276 IOMachPort * machPort;
277
278 IOTakeLock( gIOObjectPortLock);
279
280 if( (dict = dictForType( type ))) {
281 obj->retain();
282 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
283 if( machPort && !machPort->holdDestroy)
284 dict->removeObject( (const OSSymbol *) obj );
285 obj->release();
286 }
287
288 IOUnlock( gIOObjectPortLock);
289 }
290
291 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
292 {
293 OSDictionary * dict;
294 IOMachPort * machPort;
295
296 IOLockLock( gIOObjectPortLock );
297
298 if( (dict = dictForType( type ))) {
299 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
300 if( machPort)
301 machPort->holdDestroy = true;
302 }
303
304 IOLockUnlock( gIOObjectPortLock );
305 }
306
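// Tear down the ports user space holds for an object that is going away. For
// an IOUserClient with live mappings, the connect port is handed off to
// uc->mappings via iokit_switch_object_port so existing memory mappings stay
// reachable after the client itself is destroyed.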
307 void IOUserClient::destroyUserReferences( OSObject * obj )
308 {
309 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
310
311 // disabled since Panther; see bug 3160200
312 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
313
314 OSDictionary * dict;
315
316 IOTakeLock( gIOObjectPortLock);
317 obj->retain();
318
319 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
320 {
321 IOMachPort * port;
322 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
323 if (port)
324 {
325 IOUserClient * uc;
326 if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
327 {
328 dict->setObject((const OSSymbol *) uc->mappings, port);
329 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
330
331 uc->mappings->release();
332 uc->mappings = 0;
333 }
334 dict->removeObject( (const OSSymbol *) obj );
335 }
336 }
337 obj->release();
338 IOUnlock( gIOObjectPortLock);
339 }
340
341 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
342 io_object_t obj, ipc_kobject_type_t type )
343 {
344 return( iokit_make_send_right( task, obj, type ));
345 }
346
347 void IOMachPort::free( void )
348 {
349 if( port)
350 iokit_destroy_object_port( port );
351 super::free();
352 }
353
354 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
355
356 class IOUserNotification : public OSIterator
357 {
358 OSDeclareDefaultStructors(IOUserNotification)
359
360 IONotifier * holdNotify;
361 IOLock * lock;
362
363 public:
364
365 virtual bool init( void );
366 virtual void free();
367
368 virtual void setNotification( IONotifier * obj );
369
370 virtual void reset();
371 virtual bool isValid();
372 };
373
374 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
375
376 extern "C" {
377
378 // functions called from osfmk/device/iokit_rpc.c
379
380 void
381 iokit_add_reference( io_object_t obj )
382 {
383 if( obj)
384 obj->retain();
385 }
386
387 void
388 iokit_remove_reference( io_object_t obj )
389 {
390 if( obj)
391 obj->release();
392 }
393
394 ipc_port_t
395 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
396 {
397 IOMachPort * machPort;
398 ipc_port_t port;
399
400 if( (machPort = IOMachPort::portForObject( obj, type ))) {
401
402 port = machPort->port;
403 if( port)
404 iokit_retain_port( port );
405
406 machPort->release();
407
408 } else
409 port = NULL;
410
411 return( port );
412 }
413
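// Called from osfmk (iokit_rpc.c) when the last send right for an object's
// port goes away: connect ports dispatch to IOUserClient::clientDied(),
// object ports notify IOMemoryMap::taskDied() or disarm a user notification.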
414 kern_return_t
415 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
416 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
417 {
418 IOUserClient * client;
419 IOMemoryMap * map;
420 IOUserNotification * notify;
421
422 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
423 return( kIOReturnNotReady );
424
425 if( IKOT_IOKIT_CONNECT == type)
426 {
427 if( (client = OSDynamicCast( IOUserClient, obj ))) {
428 IOStatisticsClientCall();
429 client->clientDied();
430 }
431 }
432 else if( IKOT_IOKIT_OBJECT == type)
433 {
434 if( (map = OSDynamicCast( IOMemoryMap, obj )))
435 map->taskDied();
436 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
437 notify->setNotification( 0 );
438 }
439
440 return( kIOReturnSuccess );
441 }
442
443 }; /* extern "C" */
444
445 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
446
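// IOServiceUserNotification batches matched services (up to kMaxOutstanding)
// and pings the client's wake port with a preallocated message; the client
// drains the batch through getNextObject(), which re-arms the ping once the
// set is empty.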
447 class IOServiceUserNotification : public IOUserNotification
448 {
449 OSDeclareDefaultStructors(IOServiceUserNotification)
450
451 struct PingMsg {
452 mach_msg_header_t msgHdr;
453 OSNotificationHeader64 notifyHeader;
454 };
455
456 enum { kMaxOutstanding = 1024 };
457
458 PingMsg * pingMsg;
459 vm_size_t msgSize;
460 OSArray * newSet;
461 OSObject * lastEntry;
462 bool armed;
463
464 public:
465
466 virtual bool init( mach_port_t port, natural_t type,
467 void * reference, vm_size_t referenceSize,
468 bool clientIs64 );
469 virtual void free();
470
471 static bool _handler( void * target,
472 void * ref, IOService * newService, IONotifier * notifier );
473 virtual bool handler( void * ref, IOService * newService );
474
475 virtual OSObject * getNextObject();
476 };
477
478 class IOServiceMessageUserNotification : public IOUserNotification
479 {
480 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
481
482 struct PingMsg {
483 mach_msg_header_t msgHdr;
484 mach_msg_body_t msgBody;
485 mach_msg_port_descriptor_t ports[1];
486 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
487 };
488
489 PingMsg * pingMsg;
490 vm_size_t msgSize;
491 uint8_t clientIs64;
492 int owningPID;
493
494 public:
495
496 virtual bool init( mach_port_t port, natural_t type,
497 void * reference, vm_size_t referenceSize,
498 vm_size_t extraSize,
499 bool clientIs64 );
500
501 virtual void free();
502
503 static IOReturn _handler( void * target, void * ref,
504 UInt32 messageType, IOService * provider,
505 void * messageArgument, vm_size_t argSize );
506 virtual IOReturn handler( void * ref,
507 UInt32 messageType, IOService * provider,
508 void * messageArgument, vm_size_t argSize );
509
510 virtual OSObject * getNextObject();
511 };
512
513 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
514
515 #undef super
516 #define super OSIterator
517 OSDefineMetaClass( IOUserNotification, OSIterator )
518 OSDefineAbstractStructors( IOUserNotification, OSIterator )
519
520 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
521
522 bool IOUserNotification::init( void )
523 {
524 if( !super::init())
525 return( false );
526
527 lock = IOLockAlloc();
528 if( !lock)
529 return( false );
530
531 return( true );
532 }
533
534 void IOUserNotification::free( void )
535 {
536 if( holdNotify)
537 holdNotify->remove();
538 // can't be in handler now
539
540 if( lock)
541 IOLockFree( lock );
542
543 super::free();
544 }
545
546
547 void IOUserNotification::setNotification( IONotifier * notify )
548 {
549 IONotifier * previousNotify;
550
551 IOLockLock( gIOObjectPortLock);
552
553 previousNotify = holdNotify;
554 holdNotify = notify;
555
556 IOLockUnlock( gIOObjectPortLock);
557
558 if( previousNotify)
559 previousNotify->remove();
560 }
561
562 void IOUserNotification::reset()
563 {
564 // ?
565 }
566
567 bool IOUserNotification::isValid()
568 {
569 return( true );
570 }
571
572 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
573
574 #undef super
575 #define super IOUserNotification
576 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
577
578 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
579
580 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
581 void * reference, vm_size_t referenceSize,
582 bool clientIs64 )
583 {
584 if( !super::init())
585 return( false );
586
587 newSet = OSArray::withCapacity( 1 );
588 if( !newSet)
589 return( false );
590
591 if (referenceSize > sizeof(OSAsyncReference64))
592 return( false );
593
594 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
595 pingMsg = (PingMsg *) IOMalloc( msgSize);
596 if( !pingMsg)
597 return( false );
598
599 bzero( pingMsg, msgSize);
600
601 pingMsg->msgHdr.msgh_remote_port = port;
602 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
603 MACH_MSG_TYPE_COPY_SEND /*remote*/,
604 MACH_MSG_TYPE_MAKE_SEND /*local*/);
605 pingMsg->msgHdr.msgh_size = msgSize;
606 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
607
608 pingMsg->notifyHeader.size = 0;
609 pingMsg->notifyHeader.type = type;
610 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
611
612 return( true );
613 }
614
615 void IOServiceUserNotification::free( void )
616 {
617 PingMsg * _pingMsg;
618 vm_size_t _msgSize;
619 OSArray * _newSet;
620 OSObject * _lastEntry;
621
622 _pingMsg = pingMsg;
623 _msgSize = msgSize;
624 _lastEntry = lastEntry;
625 _newSet = newSet;
626
627 super::free();
628
629 if( _pingMsg && _msgSize) {
630 if (_pingMsg->msgHdr.msgh_remote_port) {
631 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
632 }
633 IOFree(_pingMsg, _msgSize);
634 }
635
636 if( _lastEntry)
637 _lastEntry->release();
638
639 if( _newSet)
640 _newSet->release();
641 }
642
643 bool IOServiceUserNotification::_handler( void * target,
644 void * ref, IOService * newService, IONotifier * notifier )
645 {
646 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
647 }
648
649 bool IOServiceUserNotification::handler( void * ref,
650 IOService * newService )
651 {
652 unsigned int count;
653 kern_return_t kr;
654 ipc_port_t port = NULL;
655 bool sendPing = false;
656
657 IOTakeLock( lock );
658
659 count = newSet->getCount();
660 if( count < kMaxOutstanding) {
661
662 newSet->setObject( newService );
663 if( (sendPing = (armed && (0 == count))))
664 armed = false;
665 }
666
667 IOUnlock( lock );
668
669 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
670 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
671
672 if( sendPing) {
673 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
674 pingMsg->msgHdr.msgh_local_port = port;
675 else
676 pingMsg->msgHdr.msgh_local_port = NULL;
677
678 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
679 pingMsg->msgHdr.msgh_size,
680 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
681 0);
682 if( port)
683 iokit_release_port( port );
684
685 if( KERN_SUCCESS != kr)
686 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
687 }
688
689 return( true );
690 }
691
692 OSObject * IOServiceUserNotification::getNextObject()
693 {
694 unsigned int count;
695 OSObject * result;
696
697 IOTakeLock( lock );
698
699 if( lastEntry)
700 lastEntry->release();
701
702 count = newSet->getCount();
703 if( count ) {
704 result = newSet->getObject( count - 1 );
705 result->retain();
706 newSet->removeObject( count - 1);
707 } else {
708 result = 0;
709 armed = true;
710 }
711 lastEntry = result;
712
713 IOUnlock( lock );
714
715 return( result );
716 }
717
718 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
719
720 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
721
722 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
723
724 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
725 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
726 bool client64 )
727 {
728 if( !super::init())
729 return( false );
730
731 if (referenceSize > sizeof(OSAsyncReference64))
732 return( false );
733
734 clientIs64 = client64;
735
736 owningPID = proc_selfpid();
737
738 extraSize += sizeof(IOServiceInterestContent64);
739 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
740 pingMsg = (PingMsg *) IOMalloc( msgSize);
741 if( !pingMsg)
742 return( false );
743
744 bzero( pingMsg, msgSize);
745
746 pingMsg->msgHdr.msgh_remote_port = port;
747 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
748 | MACH_MSGH_BITS(
749 MACH_MSG_TYPE_COPY_SEND /*remote*/,
750 MACH_MSG_TYPE_MAKE_SEND /*local*/);
751 pingMsg->msgHdr.msgh_size = msgSize;
752 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
753
754 pingMsg->msgBody.msgh_descriptor_count = 1;
755
756 pingMsg->ports[0].name = 0;
757 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
758 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
759
760 pingMsg->notifyHeader.size = extraSize;
761 pingMsg->notifyHeader.type = type;
762 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
763
764 return( true );
765 }
766
767 void IOServiceMessageUserNotification::free( void )
768 {
769 PingMsg * _pingMsg;
770 vm_size_t _msgSize;
771
772 _pingMsg = pingMsg;
773 _msgSize = msgSize;
774
775 super::free();
776
777 if( _pingMsg && _msgSize) {
778 if (_pingMsg->msgHdr.msgh_remote_port) {
779 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
780 }
781 IOFree( _pingMsg, _msgSize);
782 }
783 }
784
785 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
786 UInt32 messageType, IOService * provider,
787 void * argument, vm_size_t argSize )
788 {
789 return( ((IOServiceMessageUserNotification *) target)->handler(
790 ref, messageType, provider, argument, argSize));
791 }
792
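// Deliver an interest notification to user space. The variable-length
// IOServiceInterestContent64 payload lives at the tail of the preallocated
// pingMsg buffer; kIOMessageCopyClientID is answered inline with the owning
// PID instead of being forwarded to the client.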
793 IOReturn IOServiceMessageUserNotification::handler( void * ref,
794 UInt32 messageType, IOService * provider,
795 void * messageArgument, vm_size_t argSize )
796 {
797 kern_return_t kr;
798 ipc_port_t thisPort, providerPort;
799 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
800 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
801 // == pingMsg->notifyHeader.content;
802
803 if (kIOMessageCopyClientID == messageType)
804 {
805 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
806 return (kIOReturnSuccess);
807 }
808
809 data->messageType = messageType;
810
811 if( argSize == 0)
812 {
813 data->messageArgument[0] = (io_user_reference_t) messageArgument;
814 if (clientIs64)
815 argSize = sizeof(data->messageArgument[0]);
816 else
817 {
818 data->messageArgument[0] |= (data->messageArgument[0] << 32);
819 argSize = sizeof(uint32_t);
820 }
821 }
822 else
823 {
824 if( argSize > kIOUserNotifyMaxMessageSize)
825 argSize = kIOUserNotifyMaxMessageSize;
826 bcopy( messageArgument, data->messageArgument, argSize );
827 }
828 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
829 + sizeof( IOServiceInterestContent64 )
830 - sizeof( data->messageArgument)
831 + argSize;
832
833 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
834 pingMsg->ports[0].name = providerPort;
835 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
836 pingMsg->msgHdr.msgh_local_port = thisPort;
837 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
838 pingMsg->msgHdr.msgh_size,
839 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
840 0);
841 if( thisPort)
842 iokit_release_port( thisPort );
843 if( providerPort)
844 iokit_release_port( providerPort );
845
846 if( KERN_SUCCESS != kr)
847 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
848
849 return( kIOReturnSuccess );
850 }
851
852 OSObject * IOServiceMessageUserNotification::getNextObject()
853 {
854 return( 0 );
855 }
856
857 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
858
859 #undef super
860 #define super IOService
861 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
862
863 void IOUserClient::initialize( void )
864 {
865 gIOObjectPortLock = IOLockAlloc();
866
867 assert( gIOObjectPortLock );
868 }
869
870 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
871 mach_port_t wakePort,
872 void *callback, void *refcon)
873 {
874 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
875 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
876 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
877 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
878 }
879
880 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
881 mach_port_t wakePort,
882 mach_vm_address_t callback, io_user_reference_t refcon)
883 {
884 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
885 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
886 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
887 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
888 }
889
890 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
891 mach_port_t wakePort,
892 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
893 {
894 setAsyncReference64(asyncRef, wakePort, callback, refcon);
895 if (vm_map_is_64bit(get_task_map(task))) {
896 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
897 }
898 }
899
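// Console-user lookups read the gIOConsoleUsersKey array published on the
// registry root: CopyConsoleUser matches a uid, CopyUserOnConsole returns the
// session currently marked on-console. Both return a retained dictionary.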
900 static OSDictionary * CopyConsoleUser(UInt32 uid)
901 {
902 OSArray * array;
903 OSDictionary * user = 0;
904
905 if ((array = OSDynamicCast(OSArray,
906 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
907 {
908 for (unsigned int idx = 0;
909 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
910 idx++) {
911 OSNumber * num;
912
913 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
914 && (uid == num->unsigned32BitValue())) {
915 user->retain();
916 break;
917 }
918 }
919 array->release();
920 }
921 return user;
922 }
923
924 static OSDictionary * CopyUserOnConsole(void)
925 {
926 OSArray * array;
927 OSDictionary * user = 0;
928
929 if ((array = OSDynamicCast(OSArray,
930 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
931 {
932 for (unsigned int idx = 0;
933 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
934 idx++)
935 {
936 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
937 {
938 user->retain();
939 break;
940 }
941 }
942 array->release();
943 }
944 return (user);
945 }
946
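// Privilege checks used by user clients: "foreground" consults the task's GPU
// deny policy, "console session" matches the caller's audit session id against
// the console user list, and the remaining checks compare the task's security
// token (uid 0 for administrator) or the console-user list.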
947 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
948 const char * privilegeName )
949 {
950 kern_return_t kr;
951 security_token_t token;
952 mach_msg_type_number_t count;
953 task_t task;
954 OSDictionary * user;
955 bool secureConsole;
956
957
958 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
959 sizeof(kIOClientPrivilegeForeground)))
960 {
961 /* is graphics access denied for current task? */
962 if (proc_get_effective_task_policy(current_task(), TASK_POLICY_GPU_DENY) != 0)
963 return (kIOReturnNotPrivileged);
964 else
965 return (kIOReturnSuccess);
966 }
967
968 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
969 sizeof(kIOClientPrivilegeConsoleSession)))
970 {
971 kauth_cred_t cred;
972 proc_t p;
973
974 task = (task_t) securityToken;
975 if (!task)
976 task = current_task();
977 p = (proc_t) get_bsdtask_info(task);
978 kr = kIOReturnNotPrivileged;
979
980 if (p && (cred = kauth_cred_proc_ref(p)))
981 {
982 user = CopyUserOnConsole();
983 if (user)
984 {
985 OSNumber * num;
986 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
987 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
988 {
989 kr = kIOReturnSuccess;
990 }
991 user->release();
992 }
993 kauth_cred_unref(&cred);
994 }
995 return (kr);
996 }
997
998 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
999 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1000 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1001 else
1002 task = (task_t)securityToken;
1003
1004 count = TASK_SECURITY_TOKEN_COUNT;
1005 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1006
1007 if (KERN_SUCCESS != kr)
1008 {}
1009 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1010 sizeof(kIOClientPrivilegeAdministrator))) {
1011 if (0 != token.val[0])
1012 kr = kIOReturnNotPrivileged;
1013 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1014 sizeof(kIOClientPrivilegeLocalUser))) {
1015 user = CopyConsoleUser(token.val[0]);
1016 if ( user )
1017 user->release();
1018 else
1019 kr = kIOReturnNotPrivileged;
1020 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1021 sizeof(kIOClientPrivilegeConsoleUser))) {
1022 user = CopyConsoleUser(token.val[0]);
1023 if ( user ) {
1024 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1025 kr = kIOReturnNotPrivileged;
1026 else if ( secureConsole ) {
1027 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1028 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1029 kr = kIOReturnNotPrivileged;
1030 }
1031 user->release();
1032 }
1033 else
1034 kr = kIOReturnNotPrivileged;
1035 } else
1036 kr = kIOReturnUnsupported;
1037
1038 return (kr);
1039 }
1040
1041 bool IOUserClient::init()
1042 {
1043 if (getPropertyTable() || super::init())
1044 return reserve();
1045
1046 return false;
1047 }
1048
1049 bool IOUserClient::init(OSDictionary * dictionary)
1050 {
1051 if (getPropertyTable() || super::init(dictionary))
1052 return reserve();
1053
1054 return false;
1055 }
1056
1057 bool IOUserClient::initWithTask(task_t owningTask,
1058 void * securityID,
1059 UInt32 type )
1060 {
1061 if (getPropertyTable() || super::init())
1062 return reserve();
1063
1064 return false;
1065 }
1066
1067 bool IOUserClient::initWithTask(task_t owningTask,
1068 void * securityID,
1069 UInt32 type,
1070 OSDictionary * properties )
1071 {
1072 bool ok;
1073
1074 ok = super::init( properties );
1075 ok &= initWithTask( owningTask, securityID, type );
1076
1077 return( ok );
1078 }
1079
1080 bool IOUserClient::reserve()
1081 {
1082 if(!reserved) {
1083 reserved = IONew(ExpansionData, 1);
1084 if (!reserved) {
1085 return false;
1086 }
1087 }
1088 setTerminateDefer(NULL, true);
1089 IOStatisticsRegisterCounter();
1090
1091 return true;
1092 }
1093
1094 void IOUserClient::free()
1095 {
1096 if( mappings)
1097 mappings->release();
1098
1099 IOStatisticsUnregisterCounter();
1100
1101 if (reserved)
1102 IODelete(reserved, ExpansionData, 1);
1103
1104 super::free();
1105 }
1106
1107 IOReturn IOUserClient::clientDied( void )
1108 {
1109 return( clientClose());
1110 }
1111
1112 IOReturn IOUserClient::clientClose( void )
1113 {
1114 return( kIOReturnUnsupported );
1115 }
1116
1117 IOService * IOUserClient::getService( void )
1118 {
1119 return( 0 );
1120 }
1121
1122 IOReturn IOUserClient::registerNotificationPort(
1123 mach_port_t /* port */,
1124 UInt32 /* type */,
1125 UInt32 /* refCon */)
1126 {
1127 return( kIOReturnUnsupported);
1128 }
1129
1130 IOReturn IOUserClient::registerNotificationPort(
1131 mach_port_t port,
1132 UInt32 type,
1133 io_user_reference_t refCon)
1134 {
1135 return (registerNotificationPort(port, type, (UInt32) refCon));
1136 }
1137
1138 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1139 semaphore_t * semaphore )
1140 {
1141 return( kIOReturnUnsupported);
1142 }
1143
1144 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1145 {
1146 return( kIOReturnUnsupported);
1147 }
1148
1149 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1150 IOOptionBits * options,
1151 IOMemoryDescriptor ** memory )
1152 {
1153 return( kIOReturnUnsupported);
1154 }
1155
1156 #if !__LP64__
1157 IOMemoryMap * IOUserClient::mapClientMemory(
1158 IOOptionBits type,
1159 task_t task,
1160 IOOptionBits mapFlags,
1161 IOVirtualAddress atAddress )
1162 {
1163 return (NULL);
1164 }
1165 #endif
1166
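// Ask the subclass for the memory backing this type via clientMemoryForType(),
// then map it into the requesting task, merging the caller's map flags into
// the option bits the subclass returned.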
1167 IOMemoryMap * IOUserClient::mapClientMemory64(
1168 IOOptionBits type,
1169 task_t task,
1170 IOOptionBits mapFlags,
1171 mach_vm_address_t atAddress )
1172 {
1173 IOReturn err;
1174 IOOptionBits options = 0;
1175 IOMemoryDescriptor * memory;
1176 IOMemoryMap * map = 0;
1177
1178 err = clientMemoryForType( (UInt32) type, &options, &memory );
1179
1180 if( memory && (kIOReturnSuccess == err)) {
1181
1182 options = (options & ~kIOMapUserOptionsMask)
1183 | (mapFlags & kIOMapUserOptionsMask);
1184 map = memory->createMappingInTask( task, atAddress, options );
1185 memory->release();
1186 }
1187
1188 return( map );
1189 }
1190
1191 IOReturn IOUserClient::exportObjectToClient(task_t task,
1192 OSObject *obj, io_object_t *clientObj)
1193 {
1194 mach_port_name_t name;
1195
1196 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1197
1198 *(mach_port_name_t *)clientObj = name;
1199 return kIOReturnSuccess;
1200 }
1201
1202 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1203 {
1204 return( 0 );
1205 }
1206
1207 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1208 {
1209 return( 0 );
1210 }
1211
1212 IOExternalMethod * IOUserClient::
1213 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1214 {
1215 IOExternalMethod *method = getExternalMethodForIndex(index);
1216
1217 if (method)
1218 *targetP = (IOService *) method->object;
1219
1220 return method;
1221 }
1222
1223 IOExternalAsyncMethod * IOUserClient::
1224 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1225 {
1226 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1227
1228 if (method)
1229 *targetP = (IOService *) method->object;
1230
1231 return method;
1232 }
1233
1234 IOExternalTrap * IOUserClient::
1235 getExternalTrapForIndex(UInt32 index)
1236 {
1237 return NULL;
1238 }
1239
1240 IOExternalTrap * IOUserClient::
1241 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1242 {
1243 IOExternalTrap *trap = getExternalTrapForIndex(index);
1244
1245 if (trap) {
1246 *targetP = trap->object;
1247 }
1248
1249 return trap;
1250 }
1251
1252 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1253 {
1254 mach_port_t port;
1255 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1256
1257 if (MACH_PORT_NULL != port)
1258 iokit_release_port_send(port);
1259
1260 return (kIOReturnSuccess);
1261 }
1262
1263 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1264 {
1265 if (MACH_PORT_NULL != port)
1266 iokit_release_port_send(port);
1267
1268 return (kIOReturnSuccess);
1269 }
1270
1271 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1272 IOReturn result, void *args[], UInt32 numArgs)
1273 {
1274 OSAsyncReference64 reference64;
1275 io_user_reference_t args64[kMaxAsyncArgs];
1276 unsigned int idx;
1277
1278 if (numArgs > kMaxAsyncArgs)
1279 return kIOReturnMessageTooLarge;
1280
1281 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1282 reference64[idx] = REF64(reference[idx]);
1283
1284 for (idx = 0; idx < numArgs; idx++)
1285 args64[idx] = REF64(args[idx]);
1286
1287 return (sendAsyncResult64(reference64, result, args64, numArgs));
1288 }
1289
1290 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1291 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1292 {
1293 return _sendAsyncResult64(reference, result, args, numArgs, options);
1294 }
1295
1296 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1297 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1298 {
1299 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1300 }
1301
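// Build and send the async completion reply. The same buffer carries either
// the 32-bit or the 64-bit layout, selected by kIOUCAsync64Flag in the
// reference; kIOUserNotifyOptionCanDrop sends with a zero timeout so the
// reply is dropped rather than queued when the port is full.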
1302 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1303 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1304 {
1305 struct ReplyMsg
1306 {
1307 mach_msg_header_t msgHdr;
1308 union
1309 {
1310 struct
1311 {
1312 OSNotificationHeader notifyHdr;
1313 IOAsyncCompletionContent asyncContent;
1314 uint32_t args[kMaxAsyncArgs];
1315 } msg32;
1316 struct
1317 {
1318 OSNotificationHeader64 notifyHdr;
1319 IOAsyncCompletionContent asyncContent;
1320 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1321 } msg64;
1322 } m;
1323 };
1324 ReplyMsg replyMsg;
1325 mach_port_t replyPort;
1326 kern_return_t kr;
1327
1328 // If no reply port, do nothing.
1329 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1330 if (replyPort == MACH_PORT_NULL)
1331 return kIOReturnSuccess;
1332
1333 if (numArgs > kMaxAsyncArgs)
1334 return kIOReturnMessageTooLarge;
1335
1336 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1337 0 /*local*/);
1338 replyMsg.msgHdr.msgh_remote_port = replyPort;
1339 replyMsg.msgHdr.msgh_local_port = 0;
1340 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1341 if (kIOUCAsync64Flag & reference[0])
1342 {
1343 replyMsg.msgHdr.msgh_size =
1344 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1345 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1346 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1347 + numArgs * sizeof(io_user_reference_t);
1348 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1349 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1350
1351 replyMsg.m.msg64.asyncContent.result = result;
1352 if (numArgs)
1353 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1354 }
1355 else
1356 {
1357 unsigned int idx;
1358
1359 replyMsg.msgHdr.msgh_size =
1360 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1361 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1362
1363 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1364 + numArgs * sizeof(uint32_t);
1365 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1366
1367 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1368 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1369
1370 replyMsg.m.msg32.asyncContent.result = result;
1371
1372 for (idx = 0; idx < numArgs; idx++)
1373 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1374 }
1375
1376 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1377 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1378 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1379 } else {
1380 /* Fail on full queue. */
1381 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1382 replyMsg.msgHdr.msgh_size);
1383 }
1384 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1385 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1386 return kr;
1387 }
1388
1389
1390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1391
1392 extern "C" {
1393
1394 #define CHECK(cls,obj,out) \
1395 cls * out; \
1396 if( !(out = OSDynamicCast( cls, obj))) \
1397 return( kIOReturnBadArgument )
1398
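// CHECK declares a local `out` of the requested class and fails the MIG
// routine with kIOReturnBadArgument when the incoming object cannot be cast
// to it; every routine below starts by validating its port-derived object
// this way.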
1399 /* Routine io_object_get_class */
1400 kern_return_t is_io_object_get_class(
1401 io_object_t object,
1402 io_name_t className )
1403 {
1404 const OSMetaClass* my_obj = NULL;
1405
1406 if( !object)
1407 return( kIOReturnBadArgument );
1408
1409 my_obj = object->getMetaClass();
1410 if (!my_obj) {
1411 return (kIOReturnNotFound);
1412 }
1413
1414 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1415 return( kIOReturnSuccess );
1416 }
1417
1418 /* Routine io_object_get_superclass */
1419 kern_return_t is_io_object_get_superclass(
1420 mach_port_t master_port,
1421 io_name_t obj_name,
1422 io_name_t class_name)
1423 {
1424 const OSMetaClass* my_obj = NULL;
1425 const OSMetaClass* superclass = NULL;
1426 const OSSymbol *my_name = NULL;
1427 const char *my_cstr = NULL;
1428
1429 if (!obj_name || !class_name)
1430 return (kIOReturnBadArgument);
1431
1432 if( master_port != master_device_port)
1433 return( kIOReturnNotPrivileged);
1434
1435 my_name = OSSymbol::withCString(obj_name);
1436
1437 if (my_name) {
1438 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1439 my_name->release();
1440 }
1441 if (my_obj) {
1442 superclass = my_obj->getSuperClass();
1443 }
1444
1445 if (!superclass) {
1446 return( kIOReturnNotFound );
1447 }
1448
1449 my_cstr = superclass->getClassName();
1450
1451 if (my_cstr) {
1452 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1453 return( kIOReturnSuccess );
1454 }
1455 return (kIOReturnNotFound);
1456 }
1457
1458 /* Routine io_object_get_bundle_identifier */
1459 kern_return_t is_io_object_get_bundle_identifier(
1460 mach_port_t master_port,
1461 io_name_t obj_name,
1462 io_name_t bundle_name)
1463 {
1464 const OSMetaClass* my_obj = NULL;
1465 const OSSymbol *my_name = NULL;
1466 const OSSymbol *identifier = NULL;
1467 const char *my_cstr = NULL;
1468
1469 if (!obj_name || !bundle_name)
1470 return (kIOReturnBadArgument);
1471
1472 if( master_port != master_device_port)
1473 return( kIOReturnNotPrivileged);
1474
1475 my_name = OSSymbol::withCString(obj_name);
1476
1477 if (my_name) {
1478 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1479 my_name->release();
1480 }
1481
1482 if (my_obj) {
1483 identifier = my_obj->getKmodName();
1484 }
1485 if (!identifier) {
1486 return( kIOReturnNotFound );
1487 }
1488
1489 my_cstr = identifier->getCStringNoCopy();
1490 if (my_cstr) {
1491 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1492 return( kIOReturnSuccess );
1493 }
1494
1495 return (kIOReturnBadArgument);
1496 }
1497
1498 /* Routine io_object_conforms_to */
1499 kern_return_t is_io_object_conforms_to(
1500 io_object_t object,
1501 io_name_t className,
1502 boolean_t *conforms )
1503 {
1504 if( !object)
1505 return( kIOReturnBadArgument );
1506
1507 *conforms = (0 != object->metaCast( className ));
1508 return( kIOReturnSuccess );
1509 }
1510
1511 /* Routine io_object_get_retain_count */
1512 kern_return_t is_io_object_get_retain_count(
1513 io_object_t object,
1514 uint32_t *retainCount )
1515 {
1516 if( !object)
1517 return( kIOReturnBadArgument );
1518
1519 *retainCount = object->getRetainCount();
1520 return( kIOReturnSuccess );
1521 }
1522
1523 /* Routine io_iterator_next */
1524 kern_return_t is_io_iterator_next(
1525 io_object_t iterator,
1526 io_object_t *object )
1527 {
1528 OSObject * obj;
1529
1530 CHECK( OSIterator, iterator, iter );
1531
1532 obj = iter->getNextObject();
1533 if( obj) {
1534 obj->retain();
1535 *object = obj;
1536 return( kIOReturnSuccess );
1537 } else
1538 return( kIOReturnNoDevice );
1539 }
1540
1541 /* Routine io_iterator_reset */
1542 kern_return_t is_io_iterator_reset(
1543 io_object_t iterator )
1544 {
1545 CHECK( OSIterator, iterator, iter );
1546
1547 iter->reset();
1548
1549 return( kIOReturnSuccess );
1550 }
1551
1552 /* Routine io_iterator_is_valid */
1553 kern_return_t is_io_iterator_is_valid(
1554 io_object_t iterator,
1555 boolean_t *is_valid )
1556 {
1557 CHECK( OSIterator, iterator, iter );
1558
1559 *is_valid = iter->isValid();
1560
1561 return( kIOReturnSuccess );
1562 }
1563
1564 /* Routine io_service_match_property_table */
1565 kern_return_t is_io_service_match_property_table(
1566 io_service_t _service,
1567 io_string_t matching,
1568 boolean_t *matches )
1569 {
1570 CHECK( IOService, _service, service );
1571
1572 kern_return_t kr;
1573 OSObject * obj;
1574 OSDictionary * dict;
1575
1576 obj = OSUnserializeXML( matching );
1577
1578 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1579 *matches = service->passiveMatch( dict );
1580 kr = kIOReturnSuccess;
1581 } else
1582 kr = kIOReturnBadArgument;
1583
1584 if( obj)
1585 obj->release();
1586
1587 return( kr );
1588 }
1589
1590 /* Routine io_service_match_property_table_ool */
1591 kern_return_t is_io_service_match_property_table_ool(
1592 io_object_t service,
1593 io_buf_ptr_t matching,
1594 mach_msg_type_number_t matchingCnt,
1595 kern_return_t *result,
1596 boolean_t *matches )
1597 {
1598 kern_return_t kr;
1599 vm_offset_t data;
1600 vm_map_offset_t map_data;
1601
1602 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1603 data = CAST_DOWN(vm_offset_t, map_data);
1604
1605 if( KERN_SUCCESS == kr) {
1606 // must return success after vm_map_copyout() succeeds
1607 *result = is_io_service_match_property_table( service,
1608 (char *) data, matches );
1609 vm_deallocate( kernel_map, data, matchingCnt );
1610 }
1611
1612 return( kr );
1613 }
1614
1615 /* Routine io_service_get_matching_services */
1616 kern_return_t is_io_service_get_matching_services(
1617 mach_port_t master_port,
1618 io_string_t matching,
1619 io_iterator_t *existing )
1620 {
1621 kern_return_t kr;
1622 OSObject * obj;
1623 OSDictionary * dict;
1624
1625 if( master_port != master_device_port)
1626 return( kIOReturnNotPrivileged);
1627
1628 obj = OSUnserializeXML( matching );
1629
1630 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1631 *existing = IOService::getMatchingServices( dict );
1632 kr = kIOReturnSuccess;
1633 } else
1634 kr = kIOReturnBadArgument;
1635
1636 if( obj)
1637 obj->release();
1638
1639 return( kr );
1640 }
1641
1642 /* Routine io_service_get_matching_services_ool */
1643 kern_return_t is_io_service_get_matching_services_ool(
1644 mach_port_t master_port,
1645 io_buf_ptr_t matching,
1646 mach_msg_type_number_t matchingCnt,
1647 kern_return_t *result,
1648 io_object_t *existing )
1649 {
1650 kern_return_t kr;
1651 vm_offset_t data;
1652 vm_map_offset_t map_data;
1653
1654 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1655 data = CAST_DOWN(vm_offset_t, map_data);
1656
1657 if( KERN_SUCCESS == kr) {
1658 // must return success after vm_map_copyout() succeeds
1659 *result = is_io_service_get_matching_services( master_port,
1660 (char *) data, existing );
1661 vm_deallocate( kernel_map, data, matchingCnt );
1662 }
1663
1664 return( kr );
1665 }
1666
1667
1668 /* Routine io_service_get_matching_service */
1669 kern_return_t is_io_service_get_matching_service(
1670 mach_port_t master_port,
1671 io_string_t matching,
1672 io_service_t *service )
1673 {
1674 kern_return_t kr;
1675 OSObject * obj;
1676 OSDictionary * dict;
1677
1678 if( master_port != master_device_port)
1679 return( kIOReturnNotPrivileged);
1680
1681 obj = OSUnserializeXML( matching );
1682
1683 if( (dict = OSDynamicCast( OSDictionary, obj))) {
1684 *service = IOService::copyMatchingService( dict );
1685 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
1686 } else
1687 kr = kIOReturnBadArgument;
1688
1689 if( obj)
1690 obj->release();
1691
1692 return( kr );
1693 }
1694
1695 /* Routine io_service_get_matching_service_ool */
1696 kern_return_t is_io_service_get_matching_service_ool(
1697 mach_port_t master_port,
1698 io_buf_ptr_t matching,
1699 mach_msg_type_number_t matchingCnt,
1700 kern_return_t *result,
1701 io_object_t *service )
1702 {
1703 kern_return_t kr;
1704 vm_offset_t data;
1705 vm_map_offset_t map_data;
1706
1707 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1708 data = CAST_DOWN(vm_offset_t, map_data);
1709
1710 if( KERN_SUCCESS == kr) {
1711 // must return success after vm_map_copyout() succeeds
1712 *result = is_io_service_get_matching_service( master_port,
1713 (char *) data, service );
1714 vm_deallocate( kernel_map, data, matchingCnt );
1715 }
1716
1717 return( kr );
1718 }
1719
1720
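// Shared back end for the 32- and 64-bit add_notification MIG routines:
// unserialize the matching dictionary, map the notification-type symbol to a
// user message type, wrap the reply port in an IOServiceUserNotification and
// register it with IOService::addMatchingNotification().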
1721 static kern_return_t internal_io_service_add_notification(
1722 mach_port_t master_port,
1723 io_name_t notification_type,
1724 io_string_t matching,
1725 mach_port_t port,
1726 void * reference,
1727 vm_size_t referenceSize,
1728 bool client64,
1729 io_object_t * notification )
1730 {
1731 IOServiceUserNotification * userNotify = 0;
1732 IONotifier * notify = 0;
1733 const OSSymbol * sym;
1734 OSDictionary * dict;
1735 IOReturn err;
1736 unsigned long int userMsgType;
1737
1738
1739 if( master_port != master_device_port)
1740 return( kIOReturnNotPrivileged);
1741
1742 do {
1743 err = kIOReturnNoResources;
1744
1745 if( !(sym = OSSymbol::withCString( notification_type )))
1746 err = kIOReturnNoResources;
1747
1748 if( !(dict = OSDynamicCast( OSDictionary,
1749 OSUnserializeXML( matching )))) {
1750 err = kIOReturnBadArgument;
1751 continue;
1752 }
1753
1754 if( (sym == gIOPublishNotification)
1755 || (sym == gIOFirstPublishNotification))
1756 userMsgType = kIOServicePublishNotificationType;
1757 else if( (sym == gIOMatchedNotification)
1758 || (sym == gIOFirstMatchNotification))
1759 userMsgType = kIOServiceMatchedNotificationType;
1760 else if( sym == gIOTerminatedNotification)
1761 userMsgType = kIOServiceTerminatedNotificationType;
1762 else
1763 userMsgType = kLastIOKitNotificationType;
1764
1765 userNotify = new IOServiceUserNotification;
1766
1767 if( userNotify && !userNotify->init( port, userMsgType,
1768 reference, referenceSize, client64)) {
1769 iokit_release_port_send(port);
1770 userNotify->release();
1771 userNotify = 0;
1772 }
1773 if( !userNotify)
1774 continue;
1775
1776 notify = IOService::addMatchingNotification( sym, dict,
1777 &userNotify->_handler, userNotify );
1778 if( notify) {
1779 *notification = userNotify;
1780 userNotify->setNotification( notify );
1781 err = kIOReturnSuccess;
1782 } else
1783 err = kIOReturnUnsupported;
1784
1785 } while( false );
1786
1787 if( sym)
1788 sym->release();
1789 if( dict)
1790 dict->release();
1791
1792 return( err );
1793 }
1794
1795
1796 /* Routine io_service_add_notification */
1797 kern_return_t is_io_service_add_notification(
1798 mach_port_t master_port,
1799 io_name_t notification_type,
1800 io_string_t matching,
1801 mach_port_t port,
1802 io_async_ref_t reference,
1803 mach_msg_type_number_t referenceCnt,
1804 io_object_t * notification )
1805 {
1806 return (internal_io_service_add_notification(master_port, notification_type,
1807 matching, port, &reference[0], sizeof(io_async_ref_t),
1808 false, notification));
1809 }
1810
1811 /* Routine io_service_add_notification_64 */
1812 kern_return_t is_io_service_add_notification_64(
1813 mach_port_t master_port,
1814 io_name_t notification_type,
1815 io_string_t matching,
1816 mach_port_t wake_port,
1817 io_async_ref64_t reference,
1818 mach_msg_type_number_t referenceCnt,
1819 io_object_t *notification )
1820 {
1821 return (internal_io_service_add_notification(master_port, notification_type,
1822 matching, wake_port, &reference[0], sizeof(io_async_ref64_t),
1823 true, notification));
1824 }
1825
1826
1827 static kern_return_t internal_io_service_add_notification_ool(
1828 mach_port_t master_port,
1829 io_name_t notification_type,
1830 io_buf_ptr_t matching,
1831 mach_msg_type_number_t matchingCnt,
1832 mach_port_t wake_port,
1833 void * reference,
1834 vm_size_t referenceSize,
1835 bool client64,
1836 kern_return_t *result,
1837 io_object_t *notification )
1838 {
1839 kern_return_t kr;
1840 vm_offset_t data;
1841 vm_map_offset_t map_data;
1842
1843 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
1844 data = CAST_DOWN(vm_offset_t, map_data);
1845
1846 if( KERN_SUCCESS == kr) {
1847 // must return success after vm_map_copyout() succeeds
1848 *result = internal_io_service_add_notification( master_port, notification_type,
1849 (char *) data, wake_port, reference, referenceSize, client64, notification );
1850 vm_deallocate( kernel_map, data, matchingCnt );
1851 }
1852
1853 return( kr );
1854 }
1855
1856 /* Routine io_service_add_notification_ool */
1857 kern_return_t is_io_service_add_notification_ool(
1858 mach_port_t master_port,
1859 io_name_t notification_type,
1860 io_buf_ptr_t matching,
1861 mach_msg_type_number_t matchingCnt,
1862 mach_port_t wake_port,
1863 io_async_ref_t reference,
1864 mach_msg_type_number_t referenceCnt,
1865 kern_return_t *result,
1866 io_object_t *notification )
1867 {
1868 return (internal_io_service_add_notification_ool(master_port, notification_type,
1869 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
1870 false, result, notification));
1871 }
1872
1873 /* Routine io_service_add_notification_ool_64 */
1874 kern_return_t is_io_service_add_notification_ool_64(
1875 mach_port_t master_port,
1876 io_name_t notification_type,
1877 io_buf_ptr_t matching,
1878 mach_msg_type_number_t matchingCnt,
1879 mach_port_t wake_port,
1880 io_async_ref64_t reference,
1881 mach_msg_type_number_t referenceCnt,
1882 kern_return_t *result,
1883 io_object_t *notification )
1884 {
1885 return (internal_io_service_add_notification_ool(master_port, notification_type,
1886 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
1887 true, result, notification));
1888 }
1889
1890 /* Routine io_service_add_notification_old */
1891 kern_return_t is_io_service_add_notification_old(
1892 mach_port_t master_port,
1893 io_name_t notification_type,
1894 io_string_t matching,
1895 mach_port_t port,
1896 // for binary compatibility reasons, this must be natural_t for ILP32
1897 natural_t ref,
1898 io_object_t * notification )
1899 {
1900 return( is_io_service_add_notification( master_port, notification_type,
1901 matching, port, &ref, 1, notification ));
1902 }
1903
1904
1905 static kern_return_t internal_io_service_add_interest_notification(
1906 io_object_t _service,
1907 io_name_t type_of_interest,
1908 mach_port_t port,
1909 void * reference,
1910 vm_size_t referenceSize,
1911 bool client64,
1912 io_object_t * notification )
1913 {
1914
1915 IOServiceMessageUserNotification * userNotify = 0;
1916 IONotifier * notify = 0;
1917 const OSSymbol * sym;
1918 IOReturn err;
1919
1920 CHECK( IOService, _service, service );
1921
1922 err = kIOReturnNoResources;
1923 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
1924
1925 userNotify = new IOServiceMessageUserNotification;
1926
1927 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
1928 reference, referenceSize,
1929 kIOUserNotifyMaxMessageSize,
1930 client64 )) {
1931 iokit_release_port_send(port);
1932 userNotify->release();
1933 userNotify = 0;
1934 }
1935 if( !userNotify)
1936 continue;
1937
1938 notify = service->registerInterest( sym,
1939 &userNotify->_handler, userNotify );
1940 if( notify) {
1941 *notification = userNotify;
1942 userNotify->setNotification( notify );
1943 err = kIOReturnSuccess;
1944 } else
1945 err = kIOReturnUnsupported;
1946
1947 sym->release();
1948
1949 } while( false );
1950
1951 return( err );
1952 }
1953
1954 /* Routine io_service_add_interest_notification */
1955 kern_return_t is_io_service_add_interest_notification(
1956 io_object_t service,
1957 io_name_t type_of_interest,
1958 mach_port_t port,
1959 io_async_ref_t reference,
1960 mach_msg_type_number_t referenceCnt,
1961 io_object_t * notification )
1962 {
1963 return (internal_io_service_add_interest_notification(service, type_of_interest,
1964 port, &reference[0], sizeof(io_async_ref_t), false, notification));
1965 }
1966
1967 /* Routine io_service_add_interest_notification_64 */
1968 kern_return_t is_io_service_add_interest_notification_64(
1969 io_object_t service,
1970 io_name_t type_of_interest,
1971 mach_port_t wake_port,
1972 io_async_ref64_t reference,
1973 mach_msg_type_number_t referenceCnt,
1974 io_object_t *notification )
1975 {
1976 return (internal_io_service_add_interest_notification(service, type_of_interest,
1977 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
1978 }
1979
1980
1981 /* Routine io_service_acknowledge_notification */
1982 kern_return_t is_io_service_acknowledge_notification(
1983 io_object_t _service,
1984 natural_t notify_ref,
1985 natural_t response )
1986 {
1987 CHECK( IOService, _service, service );
1988
1989 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
1990 (IOOptionBits) response ));
1991
1992 }
1993
1994 /* Routine io_connect_get_notification_semaphore */
1995 kern_return_t is_io_connect_get_notification_semaphore(
1996 io_connect_t connection,
1997 natural_t notification_type,
1998 semaphore_t *semaphore )
1999 {
2000 CHECK( IOUserClient, connection, client );
2001
2002 IOStatisticsClientCall();
2003 return( client->getNotificationSemaphore( (UInt32) notification_type,
2004 semaphore ));
2005 }
2006
2007 /* Routine io_registry_get_root_entry */
2008 kern_return_t is_io_registry_get_root_entry(
2009 mach_port_t master_port,
2010 io_object_t *root )
2011 {
2012 IORegistryEntry * entry;
2013
2014 if( master_port != master_device_port)
2015 return( kIOReturnNotPrivileged);
2016
2017 entry = IORegistryEntry::getRegistryRoot();
2018 if( entry)
2019 entry->retain();
2020 *root = entry;
2021
2022 return( kIOReturnSuccess );
2023 }
2024
2025 /* Routine io_registry_create_iterator */
2026 kern_return_t is_io_registry_create_iterator(
2027 mach_port_t master_port,
2028 io_name_t plane,
2029 uint32_t options,
2030 io_object_t *iterator )
2031 {
2032 if( master_port != master_device_port)
2033 return( kIOReturnNotPrivileged);
2034
2035 *iterator = IORegistryIterator::iterateOver(
2036 IORegistryEntry::getPlane( plane ), options );
2037
2038 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2039 }
2040
2041 /* Routine io_registry_entry_create_iterator */
2042 kern_return_t is_io_registry_entry_create_iterator(
2043 io_object_t registry_entry,
2044 io_name_t plane,
2045 uint32_t options,
2046 io_object_t *iterator )
2047 {
2048 CHECK( IORegistryEntry, registry_entry, entry );
2049
2050 *iterator = IORegistryIterator::iterateOver( entry,
2051 IORegistryEntry::getPlane( plane ), options );
2052
2053 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2054 }
2055
2056 /* Routine io_registry_iterator_enter_entry */
2057 kern_return_t is_io_registry_iterator_enter_entry(
2058 io_object_t iterator )
2059 {
2060 CHECK( IORegistryIterator, iterator, iter );
2061
2062 iter->enterEntry();
2063
2064 return( kIOReturnSuccess );
2065 }
2066
2067 /* Routine io_registry_iterator_exit_entry */
2068 kern_return_t is_io_registry_iterator_exit_entry(
2069 io_object_t iterator )
2070 {
2071 bool didIt;
2072
2073 CHECK( IORegistryIterator, iterator, iter );
2074
2075 didIt = iter->exitEntry();
2076
2077 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2078 }
2079
2080 /* Routine io_registry_entry_from_path */
2081 kern_return_t is_io_registry_entry_from_path(
2082 mach_port_t master_port,
2083 io_string_t path,
2084 io_object_t *registry_entry )
2085 {
2086 IORegistryEntry * entry;
2087
2088 if( master_port != master_device_port)
2089 return( kIOReturnNotPrivileged);
2090
2091 entry = IORegistryEntry::fromPath( path );
2092
2093 *registry_entry = entry;
2094
2095 return( kIOReturnSuccess );
2096 }
2097
2098 /* Routine io_registry_entry_in_plane */
2099 kern_return_t is_io_registry_entry_in_plane(
2100 io_object_t registry_entry,
2101 io_name_t plane,
2102 boolean_t *inPlane )
2103 {
2104 CHECK( IORegistryEntry, registry_entry, entry );
2105
2106 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2107
2108 return( kIOReturnSuccess );
2109 }
2110
2111
2112 /* Routine io_registry_entry_get_path */
2113 kern_return_t is_io_registry_entry_get_path(
2114 io_object_t registry_entry,
2115 io_name_t plane,
2116 io_string_t path )
2117 {
2118 int length;
2119 CHECK( IORegistryEntry, registry_entry, entry );
2120
2121 length = sizeof( io_string_t);
2122 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2123 return( kIOReturnSuccess );
2124 else
2125 return( kIOReturnBadArgument );
2126 }
2127
2128
2129 /* Routine io_registry_entry_get_name */
2130 kern_return_t is_io_registry_entry_get_name(
2131 io_object_t registry_entry,
2132 io_name_t name )
2133 {
2134 CHECK( IORegistryEntry, registry_entry, entry );
2135
2136 strncpy( name, entry->getName(), sizeof( io_name_t));
2137
2138 return( kIOReturnSuccess );
2139 }
2140
2141 /* Routine io_registry_entry_get_name_in_plane */
2142 kern_return_t is_io_registry_entry_get_name_in_plane(
2143 io_object_t registry_entry,
2144 io_name_t planeName,
2145 io_name_t name )
2146 {
2147 const IORegistryPlane * plane;
2148 CHECK( IORegistryEntry, registry_entry, entry );
2149
2150 if( planeName[0])
2151 plane = IORegistryEntry::getPlane( planeName );
2152 else
2153 plane = 0;
2154
2155 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2156
2157 return( kIOReturnSuccess );
2158 }
2159
2160 /* Routine io_registry_entry_get_location_in_plane */
2161 kern_return_t is_io_registry_entry_get_location_in_plane(
2162 io_object_t registry_entry,
2163 io_name_t planeName,
2164 io_name_t location )
2165 {
2166 const IORegistryPlane * plane;
2167 CHECK( IORegistryEntry, registry_entry, entry );
2168
2169 if( planeName[0])
2170 plane = IORegistryEntry::getPlane( planeName );
2171 else
2172 plane = 0;
2173
2174 const char * cstr = entry->getLocation( plane );
2175
2176 if( cstr) {
2177 strncpy( location, cstr, sizeof( io_name_t));
2178 return( kIOReturnSuccess );
2179 } else
2180 return( kIOReturnNotFound );
2181 }
2182
2183 /* Routine io_registry_entry_get_registry_entry_id */
2184 kern_return_t is_io_registry_entry_get_registry_entry_id(
2185 io_object_t registry_entry,
2186 uint64_t *entry_id )
2187 {
2188 CHECK( IORegistryEntry, registry_entry, entry );
2189
2190 *entry_id = entry->getRegistryEntryID();
2191
2192 return (kIOReturnSuccess);
2193 }
2194
2195 // Create a vm_map_copy_t or kalloc'ed data for memory
2196 // to be copied out. ipc will free after the copyout.
2197
2198 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
2199 io_buf_ptr_t * buf )
2200 {
2201 kern_return_t err;
2202 vm_map_copy_t copy;
2203
2204 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
2205 false /* src_destroy */, &copy);
2206
2207 assert( err == KERN_SUCCESS );
2208 if( err == KERN_SUCCESS )
2209 *buf = (char *) copy;
2210
2211 return( err );
2212 }
2213
2214 /* Routine io_registry_entry_get_property_bytes */
2215 kern_return_t is_io_registry_entry_get_property_bytes(
2216 io_object_t registry_entry,
2217 io_name_t property_name,
2218 io_struct_inband_t buf,
2219 mach_msg_type_number_t *dataCnt )
2220 {
2221 OSObject * obj;
2222 OSData * data;
2223 OSString * str;
2224 OSBoolean * boo;
2225 OSNumber * off;
2226 UInt64 offsetBytes;
2227 unsigned int len = 0;
2228 const void * bytes = 0;
2229 IOReturn ret = kIOReturnSuccess;
2230
2231 CHECK( IORegistryEntry, registry_entry, entry );
2232
2233 obj = entry->copyProperty(property_name);
2234 if( !obj)
2235 return( kIOReturnNoResources );
2236
2237 // One day OSData will be a common container base class
2238 // until then...
2239 if( (data = OSDynamicCast( OSData, obj ))) {
2240 len = data->getLength();
2241 bytes = data->getBytesNoCopy();
2242
2243 } else if( (str = OSDynamicCast( OSString, obj ))) {
2244 len = str->getLength() + 1;
2245 bytes = str->getCStringNoCopy();
2246
2247 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2248 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2249 bytes = boo->isTrue() ? "Yes" : "No";
2250
2251 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2252 offsetBytes = off->unsigned64BitValue();
2253 len = off->numberOfBytes();
2254 bytes = &offsetBytes;
2255 #ifdef __BIG_ENDIAN__
2256 bytes = (const void *)
2257 (((UInt32) bytes) + (sizeof( UInt64) - len));
2258 #endif
2259
2260 } else
2261 ret = kIOReturnBadArgument;
2262
2263 if( bytes) {
2264 if( *dataCnt < len)
2265 ret = kIOReturnIPCError;
2266 else {
2267 *dataCnt = len;
2268 bcopy( bytes, buf, len );
2269 }
2270 }
2271 obj->release();
2272
2273 return( ret );
2274 }
2275
2276
2277 /* Routine io_registry_entry_get_property */
2278 kern_return_t is_io_registry_entry_get_property(
2279 io_object_t registry_entry,
2280 io_name_t property_name,
2281 io_buf_ptr_t *properties,
2282 mach_msg_type_number_t *propertiesCnt )
2283 {
2284 kern_return_t err;
2285 vm_size_t len;
2286 OSObject * obj;
2287
2288 CHECK( IORegistryEntry, registry_entry, entry );
2289
2290 obj = entry->copyProperty(property_name);
2291 if( !obj)
2292 return( kIOReturnNotFound );
2293
2294 OSSerialize * s = OSSerialize::withCapacity(4096);
2295 if( !s) {
2296 obj->release();
2297 return( kIOReturnNoMemory );
2298 }
2299 s->clearText();
2300
2301 if( obj->serialize( s )) {
2302 len = s->getLength();
2303 *propertiesCnt = len;
2304 err = copyoutkdata( s->text(), len, properties );
2305
2306 } else
2307 err = kIOReturnUnsupported;
2308
2309 s->release();
2310 obj->release();
2311
2312 return( err );
2313 }
2314
2315 /* Routine io_registry_entry_get_property_recursively */
2316 kern_return_t is_io_registry_entry_get_property_recursively(
2317 io_object_t registry_entry,
2318 io_name_t plane,
2319 io_name_t property_name,
2320 uint32_t options,
2321 io_buf_ptr_t *properties,
2322 mach_msg_type_number_t *propertiesCnt )
2323 {
2324 kern_return_t err;
2325 vm_size_t len;
2326 OSObject * obj;
2327
2328 CHECK( IORegistryEntry, registry_entry, entry );
2329
2330 obj = entry->copyProperty( property_name,
2331 IORegistryEntry::getPlane( plane ), options);
2332 if( !obj)
2333 return( kIOReturnNotFound );
2334
2335 OSSerialize * s = OSSerialize::withCapacity(4096);
2336 if( !s) {
2337 obj->release();
2338 return( kIOReturnNoMemory );
2339 }
2340
2341 s->clearText();
2342
2343 if( obj->serialize( s )) {
2344 len = s->getLength();
2345 *propertiesCnt = len;
2346 err = copyoutkdata( s->text(), len, properties );
2347
2348 } else
2349 err = kIOReturnUnsupported;
2350
2351 s->release();
2352 obj->release();
2353
2354 return( err );
2355 }
2356
2357 /* Routine io_registry_entry_get_properties */
2358 kern_return_t is_io_registry_entry_get_properties(
2359 io_object_t registry_entry,
2360 io_buf_ptr_t *properties,
2361 mach_msg_type_number_t *propertiesCnt )
2362 {
2363 kern_return_t err;
2364 vm_size_t len;
2365
2366 CHECK( IORegistryEntry, registry_entry, entry );
2367
2368 OSSerialize * s = OSSerialize::withCapacity(4096);
2369 if( !s)
2370 return( kIOReturnNoMemory );
2371
2372 s->clearText();
2373
2374 if( entry->serializeProperties( s )) {
2375 len = s->getLength();
2376 *propertiesCnt = len;
2377 err = copyoutkdata( s->text(), len, properties );
2378
2379 } else
2380 err = kIOReturnUnsupported;
2381
2382 s->release();
2383
2384 return( err );
2385 }
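
The get_property / get_properties routines above serialize the OSObject(s) with OSSerialize and hand the XML back out-of-line through copyoutkdata(); IOKitLib unserializes that buffer into CF objects on the user side. A hedged user-space sketch using IORegistryEntryCreateCFProperties (the matching class is only illustrative):

#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>

static void DumpProperties(void)
{
    // Any registry entry works; IOPlatformExpertDevice is just a convenient one.
    io_service_t entry = IOServiceGetMatchingService(kIOMasterPortDefault,
                              IOServiceMatching("IOPlatformExpertDevice"));
    if (entry == IO_OBJECT_NULL)
        return;

    CFMutableDictionaryRef props = NULL;
    // Wraps io_registry_entry_get_properties(); the serialized XML the kernel
    // returns is turned back into a CFDictionary by IOKitLib.
    if (KERN_SUCCESS == IORegistryEntryCreateCFProperties(entry, &props,
                                                          kCFAllocatorDefault, 0)) {
        CFShow(props);
        CFRelease(props);
    }
    IOObjectRelease(entry);
}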
2386
2387 /* Routine io_registry_entry_set_properties */
2388 kern_return_t is_io_registry_entry_set_properties
2389 (
2390 io_object_t registry_entry,
2391 io_buf_ptr_t properties,
2392 mach_msg_type_number_t propertiesCnt,
2393 kern_return_t * result)
2394 {
2395 OSObject * obj;
2396 kern_return_t err;
2397 IOReturn res;
2398 vm_offset_t data;
2399 vm_map_offset_t map_data;
2400
2401 CHECK( IORegistryEntry, registry_entry, entry );
2402
2403 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
2404 return( kIOReturnMessageTooLarge);
2405
2406 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2407 data = CAST_DOWN(vm_offset_t, map_data);
2408
2409 if( KERN_SUCCESS == err) {
2410
2411 // must return success after vm_map_copyout() succeeds
2412 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
2413 vm_deallocate( kernel_map, data, propertiesCnt );
2414
2415 if (!obj)
2416 res = kIOReturnBadArgument;
2417 #if CONFIG_MACF
2418 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
2419 registry_entry, obj))
2420 res = kIOReturnNotPermitted;
2421 #endif
2422 else
2423 res = entry->setProperties( obj );
2424 if (obj)
2425 obj->release();
2426 } else
2427 res = err;
2428
2429 *result = res;
2430 return( err );
2431 }
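
This is the reverse path: user space serializes a CF collection to XML, the kernel copies it out of the IPC message, unserializes it with OSUnserializeXML(), runs the MAC hook, and finally calls the entry's setProperties(). A hedged caller-side sketch via IORegistryEntrySetCFProperties (the key is a placeholder; which keys are accepted is up to the driver):

#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>

static kern_return_t SetExampleProperty(io_registry_entry_t entry)
{
    CFStringRef  key   = CFSTR("ExampleKey");      // hypothetical property key
    CFBooleanRef value = kCFBooleanTrue;
    CFDictionaryRef dict = CFDictionaryCreate(kCFAllocatorDefault,
                               (const void **)&key, (const void **)&value, 1,
                               &kCFTypeDictionaryKeyCallBacks,
                               &kCFTypeDictionaryValueCallBacks);

    // IOKitLib serializes the dictionary to XML; the handler above unserializes
    // it and passes the result to entry->setProperties().
    kern_return_t kr = IORegistryEntrySetCFProperties(entry, dict);
    CFRelease(dict);
    return kr;
}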
2432
2433 /* Routine io_registry_entry_get_child_iterator */
2434 kern_return_t is_io_registry_entry_get_child_iterator(
2435 io_object_t registry_entry,
2436 io_name_t plane,
2437 io_object_t *iterator )
2438 {
2439 CHECK( IORegistryEntry, registry_entry, entry );
2440
2441 *iterator = entry->getChildIterator(
2442 IORegistryEntry::getPlane( plane ));
2443
2444 return( kIOReturnSuccess );
2445 }
2446
2447 /* Routine io_registry_entry_get_parent_iterator */
2448 kern_return_t is_io_registry_entry_get_parent_iterator(
2449 io_object_t registry_entry,
2450 io_name_t plane,
2451 io_object_t *iterator)
2452 {
2453 CHECK( IORegistryEntry, registry_entry, entry );
2454
2455 *iterator = entry->getParentIterator(
2456 IORegistryEntry::getPlane( plane ));
2457
2458 return( kIOReturnSuccess );
2459 }
2460
2461 /* Routine io_service_get_busy_state */
2462 kern_return_t is_io_service_get_busy_state(
2463 io_object_t _service,
2464 uint32_t *busyState )
2465 {
2466 CHECK( IOService, _service, service );
2467
2468 *busyState = service->getBusyState();
2469
2470 return( kIOReturnSuccess );
2471 }
2472
2473 /* Routine io_service_get_state */
2474 kern_return_t is_io_service_get_state(
2475 io_object_t _service,
2476 uint64_t *state,
2477 uint32_t *busy_state,
2478 uint64_t *accumulated_busy_time )
2479 {
2480 CHECK( IOService, _service, service );
2481
2482 *state = service->getState();
2483 *busy_state = service->getBusyState();
2484 *accumulated_busy_time = service->getAccumulatedBusyTime();
2485
2486 return( kIOReturnSuccess );
2487 }
2488
2489 /* Routine io_service_wait_quiet */
2490 kern_return_t is_io_service_wait_quiet(
2491 io_object_t _service,
2492 mach_timespec_t wait_time )
2493 {
2494 uint64_t timeoutNS;
2495
2496 CHECK( IOService, _service, service );
2497
2498 timeoutNS = wait_time.tv_sec;
2499 timeoutNS *= kSecondScale;
2500 timeoutNS += wait_time.tv_nsec;
2501
2502 return( service->waitQuiet(timeoutNS) );
2503 }
2504
2505 /* Routine io_service_request_probe */
2506 kern_return_t is_io_service_request_probe(
2507 io_object_t _service,
2508 uint32_t options )
2509 {
2510 CHECK( IOService, _service, service );
2511
2512 return( service->requestProbe( options ));
2513 }
2514
2515 /* Routine io_service_open_extended */
2516 kern_return_t is_io_service_open_extended(
2517 io_object_t _service,
2518 task_t owningTask,
2519 uint32_t connect_type,
2520 NDR_record_t ndr,
2521 io_buf_ptr_t properties,
2522 mach_msg_type_number_t propertiesCnt,
2523 kern_return_t * result,
2524 io_object_t *connection )
2525 {
2526 IOUserClient * client = 0;
2527 kern_return_t err = KERN_SUCCESS;
2528 IOReturn res = kIOReturnSuccess;
2529 OSDictionary * propertiesDict = 0;
2530 bool crossEndian;
2531 bool disallowAccess;
2532
2533 CHECK( IOService, _service, service );
2534
2535 do
2536 {
2537 if (properties)
2538 {
2539 OSObject * obj;
2540 vm_offset_t data;
2541 vm_map_offset_t map_data;
2542
2543 if( propertiesCnt > sizeof(io_struct_inband_t))
2544 return( kIOReturnMessageTooLarge);
2545
2546 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
2547 res = err;
2548 data = CAST_DOWN(vm_offset_t, map_data);
2549 if (KERN_SUCCESS == err)
2550 {
2551 // must return success after vm_map_copyout() succeeds
2552 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
2553 vm_deallocate( kernel_map, data, propertiesCnt );
2554 propertiesDict = OSDynamicCast(OSDictionary, obj);
2555 if (!propertiesDict)
2556 {
2557 res = kIOReturnBadArgument;
2558 if (obj)
2559 obj->release();
2560 }
2561 }
2562 if (kIOReturnSuccess != res)
2563 break;
2564 }
2565
2566 crossEndian = (ndr.int_rep != NDR_record.int_rep);
2567 if (crossEndian)
2568 {
2569 if (!propertiesDict)
2570 propertiesDict = OSDictionary::withCapacity(4);
2571 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
2572 if (data)
2573 {
2574 if (propertiesDict)
2575 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
2576 data->release();
2577 }
2578 }
2579
2580 res = service->newUserClient( owningTask, (void *) owningTask,
2581 connect_type, propertiesDict, &client );
2582
2583 if (propertiesDict)
2584 propertiesDict->release();
2585
2586 if (res == kIOReturnSuccess)
2587 {
2588 assert( OSDynamicCast(IOUserClient, client) );
2589
2590 disallowAccess = (crossEndian
2591 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
2592 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
2593 if (disallowAccess) res = kIOReturnUnsupported;
2594 #if CONFIG_MACF
2595 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
2596 res = kIOReturnNotPermitted;
2597 #endif
2598 if (kIOReturnSuccess != res)
2599 {
2600 IOStatisticsClientCall();
2601 client->clientClose();
2602 client->release();
2603 client = 0;
2604 break;
2605 }
2606 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
2607 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
2608 if (creatorName)
2609 {
2610 client->setProperty(kIOUserClientCreatorKey, creatorName);
2611 creatorName->release();
2612 }
2613 client->setTerminateDefer(service, false);
2614 }
2615 }
2616 while (false);
2617
2618 *connection = client;
2619 *result = res;
2620
2621 return (err);
2622 }
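
is_io_service_open_extended is the kernel end of IOServiceOpen: it optionally unserializes a properties dictionary, records cross-endian state, asks the provider for a user client via newUserClient(), and applies the MAC open check before returning the connection. A hedged caller sketch (the driver class name and connect type are placeholders interpreted by the driver's newUserClient()):

#include <mach/mach.h>
#include <IOKit/IOKitLib.h>

static io_connect_t OpenExampleDriver(void)
{
    // Hypothetical driver class; a real client matches its own service.
    io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault,
                               IOServiceMatching("com_example_driver"));
    io_connect_t connect = IO_OBJECT_NULL;

    if (service != IO_OBJECT_NULL) {
        // The third argument is the connect_type seen by newUserClient() above.
        if (KERN_SUCCESS != IOServiceOpen(service, mach_task_self(), 0, &connect))
            connect = IO_OBJECT_NULL;
        IOObjectRelease(service);
    }
    return connect;
}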
2623
2624 /* Routine io_service_close */
2625 kern_return_t is_io_service_close(
2626 io_object_t connection )
2627 {
2628 OSSet * mappings;
2629 if ((mappings = OSDynamicCast(OSSet, connection)))
2630 return( kIOReturnSuccess );
2631
2632 CHECK( IOUserClient, connection, client );
2633
2634 IOStatisticsClientCall();
2635 client->clientClose();
2636
2637 return( kIOReturnSuccess );
2638 }
2639
2640 /* Routine io_connect_get_service */
2641 kern_return_t is_io_connect_get_service(
2642 io_object_t connection,
2643 io_object_t *service )
2644 {
2645 IOService * theService;
2646
2647 CHECK( IOUserClient, connection, client );
2648
2649 theService = client->getService();
2650 if( theService)
2651 theService->retain();
2652
2653 *service = theService;
2654
2655 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
2656 }
2657
2658 /* Routine io_connect_set_notification_port */
2659 kern_return_t is_io_connect_set_notification_port(
2660 io_object_t connection,
2661 uint32_t notification_type,
2662 mach_port_t port,
2663 uint32_t reference)
2664 {
2665 CHECK( IOUserClient, connection, client );
2666
2667 IOStatisticsClientCall();
2668 return( client->registerNotificationPort( port, notification_type,
2669 (io_user_reference_t) reference ));
2670 }
2671
2672 /* Routine io_connect_set_notification_port_64 */
2673 kern_return_t is_io_connect_set_notification_port_64(
2674 io_object_t connection,
2675 uint32_t notification_type,
2676 mach_port_t port,
2677 io_user_reference_t reference)
2678 {
2679 CHECK( IOUserClient, connection, client );
2680
2681 IOStatisticsClientCall();
2682 return( client->registerNotificationPort( port, notification_type,
2683 reference ));
2684 }
2685
2686 /* Routine io_connect_map_memory_into_task */
2687 kern_return_t is_io_connect_map_memory_into_task
2688 (
2689 io_connect_t connection,
2690 uint32_t memory_type,
2691 task_t into_task,
2692 mach_vm_address_t *address,
2693 mach_vm_size_t *size,
2694 uint32_t flags
2695 )
2696 {
2697 IOReturn err;
2698 IOMemoryMap * map;
2699
2700 CHECK( IOUserClient, connection, client );
2701
2702 IOStatisticsClientCall();
2703 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
2704
2705 if( map) {
2706 *address = map->getAddress();
2707 if( size)
2708 *size = map->getSize();
2709
2710 if( client->sharedInstance
2711 || (into_task != current_task())) {
2712 // push a name out to the task owning the map,
2713 // so we can clean up maps
2714 mach_port_name_t name __unused =
2715 IOMachPort::makeSendRightForTask(
2716 into_task, map, IKOT_IOKIT_OBJECT );
2717
2718 } else {
2719 // keep it with the user client
2720 IOLockLock( gIOObjectPortLock);
2721 if( 0 == client->mappings)
2722 client->mappings = OSSet::withCapacity(2);
2723 if( client->mappings)
2724 client->mappings->setObject( map);
2725 IOLockUnlock( gIOObjectPortLock);
2726 map->release();
2727 }
2728 err = kIOReturnSuccess;
2729
2730 } else
2731 err = kIOReturnBadArgument;
2732
2733 return( err );
2734 }
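
The mapping above is backed by whatever IOMemoryDescriptor the driver's clientMemoryForType() returns; the send-right / mappings bookkeeping is what later lets is_io_connect_unmap_memory_from_task tear the mapping down. A hedged caller sketch with IOConnectMapMemory64 (memory type 0 is a placeholder selecting the driver's descriptor):

#include <mach/mach.h>
#include <IOKit/IOKitLib.h>

static kern_return_t MapSharedBuffer(io_connect_t connect,
                                     mach_vm_address_t *addr, mach_vm_size_t *size)
{
    *addr = 0;      // with kIOMapAnywhere the kernel chooses the address
    *size = 0;      // filled in from the map's size on success

    // memory_type 0 is illustrative; it is handed to clientMemoryForType().
    return IOConnectMapMemory64(connect, 0, mach_task_self(),
                                addr, size, kIOMapAnywhere);
}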
2735
2736 /* Routine io_connect_map_memory */
2737 kern_return_t is_io_connect_map_memory(
2738 io_object_t connect,
2739 uint32_t type,
2740 task_t task,
2741 vm_address_t * mapAddr,
2742 vm_size_t * mapSize,
2743 uint32_t flags )
2744 {
2745 IOReturn err;
2746 mach_vm_address_t address;
2747 mach_vm_size_t size;
2748
2749 address = SCALAR64(*mapAddr);
2750 size = SCALAR64(*mapSize);
2751
2752 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
2753
2754 *mapAddr = SCALAR32(address);
2755 *mapSize = SCALAR32(size);
2756
2757 return (err);
2758 }
2759
2760 } /* extern "C" */
2761
2762 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
2763 {
2764 OSIterator * iter;
2765 IOMemoryMap * map = 0;
2766
2767 IOLockLock(gIOObjectPortLock);
2768
2769 iter = OSCollectionIterator::withCollection(mappings);
2770 if(iter)
2771 {
2772 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
2773 {
2774 if(mem == map->getMemoryDescriptor())
2775 {
2776 map->retain();
2777 mappings->removeObject(map);
2778 break;
2779 }
2780 }
2781 iter->release();
2782 }
2783
2784 IOLockUnlock(gIOObjectPortLock);
2785
2786 return (map);
2787 }
2788
2789 extern "C" {
2790
2791 /* Routine io_connect_unmap_memory_from_task */
2792 kern_return_t is_io_connect_unmap_memory_from_task
2793 (
2794 io_connect_t connection,
2795 uint32_t memory_type,
2796 task_t from_task,
2797 mach_vm_address_t address)
2798 {
2799 IOReturn err;
2800 IOOptionBits options = 0;
2801 IOMemoryDescriptor * memory;
2802 IOMemoryMap * map;
2803
2804 CHECK( IOUserClient, connection, client );
2805
2806 IOStatisticsClientCall();
2807 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
2808
2809 if( memory && (kIOReturnSuccess == err)) {
2810
2811 options = (options & ~kIOMapUserOptionsMask)
2812 | kIOMapAnywhere | kIOMapReference;
2813
2814 map = memory->createMappingInTask( from_task, address, options );
2815 memory->release();
2816 if( map)
2817 {
2818 IOLockLock( gIOObjectPortLock);
2819 if( client->mappings)
2820 client->mappings->removeObject( map);
2821 IOLockUnlock( gIOObjectPortLock);
2822
2823 mach_port_name_t name = 0;
2824 if (from_task != current_task())
2825 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
2826 if (name)
2827 {
2828 map->userClientUnmap();
2829 err = iokit_mod_send_right( from_task, name, -2 );
2830 err = kIOReturnSuccess;
2831 }
2832 else
2833 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
2834 if (from_task == current_task())
2835 map->release();
2836 }
2837 else
2838 err = kIOReturnBadArgument;
2839 }
2840
2841 return( err );
2842 }
2843
2844 kern_return_t is_io_connect_unmap_memory(
2845 io_object_t connect,
2846 uint32_t type,
2847 task_t task,
2848 vm_address_t mapAddr )
2849 {
2850 IOReturn err;
2851 mach_vm_address_t address;
2852
2853 address = SCALAR64(mapAddr);
2854
2855 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
2856
2857 return (err);
2858 }
2859
2860
2861 /* Routine io_connect_add_client */
2862 kern_return_t is_io_connect_add_client(
2863 io_object_t connection,
2864 io_object_t connect_to)
2865 {
2866 CHECK( IOUserClient, connection, client );
2867 CHECK( IOUserClient, connect_to, to );
2868
2869 IOStatisticsClientCall();
2870 return( client->connectClient( to ) );
2871 }
2872
2873
2874 /* Routine io_connect_set_properties */
2875 kern_return_t is_io_connect_set_properties(
2876 io_object_t connection,
2877 io_buf_ptr_t properties,
2878 mach_msg_type_number_t propertiesCnt,
2879 kern_return_t * result)
2880 {
2881 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
2882 }
2883
2884 /* Routine io_connect_method_var_output */
2885 kern_return_t is_io_connect_method_var_output
2886 (
2887 io_connect_t connection,
2888 uint32_t selector,
2889 io_scalar_inband64_t scalar_input,
2890 mach_msg_type_number_t scalar_inputCnt,
2891 io_struct_inband_t inband_input,
2892 mach_msg_type_number_t inband_inputCnt,
2893 mach_vm_address_t ool_input,
2894 mach_vm_size_t ool_input_size,
2895 io_struct_inband_t inband_output,
2896 mach_msg_type_number_t *inband_outputCnt,
2897 io_scalar_inband64_t scalar_output,
2898 mach_msg_type_number_t *scalar_outputCnt,
2899 io_buf_ptr_t *var_output,
2900 mach_msg_type_number_t *var_outputCnt
2901 )
2902 {
2903 CHECK( IOUserClient, connection, client );
2904
2905 IOExternalMethodArguments args;
2906 IOReturn ret;
2907 IOMemoryDescriptor * inputMD = 0;
2908 OSObject * structureVariableOutputData = 0;
2909
2910 bzero(&args.__reserved[0], sizeof(args.__reserved));
2911 args.version = kIOExternalMethodArgumentsCurrentVersion;
2912
2913 args.selector = selector;
2914
2915 args.asyncWakePort = MACH_PORT_NULL;
2916 args.asyncReference = 0;
2917 args.asyncReferenceCount = 0;
2918 args.structureVariableOutputData = &structureVariableOutputData;
2919
2920 args.scalarInput = scalar_input;
2921 args.scalarInputCount = scalar_inputCnt;
2922 args.structureInput = inband_input;
2923 args.structureInputSize = inband_inputCnt;
2924
2925 if (ool_input)
2926 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
2927 kIODirectionOut, current_task());
2928
2929 args.structureInputDescriptor = inputMD;
2930
2931 args.scalarOutput = scalar_output;
2932 args.scalarOutputCount = *scalar_outputCnt;
2933 args.structureOutput = inband_output;
2934 args.structureOutputSize = *inband_outputCnt;
2935 args.structureOutputDescriptor = NULL;
2936 args.structureOutputDescriptorSize = 0;
2937
2938 IOStatisticsClientCall();
2939 ret = client->externalMethod( selector, &args );
2940
2941 *scalar_outputCnt = args.scalarOutputCount;
2942 *inband_outputCnt = args.structureOutputSize;
2943
2944 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
2945 {
2946 OSSerialize * serialize;
2947 OSData * data;
2948 vm_size_t len;
2949
2950 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
2951 {
2952 len = serialize->getLength();
2953 *var_outputCnt = len;
2954 ret = copyoutkdata(serialize->text(), len, var_output);
2955 }
2956 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
2957 {
2958 len = data->getLength();
2959 *var_outputCnt = len;
2960 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
2961 }
2962 else
2963 {
2964 ret = kIOReturnUnderrun;
2965 }
2966 }
2967
2968 if (inputMD)
2969 inputMD->release();
2970 if (structureVariableOutputData)
2971 structureVariableOutputData->release();
2972
2973 return (ret);
2974 }
2975
2976 /* Routine io_connect_method */
2977 kern_return_t is_io_connect_method
2978 (
2979 io_connect_t connection,
2980 uint32_t selector,
2981 io_scalar_inband64_t scalar_input,
2982 mach_msg_type_number_t scalar_inputCnt,
2983 io_struct_inband_t inband_input,
2984 mach_msg_type_number_t inband_inputCnt,
2985 mach_vm_address_t ool_input,
2986 mach_vm_size_t ool_input_size,
2987 io_struct_inband_t inband_output,
2988 mach_msg_type_number_t *inband_outputCnt,
2989 io_scalar_inband64_t scalar_output,
2990 mach_msg_type_number_t *scalar_outputCnt,
2991 mach_vm_address_t ool_output,
2992 mach_vm_size_t *ool_output_size
2993 )
2994 {
2995 CHECK( IOUserClient, connection, client );
2996
2997 IOExternalMethodArguments args;
2998 IOReturn ret;
2999 IOMemoryDescriptor * inputMD = 0;
3000 IOMemoryDescriptor * outputMD = 0;
3001
3002 bzero(&args.__reserved[0], sizeof(args.__reserved));
3003 args.version = kIOExternalMethodArgumentsCurrentVersion;
3004
3005 args.selector = selector;
3006
3007 args.asyncWakePort = MACH_PORT_NULL;
3008 args.asyncReference = 0;
3009 args.asyncReferenceCount = 0;
3010 args.structureVariableOutputData = 0;
3011
3012 args.scalarInput = scalar_input;
3013 args.scalarInputCount = scalar_inputCnt;
3014 args.structureInput = inband_input;
3015 args.structureInputSize = inband_inputCnt;
3016
3017 if (ool_input)
3018 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3019 kIODirectionOut, current_task());
3020
3021 args.structureInputDescriptor = inputMD;
3022
3023 args.scalarOutput = scalar_output;
3024 args.scalarOutputCount = *scalar_outputCnt;
3025 args.structureOutput = inband_output;
3026 args.structureOutputSize = *inband_outputCnt;
3027
3028 if (ool_output && ool_output_size)
3029 {
3030 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3031 kIODirectionIn, current_task());
3032 }
3033
3034 args.structureOutputDescriptor = outputMD;
3035 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3036
3037 IOStatisticsClientCall();
3038 ret = client->externalMethod( selector, &args );
3039
3040 *scalar_outputCnt = args.scalarOutputCount;
3041 *inband_outputCnt = args.structureOutputSize;
3042 *ool_output_size = args.structureOutputDescriptorSize;
3043
3044 if (inputMD)
3045 inputMD->release();
3046 if (outputMD)
3047 outputMD->release();
3048
3049 return (ret);
3050 }
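
This is the unified dispatch path behind IOConnectCallMethod and its scalar/struct convenience wrappers: scalars and small structures travel inband in the Mach message, while larger buffers arrive out-of-line and are wrapped in IOMemoryDescriptors before externalMethod() runs. A hedged caller sketch (selector and argument counts are placeholders that must match the driver's method table):

#include <IOKit/IOKitLib.h>

static kern_return_t CallExampleMethod(io_connect_t connect)
{
    uint64_t scalarIn[2]  = { 1, 2 };          // placeholder scalar inputs
    uint64_t scalarOut[1] = { 0 };
    uint32_t scalarOutCnt = 1;
    char     structOut[64];
    size_t   structOutCnt = sizeof(structOut);

    // Selector 0 is illustrative; it indexes the driver's externalMethod()
    // dispatch. A buffer this small comes back through the inband
    // structureOutput path handled above.
    return IOConnectCallMethod(connect, 0,
                               scalarIn, 2,               // scalar input
                               NULL, 0,                   // no structure input
                               scalarOut, &scalarOutCnt,  // scalar output
                               structOut, &structOutCnt); // structure output
}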
3051
3052 /* Routine io_connect_async_method */
3053 kern_return_t is_io_connect_async_method
3054 (
3055 io_connect_t connection,
3056 mach_port_t wake_port,
3057 io_async_ref64_t reference,
3058 mach_msg_type_number_t referenceCnt,
3059 uint32_t selector,
3060 io_scalar_inband64_t scalar_input,
3061 mach_msg_type_number_t scalar_inputCnt,
3062 io_struct_inband_t inband_input,
3063 mach_msg_type_number_t inband_inputCnt,
3064 mach_vm_address_t ool_input,
3065 mach_vm_size_t ool_input_size,
3066 io_struct_inband_t inband_output,
3067 mach_msg_type_number_t *inband_outputCnt,
3068 io_scalar_inband64_t scalar_output,
3069 mach_msg_type_number_t *scalar_outputCnt,
3070 mach_vm_address_t ool_output,
3071 mach_vm_size_t * ool_output_size
3072 )
3073 {
3074 CHECK( IOUserClient, connection, client );
3075
3076 IOExternalMethodArguments args;
3077 IOReturn ret;
3078 IOMemoryDescriptor * inputMD = 0;
3079 IOMemoryDescriptor * outputMD = 0;
3080
3081 bzero(&args.__reserved[0], sizeof(args.__reserved));
3082 args.version = kIOExternalMethodArgumentsCurrentVersion;
3083
3084 reference[0] = (io_user_reference_t) wake_port;
3085 if (vm_map_is_64bit(get_task_map(current_task())))
3086 reference[0] |= kIOUCAsync64Flag;
3087
3088 args.selector = selector;
3089
3090 args.asyncWakePort = wake_port;
3091 args.asyncReference = reference;
3092 args.asyncReferenceCount = referenceCnt;
3093
3094 args.scalarInput = scalar_input;
3095 args.scalarInputCount = scalar_inputCnt;
3096 args.structureInput = inband_input;
3097 args.structureInputSize = inband_inputCnt;
3098
3099 if (ool_input)
3100 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3101 kIODirectionOut, current_task());
3102
3103 args.structureInputDescriptor = inputMD;
3104
3105 args.scalarOutput = scalar_output;
3106 args.scalarOutputCount = *scalar_outputCnt;
3107 args.structureOutput = inband_output;
3108 args.structureOutputSize = *inband_outputCnt;
3109
3110 if (ool_output)
3111 {
3112 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3113 kIODirectionIn, current_task());
3114 }
3115
3116 args.structureOutputDescriptor = outputMD;
3117 args.structureOutputDescriptorSize = *ool_output_size;
3118
3119 IOStatisticsClientCall();
3120 ret = client->externalMethod( selector, &args );
3121
3122 *inband_outputCnt = args.structureOutputSize;
3123 *ool_output_size = args.structureOutputDescriptorSize;
3124
3125 if (inputMD)
3126 inputMD->release();
3127 if (outputMD)
3128 outputMD->release();
3129
3130 return (ret);
3131 }
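
The async variant stores the wake port in reference[0] (tagged with kIOUCAsync64Flag for 64-bit tasks) so the driver can complete the request later with sendAsyncResult64(). User space reaches it through the IOConnectCallAsync* wrappers. A hedged caller sketch (selector, input, and reference usage are placeholders):

#include <IOKit/IOKitLib.h>

static kern_return_t CallExampleAsyncMethod(io_connect_t connect,
                                            IONotificationPortRef notifyPort)
{
    uint64_t asyncRef[kOSAsyncRef64Count] = { 0 };  // completion routine/context
                                                    // would normally go in here
    uint64_t input[1]  = { 42 };                    // placeholder scalar input
    uint32_t outputCnt = 0;

    // Selector 1 is illustrative. The completion arrives later as a Mach
    // message on the notification port's wake port.
    return IOConnectCallAsyncScalarMethod(connect, 1,
                                          IONotificationPortGetMachPort(notifyPort),
                                          asyncRef, kOSAsyncRef64Count,
                                          input, 1, NULL, &outputCnt);
}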
3132
3133 /* Routine io_connect_method_scalarI_scalarO */
3134 kern_return_t is_io_connect_method_scalarI_scalarO(
3135 io_object_t connect,
3136 uint32_t index,
3137 io_scalar_inband_t input,
3138 mach_msg_type_number_t inputCount,
3139 io_scalar_inband_t output,
3140 mach_msg_type_number_t * outputCount )
3141 {
3142 IOReturn err;
3143 uint32_t i;
3144 io_scalar_inband64_t _input;
3145 io_scalar_inband64_t _output;
3146
3147 mach_msg_type_number_t struct_outputCnt = 0;
3148 mach_vm_size_t ool_output_size = 0;
3149
3150 for (i = 0; i < inputCount; i++)
3151 _input[i] = SCALAR64(input[i]);
3152
3153 err = is_io_connect_method(connect, index,
3154 _input, inputCount,
3155 NULL, 0,
3156 0, 0,
3157 NULL, &struct_outputCnt,
3158 _output, outputCount,
3159 0, &ool_output_size);
3160
3161 for (i = 0; i < *outputCount; i++)
3162 output[i] = SCALAR32(_output[i]);
3163
3164 return (err);
3165 }
3166
3167 kern_return_t shim_io_connect_method_scalarI_scalarO(
3168 IOExternalMethod * method,
3169 IOService * object,
3170 const io_user_scalar_t * input,
3171 mach_msg_type_number_t inputCount,
3172 io_user_scalar_t * output,
3173 mach_msg_type_number_t * outputCount )
3174 {
3175 IOMethod func;
3176 io_scalar_inband_t _output;
3177 IOReturn err;
3178 err = kIOReturnBadArgument;
3179
3180 do {
3181
3182 if( inputCount != method->count0)
3183 {
3184 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3185 continue;
3186 }
3187 if( *outputCount != method->count1)
3188 {
3189 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3190 continue;
3191 }
3192
3193 func = method->func;
3194
3195 switch( inputCount) {
3196
3197 case 6:
3198 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3199 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3200 break;
3201 case 5:
3202 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3203 ARG32(input[3]), ARG32(input[4]),
3204 &_output[0] );
3205 break;
3206 case 4:
3207 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3208 ARG32(input[3]),
3209 &_output[0], &_output[1] );
3210 break;
3211 case 3:
3212 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3213 &_output[0], &_output[1], &_output[2] );
3214 break;
3215 case 2:
3216 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3217 &_output[0], &_output[1], &_output[2],
3218 &_output[3] );
3219 break;
3220 case 1:
3221 err = (object->*func)( ARG32(input[0]),
3222 &_output[0], &_output[1], &_output[2],
3223 &_output[3], &_output[4] );
3224 break;
3225 case 0:
3226 err = (object->*func)( &_output[0], &_output[1], &_output[2],
3227 &_output[3], &_output[4], &_output[5] );
3228 break;
3229
3230 default:
3231 IOLog("%s: Bad method table\n", object->getName());
3232 }
3233 }
3234 while( false);
3235
3236 uint32_t i;
3237 for (i = 0; i < *outputCount; i++)
3238 output[i] = SCALAR32(_output[i]);
3239
3240 return( err);
3241 }
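
The shim_* adapters keep drivers that still publish pre-Leopard IOExternalMethod tables working under the unified externalMethod() path: the switch above simply spreads the 32-bit scalars and output slots across the fixed six-argument IOMethod signature, guided by count0/count1. A hypothetical, heavily abbreviated kernel-side sketch of the kind of entry this shim consumes (class and method are placeholders; the OSDeclareDefaultStructors boilerplate a real subclass needs is omitted):

#include <IOKit/IOUserClient.h>

class com_example_UserClient : public IOUserClient      // hypothetical class
{
public:
    // Two scalar inputs, one scalar output -- matching count0/count1 below.
    IOReturn doAdd( UInt32 a, UInt32 b, UInt32 * result )
        { *result = a + b; return kIOReturnSuccess; }
};

// Entry a legacy getTargetAndMethodForIndex() might return for selector 0.
static const IOExternalMethod kExampleMethods[] = {
    { NULL,                                          // target supplied at dispatch
      (IOMethod) &com_example_UserClient::doAdd,     // invoked through the shim above
      kIOUCScalarIScalarO,                           // scalar-in / scalar-out flags
      2,                                             // count0: scalar inputs
      1 }                                            // count1: scalar outputs
};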
3242
3243 /* Routine io_async_method_scalarI_scalarO */
3244 kern_return_t is_io_async_method_scalarI_scalarO(
3245 io_object_t connect,
3246 mach_port_t wake_port,
3247 io_async_ref_t reference,
3248 mach_msg_type_number_t referenceCnt,
3249 uint32_t index,
3250 io_scalar_inband_t input,
3251 mach_msg_type_number_t inputCount,
3252 io_scalar_inband_t output,
3253 mach_msg_type_number_t * outputCount )
3254 {
3255 IOReturn err;
3256 uint32_t i;
3257 io_scalar_inband64_t _input;
3258 io_scalar_inband64_t _output;
3259 io_async_ref64_t _reference;
3260
3261 for (i = 0; i < referenceCnt; i++)
3262 _reference[i] = REF64(reference[i]);
3263
3264 mach_msg_type_number_t struct_outputCnt = 0;
3265 mach_vm_size_t ool_output_size = 0;
3266
3267 for (i = 0; i < inputCount; i++)
3268 _input[i] = SCALAR64(input[i]);
3269
3270 err = is_io_connect_async_method(connect,
3271 wake_port, _reference, referenceCnt,
3272 index,
3273 _input, inputCount,
3274 NULL, 0,
3275 0, 0,
3276 NULL, &struct_outputCnt,
3277 _output, outputCount,
3278 0, &ool_output_size);
3279
3280 for (i = 0; i < *outputCount; i++)
3281 output[i] = SCALAR32(_output[i]);
3282
3283 return (err);
3284 }
3285 /* Routine io_async_method_scalarI_structureO */
3286 kern_return_t is_io_async_method_scalarI_structureO(
3287 io_object_t connect,
3288 mach_port_t wake_port,
3289 io_async_ref_t reference,
3290 mach_msg_type_number_t referenceCnt,
3291 uint32_t index,
3292 io_scalar_inband_t input,
3293 mach_msg_type_number_t inputCount,
3294 io_struct_inband_t output,
3295 mach_msg_type_number_t * outputCount )
3296 {
3297 uint32_t i;
3298 io_scalar_inband64_t _input;
3299 io_async_ref64_t _reference;
3300
3301 for (i = 0; i < referenceCnt; i++)
3302 _reference[i] = REF64(reference[i]);
3303
3304 mach_msg_type_number_t scalar_outputCnt = 0;
3305 mach_vm_size_t ool_output_size = 0;
3306
3307 for (i = 0; i < inputCount; i++)
3308 _input[i] = SCALAR64(input[i]);
3309
3310 return (is_io_connect_async_method(connect,
3311 wake_port, _reference, referenceCnt,
3312 index,
3313 _input, inputCount,
3314 NULL, 0,
3315 0, 0,
3316 output, outputCount,
3317 NULL, &scalar_outputCnt,
3318 0, &ool_output_size));
3319 }
3320
3321 /* Routine io_async_method_scalarI_structureI */
3322 kern_return_t is_io_async_method_scalarI_structureI(
3323 io_connect_t connect,
3324 mach_port_t wake_port,
3325 io_async_ref_t reference,
3326 mach_msg_type_number_t referenceCnt,
3327 uint32_t index,
3328 io_scalar_inband_t input,
3329 mach_msg_type_number_t inputCount,
3330 io_struct_inband_t inputStruct,
3331 mach_msg_type_number_t inputStructCount )
3332 {
3333 uint32_t i;
3334 io_scalar_inband64_t _input;
3335 io_async_ref64_t _reference;
3336
3337 for (i = 0; i < referenceCnt; i++)
3338 _reference[i] = REF64(reference[i]);
3339
3340 mach_msg_type_number_t scalar_outputCnt = 0;
3341 mach_msg_type_number_t inband_outputCnt = 0;
3342 mach_vm_size_t ool_output_size = 0;
3343
3344 for (i = 0; i < inputCount; i++)
3345 _input[i] = SCALAR64(input[i]);
3346
3347 return (is_io_connect_async_method(connect,
3348 wake_port, _reference, referenceCnt,
3349 index,
3350 _input, inputCount,
3351 inputStruct, inputStructCount,
3352 0, 0,
3353 NULL, &inband_outputCnt,
3354 NULL, &scalar_outputCnt,
3355 0, &ool_output_size));
3356 }
3357
3358 /* Routine io_async_method_structureI_structureO */
3359 kern_return_t is_io_async_method_structureI_structureO(
3360 io_object_t connect,
3361 mach_port_t wake_port,
3362 io_async_ref_t reference,
3363 mach_msg_type_number_t referenceCnt,
3364 uint32_t index,
3365 io_struct_inband_t input,
3366 mach_msg_type_number_t inputCount,
3367 io_struct_inband_t output,
3368 mach_msg_type_number_t * outputCount )
3369 {
3370 uint32_t i;
3371 mach_msg_type_number_t scalar_outputCnt = 0;
3372 mach_vm_size_t ool_output_size = 0;
3373 io_async_ref64_t _reference;
3374
3375 for (i = 0; i < referenceCnt; i++)
3376 _reference[i] = REF64(reference[i]);
3377
3378 return (is_io_connect_async_method(connect,
3379 wake_port, _reference, referenceCnt,
3380 index,
3381 NULL, 0,
3382 input, inputCount,
3383 0, 0,
3384 output, outputCount,
3385 NULL, &scalar_outputCnt,
3386 0, &ool_output_size));
3387 }
3388
3389
3390 kern_return_t shim_io_async_method_scalarI_scalarO(
3391 IOExternalAsyncMethod * method,
3392 IOService * object,
3393 mach_port_t asyncWakePort,
3394 io_user_reference_t * asyncReference,
3395 uint32_t asyncReferenceCount,
3396 const io_user_scalar_t * input,
3397 mach_msg_type_number_t inputCount,
3398 io_user_scalar_t * output,
3399 mach_msg_type_number_t * outputCount )
3400 {
3401 IOAsyncMethod func;
3402 uint32_t i;
3403 io_scalar_inband_t _output;
3404 IOReturn err;
3405 io_async_ref_t reference;
3406
3407 for (i = 0; i < asyncReferenceCount; i++)
3408 reference[i] = REF32(asyncReference[i]);
3409
3410 err = kIOReturnBadArgument;
3411
3412 do {
3413
3414 if( inputCount != method->count0)
3415 {
3416 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3417 continue;
3418 }
3419 if( *outputCount != method->count1)
3420 {
3421 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3422 continue;
3423 }
3424
3425 func = method->func;
3426
3427 switch( inputCount) {
3428
3429 case 6:
3430 err = (object->*func)( reference,
3431 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3432 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
3433 break;
3434 case 5:
3435 err = (object->*func)( reference,
3436 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3437 ARG32(input[3]), ARG32(input[4]),
3438 &_output[0] );
3439 break;
3440 case 4:
3441 err = (object->*func)( reference,
3442 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3443 ARG32(input[3]),
3444 &_output[0], &_output[1] );
3445 break;
3446 case 3:
3447 err = (object->*func)( reference,
3448 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3449 &_output[0], &_output[1], &_output[2] );
3450 break;
3451 case 2:
3452 err = (object->*func)( reference,
3453 ARG32(input[0]), ARG32(input[1]),
3454 &_output[0], &_output[1], &_output[2],
3455 &_output[3] );
3456 break;
3457 case 1:
3458 err = (object->*func)( reference,
3459 ARG32(input[0]),
3460 &_output[0], &_output[1], &_output[2],
3461 &_output[3], &_output[4] );
3462 break;
3463 case 0:
3464 err = (object->*func)( reference,
3465 &_output[0], &_output[1], &_output[2],
3466 &_output[3], &_output[4], &_output[5] );
3467 break;
3468
3469 default:
3470 IOLog("%s: Bad method table\n", object->getName());
3471 }
3472 }
3473 while( false);
3474
3475 for (i = 0; i < *outputCount; i++)
3476 output[i] = SCALAR32(_output[i]);
3477
3478 return( err);
3479 }
3480
3481
3482 /* Routine io_connect_method_scalarI_structureO */
3483 kern_return_t is_io_connect_method_scalarI_structureO(
3484 io_object_t connect,
3485 uint32_t index,
3486 io_scalar_inband_t input,
3487 mach_msg_type_number_t inputCount,
3488 io_struct_inband_t output,
3489 mach_msg_type_number_t * outputCount )
3490 {
3491 uint32_t i;
3492 io_scalar_inband64_t _input;
3493
3494 mach_msg_type_number_t scalar_outputCnt = 0;
3495 mach_vm_size_t ool_output_size = 0;
3496
3497 for (i = 0; i < inputCount; i++)
3498 _input[i] = SCALAR64(input[i]);
3499
3500 return (is_io_connect_method(connect, index,
3501 _input, inputCount,
3502 NULL, 0,
3503 0, 0,
3504 output, outputCount,
3505 NULL, &scalar_outputCnt,
3506 0, &ool_output_size));
3507 }
3508
3509 kern_return_t shim_io_connect_method_scalarI_structureO(
3510
3511 IOExternalMethod * method,
3512 IOService * object,
3513 const io_user_scalar_t * input,
3514 mach_msg_type_number_t inputCount,
3515 io_struct_inband_t output,
3516 IOByteCount * outputCount )
3517 {
3518 IOMethod func;
3519 IOReturn err;
3520
3521 err = kIOReturnBadArgument;
3522
3523 do {
3524 if( inputCount != method->count0)
3525 {
3526 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3527 continue;
3528 }
3529 if( (kIOUCVariableStructureSize != method->count1)
3530 && (*outputCount != method->count1))
3531 {
3532 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3533 continue;
3534 }
3535
3536 func = method->func;
3537
3538 switch( inputCount) {
3539
3540 case 5:
3541 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3542 ARG32(input[3]), ARG32(input[4]),
3543 output );
3544 break;
3545 case 4:
3546 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3547 ARG32(input[3]),
3548 output, (void *)outputCount );
3549 break;
3550 case 3:
3551 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3552 output, (void *)outputCount, 0 );
3553 break;
3554 case 2:
3555 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3556 output, (void *)outputCount, 0, 0 );
3557 break;
3558 case 1:
3559 err = (object->*func)( ARG32(input[0]),
3560 output, (void *)outputCount, 0, 0, 0 );
3561 break;
3562 case 0:
3563 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
3564 break;
3565
3566 default:
3567 IOLog("%s: Bad method table\n", object->getName());
3568 }
3569 }
3570 while( false);
3571
3572 return( err);
3573 }
3574
3575
3576 kern_return_t shim_io_async_method_scalarI_structureO(
3577 IOExternalAsyncMethod * method,
3578 IOService * object,
3579 mach_port_t asyncWakePort,
3580 io_user_reference_t * asyncReference,
3581 uint32_t asyncReferenceCount,
3582 const io_user_scalar_t * input,
3583 mach_msg_type_number_t inputCount,
3584 io_struct_inband_t output,
3585 mach_msg_type_number_t * outputCount )
3586 {
3587 IOAsyncMethod func;
3588 uint32_t i;
3589 IOReturn err;
3590 io_async_ref_t reference;
3591
3592 for (i = 0; i < asyncReferenceCount; i++)
3593 reference[i] = REF32(asyncReference[i]);
3594
3595 err = kIOReturnBadArgument;
3596 do {
3597 if( inputCount != method->count0)
3598 {
3599 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3600 continue;
3601 }
3602 if( (kIOUCVariableStructureSize != method->count1)
3603 && (*outputCount != method->count1))
3604 {
3605 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3606 continue;
3607 }
3608
3609 func = method->func;
3610
3611 switch( inputCount) {
3612
3613 case 5:
3614 err = (object->*func)( reference,
3615 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3616 ARG32(input[3]), ARG32(input[4]),
3617 output );
3618 break;
3619 case 4:
3620 err = (object->*func)( reference,
3621 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3622 ARG32(input[3]),
3623 output, (void *)outputCount );
3624 break;
3625 case 3:
3626 err = (object->*func)( reference,
3627 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3628 output, (void *)outputCount, 0 );
3629 break;
3630 case 2:
3631 err = (object->*func)( reference,
3632 ARG32(input[0]), ARG32(input[1]),
3633 output, (void *)outputCount, 0, 0 );
3634 break;
3635 case 1:
3636 err = (object->*func)( reference,
3637 ARG32(input[0]),
3638 output, (void *)outputCount, 0, 0, 0 );
3639 break;
3640 case 0:
3641 err = (object->*func)( reference,
3642 output, (void *)outputCount, 0, 0, 0, 0 );
3643 break;
3644
3645 default:
3646 IOLog("%s: Bad method table\n", object->getName());
3647 }
3648 }
3649 while( false);
3650
3651 return( err);
3652 }
3653
3654 /* Routine io_connect_method_scalarI_structureI */
3655 kern_return_t is_io_connect_method_scalarI_structureI(
3656 io_connect_t connect,
3657 uint32_t index,
3658 io_scalar_inband_t input,
3659 mach_msg_type_number_t inputCount,
3660 io_struct_inband_t inputStruct,
3661 mach_msg_type_number_t inputStructCount )
3662 {
3663 uint32_t i;
3664 io_scalar_inband64_t _input;
3665
3666 mach_msg_type_number_t scalar_outputCnt = 0;
3667 mach_msg_type_number_t inband_outputCnt = 0;
3668 mach_vm_size_t ool_output_size = 0;
3669
3670 for (i = 0; i < inputCount; i++)
3671 _input[i] = SCALAR64(input[i]);
3672
3673 return (is_io_connect_method(connect, index,
3674 _input, inputCount,
3675 inputStruct, inputStructCount,
3676 0, 0,
3677 NULL, &inband_outputCnt,
3678 NULL, &scalar_outputCnt,
3679 0, &ool_output_size));
3680 }
3681
3682 kern_return_t shim_io_connect_method_scalarI_structureI(
3683 IOExternalMethod * method,
3684 IOService * object,
3685 const io_user_scalar_t * input,
3686 mach_msg_type_number_t inputCount,
3687 io_struct_inband_t inputStruct,
3688 mach_msg_type_number_t inputStructCount )
3689 {
3690 IOMethod func;
3691 IOReturn err = kIOReturnBadArgument;
3692
3693 do
3694 {
3695 if( (kIOUCVariableStructureSize != method->count0)
3696 && (inputCount != method->count0))
3697 {
3698 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3699 continue;
3700 }
3701 if( (kIOUCVariableStructureSize != method->count1)
3702 && (inputStructCount != method->count1))
3703 {
3704 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3705 continue;
3706 }
3707
3708 func = method->func;
3709
3710 switch( inputCount) {
3711
3712 case 5:
3713 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3714 ARG32(input[3]), ARG32(input[4]),
3715 inputStruct );
3716 break;
3717 case 4:
3718 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
3719 ARG32(input[3]),
3720 inputStruct, (void *)(uintptr_t)inputStructCount );
3721 break;
3722 case 3:
3723 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3724 inputStruct, (void *)(uintptr_t)inputStructCount,
3725 0 );
3726 break;
3727 case 2:
3728 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
3729 inputStruct, (void *)(uintptr_t)inputStructCount,
3730 0, 0 );
3731 break;
3732 case 1:
3733 err = (object->*func)( ARG32(input[0]),
3734 inputStruct, (void *)(uintptr_t)inputStructCount,
3735 0, 0, 0 );
3736 break;
3737 case 0:
3738 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
3739 0, 0, 0, 0 );
3740 break;
3741
3742 default:
3743 IOLog("%s: Bad method table\n", object->getName());
3744 }
3745 }
3746 while (false);
3747
3748 return( err);
3749 }
3750
3751 kern_return_t shim_io_async_method_scalarI_structureI(
3752 IOExternalAsyncMethod * method,
3753 IOService * object,
3754 mach_port_t asyncWakePort,
3755 io_user_reference_t * asyncReference,
3756 uint32_t asyncReferenceCount,
3757 const io_user_scalar_t * input,
3758 mach_msg_type_number_t inputCount,
3759 io_struct_inband_t inputStruct,
3760 mach_msg_type_number_t inputStructCount )
3761 {
3762 IOAsyncMethod func;
3763 uint32_t i;
3764 IOReturn err = kIOReturnBadArgument;
3765 io_async_ref_t reference;
3766
3767 for (i = 0; i < asyncReferenceCount; i++)
3768 reference[i] = REF32(asyncReference[i]);
3769
3770 do
3771 {
3772 if( (kIOUCVariableStructureSize != method->count0)
3773 && (inputCount != method->count0))
3774 {
3775 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3776 continue;
3777 }
3778 if( (kIOUCVariableStructureSize != method->count1)
3779 && (inputStructCount != method->count1))
3780 {
3781 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3782 continue;
3783 }
3784
3785 func = method->func;
3786
3787 switch( inputCount) {
3788
3789 case 5:
3790 err = (object->*func)( reference,
3791 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3792 ARG32(input[3]), ARG32(input[4]),
3793 inputStruct );
3794 break;
3795 case 4:
3796 err = (object->*func)( reference,
3797 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3798 ARG32(input[3]),
3799 inputStruct, (void *)(uintptr_t)inputStructCount );
3800 break;
3801 case 3:
3802 err = (object->*func)( reference,
3803 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
3804 inputStruct, (void *)(uintptr_t)inputStructCount,
3805 0 );
3806 break;
3807 case 2:
3808 err = (object->*func)( reference,
3809 ARG32(input[0]), ARG32(input[1]),
3810 inputStruct, (void *)(uintptr_t)inputStructCount,
3811 0, 0 );
3812 break;
3813 case 1:
3814 err = (object->*func)( reference,
3815 ARG32(input[0]),
3816 inputStruct, (void *)(uintptr_t)inputStructCount,
3817 0, 0, 0 );
3818 break;
3819 case 0:
3820 err = (object->*func)( reference,
3821 inputStruct, (void *)(uintptr_t)inputStructCount,
3822 0, 0, 0, 0 );
3823 break;
3824
3825 default:
3826 IOLog("%s: Bad method table\n", object->getName());
3827 }
3828 }
3829 while (false);
3830
3831 return( err);
3832 }
3833
3834 /* Routine io_connect_method_structureI_structureO */
3835 kern_return_t is_io_connect_method_structureI_structureO(
3836 io_object_t connect,
3837 uint32_t index,
3838 io_struct_inband_t input,
3839 mach_msg_type_number_t inputCount,
3840 io_struct_inband_t output,
3841 mach_msg_type_number_t * outputCount )
3842 {
3843 mach_msg_type_number_t scalar_outputCnt = 0;
3844 mach_vm_size_t ool_output_size = 0;
3845
3846 return (is_io_connect_method(connect, index,
3847 NULL, 0,
3848 input, inputCount,
3849 0, 0,
3850 output, outputCount,
3851 NULL, &scalar_outputCnt,
3852 0, &ool_output_size));
3853 }
3854
3855 kern_return_t shim_io_connect_method_structureI_structureO(
3856 IOExternalMethod * method,
3857 IOService * object,
3858 io_struct_inband_t input,
3859 mach_msg_type_number_t inputCount,
3860 io_struct_inband_t output,
3861 IOByteCount * outputCount )
3862 {
3863 IOMethod func;
3864 IOReturn err = kIOReturnBadArgument;
3865
3866 do
3867 {
3868 if( (kIOUCVariableStructureSize != method->count0)
3869 && (inputCount != method->count0))
3870 {
3871 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3872 continue;
3873 }
3874 if( (kIOUCVariableStructureSize != method->count1)
3875 && (*outputCount != method->count1))
3876 {
3877 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3878 continue;
3879 }
3880
3881 func = method->func;
3882
3883 if( method->count1) {
3884 if( method->count0) {
3885 err = (object->*func)( input, output,
3886 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
3887 } else {
3888 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
3889 }
3890 } else {
3891 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
3892 }
3893 }
3894 while( false);
3895
3896
3897 return( err);
3898 }
3899
3900 kern_return_t shim_io_async_method_structureI_structureO(
3901 IOExternalAsyncMethod * method,
3902 IOService * object,
3903 mach_port_t asyncWakePort,
3904 io_user_reference_t * asyncReference,
3905 uint32_t asyncReferenceCount,
3906 io_struct_inband_t input,
3907 mach_msg_type_number_t inputCount,
3908 io_struct_inband_t output,
3909 mach_msg_type_number_t * outputCount )
3910 {
3911 IOAsyncMethod func;
3912 uint32_t i;
3913 IOReturn err;
3914 io_async_ref_t reference;
3915
3916 for (i = 0; i < asyncReferenceCount; i++)
3917 reference[i] = REF32(asyncReference[i]);
3918
3919 err = kIOReturnBadArgument;
3920 do
3921 {
3922 if( (kIOUCVariableStructureSize != method->count0)
3923 && (inputCount != method->count0))
3924 {
3925 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
3926 continue;
3927 }
3928 if( (kIOUCVariableStructureSize != method->count1)
3929 && (*outputCount != method->count1))
3930 {
3931 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
3932 continue;
3933 }
3934
3935 func = method->func;
3936
3937 if( method->count1) {
3938 if( method->count0) {
3939 err = (object->*func)( reference,
3940 input, output,
3941 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
3942 } else {
3943 err = (object->*func)( reference,
3944 output, outputCount, 0, 0, 0, 0 );
3945 }
3946 } else {
3947 err = (object->*func)( reference,
3948 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
3949 }
3950 }
3951 while( false);
3952
3953 return( err);
3954 }
3955
3956 /* Routine io_catalog_send_data */
3957 kern_return_t is_io_catalog_send_data(
3958 mach_port_t master_port,
3959 uint32_t flag,
3960 io_buf_ptr_t inData,
3961 mach_msg_type_number_t inDataCount,
3962 kern_return_t * result)
3963 {
3964 OSObject * obj = 0;
3965 vm_offset_t data;
3966 kern_return_t kr = kIOReturnError;
3967
3968 //printf("io_catalog_send_data called. flag: %d\n", flag);
3969
3970 if( master_port != master_device_port)
3971 return kIOReturnNotPrivileged;
3972
3973 if( (flag != kIOCatalogRemoveKernelLinker &&
3974 flag != kIOCatalogKextdActive &&
3975 flag != kIOCatalogKextdFinishedLaunching) &&
3976 ( !inData || !inDataCount) )
3977 {
3978 return kIOReturnBadArgument;
3979 }
3980
3981 if (inData) {
3982 vm_map_offset_t map_data;
3983
3984 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
3985 return( kIOReturnMessageTooLarge);
3986
3987 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
3988 data = CAST_DOWN(vm_offset_t, map_data);
3989
3990 if( kr != KERN_SUCCESS)
3991 return kr;
3992
3993 // must return success after vm_map_copyout() succeeds
3994
3995 if( inDataCount ) {
3996 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
3997 vm_deallocate( kernel_map, data, inDataCount );
3998 if( !obj) {
3999 *result = kIOReturnNoMemory;
4000 return( KERN_SUCCESS);
4001 }
4002 }
4003 }
4004
4005 switch ( flag ) {
4006 case kIOCatalogResetDrivers:
4007 case kIOCatalogResetDriversNoMatch: {
4008 OSArray * array;
4009
4010 array = OSDynamicCast(OSArray, obj);
4011 if (array) {
4012 if ( !gIOCatalogue->resetAndAddDrivers(array,
4013 flag == kIOCatalogResetDrivers) ) {
4014
4015 kr = kIOReturnError;
4016 }
4017 } else {
4018 kr = kIOReturnBadArgument;
4019 }
4020 }
4021 break;
4022
4023 case kIOCatalogAddDrivers:
4024 case kIOCatalogAddDriversNoMatch: {
4025 OSArray * array;
4026
4027 array = OSDynamicCast(OSArray, obj);
4028 if ( array ) {
4029 if ( !gIOCatalogue->addDrivers( array ,
4030 flag == kIOCatalogAddDrivers) ) {
4031 kr = kIOReturnError;
4032 }
4033 }
4034 else {
4035 kr = kIOReturnBadArgument;
4036 }
4037 }
4038 break;
4039
4040 case kIOCatalogRemoveDrivers:
4041 case kIOCatalogRemoveDriversNoMatch: {
4042 OSDictionary * dict;
4043
4044 dict = OSDynamicCast(OSDictionary, obj);
4045 if ( dict ) {
4046 if ( !gIOCatalogue->removeDrivers( dict,
4047 flag == kIOCatalogRemoveDrivers ) ) {
4048 kr = kIOReturnError;
4049 }
4050 }
4051 else {
4052 kr = kIOReturnBadArgument;
4053 }
4054 }
4055 break;
4056
4057 case kIOCatalogStartMatching: {
4058 OSDictionary * dict;
4059
4060 dict = OSDynamicCast(OSDictionary, obj);
4061 if ( dict ) {
4062 if ( !gIOCatalogue->startMatching( dict ) ) {
4063 kr = kIOReturnError;
4064 }
4065 }
4066 else {
4067 kr = kIOReturnBadArgument;
4068 }
4069 }
4070 break;
4071
4072 case kIOCatalogRemoveKernelLinker:
4073 kr = KERN_NOT_SUPPORTED;
4074 break;
4075
4076 case kIOCatalogKextdActive:
4077 #if !NO_KEXTD
4078 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4079 OSKext::setKextdActive();
4080
4081 /* Dump all nonloaded startup extensions; kextd will now send them
4082 * down on request.
4083 */
4084 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4085 #endif
4086 kr = kIOReturnSuccess;
4087 break;
4088
4089 case kIOCatalogKextdFinishedLaunching: {
4090 #if !NO_KEXTD
4091 static bool clearedBusy = false;
4092
4093 if (!clearedBusy) {
4094 IOService * serviceRoot = IOService::getServiceRoot();
4095 if (serviceRoot) {
4096 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4097 serviceRoot->adjustBusy(-1);
4098 clearedBusy = true;
4099 }
4100 }
4101 #endif
4102 kr = kIOReturnSuccess;
4103 }
4104 break;
4105
4106 default:
4107 kr = kIOReturnBadArgument;
4108 break;
4109 }
4110
4111 if (obj) obj->release();
4112
4113 *result = kr;
4114 return( KERN_SUCCESS);
4115 }
4116
4117 /* Routine io_catalog_terminate */
4118 kern_return_t is_io_catalog_terminate(
4119 mach_port_t master_port,
4120 uint32_t flag,
4121 io_name_t name )
4122 {
4123 kern_return_t kr;
4124
4125 if( master_port != master_device_port )
4126 return kIOReturnNotPrivileged;
4127
4128 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4129 kIOClientPrivilegeAdministrator );
4130 if( kIOReturnSuccess != kr)
4131 return( kr );
4132
4133 switch ( flag ) {
4134 #if !defined(SECURE_KERNEL)
4135 case kIOCatalogServiceTerminate:
4136 OSIterator * iter;
4137 IOService * service;
4138
4139 iter = IORegistryIterator::iterateOver(gIOServicePlane,
4140 kIORegistryIterateRecursively);
4141 if ( !iter )
4142 return kIOReturnNoMemory;
4143
4144 do {
4145 iter->reset();
4146 while( (service = (IOService *)iter->getNextObject()) ) {
4147 if( service->metaCast(name)) {
4148 if ( !service->terminate( kIOServiceRequired
4149 | kIOServiceSynchronous) ) {
4150 kr = kIOReturnUnsupported;
4151 break;
4152 }
4153 }
4154 }
4155 } while( !service && !iter->isValid());
4156 iter->release();
4157 break;
4158
4159 case kIOCatalogModuleUnload:
4160 case kIOCatalogModuleTerminate:
4161 kr = gIOCatalogue->terminateDriversForModule(name,
4162 flag == kIOCatalogModuleUnload);
4163 break;
4164 #endif
4165
4166 default:
4167 kr = kIOReturnBadArgument;
4168 break;
4169 }
4170
4171 return( kr );
4172 }
4173
4174 /* Routine io_catalog_get_data */
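/* Serializes the requested portion of the IOCatalogue (selected by 'flag')
 * into XML text and returns it to the caller as out-of-line memory: the text
 * is copied into a fresh kernel_map allocation and converted into a
 * vm_map_copy_t for the MIG reply.  Requires the master device port.
 */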
4175 kern_return_t is_io_catalog_get_data(
4176 mach_port_t master_port,
4177 uint32_t flag,
4178 io_buf_ptr_t *outData,
4179 mach_msg_type_number_t *outDataCount)
4180 {
4181 kern_return_t kr = kIOReturnSuccess;
4182 OSSerialize * s;
4183
4184 if( master_port != master_device_port)
4185 return kIOReturnNotPrivileged;
4186
4187 //printf("io_catalog_get_data called. flag: %d\n", flag);
4188
4189 s = OSSerialize::withCapacity(4096);
4190 if ( !s )
4191 return kIOReturnNoMemory;
4192
4193 s->clearText();
4194
4195 kr = gIOCatalogue->serializeData(flag, s);
4196
4197 if ( kr == kIOReturnSuccess ) {
4198 vm_offset_t data;
4199 vm_map_copy_t copy;
4200 vm_size_t size;
4201
4202 size = s->getLength();
4203 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
4204 if ( kr == kIOReturnSuccess ) {
4205 bcopy(s->text(), (void *)data, size);
4206 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
4207 (vm_map_size_t)size, true, &copy);
4208 *outData = (char *)copy;
4209 *outDataCount = size;
4210 }
4211 }
4212
4213 s->release();
4214
4215 return kr;
4216 }
4217
4218 /* Routine io_catalog_get_gen_count */
4219 kern_return_t is_io_catalog_get_gen_count(
4220 mach_port_t master_port,
4221 uint32_t *genCount)
4222 {
4223 if( master_port != master_device_port)
4224 return kIOReturnNotPrivileged;
4225
4226 //printf("io_catalog_get_gen_count called.\n");
4227
4228 if ( !genCount )
4229 return kIOReturnBadArgument;
4230
4231 *genCount = gIOCatalogue->getGenerationCount();
4232
4233 return kIOReturnSuccess;
4234 }
4235
4236 /* Routine io_catalog_module_loaded.
4237 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it does not appear to have any callers.
4238 */
4239 kern_return_t is_io_catalog_module_loaded(
4240 mach_port_t master_port,
4241 io_name_t name)
4242 {
4243 if( master_port != master_device_port)
4244 return kIOReturnNotPrivileged;
4245
4246 //printf("io_catalog_module_loaded called. name %s\n", name);
4247
4248 if ( !name )
4249 return kIOReturnBadArgument;
4250
4251 gIOCatalogue->moduleHasLoaded(name);
4252
4253 return kIOReturnSuccess;
4254 }
4255
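/* Routine io_catalog_reset
 * Restores the IOCatalogue to its startup driver set.  Only
 * kIOCatalogResetDefault is recognized, and the caller must present the
 * master device port.
 */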
4256 kern_return_t is_io_catalog_reset(
4257 mach_port_t master_port,
4258 uint32_t flag)
4259 {
4260 if( master_port != master_device_port)
4261 return kIOReturnNotPrivileged;
4262
4263 switch ( flag ) {
4264 case kIOCatalogResetDefault:
4265 gIOCatalogue->reset();
4266 break;
4267
4268 default:
4269 return kIOReturnBadArgument;
4270 }
4271
4272 return kIOReturnSuccess;
4273 }
4274
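/* Fast-path "trap" entry point: looks up the caller's IOUserClient from the
 * supplied connect ref, asks it (via getTargetAndTrapForIndex) for the
 * IOExternalTrap registered at args->index, and invokes that function with up
 * to six scalar arguments.
 *
 * Illustrative user-space sketch (hypothetical driver; trap index 0 is
 * assumed): a trap like this is reached through the IOConnectTrap* wrappers
 * in IOKitLib, e.g.
 *
 *   #include <IOKit/IOKitLib.h>
 *   // 'connect' comes from IOServiceOpen(); p1/p2 are whatever scalars the
 *   // hypothetical trap expects.
 *   kern_return_t kr = IOConnectTrap2(connect, 0, (uintptr_t) p1, (uintptr_t) p2);
 */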
4275 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
4276 {
4277 kern_return_t result = kIOReturnBadArgument;
4278 IOUserClient *userClient;
4279
4280 if ((userClient = OSDynamicCast(IOUserClient,
4281 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
4282 IOExternalTrap *trap;
4283 IOService *target = NULL;
4284
4285 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
4286
4287 if (trap && target) {
4288 IOTrap func;
4289
4290 func = trap->func;
4291
4292 if (func) {
4293 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
4294 }
4295 }
4296
4297 userClient->release();
4298 }
4299
4300 return result;
4301 }
4302
4303 } /* extern "C" */
4304
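/* Central dispatcher for external methods.  When the caller (normally a
 * subclass override) supplies an IOExternalMethodDispatch entry, the scalar
 * counts and structure sizes in 'args' are validated against that entry
 * before its function is invoked.  With no dispatch entry, the call falls
 * back to the legacy per-selector tables via getTargetAndMethodForIndex /
 * getAsyncTargetAndMethodForIndex and the shim_io_* routines.
 *
 * Minimal subclass sketch (hypothetical MyUserClient and sDouble; not part of
 * this file), assuming the usual 'super' typedef from OSDeclareDefaultStructors:
 *
 *   static IOReturn sDouble(OSObject *target, void *ref, IOExternalMethodArguments *args)
 *   {
 *       // one scalar in, one scalar out; the counts are enforced by the table entry
 *       args->scalarOutput[0] = args->scalarInput[0] * 2;
 *       return kIOReturnSuccess;
 *   }
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments *args,
 *       IOExternalMethodDispatch *dispatch, OSObject *target, void *reference)
 *   {
 *       static const IOExternalMethodDispatch sMethods[] = {
 *           // function, scalar-in count, struct-in size, scalar-out count, struct-out size
 *           { sDouble, 1, 0, 1, 0 },
 *       };
 *       if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *           dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *           if (!target) target = this;
 *       }
 *       return super::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */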
4305 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
4306 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
4307 {
4308 IOReturn err;
4309 IOService * object;
4310 IOByteCount structureOutputSize;
4311
4312 if (dispatch)
4313 {
4314 uint32_t count;
4315 count = dispatch->checkScalarInputCount;
4316 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
4317 {
4318 return (kIOReturnBadArgument);
4319 }
4320
4321 count = dispatch->checkStructureInputSize;
4322 if ((kIOUCVariableStructureSize != count)
4323 && (count != ((args->structureInputDescriptor)
4324 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
4325 {
4326 return (kIOReturnBadArgument);
4327 }
4328
4329 count = dispatch->checkScalarOutputCount;
4330 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
4331 {
4332 return (kIOReturnBadArgument);
4333 }
4334
4335 count = dispatch->checkStructureOutputSize;
4336 if ((kIOUCVariableStructureSize != count)
4337 && (count != ((args->structureOutputDescriptor)
4338 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
4339 {
4340 return (kIOReturnBadArgument);
4341 }
4342
4343 if (dispatch->function)
4344 err = (*dispatch->function)(target, reference, args);
4345 else
4346 err = kIOReturnNoCompletion; /* implementor can dispatch */
4347
4348 return (err);
4349 }
4350
4351
4352 // pre-Leopard APIs don't support out-of-line (ool) structs
4353 if (args->structureInputDescriptor || args->structureOutputDescriptor)
4354 {
4355 err = kIOReturnIPCError;
4356 return (err);
4357 }
4358
4359 structureOutputSize = args->structureOutputSize;
4360
4361 if (args->asyncWakePort)
4362 {
4363 IOExternalAsyncMethod * method;
4364
4365 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) )
4366 return (kIOReturnUnsupported);
4367
4368 if (kIOUCForegroundOnly & method->flags)
4369 {
4370 /* is graphics access denied for current task? */
4371 if (proc_get_effective_task_policy(current_task(), TASK_POLICY_GPU_DENY) != 0)
4372 return (kIOReturnNotPermitted);
4373 }
4374
4375 switch (method->flags & kIOUCTypeMask)
4376 {
4377 case kIOUCScalarIStructI:
4378 err = shim_io_async_method_scalarI_structureI( method, object,
4379 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4380 args->scalarInput, args->scalarInputCount,
4381 (char *)args->structureInput, args->structureInputSize );
4382 break;
4383
4384 case kIOUCScalarIScalarO:
4385 err = shim_io_async_method_scalarI_scalarO( method, object,
4386 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4387 args->scalarInput, args->scalarInputCount,
4388 args->scalarOutput, &args->scalarOutputCount );
4389 break;
4390
4391 case kIOUCScalarIStructO:
4392 err = shim_io_async_method_scalarI_structureO( method, object,
4393 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4394 args->scalarInput, args->scalarInputCount,
4395 (char *) args->structureOutput, &args->structureOutputSize );
4396 break;
4397
4398
4399 case kIOUCStructIStructO:
4400 err = shim_io_async_method_structureI_structureO( method, object,
4401 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
4402 (char *)args->structureInput, args->structureInputSize,
4403 (char *) args->structureOutput, &args->structureOutputSize );
4404 break;
4405
4406 default:
4407 err = kIOReturnBadArgument;
4408 break;
4409 }
4410 }
4411 else
4412 {
4413 IOExternalMethod * method;
4414
4415 if( !(method = getTargetAndMethodForIndex(&object, selector)) )
4416 return (kIOReturnUnsupported);
4417
4418 if (kIOUCForegroundOnly & method->flags)
4419 {
4420 /* is graphics access denied for current task? */
4421 if (proc_get_effective_task_policy(current_task(), TASK_POLICY_GPU_DENY) != 0)
4422 return (kIOReturnNotPermitted);
4423
4424 }
4425
4426 switch (method->flags & kIOUCTypeMask)
4427 {
4428 case kIOUCScalarIStructI:
4429 err = shim_io_connect_method_scalarI_structureI( method, object,
4430 args->scalarInput, args->scalarInputCount,
4431 (char *) args->structureInput, args->structureInputSize );
4432 break;
4433
4434 case kIOUCScalarIScalarO:
4435 err = shim_io_connect_method_scalarI_scalarO( method, object,
4436 args->scalarInput, args->scalarInputCount,
4437 args->scalarOutput, &args->scalarOutputCount );
4438 break;
4439
4440 case kIOUCScalarIStructO:
4441 err = shim_io_connect_method_scalarI_structureO( method, object,
4442 args->scalarInput, args->scalarInputCount,
4443 (char *) args->structureOutput, &structureOutputSize );
4444 break;
4445
4446
4447 case kIOUCStructIStructO:
4448 err = shim_io_connect_method_structureI_structureO( method, object,
4449 (char *) args->structureInput, args->structureInputSize,
4450 (char *) args->structureOutput, &structureOutputSize );
4451 break;
4452
4453 default:
4454 err = kIOReturnBadArgument;
4455 break;
4456 }
4457 }
4458
4459 args->structureOutputSize = structureOutputSize;
4460
4461 return (err);
4462 }
4463
4464
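/* Reserved vtable slots: padding that allows future releases to add virtual
 * methods without breaking binary compatibility.  On ILP32 builds the first
 * two pad slots are already consumed by virtual methods added after the class
 * first shipped; on LP64 they remain unused.
 */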
4465 #if __LP64__
4466 OSMetaClassDefineReservedUnused(IOUserClient, 0);
4467 OSMetaClassDefineReservedUnused(IOUserClient, 1);
4468 #else
4469 OSMetaClassDefineReservedUsed(IOUserClient, 0);
4470 OSMetaClassDefineReservedUsed(IOUserClient, 1);
4471 #endif
4472 OSMetaClassDefineReservedUnused(IOUserClient, 2);
4473 OSMetaClassDefineReservedUnused(IOUserClient, 3);
4474 OSMetaClassDefineReservedUnused(IOUserClient, 4);
4475 OSMetaClassDefineReservedUnused(IOUserClient, 5);
4476 OSMetaClassDefineReservedUnused(IOUserClient, 6);
4477 OSMetaClassDefineReservedUnused(IOUserClient, 7);
4478 OSMetaClassDefineReservedUnused(IOUserClient, 8);
4479 OSMetaClassDefineReservedUnused(IOUserClient, 9);
4480 OSMetaClassDefineReservedUnused(IOUserClient, 10);
4481 OSMetaClassDefineReservedUnused(IOUserClient, 11);
4482 OSMetaClassDefineReservedUnused(IOUserClient, 12);
4483 OSMetaClassDefineReservedUnused(IOUserClient, 13);
4484 OSMetaClassDefineReservedUnused(IOUserClient, 14);
4485 OSMetaClassDefineReservedUnused(IOUserClient, 15);
4486