apple/xnu: iokit/Kernel/IOUserClient.cpp (blob 5cfa033774d2b0a1d01b1cb51bca21148da429b9)
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #if CONFIG_MACF
49
50 extern "C" {
51 #include <security/mac_framework.h>
52 };
53 #include <sys/kauth.h>
54
55 #define IOMACF_LOG 0
56
57 #endif /* CONFIG_MACF */
58
59 #include <IOKit/assert.h>
60
61 #include "IOServicePrivate.h"
62 #include "IOKitKernelInternal.h"
63
64 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
65 #define SCALAR32(x) ((uint32_t )x)
66 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
67 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
68 #define REF32(x) ((int)(x))
69
70 enum
71 {
72 kIOUCAsync0Flags = 3ULL,
73 kIOUCAsync64Flag = 1ULL
74 };
75
76 #if IOKITSTATS
77
78 #define IOStatisticsRegisterCounter() \
79 do { \
80 reserved->counter = IOStatistics::registerUserClient(this); \
81 } while (0)
82
83 #define IOStatisticsUnregisterCounter() \
84 do { \
85 if (reserved) \
86 IOStatistics::unregisterUserClient(reserved->counter); \
87 } while (0)
88
89 #define IOStatisticsClientCall() \
90 do { \
91 IOStatistics::countUserClientCall(client); \
92 } while (0)
93
94 #else
95
96 #define IOStatisticsRegisterCounter()
97 #define IOStatisticsUnregisterCounter()
98 #define IOStatisticsClientCall()
99
100 #endif /* IOKITSTATS */
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 // definitions we should get from osfmk
105
106 //typedef struct ipc_port * ipc_port_t;
107 typedef natural_t ipc_kobject_type_t;
108
109 #define IKOT_IOKIT_SPARE 27
110 #define IKOT_IOKIT_CONNECT 29
111 #define IKOT_IOKIT_OBJECT 30
112
113 extern "C" {
114
115 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
116 ipc_kobject_type_t type );
117
118 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
119
120 extern mach_port_name_t iokit_make_send_right( task_t task,
121 io_object_t obj, ipc_kobject_type_t type );
122
123 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
124
125 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
126
127 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
128
129 extern ipc_port_t master_device_port;
130
131 extern void iokit_retain_port( ipc_port_t port );
132 extern void iokit_release_port( ipc_port_t port );
133 extern void iokit_release_port_send( ipc_port_t port );
134
135 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
136
137 #include <mach/mach_traps.h>
138 #include <vm/vm_map.h>
139
140 } /* extern "C" */
141
142
143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
144
145 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
146
147 class IOMachPort : public OSObject
148 {
149 OSDeclareDefaultStructors(IOMachPort)
150 public:
151 OSObject * object;
152 ipc_port_t port;
153 UInt32 mscount;
154 UInt8 holdDestroy;
155
156 static IOMachPort * portForObject( OSObject * obj,
157 ipc_kobject_type_t type );
158 static bool noMoreSendersForObject( OSObject * obj,
159 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
160 static void releasePortForObject( OSObject * obj,
161 ipc_kobject_type_t type );
162 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
163
164 static OSDictionary * dictForType( ipc_kobject_type_t type );
165
166 static mach_port_name_t makeSendRightForTask( task_t task,
167 io_object_t obj, ipc_kobject_type_t type );
168
169 virtual void free() APPLE_KEXT_OVERRIDE;
170 };
171
172 #define super OSObject
173 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
174
175 static IOLock * gIOObjectPortLock;
176
177 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
178
179 // not in dictForType() for debugging ease
180 static OSDictionary * gIOObjectPorts;
181 static OSDictionary * gIOConnectPorts;
182
183 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
184 {
185 OSDictionary ** dict;
186
187 if( IKOT_IOKIT_OBJECT == type )
188 dict = &gIOObjectPorts;
189 else if( IKOT_IOKIT_CONNECT == type )
190 dict = &gIOConnectPorts;
191 else
192 return( 0 );
193
194 if( 0 == *dict)
195 *dict = OSDictionary::withCapacity( 1 );
196
197 return( *dict );
198 }
199
200 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
201 ipc_kobject_type_t type )
202 {
203 IOMachPort * inst = 0;
204 OSDictionary * dict;
205
206 IOTakeLock( gIOObjectPortLock);
207
208 do {
209
210 dict = dictForType( type );
211 if( !dict)
212 continue;
213
214 if( (inst = (IOMachPort *)
215 dict->getObject( (const OSSymbol *) obj ))) {
216 inst->mscount++;
217 inst->retain();
218 continue;
219 }
220
221 inst = new IOMachPort;
222 if( inst && !inst->init()) {
223 inst = 0;
224 continue;
225 }
226
227 inst->port = iokit_alloc_object_port( obj, type );
228 if( inst->port) {
229 // retains obj
230 dict->setObject( (const OSSymbol *) obj, inst );
231 inst->mscount++;
232
233 } else {
234 inst->release();
235 inst = 0;
236 }
237
238 } while( false );
239
240 IOUnlock( gIOObjectPortLock);
241
242 return( inst );
243 }
244
245 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
246 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
247 {
248 OSDictionary * dict;
249 IOMachPort * machPort;
250 IOUserClient * uc;
251 bool destroyed = true;
252
253 IOTakeLock( gIOObjectPortLock);
254
255 if( (dict = dictForType( type ))) {
256 obj->retain();
257
258 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
259 if( machPort) {
260 destroyed = (machPort->mscount <= *mscount);
261 if (!destroyed) *mscount = machPort->mscount;
262 else
263 {
264 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
265 {
266 uc->noMoreSenders();
267 }
268 dict->removeObject( (const OSSymbol *) obj );
269 }
270 }
271 obj->release();
272 }
273
274 IOUnlock( gIOObjectPortLock);
275
276 return( destroyed );
277 }
278
279 void IOMachPort::releasePortForObject( OSObject * obj,
280 ipc_kobject_type_t type )
281 {
282 OSDictionary * dict;
283 IOMachPort * machPort;
284
285 assert(IKOT_IOKIT_CONNECT != type);
286
287 IOTakeLock( gIOObjectPortLock);
288
289 if( (dict = dictForType( type ))) {
290 obj->retain();
291 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
292 if( machPort && !machPort->holdDestroy)
293 dict->removeObject( (const OSSymbol *) obj );
294 obj->release();
295 }
296
297 IOUnlock( gIOObjectPortLock);
298 }
299
300 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
301 {
302 OSDictionary * dict;
303 IOMachPort * machPort;
304
305 IOLockLock( gIOObjectPortLock );
306
307 if( (dict = dictForType( type ))) {
308 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
309 if( machPort)
310 machPort->holdDestroy = true;
311 }
312
313 IOLockUnlock( gIOObjectPortLock );
314 }
315
316 void IOUserClient::destroyUserReferences( OSObject * obj )
317 {
318 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
319
320 // panther, 3160200
321 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
322
323 OSDictionary * dict;
324
325 IOTakeLock( gIOObjectPortLock);
326 obj->retain();
327
328 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
329 {
330 IOMachPort * port;
331 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
332 if (port)
333 {
334 IOUserClient * uc;
335 if ((uc = OSDynamicCast(IOUserClient, obj)))
336 {
337 uc->noMoreSenders();
338 if (uc->mappings)
339 {
340 dict->setObject((const OSSymbol *) uc->mappings, port);
341 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
342
343 uc->mappings->release();
344 uc->mappings = 0;
345 }
346 }
347 dict->removeObject( (const OSSymbol *) obj );
348 }
349 }
350 obj->release();
351 IOUnlock( gIOObjectPortLock);
352 }
353
354 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
355 io_object_t obj, ipc_kobject_type_t type )
356 {
357 return( iokit_make_send_right( task, obj, type ));
358 }
359
360 void IOMachPort::free( void )
361 {
362 if( port)
363 iokit_destroy_object_port( port );
364 super::free();
365 }
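/*
 * User-space view (sketch; the matched class is just an example): every
 * io_object_t returned to a task is a Mach send right backed by one of these
 * IOMachPort entries. IOObjectRelease() drops the send right, and once the
 * last one disappears the no-senders path below tears the entry down.
 *
 *     io_service_t svc = IOServiceGetMatchingService(kIOMasterPortDefault,
 *                            IOServiceMatching("IOResources"));
 *     if (svc != IO_OBJECT_NULL)
 *         IOObjectRelease(svc);   // last release drives noMoreSendersForObject()
 */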
366
367 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
368
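// IOUserIterator wraps a kernel object (typically an OSIterator) handed out to
// user space, serializing concurrent MIG requests against it with its own lock.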
369 class IOUserIterator : public OSIterator
370 {
371 OSDeclareDefaultStructors(IOUserIterator)
372 public:
373 OSObject * userIteratorObject;
374 IOLock * lock;
375
376 static IOUserIterator * withIterator(OSIterator * iter);
377 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
378 virtual void free() APPLE_KEXT_OVERRIDE;
379
380 virtual void reset() APPLE_KEXT_OVERRIDE;
381 virtual bool isValid() APPLE_KEXT_OVERRIDE;
382 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
383 };
384
385 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
386
387 class IOUserNotification : public IOUserIterator
388 {
389 OSDeclareDefaultStructors(IOUserNotification)
390
391 #define holdNotify userIteratorObject
392
393 public:
394
395 virtual void free() APPLE_KEXT_OVERRIDE;
396
397 virtual void setNotification( IONotifier * obj );
398
399 virtual void reset() APPLE_KEXT_OVERRIDE;
400 virtual bool isValid() APPLE_KEXT_OVERRIDE;
401 };
402
403 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
404
405 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
406
407 IOUserIterator *
408 IOUserIterator::withIterator(OSIterator * iter)
409 {
410 IOUserIterator * me;
411
412 if (!iter) return (0);
413
414 me = new IOUserIterator;
415 if (me && !me->init())
416 {
417 me->release();
418 me = 0;
419 }
420 if (!me) return me;
421 me->userIteratorObject = iter;
422
423 return (me);
424 }
425
426 bool
427 IOUserIterator::init( void )
428 {
429 if (!OSObject::init()) return (false);
430
431 lock = IOLockAlloc();
432 if( !lock)
433 return( false );
434
435 return (true);
436 }
437
438 void
439 IOUserIterator::free()
440 {
441 if (userIteratorObject) userIteratorObject->release();
442 if (lock) IOLockFree(lock);
443 OSObject::free();
444 }
445
446 void
447 IOUserIterator::reset()
448 {
449 IOLockLock(lock);
450 assert(OSDynamicCast(OSIterator, userIteratorObject));
451 ((OSIterator *)userIteratorObject)->reset();
452 IOLockUnlock(lock);
453 }
454
455 bool
456 IOUserIterator::isValid()
457 {
458 bool ret;
459
460 IOLockLock(lock);
461 assert(OSDynamicCast(OSIterator, userIteratorObject));
462 ret = ((OSIterator *)userIteratorObject)->isValid();
463 IOLockUnlock(lock);
464
465 return (ret);
466 }
467
468 OSObject *
469 IOUserIterator::getNextObject()
470 {
471 OSObject * ret;
472
473 IOLockLock(lock);
474 assert(OSDynamicCast(OSIterator, userIteratorObject));
475 ret = ((OSIterator *)userIteratorObject)->getNextObject();
476 IOLockUnlock(lock);
477
478 return (ret);
479 }
480
481 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
482 extern "C" {
483
484 // functions called from osfmk/device/iokit_rpc.c
485
486 void
487 iokit_add_reference( io_object_t obj )
488 {
489 if( obj)
490 obj->retain();
491 }
492
493 void
494 iokit_remove_reference( io_object_t obj )
495 {
496 if( obj)
497 obj->release();
498 }
499
500 void
501 iokit_add_connect_reference( io_object_t obj )
502 {
503 IOUserClient * uc;
504
505 if (!obj) return;
506
507 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
508
509 obj->retain();
510 }
511
512 void
513 iokit_remove_connect_reference( io_object_t obj )
514 {
515 IOUserClient * uc;
516 bool finalize = false;
517
518 if (!obj) return;
519
520 if ((uc = OSDynamicCast(IOUserClient, obj)))
521 {
522 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
523 {
524 IOLockLock(gIOObjectPortLock);
525 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
526 IOLockUnlock(gIOObjectPortLock);
527 }
528 if (finalize) uc->scheduleFinalize(true);
529 }
530
531 obj->release();
532 }
533
534 bool
535 IOUserClient::finalizeUserReferences(OSObject * obj)
536 {
537 IOUserClient * uc;
538 bool ok = true;
539
540 if ((uc = OSDynamicCast(IOUserClient, obj)))
541 {
542 IOLockLock(gIOObjectPortLock);
543 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
544 IOLockUnlock(gIOObjectPortLock);
545 }
546 return (ok);
547 }
548
549 ipc_port_t
550 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
551 {
552 IOMachPort * machPort;
553 ipc_port_t port;
554
555 if( (machPort = IOMachPort::portForObject( obj, type ))) {
556
557 port = machPort->port;
558 if( port)
559 iokit_retain_port( port );
560
561 machPort->release();
562
563 } else
564 port = NULL;
565
566 return( port );
567 }
568
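// iokit_client_died() is invoked from the no-senders handling in
// osfmk/device/iokit_rpc.c: when the last user-space send right for a port
// disappears, e.g. because the owning process exited without calling
// IOServiceClose(), connect ports get clientDied(), while IOMemoryMap and
// IOUserNotification objects are cleaned up below.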
569 kern_return_t
570 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
571 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
572 {
573 IOUserClient * client;
574 IOMemoryMap * map;
575 IOUserNotification * notify;
576
577 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
578 return( kIOReturnNotReady );
579
580 if( IKOT_IOKIT_CONNECT == type)
581 {
582 if( (client = OSDynamicCast( IOUserClient, obj )))
583 {
584 IOStatisticsClientCall();
585 client->clientDied();
586 }
587 }
588 else if( IKOT_IOKIT_OBJECT == type)
589 {
590 if( (map = OSDynamicCast( IOMemoryMap, obj )))
591 map->taskDied();
592 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
593 notify->setNotification( 0 );
594 }
595
596 return( kIOReturnSuccess );
597 }
598
599 }; /* extern "C" */
600
601 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
602
603 class IOServiceUserNotification : public IOUserNotification
604 {
605 OSDeclareDefaultStructors(IOServiceUserNotification)
606
607 struct PingMsg {
608 mach_msg_header_t msgHdr;
609 OSNotificationHeader64 notifyHeader;
610 };
611
612 enum { kMaxOutstanding = 1024 };
613
614 PingMsg * pingMsg;
615 vm_size_t msgSize;
616 OSArray * newSet;
617 OSObject * lastEntry;
618 bool armed;
619
620 public:
621
622 virtual bool init( mach_port_t port, natural_t type,
623 void * reference, vm_size_t referenceSize,
624 bool clientIs64 );
625 virtual void free() APPLE_KEXT_OVERRIDE;
626
627 static bool _handler( void * target,
628 void * ref, IOService * newService, IONotifier * notifier );
629 virtual bool handler( void * ref, IOService * newService );
630
631 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
632 };
633
634 class IOServiceMessageUserNotification : public IOUserNotification
635 {
636 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
637
638 struct PingMsg {
639 mach_msg_header_t msgHdr;
640 mach_msg_body_t msgBody;
641 mach_msg_port_descriptor_t ports[1];
642 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
643 };
644
645 PingMsg * pingMsg;
646 vm_size_t msgSize;
647 uint8_t clientIs64;
648 int owningPID;
649
650 public:
651
652 virtual bool init( mach_port_t port, natural_t type,
653 void * reference, vm_size_t referenceSize,
654 vm_size_t extraSize,
655 bool clientIs64 );
656
657 virtual void free() APPLE_KEXT_OVERRIDE;
658
659 static IOReturn _handler( void * target, void * ref,
660 UInt32 messageType, IOService * provider,
661 void * messageArgument, vm_size_t argSize );
662 virtual IOReturn handler( void * ref,
663 UInt32 messageType, IOService * provider,
664 void * messageArgument, vm_size_t argSize );
665
666 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
667 };
668
669 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
670
671 #undef super
672 #define super IOUserIterator
673 OSDefineMetaClass( IOUserNotification, IOUserIterator )
674 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
675
676 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
677
678 void IOUserNotification::free( void )
679 {
680 if (holdNotify)
681 {
682 assert(OSDynamicCast(IONotifier, holdNotify));
683 ((IONotifier *)holdNotify)->remove();
684 holdNotify = 0;
685 }
686 // can't be in handler now
687
688 super::free();
689 }
690
691
692 void IOUserNotification::setNotification( IONotifier * notify )
693 {
694 OSObject * previousNotify;
695
696 IOLockLock( gIOObjectPortLock);
697
698 previousNotify = holdNotify;
699 holdNotify = notify;
700
701 IOLockUnlock( gIOObjectPortLock);
702
703 if( previousNotify)
704 {
705 assert(OSDynamicCast(IONotifier, previousNotify));
706 ((IONotifier *)previousNotify)->remove();
707 }
708 }
709
710 void IOUserNotification::reset()
711 {
712 // ?
713 }
714
715 bool IOUserNotification::isValid()
716 {
717 return( true );
718 }
719
720 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
721
722 #undef super
723 #define super IOUserNotification
724 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
725
726 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
727
728 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
729 void * reference, vm_size_t referenceSize,
730 bool clientIs64 )
731 {
732 if( !super::init())
733 return( false );
734
735 newSet = OSArray::withCapacity( 1 );
736 if( !newSet)
737 return( false );
738
739 if (referenceSize > sizeof(OSAsyncReference64))
740 return( false );
741
742 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
743 pingMsg = (PingMsg *) IOMalloc( msgSize);
744 if( !pingMsg)
745 return( false );
746
747 bzero( pingMsg, msgSize);
748
749 pingMsg->msgHdr.msgh_remote_port = port;
750 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
751 MACH_MSG_TYPE_COPY_SEND /*remote*/,
752 MACH_MSG_TYPE_MAKE_SEND /*local*/);
753 pingMsg->msgHdr.msgh_size = msgSize;
754 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
755
756 pingMsg->notifyHeader.size = 0;
757 pingMsg->notifyHeader.type = type;
758 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
759
760 return( true );
761 }
762
763 void IOServiceUserNotification::free( void )
764 {
765 PingMsg * _pingMsg;
766 vm_size_t _msgSize;
767 OSArray * _newSet;
768 OSObject * _lastEntry;
769
770 _pingMsg = pingMsg;
771 _msgSize = msgSize;
772 _lastEntry = lastEntry;
773 _newSet = newSet;
774
775 super::free();
776
777 if( _pingMsg && _msgSize) {
778 if (_pingMsg->msgHdr.msgh_remote_port) {
779 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
780 }
781 IOFree(_pingMsg, _msgSize);
782 }
783
784 if( _lastEntry)
785 _lastEntry->release();
786
787 if( _newSet)
788 _newSet->release();
789 }
790
791 bool IOServiceUserNotification::_handler( void * target,
792 void * ref, IOService * newService, IONotifier * notifier )
793 {
794 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
795 }
796
797 bool IOServiceUserNotification::handler( void * ref,
798 IOService * newService )
799 {
800 unsigned int count;
801 kern_return_t kr;
802 ipc_port_t port = NULL;
803 bool sendPing = false;
804
805 IOTakeLock( lock );
806
807 count = newSet->getCount();
808 if( count < kMaxOutstanding) {
809
810 newSet->setObject( newService );
811 if( (sendPing = (armed && (0 == count))))
812 armed = false;
813 }
814
815 IOUnlock( lock );
816
817 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
818 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
819
820 if( sendPing) {
821 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
822 pingMsg->msgHdr.msgh_local_port = port;
823 else
824 pingMsg->msgHdr.msgh_local_port = NULL;
825
826 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
827 pingMsg->msgHdr.msgh_size,
828 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
829 0);
830 if( port)
831 iokit_release_port( port );
832
833 if( KERN_SUCCESS != kr)
834 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
835 }
836
837 return( true );
838 }
839
840 OSObject * IOServiceUserNotification::getNextObject()
841 {
842 unsigned int count;
843 OSObject * result;
844
845 IOTakeLock( lock );
846
847 if( lastEntry)
848 lastEntry->release();
849
850 count = newSet->getCount();
851 if( count ) {
852 result = newSet->getObject( count - 1 );
853 result->retain();
854 newSet->removeObject( count - 1);
855 } else {
856 result = 0;
857 armed = true;
858 }
859 lastEntry = result;
860
861 IOUnlock( lock );
862
863 return( result );
864 }
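/*
 * User-space counterpart (sketch; notifyPort, DeviceAppeared and the matched
 * class are illustrative): IOServiceAddMatchingNotification() creates one of
 * these objects. Only the first arrival into an empty, armed set sends a ping;
 * draining the iterator until it is empty re-arms it.
 *
 *     static void DeviceAppeared(void *refcon, io_iterator_t iter)
 *     {
 *         io_service_t svc;
 *         while ((svc = IOIteratorNext(iter)))   // drains newSet, re-arming
 *             IOObjectRelease(svc);
 *     }
 *
 *     io_iterator_t iter;
 *     IOServiceAddMatchingNotification(notifyPort, kIOFirstMatchNotification,
 *                                      IOServiceMatching("IOMedia"),
 *                                      DeviceAppeared, NULL, &iter);
 *     DeviceAppeared(NULL, iter);                // initial drain arms it
 */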
865
866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
867
868 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
869
870 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
871
872 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
873 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
874 bool client64 )
875 {
876 if( !super::init())
877 return( false );
878
879 if (referenceSize > sizeof(OSAsyncReference64))
880 return( false );
881
882 clientIs64 = client64;
883
884 owningPID = proc_selfpid();
885
886 extraSize += sizeof(IOServiceInterestContent64);
887 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize;
888 pingMsg = (PingMsg *) IOMalloc( msgSize);
889 if( !pingMsg)
890 return( false );
891
892 bzero( pingMsg, msgSize);
893
894 pingMsg->msgHdr.msgh_remote_port = port;
895 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
896 | MACH_MSGH_BITS(
897 MACH_MSG_TYPE_COPY_SEND /*remote*/,
898 MACH_MSG_TYPE_MAKE_SEND /*local*/);
899 pingMsg->msgHdr.msgh_size = msgSize;
900 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
901
902 pingMsg->msgBody.msgh_descriptor_count = 1;
903
904 pingMsg->ports[0].name = 0;
905 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
906 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
907
908 pingMsg->notifyHeader.size = extraSize;
909 pingMsg->notifyHeader.type = type;
910 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
911
912 return( true );
913 }
914
915 void IOServiceMessageUserNotification::free( void )
916 {
917 PingMsg * _pingMsg;
918 vm_size_t _msgSize;
919
920 _pingMsg = pingMsg;
921 _msgSize = msgSize;
922
923 super::free();
924
925 if( _pingMsg && _msgSize) {
926 if (_pingMsg->msgHdr.msgh_remote_port) {
927 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
928 }
929 IOFree( _pingMsg, _msgSize);
930 }
931 }
932
933 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
934 UInt32 messageType, IOService * provider,
935 void * argument, vm_size_t argSize )
936 {
937 return( ((IOServiceMessageUserNotification *) target)->handler(
938 ref, messageType, provider, argument, argSize));
939 }
940
941 IOReturn IOServiceMessageUserNotification::handler( void * ref,
942 UInt32 messageType, IOService * provider,
943 void * messageArgument, vm_size_t argSize )
944 {
945 kern_return_t kr;
946 ipc_port_t thisPort, providerPort;
947 IOServiceInterestContent64 * data = (IOServiceInterestContent64 *)
948 ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size);
949 // == pingMsg->notifyHeader.content;
950
951 if (kIOMessageCopyClientID == messageType)
952 {
953 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
954 return (kIOReturnSuccess);
955 }
956
957 data->messageType = messageType;
958
959 if( argSize == 0)
960 {
961 data->messageArgument[0] = (io_user_reference_t) messageArgument;
962 if (clientIs64)
963 argSize = sizeof(data->messageArgument[0]);
964 else
965 {
966 data->messageArgument[0] |= (data->messageArgument[0] << 32);
967 argSize = sizeof(uint32_t);
968 }
969 }
970 else
971 {
972 if( argSize > kIOUserNotifyMaxMessageSize)
973 argSize = kIOUserNotifyMaxMessageSize;
974 bcopy( messageArgument, data->messageArgument, argSize );
975 }
976
977 // adjust message size for ipc restrictions
978 natural_t type;
979 type = pingMsg->notifyHeader.type;
980 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
981 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
982 pingMsg->notifyHeader.type = type;
983 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
984
985 pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size
986 + sizeof( IOServiceInterestContent64 )
987 - sizeof( data->messageArgument)
988 + argSize;
989
990 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
991 pingMsg->ports[0].name = providerPort;
992 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
993 pingMsg->msgHdr.msgh_local_port = thisPort;
994 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
995 pingMsg->msgHdr.msgh_size,
996 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
997 0);
998 if( thisPort)
999 iokit_release_port( thisPort );
1000 if( providerPort)
1001 iokit_release_port( providerPort );
1002
1003 if( KERN_SUCCESS != kr)
1004 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1005
1006 return( kIOReturnSuccess );
1007 }
1008
1009 OSObject * IOServiceMessageUserNotification::getNextObject()
1010 {
1011 return( 0 );
1012 }
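/*
 * User-space counterpart (sketch; notifyPort and InterestCallback are
 * illustrative): a general-interest registration creates one of these objects,
 * and each messageClients() call on the provider becomes one of the complex
 * Mach messages built in handler() above.
 *
 *     io_object_t note;
 *     IOServiceAddInterestNotification(notifyPort, service, kIOGeneralInterest,
 *                                      InterestCallback, NULL, &note);
 */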
1013
1014 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1015
1016 #undef super
1017 #define super IOService
1018 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1019
1020 IOLock * gIOUserClientOwnersLock;
1021
1022 void IOUserClient::initialize( void )
1023 {
1024 gIOObjectPortLock = IOLockAlloc();
1025 gIOUserClientOwnersLock = IOLockAlloc();
1026 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1027 }
1028
1029 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1030 mach_port_t wakePort,
1031 void *callback, void *refcon)
1032 {
1033 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1034 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1035 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1036 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1037 }
1038
1039 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1040 mach_port_t wakePort,
1041 mach_vm_address_t callback, io_user_reference_t refcon)
1042 {
1043 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1044 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1045 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1046 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1047 }
1048
1049 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1050 mach_port_t wakePort,
1051 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1052 {
1053 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1054 if (vm_map_is_64bit(get_task_map(task))) {
1055 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1056 }
1057 }
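/*
 * User-space counterpart (sketch; connect, selector, wakePort, MyCallback and
 * myContext are illustrative): the io_async_ref64_t passed to
 * IOConnectCallAsyncScalarMethod() is what setAsyncReference64() packs. The
 * wake port ends up in index 0 with kIOUCAsync0Flags in the low bits, and the
 * callout function/refcon sit in the reserved callout slots.
 *
 *     io_async_ref64_t ref = { 0 };
 *     ref[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) MyCallback;
 *     ref[kIOAsyncCalloutRefconIndex] = (io_user_reference_t) myContext;
 *     IOConnectCallAsyncScalarMethod(connect, selector, wakePort,
 *                                    ref, kIOAsyncCalloutCount,
 *                                    NULL, 0, NULL, NULL);
 */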
1058
1059 static OSDictionary * CopyConsoleUser(UInt32 uid)
1060 {
1061 OSArray * array;
1062 OSDictionary * user = 0;
1063
1064 if ((array = OSDynamicCast(OSArray,
1065 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1066 {
1067 for (unsigned int idx = 0;
1068 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1069 idx++) {
1070 OSNumber * num;
1071
1072 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1073 && (uid == num->unsigned32BitValue())) {
1074 user->retain();
1075 break;
1076 }
1077 }
1078 array->release();
1079 }
1080 return user;
1081 }
1082
1083 static OSDictionary * CopyUserOnConsole(void)
1084 {
1085 OSArray * array;
1086 OSDictionary * user = 0;
1087
1088 if ((array = OSDynamicCast(OSArray,
1089 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1090 {
1091 for (unsigned int idx = 0;
1092 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1093 idx++)
1094 {
1095 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1096 {
1097 user->retain();
1098 break;
1099 }
1100 }
1101 array->release();
1102 }
1103 return (user);
1104 }
1105
1106 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1107 IOService * service )
1108 {
1109 proc_t p;
1110
1111 p = (proc_t) get_bsdtask_info(task);
1112 if (p)
1113 {
1114 uint64_t authorizationID;
1115
1116 authorizationID = proc_uniqueid(p);
1117 if (authorizationID)
1118 {
1119 if (service->getAuthorizationID() == authorizationID)
1120 {
1121 return (kIOReturnSuccess);
1122 }
1123 }
1124 }
1125
1126 return (kIOReturnNotPermitted);
1127 }
1128
1129 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1130 const char * privilegeName )
1131 {
1132 kern_return_t kr;
1133 security_token_t token;
1134 mach_msg_type_number_t count;
1135 task_t task;
1136 OSDictionary * user;
1137 bool secureConsole;
1138
1139
1140 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1141 sizeof(kIOClientPrivilegeForeground)))
1142 {
1143 if (task_is_gpu_denied(current_task()))
1144 return (kIOReturnNotPrivileged);
1145 else
1146 return (kIOReturnSuccess);
1147 }
1148
1149 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1150 sizeof(kIOClientPrivilegeConsoleSession)))
1151 {
1152 kauth_cred_t cred;
1153 proc_t p;
1154
1155 task = (task_t) securityToken;
1156 if (!task)
1157 task = current_task();
1158 p = (proc_t) get_bsdtask_info(task);
1159 kr = kIOReturnNotPrivileged;
1160
1161 if (p && (cred = kauth_cred_proc_ref(p)))
1162 {
1163 user = CopyUserOnConsole();
1164 if (user)
1165 {
1166 OSNumber * num;
1167 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1168 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1169 {
1170 kr = kIOReturnSuccess;
1171 }
1172 user->release();
1173 }
1174 kauth_cred_unref(&cred);
1175 }
1176 return (kr);
1177 }
1178
1179 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1180 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1181 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1182 else
1183 task = (task_t)securityToken;
1184
1185 count = TASK_SECURITY_TOKEN_COUNT;
1186 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1187
1188 if (KERN_SUCCESS != kr)
1189 {}
1190 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1191 sizeof(kIOClientPrivilegeAdministrator))) {
1192 if (0 != token.val[0])
1193 kr = kIOReturnNotPrivileged;
1194 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1195 sizeof(kIOClientPrivilegeLocalUser))) {
1196 user = CopyConsoleUser(token.val[0]);
1197 if ( user )
1198 user->release();
1199 else
1200 kr = kIOReturnNotPrivileged;
1201 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1202 sizeof(kIOClientPrivilegeConsoleUser))) {
1203 user = CopyConsoleUser(token.val[0]);
1204 if ( user ) {
1205 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1206 kr = kIOReturnNotPrivileged;
1207 else if ( secureConsole ) {
1208 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1209 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1210 kr = kIOReturnNotPrivileged;
1211 }
1212 user->release();
1213 }
1214 else
1215 kr = kIOReturnNotPrivileged;
1216 } else
1217 kr = kIOReturnUnsupported;
1218
1219 return (kr);
1220 }
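/*
 * Typical driver-side use (sketch; MyUserClient is hypothetical): gate the
 * connection on a privilege when it is opened, e.g. administrator only:
 *
 *     bool MyUserClient::initWithTask(task_t owningTask, void *securityID,
 *                                     UInt32 type)
 *     {
 *         if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(
 *                 securityID, kIOClientPrivilegeAdministrator))
 *             return false;
 *         return IOUserClient::initWithTask(owningTask, securityID, type);
 *     }
 */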
1221
1222 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1223 const char * entitlement )
1224 {
1225 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1226
1227 proc_t p = NULL;
1228 pid_t pid = 0;
1229 char procname[MAXCOMLEN + 1] = "";
1230 size_t len = 0;
1231 void *entitlements_blob = NULL;
1232 char *entitlements_data = NULL;
1233 OSObject *entitlements_obj = NULL;
1234 OSDictionary *entitlements = NULL;
1235 OSString *errorString = NULL;
1236 OSObject *value = NULL;
1237
1238 p = (proc_t)get_bsdtask_info(task);
1239 if (p == NULL)
1240 goto fail;
1241 pid = proc_pid(p);
1242 proc_name(pid, procname, (int)sizeof(procname));
1243
1244 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1245 goto fail;
1246
1247 if (len <= offsetof(CS_GenericBlob, data))
1248 goto fail;
1249
1250 /*
1251 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1252 * we'll try to parse in the kernel.
1253 */
1254 len -= offsetof(CS_GenericBlob, data);
1255 if (len > MAX_ENTITLEMENTS_LEN) {
1256 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1257 goto fail;
1258 }
1259
1260 /*
1261 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1262 * what is stored in the entitlements blob. Copy the string and
1263 * terminate it.
1264 */
1265 entitlements_data = (char *)IOMalloc(len + 1);
1266 if (entitlements_data == NULL)
1267 goto fail;
1268 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1269 entitlements_data[len] = '\0';
1270
1271 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1272 if (errorString != NULL) {
1273 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1274 goto fail;
1275 }
1276 if (entitlements_obj == NULL)
1277 goto fail;
1278
1279 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1280 if (entitlements == NULL)
1281 goto fail;
1282
1283 /* Fetch the entitlement value from the dictionary. */
1284 value = entitlements->getObject(entitlement);
1285 if (value != NULL)
1286 value->retain();
1287
1288 fail:
1289 if (entitlements_data != NULL)
1290 IOFree(entitlements_data, len + 1);
1291 if (entitlements_obj != NULL)
1292 entitlements_obj->release();
1293 if (errorString != NULL)
1294 errorString->release();
1295 return value;
1296 }
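/*
 * Typical driver-side use (sketch; the entitlement string is hypothetical):
 *
 *     OSObject * ent = IOUserClient::copyClientEntitlement(owningTask,
 *                          "com.example.allow-my-user-client");
 *     bool allowed = (ent == kOSBooleanTrue);
 *     if (ent) ent->release();
 *
 * The returned object is retained for the caller; boolean entitlements come
 * back as the shared kOSBooleanTrue / kOSBooleanFalse instances.
 */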
1297
1298 bool IOUserClient::init()
1299 {
1300 if (getPropertyTable() || super::init())
1301 return reserve();
1302
1303 return false;
1304 }
1305
1306 bool IOUserClient::init(OSDictionary * dictionary)
1307 {
1308 if (getPropertyTable() || super::init(dictionary))
1309 return reserve();
1310
1311 return false;
1312 }
1313
1314 bool IOUserClient::initWithTask(task_t owningTask,
1315 void * securityID,
1316 UInt32 type )
1317 {
1318 if (getPropertyTable() || super::init())
1319 return reserve();
1320
1321 return false;
1322 }
1323
1324 bool IOUserClient::initWithTask(task_t owningTask,
1325 void * securityID,
1326 UInt32 type,
1327 OSDictionary * properties )
1328 {
1329 bool ok;
1330
1331 ok = super::init( properties );
1332 ok &= initWithTask( owningTask, securityID, type );
1333
1334 return( ok );
1335 }
1336
1337 bool IOUserClient::reserve()
1338 {
1339 if(!reserved) {
1340 reserved = IONew(ExpansionData, 1);
1341 if (!reserved) {
1342 return false;
1343 }
1344 }
1345 setTerminateDefer(NULL, true);
1346 IOStatisticsRegisterCounter();
1347
1348 return true;
1349 }
1350
1351 struct IOUserClientOwner
1352 {
1353 task_t task;
1354 queue_chain_t taskLink;
1355 IOUserClient * uc;
1356 queue_chain_t ucLink;
1357 };
1358
1359 IOReturn
1360 IOUserClient::registerOwner(task_t task)
1361 {
1362 IOUserClientOwner * owner;
1363 IOReturn ret;
1364 bool newOwner;
1365
1366 IOLockLock(gIOUserClientOwnersLock);
1367
1368 newOwner = true;
1369 ret = kIOReturnSuccess;
1370
1371 if (!owners.next) queue_init(&owners);
1372 else
1373 {
1374 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1375 {
1376 if (task != owner->task) continue;
1377 newOwner = false;
1378 break;
1379 }
1380 }
1381 if (newOwner)
1382 {
1383 owner = IONew(IOUserClientOwner, 1);
 1384         if (!owner) ret = kIOReturnNoMemory;
1385 else
1386 {
1387 owner->task = task;
1388 owner->uc = this;
1389 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1390 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1391 }
1392 }
1393
1394 IOLockUnlock(gIOUserClientOwnersLock);
1395
1396 return (ret);
1397 }
1398
1399 void
1400 IOUserClient::noMoreSenders(void)
1401 {
1402 IOUserClientOwner * owner;
1403
1404 IOLockLock(gIOUserClientOwnersLock);
1405
1406 if (owners.next)
1407 {
1408 while (!queue_empty(&owners))
1409 {
1410 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1411 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1412 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1413 IODelete(owner, IOUserClientOwner, 1);
1414 }
1415 owners.next = owners.prev = NULL;
1416 }
1417
1418 IOLockUnlock(gIOUserClientOwnersLock);
1419 }
1420
1421 extern "C" kern_return_t
1422 iokit_task_terminate(task_t task)
1423 {
1424 IOUserClientOwner * owner;
1425 IOUserClient * dead;
1426 IOUserClient * uc;
1427 queue_head_t * taskque;
1428
1429 IOLockLock(gIOUserClientOwnersLock);
1430
1431 taskque = task_io_user_clients(task);
1432 dead = NULL;
1433 while (!queue_empty(taskque))
1434 {
1435 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1436 uc = owner->uc;
1437 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1438 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1439 if (queue_empty(&uc->owners))
1440 {
1441 uc->retain();
1442 IOLog("destroying out of band connect for %s\n", uc->getName());
1443 // now using the uc queue head as a singly linked queue,
1444 // leaving .next as NULL to mark it empty
1445 uc->owners.next = NULL;
1446 uc->owners.prev = (queue_entry_t) dead;
1447 dead = uc;
1448 }
1449 IODelete(owner, IOUserClientOwner, 1);
1450 }
1451
1452 IOLockUnlock(gIOUserClientOwnersLock);
1453
1454 while (dead)
1455 {
1456 uc = dead;
1457 dead = (IOUserClient *)(void *) dead->owners.prev;
1458 uc->owners.prev = NULL;
1459 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1460 uc->release();
1461 }
1462
1463 return (KERN_SUCCESS);
1464 }
1465
1466 void IOUserClient::free()
1467 {
1468 if( mappings) mappings->release();
1469
1470 IOStatisticsUnregisterCounter();
1471
1472 assert(!owners.next);
1473 assert(!owners.prev);
1474
1475 if (reserved) IODelete(reserved, ExpansionData, 1);
1476
1477 super::free();
1478 }
1479
1480 IOReturn IOUserClient::clientDied( void )
1481 {
1482 IOReturn ret = kIOReturnNotReady;
1483
1484 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1485 {
1486 ret = clientClose();
1487 }
1488
1489 return (ret);
1490 }
1491
1492 IOReturn IOUserClient::clientClose( void )
1493 {
1494 return( kIOReturnUnsupported );
1495 }
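/*
 * Subclasses normally override clientClose() to drop their provider and
 * terminate (sketch; MyUserClient and fProvider are hypothetical):
 *
 *     IOReturn MyUserClient::clientClose(void)
 *     {
 *         if (fProvider && fProvider->isOpen(this))
 *             fProvider->close(this);
 *         terminate();              // detach the connection from the registry
 *         return kIOReturnSuccess;
 *     }
 *
 * clientDied() above funnels into the same method when the owning task exits
 * without calling IOServiceClose().
 */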
1496
1497 IOService * IOUserClient::getService( void )
1498 {
1499 return( 0 );
1500 }
1501
1502 IOReturn IOUserClient::registerNotificationPort(
1503 mach_port_t /* port */,
1504 UInt32 /* type */,
1505 UInt32 /* refCon */)
1506 {
1507 return( kIOReturnUnsupported);
1508 }
1509
1510 IOReturn IOUserClient::registerNotificationPort(
1511 mach_port_t port,
1512 UInt32 type,
1513 io_user_reference_t refCon)
1514 {
1515 return (registerNotificationPort(port, type, (UInt32) refCon));
1516 }
1517
1518 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1519 semaphore_t * semaphore )
1520 {
1521 return( kIOReturnUnsupported);
1522 }
1523
1524 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1525 {
1526 return( kIOReturnUnsupported);
1527 }
1528
1529 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1530 IOOptionBits * options,
1531 IOMemoryDescriptor ** memory )
1532 {
1533 return( kIOReturnUnsupported);
1534 }
1535
1536 #if !__LP64__
1537 IOMemoryMap * IOUserClient::mapClientMemory(
1538 IOOptionBits type,
1539 task_t task,
1540 IOOptionBits mapFlags,
1541 IOVirtualAddress atAddress )
1542 {
1543 return (NULL);
1544 }
1545 #endif
1546
1547 IOMemoryMap * IOUserClient::mapClientMemory64(
1548 IOOptionBits type,
1549 task_t task,
1550 IOOptionBits mapFlags,
1551 mach_vm_address_t atAddress )
1552 {
1553 IOReturn err;
1554 IOOptionBits options = 0;
1555 IOMemoryDescriptor * memory;
1556 IOMemoryMap * map = 0;
1557
1558 err = clientMemoryForType( (UInt32) type, &options, &memory );
1559
1560 if( memory && (kIOReturnSuccess == err)) {
1561
1562 options = (options & ~kIOMapUserOptionsMask)
1563 | (mapFlags & kIOMapUserOptionsMask);
1564 map = memory->createMappingInTask( task, atAddress, options );
1565 memory->release();
1566 }
1567
1568 return( map );
1569 }
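/*
 * Sketch of both halves of a shared-memory mapping (MyUserClient, fSharedBuffer
 * and connect are hypothetical). Driver side, consumed by mapClientMemory64()
 * above, which releases the descriptor once it is mapped:
 *
 *     IOReturn MyUserClient::clientMemoryForType(UInt32 type,
 *         IOOptionBits *options, IOMemoryDescriptor **memory)
 *     {
 *         if (type != 0 || !fSharedBuffer) return kIOReturnBadArgument;
 *         fSharedBuffer->retain();
 *         *options = 0;
 *         *memory  = fSharedBuffer;
 *         return kIOReturnSuccess;
 *     }
 *
 * User side:
 *
 *     mach_vm_address_t addr = 0;
 *     mach_vm_size_t    size = 0;
 *     IOConnectMapMemory64(connect, 0, mach_task_self(),
 *                          &addr, &size, kIOMapAnywhere);
 */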
1570
1571 IOReturn IOUserClient::exportObjectToClient(task_t task,
1572 OSObject *obj, io_object_t *clientObj)
1573 {
1574 mach_port_name_t name;
1575
1576 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1577
1578 *(mach_port_name_t *)clientObj = name;
1579 return kIOReturnSuccess;
1580 }
1581
1582 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1583 {
1584 return( 0 );
1585 }
1586
1587 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1588 {
1589 return( 0 );
1590 }
1591
1592 IOExternalMethod * IOUserClient::
1593 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1594 {
1595 IOExternalMethod *method = getExternalMethodForIndex(index);
1596
1597 if (method)
1598 *targetP = (IOService *) method->object;
1599
1600 return method;
1601 }
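/*
 * Legacy dispatch sketch (MyUserClient, sMethods and doOpen are hypothetical;
 * newer drivers override externalMethod() instead):
 *
 *     static const IOExternalMethod sMethods[] = {
 *         // object, func, flags, count0 (in), count1 (out)
 *         { NULL, (IOMethod) &MyUserClient::doOpen, kIOUCScalarIScalarO, 0, 0 },
 *     };
 *
 *     IOExternalMethod * MyUserClient::getTargetAndMethodForIndex(
 *         IOService **targetP, UInt32 index)
 *     {
 *         if (index >= (sizeof(sMethods) / sizeof(sMethods[0]))) return NULL;
 *         *targetP = this;           // every selector targets the client itself
 *         return (IOExternalMethod *) &sMethods[index];
 *     }
 */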
1602
1603 IOExternalAsyncMethod * IOUserClient::
1604 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1605 {
1606 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1607
1608 if (method)
1609 *targetP = (IOService *) method->object;
1610
1611 return method;
1612 }
1613
1614 IOExternalTrap * IOUserClient::
1615 getExternalTrapForIndex(UInt32 index)
1616 {
1617 return NULL;
1618 }
1619
1620 IOExternalTrap * IOUserClient::
1621 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1622 {
1623 IOExternalTrap *trap = getExternalTrapForIndex(index);
1624
1625 if (trap) {
1626 *targetP = trap->object;
1627 }
1628
1629 return trap;
1630 }
1631
1632 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1633 {
1634 mach_port_t port;
1635 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1636
1637 if (MACH_PORT_NULL != port)
1638 iokit_release_port_send(port);
1639
1640 return (kIOReturnSuccess);
1641 }
1642
1643 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1644 {
1645 if (MACH_PORT_NULL != port)
1646 iokit_release_port_send(port);
1647
1648 return (kIOReturnSuccess);
1649 }
1650
1651 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1652 IOReturn result, void *args[], UInt32 numArgs)
1653 {
1654 OSAsyncReference64 reference64;
1655 io_user_reference_t args64[kMaxAsyncArgs];
1656 unsigned int idx;
1657
1658 if (numArgs > kMaxAsyncArgs)
1659 return kIOReturnMessageTooLarge;
1660
1661 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1662 reference64[idx] = REF64(reference[idx]);
1663
1664 for (idx = 0; idx < numArgs; idx++)
1665 args64[idx] = REF64(args[idx]);
1666
1667 return (sendAsyncResult64(reference64, result, args64, numArgs));
1668 }
1669
1670 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1671 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1672 {
1673 return _sendAsyncResult64(reference, result, args, numArgs, options);
1674 }
1675
1676 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1677 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1678 {
1679 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1680 }
1681
1682 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1683 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1684 {
1685 struct ReplyMsg
1686 {
1687 mach_msg_header_t msgHdr;
1688 union
1689 {
1690 struct
1691 {
1692 OSNotificationHeader notifyHdr;
1693 IOAsyncCompletionContent asyncContent;
1694 uint32_t args[kMaxAsyncArgs];
1695 } msg32;
1696 struct
1697 {
1698 OSNotificationHeader64 notifyHdr;
1699 IOAsyncCompletionContent asyncContent;
1700 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1701 } msg64;
1702 } m;
1703 };
1704 ReplyMsg replyMsg;
1705 mach_port_t replyPort;
1706 kern_return_t kr;
1707
1708 // If no reply port, do nothing.
1709 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1710 if (replyPort == MACH_PORT_NULL)
1711 return kIOReturnSuccess;
1712
1713 if (numArgs > kMaxAsyncArgs)
1714 return kIOReturnMessageTooLarge;
1715
1716 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1717 0 /*local*/);
1718 replyMsg.msgHdr.msgh_remote_port = replyPort;
1719 replyMsg.msgHdr.msgh_local_port = 0;
1720 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1721 if (kIOUCAsync64Flag & reference[0])
1722 {
1723 replyMsg.msgHdr.msgh_size =
1724 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1725 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1726 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1727 + numArgs * sizeof(io_user_reference_t);
1728 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1729 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1730
1731 replyMsg.m.msg64.asyncContent.result = result;
1732 if (numArgs)
1733 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1734 }
1735 else
1736 {
1737 unsigned int idx;
1738
1739 replyMsg.msgHdr.msgh_size =
1740 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1741 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1742
1743 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1744 + numArgs * sizeof(uint32_t);
1745 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1746
1747 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1748 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1749
1750 replyMsg.m.msg32.asyncContent.result = result;
1751
1752 for (idx = 0; idx < numArgs; idx++)
1753 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1754 }
1755
1756 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1757 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1758 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1759 } else {
1760 /* Fail on full queue. */
1761 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1762 replyMsg.msgHdr.msgh_size);
1763 }
1764 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr))
1765 IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr );
1766 return kr;
1767 }
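/*
 * User-space receive side (sketch): the replies built above arrive on the wake
 * port as kOSNotificationMessageID messages, and IOKit.framework normally
 * decodes them and invokes the callout stored in the async reference.
 *
 *     IONotificationPortRef np = IONotificationPortCreate(kIOMasterPortDefault);
 *     CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *                        IONotificationPortGetRunLoopSource(np),
 *                        kCFRunLoopDefaultMode);
 *     mach_port_t wakePort = IONotificationPortGetMachPort(np);
 *     // pass wakePort to IOConnectCallAsync*Method(); the run loop source ends
 *     // up in IODispatchCalloutFromMessage(), which calls the function stored
 *     // at kIOAsyncCalloutFuncIndex with the refcon, result and arguments.
 */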
1768
1769
1770 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1771
1772 extern "C" {
1773
1774 #define CHECK(cls,obj,out) \
1775 cls * out; \
1776 if( !(out = OSDynamicCast( cls, obj))) \
1777 return( kIOReturnBadArgument )
1778
1779 #define CHECKLOCKED(cls,obj,out) \
1780 IOUserIterator * oIter; \
1781 cls * out; \
1782 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1783 return (kIOReturnBadArgument); \
1784 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1785 return (kIOReturnBadArgument)
1786
1787 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1788
1789 // Create a vm_map_copy_t or kalloc'ed data for memory
1790 // to be copied out. ipc will free after the copyout.
1791
1792 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1793 io_buf_ptr_t * buf )
1794 {
1795 kern_return_t err;
1796 vm_map_copy_t copy;
1797
1798 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1799 false /* src_destroy */, &copy);
1800
1801 assert( err == KERN_SUCCESS );
1802 if( err == KERN_SUCCESS )
1803 *buf = (char *) copy;
1804
1805 return( err );
1806 }
1807
1808 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1809
1810 /* Routine io_server_version */
1811 kern_return_t is_io_server_version(
1812 mach_port_t master_port,
1813 uint64_t *version)
1814 {
1815 *version = IOKIT_SERVER_VERSION;
1816 return (kIOReturnSuccess);
1817 }
1818
1819 /* Routine io_object_get_class */
1820 kern_return_t is_io_object_get_class(
1821 io_object_t object,
1822 io_name_t className )
1823 {
1824 const OSMetaClass* my_obj = NULL;
1825
1826 if( !object)
1827 return( kIOReturnBadArgument );
1828
1829 my_obj = object->getMetaClass();
1830 if (!my_obj) {
1831 return (kIOReturnNotFound);
1832 }
1833
1834 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1835
1836 return( kIOReturnSuccess );
1837 }
1838
1839 /* Routine io_object_get_superclass */
1840 kern_return_t is_io_object_get_superclass(
1841 mach_port_t master_port,
1842 io_name_t obj_name,
1843 io_name_t class_name)
1844 {
1845 const OSMetaClass* my_obj = NULL;
1846 const OSMetaClass* superclass = NULL;
1847 const OSSymbol *my_name = NULL;
1848 const char *my_cstr = NULL;
1849
1850 if (!obj_name || !class_name)
1851 return (kIOReturnBadArgument);
1852
1853 if( master_port != master_device_port)
1854 return( kIOReturnNotPrivileged);
1855
1856 my_name = OSSymbol::withCString(obj_name);
1857
1858 if (my_name) {
1859 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1860 my_name->release();
1861 }
1862 if (my_obj) {
1863 superclass = my_obj->getSuperClass();
1864 }
1865
1866 if (!superclass) {
1867 return( kIOReturnNotFound );
1868 }
1869
1870 my_cstr = superclass->getClassName();
1871
1872 if (my_cstr) {
1873 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1874 return( kIOReturnSuccess );
1875 }
1876 return (kIOReturnNotFound);
1877 }
1878
1879 /* Routine io_object_get_bundle_identifier */
1880 kern_return_t is_io_object_get_bundle_identifier(
1881 mach_port_t master_port,
1882 io_name_t obj_name,
1883 io_name_t bundle_name)
1884 {
1885 const OSMetaClass* my_obj = NULL;
1886 const OSSymbol *my_name = NULL;
1887 const OSSymbol *identifier = NULL;
1888 const char *my_cstr = NULL;
1889
1890 if (!obj_name || !bundle_name)
1891 return (kIOReturnBadArgument);
1892
1893 if( master_port != master_device_port)
1894 return( kIOReturnNotPrivileged);
1895
1896 my_name = OSSymbol::withCString(obj_name);
1897
1898 if (my_name) {
1899 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1900 my_name->release();
1901 }
1902
1903 if (my_obj) {
1904 identifier = my_obj->getKmodName();
1905 }
1906 if (!identifier) {
1907 return( kIOReturnNotFound );
1908 }
1909
1910 my_cstr = identifier->getCStringNoCopy();
1911 if (my_cstr) {
1912 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1913 return( kIOReturnSuccess );
1914 }
1915
1916 return (kIOReturnBadArgument);
1917 }
1918
1919 /* Routine io_object_conforms_to */
1920 kern_return_t is_io_object_conforms_to(
1921 io_object_t object,
1922 io_name_t className,
1923 boolean_t *conforms )
1924 {
1925 if( !object)
1926 return( kIOReturnBadArgument );
1927
1928 *conforms = (0 != object->metaCast( className ));
1929
1930 return( kIOReturnSuccess );
1931 }
1932
1933 /* Routine io_object_get_retain_count */
1934 kern_return_t is_io_object_get_retain_count(
1935 io_object_t object,
1936 uint32_t *retainCount )
1937 {
1938 if( !object)
1939 return( kIOReturnBadArgument );
1940
1941 *retainCount = object->getRetainCount();
1942 return( kIOReturnSuccess );
1943 }
1944
1945 /* Routine io_iterator_next */
1946 kern_return_t is_io_iterator_next(
1947 io_object_t iterator,
1948 io_object_t *object )
1949 {
1950 IOReturn ret;
1951 OSObject * obj;
1952
1953 CHECK( OSIterator, iterator, iter );
1954
1955 obj = iter->getNextObject();
1956 if( obj) {
1957 obj->retain();
1958 *object = obj;
1959 ret = kIOReturnSuccess;
1960 } else
1961 ret = kIOReturnNoDevice;
1962
1963 return (ret);
1964 }
1965
1966 /* Routine io_iterator_reset */
1967 kern_return_t is_io_iterator_reset(
1968 io_object_t iterator )
1969 {
1970 CHECK( OSIterator, iterator, iter );
1971
1972 iter->reset();
1973
1974 return( kIOReturnSuccess );
1975 }
1976
1977 /* Routine io_iterator_is_valid */
1978 kern_return_t is_io_iterator_is_valid(
1979 io_object_t iterator,
1980 boolean_t *is_valid )
1981 {
1982 CHECK( OSIterator, iterator, iter );
1983
1984 *is_valid = iter->isValid();
1985
1986 return( kIOReturnSuccess );
1987 }
1988
1989
1990 static kern_return_t internal_io_service_match_property_table(
1991 io_service_t _service,
1992 const char * matching,
1993 mach_msg_type_number_t matching_size,
1994 boolean_t *matches)
1995 {
1996 CHECK( IOService, _service, service );
1997
1998 kern_return_t kr;
1999 OSObject * obj;
2000 OSDictionary * dict;
2001
2002 obj = matching_size ? OSUnserializeXML(matching, matching_size)
2003 : OSUnserializeXML(matching);
2004 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2005 *matches = service->passiveMatch( dict );
2006 kr = kIOReturnSuccess;
2007 } else
2008 kr = kIOReturnBadArgument;
2009
2010 if( obj)
2011 obj->release();
2012
2013 return( kr );
2014 }
2015
2016 /* Routine io_service_match_property_table */
2017 kern_return_t is_io_service_match_property_table(
2018 io_service_t service,
2019 io_string_t matching,
2020 boolean_t *matches )
2021 {
2022 return (internal_io_service_match_property_table(service, matching, 0, matches));
2023 }
2024
2025
2026 /* Routine io_service_match_property_table_ool */
2027 kern_return_t is_io_service_match_property_table_ool(
2028 io_object_t service,
2029 io_buf_ptr_t matching,
2030 mach_msg_type_number_t matchingCnt,
2031 kern_return_t *result,
2032 boolean_t *matches )
2033 {
2034 kern_return_t kr;
2035 vm_offset_t data;
2036 vm_map_offset_t map_data;
2037
2038 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2039 data = CAST_DOWN(vm_offset_t, map_data);
2040
2041 if( KERN_SUCCESS == kr) {
2042 // must return success after vm_map_copyout() succeeds
2043 *result = internal_io_service_match_property_table(service,
2044 (const char *)data, matchingCnt, matches );
2045 vm_deallocate( kernel_map, data, matchingCnt );
2046 }
2047
2048 return( kr );
2049 }
2050
2051 /* Routine io_service_match_property_table_bin */
2052 kern_return_t is_io_service_match_property_table_bin(
2053 io_object_t service,
2054 io_struct_inband_t matching,
2055 mach_msg_type_number_t matchingCnt,
2056 boolean_t *matches)
2057 {
2058 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2059 }
2060
2061 static kern_return_t internal_io_service_get_matching_services(
2062 mach_port_t master_port,
2063 const char * matching,
2064 mach_msg_type_number_t matching_size,
2065 io_iterator_t *existing )
2066 {
2067 kern_return_t kr;
2068 OSObject * obj;
2069 OSDictionary * dict;
2070
2071 if( master_port != master_device_port)
2072 return( kIOReturnNotPrivileged);
2073
2074 obj = matching_size ? OSUnserializeXML(matching, matching_size)
2075 : OSUnserializeXML(matching);
2076 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2077 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2078 kr = kIOReturnSuccess;
2079 } else
2080 kr = kIOReturnBadArgument;
2081
2082 if( obj)
2083 obj->release();
2084
2085 return( kr );
2086 }
2087
2088 /* Routine io_service_get_matching_services */
2089 kern_return_t is_io_service_get_matching_services(
2090 mach_port_t master_port,
2091 io_string_t matching,
2092 io_iterator_t *existing )
2093 {
2094 return (internal_io_service_get_matching_services(master_port, matching, 0, existing));
2095 }
2096
2097 /* Routine io_service_get_matching_services_ool */
2098 kern_return_t is_io_service_get_matching_services_ool(
2099 mach_port_t master_port,
2100 io_buf_ptr_t matching,
2101 mach_msg_type_number_t matchingCnt,
2102 kern_return_t *result,
2103 io_object_t *existing )
2104 {
2105 kern_return_t kr;
2106 vm_offset_t data;
2107 vm_map_offset_t map_data;
2108
2109 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2110 data = CAST_DOWN(vm_offset_t, map_data);
2111
2112 if( KERN_SUCCESS == kr) {
2113 // must return success after vm_map_copyout() succeeds
2114 // and mig will copy out objects on success
2115 *existing = 0;
2116 *result = internal_io_service_get_matching_services(master_port,
2117 (const char *) data, matchingCnt, existing);
2118 vm_deallocate( kernel_map, data, matchingCnt );
2119 }
2120
2121 return( kr );
2122 }
2123
2124 /* Routine io_service_get_matching_services_bin */
2125 kern_return_t is_io_service_get_matching_services_bin(
2126 mach_port_t master_port,
2127 io_struct_inband_t matching,
2128 mach_msg_type_number_t matchingCnt,
2129 io_object_t *existing)
2130 {
2131 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2132 }
2133
2134
2135 static kern_return_t internal_io_service_get_matching_service(
2136 mach_port_t master_port,
2137 const char * matching,
2138 mach_msg_type_number_t matching_size,
2139 io_service_t *service )
2140 {
2141 kern_return_t kr;
2142 OSObject * obj;
2143 OSDictionary * dict;
2144
2145 if( master_port != master_device_port)
2146 return( kIOReturnNotPrivileged);
2147
2148 obj = matching_size ? OSUnserializeXML(matching, matching_size)
2149 : OSUnserializeXML(matching);
2150 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2151 *service = IOService::copyMatchingService( dict );
2152 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2153 } else
2154 kr = kIOReturnBadArgument;
2155
2156 if( obj)
2157 obj->release();
2158
2159 return( kr );
2160 }
2161
2162 /* Routine io_service_get_matching_service */
2163 kern_return_t is_io_service_get_matching_service(
2164 mach_port_t master_port,
2165 io_string_t matching,
2166 io_service_t *service )
2167 {
2168 return (internal_io_service_get_matching_service(master_port, matching, 0, service));
2169 }
2170
2171 /* Routine io_service_get_matching_services_ool */
2172 kern_return_t is_io_service_get_matching_service_ool(
2173 mach_port_t master_port,
2174 io_buf_ptr_t matching,
2175 mach_msg_type_number_t matchingCnt,
2176 kern_return_t *result,
2177 io_object_t *service )
2178 {
2179 kern_return_t kr;
2180 vm_offset_t data;
2181 vm_map_offset_t map_data;
2182
2183 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2184 data = CAST_DOWN(vm_offset_t, map_data);
2185
2186 if( KERN_SUCCESS == kr) {
2187 // must return success after vm_map_copyout() succeeds
2188 // and mig will copy out objects on success
2189 *service = 0;
2190 *result = internal_io_service_get_matching_service(master_port,
2191 (const char *) data, matchingCnt, service );
2192 vm_deallocate( kernel_map, data, matchingCnt );
2193 }
2194
2195 return( kr );
2196 }
2197
2198 /* Routine io_service_get_matching_service_bin */
2199 kern_return_t is_io_service_get_matching_service_bin(
2200 mach_port_t master_port,
2201 io_struct_inband_t matching,
2202 mach_msg_type_number_t matchingCnt,
2203 io_object_t *service)
2204 {
2205 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2206 }
2207
2208 static kern_return_t internal_io_service_add_notification(
2209 mach_port_t master_port,
2210 io_name_t notification_type,
2211 const char * matching,
2212 size_t matching_size,
2213 mach_port_t port,
2214 void * reference,
2215 vm_size_t referenceSize,
2216 bool client64,
2217 io_object_t * notification )
2218 {
2219 IOServiceUserNotification * userNotify = 0;
2220 IONotifier * notify = 0;
2221 const OSSymbol * sym;
2225 OSDictionary * dict = 0;
2223 IOReturn err;
2224 unsigned long int userMsgType;
2225
2226 if( master_port != master_device_port)
2227 return( kIOReturnNotPrivileged);
2228
2229 do {
2230 err = kIOReturnNoResources;
2231
2232 if( !(sym = OSSymbol::withCString( notification_type )))
2233 continue;
2234
2235 if (matching_size)
2236 {
2237 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2238 }
2239 else
2240 {
2241 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching));
2242 }
2243
2244 if (!dict) {
2245 err = kIOReturnBadArgument;
2246 continue;
2247 }
2248
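     // Map the requested notification type symbol onto the Mach message ID delivered
     // to the client's port; unrecognized types fall through to kLastIOKitNotificationType.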
2249 if( (sym == gIOPublishNotification)
2250 || (sym == gIOFirstPublishNotification))
2251 userMsgType = kIOServicePublishNotificationType;
2252 else if( (sym == gIOMatchedNotification)
2253 || (sym == gIOFirstMatchNotification))
2254 userMsgType = kIOServiceMatchedNotificationType;
2255 else if( sym == gIOTerminatedNotification)
2256 userMsgType = kIOServiceTerminatedNotificationType;
2257 else
2258 userMsgType = kLastIOKitNotificationType;
2259
2260 userNotify = new IOServiceUserNotification;
2261
2262 if( userNotify && !userNotify->init( port, userMsgType,
2263 reference, referenceSize, client64)) {
2264 iokit_release_port_send(port);
2265 userNotify->release();
2266 userNotify = 0;
2267 }
2268 if( !userNotify)
2269 continue;
2270
2271 notify = IOService::addMatchingNotification( sym, dict,
2272 &userNotify->_handler, userNotify );
2273 if( notify) {
2274 *notification = userNotify;
2275 userNotify->setNotification( notify );
2276 err = kIOReturnSuccess;
2277 } else
2278 err = kIOReturnUnsupported;
2279
2280 } while( false );
2281
2282 if( sym)
2283 sym->release();
2284 if( dict)
2285 dict->release();
2286
2287 return( err );
2288 }
2289
2290
2291 /* Routine io_service_add_notification */
2292 kern_return_t is_io_service_add_notification(
2293 mach_port_t master_port,
2294 io_name_t notification_type,
2295 io_string_t matching,
2296 mach_port_t port,
2297 io_async_ref_t reference,
2298 mach_msg_type_number_t referenceCnt,
2299 io_object_t * notification )
2300 {
2301 return (internal_io_service_add_notification(master_port, notification_type,
2302 matching, 0, port, &reference[0], sizeof(io_async_ref_t),
2303 false, notification));
2304 }
2305
2306 /* Routine io_service_add_notification_64 */
2307 kern_return_t is_io_service_add_notification_64(
2308 mach_port_t master_port,
2309 io_name_t notification_type,
2310 io_string_t matching,
2311 mach_port_t wake_port,
2312 io_async_ref64_t reference,
2313 mach_msg_type_number_t referenceCnt,
2314 io_object_t *notification )
2315 {
2316 return (internal_io_service_add_notification(master_port, notification_type,
2317 matching, 0, wake_port, &reference[0], sizeof(io_async_ref64_t),
2318 true, notification));
2319 }
2320
2321 /* Routine io_service_add_notification_bin */
2322 kern_return_t is_io_service_add_notification_bin
2323 (
2324 mach_port_t master_port,
2325 io_name_t notification_type,
2326 io_struct_inband_t matching,
2327 mach_msg_type_number_t matchingCnt,
2328 mach_port_t wake_port,
2329 io_async_ref_t reference,
2330 mach_msg_type_number_t referenceCnt,
2331 io_object_t *notification)
2332 {
2333 return (internal_io_service_add_notification(master_port, notification_type,
2334 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2335 false, notification));
2336 }
2337
2338 /* Routine io_service_add_notification_bin_64 */
2339 kern_return_t is_io_service_add_notification_bin_64
2340 (
2341 mach_port_t master_port,
2342 io_name_t notification_type,
2343 io_struct_inband_t matching,
2344 mach_msg_type_number_t matchingCnt,
2345 mach_port_t wake_port,
2346 io_async_ref64_t reference,
2347 mach_msg_type_number_t referenceCnt,
2348 io_object_t *notification)
2349 {
2350 return (internal_io_service_add_notification(master_port, notification_type,
2351 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2352 true, notification));
2353 }
2354
2355 static kern_return_t internal_io_service_add_notification_ool(
2356 mach_port_t master_port,
2357 io_name_t notification_type,
2358 io_buf_ptr_t matching,
2359 mach_msg_type_number_t matchingCnt,
2360 mach_port_t wake_port,
2361 void * reference,
2362 vm_size_t referenceSize,
2363 bool client64,
2364 kern_return_t *result,
2365 io_object_t *notification )
2366 {
2367 kern_return_t kr;
2368 vm_offset_t data;
2369 vm_map_offset_t map_data;
2370
2371 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2372 data = CAST_DOWN(vm_offset_t, map_data);
2373
2374 if( KERN_SUCCESS == kr) {
2375 // must return success after vm_map_copyout() succeeds
2376 // and mig will copy out objects on success
2377 *notification = 0;
2378 *result = internal_io_service_add_notification( master_port, notification_type,
2379 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2380 vm_deallocate( kernel_map, data, matchingCnt );
2381 }
2382
2383 return( kr );
2384 }
2385
2386 /* Routine io_service_add_notification_ool */
2387 kern_return_t is_io_service_add_notification_ool(
2388 mach_port_t master_port,
2389 io_name_t notification_type,
2390 io_buf_ptr_t matching,
2391 mach_msg_type_number_t matchingCnt,
2392 mach_port_t wake_port,
2393 io_async_ref_t reference,
2394 mach_msg_type_number_t referenceCnt,
2395 kern_return_t *result,
2396 io_object_t *notification )
2397 {
2398 return (internal_io_service_add_notification_ool(master_port, notification_type,
2399 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2400 false, result, notification));
2401 }
2402
2403 /* Routine io_service_add_notification_ool_64 */
2404 kern_return_t is_io_service_add_notification_ool_64(
2405 mach_port_t master_port,
2406 io_name_t notification_type,
2407 io_buf_ptr_t matching,
2408 mach_msg_type_number_t matchingCnt,
2409 mach_port_t wake_port,
2410 io_async_ref64_t reference,
2411 mach_msg_type_number_t referenceCnt,
2412 kern_return_t *result,
2413 io_object_t *notification )
2414 {
2415 return (internal_io_service_add_notification_ool(master_port, notification_type,
2416 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2417 true, result, notification));
2418 }
2419
2420 /* Routine io_service_add_notification_old */
2421 kern_return_t is_io_service_add_notification_old(
2422 mach_port_t master_port,
2423 io_name_t notification_type,
2424 io_string_t matching,
2425 mach_port_t port,
2426 // for binary compatibility reasons, this must be natural_t for ILP32
2427 natural_t ref,
2428 io_object_t * notification )
2429 {
2430 return( is_io_service_add_notification( master_port, notification_type,
2431 matching, port, &ref, 1, notification ));
2432 }
2433
2434
2435 static kern_return_t internal_io_service_add_interest_notification(
2436 io_object_t _service,
2437 io_name_t type_of_interest,
2438 mach_port_t port,
2439 void * reference,
2440 vm_size_t referenceSize,
2441 bool client64,
2442 io_object_t * notification )
2443 {
2444
2445 IOServiceMessageUserNotification * userNotify = 0;
2446 IONotifier * notify = 0;
2447 const OSSymbol * sym;
2448 IOReturn err;
2449
2450 CHECK( IOService, _service, service );
2451
2452 err = kIOReturnNoResources;
2453 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2454
2455 userNotify = new IOServiceMessageUserNotification;
2456
2457 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2458 reference, referenceSize,
2459 kIOUserNotifyMaxMessageSize,
2460 client64 )) {
2461 iokit_release_port_send(port);
2462 userNotify->release();
2463 userNotify = 0;
2464 }
2465 if( !userNotify)
2466 continue;
2467
2468 notify = service->registerInterest( sym,
2469 &userNotify->_handler, userNotify );
2470 if( notify) {
2471 *notification = userNotify;
2472 userNotify->setNotification( notify );
2473 err = kIOReturnSuccess;
2474 } else
2475 err = kIOReturnUnsupported;
2476
2477 sym->release();
2478
2479 } while( false );
2480
2481 return( err );
2482 }
2483
2484 /* Routine io_service_add_message_notification */
2485 kern_return_t is_io_service_add_interest_notification(
2486 io_object_t service,
2487 io_name_t type_of_interest,
2488 mach_port_t port,
2489 io_async_ref_t reference,
2490 mach_msg_type_number_t referenceCnt,
2491 io_object_t * notification )
2492 {
2493 return (internal_io_service_add_interest_notification(service, type_of_interest,
2494 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2495 }
2496
2497 /* Routine io_service_add_interest_notification_64 */
2498 kern_return_t is_io_service_add_interest_notification_64(
2499 io_object_t service,
2500 io_name_t type_of_interest,
2501 mach_port_t wake_port,
2502 io_async_ref64_t reference,
2503 mach_msg_type_number_t referenceCnt,
2504 io_object_t *notification )
2505 {
2506 return (internal_io_service_add_interest_notification(service, type_of_interest,
2507 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2508 }
2509
2510
2511 /* Routine io_service_acknowledge_notification */
2512 kern_return_t is_io_service_acknowledge_notification(
2513 io_object_t _service,
2514 natural_t notify_ref,
2515 natural_t response )
2516 {
2517 CHECK( IOService, _service, service );
2518
2519 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2520 (IOOptionBits) response ));
2521
2522 }
2523
2524 /* Routine io_connect_get_semaphore */
2525 kern_return_t is_io_connect_get_notification_semaphore(
2526 io_connect_t connection,
2527 natural_t notification_type,
2528 semaphore_t *semaphore )
2529 {
2530 CHECK( IOUserClient, connection, client );
2531
2532 IOStatisticsClientCall();
2533 return( client->getNotificationSemaphore( (UInt32) notification_type,
2534 semaphore ));
2535 }
2536
2537 /* Routine io_registry_get_root_entry */
2538 kern_return_t is_io_registry_get_root_entry(
2539 mach_port_t master_port,
2540 io_object_t *root )
2541 {
2542 IORegistryEntry * entry;
2543
2544 if( master_port != master_device_port)
2545 return( kIOReturnNotPrivileged);
2546
2547 entry = IORegistryEntry::getRegistryRoot();
2548 if( entry)
2549 entry->retain();
2550 *root = entry;
2551
2552 return( kIOReturnSuccess );
2553 }
2554
2555 /* Routine io_registry_create_iterator */
2556 kern_return_t is_io_registry_create_iterator(
2557 mach_port_t master_port,
2558 io_name_t plane,
2559 uint32_t options,
2560 io_object_t *iterator )
2561 {
2562 if( master_port != master_device_port)
2563 return( kIOReturnNotPrivileged);
2564
2565 *iterator = IOUserIterator::withIterator(
2566 IORegistryIterator::iterateOver(
2567 IORegistryEntry::getPlane( plane ), options ));
2568
2569 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2570 }
2571
2572 /* Routine io_registry_entry_create_iterator */
2573 kern_return_t is_io_registry_entry_create_iterator(
2574 io_object_t registry_entry,
2575 io_name_t plane,
2576 uint32_t options,
2577 io_object_t *iterator )
2578 {
2579 CHECK( IORegistryEntry, registry_entry, entry );
2580
2581 *iterator = IOUserIterator::withIterator(
2582 IORegistryIterator::iterateOver( entry,
2583 IORegistryEntry::getPlane( plane ), options ));
2584
2585 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2586 }
2587
2588 /* Routine io_registry_iterator_enter */
2589 kern_return_t is_io_registry_iterator_enter_entry(
2590 io_object_t iterator )
2591 {
2592 CHECKLOCKED( IORegistryIterator, iterator, iter );
2593
2594 IOLockLock(oIter->lock);
2595 iter->enterEntry();
2596 IOLockUnlock(oIter->lock);
2597
2598 return( kIOReturnSuccess );
2599 }
2600
2601 /* Routine io_registry_iterator_exit */
2602 kern_return_t is_io_registry_iterator_exit_entry(
2603 io_object_t iterator )
2604 {
2605 bool didIt;
2606
2607 CHECKLOCKED( IORegistryIterator, iterator, iter );
2608
2609 IOLockLock(oIter->lock);
2610 didIt = iter->exitEntry();
2611 IOLockUnlock(oIter->lock);
2612
2613 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2614 }
2615
2616 /* Routine io_registry_entry_from_path */
2617 kern_return_t is_io_registry_entry_from_path(
2618 mach_port_t master_port,
2619 io_string_t path,
2620 io_object_t *registry_entry )
2621 {
2622 IORegistryEntry * entry;
2623
2624 if( master_port != master_device_port)
2625 return( kIOReturnNotPrivileged);
2626
2627 entry = IORegistryEntry::fromPath( path );
2628
2629 *registry_entry = entry;
2630
2631 return( kIOReturnSuccess );
2632 }
2633
2634
2635 /* Routine io_registry_entry_from_path */
2636 kern_return_t is_io_registry_entry_from_path_ool(
2637 mach_port_t master_port,
2638 io_string_inband_t path,
2639 io_buf_ptr_t path_ool,
2640 mach_msg_type_number_t path_oolCnt,
2641 kern_return_t *result,
2642 io_object_t *registry_entry)
2643 {
2644 IORegistryEntry * entry;
2645 vm_map_offset_t map_data;
2646 const char * cpath;
2647 IOReturn res;
2648 kern_return_t err;
2649
2650 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2651
2652 map_data = 0;
2653 entry = 0;
2654 res = err = KERN_SUCCESS;
2655 if (path[0]) cpath = path;
2656 else
2657 {
2658 if (!path_oolCnt) return(kIOReturnBadArgument);
2659 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2660
2661 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2662 if (KERN_SUCCESS == err)
2663 {
2664 // must return success to MIG after vm_map_copyout() succeeds; the actual status is returned in *result
2665 cpath = CAST_DOWN(const char *, map_data);
2666 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2667 }
2668 }
2669
2670 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2671 {
2672 entry = IORegistryEntry::fromPath(cpath);
2673 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2674 }
2675
2676 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2677
2678 if (KERN_SUCCESS != err) res = err;
2679 *registry_entry = entry;
2680 *result = res;
2681
2682 return (err);
2683 }
2684
2685
2686 /* Routine io_registry_entry_in_plane */
2687 kern_return_t is_io_registry_entry_in_plane(
2688 io_object_t registry_entry,
2689 io_name_t plane,
2690 boolean_t *inPlane )
2691 {
2692 CHECK( IORegistryEntry, registry_entry, entry );
2693
2694 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2695
2696 return( kIOReturnSuccess );
2697 }
2698
2699
2700 /* Routine io_registry_entry_get_path */
2701 kern_return_t is_io_registry_entry_get_path(
2702 io_object_t registry_entry,
2703 io_name_t plane,
2704 io_string_t path )
2705 {
2706 int length;
2707 CHECK( IORegistryEntry, registry_entry, entry );
2708
2709 length = sizeof( io_string_t);
2710 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2711 return( kIOReturnSuccess );
2712 else
2713 return( kIOReturnBadArgument );
2714 }
2715
2716 /* Routine io_registry_entry_get_path */
2717 kern_return_t is_io_registry_entry_get_path_ool(
2718 io_object_t registry_entry,
2719 io_name_t plane,
2720 io_string_inband_t path,
2721 io_buf_ptr_t *path_ool,
2722 mach_msg_type_number_t *path_oolCnt)
2723 {
2724 enum { kMaxPath = 16384 };
2725 IOReturn err;
2726 int length;
2727 char * buf;
2728
2729 CHECK( IORegistryEntry, registry_entry, entry );
2730
2731 *path_ool = NULL;
2732 *path_oolCnt = 0;
2733 length = sizeof(io_string_inband_t);
2734 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2735 else
2736 {
2737 length = kMaxPath;
2738 buf = IONew(char, length);
2739 if (!buf) err = kIOReturnNoMemory;
2740 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2741 else
2742 {
2743 *path_oolCnt = length;
2744 err = copyoutkdata(buf, length, path_ool);
2745 }
2746 if (buf) IODelete(buf, char, kMaxPath);
2747 }
2748
2749 return (err);
2750 }
2751
2752
2753 /* Routine io_registry_entry_get_name */
2754 kern_return_t is_io_registry_entry_get_name(
2755 io_object_t registry_entry,
2756 io_name_t name )
2757 {
2758 CHECK( IORegistryEntry, registry_entry, entry );
2759
2760 strncpy( name, entry->getName(), sizeof( io_name_t));
2761
2762 return( kIOReturnSuccess );
2763 }
2764
2765 /* Routine io_registry_entry_get_name_in_plane */
2766 kern_return_t is_io_registry_entry_get_name_in_plane(
2767 io_object_t registry_entry,
2768 io_name_t planeName,
2769 io_name_t name )
2770 {
2771 const IORegistryPlane * plane;
2772 CHECK( IORegistryEntry, registry_entry, entry );
2773
2774 if( planeName[0])
2775 plane = IORegistryEntry::getPlane( planeName );
2776 else
2777 plane = 0;
2778
2779 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2780
2781 return( kIOReturnSuccess );
2782 }
2783
2784 /* Routine io_registry_entry_get_location_in_plane */
2785 kern_return_t is_io_registry_entry_get_location_in_plane(
2786 io_object_t registry_entry,
2787 io_name_t planeName,
2788 io_name_t location )
2789 {
2790 const IORegistryPlane * plane;
2791 CHECK( IORegistryEntry, registry_entry, entry );
2792
2793 if( planeName[0])
2794 plane = IORegistryEntry::getPlane( planeName );
2795 else
2796 plane = 0;
2797
2798 const char * cstr = entry->getLocation( plane );
2799
2800 if( cstr) {
2801 strncpy( location, cstr, sizeof( io_name_t));
2802 return( kIOReturnSuccess );
2803 } else
2804 return( kIOReturnNotFound );
2805 }
2806
2807 /* Routine io_registry_entry_get_registry_entry_id */
2808 kern_return_t is_io_registry_entry_get_registry_entry_id(
2809 io_object_t registry_entry,
2810 uint64_t *entry_id )
2811 {
2812 CHECK( IORegistryEntry, registry_entry, entry );
2813
2814 *entry_id = entry->getRegistryEntryID();
2815
2816 return (kIOReturnSuccess);
2817 }
2818
2819 /* Routine io_registry_entry_get_property */
2820 kern_return_t is_io_registry_entry_get_property_bytes(
2821 io_object_t registry_entry,
2822 io_name_t property_name,
2823 io_struct_inband_t buf,
2824 mach_msg_type_number_t *dataCnt )
2825 {
2826 OSObject * obj;
2827 OSData * data;
2828 OSString * str;
2829 OSBoolean * boo;
2830 OSNumber * off;
2831 UInt64 offsetBytes;
2832 unsigned int len = 0;
2833 const void * bytes = 0;
2834 IOReturn ret = kIOReturnSuccess;
2835
2836 CHECK( IORegistryEntry, registry_entry, entry );
2837
2838 #if CONFIG_MACF
2839 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2840 return kIOReturnNotPermitted;
2841 #endif
2842
2843 obj = entry->copyProperty(property_name);
2844 if( !obj)
2845 return( kIOReturnNoResources );
2846
2847 // One day OSData will be a common container base class
2848 // until then...
2849 if( (data = OSDynamicCast( OSData, obj ))) {
2850 len = data->getLength();
2851 bytes = data->getBytesNoCopy();
2852
2853 } else if( (str = OSDynamicCast( OSString, obj ))) {
2854 len = str->getLength() + 1;
2855 bytes = str->getCStringNoCopy();
2856
2857 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2858 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2859 bytes = boo->isTrue() ? "Yes" : "No";
2860
2861 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2862 offsetBytes = off->unsigned64BitValue();
2863 len = off->numberOfBytes();
2864 bytes = &offsetBytes;
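     // On big-endian machines the significant low-order bytes of the value sit at the
     // end of offsetBytes, so the pointer is advanced past the unused leading bytes.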
2865 #ifdef __BIG_ENDIAN__
2866 bytes = (const void *)
2867 (((UInt32) bytes) + (sizeof( UInt64) - len));
2868 #endif
2869
2870 } else
2871 ret = kIOReturnBadArgument;
2872
2873 if( bytes) {
2874 if( *dataCnt < len)
2875 ret = kIOReturnIPCError;
2876 else {
2877 *dataCnt = len;
2878 bcopy( bytes, buf, len );
2879 }
2880 }
2881 obj->release();
2882
2883 return( ret );
2884 }
2885
2886
2887 /* Routine io_registry_entry_get_property */
2888 kern_return_t is_io_registry_entry_get_property(
2889 io_object_t registry_entry,
2890 io_name_t property_name,
2891 io_buf_ptr_t *properties,
2892 mach_msg_type_number_t *propertiesCnt )
2893 {
2894 kern_return_t err;
2895 vm_size_t len;
2896 OSObject * obj;
2897
2898 CHECK( IORegistryEntry, registry_entry, entry );
2899
2900 #if CONFIG_MACF
2901 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2902 return kIOReturnNotPermitted;
2903 #endif
2904
2905 obj = entry->copyProperty(property_name);
2906 if( !obj)
2907 return( kIOReturnNotFound );
2908
2909 OSSerialize * s = OSSerialize::withCapacity(4096);
2910 if( !s) {
2911 obj->release();
2912 return( kIOReturnNoMemory );
2913 }
2914
2915 if( obj->serialize( s )) {
2916 len = s->getLength();
2917 *propertiesCnt = len;
2918 err = copyoutkdata( s->text(), len, properties );
2919
2920 } else
2921 err = kIOReturnUnsupported;
2922
2923 s->release();
2924 obj->release();
2925
2926 return( err );
2927 }
2928
2929 /* Routine io_registry_entry_get_property_recursively */
2930 kern_return_t is_io_registry_entry_get_property_recursively(
2931 io_object_t registry_entry,
2932 io_name_t plane,
2933 io_name_t property_name,
2934 uint32_t options,
2935 io_buf_ptr_t *properties,
2936 mach_msg_type_number_t *propertiesCnt )
2937 {
2938 kern_return_t err;
2939 vm_size_t len;
2940 OSObject * obj;
2941
2942 CHECK( IORegistryEntry, registry_entry, entry );
2943
2944 #if CONFIG_MACF
2945 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2946 return kIOReturnNotPermitted;
2947 #endif
2948
2949 obj = entry->copyProperty( property_name,
2950 IORegistryEntry::getPlane( plane ), options);
2951 if( !obj)
2952 return( kIOReturnNotFound );
2953
2954 OSSerialize * s = OSSerialize::withCapacity(4096);
2955 if( !s) {
2956 obj->release();
2957 return( kIOReturnNoMemory );
2958 }
2959
2960 if( obj->serialize( s )) {
2961 len = s->getLength();
2962 *propertiesCnt = len;
2963 err = copyoutkdata( s->text(), len, properties );
2964
2965 } else
2966 err = kIOReturnUnsupported;
2967
2968 s->release();
2969 obj->release();
2970
2971 return( err );
2972 }
2973
2974 #if CONFIG_MACF
2975
2976 static kern_return_t
2977 filteredProperties(IORegistryEntry *entry, OSDictionary *properties, OSDictionary **filteredp)
2978 {
2979 kern_return_t err = 0;
2980 OSDictionary *filtered = NULL;
2981 OSCollectionIterator *iter = NULL;
2982 OSSymbol *key;
2983 OSObject *p;
2984 kauth_cred_t cred = kauth_cred_get();
2985
2986 if (properties == NULL)
2987 return kIOReturnUnsupported;
2988
2989 if ((iter = OSCollectionIterator::withCollection(properties)) == NULL ||
2990 (filtered = OSDictionary::withCapacity(properties->getCapacity())) == NULL) {
2991 err = kIOReturnNoMemory;
2992 goto out;
2993 }
2994
2995 while ((p = iter->getNextObject()) != NULL) {
2996 if ((key = OSDynamicCast(OSSymbol, p)) == NULL ||
2997 mac_iokit_check_get_property(cred, entry, key->getCStringNoCopy()) != 0)
2998 continue;
2999 filtered->setObject(key, properties->getObject(key));
3000 }
3001
3002 out:
3003 if (iter != NULL)
3004 iter->release();
3005 *filteredp = filtered;
3006 return err;
3007 }
3008
3009 #endif
3010
3011 /* Routine io_registry_entry_get_properties */
3012 kern_return_t is_io_registry_entry_get_properties(
3013 io_object_t registry_entry,
3014 io_buf_ptr_t *properties,
3015 mach_msg_type_number_t *propertiesCnt )
3016 {
3017 kern_return_t err = 0;
3018 vm_size_t len;
3019
3020 CHECK( IORegistryEntry, registry_entry, entry );
3021
3022 OSSerialize * s = OSSerialize::withCapacity(4096);
3023 if( !s)
3024 return( kIOReturnNoMemory );
3025
3026 if (!entry->serializeProperties(s))
3027 err = kIOReturnUnsupported;
3028
3029 #if CONFIG_MACF
3030 if (!err && mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
3031 OSObject *propobj = OSUnserializeXML(s->text(), s->getLength());
3032 OSDictionary *filteredprops = NULL;
3033 err = filteredProperties(entry, OSDynamicCast(OSDictionary, propobj), &filteredprops);
3034 if (propobj) propobj->release();
3035
3036 if (!err) {
3037 s->clearText();
3038 if (!filteredprops->serialize(s))
3039 err = kIOReturnUnsupported;
3040 }
3041 if (filteredprops != NULL)
3042 filteredprops->release();
3043 }
3044 #endif /* CONFIG_MACF */
3045
3046 if (!err) {
3047 len = s->getLength();
3048 *propertiesCnt = len;
3049 err = copyoutkdata( s->text(), len, properties );
3050 }
3051
3052 s->release();
3053 return( err );
3054 }
3055
3056 #if CONFIG_MACF
3057
3058 struct GetPropertiesEditorRef
3059 {
3060 kauth_cred_t cred;
3061 IORegistryEntry * entry;
3062 OSCollection * root;
3063 };
3064
3065 static const OSMetaClassBase *
3066 GetPropertiesEditor(void * reference,
3067 OSSerialize * s,
3068 OSCollection * container,
3069 const OSSymbol * name,
3070 const OSMetaClassBase * value)
3071 {
3072 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3073
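     // Only keys in the root (top-level) container are checked against the MAC policy;
     // nested collections are emitted once their parent key has been allowed.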
3074 if (!ref->root) ref->root = container;
3075 if (ref->root == container)
3076 {
3077 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3078 {
3079 value = 0;
3080 }
3081 }
3082 if (value) value->retain();
3083 return (value);
3084 }
3085
3086 #endif /* CONFIG_MACF */
3087
3088 /* Routine io_registry_entry_get_properties */
3089 kern_return_t is_io_registry_entry_get_properties_bin(
3090 io_object_t registry_entry,
3091 io_buf_ptr_t *properties,
3092 mach_msg_type_number_t *propertiesCnt)
3093 {
3094 kern_return_t err = kIOReturnSuccess;
3095 vm_size_t len;
3096 OSSerialize * s;
3097 OSSerialize::Editor editor = 0;
3098 void * editRef = 0;
3099
3100 CHECK(IORegistryEntry, registry_entry, entry);
3101
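     // When MACF wants properties filtered for this caller, install an editor callback
     // so keys the caller may not read are omitted while the binary serialization is built.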
3102 #if CONFIG_MACF
3103 GetPropertiesEditorRef ref;
3104 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3105 {
3106 editor = &GetPropertiesEditor;
3107 editRef = &ref;
3108 ref.cred = kauth_cred_get();
3109 ref.entry = entry;
3110 ref.root = 0;
3111 }
3112 #endif
3113
3114 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3115 if (!s) return (kIOReturnNoMemory);
3116
3117 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3118
3119 if (kIOReturnSuccess == err)
3120 {
3121 len = s->getLength();
3122 *propertiesCnt = len;
3123 err = copyoutkdata(s->text(), len, properties);
3124 }
3125 s->release();
3126
3127 return (err);
3128 }
3129
3130 /* Routine io_registry_entry_get_property_bin */
3131 kern_return_t is_io_registry_entry_get_property_bin(
3132 io_object_t registry_entry,
3133 io_name_t plane,
3134 io_name_t property_name,
3135 uint32_t options,
3136 io_buf_ptr_t *properties,
3137 mach_msg_type_number_t *propertiesCnt )
3138 {
3139 kern_return_t err;
3140 vm_size_t len;
3141 OSObject * obj;
3142 const OSSymbol * sym;
3143
3144 CHECK( IORegistryEntry, registry_entry, entry );
3145
3146 #if CONFIG_MACF
3147 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3148 return kIOReturnNotPermitted;
3149 #endif
3150
3151 if ((kIORegistryIterateRecursively & options) && plane[0])
3152 {
3153 obj = entry->copyProperty(property_name,
3154 IORegistryEntry::getPlane(plane), options);
3155 }
3156 else
3157 {
3158 obj = entry->copyProperty(property_name);
3159 }
3160
3161 if( !obj)
3162 return( kIOReturnNotFound );
3163
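     // Properties listed in gIORemoveOnReadProperties are single-read: the key is
     // removed from the registry entry once it has been copied.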
3164 sym = OSSymbol::withCString(property_name);
3165 if (sym)
3166 {
3167 if (gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3168 sym->release();
3169 }
3170
3171 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3172 if( !s) {
3173 obj->release();
3174 return( kIOReturnNoMemory );
3175 }
3176
3177 if( obj->serialize( s )) {
3178 len = s->getLength();
3179 *propertiesCnt = len;
3180 err = copyoutkdata( s->text(), len, properties );
3181
3182 } else err = kIOReturnUnsupported;
3183
3184 s->release();
3185 obj->release();
3186
3187 return( err );
3188 }
3189
3190
3191 /* Routine io_registry_entry_set_properties */
3192 kern_return_t is_io_registry_entry_set_properties
3193 (
3194 io_object_t registry_entry,
3195 io_buf_ptr_t properties,
3196 mach_msg_type_number_t propertiesCnt,
3197 kern_return_t * result)
3198 {
3199 OSObject * obj;
3200 kern_return_t err;
3201 IOReturn res;
3202 vm_offset_t data;
3203 vm_map_offset_t map_data;
3204
3205 CHECK( IORegistryEntry, registry_entry, entry );
3206
3207 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3208 return( kIOReturnMessageTooLarge);
3209
3210 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3211 data = CAST_DOWN(vm_offset_t, map_data);
3212
3213 if( KERN_SUCCESS == err) {
3214
3215 // must return success after vm_map_copyout() succeeds
3216 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3217 vm_deallocate( kernel_map, data, propertiesCnt );
3218
3219 if (!obj)
3220 res = kIOReturnBadArgument;
3221 #if CONFIG_MACF
3222 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3223 registry_entry, obj))
3224 {
3225 res = kIOReturnNotPermitted;
3226 }
3227 #endif
3228 else
3229 {
3230 res = entry->setProperties( obj );
3231 }
3232
3233 if (obj)
3234 obj->release();
3235 } else
3236 res = err;
3237
3238 *result = res;
3239 return( err );
3240 }
3241
3242 /* Routine io_registry_entry_get_child_iterator */
3243 kern_return_t is_io_registry_entry_get_child_iterator(
3244 io_object_t registry_entry,
3245 io_name_t plane,
3246 io_object_t *iterator )
3247 {
3248 CHECK( IORegistryEntry, registry_entry, entry );
3249
3250 *iterator = entry->getChildIterator(
3251 IORegistryEntry::getPlane( plane ));
3252
3253 return( kIOReturnSuccess );
3254 }
3255
3256 /* Routine io_registry_entry_get_parent_iterator */
3257 kern_return_t is_io_registry_entry_get_parent_iterator(
3258 io_object_t registry_entry,
3259 io_name_t plane,
3260 io_object_t *iterator)
3261 {
3262 CHECK( IORegistryEntry, registry_entry, entry );
3263
3264 *iterator = entry->getParentIterator(
3265 IORegistryEntry::getPlane( plane ));
3266
3267 return( kIOReturnSuccess );
3268 }
3269
3270 /* Routine io_service_get_busy_state */
3271 kern_return_t is_io_service_get_busy_state(
3272 io_object_t _service,
3273 uint32_t *busyState )
3274 {
3275 CHECK( IOService, _service, service );
3276
3277 *busyState = service->getBusyState();
3278
3279 return( kIOReturnSuccess );
3280 }
3281
3282 /* Routine io_service_get_state */
3283 kern_return_t is_io_service_get_state(
3284 io_object_t _service,
3285 uint64_t *state,
3286 uint32_t *busy_state,
3287 uint64_t *accumulated_busy_time )
3288 {
3289 CHECK( IOService, _service, service );
3290
3291 *state = service->getState();
3292 *busy_state = service->getBusyState();
3293 *accumulated_busy_time = service->getAccumulatedBusyTime();
3294
3295 return( kIOReturnSuccess );
3296 }
3297
3298 /* Routine io_service_wait_quiet */
3299 kern_return_t is_io_service_wait_quiet(
3300 io_object_t _service,
3301 mach_timespec_t wait_time )
3302 {
3303 uint64_t timeoutNS;
3304
3305 CHECK( IOService, _service, service );
3306
3307 timeoutNS = wait_time.tv_sec;
3308 timeoutNS *= kSecondScale;
3309 timeoutNS += wait_time.tv_nsec;
3310
3311 return( service->waitQuiet(timeoutNS) );
3312 }
3313
3314 /* Routine io_service_request_probe */
3315 kern_return_t is_io_service_request_probe(
3316 io_object_t _service,
3317 uint32_t options )
3318 {
3319 CHECK( IOService, _service, service );
3320
3321 return( service->requestProbe( options ));
3322 }
3323
3324 /* Routine io_service_get_authorization_id */
3325 kern_return_t is_io_service_get_authorization_id(
3326 io_object_t _service,
3327 uint64_t *authorization_id )
3328 {
3329 kern_return_t kr;
3330
3331 CHECK( IOService, _service, service );
3332
3333 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3334 kIOClientPrivilegeAdministrator );
3335 if( kIOReturnSuccess != kr)
3336 return( kr );
3337
3338 *authorization_id = service->getAuthorizationID();
3339
3340 return( kr );
3341 }
3342
3343 /* Routine io_service_set_authorization_id */
3344 kern_return_t is_io_service_set_authorization_id(
3345 io_object_t _service,
3346 uint64_t authorization_id )
3347 {
3348 CHECK( IOService, _service, service );
3349
3350 return( service->setAuthorizationID( authorization_id ) );
3351 }
3352
3353 /* Routine io_service_open_ndr */
3354 kern_return_t is_io_service_open_extended(
3355 io_object_t _service,
3356 task_t owningTask,
3357 uint32_t connect_type,
3358 NDR_record_t ndr,
3359 io_buf_ptr_t properties,
3360 mach_msg_type_number_t propertiesCnt,
3361 kern_return_t * result,
3362 io_object_t *connection )
3363 {
3364 IOUserClient * client = 0;
3365 kern_return_t err = KERN_SUCCESS;
3366 IOReturn res = kIOReturnSuccess;
3367 OSDictionary * propertiesDict = 0;
3368 bool crossEndian;
3369 bool disallowAccess;
3370
3371 CHECK( IOService, _service, service );
3372
3373 if (!owningTask) return (kIOReturnBadArgument);
3374 assert(owningTask == current_task());
3375 if (owningTask != current_task()) return (kIOReturnBadArgument);
3376
3377 do
3378 {
3379 if (properties)
3380 {
3381 OSObject * obj;
3382 vm_offset_t data;
3383 vm_map_offset_t map_data;
3384
3385 if( propertiesCnt > sizeof(io_struct_inband_t))
3386 return( kIOReturnMessageTooLarge);
3387
3388 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3389 res = err;
3390 data = CAST_DOWN(vm_offset_t, map_data);
3391 if (KERN_SUCCESS == err)
3392 {
3393 // must return success after vm_map_copyout() succeeds
3394 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3395 vm_deallocate( kernel_map, data, propertiesCnt );
3396 propertiesDict = OSDynamicCast(OSDictionary, obj);
3397 if (!propertiesDict)
3398 {
3399 res = kIOReturnBadArgument;
3400 if (obj)
3401 obj->release();
3402 }
3403 }
3404 if (kIOReturnSuccess != res)
3405 break;
3406 }
3407
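     // A client whose NDR integer representation differs from the kernel's is cross-endian;
     // its NDR record is stashed in the creation properties (kIOUserClientCrossEndianKey) and
     // the open is later refused unless the service or the user client sets
     // kIOUserClientCrossEndianCompatibleKey.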
3408 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3409 if (crossEndian)
3410 {
3411 if (!propertiesDict)
3412 propertiesDict = OSDictionary::withCapacity(4);
3413 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3414 if (data)
3415 {
3416 if (propertiesDict)
3417 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3418 data->release();
3419 }
3420 }
3421
3422 res = service->newUserClient( owningTask, (void *) owningTask,
3423 connect_type, propertiesDict, &client );
3424
3425 if (propertiesDict)
3426 propertiesDict->release();
3427
3428 if (res == kIOReturnSuccess)
3429 {
3430 assert( OSDynamicCast(IOUserClient, client) );
3431
3432 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3433 client->closed = false;
3434
3435 disallowAccess = (crossEndian
3436 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3437 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3438 if (disallowAccess) res = kIOReturnUnsupported;
3439 #if CONFIG_MACF
3440 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3441 res = kIOReturnNotPermitted;
3442 #endif
3443
3444 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3445
3446 if (kIOReturnSuccess != res)
3447 {
3448 IOStatisticsClientCall();
3449 client->clientClose();
3450 client->release();
3451 client = 0;
3452 break;
3453 }
3454 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3455 if (creatorName)
3456 {
3457 client->setProperty(kIOUserClientCreatorKey, creatorName);
3458 creatorName->release();
3459 }
3460 client->setTerminateDefer(service, false);
3461 }
3462 }
3463 while (false);
3464
3465 *connection = client;
3466 *result = res;
3467
3468 return (err);
3469 }
3470
3471 /* Routine io_service_close */
3472 kern_return_t is_io_service_close(
3473 io_object_t connection )
3474 {
3475 OSSet * mappings;
3476 if ((mappings = OSDynamicCast(OSSet, connection)))
3477 return( kIOReturnSuccess );
3478
3479 CHECK( IOUserClient, connection, client );
3480
3481 IOStatisticsClientCall();
3482
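     // client->closed is flipped exactly once via OSCompareAndSwap8, so clientClose()
     // runs at most once per connection (shared-instance clients skip the guard);
     // a repeat close is logged and ignored below.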
3483 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3484 {
3485 client->clientClose();
3486 }
3487 else
3488 {
3489 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3490 client->getRegistryEntryID(), client->getName());
3491 }
3492
3493 return( kIOReturnSuccess );
3494 }
3495
3496 /* Routine io_connect_get_service */
3497 kern_return_t is_io_connect_get_service(
3498 io_object_t connection,
3499 io_object_t *service )
3500 {
3501 IOService * theService;
3502
3503 CHECK( IOUserClient, connection, client );
3504
3505 theService = client->getService();
3506 if( theService)
3507 theService->retain();
3508
3509 *service = theService;
3510
3511 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3512 }
3513
3514 /* Routine io_connect_set_notification_port */
3515 kern_return_t is_io_connect_set_notification_port(
3516 io_object_t connection,
3517 uint32_t notification_type,
3518 mach_port_t port,
3519 uint32_t reference)
3520 {
3521 CHECK( IOUserClient, connection, client );
3522
3523 IOStatisticsClientCall();
3524 return( client->registerNotificationPort( port, notification_type,
3525 (io_user_reference_t) reference ));
3526 }
3527
3528 /* Routine io_connect_set_notification_port */
3529 kern_return_t is_io_connect_set_notification_port_64(
3530 io_object_t connection,
3531 uint32_t notification_type,
3532 mach_port_t port,
3533 io_user_reference_t reference)
3534 {
3535 CHECK( IOUserClient, connection, client );
3536
3537 IOStatisticsClientCall();
3538 return( client->registerNotificationPort( port, notification_type,
3539 reference ));
3540 }
3541
3542 /* Routine io_connect_map_memory_into_task */
3543 kern_return_t is_io_connect_map_memory_into_task
3544 (
3545 io_connect_t connection,
3546 uint32_t memory_type,
3547 task_t into_task,
3548 mach_vm_address_t *address,
3549 mach_vm_size_t *size,
3550 uint32_t flags
3551 )
3552 {
3553 IOReturn err;
3554 IOMemoryMap * map;
3555
3556 CHECK( IOUserClient, connection, client );
3557
3558 if (!into_task) return (kIOReturnBadArgument);
3559
3560 IOStatisticsClientCall();
3561 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3562
3563 if( map) {
3564 *address = map->getAddress();
3565 if( size)
3566 *size = map->getSize();
3567
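     // For shared user clients, or maps created in another task, hand the map out as a
     // send right owned by that task so it is torn down with the task; otherwise keep the
     // map in client->mappings so it is released with the connection.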
3568 if( client->sharedInstance
3569 || (into_task != current_task())) {
3570 // push a name out to the task owning the map,
3571 // so we can clean up maps
3572 mach_port_name_t name __unused =
3573 IOMachPort::makeSendRightForTask(
3574 into_task, map, IKOT_IOKIT_OBJECT );
3575
3576 } else {
3577 // keep it with the user client
3578 IOLockLock( gIOObjectPortLock);
3579 if( 0 == client->mappings)
3580 client->mappings = OSSet::withCapacity(2);
3581 if( client->mappings)
3582 client->mappings->setObject( map);
3583 IOLockUnlock( gIOObjectPortLock);
3584 map->release();
3585 }
3586 err = kIOReturnSuccess;
3587
3588 } else
3589 err = kIOReturnBadArgument;
3590
3591 return( err );
3592 }
3593
3594 /* Routine is_io_connect_map_memory */
3595 kern_return_t is_io_connect_map_memory(
3596 io_object_t connect,
3597 uint32_t type,
3598 task_t task,
3599 uint32_t * mapAddr,
3600 uint32_t * mapSize,
3601 uint32_t flags )
3602 {
3603 IOReturn err;
3604 mach_vm_address_t address;
3605 mach_vm_size_t size;
3606
3607 address = SCALAR64(*mapAddr);
3608 size = SCALAR64(*mapSize);
3609
3610 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3611
3612 *mapAddr = SCALAR32(address);
3613 *mapSize = SCALAR32(size);
3614
3615 return (err);
3616 }
3617
3618 } /* extern "C" */
3619
3620 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3621 {
3622 OSIterator * iter;
3623 IOMemoryMap * map = 0;
3624
3625 IOLockLock(gIOObjectPortLock);
3626
3627 iter = OSCollectionIterator::withCollection(mappings);
3628 if(iter)
3629 {
3630 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3631 {
3632 if(mem == map->getMemoryDescriptor())
3633 {
3634 map->retain();
3635 mappings->removeObject(map);
3636 break;
3637 }
3638 }
3639 iter->release();
3640 }
3641
3642 IOLockUnlock(gIOObjectPortLock);
3643
3644 return (map);
3645 }
3646
3647 extern "C" {
3648
3649 /* Routine io_connect_unmap_memory_from_task */
3650 kern_return_t is_io_connect_unmap_memory_from_task
3651 (
3652 io_connect_t connection,
3653 uint32_t memory_type,
3654 task_t from_task,
3655 mach_vm_address_t address)
3656 {
3657 IOReturn err;
3658 IOOptionBits options = 0;
3659 IOMemoryDescriptor * memory;
3660 IOMemoryMap * map;
3661
3662 CHECK( IOUserClient, connection, client );
3663
3664 if (!from_task) return (kIOReturnBadArgument);
3665
3666 IOStatisticsClientCall();
3667 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3668
3669 if( memory && (kIOReturnSuccess == err)) {
3670
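     // kIOMapAnywhere | kIOMapReference asks createMappingInTask() to look up the
     // client's existing mapping at 'address' rather than create a new one, so that
     // existing map object can then be unmapped and released.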
3671 options = (options & ~kIOMapUserOptionsMask)
3672 | kIOMapAnywhere | kIOMapReference;
3673
3674 map = memory->createMappingInTask( from_task, address, options );
3675 memory->release();
3676 if( map)
3677 {
3678 IOLockLock( gIOObjectPortLock);
3679 if( client->mappings)
3680 client->mappings->removeObject( map);
3681 IOLockUnlock( gIOObjectPortLock);
3682
3683 mach_port_name_t name = 0;
3684 if (from_task != current_task())
3685 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3686 if (name)
3687 {
3688 map->userClientUnmap();
3689 (void) iokit_mod_send_right( from_task, name, -2 );
3690 err = kIOReturnSuccess;
3691 }
3692 else
3693 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3694 if (from_task == current_task())
3695 map->release();
3696 }
3697 else
3698 err = kIOReturnBadArgument;
3699 }
3700
3701 return( err );
3702 }
3703
3704 kern_return_t is_io_connect_unmap_memory(
3705 io_object_t connect,
3706 uint32_t type,
3707 task_t task,
3708 uint32_t mapAddr )
3709 {
3710 IOReturn err;
3711 mach_vm_address_t address;
3712
3713 address = SCALAR64(mapAddr);
3714
3715 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3716
3717 return (err);
3718 }
3719
3720
3721 /* Routine io_connect_add_client */
3722 kern_return_t is_io_connect_add_client(
3723 io_object_t connection,
3724 io_object_t connect_to)
3725 {
3726 CHECK( IOUserClient, connection, client );
3727 CHECK( IOUserClient, connect_to, to );
3728
3729 IOStatisticsClientCall();
3730 return( client->connectClient( to ) );
3731 }
3732
3733
3734 /* Routine io_connect_set_properties */
3735 kern_return_t is_io_connect_set_properties(
3736 io_object_t connection,
3737 io_buf_ptr_t properties,
3738 mach_msg_type_number_t propertiesCnt,
3739 kern_return_t * result)
3740 {
3741 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3742 }
3743
3744 /* Routine io_user_client_method */
3745 kern_return_t is_io_connect_method_var_output
3746 (
3747 io_connect_t connection,
3748 uint32_t selector,
3749 io_scalar_inband64_t scalar_input,
3750 mach_msg_type_number_t scalar_inputCnt,
3751 io_struct_inband_t inband_input,
3752 mach_msg_type_number_t inband_inputCnt,
3753 mach_vm_address_t ool_input,
3754 mach_vm_size_t ool_input_size,
3755 io_struct_inband_t inband_output,
3756 mach_msg_type_number_t *inband_outputCnt,
3757 io_scalar_inband64_t scalar_output,
3758 mach_msg_type_number_t *scalar_outputCnt,
3759 io_buf_ptr_t *var_output,
3760 mach_msg_type_number_t *var_outputCnt
3761 )
3762 {
3763 CHECK( IOUserClient, connection, client );
3764
3765 IOExternalMethodArguments args;
3766 IOReturn ret;
3767 IOMemoryDescriptor * inputMD = 0;
3768 OSObject * structureVariableOutputData = 0;
3769
3770 bzero(&args.__reserved[0], sizeof(args.__reserved));
3771 args.version = kIOExternalMethodArgumentsCurrentVersion;
3772
3773 args.selector = selector;
3774
3775 args.asyncWakePort = MACH_PORT_NULL;
3776 args.asyncReference = 0;
3777 args.asyncReferenceCount = 0;
3778 args.structureVariableOutputData = &structureVariableOutputData;
3779
3780 args.scalarInput = scalar_input;
3781 args.scalarInputCount = scalar_inputCnt;
3782 args.structureInput = inband_input;
3783 args.structureInputSize = inband_inputCnt;
3784
3785 if (ool_input)
3786 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3787 kIODirectionOut, current_task());
3788
3789 args.structureInputDescriptor = inputMD;
3790
3791 args.scalarOutput = scalar_output;
3792 args.scalarOutputCount = *scalar_outputCnt;
3793 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3794 args.structureOutput = inband_output;
3795 args.structureOutputSize = *inband_outputCnt;
3796 args.structureOutputDescriptor = NULL;
3797 args.structureOutputDescriptorSize = 0;
3798
3799 IOStatisticsClientCall();
3800 ret = client->externalMethod( selector, &args );
3801
3802 *scalar_outputCnt = args.scalarOutputCount;
3803 *inband_outputCnt = args.structureOutputSize;
3804
3805 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3806 {
3807 OSSerialize * serialize;
3808 OSData * data;
3809 vm_size_t len;
3810
3811 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3812 {
3813 len = serialize->getLength();
3814 *var_outputCnt = len;
3815 ret = copyoutkdata(serialize->text(), len, var_output);
3816 }
3817 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3818 {
3819 len = data->getLength();
3820 *var_outputCnt = len;
3821 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3822 }
3823 else
3824 {
3825 ret = kIOReturnUnderrun;
3826 }
3827 }
3828
3829 if (inputMD)
3830 inputMD->release();
3831 if (structureVariableOutputData)
3832 structureVariableOutputData->release();
3833
3834 return (ret);
3835 }
3836
3837 /* Routine io_user_client_method */
3838 kern_return_t is_io_connect_method
3839 (
3840 io_connect_t connection,
3841 uint32_t selector,
3842 io_scalar_inband64_t scalar_input,
3843 mach_msg_type_number_t scalar_inputCnt,
3844 io_struct_inband_t inband_input,
3845 mach_msg_type_number_t inband_inputCnt,
3846 mach_vm_address_t ool_input,
3847 mach_vm_size_t ool_input_size,
3848 io_struct_inband_t inband_output,
3849 mach_msg_type_number_t *inband_outputCnt,
3850 io_scalar_inband64_t scalar_output,
3851 mach_msg_type_number_t *scalar_outputCnt,
3852 mach_vm_address_t ool_output,
3853 mach_vm_size_t *ool_output_size
3854 )
3855 {
3856 CHECK( IOUserClient, connection, client );
3857
3858 IOExternalMethodArguments args;
3859 IOReturn ret;
3860 IOMemoryDescriptor * inputMD = 0;
3861 IOMemoryDescriptor * outputMD = 0;
3862
3863 bzero(&args.__reserved[0], sizeof(args.__reserved));
3864 args.version = kIOExternalMethodArgumentsCurrentVersion;
3865
3866 args.selector = selector;
3867
3868 args.asyncWakePort = MACH_PORT_NULL;
3869 args.asyncReference = 0;
3870 args.asyncReferenceCount = 0;
3871 args.structureVariableOutputData = 0;
3872
3873 args.scalarInput = scalar_input;
3874 args.scalarInputCount = scalar_inputCnt;
3875 args.structureInput = inband_input;
3876 args.structureInputSize = inband_inputCnt;
3877
3878 if (ool_input)
3879 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3880 kIODirectionOut, current_task());
3881
3882 args.structureInputDescriptor = inputMD;
3883
3884 args.scalarOutput = scalar_output;
3885 args.scalarOutputCount = *scalar_outputCnt;
3886 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3887 args.structureOutput = inband_output;
3888 args.structureOutputSize = *inband_outputCnt;
3889
3890 if (ool_output && ool_output_size)
3891 {
3892 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3893 kIODirectionIn, current_task());
3894 }
3895
3896 args.structureOutputDescriptor = outputMD;
3897 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3898
3899 IOStatisticsClientCall();
3900 ret = client->externalMethod( selector, &args );
3901
3902 *scalar_outputCnt = args.scalarOutputCount;
3903 *inband_outputCnt = args.structureOutputSize;
3904 *ool_output_size = args.structureOutputDescriptorSize;
3905
3906 if (inputMD)
3907 inputMD->release();
3908 if (outputMD)
3909 outputMD->release();
3910
3911 return (ret);
3912 }
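/*
 * User-space view (a rough sketch, not taken from this file): IOKitLib's
 * IOConnectCallScalarMethod()/IOConnectCallStructMethod() wrappers marshal
 * their arguments into the io_connect_method MIG call serviced above, e.g.
 *
 *     uint64_t in[2] = { 1, 2 };
 *     uint64_t out[1];
 *     uint32_t outCnt = 1;
 *     IOConnectCallScalarMethod(connect, kMySelector, in, 2, out, &outCnt);
 *
 * where kMySelector is a hypothetical driver-defined selector; the values
 * arrive here in IOExternalMethodArguments for externalMethod().
 */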
3913
3914 /* Routine io_async_user_client_method */
3915 kern_return_t is_io_connect_async_method
3916 (
3917 io_connect_t connection,
3918 mach_port_t wake_port,
3919 io_async_ref64_t reference,
3920 mach_msg_type_number_t referenceCnt,
3921 uint32_t selector,
3922 io_scalar_inband64_t scalar_input,
3923 mach_msg_type_number_t scalar_inputCnt,
3924 io_struct_inband_t inband_input,
3925 mach_msg_type_number_t inband_inputCnt,
3926 mach_vm_address_t ool_input,
3927 mach_vm_size_t ool_input_size,
3928 io_struct_inband_t inband_output,
3929 mach_msg_type_number_t *inband_outputCnt,
3930 io_scalar_inband64_t scalar_output,
3931 mach_msg_type_number_t *scalar_outputCnt,
3932 mach_vm_address_t ool_output,
3933 mach_vm_size_t * ool_output_size
3934 )
3935 {
3936 CHECK( IOUserClient, connection, client );
3937
3938 IOExternalMethodArguments args;
3939 IOReturn ret;
3940 IOMemoryDescriptor * inputMD = 0;
3941 IOMemoryDescriptor * outputMD = 0;
3942
3943 bzero(&args.__reserved[0], sizeof(args.__reserved));
3944 args.version = kIOExternalMethodArgumentsCurrentVersion;
3945
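     // asyncReference[0] carries the wake port; kIOUCAsync64Flag records that the caller's
     // address map is 64-bit so the async completion can be delivered with 64-bit
     // reference fields.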
3946 reference[0] = (io_user_reference_t) wake_port;
3947 if (vm_map_is_64bit(get_task_map(current_task())))
3948 reference[0] |= kIOUCAsync64Flag;
3949
3950 args.selector = selector;
3951
3952 args.asyncWakePort = wake_port;
3953 args.asyncReference = reference;
3954 args.asyncReferenceCount = referenceCnt;
3955
3956 args.scalarInput = scalar_input;
3957 args.scalarInputCount = scalar_inputCnt;
3958 args.structureInput = inband_input;
3959 args.structureInputSize = inband_inputCnt;
3960
3961 if (ool_input)
3962 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3963 kIODirectionOut, current_task());
3964
3965 args.structureInputDescriptor = inputMD;
3966
3967 args.scalarOutput = scalar_output;
3968 args.scalarOutputCount = *scalar_outputCnt;
3969 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3970 args.structureOutput = inband_output;
3971 args.structureOutputSize = *inband_outputCnt;
3972
3973 if (ool_output)
3974 {
3975 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3976 kIODirectionIn, current_task());
3977 }
3978
3979 args.structureOutputDescriptor = outputMD;
3980 args.structureOutputDescriptorSize = *ool_output_size;
3981
3982 IOStatisticsClientCall();
3983 ret = client->externalMethod( selector, &args );
3984
3985 *inband_outputCnt = args.structureOutputSize;
3986 *ool_output_size = args.structureOutputDescriptorSize;
3987
3988 if (inputMD)
3989 inputMD->release();
3990 if (outputMD)
3991 outputMD->release();
3992
3993 return (ret);
3994 }
3995
3996 /* Routine io_connect_method_scalarI_scalarO */
3997 kern_return_t is_io_connect_method_scalarI_scalarO(
3998 io_object_t connect,
3999 uint32_t index,
4000 io_scalar_inband_t input,
4001 mach_msg_type_number_t inputCount,
4002 io_scalar_inband_t output,
4003 mach_msg_type_number_t * outputCount )
4004 {
4005 IOReturn err;
4006 uint32_t i;
4007 io_scalar_inband64_t _input;
4008 io_scalar_inband64_t _output;
4009
4010 mach_msg_type_number_t struct_outputCnt = 0;
4011 mach_vm_size_t ool_output_size = 0;
4012
4013 bzero(&_output[0], sizeof(_output));
4014 for (i = 0; i < inputCount; i++)
4015 _input[i] = SCALAR64(input[i]);
4016
4017 err = is_io_connect_method(connect, index,
4018 _input, inputCount,
4019 NULL, 0,
4020 0, 0,
4021 NULL, &struct_outputCnt,
4022 _output, outputCount,
4023 0, &ool_output_size);
4024
4025 for (i = 0; i < *outputCount; i++)
4026 output[i] = SCALAR32(_output[i]);
4027
4028 return (err);
4029 }
4030
4031 kern_return_t shim_io_connect_method_scalarI_scalarO(
4032 IOExternalMethod * method,
4033 IOService * object,
4034 const io_user_scalar_t * input,
4035 mach_msg_type_number_t inputCount,
4036 io_user_scalar_t * output,
4037 mach_msg_type_number_t * outputCount )
4038 {
4039 IOMethod func;
4040 io_scalar_inband_t _output;
4041 IOReturn err;
4042 err = kIOReturnBadArgument;
4043
4044 bzero(&_output[0], sizeof(_output));
4045 do {
4046
4047 if( inputCount != method->count0)
4048 {
4049 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4050 continue;
4051 }
4052 if( *outputCount != method->count1)
4053 {
4054 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4055 continue;
4056 }
4057
4058 func = method->func;
4059
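     // Legacy IOMethod dispatch: each 32-bit scalar input becomes an ARG32 argument and
     // the remaining slots (up to six parameters in total) are filled with pointers to
     // output scalars.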
4060 switch( inputCount) {
4061
4062 case 6:
4063 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4064 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4065 break;
4066 case 5:
4067 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4068 ARG32(input[3]), ARG32(input[4]),
4069 &_output[0] );
4070 break;
4071 case 4:
4072 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4073 ARG32(input[3]),
4074 &_output[0], &_output[1] );
4075 break;
4076 case 3:
4077 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4078 &_output[0], &_output[1], &_output[2] );
4079 break;
4080 case 2:
4081 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4082 &_output[0], &_output[1], &_output[2],
4083 &_output[3] );
4084 break;
4085 case 1:
4086 err = (object->*func)( ARG32(input[0]),
4087 &_output[0], &_output[1], &_output[2],
4088 &_output[3], &_output[4] );
4089 break;
4090 case 0:
4091 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4092 &_output[3], &_output[4], &_output[5] );
4093 break;
4094
4095 default:
4096 IOLog("%s: Bad method table\n", object->getName());
4097 }
4098 }
4099 while( false);
4100
4101 uint32_t i;
4102 for (i = 0; i < *outputCount; i++)
4103 output[i] = SCALAR32(_output[i]);
4104
4105 return( err);
4106 }
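
/*
 * Illustrative sketch (editorial assumption): the kind of legacy method-table
 * entry shim_io_connect_method_scalarI_scalarO() dispatches. A pre-Leopard
 * user client returns such an entry from getTargetAndMethodForIndex();
 * count0/count1 are the scalar input/output counts checked above. All names
 * are hypothetical.
 *
 *   IOReturn MyUserClient::doAdd(void * a, void * b, void * result,
 *                                void *, void *, void *)
 *   {
 *       *(int *) result = (int)(uintptr_t) a + (int)(uintptr_t) b;
 *       return kIOReturnSuccess;
 *   }
 *
 *   static const IOExternalMethod sMethods[] = {
 *       // object, func, flags, count0 (scalars in), count1 (scalars out)
 *       { NULL, (IOMethod) &MyUserClient::doAdd, kIOUCScalarIScalarO, 2, 1 },
 *   };
 */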
4107
4108 /* Routine io_async_method_scalarI_scalarO */
4109 kern_return_t is_io_async_method_scalarI_scalarO(
4110 io_object_t connect,
4111 mach_port_t wake_port,
4112 io_async_ref_t reference,
4113 mach_msg_type_number_t referenceCnt,
4114 uint32_t index,
4115 io_scalar_inband_t input,
4116 mach_msg_type_number_t inputCount,
4117 io_scalar_inband_t output,
4118 mach_msg_type_number_t * outputCount )
4119 {
4120 IOReturn err;
4121 uint32_t i;
4122 io_scalar_inband64_t _input;
4123 io_scalar_inband64_t _output;
4124 io_async_ref64_t _reference;
4125
4126 bzero(&_output[0], sizeof(_output));
4127 for (i = 0; i < referenceCnt; i++)
4128 _reference[i] = REF64(reference[i]);
4129
4130 mach_msg_type_number_t struct_outputCnt = 0;
4131 mach_vm_size_t ool_output_size = 0;
4132
4133 for (i = 0; i < inputCount; i++)
4134 _input[i] = SCALAR64(input[i]);
4135
4136 err = is_io_connect_async_method(connect,
4137 wake_port, _reference, referenceCnt,
4138 index,
4139 _input, inputCount,
4140 NULL, 0,
4141 0, 0,
4142 NULL, &struct_outputCnt,
4143 _output, outputCount,
4144 0, &ool_output_size);
4145
4146 for (i = 0; i < *outputCount; i++)
4147 output[i] = SCALAR32(_output[i]);
4148
4149 return (err);
4150 }
4151 /* Routine io_async_method_scalarI_structureO */
4152 kern_return_t is_io_async_method_scalarI_structureO(
4153 io_object_t connect,
4154 mach_port_t wake_port,
4155 io_async_ref_t reference,
4156 mach_msg_type_number_t referenceCnt,
4157 uint32_t index,
4158 io_scalar_inband_t input,
4159 mach_msg_type_number_t inputCount,
4160 io_struct_inband_t output,
4161 mach_msg_type_number_t * outputCount )
4162 {
4163 uint32_t i;
4164 io_scalar_inband64_t _input;
4165 io_async_ref64_t _reference;
4166
4167 for (i = 0; i < referenceCnt; i++)
4168 _reference[i] = REF64(reference[i]);
4169
4170 mach_msg_type_number_t scalar_outputCnt = 0;
4171 mach_vm_size_t ool_output_size = 0;
4172
4173 for (i = 0; i < inputCount; i++)
4174 _input[i] = SCALAR64(input[i]);
4175
4176 return (is_io_connect_async_method(connect,
4177 wake_port, _reference, referenceCnt,
4178 index,
4179 _input, inputCount,
4180 NULL, 0,
4181 0, 0,
4182 output, outputCount,
4183 NULL, &scalar_outputCnt,
4184 0, &ool_output_size));
4185 }
4186
4187 /* Routine io_async_method_scalarI_structureI */
4188 kern_return_t is_io_async_method_scalarI_structureI(
4189 io_connect_t connect,
4190 mach_port_t wake_port,
4191 io_async_ref_t reference,
4192 mach_msg_type_number_t referenceCnt,
4193 uint32_t index,
4194 io_scalar_inband_t input,
4195 mach_msg_type_number_t inputCount,
4196 io_struct_inband_t inputStruct,
4197 mach_msg_type_number_t inputStructCount )
4198 {
4199 uint32_t i;
4200 io_scalar_inband64_t _input;
4201 io_async_ref64_t _reference;
4202
4203 for (i = 0; i < referenceCnt; i++)
4204 _reference[i] = REF64(reference[i]);
4205
4206 mach_msg_type_number_t scalar_outputCnt = 0;
4207 mach_msg_type_number_t inband_outputCnt = 0;
4208 mach_vm_size_t ool_output_size = 0;
4209
4210 for (i = 0; i < inputCount; i++)
4211 _input[i] = SCALAR64(input[i]);
4212
4213 return (is_io_connect_async_method(connect,
4214 wake_port, _reference, referenceCnt,
4215 index,
4216 _input, inputCount,
4217 inputStruct, inputStructCount,
4218 0, 0,
4219 NULL, &inband_outputCnt,
4220 NULL, &scalar_outputCnt,
4221 0, &ool_output_size));
4222 }
4223
4224 /* Routine io_async_method_structureI_structureO */
4225 kern_return_t is_io_async_method_structureI_structureO(
4226 io_object_t connect,
4227 mach_port_t wake_port,
4228 io_async_ref_t reference,
4229 mach_msg_type_number_t referenceCnt,
4230 uint32_t index,
4231 io_struct_inband_t input,
4232 mach_msg_type_number_t inputCount,
4233 io_struct_inband_t output,
4234 mach_msg_type_number_t * outputCount )
4235 {
4236 uint32_t i;
4237 mach_msg_type_number_t scalar_outputCnt = 0;
4238 mach_vm_size_t ool_output_size = 0;
4239 io_async_ref64_t _reference;
4240
4241 for (i = 0; i < referenceCnt; i++)
4242 _reference[i] = REF64(reference[i]);
4243
4244 return (is_io_connect_async_method(connect,
4245 wake_port, _reference, referenceCnt,
4246 index,
4247 NULL, 0,
4248 input, inputCount,
4249 0, 0,
4250 output, outputCount,
4251 NULL, &scalar_outputCnt,
4252 0, &ool_output_size));
4253 }
4254
4255
4256 kern_return_t shim_io_async_method_scalarI_scalarO(
4257 IOExternalAsyncMethod * method,
4258 IOService * object,
4259 mach_port_t asyncWakePort,
4260 io_user_reference_t * asyncReference,
4261 uint32_t asyncReferenceCount,
4262 const io_user_scalar_t * input,
4263 mach_msg_type_number_t inputCount,
4264 io_user_scalar_t * output,
4265 mach_msg_type_number_t * outputCount )
4266 {
4267 IOAsyncMethod func;
4268 uint32_t i;
4269 io_scalar_inband_t _output;
4270 IOReturn err;
4271 io_async_ref_t reference;
4272
4273 bzero(&_output[0], sizeof(_output));
4274 for (i = 0; i < asyncReferenceCount; i++)
4275 reference[i] = REF32(asyncReference[i]);
4276
4277 err = kIOReturnBadArgument;
4278
4279 do {
4280
4281 if( inputCount != method->count0)
4282 {
4283 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4284 continue;
4285 }
4286 if( *outputCount != method->count1)
4287 {
4288 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4289 continue;
4290 }
4291
4292 func = method->func;
4293
4294 switch( inputCount) {
4295
4296 case 6:
4297 err = (object->*func)( reference,
4298 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4299 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4300 break;
4301 case 5:
4302 err = (object->*func)( reference,
4303 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4304 ARG32(input[3]), ARG32(input[4]),
4305 &_output[0] );
4306 break;
4307 case 4:
4308 err = (object->*func)( reference,
4309 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4310 ARG32(input[3]),
4311 &_output[0], &_output[1] );
4312 break;
4313 case 3:
4314 err = (object->*func)( reference,
4315 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4316 &_output[0], &_output[1], &_output[2] );
4317 break;
4318 case 2:
4319 err = (object->*func)( reference,
4320 ARG32(input[0]), ARG32(input[1]),
4321 &_output[0], &_output[1], &_output[2],
4322 &_output[3] );
4323 break;
4324 case 1:
4325 err = (object->*func)( reference,
4326 ARG32(input[0]),
4327 &_output[0], &_output[1], &_output[2],
4328 &_output[3], &_output[4] );
4329 break;
4330 case 0:
4331 err = (object->*func)( reference,
4332 &_output[0], &_output[1], &_output[2],
4333 &_output[3], &_output[4], &_output[5] );
4334 break;
4335
4336 default:
4337 IOLog("%s: Bad method table\n", object->getName());
4338 }
4339 }
4340 while( false);
4341
4342 for (i = 0; i < *outputCount; i++)
4343 output[i] = SCALAR32(_output[i]);
4344
4345 return( err);
4346 }
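
/*
 * Illustrative sketch (editorial assumption): how user space typically drives
 * the async scalar path shimmed above. The async reference carries the
 * completion callback and refcon at kIOAsyncCalloutFuncIndex/RefconIndex;
 * MyCallback, myContext and kSelector are hypothetical.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   IONotificationPortRef notifyPort = IONotificationPortCreate(kIOMasterPortDefault);
 *   mach_port_t           wakePort   = IONotificationPortGetMachPort(notifyPort);
 *
 *   uint64_t asyncRef[kOSAsyncRef64Count] = { 0 };
 *   asyncRef[kIOAsyncCalloutFuncIndex]   = (uint64_t)(uintptr_t) &MyCallback;
 *   asyncRef[kIOAsyncCalloutRefconIndex] = (uint64_t)(uintptr_t) myContext;
 *
 *   uint64_t in[1]  = { 42 };
 *   uint32_t outCnt = 0;
 *
 *   kern_return_t kr = IOConnectCallAsyncScalarMethod(connect, kSelector, wakePort,
 *                          asyncRef, kOSAsyncRef64Count,
 *                          in, 1, NULL, &outCnt);
 */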
4347
4348
4349 /* Routine io_connect_method_scalarI_structureO */
4350 kern_return_t is_io_connect_method_scalarI_structureO(
4351 io_object_t connect,
4352 uint32_t index,
4353 io_scalar_inband_t input,
4354 mach_msg_type_number_t inputCount,
4355 io_struct_inband_t output,
4356 mach_msg_type_number_t * outputCount )
4357 {
4358 uint32_t i;
4359 io_scalar_inband64_t _input;
4360
4361 mach_msg_type_number_t scalar_outputCnt = 0;
4362 mach_vm_size_t ool_output_size = 0;
4363
4364 for (i = 0; i < inputCount; i++)
4365 _input[i] = SCALAR64(input[i]);
4366
4367 return (is_io_connect_method(connect, index,
4368 _input, inputCount,
4369 NULL, 0,
4370 0, 0,
4371 output, outputCount,
4372 NULL, &scalar_outputCnt,
4373 0, &ool_output_size));
4374 }
4375
4376 kern_return_t shim_io_connect_method_scalarI_structureO(
4377
4378 IOExternalMethod * method,
4379 IOService * object,
4380 const io_user_scalar_t * input,
4381 mach_msg_type_number_t inputCount,
4382 io_struct_inband_t output,
4383 IOByteCount * outputCount )
4384 {
4385 IOMethod func;
4386 IOReturn err;
4387
4388 err = kIOReturnBadArgument;
4389
4390 do {
4391 if( inputCount != method->count0)
4392 {
4393 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4394 continue;
4395 }
4396 if( (kIOUCVariableStructureSize != method->count1)
4397 && (*outputCount != method->count1))
4398 {
4399 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4400 continue;
4401 }
4402
4403 func = method->func;
4404
4405 switch( inputCount) {
4406
4407 case 5:
4408 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4409 ARG32(input[3]), ARG32(input[4]),
4410 output );
4411 break;
4412 case 4:
4413 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4414 ARG32(input[3]),
4415 output, (void *)outputCount );
4416 break;
4417 case 3:
4418 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4419 output, (void *)outputCount, 0 );
4420 break;
4421 case 2:
4422 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4423 output, (void *)outputCount, 0, 0 );
4424 break;
4425 case 1:
4426 err = (object->*func)( ARG32(input[0]),
4427 output, (void *)outputCount, 0, 0, 0 );
4428 break;
4429 case 0:
4430 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4431 break;
4432
4433 default:
4434 IOLog("%s: Bad method table\n", object->getName());
4435 }
4436 }
4437 while( false);
4438
4439 return( err);
4440 }
4441
4442
4443 kern_return_t shim_io_async_method_scalarI_structureO(
4444 IOExternalAsyncMethod * method,
4445 IOService * object,
4446 mach_port_t asyncWakePort,
4447 io_user_reference_t * asyncReference,
4448 uint32_t asyncReferenceCount,
4449 const io_user_scalar_t * input,
4450 mach_msg_type_number_t inputCount,
4451 io_struct_inband_t output,
4452 mach_msg_type_number_t * outputCount )
4453 {
4454 IOAsyncMethod func;
4455 uint32_t i;
4456 IOReturn err;
4457 io_async_ref_t reference;
4458
4459 for (i = 0; i < asyncReferenceCount; i++)
4460 reference[i] = REF32(asyncReference[i]);
4461
4462 err = kIOReturnBadArgument;
4463 do {
4464 if( inputCount != method->count0)
4465 {
4466 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4467 continue;
4468 }
4469 if( (kIOUCVariableStructureSize != method->count1)
4470 && (*outputCount != method->count1))
4471 {
4472 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4473 continue;
4474 }
4475
4476 func = method->func;
4477
4478 switch( inputCount) {
4479
4480 case 5:
4481 err = (object->*func)( reference,
4482 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4483 ARG32(input[3]), ARG32(input[4]),
4484 output );
4485 break;
4486 case 4:
4487 err = (object->*func)( reference,
4488 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4489 ARG32(input[3]),
4490 output, (void *)outputCount );
4491 break;
4492 case 3:
4493 err = (object->*func)( reference,
4494 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4495 output, (void *)outputCount, 0 );
4496 break;
4497 case 2:
4498 err = (object->*func)( reference,
4499 ARG32(input[0]), ARG32(input[1]),
4500 output, (void *)outputCount, 0, 0 );
4501 break;
4502 case 1:
4503 err = (object->*func)( reference,
4504 ARG32(input[0]),
4505 output, (void *)outputCount, 0, 0, 0 );
4506 break;
4507 case 0:
4508 err = (object->*func)( reference,
4509 output, (void *)outputCount, 0, 0, 0, 0 );
4510 break;
4511
4512 default:
4513 IOLog("%s: Bad method table\n", object->getName());
4514 }
4515 }
4516 while( false);
4517
4518 return( err);
4519 }
4520
4521 /* Routine io_connect_method_scalarI_structureI */
4522 kern_return_t is_io_connect_method_scalarI_structureI(
4523 io_connect_t connect,
4524 uint32_t index,
4525 io_scalar_inband_t input,
4526 mach_msg_type_number_t inputCount,
4527 io_struct_inband_t inputStruct,
4528 mach_msg_type_number_t inputStructCount )
4529 {
4530 uint32_t i;
4531 io_scalar_inband64_t _input;
4532
4533 mach_msg_type_number_t scalar_outputCnt = 0;
4534 mach_msg_type_number_t inband_outputCnt = 0;
4535 mach_vm_size_t ool_output_size = 0;
4536
4537 for (i = 0; i < inputCount; i++)
4538 _input[i] = SCALAR64(input[i]);
4539
4540 return (is_io_connect_method(connect, index,
4541 _input, inputCount,
4542 inputStruct, inputStructCount,
4543 0, 0,
4544 NULL, &inband_outputCnt,
4545 NULL, &scalar_outputCnt,
4546 0, &ool_output_size));
4547 }
4548
4549 kern_return_t shim_io_connect_method_scalarI_structureI(
4550 IOExternalMethod * method,
4551 IOService * object,
4552 const io_user_scalar_t * input,
4553 mach_msg_type_number_t inputCount,
4554 io_struct_inband_t inputStruct,
4555 mach_msg_type_number_t inputStructCount )
4556 {
4557 IOMethod func;
4558 IOReturn err = kIOReturnBadArgument;
4559
4560 do
4561 {
4562 if (inputCount != method->count0)
4563 {
4564 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4565 continue;
4566 }
4567 if( (kIOUCVariableStructureSize != method->count1)
4568 && (inputStructCount != method->count1))
4569 {
4570 IOLog("%s: IOUserClient inputStructCount count mismatch\n", object->getName());
4571 continue;
4572 }
4573
4574 func = method->func;
4575
4576 switch( inputCount) {
4577
4578 case 5:
4579 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4580 ARG32(input[3]), ARG32(input[4]),
4581 inputStruct );
4582 break;
4583 case 4:
4584 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4585 ARG32(input[3]),
4586 inputStruct, (void *)(uintptr_t)inputStructCount );
4587 break;
4588 case 3:
4589 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4590 inputStruct, (void *)(uintptr_t)inputStructCount,
4591 0 );
4592 break;
4593 case 2:
4594 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4595 inputStruct, (void *)(uintptr_t)inputStructCount,
4596 0, 0 );
4597 break;
4598 case 1:
4599 err = (object->*func)( ARG32(input[0]),
4600 inputStruct, (void *)(uintptr_t)inputStructCount,
4601 0, 0, 0 );
4602 break;
4603 case 0:
4604 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4605 0, 0, 0, 0 );
4606 break;
4607
4608 default:
4609 IOLog("%s: Bad method table\n", object->getName());
4610 }
4611 }
4612 while (false);
4613
4614 return( err);
4615 }
4616
4617 kern_return_t shim_io_async_method_scalarI_structureI(
4618 IOExternalAsyncMethod * method,
4619 IOService * object,
4620 mach_port_t asyncWakePort,
4621 io_user_reference_t * asyncReference,
4622 uint32_t asyncReferenceCount,
4623 const io_user_scalar_t * input,
4624 mach_msg_type_number_t inputCount,
4625 io_struct_inband_t inputStruct,
4626 mach_msg_type_number_t inputStructCount )
4627 {
4628 IOAsyncMethod func;
4629 uint32_t i;
4630 IOReturn err = kIOReturnBadArgument;
4631 io_async_ref_t reference;
4632
4633 for (i = 0; i < asyncReferenceCount; i++)
4634 reference[i] = REF32(asyncReference[i]);
4635
4636 do
4637 {
4638 if (inputCount != method->count0)
4639 {
4640 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4641 continue;
4642 }
4643 if( (kIOUCVariableStructureSize != method->count1)
4644 && (inputStructCount != method->count1))
4645 {
4646 IOLog("%s: IOUserClient inputStructCount count mismatch\n", object->getName());
4647 continue;
4648 }
4649
4650 func = method->func;
4651
4652 switch( inputCount) {
4653
4654 case 5:
4655 err = (object->*func)( reference,
4656 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4657 ARG32(input[3]), ARG32(input[4]),
4658 inputStruct );
4659 break;
4660 case 4:
4661 err = (object->*func)( reference,
4662 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4663 ARG32(input[3]),
4664 inputStruct, (void *)(uintptr_t)inputStructCount );
4665 break;
4666 case 3:
4667 err = (object->*func)( reference,
4668 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4669 inputStruct, (void *)(uintptr_t)inputStructCount,
4670 0 );
4671 break;
4672 case 2:
4673 err = (object->*func)( reference,
4674 ARG32(input[0]), ARG32(input[1]),
4675 inputStruct, (void *)(uintptr_t)inputStructCount,
4676 0, 0 );
4677 break;
4678 case 1:
4679 err = (object->*func)( reference,
4680 ARG32(input[0]),
4681 inputStruct, (void *)(uintptr_t)inputStructCount,
4682 0, 0, 0 );
4683 break;
4684 case 0:
4685 err = (object->*func)( reference,
4686 inputStruct, (void *)(uintptr_t)inputStructCount,
4687 0, 0, 0, 0 );
4688 break;
4689
4690 default:
4691 IOLog("%s: Bad method table\n", object->getName());
4692 }
4693 }
4694 while (false);
4695
4696 return( err);
4697 }
4698
4699 /* Routine io_connect_method_structureI_structureO */
4700 kern_return_t is_io_connect_method_structureI_structureO(
4701 io_object_t connect,
4702 uint32_t index,
4703 io_struct_inband_t input,
4704 mach_msg_type_number_t inputCount,
4705 io_struct_inband_t output,
4706 mach_msg_type_number_t * outputCount )
4707 {
4708 mach_msg_type_number_t scalar_outputCnt = 0;
4709 mach_vm_size_t ool_output_size = 0;
4710
4711 return (is_io_connect_method(connect, index,
4712 NULL, 0,
4713 input, inputCount,
4714 0, 0,
4715 output, outputCount,
4716 NULL, &scalar_outputCnt,
4717 0, &ool_output_size));
4718 }
4719
4720 kern_return_t shim_io_connect_method_structureI_structureO(
4721 IOExternalMethod * method,
4722 IOService * object,
4723 io_struct_inband_t input,
4724 mach_msg_type_number_t inputCount,
4725 io_struct_inband_t output,
4726 IOByteCount * outputCount )
4727 {
4728 IOMethod func;
4729 IOReturn err = kIOReturnBadArgument;
4730
4731 do
4732 {
4733 if( (kIOUCVariableStructureSize != method->count0)
4734 && (inputCount != method->count0))
4735 {
4736 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4737 continue;
4738 }
4739 if( (kIOUCVariableStructureSize != method->count1)
4740 && (*outputCount != method->count1))
4741 {
4742 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4743 continue;
4744 }
4745
4746 func = method->func;
4747
4748 if( method->count1) {
4749 if( method->count0) {
4750 err = (object->*func)( input, output,
4751 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4752 } else {
4753 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4754 }
4755 } else {
4756 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4757 }
4758 }
4759 while( false);
4760
4761
4762 return( err);
4763 }
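
/*
 * Illustrative sketch (editorial assumption): a legacy kIOUCStructIStructO
 * entry of the kind dispatched above. kIOUCVariableStructureSize in count0 or
 * count1 skips the exact-size check, and the method then receives the actual
 * byte count as its second argument, as in the count1 == 0 branch above.
 * Names are hypothetical.
 *
 *   IOReturn MyUserClient::setConfig(void * inStruct, void * inSize,
 *                                    void *, void *, void *, void *)
 *   {
 *       IOByteCount length = (IOByteCount)(uintptr_t) inSize;
 *       return (length > 0) ? kIOReturnSuccess : kIOReturnBadArgument;
 *   }
 *
 *   static const IOExternalMethod sStructMethods[] = {
 *       // object, func, flags, count0 (struct-in size), count1 (struct-out size)
 *       { NULL, (IOMethod) &MyUserClient::setConfig,
 *         kIOUCStructIStructO, kIOUCVariableStructureSize, 0 },
 *   };
 */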
4764
4765 kern_return_t shim_io_async_method_structureI_structureO(
4766 IOExternalAsyncMethod * method,
4767 IOService * object,
4768 mach_port_t asyncWakePort,
4769 io_user_reference_t * asyncReference,
4770 uint32_t asyncReferenceCount,
4771 io_struct_inband_t input,
4772 mach_msg_type_number_t inputCount,
4773 io_struct_inband_t output,
4774 mach_msg_type_number_t * outputCount )
4775 {
4776 IOAsyncMethod func;
4777 uint32_t i;
4778 IOReturn err;
4779 io_async_ref_t reference;
4780
4781 for (i = 0; i < asyncReferenceCount; i++)
4782 reference[i] = REF32(asyncReference[i]);
4783
4784 err = kIOReturnBadArgument;
4785 do
4786 {
4787 if( (kIOUCVariableStructureSize != method->count0)
4788 && (inputCount != method->count0))
4789 {
4790 IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName());
4791 continue;
4792 }
4793 if( (kIOUCVariableStructureSize != method->count1)
4794 && (*outputCount != method->count1))
4795 {
4796 IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName());
4797 continue;
4798 }
4799
4800 func = method->func;
4801
4802 if( method->count1) {
4803 if( method->count0) {
4804 err = (object->*func)( reference,
4805 input, output,
4806 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4807 } else {
4808 err = (object->*func)( reference,
4809 output, outputCount, 0, 0, 0, 0 );
4810 }
4811 } else {
4812 err = (object->*func)( reference,
4813 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4814 }
4815 }
4816 while( false);
4817
4818 return( err);
4819 }
4820
4821 /* Routine io_catalog_send_data */
4822 kern_return_t is_io_catalog_send_data(
4823 mach_port_t master_port,
4824 uint32_t flag,
4825 io_buf_ptr_t inData,
4826 mach_msg_type_number_t inDataCount,
4827 kern_return_t * result)
4828 {
4829 OSObject * obj = 0;
4830 vm_offset_t data;
4831 kern_return_t kr = kIOReturnError;
4832
4833 //printf("io_catalog_send_data called. flag: %d\n", flag);
4834
4835 if( master_port != master_device_port)
4836 return kIOReturnNotPrivileged;
4837
4838 if( (flag != kIOCatalogRemoveKernelLinker &&
4839 flag != kIOCatalogKextdActive &&
4840 flag != kIOCatalogKextdFinishedLaunching) &&
4841 ( !inData || !inDataCount) )
4842 {
4843 return kIOReturnBadArgument;
4844 }
4845
4846 if (inData) {
4847 vm_map_offset_t map_data;
4848
4849 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4850 return( kIOReturnMessageTooLarge);
4851
4852 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4853 data = CAST_DOWN(vm_offset_t, map_data);
4854
4855 if( kr != KERN_SUCCESS)
4856 return kr;
4857
4858 // must return success after vm_map_copyout() succeeds; the vm_map_copy_t has been consumed, so a MIG error return would cause it to be destroyed a second time
4859
4860 if( inDataCount ) {
4861 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4862 vm_deallocate( kernel_map, data, inDataCount );
4863 if( !obj) {
4864 *result = kIOReturnNoMemory;
4865 return( KERN_SUCCESS);
4866 }
4867 }
4868 }
4869
4870 switch ( flag ) {
4871 case kIOCatalogResetDrivers:
4872 case kIOCatalogResetDriversNoMatch: {
4873 OSArray * array;
4874
4875 array = OSDynamicCast(OSArray, obj);
4876 if (array) {
4877 if ( !gIOCatalogue->resetAndAddDrivers(array,
4878 flag == kIOCatalogResetDrivers) ) {
4879
4880 kr = kIOReturnError;
4881 }
4882 } else {
4883 kr = kIOReturnBadArgument;
4884 }
4885 }
4886 break;
4887
4888 case kIOCatalogAddDrivers:
4889 case kIOCatalogAddDriversNoMatch: {
4890 OSArray * array;
4891
4892 array = OSDynamicCast(OSArray, obj);
4893 if ( array ) {
4894 if ( !gIOCatalogue->addDrivers( array ,
4895 flag == kIOCatalogAddDrivers) ) {
4896 kr = kIOReturnError;
4897 }
4898 }
4899 else {
4900 kr = kIOReturnBadArgument;
4901 }
4902 }
4903 break;
4904
4905 case kIOCatalogRemoveDrivers:
4906 case kIOCatalogRemoveDriversNoMatch: {
4907 OSDictionary * dict;
4908
4909 dict = OSDynamicCast(OSDictionary, obj);
4910 if ( dict ) {
4911 if ( !gIOCatalogue->removeDrivers( dict,
4912 flag == kIOCatalogRemoveDrivers ) ) {
4913 kr = kIOReturnError;
4914 }
4915 }
4916 else {
4917 kr = kIOReturnBadArgument;
4918 }
4919 }
4920 break;
4921
4922 case kIOCatalogStartMatching: {
4923 OSDictionary * dict;
4924
4925 dict = OSDynamicCast(OSDictionary, obj);
4926 if ( dict ) {
4927 if ( !gIOCatalogue->startMatching( dict ) ) {
4928 kr = kIOReturnError;
4929 }
4930 }
4931 else {
4932 kr = kIOReturnBadArgument;
4933 }
4934 }
4935 break;
4936
4937 case kIOCatalogRemoveKernelLinker:
4938 kr = KERN_NOT_SUPPORTED;
4939 break;
4940
4941 case kIOCatalogKextdActive:
4942 #if !NO_KEXTD
4943 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4944 OSKext::setKextdActive();
4945
4946 /* Dump all nonloaded startup extensions; kextd will now send them
4947 * down on request.
4948 */
4949 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4950 #endif
4951 kr = kIOReturnSuccess;
4952 break;
4953
4954 case kIOCatalogKextdFinishedLaunching: {
4955 #if !NO_KEXTD
4956 static bool clearedBusy = false;
4957
4958 if (!clearedBusy) {
4959 IOService * serviceRoot = IOService::getServiceRoot();
4960 if (serviceRoot) {
4961 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4962 serviceRoot->adjustBusy(-1);
4963 clearedBusy = true;
4964 }
4965 }
4966 #endif
4967 kr = kIOReturnSuccess;
4968 }
4969 break;
4970
4971 default:
4972 kr = kIOReturnBadArgument;
4973 break;
4974 }
4975
4976 if (obj) obj->release();
4977
4978 *result = kr;
4979 return( KERN_SUCCESS);
4980 }
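
/*
 * Illustrative sketch (editorial assumption): the usual user-space entry point
 * for this routine is IOKitLib's IOCatalogueSendData(), with the personalities
 * serialized to XML; kextd drives the kIOCatalogKextd* flags handled above.
 * "personalities" is a hypothetical CFArrayRef of driver personality
 * dictionaries, and the header providing the flag constants is assumed.
 *
 *   #include <IOKit/IOKitLib.h>
 *   #include <IOKit/IOCFSerialize.h>
 *
 *   CFDataRef xml = IOCFSerialize(personalities, kNilOptions);
 *   if (xml) {
 *       kern_return_t kr = IOCatalogueSendData(kIOMasterPortDefault,
 *                              kIOCatalogAddDrivers,
 *                              (const char *) CFDataGetBytePtr(xml),
 *                              (uint32_t) CFDataGetLength(xml));
 *       CFRelease(xml);
 *   }
 */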
4981
4982 /* Routine io_catalog_terminate */
4983 kern_return_t is_io_catalog_terminate(
4984 mach_port_t master_port,
4985 uint32_t flag,
4986 io_name_t name )
4987 {
4988 kern_return_t kr;
4989
4990 if( master_port != master_device_port )
4991 return kIOReturnNotPrivileged;
4992
4993 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
4994 kIOClientPrivilegeAdministrator );
4995 if( kIOReturnSuccess != kr)
4996 return( kr );
4997
4998 switch ( flag ) {
4999 #if !defined(SECURE_KERNEL)
5000 case kIOCatalogServiceTerminate:
5001 OSIterator * iter;
5002 IOService * service;
5003
5004 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5005 kIORegistryIterateRecursively);
5006 if ( !iter )
5007 return kIOReturnNoMemory;
5008
5009 do {
5010 iter->reset();
5011 while( (service = (IOService *)iter->getNextObject()) ) {
5012 if( service->metaCast(name)) {
5013 if ( !service->terminate( kIOServiceRequired
5014 | kIOServiceSynchronous) ) {
5015 kr = kIOReturnUnsupported;
5016 break;
5017 }
5018 }
5019 }
5020 } while( !service && !iter->isValid());
5021 iter->release();
5022 break;
5023
5024 case kIOCatalogModuleUnload:
5025 case kIOCatalogModuleTerminate:
5026 kr = gIOCatalogue->terminateDriversForModule(name,
5027 flag == kIOCatalogModuleUnload);
5028 break;
5029 #endif
5030
5031 default:
5032 kr = kIOReturnBadArgument;
5033 break;
5034 }
5035
5036 return( kr );
5037 }
5038
5039 /* Routine io_catalog_get_data */
5040 kern_return_t is_io_catalog_get_data(
5041 mach_port_t master_port,
5042 uint32_t flag,
5043 io_buf_ptr_t *outData,
5044 mach_msg_type_number_t *outDataCount)
5045 {
5046 kern_return_t kr = kIOReturnSuccess;
5047 OSSerialize * s;
5048
5049 if( master_port != master_device_port)
5050 return kIOReturnNotPrivileged;
5051
5052 //printf("io_catalog_get_data called. flag: %d\n", flag);
5053
5054 s = OSSerialize::withCapacity(4096);
5055 if ( !s )
5056 return kIOReturnNoMemory;
5057
5058 kr = gIOCatalogue->serializeData(flag, s);
5059
5060 if ( kr == kIOReturnSuccess ) {
5061 vm_offset_t data;
5062 vm_map_copy_t copy;
5063 vm_size_t size;
5064
5065 size = s->getLength();
5066 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
5067 if ( kr == kIOReturnSuccess ) {
5068 bcopy(s->text(), (void *)data, size);
5069 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5070 (vm_map_size_t)size, true, &copy);
5071 *outData = (char *)copy;
5072 *outDataCount = size;
5073 }
5074 }
5075
5076 s->release();
5077
5078 return kr;
5079 }
5080
5081 /* Routine io_catalog_get_gen_count */
5082 kern_return_t is_io_catalog_get_gen_count(
5083 mach_port_t master_port,
5084 uint32_t *genCount)
5085 {
5086 if( master_port != master_device_port)
5087 return kIOReturnNotPrivileged;
5088
5089 //printf("io_catalog_get_gen_count called.\n");
5090
5091 if ( !genCount )
5092 return kIOReturnBadArgument;
5093
5094 *genCount = gIOCatalogue->getGenerationCount();
5095
5096 return kIOReturnSuccess;
5097 }
5098
5099 /* Routine io_catalog_module_loaded.
5100 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it appears to have no remaining callers.
5101 */
5102 kern_return_t is_io_catalog_module_loaded(
5103 mach_port_t master_port,
5104 io_name_t name)
5105 {
5106 if( master_port != master_device_port)
5107 return kIOReturnNotPrivileged;
5108
5109 //printf("io_catalog_module_loaded called. name %s\n", name);
5110
5111 if ( !name )
5112 return kIOReturnBadArgument;
5113
5114 gIOCatalogue->moduleHasLoaded(name);
5115
5116 return kIOReturnSuccess;
5117 }
5118
5119 kern_return_t is_io_catalog_reset(
5120 mach_port_t master_port,
5121 uint32_t flag)
5122 {
5123 if( master_port != master_device_port)
5124 return kIOReturnNotPrivileged;
5125
5126 switch ( flag ) {
5127 case kIOCatalogResetDefault:
5128 gIOCatalogue->reset();
5129 break;
5130
5131 default:
5132 return kIOReturnBadArgument;
5133 }
5134
5135 return kIOReturnSuccess;
5136 }
5137
5138 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5139 {
5140 kern_return_t result = kIOReturnBadArgument;
5141 IOUserClient *userClient;
5142
5143 if ((userClient = OSDynamicCast(IOUserClient,
5144 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5145 IOExternalTrap *trap;
5146 IOService *target = NULL;
5147
5148 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5149
5150 if (trap && target) {
5151 IOTrap func;
5152
5153 func = trap->func;
5154
5155 if (func) {
5156 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5157 }
5158 }
5159
5160 iokit_remove_connect_reference(userClient);
5161 }
5162
5163 return result;
5164 }
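
/*
 * Illustrative sketch (editorial assumption): user space enters
 * iokit_user_client_trap() through IOKitLib's IOConnectTrap0..6(), and a
 * driver exposes traps by overriding getTargetAndTrapForIndex(). Names are
 * hypothetical.
 *
 *   // driver side
 *   static IOExternalTrap sTraps[] = {
 *       { NULL, (IOTrap) &MyUserClient::fastPathTrap },   // hypothetical trap handler
 *   };
 *
 *   IOExternalTrap * MyUserClient::getTargetAndTrapForIndex(IOService ** targetP,
 *                                                           UInt32 index)
 *   {
 *       if (index >= 1)
 *           return NULL;
 *       *targetP = this;
 *       return &sTraps[index];
 *   }
 *
 *   // user side
 *   kern_return_t kr = IOConnectTrap6(connect, 0, p1, p2, p3, p4, p5, p6);
 */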
5165
5166 } /* extern "C" */
5167
5168 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5169 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5170 {
5171 IOReturn err;
5172 IOService * object;
5173 IOByteCount structureOutputSize;
5174
5175 if (dispatch)
5176 {
5177 uint32_t count;
5178 count = dispatch->checkScalarInputCount;
5179 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5180 {
5181 return (kIOReturnBadArgument);
5182 }
5183
5184 count = dispatch->checkStructureInputSize;
5185 if ((kIOUCVariableStructureSize != count)
5186 && (count != ((args->structureInputDescriptor)
5187 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5188 {
5189 return (kIOReturnBadArgument);
5190 }
5191
5192 count = dispatch->checkScalarOutputCount;
5193 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5194 {
5195 return (kIOReturnBadArgument);
5196 }
5197
5198 count = dispatch->checkStructureOutputSize;
5199 if ((kIOUCVariableStructureSize != count)
5200 && (count != ((args->structureOutputDescriptor)
5201 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5202 {
5203 return (kIOReturnBadArgument);
5204 }
5205
5206 if (dispatch->function)
5207 err = (*dispatch->function)(target, reference, args);
5208 else
5209 err = kIOReturnNoCompletion; /* implementor can dispatch */
5210
5211 return (err);
5212 }
5213
5214
5215 // pre-Leopard APIs don't support out-of-line (ool) structs
5216 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5217 {
5218 err = kIOReturnIPCError;
5219 return (err);
5220 }
5221
5222 structureOutputSize = args->structureOutputSize;
5223
5224 if (args->asyncWakePort)
5225 {
5226 IOExternalAsyncMethod * method;
5227 object = 0;
5228 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5229 return (kIOReturnUnsupported);
5230
5231 if (kIOUCForegroundOnly & method->flags)
5232 {
5233 if (task_is_gpu_denied(current_task()))
5234 return (kIOReturnNotPermitted);
5235 }
5236
5237 switch (method->flags & kIOUCTypeMask)
5238 {
5239 case kIOUCScalarIStructI:
5240 err = shim_io_async_method_scalarI_structureI( method, object,
5241 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5242 args->scalarInput, args->scalarInputCount,
5243 (char *)args->structureInput, args->structureInputSize );
5244 break;
5245
5246 case kIOUCScalarIScalarO:
5247 err = shim_io_async_method_scalarI_scalarO( method, object,
5248 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5249 args->scalarInput, args->scalarInputCount,
5250 args->scalarOutput, &args->scalarOutputCount );
5251 break;
5252
5253 case kIOUCScalarIStructO:
5254 err = shim_io_async_method_scalarI_structureO( method, object,
5255 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5256 args->scalarInput, args->scalarInputCount,
5257 (char *) args->structureOutput, &args->structureOutputSize );
5258 break;
5259
5260
5261 case kIOUCStructIStructO:
5262 err = shim_io_async_method_structureI_structureO( method, object,
5263 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5264 (char *)args->structureInput, args->structureInputSize,
5265 (char *) args->structureOutput, &args->structureOutputSize );
5266 break;
5267
5268 default:
5269 err = kIOReturnBadArgument;
5270 break;
5271 }
5272 }
5273 else
5274 {
5275 IOExternalMethod * method;
5276 object = 0;
5277 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5278 return (kIOReturnUnsupported);
5279
5280 if (kIOUCForegroundOnly & method->flags)
5281 {
5282 if (task_is_gpu_denied(current_task()))
5283 return (kIOReturnNotPermitted);
5284 }
5285
5286 switch (method->flags & kIOUCTypeMask)
5287 {
5288 case kIOUCScalarIStructI:
5289 err = shim_io_connect_method_scalarI_structureI( method, object,
5290 args->scalarInput, args->scalarInputCount,
5291 (char *) args->structureInput, args->structureInputSize );
5292 break;
5293
5294 case kIOUCScalarIScalarO:
5295 err = shim_io_connect_method_scalarI_scalarO( method, object,
5296 args->scalarInput, args->scalarInputCount,
5297 args->scalarOutput, &args->scalarOutputCount );
5298 break;
5299
5300 case kIOUCScalarIStructO:
5301 err = shim_io_connect_method_scalarI_structureO( method, object,
5302 args->scalarInput, args->scalarInputCount,
5303 (char *) args->structureOutput, &structureOutputSize );
5304 break;
5305
5306
5307 case kIOUCStructIStructO:
5308 err = shim_io_connect_method_structureI_structureO( method, object,
5309 (char *) args->structureInput, args->structureInputSize,
5310 (char *) args->structureOutput, &structureOutputSize );
5311 break;
5312
5313 default:
5314 err = kIOReturnBadArgument;
5315 break;
5316 }
5317 }
5318
5319 args->structureOutputSize = structureOutputSize;
5320
5321 return (err);
5322 }
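
/*
 * Illustrative sketch (editorial assumption): the modern dispatch-table path
 * checked at the top of externalMethod(). A subclass supplies an
 * IOExternalMethodDispatch entry whose check* fields match the argument counts
 * verified above, then forwards to the superclass. Names are hypothetical.
 *
 *   static IOReturn sGetValue(OSObject * target, void * reference,
 *                             IOExternalMethodArguments * args)
 *   {
 *       args->scalarOutput[0] = 42;        // one scalar out, no inputs
 *       return kIOReturnSuccess;
 *   }
 *
 *   static const IOExternalMethodDispatch sDispatch[] = {
 *       // function, checkScalarInputCount, checkStructureInputSize,
 *       //           checkScalarOutputCount, checkStructureOutputSize
 *       { &sGetValue, 0, 0, 1, 0 },
 *   };
 *
 *   IOReturn MyUserClient::externalMethod(uint32_t selector,
 *       IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *       OSObject * target, void * reference)
 *   {
 *       if (selector < (sizeof(sDispatch) / sizeof(sDispatch[0]))) {
 *           dispatch = (IOExternalMethodDispatch *) &sDispatch[selector];
 *           if (!target)
 *               target = this;
 *       }
 *       return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *   }
 */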
5323
5324 #if __LP64__
5325 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5326 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5327 #else
5328 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5329 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5330 #endif
5331 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5332 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5333 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5334 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5335 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5336 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5337 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5338 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5339 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5340 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5341 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5342 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5343 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5344 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5345