1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #include <mach/sdt.h>
49
50 #if CONFIG_MACF
51
52 extern "C" {
53 #include <security/mac_framework.h>
54 };
55 #include <sys/kauth.h>
56
57 #define IOMACF_LOG 0
58
59 #endif /* CONFIG_MACF */
60
61 #include <IOKit/assert.h>
62
63 #include "IOServicePrivate.h"
64 #include "IOKitKernelInternal.h"
65
66 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
67 #define SCALAR32(x) ((uint32_t )x)
68 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
69 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
70 #define REF32(x) ((int)(x))
71
72 enum
73 {
74 kIOUCAsync0Flags = 3ULL,
75 kIOUCAsync64Flag = 1ULL,
76 kIOUCAsyncErrorLoggedFlag = 2ULL
77 };
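// These flag bits live in the low bits of the wake port value stored in
// asyncRef[kIOAsyncReservedIndex]: kIOUCAsync64Flag marks a reference from a
// 64-bit task (see setAsyncReference64 with a task argument), and
// kIOUCAsyncErrorLoggedFlag records that a failed async reply has already
// been logged, so the error is reported only once per reference.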
78
79 #if IOKITSTATS
80
81 #define IOStatisticsRegisterCounter() \
82 do { \
83 reserved->counter = IOStatistics::registerUserClient(this); \
84 } while (0)
85
86 #define IOStatisticsUnregisterCounter() \
87 do { \
88 if (reserved) \
89 IOStatistics::unregisterUserClient(reserved->counter); \
90 } while (0)
91
92 #define IOStatisticsClientCall() \
93 do { \
94 IOStatistics::countUserClientCall(client); \
95 } while (0)
96
97 #else
98
99 #define IOStatisticsRegisterCounter()
100 #define IOStatisticsUnregisterCounter()
101 #define IOStatisticsClientCall()
102
103 #endif /* IOKITSTATS */
104
105 #if DEVELOPMENT || DEBUG
106
107 #define FAKE_STACK_FRAME(a) \
108 const void ** __frameptr; \
109 const void * __retaddr; \
110 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
111 __retaddr = __frameptr[1]; \
112 __frameptr[1] = (a);
113
114 #define FAKE_STACK_FRAME_END() \
115 __frameptr[1] = __retaddr;
116
117 #else /* DEVELOPMENT || DEBUG */
118
119 #define FAKE_STACK_FRAME(a)
120 #define FAKE_STACK_FRAME_END()
121
122 #endif /* DEVELOPMENT || DEBUG */
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 // definitions we should get from osfmk
127
128 //typedef struct ipc_port * ipc_port_t;
129 typedef natural_t ipc_kobject_type_t;
130
131 #define IKOT_IOKIT_SPARE 27
132 #define IKOT_IOKIT_CONNECT 29
133 #define IKOT_IOKIT_OBJECT 30
134
135 extern "C" {
136
137 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
138 ipc_kobject_type_t type );
139
140 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
141
142 extern mach_port_name_t iokit_make_send_right( task_t task,
143 io_object_t obj, ipc_kobject_type_t type );
144
145 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
146
147 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
148
149 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
150
151 extern ipc_port_t master_device_port;
152
153 extern void iokit_retain_port( ipc_port_t port );
154 extern void iokit_release_port( ipc_port_t port );
155 extern void iokit_release_port_send( ipc_port_t port );
156
157 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
158
159 #include <mach/mach_traps.h>
160 #include <vm/vm_map.h>
161
162 } /* extern "C" */
163
164
165 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
166
167 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
168
169 class IOMachPort : public OSObject
170 {
171 OSDeclareDefaultStructors(IOMachPort)
172 public:
173 OSObject * object;
174 ipc_port_t port;
175 UInt32 mscount;
176 UInt8 holdDestroy;
177
178 static IOMachPort * portForObject( OSObject * obj,
179 ipc_kobject_type_t type );
180 static bool noMoreSendersForObject( OSObject * obj,
181 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
182 static void releasePortForObject( OSObject * obj,
183 ipc_kobject_type_t type );
184 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
185
186 static OSDictionary * dictForType( ipc_kobject_type_t type );
187
188 static mach_port_name_t makeSendRightForTask( task_t task,
189 io_object_t obj, ipc_kobject_type_t type );
190
191 virtual void free() APPLE_KEXT_OVERRIDE;
192 };
193
194 #define super OSObject
195 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
196
197 static IOLock * gIOObjectPortLock;
198
199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
200
201 // not in dictForType() for debugging ease
202 static OSDictionary * gIOObjectPorts;
203 static OSDictionary * gIOConnectPorts;
204
205 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
206 {
207 OSDictionary ** dict;
208
209 if( IKOT_IOKIT_OBJECT == type )
210 dict = &gIOObjectPorts;
211 else if( IKOT_IOKIT_CONNECT == type )
212 dict = &gIOConnectPorts;
213 else
214 return( 0 );
215
216 if( 0 == *dict)
217 *dict = OSDictionary::withCapacity( 1 );
218
219 return( *dict );
220 }
221
222 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
223 ipc_kobject_type_t type )
224 {
225 IOMachPort * inst = 0;
226 OSDictionary * dict;
227
228 IOTakeLock( gIOObjectPortLock);
229
230 do {
231
232 dict = dictForType( type );
233 if( !dict)
234 continue;
235
236 if( (inst = (IOMachPort *)
237 dict->getObject( (const OSSymbol *) obj ))) {
238 inst->mscount++;
239 inst->retain();
240 continue;
241 }
242
243 inst = new IOMachPort;
244 if( !inst || !inst->init()) {
245 if( inst) inst->release();
246 inst = 0; continue;
247 }
248
249 inst->port = iokit_alloc_object_port( obj, type );
250 if( inst->port) {
251 // retains obj
252 dict->setObject( (const OSSymbol *) obj, inst );
253 inst->mscount++;
254
255 } else {
256 inst->release();
257 inst = 0;
258 }
259
260 } while( false );
261
262 IOUnlock( gIOObjectPortLock);
263
264 return( inst );
265 }
266
267 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
268 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
269 {
270 OSDictionary * dict;
271 IOMachPort * machPort;
272 IOUserClient * uc;
273 bool destroyed = true;
274
275 IOTakeLock( gIOObjectPortLock);
276
277 if( (dict = dictForType( type ))) {
278 obj->retain();
279
280 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
281 if( machPort) {
282 destroyed = (machPort->mscount <= *mscount);
283 if (!destroyed) *mscount = machPort->mscount;
284 else
285 {
286 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
287 {
288 uc->noMoreSenders();
289 }
290 dict->removeObject( (const OSSymbol *) obj );
291 }
292 }
293 obj->release();
294 }
295
296 IOUnlock( gIOObjectPortLock);
297
298 return( destroyed );
299 }
300
301 void IOMachPort::releasePortForObject( OSObject * obj,
302 ipc_kobject_type_t type )
303 {
304 OSDictionary * dict;
305 IOMachPort * machPort;
306
307 assert(IKOT_IOKIT_CONNECT != type);
308
309 IOTakeLock( gIOObjectPortLock);
310
311 if( (dict = dictForType( type ))) {
312 obj->retain();
313 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
314 if( machPort && !machPort->holdDestroy)
315 dict->removeObject( (const OSSymbol *) obj );
316 obj->release();
317 }
318
319 IOUnlock( gIOObjectPortLock);
320 }
321
322 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
323 {
324 OSDictionary * dict;
325 IOMachPort * machPort;
326
327 IOLockLock( gIOObjectPortLock );
328
329 if( (dict = dictForType( type ))) {
330 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
331 if( machPort)
332 machPort->holdDestroy = true;
333 }
334
335 IOLockUnlock( gIOObjectPortLock );
336 }
337
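// Tear down the ports created for an object. The plain object port is simply
// released; a connect port is not destroyed here but re-keyed to the client's
// mappings object via iokit_switch_object_port(), so outstanding user
// mappings keep a live port while the user client leaves the port dictionary.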
338 void IOUserClient::destroyUserReferences( OSObject * obj )
339 {
340 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
341
342 // panther, 3160200
343 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
344
345 OSDictionary * dict;
346
347 IOTakeLock( gIOObjectPortLock);
348 obj->retain();
349
350 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
351 {
352 IOMachPort * port;
353 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
354 if (port)
355 {
356 IOUserClient * uc;
357 if ((uc = OSDynamicCast(IOUserClient, obj)))
358 {
359 uc->noMoreSenders();
360 if (uc->mappings)
361 {
362 dict->setObject((const OSSymbol *) uc->mappings, port);
363 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
364
365 uc->mappings->release();
366 uc->mappings = 0;
367 }
368 }
369 dict->removeObject( (const OSSymbol *) obj );
370 }
371 }
372 obj->release();
373 IOUnlock( gIOObjectPortLock);
374 }
375
376 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
377 io_object_t obj, ipc_kobject_type_t type )
378 {
379 return( iokit_make_send_right( task, obj, type ));
380 }
381
382 void IOMachPort::free( void )
383 {
384 if( port)
385 iokit_destroy_object_port( port );
386 super::free();
387 }
388
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390
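// IOUserIterator wraps a kernel iterator handed out to user space so that
// reset/isValid/getNextObject requests arriving over IPC are serialized by a
// private lock rather than racing on the underlying OSIterator.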
391 class IOUserIterator : public OSIterator
392 {
393 OSDeclareDefaultStructors(IOUserIterator)
394 public:
395 OSObject * userIteratorObject;
396 IOLock * lock;
397
398 static IOUserIterator * withIterator(OSIterator * iter);
399 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
400 virtual void free() APPLE_KEXT_OVERRIDE;
401
402 virtual void reset() APPLE_KEXT_OVERRIDE;
403 virtual bool isValid() APPLE_KEXT_OVERRIDE;
404 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
405 };
406
407 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
408
409 class IOUserNotification : public IOUserIterator
410 {
411 OSDeclareDefaultStructors(IOUserNotification)
412
413 #define holdNotify userIteratorObject
414
415 public:
416
417 virtual void free() APPLE_KEXT_OVERRIDE;
418
419 virtual void setNotification( IONotifier * obj );
420
421 virtual void reset() APPLE_KEXT_OVERRIDE;
422 virtual bool isValid() APPLE_KEXT_OVERRIDE;
423 };
424
425 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
426
427 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
428
429 IOUserIterator *
430 IOUserIterator::withIterator(OSIterator * iter)
431 {
432 IOUserIterator * me;
433
434 if (!iter) return (0);
435
436 me = new IOUserIterator;
437 if (me && !me->init())
438 {
439 me->release();
440 me = 0;
441 }
442 if (!me) return me;
443 me->userIteratorObject = iter;
444
445 return (me);
446 }
447
448 bool
449 IOUserIterator::init( void )
450 {
451 if (!OSObject::init()) return (false);
452
453 lock = IOLockAlloc();
454 if( !lock)
455 return( false );
456
457 return (true);
458 }
459
460 void
461 IOUserIterator::free()
462 {
463 if (userIteratorObject) userIteratorObject->release();
464 if (lock) IOLockFree(lock);
465 OSObject::free();
466 }
467
468 void
469 IOUserIterator::reset()
470 {
471 IOLockLock(lock);
472 assert(OSDynamicCast(OSIterator, userIteratorObject));
473 ((OSIterator *)userIteratorObject)->reset();
474 IOLockUnlock(lock);
475 }
476
477 bool
478 IOUserIterator::isValid()
479 {
480 bool ret;
481
482 IOLockLock(lock);
483 assert(OSDynamicCast(OSIterator, userIteratorObject));
484 ret = ((OSIterator *)userIteratorObject)->isValid();
485 IOLockUnlock(lock);
486
487 return (ret);
488 }
489
490 OSObject *
491 IOUserIterator::getNextObject()
492 {
493 OSObject * ret;
494
495 IOLockLock(lock);
496 assert(OSDynamicCast(OSIterator, userIteratorObject));
497 ret = ((OSIterator *)userIteratorObject)->getNextObject();
498 IOLockUnlock(lock);
499
500 return (ret);
501 }
502
503 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
504 extern "C" {
505
506 // functions called from osfmk/device/iokit_rpc.c
507
508 void
509 iokit_add_reference( io_object_t obj )
510 {
511 if( obj)
512 obj->retain();
513 }
514
515 void
516 iokit_remove_reference( io_object_t obj )
517 {
518 if( obj)
519 obj->release();
520 }
521
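// Connect references additionally count in-flight IPC on the user client via
// __ipc. finalizeUserReferences() defers finalization while __ipc is nonzero;
// the iokit_remove_connect_reference() that drops __ipc to zero on an
// inactive client then runs the deferred scheduleFinalize().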
522 void
523 iokit_add_connect_reference( io_object_t obj )
524 {
525 IOUserClient * uc;
526
527 if (!obj) return;
528
529 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
530
531 obj->retain();
532 }
533
534 void
535 iokit_remove_connect_reference( io_object_t obj )
536 {
537 IOUserClient * uc;
538 bool finalize = false;
539
540 if (!obj) return;
541
542 if ((uc = OSDynamicCast(IOUserClient, obj)))
543 {
544 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
545 {
546 IOLockLock(gIOObjectPortLock);
547 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
548 IOLockUnlock(gIOObjectPortLock);
549 }
550 if (finalize) uc->scheduleFinalize(true);
551 }
552
553 obj->release();
554 }
555
556 bool
557 IOUserClient::finalizeUserReferences(OSObject * obj)
558 {
559 IOUserClient * uc;
560 bool ok = true;
561
562 if ((uc = OSDynamicCast(IOUserClient, obj)))
563 {
564 IOLockLock(gIOObjectPortLock);
565 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
566 IOLockUnlock(gIOObjectPortLock);
567 }
568 return (ok);
569 }
570
571 ipc_port_t
572 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
573 {
574 IOMachPort * machPort;
575 ipc_port_t port;
576
577 if( (machPort = IOMachPort::portForObject( obj, type ))) {
578
579 port = machPort->port;
580 if( port)
581 iokit_retain_port( port );
582
583 machPort->release();
584
585 } else
586 port = NULL;
587
588 return( port );
589 }
590
591 kern_return_t
592 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
593 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
594 {
595 IOUserClient * client;
596 IOMemoryMap * map;
597 IOUserNotification * notify;
598
599 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
600 return( kIOReturnNotReady );
601
602 if( IKOT_IOKIT_CONNECT == type)
603 {
604 if( (client = OSDynamicCast( IOUserClient, obj )))
605 {
606 IOStatisticsClientCall();
607 client->clientDied();
608 }
609 }
610 else if( IKOT_IOKIT_OBJECT == type)
611 {
612 if( (map = OSDynamicCast( IOMemoryMap, obj )))
613 map->taskDied();
614 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
615 notify->setNotification( 0 );
616 }
617
618 return( kIOReturnSuccess );
619 }
620
621 }; /* extern "C" */
622
623 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
624
625 class IOServiceUserNotification : public IOUserNotification
626 {
627 OSDeclareDefaultStructors(IOServiceUserNotification)
628
629 struct PingMsg {
630 mach_msg_header_t msgHdr;
631 OSNotificationHeader64 notifyHeader;
632 };
633
634 enum { kMaxOutstanding = 1024 };
635
636 PingMsg * pingMsg;
637 vm_size_t msgSize;
638 OSArray * newSet;
639 OSObject * lastEntry;
640 bool armed;
641 bool ipcLogged;
642
643 public:
644
645 virtual bool init( mach_port_t port, natural_t type,
646 void * reference, vm_size_t referenceSize,
647 bool clientIs64 );
648 virtual void free() APPLE_KEXT_OVERRIDE;
649
650 static bool _handler( void * target,
651 void * ref, IOService * newService, IONotifier * notifier );
652 virtual bool handler( void * ref, IOService * newService );
653
654 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
655 };
656
657 class IOServiceMessageUserNotification : public IOUserNotification
658 {
659 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
660
661 struct PingMsg {
662 mach_msg_header_t msgHdr;
663 mach_msg_body_t msgBody;
664 mach_msg_port_descriptor_t ports[1];
665 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
666 };
667
668 PingMsg * pingMsg;
669 vm_size_t msgSize;
670 uint8_t clientIs64;
671 int owningPID;
672 bool ipcLogged;
673
674 public:
675
676 virtual bool init( mach_port_t port, natural_t type,
677 void * reference, vm_size_t referenceSize,
678 vm_size_t extraSize,
679 bool clientIs64 );
680
681 virtual void free() APPLE_KEXT_OVERRIDE;
682
683 static IOReturn _handler( void * target, void * ref,
684 UInt32 messageType, IOService * provider,
685 void * messageArgument, vm_size_t argSize );
686 virtual IOReturn handler( void * ref,
687 UInt32 messageType, IOService * provider,
688 void * messageArgument, vm_size_t argSize );
689
690 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
691 };
692
693 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
694
695 #undef super
696 #define super IOUserIterator
697 OSDefineMetaClass( IOUserNotification, IOUserIterator )
698 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
699
700 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
701
702 void IOUserNotification::free( void )
703 {
704 if (holdNotify)
705 {
706 assert(OSDynamicCast(IONotifier, holdNotify));
707 ((IONotifier *)holdNotify)->remove();
708 holdNotify = 0;
709 }
710 // can't be in handler now
711
712 super::free();
713 }
714
715
716 void IOUserNotification::setNotification( IONotifier * notify )
717 {
718 OSObject * previousNotify;
719
720 IOLockLock( gIOObjectPortLock);
721
722 previousNotify = holdNotify;
723 holdNotify = notify;
724
725 IOLockUnlock( gIOObjectPortLock);
726
727 if( previousNotify)
728 {
729 assert(OSDynamicCast(IONotifier, previousNotify));
730 ((IONotifier *)previousNotify)->remove();
731 }
732 }
733
734 void IOUserNotification::reset()
735 {
736 // ?
737 }
738
739 bool IOUserNotification::isValid()
740 {
741 return( true );
742 }
743
744 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
745
746 #undef super
747 #define super IOUserNotification
748 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
749
750 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
751
752 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
753 void * reference, vm_size_t referenceSize,
754 bool clientIs64 )
755 {
756 if( !super::init())
757 return( false );
758
759 newSet = OSArray::withCapacity( 1 );
760 if( !newSet)
761 return( false );
762
763 if (referenceSize > sizeof(OSAsyncReference64))
764 return( false );
765
766 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
767 pingMsg = (PingMsg *) IOMalloc( msgSize);
768 if( !pingMsg)
769 return( false );
770
771 bzero( pingMsg, msgSize);
772
773 pingMsg->msgHdr.msgh_remote_port = port;
774 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
775 MACH_MSG_TYPE_COPY_SEND /*remote*/,
776 MACH_MSG_TYPE_MAKE_SEND /*local*/);
777 pingMsg->msgHdr.msgh_size = msgSize;
778 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
779
780 pingMsg->notifyHeader.size = 0;
781 pingMsg->notifyHeader.type = type;
782 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
783
784 return( true );
785 }
786
787 void IOServiceUserNotification::free( void )
788 {
789 PingMsg * _pingMsg;
790 vm_size_t _msgSize;
791 OSArray * _newSet;
792 OSObject * _lastEntry;
793
794 _pingMsg = pingMsg;
795 _msgSize = msgSize;
796 _lastEntry = lastEntry;
797 _newSet = newSet;
798
799 super::free();
800
801 if( _pingMsg && _msgSize) {
802 if (_pingMsg->msgHdr.msgh_remote_port) {
803 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
804 }
805 IOFree(_pingMsg, _msgSize);
806 }
807
808 if( _lastEntry)
809 _lastEntry->release();
810
811 if( _newSet)
812 _newSet->release();
813 }
814
815 bool IOServiceUserNotification::_handler( void * target,
816 void * ref, IOService * newService, IONotifier * notifier )
817 {
818 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
819 }
820
821 bool IOServiceUserNotification::handler( void * ref,
822 IOService * newService )
823 {
824 unsigned int count;
825 kern_return_t kr;
826 ipc_port_t port = NULL;
827 bool sendPing = false;
828
829 IOTakeLock( lock );
830
831 count = newSet->getCount();
832 if( count < kMaxOutstanding) {
833
834 newSet->setObject( newService );
835 if( (sendPing = (armed && (0 == count))))
836 armed = false;
837 }
838
839 IOUnlock( lock );
840
841 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
842 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
843
844 if( sendPing) {
845 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
846 pingMsg->msgHdr.msgh_local_port = port;
847 else
848 pingMsg->msgHdr.msgh_local_port = NULL;
849
850 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
851 pingMsg->msgHdr.msgh_size,
852 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
853 0);
854 if( port)
855 iokit_release_port( port );
856
857 if( (KERN_SUCCESS != kr) && !ipcLogged)
858 {
859 ipcLogged = true;
860 IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
861 }
862 }
863
864 return( true );
865 }
866
867 OSObject * IOServiceUserNotification::getNextObject()
868 {
869 unsigned int count;
870 OSObject * result;
871 OSObject * releaseEntry;
872
873 IOLockLock(lock);
874
875 releaseEntry = lastEntry;
876 count = newSet->getCount();
877 if( count ) {
878 result = newSet->getObject( count - 1 );
879 result->retain();
880 newSet->removeObject( count - 1);
881 } else {
882 result = 0;
883 armed = true;
884 }
885 lastEntry = result;
886
887 IOLockUnlock(lock);
888
889 if (releaseEntry) releaseEntry->release();
890
891 return( result );
892 }
893
894 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
895
896 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
897
898 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
899
900 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
901 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
902 bool client64 )
903 {
904 if( !super::init())
905 return( false );
906
907 if (referenceSize > sizeof(OSAsyncReference64))
908 return( false );
909
910 clientIs64 = client64;
911
912 owningPID = proc_selfpid();
913
914 extraSize += sizeof(IOServiceInterestContent64);
915 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
916 pingMsg = (PingMsg *) IOMalloc( msgSize);
917 if( !pingMsg)
918 return( false );
919
920 bzero( pingMsg, msgSize);
921
922 pingMsg->msgHdr.msgh_remote_port = port;
923 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
924 | MACH_MSGH_BITS(
925 MACH_MSG_TYPE_COPY_SEND /*remote*/,
926 MACH_MSG_TYPE_MAKE_SEND /*local*/);
927 pingMsg->msgHdr.msgh_size = msgSize;
928 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
929
930 pingMsg->msgBody.msgh_descriptor_count = 1;
931
932 pingMsg->ports[0].name = 0;
933 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
934 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
935
936 pingMsg->notifyHeader.size = extraSize;
937 pingMsg->notifyHeader.type = type;
938 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
939
940 return( true );
941 }
942
943 void IOServiceMessageUserNotification::free( void )
944 {
945 PingMsg * _pingMsg;
946 vm_size_t _msgSize;
947
948 _pingMsg = pingMsg;
949 _msgSize = msgSize;
950
951 super::free();
952
953 if( _pingMsg && _msgSize) {
954 if (_pingMsg->msgHdr.msgh_remote_port) {
955 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
956 }
957 IOFree( _pingMsg, _msgSize);
958 }
959 }
960
961 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
962 UInt32 messageType, IOService * provider,
963 void * argument, vm_size_t argSize )
964 {
965 return( ((IOServiceMessageUserNotification *) target)->handler(
966 ref, messageType, provider, argument, argSize));
967 }
968
969 IOReturn IOServiceMessageUserNotification::handler( void * ref,
970 UInt32 messageType, IOService * provider,
971 void * messageArgument, vm_size_t callerArgSize )
972 {
973 enum { kLocalMsgSize = 0x100 };
974 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
975 void * allocMsg;
976 kern_return_t kr;
977 vm_size_t argSize;
978 vm_size_t thisMsgSize;
979 ipc_port_t thisPort, providerPort;
980 struct PingMsg * thisMsg;
981 IOServiceInterestContent64 * data;
982
983 if (kIOMessageCopyClientID == messageType)
984 {
985 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
986 return (kIOReturnSuccess);
987 }
988
989 if (callerArgSize == 0)
990 {
991 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
992 else argSize = sizeof(uint32_t);
993 }
994 else
995 {
996 argSize = callerArgSize;
997 if( argSize > kIOUserNotifyMaxMessageSize)
998 argSize = kIOUserNotifyMaxMessageSize;
999 }
1000
1001 // adjust message size for ipc restrictions
1002 natural_t type;
1003 type = pingMsg->notifyHeader.type;
1004 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1005 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1006 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1007
1008 thisMsgSize = msgSize
1009 + sizeof( IOServiceInterestContent64 )
1010 - sizeof( data->messageArgument)
1011 + argSize;
1012
1013 if (thisMsgSize > sizeof(stackMsg))
1014 {
1015 allocMsg = IOMalloc(thisMsgSize);
1016 if (!allocMsg) return (kIOReturnNoMemory);
1017 thisMsg = (typeof(thisMsg)) allocMsg;
1018 }
1019 else
1020 {
1021 allocMsg = 0;
1022 thisMsg = (typeof(thisMsg)) stackMsg;
1023 }
1024
1025 bcopy(pingMsg, thisMsg, msgSize);
1026 thisMsg->notifyHeader.type = type;
1027 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1028 // == pingMsg->notifyHeader.content;
1029 data->messageType = messageType;
1030
1031 if (callerArgSize == 0)
1032 {
1033 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1034 if (!clientIs64)
1035 {
1036 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1037 }
1038 }
1039 else
1040 {
1041 bcopy( messageArgument, data->messageArgument, callerArgSize );
1042 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1043 }
1044
1045 thisMsg->notifyHeader.type = type;
1046 thisMsg->msgHdr.msgh_size = thisMsgSize;
1047
1048 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1049 thisMsg->ports[0].name = providerPort;
1050 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1051 thisMsg->msgHdr.msgh_local_port = thisPort;
1052
1053 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1054 thisMsg->msgHdr.msgh_size,
1055 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1056 0);
1057 if( thisPort)
1058 iokit_release_port( thisPort );
1059 if( providerPort)
1060 iokit_release_port( providerPort );
1061
1062 if (allocMsg)
1063 IOFree(allocMsg, thisMsgSize);
1064
1065 if((KERN_SUCCESS != kr) && !ipcLogged)
1066 {
1067 ipcLogged = true;
1068 IOLog("%s: mach_msg_send_from_kernel_with_options (0x%x)\n", __PRETTY_FUNCTION__, kr );
1069 }
1070
1071 return( kIOReturnSuccess );
1072 }
1073
1074 OSObject * IOServiceMessageUserNotification::getNextObject()
1075 {
1076 return( 0 );
1077 }
1078
1079 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1080
1081 #undef super
1082 #define super IOService
1083 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1084
1085 IOLock * gIOUserClientOwnersLock;
1086
1087 void IOUserClient::initialize( void )
1088 {
1089 gIOObjectPortLock = IOLockAlloc();
1090 gIOUserClientOwnersLock = IOLockAlloc();
1091 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1092 }
1093
1094 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1095 mach_port_t wakePort,
1096 void *callback, void *refcon)
1097 {
1098 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1099 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1100 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1101 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1102 }
1103
1104 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1105 mach_port_t wakePort,
1106 mach_vm_address_t callback, io_user_reference_t refcon)
1107 {
1108 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1109 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1110 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1111 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1112 }
1113
1114 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1115 mach_port_t wakePort,
1116 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1117 {
1118 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1119 if (vm_map_is_64bit(get_task_map(task))) {
1120 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1121 }
1122 }
1123
1124 static OSDictionary * CopyConsoleUser(UInt32 uid)
1125 {
1126 OSArray * array;
1127 OSDictionary * user = 0;
1128
1129 if ((array = OSDynamicCast(OSArray,
1130 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1131 {
1132 for (unsigned int idx = 0;
1133 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1134 idx++) {
1135 OSNumber * num;
1136
1137 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1138 && (uid == num->unsigned32BitValue())) {
1139 user->retain();
1140 break;
1141 }
1142 }
1143 array->release();
1144 }
1145 return user;
1146 }
1147
1148 static OSDictionary * CopyUserOnConsole(void)
1149 {
1150 OSArray * array;
1151 OSDictionary * user = 0;
1152
1153 if ((array = OSDynamicCast(OSArray,
1154 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1155 {
1156 for (unsigned int idx = 0;
1157 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1158 idx++)
1159 {
1160 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1161 {
1162 user->retain();
1163 break;
1164 }
1165 }
1166 array->release();
1167 }
1168 return (user);
1169 }
1170
1171 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1172 IOService * service )
1173 {
1174 proc_t p;
1175
1176 p = (proc_t) get_bsdtask_info(task);
1177 if (p)
1178 {
1179 uint64_t authorizationID;
1180
1181 authorizationID = proc_uniqueid(p);
1182 if (authorizationID)
1183 {
1184 if (service->getAuthorizationID() == authorizationID)
1185 {
1186 return (kIOReturnSuccess);
1187 }
1188 }
1189 }
1190
1191 return (kIOReturnNotPermitted);
1192 }
1193
1194 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1195 const char * privilegeName )
1196 {
1197 kern_return_t kr;
1198 security_token_t token;
1199 mach_msg_type_number_t count;
1200 task_t task;
1201 OSDictionary * user;
1202 bool secureConsole;
1203
1204
1205 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1206 sizeof(kIOClientPrivilegeForeground)))
1207 {
1208 if (task_is_gpu_denied(current_task()))
1209 return (kIOReturnNotPrivileged);
1210 else
1211 return (kIOReturnSuccess);
1212 }
1213
1214 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1215 sizeof(kIOClientPrivilegeConsoleSession)))
1216 {
1217 kauth_cred_t cred;
1218 proc_t p;
1219
1220 task = (task_t) securityToken;
1221 if (!task)
1222 task = current_task();
1223 p = (proc_t) get_bsdtask_info(task);
1224 kr = kIOReturnNotPrivileged;
1225
1226 if (p && (cred = kauth_cred_proc_ref(p)))
1227 {
1228 user = CopyUserOnConsole();
1229 if (user)
1230 {
1231 OSNumber * num;
1232 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1233 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1234 {
1235 kr = kIOReturnSuccess;
1236 }
1237 user->release();
1238 }
1239 kauth_cred_unref(&cred);
1240 }
1241 return (kr);
1242 }
1243
1244 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1245 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1246 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1247 else
1248 task = (task_t)securityToken;
1249
1250 count = TASK_SECURITY_TOKEN_COUNT;
1251 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1252
1253 if (KERN_SUCCESS != kr)
1254 {}
1255 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1256 sizeof(kIOClientPrivilegeAdministrator))) {
1257 if (0 != token.val[0])
1258 kr = kIOReturnNotPrivileged;
1259 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1260 sizeof(kIOClientPrivilegeLocalUser))) {
1261 user = CopyConsoleUser(token.val[0]);
1262 if ( user )
1263 user->release();
1264 else
1265 kr = kIOReturnNotPrivileged;
1266 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1267 sizeof(kIOClientPrivilegeConsoleUser))) {
1268 user = CopyConsoleUser(token.val[0]);
1269 if ( user ) {
1270 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1271 kr = kIOReturnNotPrivileged;
1272 else if ( secureConsole ) {
1273 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1274 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1275 kr = kIOReturnNotPrivileged;
1276 }
1277 user->release();
1278 }
1279 else
1280 kr = kIOReturnNotPrivileged;
1281 } else
1282 kr = kIOReturnUnsupported;
1283
1284 return (kr);
1285 }
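// A minimal sketch of typical use from a user client subclass, assuming the
// owning task was saved in a hypothetical fOwningTask ivar by initWithTask():
//
//     if (kIOReturnSuccess != IOUserClient::clientHasPrivilege(
//             (void *) fOwningTask, kIOClientPrivilegeAdministrator))
//         return kIOReturnNotPrivileged;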
1286
1287 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1288 const char * entitlement )
1289 {
1290 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1291
1292 proc_t p = NULL;
1293 pid_t pid = 0;
1294 char procname[MAXCOMLEN + 1] = "";
1295 size_t len = 0;
1296 void *entitlements_blob = NULL;
1297 char *entitlements_data = NULL;
1298 OSObject *entitlements_obj = NULL;
1299 OSDictionary *entitlements = NULL;
1300 OSString *errorString = NULL;
1301 OSObject *value = NULL;
1302
1303 p = (proc_t)get_bsdtask_info(task);
1304 if (p == NULL)
1305 goto fail;
1306 pid = proc_pid(p);
1307 proc_name(pid, procname, (int)sizeof(procname));
1308
1309 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1310 goto fail;
1311
1312 if (len <= offsetof(CS_GenericBlob, data))
1313 goto fail;
1314
1315 /*
1316 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1317 * we'll try to parse in the kernel.
1318 */
1319 len -= offsetof(CS_GenericBlob, data);
1320 if (len > MAX_ENTITLEMENTS_LEN) {
1321 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1322 goto fail;
1323 }
1324
1325 /*
1326 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1327 * what is stored in the entitlements blob. Copy the string and
1328 * terminate it.
1329 */
1330 entitlements_data = (char *)IOMalloc(len + 1);
1331 if (entitlements_data == NULL)
1332 goto fail;
1333 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1334 entitlements_data[len] = '\0';
1335
1336 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1337 if (errorString != NULL) {
1338 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1339 goto fail;
1340 }
1341 if (entitlements_obj == NULL)
1342 goto fail;
1343
1344 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1345 if (entitlements == NULL)
1346 goto fail;
1347
1348 /* Fetch the entitlement value from the dictionary. */
1349 value = entitlements->getObject(entitlement);
1350 if (value != NULL)
1351 value->retain();
1352
1353 fail:
1354 if (entitlements_data != NULL)
1355 IOFree(entitlements_data, len + 1);
1356 if (entitlements_obj != NULL)
1357 entitlements_obj->release();
1358 if (errorString != NULL)
1359 errorString->release();
1360 return value;
1361 }
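// A minimal sketch of typical use, assuming a hypothetical entitlement key;
// the returned object is retained here and must be released by the caller:
//
//     OSObject * obj = IOUserClient::copyClientEntitlement(owningTask,
//                          "com.example.driver.allow-connect");
//     bool entitled = (kOSBooleanTrue == obj);
//     if (obj) obj->release();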
1362
1363 bool IOUserClient::init()
1364 {
1365 if (getPropertyTable() || super::init())
1366 return reserve();
1367
1368 return false;
1369 }
1370
1371 bool IOUserClient::init(OSDictionary * dictionary)
1372 {
1373 if (getPropertyTable() || super::init(dictionary))
1374 return reserve();
1375
1376 return false;
1377 }
1378
1379 bool IOUserClient::initWithTask(task_t owningTask,
1380 void * securityID,
1381 UInt32 type )
1382 {
1383 if (getPropertyTable() || super::init())
1384 return reserve();
1385
1386 return false;
1387 }
1388
1389 bool IOUserClient::initWithTask(task_t owningTask,
1390 void * securityID,
1391 UInt32 type,
1392 OSDictionary * properties )
1393 {
1394 bool ok;
1395
1396 ok = super::init( properties );
1397 ok &= initWithTask( owningTask, securityID, type );
1398
1399 return( ok );
1400 }
1401
1402 bool IOUserClient::reserve()
1403 {
1404 if(!reserved) {
1405 reserved = IONew(ExpansionData, 1);
1406 if (!reserved) {
1407 return false;
1408 }
1409 }
1410 setTerminateDefer(NULL, true);
1411 IOStatisticsRegisterCounter();
1412
1413 return true;
1414 }
1415
1416 struct IOUserClientOwner
1417 {
1418 task_t task;
1419 queue_chain_t taskLink;
1420 IOUserClient * uc;
1421 queue_chain_t ucLink;
1422 };
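// Each IOUserClientOwner links one task to one user client and is queued on
// both sides at once: on the client's owners list via ucLink and on the
// task's io_user_clients list via taskLink, so teardown can start from either
// noMoreSenders() or iokit_task_terminate().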
1423
1424 IOReturn
1425 IOUserClient::registerOwner(task_t task)
1426 {
1427 IOUserClientOwner * owner;
1428 IOReturn ret;
1429 bool newOwner;
1430
1431 IOLockLock(gIOUserClientOwnersLock);
1432
1433 newOwner = true;
1434 ret = kIOReturnSuccess;
1435
1436 if (!owners.next) queue_init(&owners);
1437 else
1438 {
1439 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1440 {
1441 if (task != owner->task) continue;
1442 newOwner = false;
1443 break;
1444 }
1445 }
1446 if (newOwner)
1447 {
1448 owner = IONew(IOUserClientOwner, 1);
1449 if (!owner) ret = kIOReturnNoMemory;
1450 else
1451 {
1452 owner->task = task;
1453 owner->uc = this;
1454 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1455 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1456 }
1457 }
1458
1459 IOLockUnlock(gIOUserClientOwnersLock);
1460
1461 return (ret);
1462 }
1463
1464 void
1465 IOUserClient::noMoreSenders(void)
1466 {
1467 IOUserClientOwner * owner;
1468
1469 IOLockLock(gIOUserClientOwnersLock);
1470
1471 if (owners.next)
1472 {
1473 while (!queue_empty(&owners))
1474 {
1475 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1476 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1477 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1478 IODelete(owner, IOUserClientOwner, 1);
1479 }
1480 owners.next = owners.prev = NULL;
1481 }
1482
1483 IOLockUnlock(gIOUserClientOwnersLock);
1484 }
1485
1486 extern "C" kern_return_t
1487 iokit_task_terminate(task_t task)
1488 {
1489 IOUserClientOwner * owner;
1490 IOUserClient * dead;
1491 IOUserClient * uc;
1492 queue_head_t * taskque;
1493
1494 IOLockLock(gIOUserClientOwnersLock);
1495
1496 taskque = task_io_user_clients(task);
1497 dead = NULL;
1498 while (!queue_empty(taskque))
1499 {
1500 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1501 uc = owner->uc;
1502 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1503 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1504 if (queue_empty(&uc->owners))
1505 {
1506 uc->retain();
1507 IOLog("destroying out of band connect for %s\n", uc->getName());
1508 // now using the uc queue head as a singly linked queue,
1509 // leaving .next as NULL to mark it empty
1510 uc->owners.next = NULL;
1511 uc->owners.prev = (queue_entry_t) dead;
1512 dead = uc;
1513 }
1514 IODelete(owner, IOUserClientOwner, 1);
1515 }
1516
1517 IOLockUnlock(gIOUserClientOwnersLock);
1518
1519 while (dead)
1520 {
1521 uc = dead;
1522 dead = (IOUserClient *)(void *) dead->owners.prev;
1523 uc->owners.prev = NULL;
1524 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1525 uc->release();
1526 }
1527
1528 return (KERN_SUCCESS);
1529 }
1530
1531 void IOUserClient::free()
1532 {
1533 if( mappings) mappings->release();
1534
1535 IOStatisticsUnregisterCounter();
1536
1537 assert(!owners.next);
1538 assert(!owners.prev);
1539
1540 if (reserved) IODelete(reserved, ExpansionData, 1);
1541
1542 super::free();
1543 }
1544
1545 IOReturn IOUserClient::clientDied( void )
1546 {
1547 IOReturn ret = kIOReturnNotReady;
1548
1549 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1550 {
1551 ret = clientClose();
1552 }
1553
1554 return (ret);
1555 }
1556
1557 IOReturn IOUserClient::clientClose( void )
1558 {
1559 return( kIOReturnUnsupported );
1560 }
1561
1562 IOService * IOUserClient::getService( void )
1563 {
1564 return( 0 );
1565 }
1566
1567 IOReturn IOUserClient::registerNotificationPort(
1568 mach_port_t /* port */,
1569 UInt32 /* type */,
1570 UInt32 /* refCon */)
1571 {
1572 return( kIOReturnUnsupported);
1573 }
1574
1575 IOReturn IOUserClient::registerNotificationPort(
1576 mach_port_t port,
1577 UInt32 type,
1578 io_user_reference_t refCon)
1579 {
1580 return (registerNotificationPort(port, type, (UInt32) refCon));
1581 }
1582
1583 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1584 semaphore_t * semaphore )
1585 {
1586 return( kIOReturnUnsupported);
1587 }
1588
1589 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1590 {
1591 return( kIOReturnUnsupported);
1592 }
1593
1594 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1595 IOOptionBits * options,
1596 IOMemoryDescriptor ** memory )
1597 {
1598 return( kIOReturnUnsupported);
1599 }
1600
1601 #if !__LP64__
1602 IOMemoryMap * IOUserClient::mapClientMemory(
1603 IOOptionBits type,
1604 task_t task,
1605 IOOptionBits mapFlags,
1606 IOVirtualAddress atAddress )
1607 {
1608 return (NULL);
1609 }
1610 #endif
1611
1612 IOMemoryMap * IOUserClient::mapClientMemory64(
1613 IOOptionBits type,
1614 task_t task,
1615 IOOptionBits mapFlags,
1616 mach_vm_address_t atAddress )
1617 {
1618 IOReturn err;
1619 IOOptionBits options = 0;
1620 IOMemoryDescriptor * memory;
1621 IOMemoryMap * map = 0;
1622
1623 err = clientMemoryForType( (UInt32) type, &options, &memory );
1624
1625 if( memory && (kIOReturnSuccess == err)) {
1626
1627 FAKE_STACK_FRAME(getMetaClass());
1628
1629 options = (options & ~kIOMapUserOptionsMask)
1630 | (mapFlags & kIOMapUserOptionsMask);
1631 map = memory->createMappingInTask( task, atAddress, options );
1632 memory->release();
1633
1634 FAKE_STACK_FRAME_END();
1635 }
1636
1637 return( map );
1638 }
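// A minimal sketch of the clientMemoryForType() contract, assuming a
// hypothetical subclass with an IOBufferMemoryDescriptor ivar fSharedBuffer.
// The descriptor is returned with an extra retain, which mapClientMemory64()
// drops after creating the mapping:
//
//     IOReturn MyUserClient::clientMemoryForType( UInt32 type,
//         IOOptionBits * options, IOMemoryDescriptor ** memory )
//     {
//         if ((0 != type) || !fSharedBuffer) return( kIOReturnBadArgument );
//         fSharedBuffer->retain();
//         *memory = fSharedBuffer;
//         return( kIOReturnSuccess );
//     }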
1639
1640 IOReturn IOUserClient::exportObjectToClient(task_t task,
1641 OSObject *obj, io_object_t *clientObj)
1642 {
1643 mach_port_name_t name;
1644
1645 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1646
1647 *(mach_port_name_t *)clientObj = name;
1648 return kIOReturnSuccess;
1649 }
1650
1651 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1652 {
1653 return( 0 );
1654 }
1655
1656 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1657 {
1658 return( 0 );
1659 }
1660
1661 IOExternalTrap * IOUserClient::
1662 getExternalTrapForIndex(UInt32 index)
1663 {
1664 return NULL;
1665 }
1666
1667 #pragma clang diagnostic push
1668 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1669
1670 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1671 // functions can break clients of kexts implementing getExternalMethodForIndex()
1672 IOExternalMethod * IOUserClient::
1673 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1674 {
1675 IOExternalMethod *method = getExternalMethodForIndex(index);
1676
1677 if (method)
1678 *targetP = (IOService *) method->object;
1679
1680 return method;
1681 }
1682
1683 IOExternalAsyncMethod * IOUserClient::
1684 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1685 {
1686 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1687
1688 if (method)
1689 *targetP = (IOService *) method->object;
1690
1691 return method;
1692 }
1693
1694 IOExternalTrap * IOUserClient::
1695 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1696 {
1697 IOExternalTrap *trap = getExternalTrapForIndex(index);
1698
1699 if (trap) {
1700 *targetP = trap->object;
1701 }
1702
1703 return trap;
1704 }
1705 #pragma clang diagnostic pop
1706
1707 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1708 {
1709 mach_port_t port;
1710 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1711
1712 if (MACH_PORT_NULL != port)
1713 iokit_release_port_send(port);
1714
1715 return (kIOReturnSuccess);
1716 }
1717
1718 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1719 {
1720 if (MACH_PORT_NULL != port)
1721 iokit_release_port_send(port);
1722
1723 return (kIOReturnSuccess);
1724 }
1725
1726 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1727 IOReturn result, void *args[], UInt32 numArgs)
1728 {
1729 OSAsyncReference64 reference64;
1730 io_user_reference_t args64[kMaxAsyncArgs];
1731 unsigned int idx;
1732
1733 if (numArgs > kMaxAsyncArgs)
1734 return kIOReturnMessageTooLarge;
1735
1736 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1737 reference64[idx] = REF64(reference[idx]);
1738
1739 for (idx = 0; idx < numArgs; idx++)
1740 args64[idx] = REF64(args[idx]);
1741
1742 return (sendAsyncResult64(reference64, result, args64, numArgs));
1743 }
1744
1745 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1746 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1747 {
1748 return _sendAsyncResult64(reference, result, args, numArgs, options);
1749 }
1750
1751 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1752 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1753 {
1754 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1755 }
1756
1757 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1758 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1759 {
1760 struct ReplyMsg
1761 {
1762 mach_msg_header_t msgHdr;
1763 union
1764 {
1765 struct
1766 {
1767 OSNotificationHeader notifyHdr;
1768 IOAsyncCompletionContent asyncContent;
1769 uint32_t args[kMaxAsyncArgs];
1770 } msg32;
1771 struct
1772 {
1773 OSNotificationHeader64 notifyHdr;
1774 IOAsyncCompletionContent asyncContent;
1775 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1776 } msg64;
1777 } m;
1778 };
1779 ReplyMsg replyMsg;
1780 mach_port_t replyPort;
1781 kern_return_t kr;
1782
1783 // If no reply port, do nothing.
1784 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1785 if (replyPort == MACH_PORT_NULL)
1786 return kIOReturnSuccess;
1787
1788 if (numArgs > kMaxAsyncArgs)
1789 return kIOReturnMessageTooLarge;
1790
1791 bzero(&replyMsg, sizeof(replyMsg));
1792 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1793 0 /*local*/);
1794 replyMsg.msgHdr.msgh_remote_port = replyPort;
1795 replyMsg.msgHdr.msgh_local_port = 0;
1796 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1797 if (kIOUCAsync64Flag & reference[0])
1798 {
1799 replyMsg.msgHdr.msgh_size =
1800 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1801 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1802 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1803 + numArgs * sizeof(io_user_reference_t);
1804 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1805 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1806
1807 replyMsg.m.msg64.asyncContent.result = result;
1808 if (numArgs)
1809 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1810 }
1811 else
1812 {
1813 unsigned int idx;
1814
1815 replyMsg.msgHdr.msgh_size =
1816 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1817 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1818
1819 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1820 + numArgs * sizeof(uint32_t);
1821 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1822
1823 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1824 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1825
1826 replyMsg.m.msg32.asyncContent.result = result;
1827
1828 for (idx = 0; idx < numArgs; idx++)
1829 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1830 }
1831
1832 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1833 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1834 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1835 } else {
1836 /* Fail on full queue. */
1837 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1838 replyMsg.msgHdr.msgh_size);
1839 }
1840 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1841 {
1842 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1843 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1844 }
1845 return kr;
1846 }
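// A minimal sketch of the async completion flow, assuming a hypothetical
// method that saved the caller's reference from IOExternalMethodArguments in
// an fAsyncRef ivar:
//
//     // at call time
//     bcopy(arguments->asyncReference, fAsyncRef, sizeof(OSAsyncReference64));
//
//     // later, when the operation completes
//     io_user_reference_t args[1] = { (io_user_reference_t) byteCount };
//     sendAsyncResult64(fAsyncRef, kIOReturnSuccess, args, 1);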
1847
1848
1849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1850
1851 extern "C" {
1852
1853 #define CHECK(cls,obj,out) \
1854 cls * out; \
1855 if( !(out = OSDynamicCast( cls, obj))) \
1856 return( kIOReturnBadArgument )
1857
1858 #define CHECKLOCKED(cls,obj,out) \
1859 IOUserIterator * oIter; \
1860 cls * out; \
1861 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1862 return (kIOReturnBadArgument); \
1863 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1864 return (kIOReturnBadArgument)
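// For example, CHECK( OSIterator, iterator, iter ) declares "OSIterator *
// iter", initializes it with OSDynamicCast(OSIterator, iterator), and returns
// kIOReturnBadArgument if the cast fails; the MIG routines below rely on
// these macros to validate objects received from user space.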
1865
1866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1867
1868 // Create a vm_map_copy_t or kalloc'ed data for memory
1869 // to be copied out. ipc will free after the copyout.
1870
1871 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1872 io_buf_ptr_t * buf )
1873 {
1874 kern_return_t err;
1875 vm_map_copy_t copy;
1876
1877 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1878 false /* src_destroy */, &copy);
1879
1880 assert( err == KERN_SUCCESS );
1881 if( err == KERN_SUCCESS )
1882 *buf = (char *) copy;
1883
1884 return( err );
1885 }
1886
1887 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1888
1889 /* Routine io_server_version */
1890 kern_return_t is_io_server_version(
1891 mach_port_t master_port,
1892 uint64_t *version)
1893 {
1894 *version = IOKIT_SERVER_VERSION;
1895 return (kIOReturnSuccess);
1896 }
1897
1898 /* Routine io_object_get_class */
1899 kern_return_t is_io_object_get_class(
1900 io_object_t object,
1901 io_name_t className )
1902 {
1903 const OSMetaClass* my_obj = NULL;
1904
1905 if( !object)
1906 return( kIOReturnBadArgument );
1907
1908 my_obj = object->getMetaClass();
1909 if (!my_obj) {
1910 return (kIOReturnNotFound);
1911 }
1912
1913 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1914
1915 return( kIOReturnSuccess );
1916 }
1917
1918 /* Routine io_object_get_superclass */
1919 kern_return_t is_io_object_get_superclass(
1920 mach_port_t master_port,
1921 io_name_t obj_name,
1922 io_name_t class_name)
1923 {
1924 const OSMetaClass* my_obj = NULL;
1925 const OSMetaClass* superclass = NULL;
1926 const OSSymbol *my_name = NULL;
1927 const char *my_cstr = NULL;
1928
1929 if (!obj_name || !class_name)
1930 return (kIOReturnBadArgument);
1931
1932 if( master_port != master_device_port)
1933 return( kIOReturnNotPrivileged);
1934
1935 my_name = OSSymbol::withCString(obj_name);
1936
1937 if (my_name) {
1938 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1939 my_name->release();
1940 }
1941 if (my_obj) {
1942 superclass = my_obj->getSuperClass();
1943 }
1944
1945 if (!superclass) {
1946 return( kIOReturnNotFound );
1947 }
1948
1949 my_cstr = superclass->getClassName();
1950
1951 if (my_cstr) {
1952 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1953 return( kIOReturnSuccess );
1954 }
1955 return (kIOReturnNotFound);
1956 }
1957
1958 /* Routine io_object_get_bundle_identifier */
1959 kern_return_t is_io_object_get_bundle_identifier(
1960 mach_port_t master_port,
1961 io_name_t obj_name,
1962 io_name_t bundle_name)
1963 {
1964 const OSMetaClass* my_obj = NULL;
1965 const OSSymbol *my_name = NULL;
1966 const OSSymbol *identifier = NULL;
1967 const char *my_cstr = NULL;
1968
1969 if (!obj_name || !bundle_name)
1970 return (kIOReturnBadArgument);
1971
1972 if( master_port != master_device_port)
1973 return( kIOReturnNotPrivileged);
1974
1975 my_name = OSSymbol::withCString(obj_name);
1976
1977 if (my_name) {
1978 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1979 my_name->release();
1980 }
1981
1982 if (my_obj) {
1983 identifier = my_obj->getKmodName();
1984 }
1985 if (!identifier) {
1986 return( kIOReturnNotFound );
1987 }
1988
1989 my_cstr = identifier->getCStringNoCopy();
1990 if (my_cstr) {
1991 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1992 return( kIOReturnSuccess );
1993 }
1994
1995 return (kIOReturnBadArgument);
1996 }
1997
1998 /* Routine io_object_conforms_to */
1999 kern_return_t is_io_object_conforms_to(
2000 io_object_t object,
2001 io_name_t className,
2002 boolean_t *conforms )
2003 {
2004 if( !object)
2005 return( kIOReturnBadArgument );
2006
2007 *conforms = (0 != object->metaCast( className ));
2008
2009 return( kIOReturnSuccess );
2010 }
2011
2012 /* Routine io_object_get_retain_count */
2013 kern_return_t is_io_object_get_retain_count(
2014 io_object_t object,
2015 uint32_t *retainCount )
2016 {
2017 if( !object)
2018 return( kIOReturnBadArgument );
2019
2020 *retainCount = object->getRetainCount();
2021 return( kIOReturnSuccess );
2022 }
2023
2024 /* Routine io_iterator_next */
2025 kern_return_t is_io_iterator_next(
2026 io_object_t iterator,
2027 io_object_t *object )
2028 {
2029 IOReturn ret;
2030 OSObject * obj;
2031
2032 CHECK( OSIterator, iterator, iter );
2033
2034 obj = iter->getNextObject();
2035 if( obj) {
2036 obj->retain();
2037 *object = obj;
2038 ret = kIOReturnSuccess;
2039 } else
2040 ret = kIOReturnNoDevice;
2041
2042 return (ret);
2043 }
2044
2045 /* Routine io_iterator_reset */
2046 kern_return_t is_io_iterator_reset(
2047 io_object_t iterator )
2048 {
2049 CHECK( OSIterator, iterator, iter );
2050
2051 iter->reset();
2052
2053 return( kIOReturnSuccess );
2054 }
2055
2056 /* Routine io_iterator_is_valid */
2057 kern_return_t is_io_iterator_is_valid(
2058 io_object_t iterator,
2059 boolean_t *is_valid )
2060 {
2061 CHECK( OSIterator, iterator, iter );
2062
2063 *is_valid = iter->isValid();
2064
2065 return( kIOReturnSuccess );
2066 }
2067
2068
2069 static kern_return_t internal_io_service_match_property_table(
2070 io_service_t _service,
2071 const char * matching,
2072 mach_msg_type_number_t matching_size,
2073 boolean_t *matches)
2074 {
2075 CHECK( IOService, _service, service );
2076
2077 kern_return_t kr;
2078 OSObject * obj;
2079 OSDictionary * dict;
2080
2081 assert(matching_size);
2082 obj = OSUnserializeXML(matching, matching_size);
2083
2084 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2085 *matches = service->passiveMatch( dict );
2086 kr = kIOReturnSuccess;
2087 } else
2088 kr = kIOReturnBadArgument;
2089
2090 if( obj)
2091 obj->release();
2092
2093 return( kr );
2094 }
2095
2096 /* Routine io_service_match_property_table */
2097 kern_return_t is_io_service_match_property_table(
2098 io_service_t service,
2099 io_string_t matching,
2100 boolean_t *matches )
2101 {
2102 return (kIOReturnUnsupported);
2103 }
2104
2105
2106 /* Routine io_service_match_property_table_ool */
2107 kern_return_t is_io_service_match_property_table_ool(
2108 io_object_t service,
2109 io_buf_ptr_t matching,
2110 mach_msg_type_number_t matchingCnt,
2111 kern_return_t *result,
2112 boolean_t *matches )
2113 {
2114 kern_return_t kr;
2115 vm_offset_t data;
2116 vm_map_offset_t map_data;
2117
2118 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2119 data = CAST_DOWN(vm_offset_t, map_data);
2120
2121 if( KERN_SUCCESS == kr) {
2122 // must return success after vm_map_copyout() succeeds
2123 *result = internal_io_service_match_property_table(service,
2124 (const char *)data, matchingCnt, matches );
2125 vm_deallocate( kernel_map, data, matchingCnt );
2126 }
2127
2128 return( kr );
2129 }
2130
2131 /* Routine io_service_match_property_table_bin */
2132 kern_return_t is_io_service_match_property_table_bin(
2133 io_object_t service,
2134 io_struct_inband_t matching,
2135 mach_msg_type_number_t matchingCnt,
2136 boolean_t *matches)
2137 {
2138 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2139 }
2140
2141 static kern_return_t internal_io_service_get_matching_services(
2142 mach_port_t master_port,
2143 const char * matching,
2144 mach_msg_type_number_t matching_size,
2145 io_iterator_t *existing )
2146 {
2147 kern_return_t kr;
2148 OSObject * obj;
2149 OSDictionary * dict;
2150
2151 if( master_port != master_device_port)
2152 return( kIOReturnNotPrivileged);
2153
2154 assert(matching_size);
2155 obj = OSUnserializeXML(matching, matching_size);
2156
2157 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2158 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2159 kr = kIOReturnSuccess;
2160 } else
2161 kr = kIOReturnBadArgument;
2162
2163 if( obj)
2164 obj->release();
2165
2166 return( kr );
2167 }
2168
2169 /* Routine io_service_get_matching_services */
2170 kern_return_t is_io_service_get_matching_services(
2171 mach_port_t master_port,
2172 io_string_t matching,
2173 io_iterator_t *existing )
2174 {
2175 return (kIOReturnUnsupported);
2176 }
2177
2178 /* Routine io_service_get_matching_services_ool */
2179 kern_return_t is_io_service_get_matching_services_ool(
2180 mach_port_t master_port,
2181 io_buf_ptr_t matching,
2182 mach_msg_type_number_t matchingCnt,
2183 kern_return_t *result,
2184 io_object_t *existing )
2185 {
2186 kern_return_t kr;
2187 vm_offset_t data;
2188 vm_map_offset_t map_data;
2189
2190 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2191 data = CAST_DOWN(vm_offset_t, map_data);
2192
2193 if( KERN_SUCCESS == kr) {
2194 // must return success after vm_map_copyout() succeeds
2195 // and mig will copy out objects on success
2196 *existing = 0;
2197 *result = internal_io_service_get_matching_services(master_port,
2198 (const char *) data, matchingCnt, existing);
2199 vm_deallocate( kernel_map, data, matchingCnt );
2200 }
2201
2202 return( kr );
2203 }
2204
2205 /* Routine io_service_get_matching_services_bin */
2206 kern_return_t is_io_service_get_matching_services_bin(
2207 mach_port_t master_port,
2208 io_struct_inband_t matching,
2209 mach_msg_type_number_t matchingCnt,
2210 io_object_t *existing)
2211 {
2212 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2213 }
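
/*
 * User space usually reaches this matching path through
 * IOServiceGetMatchingServices() in <IOKit/IOKitLib.h>, which serializes the
 * matching dictionary and consumes one reference to it; a minimal sketch
 * (the "IOMedia" class name is only an example):
 *
 *   io_iterator_t iter = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                                                   IOServiceMatching("IOMedia"),
 *                                                   &iter);
 *   // on success, walk iter with IOIteratorNext() and release it when done
 */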
2214
2215
2216 static kern_return_t internal_io_service_get_matching_service(
2217 mach_port_t master_port,
2218 const char * matching,
2219 mach_msg_type_number_t matching_size,
2220 io_service_t *service )
2221 {
2222 kern_return_t kr;
2223 OSObject * obj;
2224 OSDictionary * dict;
2225
2226 if( master_port != master_device_port)
2227 return( kIOReturnNotPrivileged);
2228
2229 assert(matching_size);
2230 obj = OSUnserializeXML(matching, matching_size);
2231
2232 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2233 *service = IOService::copyMatchingService( dict );
2234 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2235 } else
2236 kr = kIOReturnBadArgument;
2237
2238 if( obj)
2239 obj->release();
2240
2241 return( kr );
2242 }
2243
2244 /* Routine io_service_get_matching_service */
2245 kern_return_t is_io_service_get_matching_service(
2246 mach_port_t master_port,
2247 io_string_t matching,
2248 io_service_t *service )
2249 {
2250 return (kIOReturnUnsupported);
2251 }
2252
2253 /* Routine io_service_get_matching_service_ool */
2254 kern_return_t is_io_service_get_matching_service_ool(
2255 mach_port_t master_port,
2256 io_buf_ptr_t matching,
2257 mach_msg_type_number_t matchingCnt,
2258 kern_return_t *result,
2259 io_object_t *service )
2260 {
2261 kern_return_t kr;
2262 vm_offset_t data;
2263 vm_map_offset_t map_data;
2264
2265 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2266 data = CAST_DOWN(vm_offset_t, map_data);
2267
2268 if( KERN_SUCCESS == kr) {
2269 // must return success after vm_map_copyout() succeeds
2270 // and mig will copy out objects on success
2271 *service = 0;
2272 *result = internal_io_service_get_matching_service(master_port,
2273 (const char *) data, matchingCnt, service );
2274 vm_deallocate( kernel_map, data, matchingCnt );
2275 }
2276
2277 return( kr );
2278 }
2279
2280 /* Routine io_service_get_matching_service_bin */
2281 kern_return_t is_io_service_get_matching_service_bin(
2282 mach_port_t master_port,
2283 io_struct_inband_t matching,
2284 mach_msg_type_number_t matchingCnt,
2285 io_object_t *service)
2286 {
2287 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2288 }
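
/*
 * For the single-service variant, the usual user-space entry point is
 * IOServiceGetMatchingService(), which also consumes the matching dictionary
 * and returns the first match or IO_OBJECT_NULL; sketch only:
 *
 *   io_service_t svc = IOServiceGetMatchingService(kIOMasterPortDefault,
 *                                                  IOServiceMatching("IOMedia"));
 */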
2289
2290 static kern_return_t internal_io_service_add_notification(
2291 mach_port_t master_port,
2292 io_name_t notification_type,
2293 const char * matching,
2294 size_t matching_size,
2295 mach_port_t port,
2296 void * reference,
2297 vm_size_t referenceSize,
2298 bool client64,
2299 io_object_t * notification )
2300 {
2301 IOServiceUserNotification * userNotify = 0;
2302 IONotifier * notify = 0;
2303 const OSSymbol * sym;
2304 OSDictionary * dict = 0;
2305 IOReturn err;
2306 unsigned long int userMsgType;
2307
2308 if( master_port != master_device_port)
2309 return( kIOReturnNotPrivileged);
2310
2311 do {
2312 err = kIOReturnNoResources;
2313
2314 if( !(sym = OSSymbol::withCString( notification_type )))
2315 continue; // err is already kIOReturnNoResources; don't proceed with a NULL symbol
2316
2317 assert(matching_size);
2318 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2319 if (!dict) {
2320 err = kIOReturnBadArgument;
2321 continue;
2322 }
2323
2324 if( (sym == gIOPublishNotification)
2325 || (sym == gIOFirstPublishNotification))
2326 userMsgType = kIOServicePublishNotificationType;
2327 else if( (sym == gIOMatchedNotification)
2328 || (sym == gIOFirstMatchNotification))
2329 userMsgType = kIOServiceMatchedNotificationType;
2330 else if( sym == gIOTerminatedNotification)
2331 userMsgType = kIOServiceTerminatedNotificationType;
2332 else
2333 userMsgType = kLastIOKitNotificationType;
2334
2335 userNotify = new IOServiceUserNotification;
2336
2337 if( userNotify && !userNotify->init( port, userMsgType,
2338 reference, referenceSize, client64)) {
2339 iokit_release_port_send(port);
2340 userNotify->release();
2341 userNotify = 0;
2342 }
2343 if( !userNotify)
2344 continue;
2345
2346 notify = IOService::addMatchingNotification( sym, dict,
2347 &userNotify->_handler, userNotify );
2348 if( notify) {
2349 *notification = userNotify;
2350 userNotify->setNotification( notify );
2351 err = kIOReturnSuccess;
2352 } else
2353 err = kIOReturnUnsupported;
2354
2355 } while( false );
2356
2357 if( sym)
2358 sym->release();
2359 if( dict)
2360 dict->release();
2361
2362 return( err );
2363 }
2364
2365
2366 /* Routine io_service_add_notification */
2367 kern_return_t is_io_service_add_notification(
2368 mach_port_t master_port,
2369 io_name_t notification_type,
2370 io_string_t matching,
2371 mach_port_t port,
2372 io_async_ref_t reference,
2373 mach_msg_type_number_t referenceCnt,
2374 io_object_t * notification )
2375 {
2376 return (kIOReturnUnsupported);
2377 }
2378
2379 /* Routine io_service_add_notification_64 */
2380 kern_return_t is_io_service_add_notification_64(
2381 mach_port_t master_port,
2382 io_name_t notification_type,
2383 io_string_t matching,
2384 mach_port_t wake_port,
2385 io_async_ref64_t reference,
2386 mach_msg_type_number_t referenceCnt,
2387 io_object_t *notification )
2388 {
2389 return (kIOReturnUnsupported);
2390 }
2391
2392 /* Routine io_service_add_notification_bin */
2393 kern_return_t is_io_service_add_notification_bin
2394 (
2395 mach_port_t master_port,
2396 io_name_t notification_type,
2397 io_struct_inband_t matching,
2398 mach_msg_type_number_t matchingCnt,
2399 mach_port_t wake_port,
2400 io_async_ref_t reference,
2401 mach_msg_type_number_t referenceCnt,
2402 io_object_t *notification)
2403 {
2404 return (internal_io_service_add_notification(master_port, notification_type,
2405 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2406 false, notification));
2407 }
2408
2409 /* Routine io_service_add_notification_bin_64 */
2410 kern_return_t is_io_service_add_notification_bin_64
2411 (
2412 mach_port_t master_port,
2413 io_name_t notification_type,
2414 io_struct_inband_t matching,
2415 mach_msg_type_number_t matchingCnt,
2416 mach_port_t wake_port,
2417 io_async_ref64_t reference,
2418 mach_msg_type_number_t referenceCnt,
2419 io_object_t *notification)
2420 {
2421 return (internal_io_service_add_notification(master_port, notification_type,
2422 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2423 true, notification));
2424 }
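
/*
 * The notification paths above are what IOServiceAddMatchingNotification() in
 * IOKit.framework drives; a hedged sketch of the user-space side, where the
 * callback name deviceArrived is hypothetical:
 *
 *   IONotificationPortRef notifyPort = IONotificationPortCreate(kIOMasterPortDefault);
 *   io_iterator_t         added      = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceAddMatchingNotification(notifyPort,
 *                          kIOFirstMatchNotification,
 *                          IOServiceMatching("IOMedia"),
 *                          deviceArrived, NULL, &added);
 *   // the returned iterator must be drained once to arm the notification
 */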
2425
2426 static kern_return_t internal_io_service_add_notification_ool(
2427 mach_port_t master_port,
2428 io_name_t notification_type,
2429 io_buf_ptr_t matching,
2430 mach_msg_type_number_t matchingCnt,
2431 mach_port_t wake_port,
2432 void * reference,
2433 vm_size_t referenceSize,
2434 bool client64,
2435 kern_return_t *result,
2436 io_object_t *notification )
2437 {
2438 kern_return_t kr;
2439 vm_offset_t data;
2440 vm_map_offset_t map_data;
2441
2442 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2443 data = CAST_DOWN(vm_offset_t, map_data);
2444
2445 if( KERN_SUCCESS == kr) {
2446 // must return success after vm_map_copyout() succeeds
2447 // and mig will copy out objects on success
2448 *notification = 0;
2449 *result = internal_io_service_add_notification( master_port, notification_type,
2450 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2451 vm_deallocate( kernel_map, data, matchingCnt );
2452 }
2453
2454 return( kr );
2455 }
2456
2457 /* Routine io_service_add_notification_ool */
2458 kern_return_t is_io_service_add_notification_ool(
2459 mach_port_t master_port,
2460 io_name_t notification_type,
2461 io_buf_ptr_t matching,
2462 mach_msg_type_number_t matchingCnt,
2463 mach_port_t wake_port,
2464 io_async_ref_t reference,
2465 mach_msg_type_number_t referenceCnt,
2466 kern_return_t *result,
2467 io_object_t *notification )
2468 {
2469 return (internal_io_service_add_notification_ool(master_port, notification_type,
2470 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2471 false, result, notification));
2472 }
2473
2474 /* Routine io_service_add_notification_ool_64 */
2475 kern_return_t is_io_service_add_notification_ool_64(
2476 mach_port_t master_port,
2477 io_name_t notification_type,
2478 io_buf_ptr_t matching,
2479 mach_msg_type_number_t matchingCnt,
2480 mach_port_t wake_port,
2481 io_async_ref64_t reference,
2482 mach_msg_type_number_t referenceCnt,
2483 kern_return_t *result,
2484 io_object_t *notification )
2485 {
2486 return (internal_io_service_add_notification_ool(master_port, notification_type,
2487 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2488 true, result, notification));
2489 }
2490
2491 /* Routine io_service_add_notification_old */
2492 kern_return_t is_io_service_add_notification_old(
2493 mach_port_t master_port,
2494 io_name_t notification_type,
2495 io_string_t matching,
2496 mach_port_t port,
2497 // for binary compatibility reasons, this must be natural_t for ILP32
2498 natural_t ref,
2499 io_object_t * notification )
2500 {
2501 return( is_io_service_add_notification( master_port, notification_type,
2502 matching, port, &ref, 1, notification ));
2503 }
2504
2505
2506 static kern_return_t internal_io_service_add_interest_notification(
2507 io_object_t _service,
2508 io_name_t type_of_interest,
2509 mach_port_t port,
2510 void * reference,
2511 vm_size_t referenceSize,
2512 bool client64,
2513 io_object_t * notification )
2514 {
2515
2516 IOServiceMessageUserNotification * userNotify = 0;
2517 IONotifier * notify = 0;
2518 const OSSymbol * sym;
2519 IOReturn err;
2520
2521 CHECK( IOService, _service, service );
2522
2523 err = kIOReturnNoResources;
2524 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2525
2526 userNotify = new IOServiceMessageUserNotification;
2527
2528 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2529 reference, referenceSize,
2530 kIOUserNotifyMaxMessageSize,
2531 client64 )) {
2532 iokit_release_port_send(port);
2533 userNotify->release();
2534 userNotify = 0;
2535 }
2536 if( !userNotify)
2537 continue;
2538
2539 notify = service->registerInterest( sym,
2540 &userNotify->_handler, userNotify );
2541 if( notify) {
2542 *notification = userNotify;
2543 userNotify->setNotification( notify );
2544 err = kIOReturnSuccess;
2545 } else
2546 err = kIOReturnUnsupported;
2547
2548 sym->release();
2549
2550 } while( false );
2551
2552 return( err );
2553 }
2554
2555 /* Routine io_service_add_interest_notification */
2556 kern_return_t is_io_service_add_interest_notification(
2557 io_object_t service,
2558 io_name_t type_of_interest,
2559 mach_port_t port,
2560 io_async_ref_t reference,
2561 mach_msg_type_number_t referenceCnt,
2562 io_object_t * notification )
2563 {
2564 return (internal_io_service_add_interest_notification(service, type_of_interest,
2565 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2566 }
2567
2568 /* Routine io_service_add_interest_notification_64 */
2569 kern_return_t is_io_service_add_interest_notification_64(
2570 io_object_t service,
2571 io_name_t type_of_interest,
2572 mach_port_t wake_port,
2573 io_async_ref64_t reference,
2574 mach_msg_type_number_t referenceCnt,
2575 io_object_t *notification )
2576 {
2577 return (internal_io_service_add_interest_notification(service, type_of_interest,
2578 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2579 }
2580
2581
2582 /* Routine io_service_acknowledge_notification */
2583 kern_return_t is_io_service_acknowledge_notification(
2584 io_object_t _service,
2585 natural_t notify_ref,
2586 natural_t response )
2587 {
2588 CHECK( IOService, _service, service );
2589
2590 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2591 (IOOptionBits) response ));
2592
2593 }
2594
2595 /* Routine io_connect_get_notification_semaphore */
2596 kern_return_t is_io_connect_get_notification_semaphore(
2597 io_connect_t connection,
2598 natural_t notification_type,
2599 semaphore_t *semaphore )
2600 {
2601 CHECK( IOUserClient, connection, client );
2602
2603 IOStatisticsClientCall();
2604 return( client->getNotificationSemaphore( (UInt32) notification_type,
2605 semaphore ));
2606 }
2607
2608 /* Routine io_registry_get_root_entry */
2609 kern_return_t is_io_registry_get_root_entry(
2610 mach_port_t master_port,
2611 io_object_t *root )
2612 {
2613 IORegistryEntry * entry;
2614
2615 if( master_port != master_device_port)
2616 return( kIOReturnNotPrivileged);
2617
2618 entry = IORegistryEntry::getRegistryRoot();
2619 if( entry)
2620 entry->retain();
2621 *root = entry;
2622
2623 return( kIOReturnSuccess );
2624 }
2625
2626 /* Routine io_registry_create_iterator */
2627 kern_return_t is_io_registry_create_iterator(
2628 mach_port_t master_port,
2629 io_name_t plane,
2630 uint32_t options,
2631 io_object_t *iterator )
2632 {
2633 if( master_port != master_device_port)
2634 return( kIOReturnNotPrivileged);
2635
2636 *iterator = IOUserIterator::withIterator(
2637 IORegistryIterator::iterateOver(
2638 IORegistryEntry::getPlane( plane ), options ));
2639
2640 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2641 }
2642
2643 /* Routine io_registry_entry_create_iterator */
2644 kern_return_t is_io_registry_entry_create_iterator(
2645 io_object_t registry_entry,
2646 io_name_t plane,
2647 uint32_t options,
2648 io_object_t *iterator )
2649 {
2650 CHECK( IORegistryEntry, registry_entry, entry );
2651
2652 *iterator = IOUserIterator::withIterator(
2653 IORegistryIterator::iterateOver( entry,
2654 IORegistryEntry::getPlane( plane ), options ));
2655
2656 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2657 }
2658
2659 /* Routine io_registry_iterator_enter_entry */
2660 kern_return_t is_io_registry_iterator_enter_entry(
2661 io_object_t iterator )
2662 {
2663 CHECKLOCKED( IORegistryIterator, iterator, iter );
2664
2665 IOLockLock(oIter->lock);
2666 iter->enterEntry();
2667 IOLockUnlock(oIter->lock);
2668
2669 return( kIOReturnSuccess );
2670 }
2671
2672 /* Routine io_registry_iterator_exit_entry */
2673 kern_return_t is_io_registry_iterator_exit_entry(
2674 io_object_t iterator )
2675 {
2676 bool didIt;
2677
2678 CHECKLOCKED( IORegistryIterator, iterator, iter );
2679
2680 IOLockLock(oIter->lock);
2681 didIt = iter->exitEntry();
2682 IOLockUnlock(oIter->lock);
2683
2684 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2685 }
2686
2687 /* Routine io_registry_entry_from_path */
2688 kern_return_t is_io_registry_entry_from_path(
2689 mach_port_t master_port,
2690 io_string_t path,
2691 io_object_t *registry_entry )
2692 {
2693 IORegistryEntry * entry;
2694
2695 if( master_port != master_device_port)
2696 return( kIOReturnNotPrivileged);
2697
2698 entry = IORegistryEntry::fromPath( path );
2699
2700 *registry_entry = entry;
2701
2702 return( kIOReturnSuccess );
2703 }
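
/*
 * A user-space sketch using the corresponding IOKit.framework call,
 * IORegistryEntryFromPath(); the path shown is only an example:
 *
 *   io_registry_entry_t root =
 *       IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
 *   if (root != IO_OBJECT_NULL) {
 *       // ... inspect root ...
 *       IOObjectRelease(root);
 *   }
 */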
2704
2705
2706 /* Routine io_registry_entry_from_path_ool */
2707 kern_return_t is_io_registry_entry_from_path_ool(
2708 mach_port_t master_port,
2709 io_string_inband_t path,
2710 io_buf_ptr_t path_ool,
2711 mach_msg_type_number_t path_oolCnt,
2712 kern_return_t *result,
2713 io_object_t *registry_entry)
2714 {
2715 IORegistryEntry * entry;
2716 vm_map_offset_t map_data;
2717 const char * cpath;
2718 IOReturn res;
2719 kern_return_t err;
2720
2721 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2722
2723 map_data = 0;
2724 entry = 0;
2725 res = err = KERN_SUCCESS;
2726 if (path[0]) cpath = path;
2727 else
2728 {
2729 if (!path_oolCnt) return(kIOReturnBadArgument);
2730 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2731
2732 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2733 if (KERN_SUCCESS == err)
2734 {
2735 // must return success to MIG after vm_map_copyout() succeeds; the real status is returned in *result
2736 cpath = CAST_DOWN(const char *, map_data);
2737 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2738 }
2739 }
2740
2741 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2742 {
2743 entry = IORegistryEntry::fromPath(cpath);
2744 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2745 }
2746
2747 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2748
2749 if (KERN_SUCCESS != err) res = err;
2750 *registry_entry = entry;
2751 *result = res;
2752
2753 return (err);
2754 }
2755
2756
2757 /* Routine io_registry_entry_in_plane */
2758 kern_return_t is_io_registry_entry_in_plane(
2759 io_object_t registry_entry,
2760 io_name_t plane,
2761 boolean_t *inPlane )
2762 {
2763 CHECK( IORegistryEntry, registry_entry, entry );
2764
2765 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2766
2767 return( kIOReturnSuccess );
2768 }
2769
2770
2771 /* Routine io_registry_entry_get_path */
2772 kern_return_t is_io_registry_entry_get_path(
2773 io_object_t registry_entry,
2774 io_name_t plane,
2775 io_string_t path )
2776 {
2777 int length;
2778 CHECK( IORegistryEntry, registry_entry, entry );
2779
2780 length = sizeof( io_string_t);
2781 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2782 return( kIOReturnSuccess );
2783 else
2784 return( kIOReturnBadArgument );
2785 }
2786
2787 /* Routine io_registry_entry_get_path_ool */
2788 kern_return_t is_io_registry_entry_get_path_ool(
2789 io_object_t registry_entry,
2790 io_name_t plane,
2791 io_string_inband_t path,
2792 io_buf_ptr_t *path_ool,
2793 mach_msg_type_number_t *path_oolCnt)
2794 {
2795 enum { kMaxPath = 16384 };
2796 IOReturn err;
2797 int length;
2798 char * buf;
2799
2800 CHECK( IORegistryEntry, registry_entry, entry );
2801
2802 *path_ool = NULL;
2803 *path_oolCnt = 0;
2804 length = sizeof(io_string_inband_t);
2805 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2806 else
2807 {
2808 length = kMaxPath;
2809 buf = IONew(char, length);
2810 if (!buf) err = kIOReturnNoMemory;
2811 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2812 else
2813 {
2814 *path_oolCnt = length;
2815 err = copyoutkdata(buf, length, path_ool);
2816 }
2817 if (buf) IODelete(buf, char, kMaxPath);
2818 }
2819
2820 return (err);
2821 }
2822
2823
2824 /* Routine io_registry_entry_get_name */
2825 kern_return_t is_io_registry_entry_get_name(
2826 io_object_t registry_entry,
2827 io_name_t name )
2828 {
2829 CHECK( IORegistryEntry, registry_entry, entry );
2830
2831 strlcpy( name, entry->getName(), sizeof( io_name_t));
2832
2833 return( kIOReturnSuccess );
2834 }
2835
2836 /* Routine io_registry_entry_get_name_in_plane */
2837 kern_return_t is_io_registry_entry_get_name_in_plane(
2838 io_object_t registry_entry,
2839 io_name_t planeName,
2840 io_name_t name )
2841 {
2842 const IORegistryPlane * plane;
2843 CHECK( IORegistryEntry, registry_entry, entry );
2844
2845 if( planeName[0])
2846 plane = IORegistryEntry::getPlane( planeName );
2847 else
2848 plane = 0;
2849
2850 strlcpy( name, entry->getName( plane), sizeof( io_name_t));
2851
2852 return( kIOReturnSuccess );
2853 }
2854
2855 /* Routine io_registry_entry_get_location_in_plane */
2856 kern_return_t is_io_registry_entry_get_location_in_plane(
2857 io_object_t registry_entry,
2858 io_name_t planeName,
2859 io_name_t location )
2860 {
2861 const IORegistryPlane * plane;
2862 CHECK( IORegistryEntry, registry_entry, entry );
2863
2864 if( planeName[0])
2865 plane = IORegistryEntry::getPlane( planeName );
2866 else
2867 plane = 0;
2868
2869 const char * cstr = entry->getLocation( plane );
2870
2871 if( cstr) {
2872 strlcpy( location, cstr, sizeof( io_name_t));
2873 return( kIOReturnSuccess );
2874 } else
2875 return( kIOReturnNotFound );
2876 }
2877
2878 /* Routine io_registry_entry_get_registry_entry_id */
2879 kern_return_t is_io_registry_entry_get_registry_entry_id(
2880 io_object_t registry_entry,
2881 uint64_t *entry_id )
2882 {
2883 CHECK( IORegistryEntry, registry_entry, entry );
2884
2885 *entry_id = entry->getRegistryEntryID();
2886
2887 return (kIOReturnSuccess);
2888 }
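
/*
 * User space reads this 64-bit ID with IORegistryEntryGetRegistryEntryID();
 * a minimal sketch:
 *
 *   uint64_t entryID = 0;
 *   kern_return_t kr = IORegistryEntryGetRegistryEntryID(entry, &entryID);
 */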
2889
2890 /* Routine io_registry_entry_get_property_bytes */
2891 kern_return_t is_io_registry_entry_get_property_bytes(
2892 io_object_t registry_entry,
2893 io_name_t property_name,
2894 io_struct_inband_t buf,
2895 mach_msg_type_number_t *dataCnt )
2896 {
2897 OSObject * obj;
2898 OSData * data;
2899 OSString * str;
2900 OSBoolean * boo;
2901 OSNumber * off;
2902 UInt64 offsetBytes;
2903 unsigned int len = 0;
2904 const void * bytes = 0;
2905 IOReturn ret = kIOReturnSuccess;
2906
2907 CHECK( IORegistryEntry, registry_entry, entry );
2908
2909 #if CONFIG_MACF
2910 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2911 return kIOReturnNotPermitted;
2912 #endif
2913
2914 obj = entry->copyProperty(property_name);
2915 if( !obj)
2916 return( kIOReturnNoResources );
2917
2918 // One day OSData will be a common container base class
2919 // until then...
2920 if( (data = OSDynamicCast( OSData, obj ))) {
2921 len = data->getLength();
2922 bytes = data->getBytesNoCopy();
2923 if (!data->isSerializable()) len = 0;
2924
2925 } else if( (str = OSDynamicCast( OSString, obj ))) {
2926 len = str->getLength() + 1;
2927 bytes = str->getCStringNoCopy();
2928
2929 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2930 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2931 bytes = boo->isTrue() ? "Yes" : "No";
2932
2933 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2934 offsetBytes = off->unsigned64BitValue();
2935 len = off->numberOfBytes();
2936 if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
2937 bytes = &offsetBytes;
2938 #ifdef __BIG_ENDIAN__
2939 bytes = (const void *)
2940 (((UInt32) bytes) + (sizeof( UInt64) - len));
2941 #endif
2942
2943 } else
2944 ret = kIOReturnBadArgument;
2945
2946 if( bytes) {
2947 if( *dataCnt < len)
2948 ret = kIOReturnIPCError;
2949 else {
2950 *dataCnt = len;
2951 bcopy( bytes, buf, len );
2952 }
2953 }
2954 obj->release();
2955
2956 return( ret );
2957 }
2958
2959
2960 /* Routine io_registry_entry_get_property */
2961 kern_return_t is_io_registry_entry_get_property(
2962 io_object_t registry_entry,
2963 io_name_t property_name,
2964 io_buf_ptr_t *properties,
2965 mach_msg_type_number_t *propertiesCnt )
2966 {
2967 kern_return_t err;
2968 vm_size_t len;
2969 OSObject * obj;
2970
2971 CHECK( IORegistryEntry, registry_entry, entry );
2972
2973 #if CONFIG_MACF
2974 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2975 return kIOReturnNotPermitted;
2976 #endif
2977
2978 obj = entry->copyProperty(property_name);
2979 if( !obj)
2980 return( kIOReturnNotFound );
2981
2982 OSSerialize * s = OSSerialize::withCapacity(4096);
2983 if( !s) {
2984 obj->release();
2985 return( kIOReturnNoMemory );
2986 }
2987
2988 if( obj->serialize( s )) {
2989 len = s->getLength();
2990 *propertiesCnt = len;
2991 err = copyoutkdata( s->text(), len, properties );
2992
2993 } else
2994 err = kIOReturnUnsupported;
2995
2996 s->release();
2997 obj->release();
2998
2999 return( err );
3000 }
3001
3002 /* Routine io_registry_entry_get_property_recursively */
3003 kern_return_t is_io_registry_entry_get_property_recursively(
3004 io_object_t registry_entry,
3005 io_name_t plane,
3006 io_name_t property_name,
3007 uint32_t options,
3008 io_buf_ptr_t *properties,
3009 mach_msg_type_number_t *propertiesCnt )
3010 {
3011 kern_return_t err;
3012 vm_size_t len;
3013 OSObject * obj;
3014
3015 CHECK( IORegistryEntry, registry_entry, entry );
3016
3017 #if CONFIG_MACF
3018 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3019 return kIOReturnNotPermitted;
3020 #endif
3021
3022 obj = entry->copyProperty( property_name,
3023 IORegistryEntry::getPlane( plane ), options );
3024 if( !obj)
3025 return( kIOReturnNotFound );
3026
3027 OSSerialize * s = OSSerialize::withCapacity(4096);
3028 if( !s) {
3029 obj->release();
3030 return( kIOReturnNoMemory );
3031 }
3032
3033 if( obj->serialize( s )) {
3034 len = s->getLength();
3035 *propertiesCnt = len;
3036 err = copyoutkdata( s->text(), len, properties );
3037
3038 } else
3039 err = kIOReturnUnsupported;
3040
3041 s->release();
3042 obj->release();
3043
3044 return( err );
3045 }
3046
3047 /* Routine io_registry_entry_get_properties */
3048 kern_return_t is_io_registry_entry_get_properties(
3049 io_object_t registry_entry,
3050 io_buf_ptr_t *properties,
3051 mach_msg_type_number_t *propertiesCnt )
3052 {
3053 return (kIOReturnUnsupported);
3054 }
3055
3056 #if CONFIG_MACF
3057
3058 struct GetPropertiesEditorRef
3059 {
3060 kauth_cred_t cred;
3061 IORegistryEntry * entry;
3062 OSCollection * root;
3063 };
3064
3065 static const OSMetaClassBase *
3066 GetPropertiesEditor(void * reference,
3067 OSSerialize * s,
3068 OSCollection * container,
3069 const OSSymbol * name,
3070 const OSMetaClassBase * value)
3071 {
3072 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3073
3074 if (!ref->root) ref->root = container;
3075 if (ref->root == container)
3076 {
3077 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3078 {
3079 value = 0;
3080 }
3081 }
3082 if (value) value->retain();
3083 return (value);
3084 }
3085
3086 #endif /* CONFIG_MACF */
3087
3088 /* Routine io_registry_entry_get_properties_bin */
3089 kern_return_t is_io_registry_entry_get_properties_bin(
3090 io_object_t registry_entry,
3091 io_buf_ptr_t *properties,
3092 mach_msg_type_number_t *propertiesCnt)
3093 {
3094 kern_return_t err = kIOReturnSuccess;
3095 vm_size_t len;
3096 OSSerialize * s;
3097 OSSerialize::Editor editor = 0;
3098 void * editRef = 0;
3099
3100 CHECK(IORegistryEntry, registry_entry, entry);
3101
3102 #if CONFIG_MACF
3103 GetPropertiesEditorRef ref;
3104 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3105 {
3106 editor = &GetPropertiesEditor;
3107 editRef = &ref;
3108 ref.cred = kauth_cred_get();
3109 ref.entry = entry;
3110 ref.root = 0;
3111 }
3112 #endif
3113
3114 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3115 if (!s) return (kIOReturnNoMemory);
3116
3117 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3118
3119 if (kIOReturnSuccess == err)
3120 {
3121 len = s->getLength();
3122 *propertiesCnt = len;
3123 err = copyoutkdata(s->text(), len, properties);
3124 }
3125 s->release();
3126
3127 return (err);
3128 }
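
/*
 * In user space the whole property table is usually fetched with
 * IORegistryEntryCreateCFProperties(), which deserializes the data produced
 * here (or by the XML path, depending on the library in use); sketch:
 *
 *   CFMutableDictionaryRef props = NULL;
 *   kern_return_t kr = IORegistryEntryCreateCFProperties(entry, &props,
 *                                                        kCFAllocatorDefault, 0);
 *   if (kr == KERN_SUCCESS && props) CFRelease(props);
 */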
3129
3130 /* Routine io_registry_entry_get_property_bin */
3131 kern_return_t is_io_registry_entry_get_property_bin(
3132 io_object_t registry_entry,
3133 io_name_t plane,
3134 io_name_t property_name,
3135 uint32_t options,
3136 io_buf_ptr_t *properties,
3137 mach_msg_type_number_t *propertiesCnt )
3138 {
3139 kern_return_t err;
3140 vm_size_t len;
3141 OSObject * obj;
3142 const OSSymbol * sym;
3143
3144 CHECK( IORegistryEntry, registry_entry, entry );
3145
3146 #if CONFIG_MACF
3147 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3148 return kIOReturnNotPermitted;
3149 #endif
3150
3151 sym = OSSymbol::withCString(property_name);
3152 if (!sym) return (kIOReturnNoMemory);
3153
3154 if (gIORegistryEntryPropertyKeysKey == sym)
3155 {
3156 obj = entry->copyPropertyKeys();
3157 }
3158 else
3159 {
3160 if ((kIORegistryIterateRecursively & options) && plane[0])
3161 {
3162 obj = entry->copyProperty(property_name,
3163 IORegistryEntry::getPlane(plane), options );
3164 }
3165 else
3166 {
3167 obj = entry->copyProperty(property_name);
3168 }
3169 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3170 }
3171
3172 sym->release();
3173 if (!obj) return (kIOReturnNotFound);
3174
3175 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3176 if( !s) {
3177 obj->release();
3178 return( kIOReturnNoMemory );
3179 }
3180
3181 if( obj->serialize( s )) {
3182 len = s->getLength();
3183 *propertiesCnt = len;
3184 err = copyoutkdata( s->text(), len, properties );
3185
3186 } else err = kIOReturnUnsupported;
3187
3188 s->release();
3189 obj->release();
3190
3191 return( err );
3192 }
3193
3194
3195 /* Routine io_registry_entry_set_properties */
3196 kern_return_t is_io_registry_entry_set_properties
3197 (
3198 io_object_t registry_entry,
3199 io_buf_ptr_t properties,
3200 mach_msg_type_number_t propertiesCnt,
3201 kern_return_t * result)
3202 {
3203 OSObject * obj;
3204 kern_return_t err;
3205 IOReturn res;
3206 vm_offset_t data;
3207 vm_map_offset_t map_data;
3208
3209 CHECK( IORegistryEntry, registry_entry, entry );
3210
3211 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3212 return( kIOReturnMessageTooLarge);
3213
3214 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3215 data = CAST_DOWN(vm_offset_t, map_data);
3216
3217 if( KERN_SUCCESS == err) {
3218
3219 FAKE_STACK_FRAME(entry->getMetaClass());
3220
3221 // must return success after vm_map_copyout() succeeds
3222 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3223 vm_deallocate( kernel_map, data, propertiesCnt );
3224
3225 if (!obj)
3226 res = kIOReturnBadArgument;
3227 #if CONFIG_MACF
3228 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3229 registry_entry, obj))
3230 {
3231 res = kIOReturnNotPermitted;
3232 }
3233 #endif
3234 else
3235 {
3236 res = entry->setProperties( obj );
3237 }
3238
3239 if (obj)
3240 obj->release();
3241
3242 FAKE_STACK_FRAME_END();
3243
3244 } else
3245 res = err;
3246
3247 *result = res;
3248 return( err );
3249 }
3250
3251 /* Routine io_registry_entry_get_child_iterator */
3252 kern_return_t is_io_registry_entry_get_child_iterator(
3253 io_object_t registry_entry,
3254 io_name_t plane,
3255 io_object_t *iterator )
3256 {
3257 CHECK( IORegistryEntry, registry_entry, entry );
3258
3259 *iterator = entry->getChildIterator(
3260 IORegistryEntry::getPlane( plane ));
3261
3262 return( kIOReturnSuccess );
3263 }
3264
3265 /* Routine io_registry_entry_get_parent_iterator */
3266 kern_return_t is_io_registry_entry_get_parent_iterator(
3267 io_object_t registry_entry,
3268 io_name_t plane,
3269 io_object_t *iterator)
3270 {
3271 CHECK( IORegistryEntry, registry_entry, entry );
3272
3273 *iterator = entry->getParentIterator(
3274 IORegistryEntry::getPlane( plane ));
3275
3276 return( kIOReturnSuccess );
3277 }
3278
3279 /* Routine io_service_get_busy_state */
3280 kern_return_t is_io_service_get_busy_state(
3281 io_object_t _service,
3282 uint32_t *busyState )
3283 {
3284 CHECK( IOService, _service, service );
3285
3286 *busyState = service->getBusyState();
3287
3288 return( kIOReturnSuccess );
3289 }
3290
3291 /* Routine io_service_get_state */
3292 kern_return_t is_io_service_get_state(
3293 io_object_t _service,
3294 uint64_t *state,
3295 uint32_t *busy_state,
3296 uint64_t *accumulated_busy_time )
3297 {
3298 CHECK( IOService, _service, service );
3299
3300 *state = service->getState();
3301 *busy_state = service->getBusyState();
3302 *accumulated_busy_time = service->getAccumulatedBusyTime();
3303
3304 return( kIOReturnSuccess );
3305 }
3306
3307 /* Routine io_service_wait_quiet */
3308 kern_return_t is_io_service_wait_quiet(
3309 io_object_t _service,
3310 mach_timespec_t wait_time )
3311 {
3312 uint64_t timeoutNS;
3313
3314 CHECK( IOService, _service, service );
3315
3316 timeoutNS = wait_time.tv_sec;
3317 timeoutNS *= kSecondScale;
3318 timeoutNS += wait_time.tv_nsec;
3319
3320 return( service->waitQuiet(timeoutNS) );
3321 }
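
/*
 * The user-space equivalent is IOServiceWaitQuiet(), which takes the same
 * mach_timespec_t; a sketch waiting up to five seconds for matching to settle:
 *
 *   mach_timespec_t t = { 5, 0 };        // seconds, nanoseconds
 *   kern_return_t kr = IOServiceWaitQuiet(service, &t);
 */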
3322
3323 /* Routine io_service_request_probe */
3324 kern_return_t is_io_service_request_probe(
3325 io_object_t _service,
3326 uint32_t options )
3327 {
3328 CHECK( IOService, _service, service );
3329
3330 return( service->requestProbe( options ));
3331 }
3332
3333 /* Routine io_service_get_authorization_id */
3334 kern_return_t is_io_service_get_authorization_id(
3335 io_object_t _service,
3336 uint64_t *authorization_id )
3337 {
3338 kern_return_t kr;
3339
3340 CHECK( IOService, _service, service );
3341
3342 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3343 kIOClientPrivilegeAdministrator );
3344 if( kIOReturnSuccess != kr)
3345 return( kr );
3346
3347 *authorization_id = service->getAuthorizationID();
3348
3349 return( kr );
3350 }
3351
3352 /* Routine io_service_set_authorization_id */
3353 kern_return_t is_io_service_set_authorization_id(
3354 io_object_t _service,
3355 uint64_t authorization_id )
3356 {
3357 CHECK( IOService, _service, service );
3358
3359 return( service->setAuthorizationID( authorization_id ) );
3360 }
3361
3362 /* Routine io_service_open_extended */
3363 kern_return_t is_io_service_open_extended(
3364 io_object_t _service,
3365 task_t owningTask,
3366 uint32_t connect_type,
3367 NDR_record_t ndr,
3368 io_buf_ptr_t properties,
3369 mach_msg_type_number_t propertiesCnt,
3370 kern_return_t * result,
3371 io_object_t *connection )
3372 {
3373 IOUserClient * client = 0;
3374 kern_return_t err = KERN_SUCCESS;
3375 IOReturn res = kIOReturnSuccess;
3376 OSDictionary * propertiesDict = 0;
3377 bool crossEndian;
3378 bool disallowAccess;
3379
3380 CHECK( IOService, _service, service );
3381
3382 if (!owningTask) return (kIOReturnBadArgument);
3383 assert(owningTask == current_task());
3384 if (owningTask != current_task()) return (kIOReturnBadArgument);
3385
3386 do
3387 {
3388 if (properties) return (kIOReturnUnsupported);
3389 #if 0
3390 {
3391 OSObject * obj;
3392 vm_offset_t data;
3393 vm_map_offset_t map_data;
3394
3395 if( propertiesCnt > sizeof(io_struct_inband_t))
3396 return( kIOReturnMessageTooLarge);
3397
3398 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3399 res = err;
3400 data = CAST_DOWN(vm_offset_t, map_data);
3401 if (KERN_SUCCESS == err)
3402 {
3403 // must return success after vm_map_copyout() succeeds
3404 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3405 vm_deallocate( kernel_map, data, propertiesCnt );
3406 propertiesDict = OSDynamicCast(OSDictionary, obj);
3407 if (!propertiesDict)
3408 {
3409 res = kIOReturnBadArgument;
3410 if (obj)
3411 obj->release();
3412 }
3413 }
3414 if (kIOReturnSuccess != res)
3415 break;
3416 }
3417 #endif
3418 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3419 if (crossEndian)
3420 {
3421 if (!propertiesDict)
3422 propertiesDict = OSDictionary::withCapacity(4);
3423 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3424 if (data)
3425 {
3426 if (propertiesDict)
3427 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3428 data->release();
3429 }
3430 }
3431
3432 res = service->newUserClient( owningTask, (void *) owningTask,
3433 connect_type, propertiesDict, &client );
3434
3435 if (propertiesDict)
3436 propertiesDict->release();
3437
3438 if (res == kIOReturnSuccess)
3439 {
3440 assert( OSDynamicCast(IOUserClient, client) );
3441
3442 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3443 client->closed = false;
3444
3445 disallowAccess = (crossEndian
3446 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3447 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3448 if (disallowAccess) res = kIOReturnUnsupported;
3449 #if CONFIG_MACF
3450 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3451 res = kIOReturnNotPermitted;
3452 #endif
3453
3454 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3455
3456 if (kIOReturnSuccess != res)
3457 {
3458 IOStatisticsClientCall();
3459 client->clientClose();
3460 client->release();
3461 client = 0;
3462 break;
3463 }
3464 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3465 if (creatorName)
3466 {
3467 client->setProperty(kIOUserClientCreatorKey, creatorName);
3468 creatorName->release();
3469 }
3470 client->setTerminateDefer(service, false);
3471 }
3472 }
3473 while (false);
3474
3475 *connection = client;
3476 *result = res;
3477
3478 return (err);
3479 }
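
/*
 * From user space this open path is driven by IOServiceOpen(), paired with
 * IOServiceClose() (see is_io_service_close below); a minimal sketch where
 * the connection type 0 is just a placeholder for the driver-defined value:
 *
 *   io_connect_t conn = IO_OBJECT_NULL;
 *   kern_return_t kr = IOServiceOpen(service, mach_task_self(), 0, &conn);
 *   if (kr == KERN_SUCCESS) {
 *       // ... IOConnectCall* traffic ...
 *       IOServiceClose(conn);
 *   }
 */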
3480
3481 /* Routine io_service_close */
3482 kern_return_t is_io_service_close(
3483 io_object_t connection )
3484 {
3485 OSSet * mappings;
3486 if ((mappings = OSDynamicCast(OSSet, connection)))
3487 return( kIOReturnSuccess );
3488
3489 CHECK( IOUserClient, connection, client );
3490
3491 IOStatisticsClientCall();
3492
3493 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3494 {
3495 client->clientClose();
3496 }
3497 else
3498 {
3499 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3500 client->getRegistryEntryID(), client->getName());
3501 }
3502
3503 return( kIOReturnSuccess );
3504 }
3505
3506 /* Routine io_connect_get_service */
3507 kern_return_t is_io_connect_get_service(
3508 io_object_t connection,
3509 io_object_t *service )
3510 {
3511 IOService * theService;
3512
3513 CHECK( IOUserClient, connection, client );
3514
3515 theService = client->getService();
3516 if( theService)
3517 theService->retain();
3518
3519 *service = theService;
3520
3521 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3522 }
3523
3524 /* Routine io_connect_set_notification_port */
3525 kern_return_t is_io_connect_set_notification_port(
3526 io_object_t connection,
3527 uint32_t notification_type,
3528 mach_port_t port,
3529 uint32_t reference)
3530 {
3531 CHECK( IOUserClient, connection, client );
3532
3533 IOStatisticsClientCall();
3534 return( client->registerNotificationPort( port, notification_type,
3535 (io_user_reference_t) reference ));
3536 }
3537
3538 /* Routine io_connect_set_notification_port_64 */
3539 kern_return_t is_io_connect_set_notification_port_64(
3540 io_object_t connection,
3541 uint32_t notification_type,
3542 mach_port_t port,
3543 io_user_reference_t reference)
3544 {
3545 CHECK( IOUserClient, connection, client );
3546
3547 IOStatisticsClientCall();
3548 return( client->registerNotificationPort( port, notification_type,
3549 reference ));
3550 }
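
/*
 * Both variants back the IOKit.framework call IOConnectSetNotificationPort(),
 * depending on the caller's word size; a sketch, assuming conn and notifyPort
 * were created earlier (the notification type 0 is only a placeholder):
 *
 *   mach_port_t port = IONotificationPortGetMachPort(notifyPort);
 *   kern_return_t kr = IOConnectSetNotificationPort(conn, 0, port, 0);
 */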
3551
3552 /* Routine io_connect_map_memory_into_task */
3553 kern_return_t is_io_connect_map_memory_into_task
3554 (
3555 io_connect_t connection,
3556 uint32_t memory_type,
3557 task_t into_task,
3558 mach_vm_address_t *address,
3559 mach_vm_size_t *size,
3560 uint32_t flags
3561 )
3562 {
3563 IOReturn err;
3564 IOMemoryMap * map;
3565
3566 CHECK( IOUserClient, connection, client );
3567
3568 if (!into_task) return (kIOReturnBadArgument);
3569
3570 IOStatisticsClientCall();
3571 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3572
3573 if( map) {
3574 *address = map->getAddress();
3575 if( size)
3576 *size = map->getSize();
3577
3578 if( client->sharedInstance
3579 || (into_task != current_task())) {
3580 // push a name out to the task owning the map,
3581 // so we can clean up maps
3582 mach_port_name_t name __unused =
3583 IOMachPort::makeSendRightForTask(
3584 into_task, map, IKOT_IOKIT_OBJECT );
3585
3586 } else {
3587 // keep it with the user client
3588 IOLockLock( gIOObjectPortLock);
3589 if( 0 == client->mappings)
3590 client->mappings = OSSet::withCapacity(2);
3591 if( client->mappings)
3592 client->mappings->setObject( map);
3593 IOLockUnlock( gIOObjectPortLock);
3594 map->release();
3595 }
3596 err = kIOReturnSuccess;
3597
3598 } else
3599 err = kIOReturnBadArgument;
3600
3601 return( err );
3602 }
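
/*
 * User space maps client memory with IOConnectMapMemory64(), which feeds this
 * routine; the memory type is driver-defined (0 below is a placeholder):
 *
 *   mach_vm_address_t addr = 0;
 *   mach_vm_size_t    size = 0;
 *   kern_return_t kr = IOConnectMapMemory64(conn, 0, mach_task_self(),
 *                                           &addr, &size, kIOMapAnywhere);
 */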
3603
3604 /* Routine io_connect_map_memory */
3605 kern_return_t is_io_connect_map_memory(
3606 io_object_t connect,
3607 uint32_t type,
3608 task_t task,
3609 uint32_t * mapAddr,
3610 uint32_t * mapSize,
3611 uint32_t flags )
3612 {
3613 IOReturn err;
3614 mach_vm_address_t address;
3615 mach_vm_size_t size;
3616
3617 address = SCALAR64(*mapAddr);
3618 size = SCALAR64(*mapSize);
3619
3620 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3621
3622 *mapAddr = SCALAR32(address);
3623 *mapSize = SCALAR32(size);
3624
3625 return (err);
3626 }
3627
3628 } /* extern "C" */
3629
3630 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3631 {
3632 OSIterator * iter;
3633 IOMemoryMap * map = 0;
3634
3635 IOLockLock(gIOObjectPortLock);
3636
3637 iter = OSCollectionIterator::withCollection(mappings);
3638 if(iter)
3639 {
3640 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3641 {
3642 if(mem == map->getMemoryDescriptor())
3643 {
3644 map->retain();
3645 mappings->removeObject(map);
3646 break;
3647 }
3648 }
3649 iter->release();
3650 }
3651
3652 IOLockUnlock(gIOObjectPortLock);
3653
3654 return (map);
3655 }
3656
3657 extern "C" {
3658
3659 /* Routine io_connect_unmap_memory_from_task */
3660 kern_return_t is_io_connect_unmap_memory_from_task
3661 (
3662 io_connect_t connection,
3663 uint32_t memory_type,
3664 task_t from_task,
3665 mach_vm_address_t address)
3666 {
3667 IOReturn err;
3668 IOOptionBits options = 0;
3669 IOMemoryDescriptor * memory;
3670 IOMemoryMap * map;
3671
3672 CHECK( IOUserClient, connection, client );
3673
3674 if (!from_task) return (kIOReturnBadArgument);
3675
3676 IOStatisticsClientCall();
3677 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3678
3679 if( memory && (kIOReturnSuccess == err)) {
3680
3681 options = (options & ~kIOMapUserOptionsMask)
3682 | kIOMapAnywhere | kIOMapReference;
3683
3684 map = memory->createMappingInTask( from_task, address, options );
3685 memory->release();
3686 if( map)
3687 {
3688 IOLockLock( gIOObjectPortLock);
3689 if( client->mappings)
3690 client->mappings->removeObject( map);
3691 IOLockUnlock( gIOObjectPortLock);
3692
3693 mach_port_name_t name = 0;
3694 if (from_task != current_task())
3695 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3696 if (name)
3697 {
3698 map->userClientUnmap();
3699 err = iokit_mod_send_right( from_task, name, -2 );
3700 err = kIOReturnSuccess;
3701 }
3702 else
3703 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3704 if (from_task == current_task())
3705 map->release();
3706 }
3707 else
3708 err = kIOReturnBadArgument;
3709 }
3710
3711 return( err );
3712 }
3713
3714 kern_return_t is_io_connect_unmap_memory(
3715 io_object_t connect,
3716 uint32_t type,
3717 task_t task,
3718 uint32_t mapAddr )
3719 {
3720 IOReturn err;
3721 mach_vm_address_t address;
3722
3723 address = SCALAR64(mapAddr);
3724
3725 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3726
3727 return (err);
3728 }
3729
3730
3731 /* Routine io_connect_add_client */
3732 kern_return_t is_io_connect_add_client(
3733 io_object_t connection,
3734 io_object_t connect_to)
3735 {
3736 CHECK( IOUserClient, connection, client );
3737 CHECK( IOUserClient, connect_to, to );
3738
3739 IOStatisticsClientCall();
3740 return( client->connectClient( to ) );
3741 }
3742
3743
3744 /* Routine io_connect_set_properties */
3745 kern_return_t is_io_connect_set_properties(
3746 io_object_t connection,
3747 io_buf_ptr_t properties,
3748 mach_msg_type_number_t propertiesCnt,
3749 kern_return_t * result)
3750 {
3751 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3752 }
3753
3754 /* Routine io_connect_method_var_output */
3755 kern_return_t is_io_connect_method_var_output
3756 (
3757 io_connect_t connection,
3758 uint32_t selector,
3759 io_scalar_inband64_t scalar_input,
3760 mach_msg_type_number_t scalar_inputCnt,
3761 io_struct_inband_t inband_input,
3762 mach_msg_type_number_t inband_inputCnt,
3763 mach_vm_address_t ool_input,
3764 mach_vm_size_t ool_input_size,
3765 io_struct_inband_t inband_output,
3766 mach_msg_type_number_t *inband_outputCnt,
3767 io_scalar_inband64_t scalar_output,
3768 mach_msg_type_number_t *scalar_outputCnt,
3769 io_buf_ptr_t *var_output,
3770 mach_msg_type_number_t *var_outputCnt
3771 )
3772 {
3773 CHECK( IOUserClient, connection, client );
3774
3775 IOExternalMethodArguments args;
3776 IOReturn ret;
3777 IOMemoryDescriptor * inputMD = 0;
3778 OSObject * structureVariableOutputData = 0;
3779
3780 bzero(&args.__reserved[0], sizeof(args.__reserved));
3781 args.__reservedA = 0;
3782 args.version = kIOExternalMethodArgumentsCurrentVersion;
3783
3784 args.selector = selector;
3785
3786 args.asyncWakePort = MACH_PORT_NULL;
3787 args.asyncReference = 0;
3788 args.asyncReferenceCount = 0;
3789 args.structureVariableOutputData = &structureVariableOutputData;
3790
3791 args.scalarInput = scalar_input;
3792 args.scalarInputCount = scalar_inputCnt;
3793 args.structureInput = inband_input;
3794 args.structureInputSize = inband_inputCnt;
3795
3796 if (ool_input)
3797 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3798 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3799 current_task());
3800
3801 args.structureInputDescriptor = inputMD;
3802
3803 args.scalarOutput = scalar_output;
3804 args.scalarOutputCount = *scalar_outputCnt;
3805 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3806 args.structureOutput = inband_output;
3807 args.structureOutputSize = *inband_outputCnt;
3808 args.structureOutputDescriptor = NULL;
3809 args.structureOutputDescriptorSize = 0;
3810
3811 IOStatisticsClientCall();
3812 ret = client->externalMethod( selector, &args );
3813
3814 *scalar_outputCnt = args.scalarOutputCount;
3815 *inband_outputCnt = args.structureOutputSize;
3816
3817 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3818 {
3819 OSSerialize * serialize;
3820 OSData * data;
3821 vm_size_t len;
3822
3823 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3824 {
3825 len = serialize->getLength();
3826 *var_outputCnt = len;
3827 ret = copyoutkdata(serialize->text(), len, var_output);
3828 }
3829 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3830 {
3831 len = data->getLength();
3832 *var_outputCnt = len;
3833 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3834 }
3835 else
3836 {
3837 ret = kIOReturnUnderrun;
3838 }
3839 }
3840
3841 if (inputMD)
3842 inputMD->release();
3843 if (structureVariableOutputData)
3844 structureVariableOutputData->release();
3845
3846 return (ret);
3847 }
3848
3849 /* Routine io_connect_method */
3850 kern_return_t is_io_connect_method
3851 (
3852 io_connect_t connection,
3853 uint32_t selector,
3854 io_scalar_inband64_t scalar_input,
3855 mach_msg_type_number_t scalar_inputCnt,
3856 io_struct_inband_t inband_input,
3857 mach_msg_type_number_t inband_inputCnt,
3858 mach_vm_address_t ool_input,
3859 mach_vm_size_t ool_input_size,
3860 io_struct_inband_t inband_output,
3861 mach_msg_type_number_t *inband_outputCnt,
3862 io_scalar_inband64_t scalar_output,
3863 mach_msg_type_number_t *scalar_outputCnt,
3864 mach_vm_address_t ool_output,
3865 mach_vm_size_t *ool_output_size
3866 )
3867 {
3868 CHECK( IOUserClient, connection, client );
3869
3870 IOExternalMethodArguments args;
3871 IOReturn ret;
3872 IOMemoryDescriptor * inputMD = 0;
3873 IOMemoryDescriptor * outputMD = 0;
3874
3875 bzero(&args.__reserved[0], sizeof(args.__reserved));
3876 args.__reservedA = 0;
3877 args.version = kIOExternalMethodArgumentsCurrentVersion;
3878
3879 args.selector = selector;
3880
3881 args.asyncWakePort = MACH_PORT_NULL;
3882 args.asyncReference = 0;
3883 args.asyncReferenceCount = 0;
3884 args.structureVariableOutputData = 0;
3885
3886 args.scalarInput = scalar_input;
3887 args.scalarInputCount = scalar_inputCnt;
3888 args.structureInput = inband_input;
3889 args.structureInputSize = inband_inputCnt;
3890
3891 if (ool_input)
3892 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3893 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3894 current_task());
3895
3896 args.structureInputDescriptor = inputMD;
3897
3898 args.scalarOutput = scalar_output;
3899 args.scalarOutputCount = *scalar_outputCnt;
3900 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3901 args.structureOutput = inband_output;
3902 args.structureOutputSize = *inband_outputCnt;
3903
3904 if (ool_output && ool_output_size)
3905 {
3906 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3907 kIODirectionIn, current_task());
3908 }
3909
3910 args.structureOutputDescriptor = outputMD;
3911 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3912
3913 IOStatisticsClientCall();
3914 ret = client->externalMethod( selector, &args );
3915
3916 *scalar_outputCnt = args.scalarOutputCount;
3917 *inband_outputCnt = args.structureOutputSize;
3918 *ool_output_size = args.structureOutputDescriptorSize;
3919
3920 if (inputMD)
3921 inputMD->release();
3922 if (outputMD)
3923 outputMD->release();
3924
3925 return (ret);
3926 }
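
/*
 * This is the general entry point behind IOConnectCallMethod() and its
 * IOConnectCallScalarMethod()/IOConnectCallStructMethod() convenience
 * wrappers in <IOKit/IOKitLib.h>; a hedged sketch passing one scalar to a
 * hypothetical selector 0:
 *
 *   uint64_t      in     = 42;
 *   uint64_t      out    = 0;
 *   uint32_t      outCnt = 1;
 *   kern_return_t kr     = IOConnectCallScalarMethod(conn, 0, &in, 1, &out, &outCnt);
 */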
3927
3928 /* Routine io_connect_async_method */
3929 kern_return_t is_io_connect_async_method
3930 (
3931 io_connect_t connection,
3932 mach_port_t wake_port,
3933 io_async_ref64_t reference,
3934 mach_msg_type_number_t referenceCnt,
3935 uint32_t selector,
3936 io_scalar_inband64_t scalar_input,
3937 mach_msg_type_number_t scalar_inputCnt,
3938 io_struct_inband_t inband_input,
3939 mach_msg_type_number_t inband_inputCnt,
3940 mach_vm_address_t ool_input,
3941 mach_vm_size_t ool_input_size,
3942 io_struct_inband_t inband_output,
3943 mach_msg_type_number_t *inband_outputCnt,
3944 io_scalar_inband64_t scalar_output,
3945 mach_msg_type_number_t *scalar_outputCnt,
3946 mach_vm_address_t ool_output,
3947 mach_vm_size_t * ool_output_size
3948 )
3949 {
3950 CHECK( IOUserClient, connection, client );
3951
3952 IOExternalMethodArguments args;
3953 IOReturn ret;
3954 IOMemoryDescriptor * inputMD = 0;
3955 IOMemoryDescriptor * outputMD = 0;
3956
3957 bzero(&args.__reserved[0], sizeof(args.__reserved));
3958 args.__reservedA = 0;
3959 args.version = kIOExternalMethodArgumentsCurrentVersion;
3960
3961 reference[0] = (io_user_reference_t) wake_port;
3962 if (vm_map_is_64bit(get_task_map(current_task())))
3963 reference[0] |= kIOUCAsync64Flag;
3964
3965 args.selector = selector;
3966
3967 args.asyncWakePort = wake_port;
3968 args.asyncReference = reference;
3969 args.asyncReferenceCount = referenceCnt;
args.structureVariableOutputData = 0;
3970
3971 args.scalarInput = scalar_input;
3972 args.scalarInputCount = scalar_inputCnt;
3973 args.structureInput = inband_input;
3974 args.structureInputSize = inband_inputCnt;
3975
3976 if (ool_input)
3977 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3978 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3979 current_task());
3980
3981 args.structureInputDescriptor = inputMD;
3982
3983 args.scalarOutput = scalar_output;
3984 args.scalarOutputCount = *scalar_outputCnt;
3985 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3986 args.structureOutput = inband_output;
3987 args.structureOutputSize = *inband_outputCnt;
3988
3989 if (ool_output)
3990 {
3991 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3992 kIODirectionIn, current_task());
3993 }
3994
3995 args.structureOutputDescriptor = outputMD;
3996 args.structureOutputDescriptorSize = *ool_output_size;
3997
3998 IOStatisticsClientCall();
3999 ret = client->externalMethod( selector, &args );
4000
4001 *inband_outputCnt = args.structureOutputSize;
4002 *ool_output_size = args.structureOutputDescriptorSize;
4003
4004 if (inputMD)
4005 inputMD->release();
4006 if (outputMD)
4007 outputMD->release();
4008
4009 return (ret);
4010 }
4011
4012 /* Routine io_connect_method_scalarI_scalarO */
4013 kern_return_t is_io_connect_method_scalarI_scalarO(
4014 io_object_t connect,
4015 uint32_t index,
4016 io_scalar_inband_t input,
4017 mach_msg_type_number_t inputCount,
4018 io_scalar_inband_t output,
4019 mach_msg_type_number_t * outputCount )
4020 {
4021 IOReturn err;
4022 uint32_t i;
4023 io_scalar_inband64_t _input;
4024 io_scalar_inband64_t _output;
4025
4026 mach_msg_type_number_t struct_outputCnt = 0;
4027 mach_vm_size_t ool_output_size = 0;
4028
4029 bzero(&_output[0], sizeof(_output));
4030 for (i = 0; i < inputCount; i++)
4031 _input[i] = SCALAR64(input[i]);
4032
4033 err = is_io_connect_method(connect, index,
4034 _input, inputCount,
4035 NULL, 0,
4036 0, 0,
4037 NULL, &struct_outputCnt,
4038 _output, outputCount,
4039 0, &ool_output_size);
4040
4041 for (i = 0; i < *outputCount; i++)
4042 output[i] = SCALAR32(_output[i]);
4043
4044 return (err);
4045 }
4046
4047 kern_return_t shim_io_connect_method_scalarI_scalarO(
4048 IOExternalMethod * method,
4049 IOService * object,
4050 const io_user_scalar_t * input,
4051 mach_msg_type_number_t inputCount,
4052 io_user_scalar_t * output,
4053 mach_msg_type_number_t * outputCount )
4054 {
4055 IOMethod func;
4056 io_scalar_inband_t _output;
4057 IOReturn err;
4058 err = kIOReturnBadArgument;
4059
4060 bzero(&_output[0], sizeof(_output));
4061 do {
4062
4063 if( inputCount != method->count0)
4064 {
4065 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4066 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4067 continue;
4068 }
4069 if( *outputCount != method->count1)
4070 {
4071 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4072 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4073 continue;
4074 }
4075
4076 func = method->func;
4077
4078 switch( inputCount) {
4079
4080 case 6:
4081 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4082 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4083 break;
4084 case 5:
4085 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4086 ARG32(input[3]), ARG32(input[4]),
4087 &_output[0] );
4088 break;
4089 case 4:
4090 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4091 ARG32(input[3]),
4092 &_output[0], &_output[1] );
4093 break;
4094 case 3:
4095 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4096 &_output[0], &_output[1], &_output[2] );
4097 break;
4098 case 2:
4099 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4100 &_output[0], &_output[1], &_output[2],
4101 &_output[3] );
4102 break;
4103 case 1:
4104 err = (object->*func)( ARG32(input[0]),
4105 &_output[0], &_output[1], &_output[2],
4106 &_output[3], &_output[4] );
4107 break;
4108 case 0:
4109 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4110 &_output[3], &_output[4], &_output[5] );
4111 break;
4112
4113 default:
4114 IOLog("%s: Bad method table\n", object->getName());
4115 }
4116 }
4117 while( false);
4118
4119 uint32_t i;
4120 for (i = 0; i < *outputCount; i++)
4121 output[i] = SCALAR32(_output[i]);
4122
4123 return( err);
4124 }
4125
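/*
 * The async variants below differ from their synchronous counterparts only
 * in carrying a wake port and an io_async_ref_t: the 32-bit reference words
 * are widened with REF64() and the call is funnelled into
 * is_io_connect_async_method().  The same pattern repeats for the
 * scalarI_structureO, scalarI_structureI and structureI_structureO async
 * routines that follow.
 */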
4126 /* Routine io_async_method_scalarI_scalarO */
4127 kern_return_t is_io_async_method_scalarI_scalarO(
4128 io_object_t connect,
4129 mach_port_t wake_port,
4130 io_async_ref_t reference,
4131 mach_msg_type_number_t referenceCnt,
4132 uint32_t index,
4133 io_scalar_inband_t input,
4134 mach_msg_type_number_t inputCount,
4135 io_scalar_inband_t output,
4136 mach_msg_type_number_t * outputCount )
4137 {
4138 IOReturn err;
4139 uint32_t i;
4140 io_scalar_inband64_t _input;
4141 io_scalar_inband64_t _output;
4142 io_async_ref64_t _reference;
4143
4144 bzero(&_output[0], sizeof(_output));
4145 for (i = 0; i < referenceCnt; i++)
4146 _reference[i] = REF64(reference[i]);
4147
4148 mach_msg_type_number_t struct_outputCnt = 0;
4149 mach_vm_size_t ool_output_size = 0;
4150
4151 for (i = 0; i < inputCount; i++)
4152 _input[i] = SCALAR64(input[i]);
4153
4154 err = is_io_connect_async_method(connect,
4155 wake_port, _reference, referenceCnt,
4156 index,
4157 _input, inputCount,
4158 NULL, 0,
4159 0, 0,
4160 NULL, &struct_outputCnt,
4161 _output, outputCount,
4162 0, &ool_output_size);
4163
4164 for (i = 0; i < *outputCount; i++)
4165 output[i] = SCALAR32(_output[i]);
4166
4167 return (err);
4168 }
4169 /* Routine io_async_method_scalarI_structureO */
4170 kern_return_t is_io_async_method_scalarI_structureO(
4171 io_object_t connect,
4172 mach_port_t wake_port,
4173 io_async_ref_t reference,
4174 mach_msg_type_number_t referenceCnt,
4175 uint32_t index,
4176 io_scalar_inband_t input,
4177 mach_msg_type_number_t inputCount,
4178 io_struct_inband_t output,
4179 mach_msg_type_number_t * outputCount )
4180 {
4181 uint32_t i;
4182 io_scalar_inband64_t _input;
4183 io_async_ref64_t _reference;
4184
4185 for (i = 0; i < referenceCnt; i++)
4186 _reference[i] = REF64(reference[i]);
4187
4188 mach_msg_type_number_t scalar_outputCnt = 0;
4189 mach_vm_size_t ool_output_size = 0;
4190
4191 for (i = 0; i < inputCount; i++)
4192 _input[i] = SCALAR64(input[i]);
4193
4194 return (is_io_connect_async_method(connect,
4195 wake_port, _reference, referenceCnt,
4196 index,
4197 _input, inputCount,
4198 NULL, 0,
4199 0, 0,
4200 output, outputCount,
4201 NULL, &scalar_outputCnt,
4202 0, &ool_output_size));
4203 }
4204
4205 /* Routine io_async_method_scalarI_structureI */
4206 kern_return_t is_io_async_method_scalarI_structureI(
4207 io_connect_t connect,
4208 mach_port_t wake_port,
4209 io_async_ref_t reference,
4210 mach_msg_type_number_t referenceCnt,
4211 uint32_t index,
4212 io_scalar_inband_t input,
4213 mach_msg_type_number_t inputCount,
4214 io_struct_inband_t inputStruct,
4215 mach_msg_type_number_t inputStructCount )
4216 {
4217 uint32_t i;
4218 io_scalar_inband64_t _input;
4219 io_async_ref64_t _reference;
4220
4221 for (i = 0; i < referenceCnt; i++)
4222 _reference[i] = REF64(reference[i]);
4223
4224 mach_msg_type_number_t scalar_outputCnt = 0;
4225 mach_msg_type_number_t inband_outputCnt = 0;
4226 mach_vm_size_t ool_output_size = 0;
4227
4228 for (i = 0; i < inputCount; i++)
4229 _input[i] = SCALAR64(input[i]);
4230
4231 return (is_io_connect_async_method(connect,
4232 wake_port, _reference, referenceCnt,
4233 index,
4234 _input, inputCount,
4235 inputStruct, inputStructCount,
4236 0, 0,
4237 NULL, &inband_outputCnt,
4238 NULL, &scalar_outputCnt,
4239 0, &ool_output_size));
4240 }
4241
4242 /* Routine io_async_method_structureI_structureO */
4243 kern_return_t is_io_async_method_structureI_structureO(
4244 io_object_t connect,
4245 mach_port_t wake_port,
4246 io_async_ref_t reference,
4247 mach_msg_type_number_t referenceCnt,
4248 uint32_t index,
4249 io_struct_inband_t input,
4250 mach_msg_type_number_t inputCount,
4251 io_struct_inband_t output,
4252 mach_msg_type_number_t * outputCount )
4253 {
4254 uint32_t i;
4255 mach_msg_type_number_t scalar_outputCnt = 0;
4256 mach_vm_size_t ool_output_size = 0;
4257 io_async_ref64_t _reference;
4258
4259 for (i = 0; i < referenceCnt; i++)
4260 _reference[i] = REF64(reference[i]);
4261
4262 return (is_io_connect_async_method(connect,
4263 wake_port, _reference, referenceCnt,
4264 index,
4265 NULL, 0,
4266 input, inputCount,
4267 0, 0,
4268 output, outputCount,
4269 NULL, &scalar_outputCnt,
4270 0, &ool_output_size));
4271 }
4272
4273
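/*
 * shim_io_async_method_scalarI_scalarO() mirrors the synchronous shim, but
 * first narrows the 64-bit async reference back into an io_async_ref_t with
 * REF32() and passes it as the leading argument of the legacy IOAsyncMethod,
 * ahead of the scalar inputs and output slots.
 */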
4274 kern_return_t shim_io_async_method_scalarI_scalarO(
4275 IOExternalAsyncMethod * method,
4276 IOService * object,
4277 mach_port_t asyncWakePort,
4278 io_user_reference_t * asyncReference,
4279 uint32_t asyncReferenceCount,
4280 const io_user_scalar_t * input,
4281 mach_msg_type_number_t inputCount,
4282 io_user_scalar_t * output,
4283 mach_msg_type_number_t * outputCount )
4284 {
4285 IOAsyncMethod func;
4286 uint32_t i;
4287 io_scalar_inband_t _output;
4288 IOReturn err;
4289 io_async_ref_t reference;
4290
4291 bzero(&_output[0], sizeof(_output));
4292 for (i = 0; i < asyncReferenceCount; i++)
4293 reference[i] = REF32(asyncReference[i]);
4294
4295 err = kIOReturnBadArgument;
4296
4297 do {
4298
4299 if( inputCount != method->count0)
4300 {
4301 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4302 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4303 continue;
4304 }
4305 if( *outputCount != method->count1)
4306 {
4307 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4308 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4309 continue;
4310 }
4311
4312 func = method->func;
4313
4314 switch( inputCount) {
4315
4316 case 6:
4317 err = (object->*func)( reference,
4318 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4319 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4320 break;
4321 case 5:
4322 err = (object->*func)( reference,
4323 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4324 ARG32(input[3]), ARG32(input[4]),
4325 &_output[0] );
4326 break;
4327 case 4:
4328 err = (object->*func)( reference,
4329 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4330 ARG32(input[3]),
4331 &_output[0], &_output[1] );
4332 break;
4333 case 3:
4334 err = (object->*func)( reference,
4335 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4336 &_output[0], &_output[1], &_output[2] );
4337 break;
4338 case 2:
4339 err = (object->*func)( reference,
4340 ARG32(input[0]), ARG32(input[1]),
4341 &_output[0], &_output[1], &_output[2],
4342 &_output[3] );
4343 break;
4344 case 1:
4345 err = (object->*func)( reference,
4346 ARG32(input[0]),
4347 &_output[0], &_output[1], &_output[2],
4348 &_output[3], &_output[4] );
4349 break;
4350 case 0:
4351 err = (object->*func)( reference,
4352 &_output[0], &_output[1], &_output[2],
4353 &_output[3], &_output[4], &_output[5] );
4354 break;
4355
4356 default:
4357 IOLog("%s: Bad method table\n", object->getName());
4358 }
4359 }
4360 while( false);
4361
4362 for (i = 0; i < *outputCount; i++)
4363 output[i] = SCALAR32(_output[i]);
4364
4365 return( err);
4366 }
4367
4368
4369 /* Routine io_connect_method_scalarI_structureO */
4370 kern_return_t is_io_connect_method_scalarI_structureO(
4371 io_object_t connect,
4372 uint32_t index,
4373 io_scalar_inband_t input,
4374 mach_msg_type_number_t inputCount,
4375 io_struct_inband_t output,
4376 mach_msg_type_number_t * outputCount )
4377 {
4378 uint32_t i;
4379 io_scalar_inband64_t _input;
4380
4381 mach_msg_type_number_t scalar_outputCnt = 0;
4382 mach_vm_size_t ool_output_size = 0;
4383
4384 for (i = 0; i < inputCount; i++)
4385 _input[i] = SCALAR64(input[i]);
4386
4387 return (is_io_connect_method(connect, index,
4388 _input, inputCount,
4389 NULL, 0,
4390 0, 0,
4391 output, outputCount,
4392 NULL, &scalar_outputCnt,
4393 0, &ool_output_size));
4394 }
4395
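/*
 * The scalarI_structureO and scalarI_structureI shims below apply the same
 * count checks, with one difference: for the structure side, method->count1
 * holds the expected byte size and kIOUCVariableStructureSize disables that
 * check, allowing variable-length buffers.  The buffer pointer and its size
 * (cast through (void *)) occupy the argument slots after the scalars.
 */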
4396 kern_return_t shim_io_connect_method_scalarI_structureO(
4397
4399 IOService * object,
4400 const io_user_scalar_t * input,
4401 mach_msg_type_number_t inputCount,
4402 io_struct_inband_t output,
4403 IOByteCount * outputCount )
4404 {
4405 IOMethod func;
4406 IOReturn err;
4407
4408 err = kIOReturnBadArgument;
4409
4410 do {
4411 if( inputCount != method->count0)
4412 {
4413 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4414 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4415 continue;
4416 }
4417 if( (kIOUCVariableStructureSize != method->count1)
4418 && (*outputCount != method->count1))
4419 {
4420 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4421 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4422 continue;
4423 }
4424
4425 func = method->func;
4426
4427 switch( inputCount) {
4428
4429 case 5:
4430 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4431 ARG32(input[3]), ARG32(input[4]),
4432 output );
4433 break;
4434 case 4:
4435 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4436 ARG32(input[3]),
4437 output, (void *)outputCount );
4438 break;
4439 case 3:
4440 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4441 output, (void *)outputCount, 0 );
4442 break;
4443 case 2:
4444 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4445 output, (void *)outputCount, 0, 0 );
4446 break;
4447 case 1:
4448 err = (object->*func)( ARG32(input[0]),
4449 output, (void *)outputCount, 0, 0, 0 );
4450 break;
4451 case 0:
4452 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4453 break;
4454
4455 default:
4456 IOLog("%s: Bad method table\n", object->getName());
4457 }
4458 }
4459 while( false);
4460
4461 return( err);
4462 }
4463
4464
4465 kern_return_t shim_io_async_method_scalarI_structureO(
4466 IOExternalAsyncMethod * method,
4467 IOService * object,
4468 mach_port_t asyncWakePort,
4469 io_user_reference_t * asyncReference,
4470 uint32_t asyncReferenceCount,
4471 const io_user_scalar_t * input,
4472 mach_msg_type_number_t inputCount,
4473 io_struct_inband_t output,
4474 mach_msg_type_number_t * outputCount )
4475 {
4476 IOAsyncMethod func;
4477 uint32_t i;
4478 IOReturn err;
4479 io_async_ref_t reference;
4480
4481 for (i = 0; i < asyncReferenceCount; i++)
4482 reference[i] = REF32(asyncReference[i]);
4483
4484 err = kIOReturnBadArgument;
4485 do {
4486 if( inputCount != method->count0)
4487 {
4488 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4489 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4490 continue;
4491 }
4492 if( (kIOUCVariableStructureSize != method->count1)
4493 && (*outputCount != method->count1))
4494 {
4495 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4496 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4497 continue;
4498 }
4499
4500 func = method->func;
4501
4502 switch( inputCount) {
4503
4504 case 5:
4505 err = (object->*func)( reference,
4506 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4507 ARG32(input[3]), ARG32(input[4]),
4508 output );
4509 break;
4510 case 4:
4511 err = (object->*func)( reference,
4512 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4513 ARG32(input[3]),
4514 output, (void *)outputCount );
4515 break;
4516 case 3:
4517 err = (object->*func)( reference,
4518 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4519 output, (void *)outputCount, 0 );
4520 break;
4521 case 2:
4522 err = (object->*func)( reference,
4523 ARG32(input[0]), ARG32(input[1]),
4524 output, (void *)outputCount, 0, 0 );
4525 break;
4526 case 1:
4527 err = (object->*func)( reference,
4528 ARG32(input[0]),
4529 output, (void *)outputCount, 0, 0, 0 );
4530 break;
4531 case 0:
4532 err = (object->*func)( reference,
4533 output, (void *)outputCount, 0, 0, 0, 0 );
4534 break;
4535
4536 default:
4537 IOLog("%s: Bad method table\n", object->getName());
4538 }
4539 }
4540 while( false);
4541
4542 return( err);
4543 }
4544
4545 /* Routine io_connect_method_scalarI_structureI */
4546 kern_return_t is_io_connect_method_scalarI_structureI(
4547 io_connect_t connect,
4548 uint32_t index,
4549 io_scalar_inband_t input,
4550 mach_msg_type_number_t inputCount,
4551 io_struct_inband_t inputStruct,
4552 mach_msg_type_number_t inputStructCount )
4553 {
4554 uint32_t i;
4555 io_scalar_inband64_t _input;
4556
4557 mach_msg_type_number_t scalar_outputCnt = 0;
4558 mach_msg_type_number_t inband_outputCnt = 0;
4559 mach_vm_size_t ool_output_size = 0;
4560
4561 for (i = 0; i < inputCount; i++)
4562 _input[i] = SCALAR64(input[i]);
4563
4564 return (is_io_connect_method(connect, index,
4565 _input, inputCount,
4566 inputStruct, inputStructCount,
4567 0, 0,
4568 NULL, &inband_outputCnt,
4569 NULL, &scalar_outputCnt,
4570 0, &ool_output_size));
4571 }
4572
4573 kern_return_t shim_io_connect_method_scalarI_structureI(
4574 IOExternalMethod * method,
4575 IOService * object,
4576 const io_user_scalar_t * input,
4577 mach_msg_type_number_t inputCount,
4578 io_struct_inband_t inputStruct,
4579 mach_msg_type_number_t inputStructCount )
4580 {
4581 IOMethod func;
4582 IOReturn err = kIOReturnBadArgument;
4583
4584 do
4585 {
4586 if (inputCount != method->count0)
4587 {
4588 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4589 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4590 continue;
4591 }
4592 if( (kIOUCVariableStructureSize != method->count1)
4593 && (inputStructCount != method->count1))
4594 {
4595 IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4596 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4597 continue;
4598 }
4599
4600 func = method->func;
4601
4602 switch( inputCount) {
4603
4604 case 5:
4605 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4606 ARG32(input[3]), ARG32(input[4]),
4607 inputStruct );
4608 break;
4609 case 4:
4610 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4611 ARG32(input[3]),
4612 inputStruct, (void *)(uintptr_t)inputStructCount );
4613 break;
4614 case 3:
4615 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4616 inputStruct, (void *)(uintptr_t)inputStructCount,
4617 0 );
4618 break;
4619 case 2:
4620 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4621 inputStruct, (void *)(uintptr_t)inputStructCount,
4622 0, 0 );
4623 break;
4624 case 1:
4625 err = (object->*func)( ARG32(input[0]),
4626 inputStruct, (void *)(uintptr_t)inputStructCount,
4627 0, 0, 0 );
4628 break;
4629 case 0:
4630 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4631 0, 0, 0, 0 );
4632 break;
4633
4634 default:
4635 IOLog("%s: Bad method table\n", object->getName());
4636 }
4637 }
4638 while (false);
4639
4640 return( err);
4641 }
4642
4643 kern_return_t shim_io_async_method_scalarI_structureI(
4644 IOExternalAsyncMethod * method,
4645 IOService * object,
4646 mach_port_t asyncWakePort,
4647 io_user_reference_t * asyncReference,
4648 uint32_t asyncReferenceCount,
4649 const io_user_scalar_t * input,
4650 mach_msg_type_number_t inputCount,
4651 io_struct_inband_t inputStruct,
4652 mach_msg_type_number_t inputStructCount )
4653 {
4654 IOAsyncMethod func;
4655 uint32_t i;
4656 IOReturn err = kIOReturnBadArgument;
4657 io_async_ref_t reference;
4658
4659 for (i = 0; i < asyncReferenceCount; i++)
4660 reference[i] = REF32(asyncReference[i]);
4661
4662 do
4663 {
4664 if (inputCount != method->count0)
4665 {
4666 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4667 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4668 continue;
4669 }
4670 if( (kIOUCVariableStructureSize != method->count1)
4671 && (inputStructCount != method->count1))
4672 {
4673 IOLog("%s:%d %s: IOUserClient inputStructCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4674 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4675 continue;
4676 }
4677
4678 func = method->func;
4679
4680 switch( inputCount) {
4681
4682 case 5:
4683 err = (object->*func)( reference,
4684 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4685 ARG32(input[3]), ARG32(input[4]),
4686 inputStruct );
4687 break;
4688 case 4:
4689 err = (object->*func)( reference,
4690 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4691 ARG32(input[3]),
4692 inputStruct, (void *)(uintptr_t)inputStructCount );
4693 break;
4694 case 3:
4695 err = (object->*func)( reference,
4696 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4697 inputStruct, (void *)(uintptr_t)inputStructCount,
4698 0 );
4699 break;
4700 case 2:
4701 err = (object->*func)( reference,
4702 ARG32(input[0]), ARG32(input[1]),
4703 inputStruct, (void *)(uintptr_t)inputStructCount,
4704 0, 0 );
4705 break;
4706 case 1:
4707 err = (object->*func)( reference,
4708 ARG32(input[0]),
4709 inputStruct, (void *)(uintptr_t)inputStructCount,
4710 0, 0, 0 );
4711 break;
4712 case 0:
4713 err = (object->*func)( reference,
4714 inputStruct, (void *)(uintptr_t)inputStructCount,
4715 0, 0, 0, 0 );
4716 break;
4717
4718 default:
4719 IOLog("%s: Bad method table\n", object->getName());
4720 }
4721 }
4722 while (false);
4723
4724 return( err);
4725 }
4726
4727 /* Routine io_connect_method_structureI_structureO */
4728 kern_return_t is_io_connect_method_structureI_structureO(
4729 io_object_t connect,
4730 uint32_t index,
4731 io_struct_inband_t input,
4732 mach_msg_type_number_t inputCount,
4733 io_struct_inband_t output,
4734 mach_msg_type_number_t * outputCount )
4735 {
4736 mach_msg_type_number_t scalar_outputCnt = 0;
4737 mach_vm_size_t ool_output_size = 0;
4738
4739 return (is_io_connect_method(connect, index,
4740 NULL, 0,
4741 input, inputCount,
4742 0, 0,
4743 output, outputCount,
4744 NULL, &scalar_outputCnt,
4745 0, &ool_output_size));
4746 }
4747
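/*
 * shim_io_connect_method_structureI_structureO() dispatches on which
 * structure sizes the legacy method declares: a non-zero count1 means the
 * output buffer and its length pointer are passed (preceded by the input
 * buffer and length when count0 is also non-zero); a zero count1 means only
 * the input buffer and its length are passed.  kIOUCVariableStructureSize
 * in either count skips the corresponding size check.
 */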
4748 kern_return_t shim_io_connect_method_structureI_structureO(
4749 IOExternalMethod * method,
4750 IOService * object,
4751 io_struct_inband_t input,
4752 mach_msg_type_number_t inputCount,
4753 io_struct_inband_t output,
4754 IOByteCount * outputCount )
4755 {
4756 IOMethod func;
4757 IOReturn err = kIOReturnBadArgument;
4758
4759 do
4760 {
4761 if( (kIOUCVariableStructureSize != method->count0)
4762 && (inputCount != method->count0))
4763 {
4764 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4765 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4766 continue;
4767 }
4768 if( (kIOUCVariableStructureSize != method->count1)
4769 && (*outputCount != method->count1))
4770 {
4771 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4772 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4773 continue;
4774 }
4775
4776 func = method->func;
4777
4778 if( method->count1) {
4779 if( method->count0) {
4780 err = (object->*func)( input, output,
4781 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4782 } else {
4783 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4784 }
4785 } else {
4786 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4787 }
4788 }
4789 while( false);
4790
4791
4792 return( err);
4793 }
4794
4795 kern_return_t shim_io_async_method_structureI_structureO(
4796 IOExternalAsyncMethod * method,
4797 IOService * object,
4798 mach_port_t asyncWakePort,
4799 io_user_reference_t * asyncReference,
4800 uint32_t asyncReferenceCount,
4801 io_struct_inband_t input,
4802 mach_msg_type_number_t inputCount,
4803 io_struct_inband_t output,
4804 mach_msg_type_number_t * outputCount )
4805 {
4806 IOAsyncMethod func;
4807 uint32_t i;
4808 IOReturn err;
4809 io_async_ref_t reference;
4810
4811 for (i = 0; i < asyncReferenceCount; i++)
4812 reference[i] = REF32(asyncReference[i]);
4813
4814 err = kIOReturnBadArgument;
4815 do
4816 {
4817 if( (kIOUCVariableStructureSize != method->count0)
4818 && (inputCount != method->count0))
4819 {
4820 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4821 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4822 continue;
4823 }
4824 if( (kIOUCVariableStructureSize != method->count1)
4825 && (*outputCount != method->count1))
4826 {
4827 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4828 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4829 continue;
4830 }
4831
4832 func = method->func;
4833
4834 if( method->count1) {
4835 if( method->count0) {
4836 err = (object->*func)( reference,
4837 input, output,
4838 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4839 } else {
4840 err = (object->*func)( reference,
4841 output, outputCount, 0, 0, 0, 0 );
4842 }
4843 } else {
4844 err = (object->*func)( reference,
4845 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4846 }
4847 }
4848 while( false);
4849
4850 return( err);
4851 }
4852
4853 #if !NO_KEXTD
4854 bool gIOKextdClearedBusy = false;
4855 #endif
4856
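/*
 * is_io_catalog_send_data() is the kernel side of the catalogue management
 * interface used by kextd and other privileged callers: it verifies the
 * master device port, copies the caller's buffer out of the message with
 * vm_map_copyout(), unserializes it into an OSArray/OSDictionary, and then
 * adds, removes or resets driver personalities in gIOCatalogue (or records
 * kextd lifecycle events) according to 'flag'.
 *
 * A hedged sketch of the user-space side, assuming IOKitLib's
 * IOCatalogueSendData() wrapper for this MIG routine (illustrative, not
 * part of this file):
 *
 *     kern_return_t kr = IOCatalogueSendData(masterPort,
 *                            kIOCatalogAddDrivers, xmlPersonalities, xmlLength);
 */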
4857 /* Routine io_catalog_send_data */
4858 kern_return_t is_io_catalog_send_data(
4859 mach_port_t master_port,
4860 uint32_t flag,
4861 io_buf_ptr_t inData,
4862 mach_msg_type_number_t inDataCount,
4863 kern_return_t * result)
4864 {
4865 OSObject * obj = 0;
4866 vm_offset_t data;
4867 kern_return_t kr = kIOReturnError;
4868
4869 //printf("io_catalog_send_data called. flag: %d\n", flag);
4870
4871 if( master_port != master_device_port)
4872 return kIOReturnNotPrivileged;
4873
4874 if( (flag != kIOCatalogRemoveKernelLinker &&
4875 flag != kIOCatalogKextdActive &&
4876 flag != kIOCatalogKextdFinishedLaunching) &&
4877 ( !inData || !inDataCount) )
4878 {
4879 return kIOReturnBadArgument;
4880 }
4881
4882 if (inData) {
4883 vm_map_offset_t map_data;
4884
4885 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4886 return( kIOReturnMessageTooLarge);
4887
4888 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4889 data = CAST_DOWN(vm_offset_t, map_data);
4890
4891 if( kr != KERN_SUCCESS)
4892 return kr;
4893
4894 // must return success after vm_map_copyout() succeeds
4895
4896 if( inDataCount ) {
4897 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4898 vm_deallocate( kernel_map, data, inDataCount );
4899 if( !obj) {
4900 *result = kIOReturnNoMemory;
4901 return( KERN_SUCCESS);
4902 }
4903 }
4904 }
4905
4906 switch ( flag ) {
4907 case kIOCatalogResetDrivers:
4908 case kIOCatalogResetDriversNoMatch: {
4909 OSArray * array;
4910
4911 array = OSDynamicCast(OSArray, obj);
4912 if (array) {
4913 if ( !gIOCatalogue->resetAndAddDrivers(array,
4914 flag == kIOCatalogResetDrivers) ) {
4915
4916 kr = kIOReturnError;
4917 }
4918 } else {
4919 kr = kIOReturnBadArgument;
4920 }
4921 }
4922 break;
4923
4924 case kIOCatalogAddDrivers:
4925 case kIOCatalogAddDriversNoMatch: {
4926 OSArray * array;
4927
4928 array = OSDynamicCast(OSArray, obj);
4929 if ( array ) {
4930 if ( !gIOCatalogue->addDrivers( array ,
4931 flag == kIOCatalogAddDrivers) ) {
4932 kr = kIOReturnError;
4933 }
4934 }
4935 else {
4936 kr = kIOReturnBadArgument;
4937 }
4938 }
4939 break;
4940
4941 case kIOCatalogRemoveDrivers:
4942 case kIOCatalogRemoveDriversNoMatch: {
4943 OSDictionary * dict;
4944
4945 dict = OSDynamicCast(OSDictionary, obj);
4946 if ( dict ) {
4947 if ( !gIOCatalogue->removeDrivers( dict,
4948 flag == kIOCatalogRemoveDrivers ) ) {
4949 kr = kIOReturnError;
4950 }
4951 }
4952 else {
4953 kr = kIOReturnBadArgument;
4954 }
4955 }
4956 break;
4957
4958 case kIOCatalogStartMatching: {
4959 OSDictionary * dict;
4960
4961 dict = OSDynamicCast(OSDictionary, obj);
4962 if ( dict ) {
4963 if ( !gIOCatalogue->startMatching( dict ) ) {
4964 kr = kIOReturnError;
4965 }
4966 }
4967 else {
4968 kr = kIOReturnBadArgument;
4969 }
4970 }
4971 break;
4972
4973 case kIOCatalogRemoveKernelLinker:
4974 kr = KERN_NOT_SUPPORTED;
4975 break;
4976
4977 case kIOCatalogKextdActive:
4978 #if !NO_KEXTD
4979 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4980 OSKext::setKextdActive();
4981
4982 /* Dump all nonloaded startup extensions; kextd will now send them
4983 * down on request.
4984 */
4985 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4986 #endif
4987 kr = kIOReturnSuccess;
4988 break;
4989
4990 case kIOCatalogKextdFinishedLaunching: {
4991 #if !NO_KEXTD
4992 if (!gIOKextdClearedBusy) {
4993 IOService * serviceRoot = IOService::getServiceRoot();
4994 if (serviceRoot) {
4995 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4996 serviceRoot->adjustBusy(-1);
4997 gIOKextdClearedBusy = true;
4998 }
4999 }
5000 #endif
5001 kr = kIOReturnSuccess;
5002 }
5003 break;
5004
5005 default:
5006 kr = kIOReturnBadArgument;
5007 break;
5008 }
5009
5010 if (obj) obj->release();
5011
5012 *result = kr;
5013 return( KERN_SUCCESS);
5014 }
5015
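/*
 * is_io_catalog_terminate() additionally requires administrator privilege
 * via clientHasPrivilege(); on non-SECURE_KERNEL builds it can synchronously
 * terminate every registered IOService whose class matches 'name', or ask
 * the IOCatalogue to terminate (and optionally unload) the drivers belonging
 * to a module.
 */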
5016 /* Routine io_catalog_terminate */
5017 kern_return_t is_io_catalog_terminate(
5018 mach_port_t master_port,
5019 uint32_t flag,
5020 io_name_t name )
5021 {
5022 kern_return_t kr;
5023
5024 if( master_port != master_device_port )
5025 return kIOReturnNotPrivileged;
5026
5027 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5028 kIOClientPrivilegeAdministrator );
5029 if( kIOReturnSuccess != kr)
5030 return( kr );
5031
5032 switch ( flag ) {
5033 #if !defined(SECURE_KERNEL)
5034 case kIOCatalogServiceTerminate:
5035 OSIterator * iter;
5036 IOService * service;
5037
5038 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5039 kIORegistryIterateRecursively);
5040 if ( !iter )
5041 return kIOReturnNoMemory;
5042
5043 do {
5044 iter->reset();
5045 while( (service = (IOService *)iter->getNextObject()) ) {
5046 if( service->metaCast(name)) {
5047 if ( !service->terminate( kIOServiceRequired
5048 | kIOServiceSynchronous) ) {
5049 kr = kIOReturnUnsupported;
5050 break;
5051 }
5052 }
5053 }
5054 } while( !service && !iter->isValid());
5055 iter->release();
5056 break;
5057
5058 case kIOCatalogModuleUnload:
5059 case kIOCatalogModuleTerminate:
5060 kr = gIOCatalogue->terminateDriversForModule(name,
5061 flag == kIOCatalogModuleUnload);
5062 break;
5063 #endif
5064
5065 default:
5066 kr = kIOReturnBadArgument;
5067 break;
5068 }
5069
5070 return( kr );
5071 }
5072
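/*
 * is_io_catalog_get_data() serializes the requested catalogue information
 * into an OSSerialize buffer, copies it into a freshly allocated kernel_map
 * region, and returns it to the caller as out-of-line data via
 * vm_map_copyin().
 */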
5073 /* Routine io_catalog_get_data */
5074 kern_return_t is_io_catalog_get_data(
5075 mach_port_t master_port,
5076 uint32_t flag,
5077 io_buf_ptr_t *outData,
5078 mach_msg_type_number_t *outDataCount)
5079 {
5080 kern_return_t kr = kIOReturnSuccess;
5081 OSSerialize * s;
5082
5083 if( master_port != master_device_port)
5084 return kIOReturnNotPrivileged;
5085
5086 //printf("io_catalog_get_data called. flag: %d\n", flag);
5087
5088 s = OSSerialize::withCapacity(4096);
5089 if ( !s )
5090 return kIOReturnNoMemory;
5091
5092 kr = gIOCatalogue->serializeData(flag, s);
5093
5094 if ( kr == kIOReturnSuccess ) {
5095 vm_offset_t data;
5096 vm_map_copy_t copy;
5097 vm_size_t size;
5098
5099 size = s->getLength();
5100 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
5101 if ( kr == kIOReturnSuccess ) {
5102 bcopy(s->text(), (void *)data, size);
5103 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5104 (vm_map_size_t)size, true, &copy);
5105 *outData = (char *)copy;
5106 *outDataCount = size;
5107 }
5108 }
5109
5110 s->release();
5111
5112 return kr;
5113 }
5114
5115 /* Routine io_catalog_get_gen_count */
5116 kern_return_t is_io_catalog_get_gen_count(
5117 mach_port_t master_port,
5118 uint32_t *genCount)
5119 {
5120 if( master_port != master_device_port)
5121 return kIOReturnNotPrivileged;
5122
5123 //printf("io_catalog_get_gen_count called.\n");
5124
5125 if ( !genCount )
5126 return kIOReturnBadArgument;
5127
5128 *genCount = gIOCatalogue->getGenerationCount();
5129
5130 return kIOReturnSuccess;
5131 }
5132
5133 /* Routine io_catalog_module_loaded.
5134 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5135 */
5136 kern_return_t is_io_catalog_module_loaded(
5137 mach_port_t master_port,
5138 io_name_t name)
5139 {
5140 if( master_port != master_device_port)
5141 return kIOReturnNotPrivileged;
5142
5143 //printf("io_catalog_module_loaded called. name %s\n", name);
5144
5145 if ( !name )
5146 return kIOReturnBadArgument;
5147
5148 gIOCatalogue->moduleHasLoaded(name);
5149
5150 return kIOReturnSuccess;
5151 }
5152
5153 kern_return_t is_io_catalog_reset(
5154 mach_port_t master_port,
5155 uint32_t flag)
5156 {
5157 if( master_port != master_device_port)
5158 return kIOReturnNotPrivileged;
5159
5160 switch ( flag ) {
5161 case kIOCatalogResetDefault:
5162 gIOCatalogue->reset();
5163 break;
5164
5165 default:
5166 return kIOReturnBadArgument;
5167 }
5168
5169 return kIOReturnSuccess;
5170 }
5171
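/*
 * iokit_user_client_trap() is the fast-path Mach trap for user clients: it
 * resolves the caller's connect reference to an IOUserClient, asks it for
 * the IOExternalTrap registered at args->index, and calls that member
 * function directly with up to six untyped arguments, bypassing MIG.
 *
 * A minimal user-space sketch, assuming IOKitLib's IOConnectTrap6() as the
 * usual way to reach this trap (illustrative, not part of this file):
 *
 *     kern_return_t kr = IOConnectTrap6(connect, trapIndex,
 *                                       p1, p2, p3, p4, p5, p6);
 */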
5172 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5173 {
5174 kern_return_t result = kIOReturnBadArgument;
5175 IOUserClient *userClient;
5176
5177 if ((userClient = OSDynamicCast(IOUserClient,
5178 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5179 IOExternalTrap *trap;
5180 IOService *target = NULL;
5181
5182 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5183
5184 if (trap && target) {
5185 IOTrap func;
5186
5187 func = trap->func;
5188
5189 if (func) {
5190 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5191 }
5192 }
5193
5194 iokit_remove_connect_reference(userClient);
5195 }
5196
5197 return result;
5198 }
5199
5200 } /* extern "C" */
5201
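/*
 * IOUserClient::externalMethod() is the modern dispatch entry point.  When
 * an IOExternalMethodDispatch is supplied, the declared scalar counts and
 * structure sizes are validated against the incoming
 * IOExternalMethodArguments before the target function runs; otherwise the
 * call falls back to the pre-Leopard IOExternalMethod / IOExternalAsyncMethod
 * tables and the shim_* routines above, selected by kIOUCTypeMask.
 *
 * A hedged sketch of a subclass override using a dispatch table (class,
 * selector and method names are hypothetical, not from this file):
 *
 *     IOReturn MyUserClient::externalMethod(uint32_t selector,
 *         IOExternalMethodArguments * args,
 *         IOExternalMethodDispatch * dispatch, OSObject * target, void * ref)
 *     {
 *         static const IOExternalMethodDispatch sMethods[1] = {
 *             // function, checkScalarInputCount, checkStructureInputSize,
 *             // checkScalarOutputCount, checkStructureOutputSize
 *             { (IOExternalMethodAction) &MyUserClient::sDoSomething, 2, 0, 1, 0 },
 *         };
 *         if (selector < 1)
 *         {
 *             dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *             if (!target) target = this;
 *         }
 *         return IOUserClient::externalMethod(selector, args, dispatch, target, ref);
 *     }
 *
 * The matching user-space calls are IOConnectCallScalarMethod(),
 * IOConnectCallStructMethod() and friends in IOKitLib, which populate the
 * IOExternalMethodArguments seen here.
 */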
5202 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5203 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5204 {
5205 IOReturn err;
5206 IOService * object;
5207 IOByteCount structureOutputSize;
5208
5209 if (dispatch)
5210 {
5211 uint32_t count;
5212 count = dispatch->checkScalarInputCount;
5213 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5214 {
5215 return (kIOReturnBadArgument);
5216 }
5217
5218 count = dispatch->checkStructureInputSize;
5219 if ((kIOUCVariableStructureSize != count)
5220 && (count != ((args->structureInputDescriptor)
5221 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5222 {
5223 return (kIOReturnBadArgument);
5224 }
5225
5226 count = dispatch->checkScalarOutputCount;
5227 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5228 {
5229 return (kIOReturnBadArgument);
5230 }
5231
5232 count = dispatch->checkStructureOutputSize;
5233 if ((kIOUCVariableStructureSize != count)
5234 && (count != ((args->structureOutputDescriptor)
5235 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5236 {
5237 return (kIOReturnBadArgument);
5238 }
5239
5240 if (dispatch->function)
5241 err = (*dispatch->function)(target, reference, args);
5242 else
5243 err = kIOReturnNoCompletion; /* implementor can dispatch */
5244
5245 return (err);
5246 }
5247
5248
5249 // pre-Leopard API's don't do ool structs
5250 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5251 {
5252 err = kIOReturnIPCError;
5253 return (err);
5254 }
5255
5256 structureOutputSize = args->structureOutputSize;
5257
5258 if (args->asyncWakePort)
5259 {
5260 IOExternalAsyncMethod * method;
5261 object = 0;
5262 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5263 return (kIOReturnUnsupported);
5264
5265 if (kIOUCForegroundOnly & method->flags)
5266 {
5267 if (task_is_gpu_denied(current_task()))
5268 return (kIOReturnNotPermitted);
5269 }
5270
5271 switch (method->flags & kIOUCTypeMask)
5272 {
5273 case kIOUCScalarIStructI:
5274 err = shim_io_async_method_scalarI_structureI( method, object,
5275 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5276 args->scalarInput, args->scalarInputCount,
5277 (char *)args->structureInput, args->structureInputSize );
5278 break;
5279
5280 case kIOUCScalarIScalarO:
5281 err = shim_io_async_method_scalarI_scalarO( method, object,
5282 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5283 args->scalarInput, args->scalarInputCount,
5284 args->scalarOutput, &args->scalarOutputCount );
5285 break;
5286
5287 case kIOUCScalarIStructO:
5288 err = shim_io_async_method_scalarI_structureO( method, object,
5289 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5290 args->scalarInput, args->scalarInputCount,
5291 (char *) args->structureOutput, &args->structureOutputSize );
5292 break;
5293
5294
5295 case kIOUCStructIStructO:
5296 err = shim_io_async_method_structureI_structureO( method, object,
5297 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5298 (char *)args->structureInput, args->structureInputSize,
5299 (char *) args->structureOutput, &args->structureOutputSize );
5300 break;
5301
5302 default:
5303 err = kIOReturnBadArgument;
5304 break;
5305 }
5306 }
5307 else
5308 {
5309 IOExternalMethod * method;
5310 object = 0;
5311 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5312 return (kIOReturnUnsupported);
5313
5314 if (kIOUCForegroundOnly & method->flags)
5315 {
5316 if (task_is_gpu_denied(current_task()))
5317 return (kIOReturnNotPermitted);
5318 }
5319
5320 switch (method->flags & kIOUCTypeMask)
5321 {
5322 case kIOUCScalarIStructI:
5323 err = shim_io_connect_method_scalarI_structureI( method, object,
5324 args->scalarInput, args->scalarInputCount,
5325 (char *) args->structureInput, args->structureInputSize );
5326 break;
5327
5328 case kIOUCScalarIScalarO:
5329 err = shim_io_connect_method_scalarI_scalarO( method, object,
5330 args->scalarInput, args->scalarInputCount,
5331 args->scalarOutput, &args->scalarOutputCount );
5332 break;
5333
5334 case kIOUCScalarIStructO:
5335 err = shim_io_connect_method_scalarI_structureO( method, object,
5336 args->scalarInput, args->scalarInputCount,
5337 (char *) args->structureOutput, &structureOutputSize );
5338 break;
5339
5340
5341 case kIOUCStructIStructO:
5342 err = shim_io_connect_method_structureI_structureO( method, object,
5343 (char *) args->structureInput, args->structureInputSize,
5344 (char *) args->structureOutput, &structureOutputSize );
5345 break;
5346
5347 default:
5348 err = kIOReturnBadArgument;
5349 break;
5350 }
5351 }
5352
5353 args->structureOutputSize = structureOutputSize;
5354
5355 return (err);
5356 }
5357
5358 #if __LP64__
5359 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5360 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5361 #else
5362 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5363 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5364 #endif
5365 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5366 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5367 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5368 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5369 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5370 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5371 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5372 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5373 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5374 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5375 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5376 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5377 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5378 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5379