apple/xnu.git: iokit/Kernel/IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #include <mach/sdt.h>
49
50 #if CONFIG_MACF
51
52 extern "C" {
53 #include <security/mac_framework.h>
54 };
55 #include <sys/kauth.h>
56
57 #define IOMACF_LOG 0
58
59 #endif /* CONFIG_MACF */
60
61 #include <IOKit/assert.h>
62
63 #include "IOServicePrivate.h"
64 #include "IOKitKernelInternal.h"
65
66 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
67 #define SCALAR32(x) ((uint32_t )x)
68 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
69 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
70 #define REF32(x) ((int)(x))
71
72 enum
73 {
74 kIOUCAsync0Flags = 3ULL,
75 kIOUCAsync64Flag = 1ULL,
76 kIOUCAsyncErrorLoggedFlag = 2ULL
77 };
78
79 #if IOKITSTATS
80
81 #define IOStatisticsRegisterCounter() \
82 do { \
83 reserved->counter = IOStatistics::registerUserClient(this); \
84 } while (0)
85
86 #define IOStatisticsUnregisterCounter() \
87 do { \
88 if (reserved) \
89 IOStatistics::unregisterUserClient(reserved->counter); \
90 } while (0)
91
92 #define IOStatisticsClientCall() \
93 do { \
94 IOStatistics::countUserClientCall(client); \
95 } while (0)
96
97 #else
98
99 #define IOStatisticsRegisterCounter()
100 #define IOStatisticsUnregisterCounter()
101 #define IOStatisticsClientCall()
102
103 #endif /* IOKITSTATS */
104
105 #if DEVELOPMENT || DEBUG
106
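// FAKE_STACK_FRAME temporarily replaces the saved return address in the
// current stack frame with the supplied pointer (callers pass getMetaClass()),
// and FAKE_STACK_FRAME_END restores it. The apparent intent is to make
// backtraces taken while calling out to kext code attribute the frame to the
// kext that owns that metaclass; this is only compiled for DEVELOPMENT/DEBUG.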
107 #define FAKE_STACK_FRAME(a) \
108 const void ** __frameptr; \
109 const void * __retaddr; \
110 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
111 __retaddr = __frameptr[1]; \
112 __frameptr[1] = (a);
113
114 #define FAKE_STACK_FRAME_END() \
115 __frameptr[1] = __retaddr;
116
117 #else /* DEVELOPMENT || DEBUG */
118
119 #define FAKE_STACK_FRAME(a)
120 #define FAKE_STACK_FRAME_END()
121
122 #endif /* DEVELOPMENT || DEBUG */
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 // definitions we should get from osfmk
127
128 //typedef struct ipc_port * ipc_port_t;
129 typedef natural_t ipc_kobject_type_t;
130
131 #define IKOT_IOKIT_SPARE 27
132 #define IKOT_IOKIT_CONNECT 29
133 #define IKOT_IOKIT_OBJECT 30
134
135 extern "C" {
136
137 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
138 ipc_kobject_type_t type );
139
140 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
141
142 extern mach_port_name_t iokit_make_send_right( task_t task,
143 io_object_t obj, ipc_kobject_type_t type );
144
145 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
146
147 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
148
149 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
150
151 extern ipc_port_t master_device_port;
152
153 extern void iokit_retain_port( ipc_port_t port );
154 extern void iokit_release_port( ipc_port_t port );
155 extern void iokit_release_port_send( ipc_port_t port );
156
157 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
158
159 #include <mach/mach_traps.h>
160 #include <vm/vm_map.h>
161
162 } /* extern "C" */
163
164
165 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
166
167 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
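// Ports are tracked in two global dictionaries (gIOObjectPorts for object
// ports, gIOConnectPorts for user-client connect ports), keyed by the object
// pointer itself; see dictForType() below.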
168
169 class IOMachPort : public OSObject
170 {
171 OSDeclareDefaultStructors(IOMachPort)
172 public:
173 OSObject * object;
174 ipc_port_t port;
175 UInt32 mscount;
176 UInt8 holdDestroy;
177
178 static IOMachPort * portForObject( OSObject * obj,
179 ipc_kobject_type_t type );
180 static bool noMoreSendersForObject( OSObject * obj,
181 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
182 static void releasePortForObject( OSObject * obj,
183 ipc_kobject_type_t type );
184 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
185
186 static OSDictionary * dictForType( ipc_kobject_type_t type );
187
188 static mach_port_name_t makeSendRightForTask( task_t task,
189 io_object_t obj, ipc_kobject_type_t type );
190
191 virtual void free() APPLE_KEXT_OVERRIDE;
192 };
193
194 #define super OSObject
195 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
196
197 static IOLock * gIOObjectPortLock;
198
199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
200
201 // kept as file-scope globals rather than statics inside dictForType() for ease of debugging
202 static OSDictionary * gIOObjectPorts;
203 static OSDictionary * gIOConnectPorts;
204
205 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
206 {
207 OSDictionary ** dict;
208
209 if( IKOT_IOKIT_OBJECT == type )
210 dict = &gIOObjectPorts;
211 else if( IKOT_IOKIT_CONNECT == type )
212 dict = &gIOConnectPorts;
213 else
214 return( 0 );
215
216 if( 0 == *dict)
217 *dict = OSDictionary::withCapacity( 1 );
218
219 return( *dict );
220 }
221
222 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
223 ipc_kobject_type_t type )
224 {
225 IOMachPort * inst = 0;
226 OSDictionary * dict;
227
228 IOTakeLock( gIOObjectPortLock);
229
230 do {
231
232 dict = dictForType( type );
233 if( !dict)
234 continue;
235
236 if( (inst = (IOMachPort *)
237 dict->getObject( (const OSSymbol *) obj ))) {
238 inst->mscount++;
239 inst->retain();
240 continue;
241 }
242
243 inst = new IOMachPort;
244 if( !inst || !inst->init()) {
245 if( inst) inst->release();
246 inst = 0; continue;
247 }
248
249 inst->port = iokit_alloc_object_port( obj, type );
250 if( inst->port) {
251 // retains obj
252 dict->setObject( (const OSSymbol *) obj, inst );
253 inst->mscount++;
254
255 } else {
256 inst->release();
257 inst = 0;
258 }
259
260 } while( false );
261
262 IOUnlock( gIOObjectPortLock);
263
264 return( inst );
265 }
266
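// Called for a no-senders notification on an object's port: compares the
// notification's make-send count against the recorded count and only removes
// the port mapping (and, for connect ports, calls noMoreSenders() on the user
// client) when no newer send rights have been made since the notification
// was generated; otherwise it reports the current count back to the caller.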
267 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
268 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
269 {
270 OSDictionary * dict;
271 IOMachPort * machPort;
272 IOUserClient * uc;
273 bool destroyed = true;
274
275 IOTakeLock( gIOObjectPortLock);
276
277 if( (dict = dictForType( type ))) {
278 obj->retain();
279
280 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
281 if( machPort) {
282 destroyed = (machPort->mscount <= *mscount);
283 if (!destroyed) *mscount = machPort->mscount;
284 else
285 {
286 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
287 {
288 uc->noMoreSenders();
289 }
290 dict->removeObject( (const OSSymbol *) obj );
291 }
292 }
293 obj->release();
294 }
295
296 IOUnlock( gIOObjectPortLock);
297
298 return( destroyed );
299 }
300
301 void IOMachPort::releasePortForObject( OSObject * obj,
302 ipc_kobject_type_t type )
303 {
304 OSDictionary * dict;
305 IOMachPort * machPort;
306
307 assert(IKOT_IOKIT_CONNECT != type);
308
309 IOTakeLock( gIOObjectPortLock);
310
311 if( (dict = dictForType( type ))) {
312 obj->retain();
313 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
314 if( machPort && !machPort->holdDestroy)
315 dict->removeObject( (const OSSymbol *) obj );
316 obj->release();
317 }
318
319 IOUnlock( gIOObjectPortLock);
320 }
321
322 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
323 {
324 OSDictionary * dict;
325 IOMachPort * machPort;
326
327 IOLockLock( gIOObjectPortLock );
328
329 if( (dict = dictForType( type ))) {
330 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
331 if( machPort)
332 machPort->holdDestroy = true;
333 }
334
335 IOLockUnlock( gIOObjectPortLock );
336 }
337
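// Drops the port bookkeeping for an object that is being torn down. For a
// user client with outstanding memory mappings, the connect port is switched
// to represent the client's mappings set via iokit_switch_object_port()
// before the client's own dictionary entry is removed.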
338 void IOUserClient::destroyUserReferences( OSObject * obj )
339 {
340 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
341
342 // panther, 3160200
343 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
344
345 OSDictionary * dict;
346
347 IOTakeLock( gIOObjectPortLock);
348 obj->retain();
349
350 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
351 {
352 IOMachPort * port;
353 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
354 if (port)
355 {
356 IOUserClient * uc;
357 if ((uc = OSDynamicCast(IOUserClient, obj)))
358 {
359 uc->noMoreSenders();
360 if (uc->mappings)
361 {
362 dict->setObject((const OSSymbol *) uc->mappings, port);
363 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
364
365 uc->mappings->release();
366 uc->mappings = 0;
367 }
368 }
369 dict->removeObject( (const OSSymbol *) obj );
370 }
371 }
372 obj->release();
373 IOUnlock( gIOObjectPortLock);
374 }
375
376 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
377 io_object_t obj, ipc_kobject_type_t type )
378 {
379 return( iokit_make_send_right( task, obj, type ));
380 }
381
382 void IOMachPort::free( void )
383 {
384 if( port)
385 iokit_destroy_object_port( port );
386 super::free();
387 }
388
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390
391 class IOUserIterator : public OSIterator
392 {
393 OSDeclareDefaultStructors(IOUserIterator)
394 public:
395 OSObject * userIteratorObject;
396 IOLock * lock;
397
398 static IOUserIterator * withIterator(OSIterator * iter);
399 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
400 virtual void free() APPLE_KEXT_OVERRIDE;
401
402 virtual void reset() APPLE_KEXT_OVERRIDE;
403 virtual bool isValid() APPLE_KEXT_OVERRIDE;
404 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
405 };
406
407 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
408
409 class IOUserNotification : public IOUserIterator
410 {
411 OSDeclareDefaultStructors(IOUserNotification)
412
413 #define holdNotify userIteratorObject
414
415 public:
416
417 virtual void free() APPLE_KEXT_OVERRIDE;
418
419 virtual void setNotification( IONotifier * obj );
420
421 virtual void reset() APPLE_KEXT_OVERRIDE;
422 virtual bool isValid() APPLE_KEXT_OVERRIDE;
423 };
424
425 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
426
427 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
428
429 IOUserIterator *
430 IOUserIterator::withIterator(OSIterator * iter)
431 {
432 IOUserIterator * me;
433
434 if (!iter) return (0);
435
436 me = new IOUserIterator;
437 if (me && !me->init())
438 {
439 me->release();
440 me = 0;
441 }
442 if (!me) return me;
443 me->userIteratorObject = iter;
444
445 return (me);
446 }
447
448 bool
449 IOUserIterator::init( void )
450 {
451 if (!OSObject::init()) return (false);
452
453 lock = IOLockAlloc();
454 if( !lock)
455 return( false );
456
457 return (true);
458 }
459
460 void
461 IOUserIterator::free()
462 {
463 if (userIteratorObject) userIteratorObject->release();
464 if (lock) IOLockFree(lock);
465 OSObject::free();
466 }
467
468 void
469 IOUserIterator::reset()
470 {
471 IOLockLock(lock);
472 assert(OSDynamicCast(OSIterator, userIteratorObject));
473 ((OSIterator *)userIteratorObject)->reset();
474 IOLockUnlock(lock);
475 }
476
477 bool
478 IOUserIterator::isValid()
479 {
480 bool ret;
481
482 IOLockLock(lock);
483 assert(OSDynamicCast(OSIterator, userIteratorObject));
484 ret = ((OSIterator *)userIteratorObject)->isValid();
485 IOLockUnlock(lock);
486
487 return (ret);
488 }
489
490 OSObject *
491 IOUserIterator::getNextObject()
492 {
493 OSObject * ret;
494
495 IOLockLock(lock);
496 assert(OSDynamicCast(OSIterator, userIteratorObject));
497 ret = ((OSIterator *)userIteratorObject)->getNextObject();
498 IOLockUnlock(lock);
499
500 return (ret);
501 }
502
503 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
504 extern "C" {
505
506 // functions called from osfmk/device/iokit_rpc.c
507
508 void
509 iokit_add_reference( io_object_t obj )
510 {
511 if( obj)
512 obj->retain();
513 }
514
515 void
516 iokit_remove_reference( io_object_t obj )
517 {
518 if( obj)
519 obj->release();
520 }
521
522 void
523 iokit_add_connect_reference( io_object_t obj )
524 {
525 IOUserClient * uc;
526
527 if (!obj) return;
528
529 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
530
531 obj->retain();
532 }
533
534 void
535 iokit_remove_connect_reference( io_object_t obj )
536 {
537 IOUserClient * uc;
538 bool finalize = false;
539
540 if (!obj) return;
541
542 if ((uc = OSDynamicCast(IOUserClient, obj)))
543 {
544 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
545 {
546 IOLockLock(gIOObjectPortLock);
547 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
548 IOLockUnlock(gIOObjectPortLock);
549 }
550 if (finalize) uc->scheduleFinalize(true);
551 }
552
553 obj->release();
554 }
555
556 bool
557 IOUserClient::finalizeUserReferences(OSObject * obj)
558 {
559 IOUserClient * uc;
560 bool ok = true;
561
562 if ((uc = OSDynamicCast(IOUserClient, obj)))
563 {
564 IOLockLock(gIOObjectPortLock);
565 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
566 IOLockUnlock(gIOObjectPortLock);
567 }
568 return (ok);
569 }
570
571 ipc_port_t
572 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
573 {
574 IOMachPort * machPort;
575 ipc_port_t port;
576
577 if( (machPort = IOMachPort::portForObject( obj, type ))) {
578
579 port = machPort->port;
580 if( port)
581 iokit_retain_port( port );
582
583 machPort->release();
584
585 } else
586 port = NULL;
587
588 return( port );
589 }
590
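// Entry point from osfmk when the last send right for an IOKit port goes away
// (see noMoreSendersForObject above). Connect ports trigger clientDied() on
// the user client; object ports tear down memory maps or disarm user
// notifications. Returns kIOReturnNotReady if the notification was stale.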
591 kern_return_t
592 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
593 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
594 {
595 IOUserClient * client;
596 IOMemoryMap * map;
597 IOUserNotification * notify;
598
599 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
600 return( kIOReturnNotReady );
601
602 if( IKOT_IOKIT_CONNECT == type)
603 {
604 if( (client = OSDynamicCast( IOUserClient, obj )))
605 {
606 IOStatisticsClientCall();
607 client->clientDied();
608 }
609 }
610 else if( IKOT_IOKIT_OBJECT == type)
611 {
612 if( (map = OSDynamicCast( IOMemoryMap, obj )))
613 map->taskDied();
614 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
615 notify->setNotification( 0 );
616 }
617
618 return( kIOReturnSuccess );
619 }
620
621 }; /* extern "C" */
622
623 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
624
625 class IOServiceUserNotification : public IOUserNotification
626 {
627 OSDeclareDefaultStructors(IOServiceUserNotification)
628
629 struct PingMsg {
630 mach_msg_header_t msgHdr;
631 OSNotificationHeader64 notifyHeader;
632 };
633
634 enum { kMaxOutstanding = 1024 };
635
636 PingMsg * pingMsg;
637 vm_size_t msgSize;
638 OSArray * newSet;
639 OSObject * lastEntry;
640 bool armed;
641 bool ipcLogged;
642
643 public:
644
645 virtual bool init( mach_port_t port, natural_t type,
646 void * reference, vm_size_t referenceSize,
647 bool clientIs64 );
648 virtual void free() APPLE_KEXT_OVERRIDE;
649
650 static bool _handler( void * target,
651 void * ref, IOService * newService, IONotifier * notifier );
652 virtual bool handler( void * ref, IOService * newService );
653
654 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
655 };
656
657 class IOServiceMessageUserNotification : public IOUserNotification
658 {
659 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
660
661 struct PingMsg {
662 mach_msg_header_t msgHdr;
663 mach_msg_body_t msgBody;
664 mach_msg_port_descriptor_t ports[1];
665 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
666 };
667
668 PingMsg * pingMsg;
669 vm_size_t msgSize;
670 uint8_t clientIs64;
671 int owningPID;
672 bool ipcLogged;
673
674 public:
675
676 virtual bool init( mach_port_t port, natural_t type,
677 void * reference, vm_size_t referenceSize,
678 vm_size_t extraSize,
679 bool clientIs64 );
680
681 virtual void free() APPLE_KEXT_OVERRIDE;
682
683 static IOReturn _handler( void * target, void * ref,
684 UInt32 messageType, IOService * provider,
685 void * messageArgument, vm_size_t argSize );
686 virtual IOReturn handler( void * ref,
687 UInt32 messageType, IOService * provider,
688 void * messageArgument, vm_size_t argSize );
689
690 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
691 };
692
693 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
694
695 #undef super
696 #define super IOUserIterator
697 OSDefineMetaClass( IOUserNotification, IOUserIterator )
698 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
699
700 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
701
702 void IOUserNotification::free( void )
703 {
704 if (holdNotify)
705 {
706 assert(OSDynamicCast(IONotifier, holdNotify));
707 ((IONotifier *)holdNotify)->remove();
708 holdNotify = 0;
709 }
710 // can't be in handler now
711
712 super::free();
713 }
714
715
716 void IOUserNotification::setNotification( IONotifier * notify )
717 {
718 OSObject * previousNotify;
719
720 IOLockLock( gIOObjectPortLock);
721
722 previousNotify = holdNotify;
723 holdNotify = notify;
724
725 IOLockUnlock( gIOObjectPortLock);
726
727 if( previousNotify)
728 {
729 assert(OSDynamicCast(IONotifier, previousNotify));
730 ((IONotifier *)previousNotify)->remove();
731 }
732 }
733
734 void IOUserNotification::reset()
735 {
736 // ?
737 }
738
739 bool IOUserNotification::isValid()
740 {
741 return( true );
742 }
743
744 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
745
746 #undef super
747 #define super IOUserNotification
748 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
749
750 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
751
752 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
753 void * reference, vm_size_t referenceSize,
754 bool clientIs64 )
755 {
756 if( !super::init())
757 return( false );
758
759 newSet = OSArray::withCapacity( 1 );
760 if( !newSet)
761 return( false );
762
763 if (referenceSize > sizeof(OSAsyncReference64))
764 return( false );
765
766 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
767 pingMsg = (PingMsg *) IOMalloc( msgSize);
768 if( !pingMsg)
769 return( false );
770
771 bzero( pingMsg, msgSize);
772
773 pingMsg->msgHdr.msgh_remote_port = port;
774 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
775 MACH_MSG_TYPE_COPY_SEND /*remote*/,
776 MACH_MSG_TYPE_MAKE_SEND /*local*/);
777 pingMsg->msgHdr.msgh_size = msgSize;
778 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
779
780 pingMsg->notifyHeader.size = 0;
781 pingMsg->notifyHeader.type = type;
782 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
783
784 return( true );
785 }
786
787 void IOServiceUserNotification::free( void )
788 {
789 PingMsg * _pingMsg;
790 vm_size_t _msgSize;
791 OSArray * _newSet;
792 OSObject * _lastEntry;
793
794 _pingMsg = pingMsg;
795 _msgSize = msgSize;
796 _lastEntry = lastEntry;
797 _newSet = newSet;
798
799 super::free();
800
801 if( _pingMsg && _msgSize) {
802 if (_pingMsg->msgHdr.msgh_remote_port) {
803 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
804 }
805 IOFree(_pingMsg, _msgSize);
806 }
807
808 if( _lastEntry)
809 _lastEntry->release();
810
811 if( _newSet)
812 _newSet->release();
813 }
814
815 bool IOServiceUserNotification::_handler( void * target,
816 void * ref, IOService * newService, IONotifier * notifier )
817 {
818 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
819 }
820
821 bool IOServiceUserNotification::handler( void * ref,
822 IOService * newService )
823 {
824 unsigned int count;
825 kern_return_t kr;
826 ipc_port_t port = NULL;
827 bool sendPing = false;
828
829 IOTakeLock( lock );
830
831 count = newSet->getCount();
832 if( count < kMaxOutstanding) {
833
834 newSet->setObject( newService );
835 if( (sendPing = (armed && (0 == count))))
836 armed = false;
837 }
838
839 IOUnlock( lock );
840
841 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
842 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
843
844 if( sendPing) {
845 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
846 pingMsg->msgHdr.msgh_local_port = port;
847 else
848 pingMsg->msgHdr.msgh_local_port = NULL;
849
850 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
851 pingMsg->msgHdr.msgh_size,
852 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
853 0);
854 if( port)
855 iokit_release_port( port );
856
857 if( (KERN_SUCCESS != kr) && !ipcLogged)
858 {
859 ipcLogged = true;
860 IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
861 }
862 }
863
864 return( true );
865 }
866
867 OSObject * IOServiceUserNotification::getNextObject()
868 {
869 unsigned int count;
870 OSObject * result;
871 OSObject * releaseEntry;
872
873 IOLockLock(lock);
874
875 releaseEntry = lastEntry;
876 count = newSet->getCount();
877 if( count ) {
878 result = newSet->getObject( count - 1 );
879 result->retain();
880 newSet->removeObject( count - 1);
881 } else {
882 result = 0;
883 armed = true;
884 }
885 lastEntry = result;
886
887 IOLockUnlock(lock);
888
889 if (releaseEntry) releaseEntry->release();
890
891 return( result );
892 }
893
894 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
895
896 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
897
898 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
899
900 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
901 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
902 bool client64 )
903 {
904 if( !super::init())
905 return( false );
906
907 if (referenceSize > sizeof(OSAsyncReference64))
908 return( false );
909
910 clientIs64 = client64;
911
912 owningPID = proc_selfpid();
913
914 extraSize += sizeof(IOServiceInterestContent64);
915 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
916 pingMsg = (PingMsg *) IOMalloc( msgSize);
917 if( !pingMsg)
918 return( false );
919
920 bzero( pingMsg, msgSize);
921
922 pingMsg->msgHdr.msgh_remote_port = port;
923 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
924 | MACH_MSGH_BITS(
925 MACH_MSG_TYPE_COPY_SEND /*remote*/,
926 MACH_MSG_TYPE_MAKE_SEND /*local*/);
927 pingMsg->msgHdr.msgh_size = msgSize;
928 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
929
930 pingMsg->msgBody.msgh_descriptor_count = 1;
931
932 pingMsg->ports[0].name = 0;
933 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
934 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
935
936 pingMsg->notifyHeader.size = extraSize;
937 pingMsg->notifyHeader.type = type;
938 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
939
940 return( true );
941 }
942
943 void IOServiceMessageUserNotification::free( void )
944 {
945 PingMsg * _pingMsg;
946 vm_size_t _msgSize;
947
948 _pingMsg = pingMsg;
949 _msgSize = msgSize;
950
951 super::free();
952
953 if( _pingMsg && _msgSize) {
954 if (_pingMsg->msgHdr.msgh_remote_port) {
955 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
956 }
957 IOFree( _pingMsg, _msgSize);
958 }
959 }
960
961 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
962 UInt32 messageType, IOService * provider,
963 void * argument, vm_size_t argSize )
964 {
965 return( ((IOServiceMessageUserNotification *) target)->handler(
966 ref, messageType, provider, argument, argSize));
967 }
968
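// Repackages an interest notification into a Mach message: the caller's
// argument payload is appended after the notification header (capped at
// kIOUserNotifyMaxMessageSize), a send right for the provider is attached as
// a port descriptor, and the message is sent to the registered port.
// kIOMessageCopyClientID is answered directly with the owning pid.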
969 IOReturn IOServiceMessageUserNotification::handler( void * ref,
970 UInt32 messageType, IOService * provider,
971 void * messageArgument, vm_size_t callerArgSize )
972 {
973 enum { kLocalMsgSize = 0x100 };
974 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
975 void * allocMsg;
976 kern_return_t kr;
977 vm_size_t argSize;
978 vm_size_t thisMsgSize;
979 ipc_port_t thisPort, providerPort;
980 struct PingMsg * thisMsg;
981 IOServiceInterestContent64 * data;
982
983 if (kIOMessageCopyClientID == messageType)
984 {
985 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
986 return (kIOReturnSuccess);
987 }
988
989 if (callerArgSize == 0)
990 {
991 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
992 else argSize = sizeof(uint32_t);
993 }
994 else
995 {
996 argSize = callerArgSize;
997 if( argSize > kIOUserNotifyMaxMessageSize)
998 argSize = kIOUserNotifyMaxMessageSize;
999 }
1000
1001 // adjust message size for ipc restrictions
1002 natural_t type;
1003 type = pingMsg->notifyHeader.type;
1004 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1005 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1006 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1007
1008 thisMsgSize = msgSize
1009 + sizeof( IOServiceInterestContent64 )
1010 - sizeof( data->messageArgument)
1011 + argSize;
1012
1013 if (thisMsgSize > sizeof(stackMsg))
1014 {
1015 allocMsg = IOMalloc(thisMsgSize);
1016 if (!allocMsg) return (kIOReturnNoMemory);
1017 thisMsg = (typeof(thisMsg)) allocMsg;
1018 }
1019 else
1020 {
1021 allocMsg = 0;
1022 thisMsg = (typeof(thisMsg)) stackMsg;
1023 }
1024
1025 bcopy(pingMsg, thisMsg, msgSize);
1026 thisMsg->notifyHeader.type = type;
1027 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1028 // == pingMsg->notifyHeader.content;
1029 data->messageType = messageType;
1030
1031 if (callerArgSize == 0)
1032 {
1033 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1034 if (!clientIs64)
1035 {
1036 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1037 }
1038 }
1039 else
1040 {
1041 bcopy( messageArgument, data->messageArgument, callerArgSize );
1042 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1043 }
1044
1045 thisMsg->notifyHeader.type = type;
1046 thisMsg->msgHdr.msgh_size = thisMsgSize;
1047
1048 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1049 thisMsg->ports[0].name = providerPort;
1050 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1051 thisMsg->msgHdr.msgh_local_port = thisPort;
1052
1053 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1054 thisMsg->msgHdr.msgh_size,
1055 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1056 0);
1057 if( thisPort)
1058 iokit_release_port( thisPort );
1059 if( providerPort)
1060 iokit_release_port( providerPort );
1061
1062 if (allocMsg)
1063 IOFree(allocMsg, thisMsgSize);
1064
1065 if((KERN_SUCCESS != kr) && !ipcLogged)
1066 {
1067 ipcLogged = true;
1068 IOLog("%s: mach_msg_send_from_kernel_with_options (0x%x)\n", __PRETTY_FUNCTION__, kr );
1069 }
1070
1071 return( kIOReturnSuccess );
1072 }
1073
1074 OSObject * IOServiceMessageUserNotification::getNextObject()
1075 {
1076 return( 0 );
1077 }
1078
1079 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1080
1081 #undef super
1082 #define super IOService
1083 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1084
1085 IOLock * gIOUserClientOwnersLock;
1086
1087 void IOUserClient::initialize( void )
1088 {
1089 gIOObjectPortLock = IOLockAlloc();
1090 gIOUserClientOwnersLock = IOLockAlloc();
1091 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1092 }
1093
1094 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1095 mach_port_t wakePort,
1096 void *callback, void *refcon)
1097 {
1098 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1099 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1100 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1101 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1102 }
1103
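// OSAsyncReference64 layout: slot kIOAsyncReservedIndex carries the wake port
// (with kIOUCAsync0Flags kept in the low bits), kIOAsyncCalloutFuncIndex the
// client's callback address, and kIOAsyncCalloutRefconIndex the refcon.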
1104 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1105 mach_port_t wakePort,
1106 mach_vm_address_t callback, io_user_reference_t refcon)
1107 {
1108 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1109 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1110 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1111 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1112 }
1113
1114 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1115 mach_port_t wakePort,
1116 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1117 {
1118 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1119 if (vm_map_is_64bit(get_task_map(task))) {
1120 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1121 }
1122 }
1123
1124 static OSDictionary * CopyConsoleUser(UInt32 uid)
1125 {
1126 OSArray * array;
1127 OSDictionary * user = 0;
1128
1129 if ((array = OSDynamicCast(OSArray,
1130 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1131 {
1132 for (unsigned int idx = 0;
1133 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1134 idx++) {
1135 OSNumber * num;
1136
1137 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1138 && (uid == num->unsigned32BitValue())) {
1139 user->retain();
1140 break;
1141 }
1142 }
1143 array->release();
1144 }
1145 return user;
1146 }
1147
1148 static OSDictionary * CopyUserOnConsole(void)
1149 {
1150 OSArray * array;
1151 OSDictionary * user = 0;
1152
1153 if ((array = OSDynamicCast(OSArray,
1154 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1155 {
1156 for (unsigned int idx = 0;
1157 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1158 idx++)
1159 {
1160 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1161 {
1162 user->retain();
1163 break;
1164 }
1165 }
1166 array->release();
1167 }
1168 return (user);
1169 }
1170
1171 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1172 IOService * service )
1173 {
1174 proc_t p;
1175
1176 p = (proc_t) get_bsdtask_info(task);
1177 if (p)
1178 {
1179 uint64_t authorizationID;
1180
1181 authorizationID = proc_uniqueid(p);
1182 if (authorizationID)
1183 {
1184 if (service->getAuthorizationID() == authorizationID)
1185 {
1186 return (kIOReturnSuccess);
1187 }
1188 }
1189 }
1190
1191 return (kIOReturnNotPermitted);
1192 }
1193
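// Checks a named privilege for the caller identified by securityToken (a
// task_t, or an IOUCProcessToken for the secure-console case): administrator
// requires a zero security token, the local/console-user checks are made
// against the registry's console-users array, and the console-session check
// compares the caller's audit session id with the user on console.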
1194 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1195 const char * privilegeName )
1196 {
1197 kern_return_t kr;
1198 security_token_t token;
1199 mach_msg_type_number_t count;
1200 task_t task;
1201 OSDictionary * user;
1202 bool secureConsole;
1203
1204
1205 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1206 sizeof(kIOClientPrivilegeForeground)))
1207 {
1208 if (task_is_gpu_denied(current_task()))
1209 return (kIOReturnNotPrivileged);
1210 else
1211 return (kIOReturnSuccess);
1212 }
1213
1214 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1215 sizeof(kIOClientPrivilegeConsoleSession)))
1216 {
1217 kauth_cred_t cred;
1218 proc_t p;
1219
1220 task = (task_t) securityToken;
1221 if (!task)
1222 task = current_task();
1223 p = (proc_t) get_bsdtask_info(task);
1224 kr = kIOReturnNotPrivileged;
1225
1226 if (p && (cred = kauth_cred_proc_ref(p)))
1227 {
1228 user = CopyUserOnConsole();
1229 if (user)
1230 {
1231 OSNumber * num;
1232 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1233 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1234 {
1235 kr = kIOReturnSuccess;
1236 }
1237 user->release();
1238 }
1239 kauth_cred_unref(&cred);
1240 }
1241 return (kr);
1242 }
1243
1244 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1245 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1246 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1247 else
1248 task = (task_t)securityToken;
1249
1250 count = TASK_SECURITY_TOKEN_COUNT;
1251 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1252
1253 if (KERN_SUCCESS != kr)
1254 {}
1255 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1256 sizeof(kIOClientPrivilegeAdministrator))) {
1257 if (0 != token.val[0])
1258 kr = kIOReturnNotPrivileged;
1259 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1260 sizeof(kIOClientPrivilegeLocalUser))) {
1261 user = CopyConsoleUser(token.val[0]);
1262 if ( user )
1263 user->release();
1264 else
1265 kr = kIOReturnNotPrivileged;
1266 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1267 sizeof(kIOClientPrivilegeConsoleUser))) {
1268 user = CopyConsoleUser(token.val[0]);
1269 if ( user ) {
1270 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1271 kr = kIOReturnNotPrivileged;
1272 else if ( secureConsole ) {
1273 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1274 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1275 kr = kIOReturnNotPrivileged;
1276 }
1277 user->release();
1278 }
1279 else
1280 kr = kIOReturnNotPrivileged;
1281 } else
1282 kr = kIOReturnUnsupported;
1283
1284 return (kr);
1285 }
1286
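// Returns a retained copy of one entitlement value from the task's code
// signature, or NULL. The entitlements blob is size-limited and copied into a
// nul-terminated buffer before being parsed with OSUnserializeXML().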
1287 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1288 const char * entitlement )
1289 {
1290 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1291
1292 proc_t p = NULL;
1293 pid_t pid = 0;
1294 char procname[MAXCOMLEN + 1] = "";
1295 size_t len = 0;
1296 void *entitlements_blob = NULL;
1297 char *entitlements_data = NULL;
1298 OSObject *entitlements_obj = NULL;
1299 OSDictionary *entitlements = NULL;
1300 OSString *errorString = NULL;
1301 OSObject *value = NULL;
1302
1303 p = (proc_t)get_bsdtask_info(task);
1304 if (p == NULL)
1305 goto fail;
1306 pid = proc_pid(p);
1307 proc_name(pid, procname, (int)sizeof(procname));
1308
1309 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1310 goto fail;
1311
1312 if (len <= offsetof(CS_GenericBlob, data))
1313 goto fail;
1314
1315 /*
1316 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1317 * we'll try to parse in the kernel.
1318 */
1319 len -= offsetof(CS_GenericBlob, data);
1320 if (len > MAX_ENTITLEMENTS_LEN) {
1321 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1322 goto fail;
1323 }
1324
1325 /*
1326 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1327 * what is stored in the entitlements blob. Copy the string and
1328 * terminate it.
1329 */
1330 entitlements_data = (char *)IOMalloc(len + 1);
1331 if (entitlements_data == NULL)
1332 goto fail;
1333 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1334 entitlements_data[len] = '\0';
1335
1336 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1337 if (errorString != NULL) {
1338 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1339 goto fail;
1340 }
1341 if (entitlements_obj == NULL)
1342 goto fail;
1343
1344 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1345 if (entitlements == NULL)
1346 goto fail;
1347
1348 /* Fetch the entitlement value from the dictionary. */
1349 value = entitlements->getObject(entitlement);
1350 if (value != NULL)
1351 value->retain();
1352
1353 fail:
1354 if (entitlements_data != NULL)
1355 IOFree(entitlements_data, len + 1);
1356 if (entitlements_obj != NULL)
1357 entitlements_obj->release();
1358 if (errorString != NULL)
1359 errorString->release();
1360 return value;
1361 }
1362
1363 bool IOUserClient::init()
1364 {
1365 if (getPropertyTable() || super::init())
1366 return reserve();
1367
1368 return false;
1369 }
1370
1371 bool IOUserClient::init(OSDictionary * dictionary)
1372 {
1373 if (getPropertyTable() || super::init(dictionary))
1374 return reserve();
1375
1376 return false;
1377 }
1378
1379 bool IOUserClient::initWithTask(task_t owningTask,
1380 void * securityID,
1381 UInt32 type )
1382 {
1383 if (getPropertyTable() || super::init())
1384 return reserve();
1385
1386 return false;
1387 }
1388
1389 bool IOUserClient::initWithTask(task_t owningTask,
1390 void * securityID,
1391 UInt32 type,
1392 OSDictionary * properties )
1393 {
1394 bool ok;
1395
1396 ok = super::init( properties );
1397 ok &= initWithTask( owningTask, securityID, type );
1398
1399 return( ok );
1400 }
1401
1402 bool IOUserClient::reserve()
1403 {
1404 if(!reserved) {
1405 reserved = IONew(ExpansionData, 1);
1406 if (!reserved) {
1407 return false;
1408 }
1409 }
1410 setTerminateDefer(NULL, true);
1411 IOStatisticsRegisterCounter();
1412
1413 return true;
1414 }
1415
1416 struct IOUserClientOwner
1417 {
1418 task_t task;
1419 queue_chain_t taskLink;
1420 IOUserClient * uc;
1421 queue_chain_t ucLink;
1422 };
1423
1424 IOReturn
1425 IOUserClient::registerOwner(task_t task)
1426 {
1427 IOUserClientOwner * owner;
1428 IOReturn ret;
1429 bool newOwner;
1430
1431 IOLockLock(gIOUserClientOwnersLock);
1432
1433 newOwner = true;
1434 ret = kIOReturnSuccess;
1435
1436 if (!owners.next) queue_init(&owners);
1437 else
1438 {
1439 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1440 {
1441 if (task != owner->task) continue;
1442 newOwner = false;
1443 break;
1444 }
1445 }
1446 if (newOwner)
1447 {
1448 owner = IONew(IOUserClientOwner, 1);
1449 if (!owner) ret = kIOReturnNoMemory;
1450 else
1451 {
1452 owner->task = task;
1453 owner->uc = this;
1454 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1455 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1456 }
1457 }
1458
1459 IOLockUnlock(gIOUserClientOwnersLock);
1460
1461 return (ret);
1462 }
1463
1464 void
1465 IOUserClient::noMoreSenders(void)
1466 {
1467 IOUserClientOwner * owner;
1468
1469 IOLockLock(gIOUserClientOwnersLock);
1470
1471 if (owners.next)
1472 {
1473 while (!queue_empty(&owners))
1474 {
1475 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1476 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1477 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1478 IODelete(owner, IOUserClientOwner, 1);
1479 }
1480 owners.next = owners.prev = NULL;
1481 }
1482
1483 IOLockUnlock(gIOUserClientOwnersLock);
1484 }
1485
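// Called when a task terminates: its registrations made via registerOwner()
// are dropped, and any user client left with no owners is collected on a
// temporary singly linked list (reusing the owners queue head) and given
// clientDied() once the owners lock has been released.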
1486 extern "C" kern_return_t
1487 iokit_task_terminate(task_t task)
1488 {
1489 IOUserClientOwner * owner;
1490 IOUserClient * dead;
1491 IOUserClient * uc;
1492 queue_head_t * taskque;
1493
1494 IOLockLock(gIOUserClientOwnersLock);
1495
1496 taskque = task_io_user_clients(task);
1497 dead = NULL;
1498 while (!queue_empty(taskque))
1499 {
1500 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1501 uc = owner->uc;
1502 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1503 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1504 if (queue_empty(&uc->owners))
1505 {
1506 uc->retain();
1507 IOLog("destroying out of band connect for %s\n", uc->getName());
1508 // now using the uc queue head as a singly linked queue,
1509 // leaving .next as NULL to mark it empty
1510 uc->owners.next = NULL;
1511 uc->owners.prev = (queue_entry_t) dead;
1512 dead = uc;
1513 }
1514 IODelete(owner, IOUserClientOwner, 1);
1515 }
1516
1517 IOLockUnlock(gIOUserClientOwnersLock);
1518
1519 while (dead)
1520 {
1521 uc = dead;
1522 dead = (IOUserClient *)(void *) dead->owners.prev;
1523 uc->owners.prev = NULL;
1524 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1525 uc->release();
1526 }
1527
1528 return (KERN_SUCCESS);
1529 }
1530
1531 void IOUserClient::free()
1532 {
1533 if( mappings) mappings->release();
1534
1535 IOStatisticsUnregisterCounter();
1536
1537 assert(!owners.next);
1538 assert(!owners.prev);
1539
1540 if (reserved) IODelete(reserved, ExpansionData, 1);
1541
1542 super::free();
1543 }
1544
1545 IOReturn IOUserClient::clientDied( void )
1546 {
1547 IOReturn ret = kIOReturnNotReady;
1548
1549 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1550 {
1551 ret = clientClose();
1552 }
1553
1554 return (ret);
1555 }
1556
1557 IOReturn IOUserClient::clientClose( void )
1558 {
1559 return( kIOReturnUnsupported );
1560 }
1561
1562 IOService * IOUserClient::getService( void )
1563 {
1564 return( 0 );
1565 }
1566
1567 IOReturn IOUserClient::registerNotificationPort(
1568 mach_port_t /* port */,
1569 UInt32 /* type */,
1570 UInt32 /* refCon */)
1571 {
1572 return( kIOReturnUnsupported);
1573 }
1574
1575 IOReturn IOUserClient::registerNotificationPort(
1576 mach_port_t port,
1577 UInt32 type,
1578 io_user_reference_t refCon)
1579 {
1580 return (registerNotificationPort(port, type, (UInt32) refCon));
1581 }
1582
1583 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1584 semaphore_t * semaphore )
1585 {
1586 return( kIOReturnUnsupported);
1587 }
1588
1589 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1590 {
1591 return( kIOReturnUnsupported);
1592 }
1593
1594 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1595 IOOptionBits * options,
1596 IOMemoryDescriptor ** memory )
1597 {
1598 return( kIOReturnUnsupported);
1599 }
1600
1601 #if !__LP64__
1602 IOMemoryMap * IOUserClient::mapClientMemory(
1603 IOOptionBits type,
1604 task_t task,
1605 IOOptionBits mapFlags,
1606 IOVirtualAddress atAddress )
1607 {
1608 return (NULL);
1609 }
1610 #endif
1611
1612 IOMemoryMap * IOUserClient::mapClientMemory64(
1613 IOOptionBits type,
1614 task_t task,
1615 IOOptionBits mapFlags,
1616 mach_vm_address_t atAddress )
1617 {
1618 IOReturn err;
1619 IOOptionBits options = 0;
1620 IOMemoryDescriptor * memory;
1621 IOMemoryMap * map = 0;
1622
1623 err = clientMemoryForType( (UInt32) type, &options, &memory );
1624
1625 if( memory && (kIOReturnSuccess == err)) {
1626
1627 FAKE_STACK_FRAME(getMetaClass());
1628
1629 options = (options & ~kIOMapUserOptionsMask)
1630 | (mapFlags & kIOMapUserOptionsMask);
1631 map = memory->createMappingInTask( task, atAddress, options );
1632 memory->release();
1633
1634 FAKE_STACK_FRAME_END();
1635 }
1636
1637 return( map );
1638 }
1639
1640 IOReturn IOUserClient::exportObjectToClient(task_t task,
1641 OSObject *obj, io_object_t *clientObj)
1642 {
1643 mach_port_name_t name;
1644
1645 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1646
1647 *(mach_port_name_t *)clientObj = name;
1648 return kIOReturnSuccess;
1649 }
1650
1651 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1652 {
1653 return( 0 );
1654 }
1655
1656 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1657 {
1658 return( 0 );
1659 }
1660
1661 IOExternalTrap * IOUserClient::
1662 getExternalTrapForIndex(UInt32 index)
1663 {
1664 return NULL;
1665 }
1666
1667 #pragma clang diagnostic push
1668 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1669
1670 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1671 // functions can break clients of kexts implementing getExternalMethodForIndex()
1672 IOExternalMethod * IOUserClient::
1673 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1674 {
1675 IOExternalMethod *method = getExternalMethodForIndex(index);
1676
1677 if (method)
1678 *targetP = (IOService *) method->object;
1679
1680 return method;
1681 }
1682
1683 IOExternalAsyncMethod * IOUserClient::
1684 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1685 {
1686 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1687
1688 if (method)
1689 *targetP = (IOService *) method->object;
1690
1691 return method;
1692 }
1693
1694 IOExternalTrap * IOUserClient::
1695 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1696 {
1697 IOExternalTrap *trap = getExternalTrapForIndex(index);
1698
1699 if (trap) {
1700 *targetP = trap->object;
1701 }
1702
1703 return trap;
1704 }
1705 #pragma clang diagnostic pop
1706
1707 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1708 {
1709 mach_port_t port;
1710 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1711
1712 if (MACH_PORT_NULL != port)
1713 iokit_release_port_send(port);
1714
1715 return (kIOReturnSuccess);
1716 }
1717
1718 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1719 {
1720 if (MACH_PORT_NULL != port)
1721 iokit_release_port_send(port);
1722
1723 return (kIOReturnSuccess);
1724 }
1725
1726 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1727 IOReturn result, void *args[], UInt32 numArgs)
1728 {
1729 OSAsyncReference64 reference64;
1730 io_user_reference_t args64[kMaxAsyncArgs];
1731 unsigned int idx;
1732
1733 if (numArgs > kMaxAsyncArgs)
1734 return kIOReturnMessageTooLarge;
1735
1736 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1737 reference64[idx] = REF64(reference[idx]);
1738
1739 for (idx = 0; idx < numArgs; idx++)
1740 args64[idx] = REF64(args[idx]);
1741
1742 return (sendAsyncResult64(reference64, result, args64, numArgs));
1743 }
1744
1745 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1746 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1747 {
1748 return _sendAsyncResult64(reference, result, args, numArgs, options);
1749 }
1750
1751 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1752 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1753 {
1754 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1755 }
1756
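// Builds and sends the async completion message. reference[0] encodes the
// reply port plus flag bits; kIOUCAsync64Flag selects the 64-bit message
// layout, otherwise the arguments and reference are narrowed to 32 bits.
//
// Driver-side sketch (not part of this file; names are illustrative):
//
//	io_user_reference_t args[1] = { (io_user_reference_t) byteCount };
//	sendAsyncResult64(savedAsyncRef, kIOReturnSuccess, args, 1);
//
// where savedAsyncRef was captured earlier with setAsyncReference64().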
1757 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1758 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1759 {
1760 struct ReplyMsg
1761 {
1762 mach_msg_header_t msgHdr;
1763 union
1764 {
1765 struct
1766 {
1767 OSNotificationHeader notifyHdr;
1768 IOAsyncCompletionContent asyncContent;
1769 uint32_t args[kMaxAsyncArgs];
1770 } msg32;
1771 struct
1772 {
1773 OSNotificationHeader64 notifyHdr;
1774 IOAsyncCompletionContent asyncContent;
1775 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1776 } msg64;
1777 } m;
1778 };
1779 ReplyMsg replyMsg;
1780 mach_port_t replyPort;
1781 kern_return_t kr;
1782
1783 // If no reply port, do nothing.
1784 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1785 if (replyPort == MACH_PORT_NULL)
1786 return kIOReturnSuccess;
1787
1788 if (numArgs > kMaxAsyncArgs)
1789 return kIOReturnMessageTooLarge;
1790
1791 bzero(&replyMsg, sizeof(replyMsg));
1792 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1793 0 /*local*/);
1794 replyMsg.msgHdr.msgh_remote_port = replyPort;
1795 replyMsg.msgHdr.msgh_local_port = 0;
1796 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1797 if (kIOUCAsync64Flag & reference[0])
1798 {
1799 replyMsg.msgHdr.msgh_size =
1800 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1801 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1802 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1803 + numArgs * sizeof(io_user_reference_t);
1804 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1805 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1806
1807 replyMsg.m.msg64.asyncContent.result = result;
1808 if (numArgs)
1809 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1810 }
1811 else
1812 {
1813 unsigned int idx;
1814
1815 replyMsg.msgHdr.msgh_size =
1816 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1817 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1818
1819 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1820 + numArgs * sizeof(uint32_t);
1821 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1822
1823 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1824 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1825
1826 replyMsg.m.msg32.asyncContent.result = result;
1827
1828 for (idx = 0; idx < numArgs; idx++)
1829 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1830 }
1831
1832 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1833 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1834 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1835 } else {
1836 /* Fail on full queue. */
1837 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1838 replyMsg.msgHdr.msgh_size);
1839 }
1840 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1841 {
1842 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1843 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1844 }
1845 return kr;
1846 }
1847
1848
1849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1850
1851 extern "C" {
1852
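// CHECK casts a MIG-supplied object to the expected class, declaring the
// variable named by 'out', and returns kIOReturnBadArgument on a type
// mismatch. CHECKLOCKED additionally unwraps the object held by an
// IOUserIterator wrapper.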
1853 #define CHECK(cls,obj,out) \
1854 cls * out; \
1855 if( !(out = OSDynamicCast( cls, obj))) \
1856 return( kIOReturnBadArgument )
1857
1858 #define CHECKLOCKED(cls,obj,out) \
1859 IOUserIterator * oIter; \
1860 cls * out; \
1861 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1862 return (kIOReturnBadArgument); \
1863 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1864 return (kIOReturnBadArgument)
1865
1866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1867
1868 // Create a vm_map_copy_t or kalloc'ed data for memory
1869 // to be copied out. ipc will free after the copyout.
1870
1871 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1872 io_buf_ptr_t * buf )
1873 {
1874 kern_return_t err;
1875 vm_map_copy_t copy;
1876
1877 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1878 false /* src_destroy */, &copy);
1879
1880 assert( err == KERN_SUCCESS );
1881 if( err == KERN_SUCCESS )
1882 *buf = (char *) copy;
1883
1884 return( err );
1885 }
1886
1887 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1888
1889 /* Routine io_server_version */
1890 kern_return_t is_io_server_version(
1891 mach_port_t master_port,
1892 uint64_t *version)
1893 {
1894 *version = IOKIT_SERVER_VERSION;
1895 return (kIOReturnSuccess);
1896 }
1897
1898 /* Routine io_object_get_class */
1899 kern_return_t is_io_object_get_class(
1900 io_object_t object,
1901 io_name_t className )
1902 {
1903 const OSMetaClass* my_obj = NULL;
1904
1905 if( !object)
1906 return( kIOReturnBadArgument );
1907
1908 my_obj = object->getMetaClass();
1909 if (!my_obj) {
1910 return (kIOReturnNotFound);
1911 }
1912
1913 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1914
1915 return( kIOReturnSuccess );
1916 }
1917
1918 /* Routine io_object_get_superclass */
1919 kern_return_t is_io_object_get_superclass(
1920 mach_port_t master_port,
1921 io_name_t obj_name,
1922 io_name_t class_name)
1923 {
1924 const OSMetaClass* my_obj = NULL;
1925 const OSMetaClass* superclass = NULL;
1926 const OSSymbol *my_name = NULL;
1927 const char *my_cstr = NULL;
1928
1929 if (!obj_name || !class_name)
1930 return (kIOReturnBadArgument);
1931
1932 if( master_port != master_device_port)
1933 return( kIOReturnNotPrivileged);
1934
1935 my_name = OSSymbol::withCString(obj_name);
1936
1937 if (my_name) {
1938 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1939 my_name->release();
1940 }
1941 if (my_obj) {
1942 superclass = my_obj->getSuperClass();
1943 }
1944
1945 if (!superclass) {
1946 return( kIOReturnNotFound );
1947 }
1948
1949 my_cstr = superclass->getClassName();
1950
1951 if (my_cstr) {
1952 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1953 return( kIOReturnSuccess );
1954 }
1955 return (kIOReturnNotFound);
1956 }
1957
1958 /* Routine io_object_get_bundle_identifier */
1959 kern_return_t is_io_object_get_bundle_identifier(
1960 mach_port_t master_port,
1961 io_name_t obj_name,
1962 io_name_t bundle_name)
1963 {
1964 const OSMetaClass* my_obj = NULL;
1965 const OSSymbol *my_name = NULL;
1966 const OSSymbol *identifier = NULL;
1967 const char *my_cstr = NULL;
1968
1969 if (!obj_name || !bundle_name)
1970 return (kIOReturnBadArgument);
1971
1972 if( master_port != master_device_port)
1973 return( kIOReturnNotPrivileged);
1974
1975 my_name = OSSymbol::withCString(obj_name);
1976
1977 if (my_name) {
1978 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1979 my_name->release();
1980 }
1981
1982 if (my_obj) {
1983 identifier = my_obj->getKmodName();
1984 }
1985 if (!identifier) {
1986 return( kIOReturnNotFound );
1987 }
1988
1989 my_cstr = identifier->getCStringNoCopy();
1990 if (my_cstr) {
1991 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1992 return( kIOReturnSuccess );
1993 }
1994
1995 return (kIOReturnBadArgument);
1996 }
1997
1998 /* Routine io_object_conforms_to */
1999 kern_return_t is_io_object_conforms_to(
2000 io_object_t object,
2001 io_name_t className,
2002 boolean_t *conforms )
2003 {
2004 if( !object)
2005 return( kIOReturnBadArgument );
2006
2007 *conforms = (0 != object->metaCast( className ));
2008
2009 return( kIOReturnSuccess );
2010 }
2011
2012 /* Routine io_object_get_retain_count */
2013 kern_return_t is_io_object_get_retain_count(
2014 io_object_t object,
2015 uint32_t *retainCount )
2016 {
2017 if( !object)
2018 return( kIOReturnBadArgument );
2019
2020 *retainCount = object->getRetainCount();
2021 return( kIOReturnSuccess );
2022 }
2023
2024 /* Routine io_iterator_next */
2025 kern_return_t is_io_iterator_next(
2026 io_object_t iterator,
2027 io_object_t *object )
2028 {
2029 IOReturn ret;
2030 OSObject * obj;
2031
2032 CHECK( OSIterator, iterator, iter );
2033
2034 obj = iter->getNextObject();
2035 if( obj) {
2036 obj->retain();
2037 *object = obj;
2038 ret = kIOReturnSuccess;
2039 } else
2040 ret = kIOReturnNoDevice;
2041
2042 return (ret);
2043 }
2044
2045 /* Routine io_iterator_reset */
2046 kern_return_t is_io_iterator_reset(
2047 io_object_t iterator )
2048 {
2049 CHECK( OSIterator, iterator, iter );
2050
2051 iter->reset();
2052
2053 return( kIOReturnSuccess );
2054 }
2055
2056 /* Routine io_iterator_is_valid */
2057 kern_return_t is_io_iterator_is_valid(
2058 io_object_t iterator,
2059 boolean_t *is_valid )
2060 {
2061 CHECK( OSIterator, iterator, iter );
2062
2063 *is_valid = iter->isValid();
2064
2065 return( kIOReturnSuccess );
2066 }
2067
2068
2069 static kern_return_t internal_io_service_match_property_table(
2070 io_service_t _service,
2071 const char * matching,
2072 mach_msg_type_number_t matching_size,
2073 boolean_t *matches)
2074 {
2075 CHECK( IOService, _service, service );
2076
2077 kern_return_t kr;
2078 OSObject * obj;
2079 OSDictionary * dict;
2080
2081 assert(matching_size);
2082 obj = OSUnserializeXML(matching, matching_size);
2083
2084 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2085 *matches = service->passiveMatch( dict );
2086 kr = kIOReturnSuccess;
2087 } else
2088 kr = kIOReturnBadArgument;
2089
2090 if( obj)
2091 obj->release();
2092
2093 return( kr );
2094 }
2095
2096 /* Routine io_service_match_property_table */
2097 kern_return_t is_io_service_match_property_table(
2098 io_service_t service,
2099 io_string_t matching,
2100 boolean_t *matches )
2101 {
2102 return (kIOReturnUnsupported);
2103 }
2104
2105
2106 /* Routine io_service_match_property_table_ool */
2107 kern_return_t is_io_service_match_property_table_ool(
2108 io_object_t service,
2109 io_buf_ptr_t matching,
2110 mach_msg_type_number_t matchingCnt,
2111 kern_return_t *result,
2112 boolean_t *matches )
2113 {
2114 kern_return_t kr;
2115 vm_offset_t data;
2116 vm_map_offset_t map_data;
2117
2118 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2119 data = CAST_DOWN(vm_offset_t, map_data);
2120
2121 if( KERN_SUCCESS == kr) {
2122 // must return success after vm_map_copyout() succeeds
2123 *result = internal_io_service_match_property_table(service,
2124 (const char *)data, matchingCnt, matches );
2125 vm_deallocate( kernel_map, data, matchingCnt );
2126 }
2127
2128 return( kr );
2129 }
2130
2131 /* Routine io_service_match_property_table_bin */
2132 kern_return_t is_io_service_match_property_table_bin(
2133 io_object_t service,
2134 io_struct_inband_t matching,
2135 mach_msg_type_number_t matchingCnt,
2136 boolean_t *matches)
2137 {
2138 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2139 }
2140
2141 static kern_return_t internal_io_service_get_matching_services(
2142 mach_port_t master_port,
2143 const char * matching,
2144 mach_msg_type_number_t matching_size,
2145 io_iterator_t *existing )
2146 {
2147 kern_return_t kr;
2148 OSObject * obj;
2149 OSDictionary * dict;
2150
2151 if( master_port != master_device_port)
2152 return( kIOReturnNotPrivileged);
2153
2154 assert(matching_size);
2155 obj = OSUnserializeXML(matching, matching_size);
2156
2157 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2158 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2159 kr = kIOReturnSuccess;
2160 } else
2161 kr = kIOReturnBadArgument;
2162
2163 if( obj)
2164 obj->release();
2165
2166 return( kr );
2167 }
2168
2169 /* Routine io_service_get_matching_services */
2170 kern_return_t is_io_service_get_matching_services(
2171 mach_port_t master_port,
2172 io_string_t matching,
2173 io_iterator_t *existing )
2174 {
2175 return (kIOReturnUnsupported);
2176 }
2177
2178 /* Routine io_service_get_matching_services_ool */
2179 kern_return_t is_io_service_get_matching_services_ool(
2180 mach_port_t master_port,
2181 io_buf_ptr_t matching,
2182 mach_msg_type_number_t matchingCnt,
2183 kern_return_t *result,
2184 io_object_t *existing )
2185 {
2186 kern_return_t kr;
2187 vm_offset_t data;
2188 vm_map_offset_t map_data;
2189
2190 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2191 data = CAST_DOWN(vm_offset_t, map_data);
2192
2193 if( KERN_SUCCESS == kr) {
2194 // must return success after vm_map_copyout() succeeds
2195 // and mig will copy out objects on success
2196 *existing = 0;
2197 *result = internal_io_service_get_matching_services(master_port,
2198 (const char *) data, matchingCnt, existing);
2199 vm_deallocate( kernel_map, data, matchingCnt );
2200 }
2201
2202 return( kr );
2203 }
2204
2205 /* Routine io_service_get_matching_services_bin */
2206 kern_return_t is_io_service_get_matching_services_bin(
2207 mach_port_t master_port,
2208 io_struct_inband_t matching,
2209 mach_msg_type_number_t matchingCnt,
2210 io_object_t *existing)
2211 {
2212 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2213 }
2214
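/*
 * Illustrative only (not part of this file): IOServiceGetMatchingServices()
 * in IOKitLib serializes the caller's matching CFDictionary and typically
 * lands in one of the _bin/_ool routines above.  A minimal sketch, assuming
 * <IOKit/IOKitLib.h>; the class name is only an example:
 *
 *     io_iterator_t iter;
 *     kern_return_t kr = IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                             IOServiceMatching("IOMedia"), &iter);
 *     // on KERN_SUCCESS, drain iter with IOIteratorNext() and release it
 */
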
2215
2216 static kern_return_t internal_io_service_get_matching_service(
2217 mach_port_t master_port,
2218 const char * matching,
2219 mach_msg_type_number_t matching_size,
2220 io_service_t *service )
2221 {
2222 kern_return_t kr;
2223 OSObject * obj;
2224 OSDictionary * dict;
2225
2226 if( master_port != master_device_port)
2227 return( kIOReturnNotPrivileged);
2228
2229 assert(matching_size);
2230 obj = OSUnserializeXML(matching, matching_size);
2231
2232 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2233 *service = IOService::copyMatchingService( dict );
2234 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2235 } else
2236 kr = kIOReturnBadArgument;
2237
2238 if( obj)
2239 obj->release();
2240
2241 return( kr );
2242 }
2243
2244 /* Routine io_service_get_matching_service */
2245 kern_return_t is_io_service_get_matching_service(
2246 mach_port_t master_port,
2247 io_string_t matching,
2248 io_service_t *service )
2249 {
2250 return (kIOReturnUnsupported);
2251 }
2252
2253 /* Routine io_service_get_matching_service_ool */
2254 kern_return_t is_io_service_get_matching_service_ool(
2255 mach_port_t master_port,
2256 io_buf_ptr_t matching,
2257 mach_msg_type_number_t matchingCnt,
2258 kern_return_t *result,
2259 io_object_t *service )
2260 {
2261 kern_return_t kr;
2262 vm_offset_t data;
2263 vm_map_offset_t map_data;
2264
2265 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2266 data = CAST_DOWN(vm_offset_t, map_data);
2267
2268 if( KERN_SUCCESS == kr) {
2269 // must return success after vm_map_copyout() succeeds
2270 // and mig will copy out objects on success
2271 *service = 0;
2272 *result = internal_io_service_get_matching_service(master_port,
2273 (const char *) data, matchingCnt, service );
2274 vm_deallocate( kernel_map, data, matchingCnt );
2275 }
2276
2277 return( kr );
2278 }
2279
2280 /* Routine io_service_get_matching_service_bin */
2281 kern_return_t is_io_service_get_matching_service_bin(
2282 mach_port_t master_port,
2283 io_struct_inband_t matching,
2284 mach_msg_type_number_t matchingCnt,
2285 io_object_t *service)
2286 {
2287 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2288 }
2289
2290 static kern_return_t internal_io_service_add_notification(
2291 mach_port_t master_port,
2292 io_name_t notification_type,
2293 const char * matching,
2294 size_t matching_size,
2295 mach_port_t port,
2296 void * reference,
2297 vm_size_t referenceSize,
2298 bool client64,
2299 io_object_t * notification )
2300 {
2301 IOServiceUserNotification * userNotify = 0;
2302 IONotifier * notify = 0;
2303 const OSSymbol * sym;
2304 OSDictionary * dict = 0;
2305 IOReturn err;
2306 unsigned long int userMsgType;
2307
2308 if( master_port != master_device_port)
2309 return( kIOReturnNotPrivileged);
2310
2311 do {
2312 err = kIOReturnNoResources;
2313
2314 if( !(sym = OSSymbol::withCString( notification_type )))
2315 continue;
2316
2317 assert(matching_size);
2318 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2319 if (!dict) {
2320 err = kIOReturnBadArgument;
2321 continue;
2322 }
2323
2324 if( (sym == gIOPublishNotification)
2325 || (sym == gIOFirstPublishNotification))
2326 userMsgType = kIOServicePublishNotificationType;
2327 else if( (sym == gIOMatchedNotification)
2328 || (sym == gIOFirstMatchNotification))
2329 userMsgType = kIOServiceMatchedNotificationType;
2330 else if( sym == gIOTerminatedNotification)
2331 userMsgType = kIOServiceTerminatedNotificationType;
2332 else
2333 userMsgType = kLastIOKitNotificationType;
2334
2335 userNotify = new IOServiceUserNotification;
2336
2337 if( userNotify && !userNotify->init( port, userMsgType,
2338 reference, referenceSize, client64)) {
2339 iokit_release_port_send(port);
2340 userNotify->release();
2341 userNotify = 0;
2342 }
2343 if( !userNotify)
2344 continue;
2345
2346 notify = IOService::addMatchingNotification( sym, dict,
2347 &userNotify->_handler, userNotify );
2348 if( notify) {
2349 *notification = userNotify;
2350 userNotify->setNotification( notify );
2351 err = kIOReturnSuccess;
2352 } else
2353 err = kIOReturnUnsupported;
2354
2355 } while( false );
2356
2357 if( sym)
2358 sym->release();
2359 if( dict)
2360 dict->release();
2361
2362 return( err );
2363 }
2364
2365
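/*
 * Illustrative only (not part of this file): user-space clients reach
 * internal_io_service_add_notification() via IOServiceAddMatchingNotification().
 * A minimal sketch, assuming <IOKit/IOKitLib.h>; the callback name and the
 * matched class are placeholders:
 *
 *     IONotificationPortRef notifyPort = IONotificationPortCreate(kIOMasterPortDefault);
 *     io_iterator_t iter;
 *     IOServiceAddMatchingNotification(notifyPort, kIOFirstMatchNotification,
 *                                      IOServiceMatching("IOUSBHostDevice"),
 *                                      DeviceAppeared, NULL, &iter);
 *     // the callback must drain iter once to arm the notification
 */
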
2366 /* Routine io_service_add_notification */
2367 kern_return_t is_io_service_add_notification(
2368 mach_port_t master_port,
2369 io_name_t notification_type,
2370 io_string_t matching,
2371 mach_port_t port,
2372 io_async_ref_t reference,
2373 mach_msg_type_number_t referenceCnt,
2374 io_object_t * notification )
2375 {
2376 return (kIOReturnUnsupported);
2377 }
2378
2379 /* Routine io_service_add_notification_64 */
2380 kern_return_t is_io_service_add_notification_64(
2381 mach_port_t master_port,
2382 io_name_t notification_type,
2383 io_string_t matching,
2384 mach_port_t wake_port,
2385 io_async_ref64_t reference,
2386 mach_msg_type_number_t referenceCnt,
2387 io_object_t *notification )
2388 {
2389 return (kIOReturnUnsupported);
2390 }
2391
2392 /* Routine io_service_add_notification_bin */
2393 kern_return_t is_io_service_add_notification_bin
2394 (
2395 mach_port_t master_port,
2396 io_name_t notification_type,
2397 io_struct_inband_t matching,
2398 mach_msg_type_number_t matchingCnt,
2399 mach_port_t wake_port,
2400 io_async_ref_t reference,
2401 mach_msg_type_number_t referenceCnt,
2402 io_object_t *notification)
2403 {
2404 return (internal_io_service_add_notification(master_port, notification_type,
2405 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2406 false, notification));
2407 }
2408
2409 /* Routine io_service_add_notification_bin_64 */
2410 kern_return_t is_io_service_add_notification_bin_64
2411 (
2412 mach_port_t master_port,
2413 io_name_t notification_type,
2414 io_struct_inband_t matching,
2415 mach_msg_type_number_t matchingCnt,
2416 mach_port_t wake_port,
2417 io_async_ref64_t reference,
2418 mach_msg_type_number_t referenceCnt,
2419 io_object_t *notification)
2420 {
2421 return (internal_io_service_add_notification(master_port, notification_type,
2422 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2423 true, notification));
2424 }
2425
2426 static kern_return_t internal_io_service_add_notification_ool(
2427 mach_port_t master_port,
2428 io_name_t notification_type,
2429 io_buf_ptr_t matching,
2430 mach_msg_type_number_t matchingCnt,
2431 mach_port_t wake_port,
2432 void * reference,
2433 vm_size_t referenceSize,
2434 bool client64,
2435 kern_return_t *result,
2436 io_object_t *notification )
2437 {
2438 kern_return_t kr;
2439 vm_offset_t data;
2440 vm_map_offset_t map_data;
2441
2442 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2443 data = CAST_DOWN(vm_offset_t, map_data);
2444
2445 if( KERN_SUCCESS == kr) {
2446 // must return success after vm_map_copyout() succeeds
2447 // and mig will copy out objects on success
2448 *notification = 0;
2449 *result = internal_io_service_add_notification( master_port, notification_type,
2450 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2451 vm_deallocate( kernel_map, data, matchingCnt );
2452 }
2453
2454 return( kr );
2455 }
2456
2457 /* Routine io_service_add_notification_ool */
2458 kern_return_t is_io_service_add_notification_ool(
2459 mach_port_t master_port,
2460 io_name_t notification_type,
2461 io_buf_ptr_t matching,
2462 mach_msg_type_number_t matchingCnt,
2463 mach_port_t wake_port,
2464 io_async_ref_t reference,
2465 mach_msg_type_number_t referenceCnt,
2466 kern_return_t *result,
2467 io_object_t *notification )
2468 {
2469 return (internal_io_service_add_notification_ool(master_port, notification_type,
2470 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2471 false, result, notification));
2472 }
2473
2474 /* Routine io_service_add_notification_ool_64 */
2475 kern_return_t is_io_service_add_notification_ool_64(
2476 mach_port_t master_port,
2477 io_name_t notification_type,
2478 io_buf_ptr_t matching,
2479 mach_msg_type_number_t matchingCnt,
2480 mach_port_t wake_port,
2481 io_async_ref64_t reference,
2482 mach_msg_type_number_t referenceCnt,
2483 kern_return_t *result,
2484 io_object_t *notification )
2485 {
2486 return (internal_io_service_add_notification_ool(master_port, notification_type,
2487 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2488 true, result, notification));
2489 }
2490
2491 /* Routine io_service_add_notification_old */
2492 kern_return_t is_io_service_add_notification_old(
2493 mach_port_t master_port,
2494 io_name_t notification_type,
2495 io_string_t matching,
2496 mach_port_t port,
2497 // for binary compatibility reasons, this must be natural_t for ILP32
2498 natural_t ref,
2499 io_object_t * notification )
2500 {
2501 return( is_io_service_add_notification( master_port, notification_type,
2502 matching, port, &ref, 1, notification ));
2503 }
2504
2505
2506 static kern_return_t internal_io_service_add_interest_notification(
2507 io_object_t _service,
2508 io_name_t type_of_interest,
2509 mach_port_t port,
2510 void * reference,
2511 vm_size_t referenceSize,
2512 bool client64,
2513 io_object_t * notification )
2514 {
2515
2516 IOServiceMessageUserNotification * userNotify = 0;
2517 IONotifier * notify = 0;
2518 const OSSymbol * sym;
2519 IOReturn err;
2520
2521 CHECK( IOService, _service, service );
2522
2523 err = kIOReturnNoResources;
2524 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2525
2526 userNotify = new IOServiceMessageUserNotification;
2527
2528 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2529 reference, referenceSize,
2530 kIOUserNotifyMaxMessageSize,
2531 client64 )) {
2532 iokit_release_port_send(port);
2533 userNotify->release();
2534 userNotify = 0;
2535 }
2536 if( !userNotify)
2537 continue;
2538
2539 notify = service->registerInterest( sym,
2540 &userNotify->_handler, userNotify );
2541 if( notify) {
2542 *notification = userNotify;
2543 userNotify->setNotification( notify );
2544 err = kIOReturnSuccess;
2545 } else
2546 err = kIOReturnUnsupported;
2547
2548 sym->release();
2549
2550 } while( false );
2551
2552 return( err );
2553 }
2554
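/*
 * These handlers back IOServiceAddInterestNotification() in user space:
 * type_of_interest is an interest string such as kIOGeneralInterest, and
 * messages are delivered to the supplied port as
 * kIOServiceMessageNotificationType Mach messages.
 */
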
2555 /* Routine io_service_add_interest_notification */
2556 kern_return_t is_io_service_add_interest_notification(
2557 io_object_t service,
2558 io_name_t type_of_interest,
2559 mach_port_t port,
2560 io_async_ref_t reference,
2561 mach_msg_type_number_t referenceCnt,
2562 io_object_t * notification )
2563 {
2564 return (internal_io_service_add_interest_notification(service, type_of_interest,
2565 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2566 }
2567
2568 /* Routine io_service_add_interest_notification_64 */
2569 kern_return_t is_io_service_add_interest_notification_64(
2570 io_object_t service,
2571 io_name_t type_of_interest,
2572 mach_port_t wake_port,
2573 io_async_ref64_t reference,
2574 mach_msg_type_number_t referenceCnt,
2575 io_object_t *notification )
2576 {
2577 return (internal_io_service_add_interest_notification(service, type_of_interest,
2578 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2579 }
2580
2581
2582 /* Routine io_service_acknowledge_notification */
2583 kern_return_t is_io_service_acknowledge_notification(
2584 io_object_t _service,
2585 natural_t notify_ref,
2586 natural_t response )
2587 {
2588 CHECK( IOService, _service, service );
2589
2590 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2591 (IOOptionBits) response ));
2592
2593 }
2594
2595 /* Routine io_connect_get_notification_semaphore */
2596 kern_return_t is_io_connect_get_notification_semaphore(
2597 io_connect_t connection,
2598 natural_t notification_type,
2599 semaphore_t *semaphore )
2600 {
2601 CHECK( IOUserClient, connection, client );
2602
2603 IOStatisticsClientCall();
2604 return( client->getNotificationSemaphore( (UInt32) notification_type,
2605 semaphore ));
2606 }
2607
2608 /* Routine io_registry_get_root_entry */
2609 kern_return_t is_io_registry_get_root_entry(
2610 mach_port_t master_port,
2611 io_object_t *root )
2612 {
2613 IORegistryEntry * entry;
2614
2615 if( master_port != master_device_port)
2616 return( kIOReturnNotPrivileged);
2617
2618 entry = IORegistryEntry::getRegistryRoot();
2619 if( entry)
2620 entry->retain();
2621 *root = entry;
2622
2623 return( kIOReturnSuccess );
2624 }
2625
2626 /* Routine io_registry_create_iterator */
2627 kern_return_t is_io_registry_create_iterator(
2628 mach_port_t master_port,
2629 io_name_t plane,
2630 uint32_t options,
2631 io_object_t *iterator )
2632 {
2633 if( master_port != master_device_port)
2634 return( kIOReturnNotPrivileged);
2635
2636 *iterator = IOUserIterator::withIterator(
2637 IORegistryIterator::iterateOver(
2638 IORegistryEntry::getPlane( plane ), options ));
2639
2640 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2641 }
2642
2643 /* Routine io_registry_entry_create_iterator */
2644 kern_return_t is_io_registry_entry_create_iterator(
2645 io_object_t registry_entry,
2646 io_name_t plane,
2647 uint32_t options,
2648 io_object_t *iterator )
2649 {
2650 CHECK( IORegistryEntry, registry_entry, entry );
2651
2652 *iterator = IOUserIterator::withIterator(
2653 IORegistryIterator::iterateOver( entry,
2654 IORegistryEntry::getPlane( plane ), options ));
2655
2656 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2657 }
2658
2659 /* Routine io_registry_iterator_enter_entry */
2660 kern_return_t is_io_registry_iterator_enter_entry(
2661 io_object_t iterator )
2662 {
2663 CHECKLOCKED( IORegistryIterator, iterator, iter );
2664
2665 IOLockLock(oIter->lock);
2666 iter->enterEntry();
2667 IOLockUnlock(oIter->lock);
2668
2669 return( kIOReturnSuccess );
2670 }
2671
2672 /* Routine io_registry_iterator_exit_entry */
2673 kern_return_t is_io_registry_iterator_exit_entry(
2674 io_object_t iterator )
2675 {
2676 bool didIt;
2677
2678 CHECKLOCKED( IORegistryIterator, iterator, iter );
2679
2680 IOLockLock(oIter->lock);
2681 didIt = iter->exitEntry();
2682 IOLockUnlock(oIter->lock);
2683
2684 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2685 }
2686
2687 /* Routine io_registry_entry_from_path */
2688 kern_return_t is_io_registry_entry_from_path(
2689 mach_port_t master_port,
2690 io_string_t path,
2691 io_object_t *registry_entry )
2692 {
2693 IORegistryEntry * entry;
2694
2695 if( master_port != master_device_port)
2696 return( kIOReturnNotPrivileged);
2697
2698 entry = IORegistryEntry::fromPath( path );
2699
2700 *registry_entry = entry;
2701
2702 return( kIOReturnSuccess );
2703 }
2704
2705
2706 /* Routine io_registry_entry_from_path_ool */
2707 kern_return_t is_io_registry_entry_from_path_ool(
2708 mach_port_t master_port,
2709 io_string_inband_t path,
2710 io_buf_ptr_t path_ool,
2711 mach_msg_type_number_t path_oolCnt,
2712 kern_return_t *result,
2713 io_object_t *registry_entry)
2714 {
2715 IORegistryEntry * entry;
2716 vm_map_offset_t map_data;
2717 const char * cpath;
2718 IOReturn res;
2719 kern_return_t err;
2720
2721 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2722
2723 map_data = 0;
2724 entry = 0;
2725 res = err = KERN_SUCCESS;
2726 if (path[0]) cpath = path;
2727 else
2728 {
2729 if (!path_oolCnt) return(kIOReturnBadArgument);
2730 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2731
2732 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2733 if (KERN_SUCCESS == err)
2734 {
2735 // must return success to mig after vm_map_copyout() succeeds, so result is actual
2736 cpath = CAST_DOWN(const char *, map_data);
2737 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2738 }
2739 }
2740
2741 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2742 {
2743 entry = IORegistryEntry::fromPath(cpath);
2744 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2745 }
2746
2747 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2748
2749 if (KERN_SUCCESS != err) res = err;
2750 *registry_entry = entry;
2751 *result = res;
2752
2753 return (err);
2754 }
2755
2756
2757 /* Routine io_registry_entry_in_plane */
2758 kern_return_t is_io_registry_entry_in_plane(
2759 io_object_t registry_entry,
2760 io_name_t plane,
2761 boolean_t *inPlane )
2762 {
2763 CHECK( IORegistryEntry, registry_entry, entry );
2764
2765 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2766
2767 return( kIOReturnSuccess );
2768 }
2769
2770
2771 /* Routine io_registry_entry_get_path */
2772 kern_return_t is_io_registry_entry_get_path(
2773 io_object_t registry_entry,
2774 io_name_t plane,
2775 io_string_t path )
2776 {
2777 int length;
2778 CHECK( IORegistryEntry, registry_entry, entry );
2779
2780 length = sizeof( io_string_t);
2781 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2782 return( kIOReturnSuccess );
2783 else
2784 return( kIOReturnBadArgument );
2785 }
2786
2787 /* Routine io_registry_entry_get_path_ool */
2788 kern_return_t is_io_registry_entry_get_path_ool(
2789 io_object_t registry_entry,
2790 io_name_t plane,
2791 io_string_inband_t path,
2792 io_buf_ptr_t *path_ool,
2793 mach_msg_type_number_t *path_oolCnt)
2794 {
2795 enum { kMaxPath = 16384 };
2796 IOReturn err;
2797 int length;
2798 char * buf;
2799
2800 CHECK( IORegistryEntry, registry_entry, entry );
2801
2802 *path_ool = NULL;
2803 *path_oolCnt = 0;
2804 length = sizeof(io_string_inband_t);
2805 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2806 else
2807 {
2808 length = kMaxPath;
2809 buf = IONew(char, length);
2810 if (!buf) err = kIOReturnNoMemory;
2811 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2812 else
2813 {
2814 *path_oolCnt = length;
2815 err = copyoutkdata(buf, length, path_ool);
2816 }
2817 if (buf) IODelete(buf, char, kMaxPath);
2818 }
2819
2820 return (err);
2821 }
2822
2823
2824 /* Routine io_registry_entry_get_name */
2825 kern_return_t is_io_registry_entry_get_name(
2826 io_object_t registry_entry,
2827 io_name_t name )
2828 {
2829 CHECK( IORegistryEntry, registry_entry, entry );
2830
2831 strlcpy( name, entry->getName(), sizeof( io_name_t));
2832
2833 return( kIOReturnSuccess );
2834 }
2835
2836 /* Routine io_registry_entry_get_name_in_plane */
2837 kern_return_t is_io_registry_entry_get_name_in_plane(
2838 io_object_t registry_entry,
2839 io_name_t planeName,
2840 io_name_t name )
2841 {
2842 const IORegistryPlane * plane;
2843 CHECK( IORegistryEntry, registry_entry, entry );
2844
2845 if( planeName[0])
2846 plane = IORegistryEntry::getPlane( planeName );
2847 else
2848 plane = 0;
2849
2850 strlcpy( name, entry->getName( plane), sizeof( io_name_t));
2851
2852 return( kIOReturnSuccess );
2853 }
2854
2855 /* Routine io_registry_entry_get_location_in_plane */
2856 kern_return_t is_io_registry_entry_get_location_in_plane(
2857 io_object_t registry_entry,
2858 io_name_t planeName,
2859 io_name_t location )
2860 {
2861 const IORegistryPlane * plane;
2862 CHECK( IORegistryEntry, registry_entry, entry );
2863
2864 if( planeName[0])
2865 plane = IORegistryEntry::getPlane( planeName );
2866 else
2867 plane = 0;
2868
2869 const char * cstr = entry->getLocation( plane );
2870
2871 if( cstr) {
2872 strlcpy( location, cstr, sizeof( io_name_t));
2873 return( kIOReturnSuccess );
2874 } else
2875 return( kIOReturnNotFound );
2876 }
2877
2878 /* Routine io_registry_entry_get_registry_entry_id */
2879 kern_return_t is_io_registry_entry_get_registry_entry_id(
2880 io_object_t registry_entry,
2881 uint64_t *entry_id )
2882 {
2883 CHECK( IORegistryEntry, registry_entry, entry );
2884
2885 *entry_id = entry->getRegistryEntryID();
2886
2887 return (kIOReturnSuccess);
2888 }
2889
2890 /* Routine io_registry_entry_get_property_bytes */
2891 kern_return_t is_io_registry_entry_get_property_bytes(
2892 io_object_t registry_entry,
2893 io_name_t property_name,
2894 io_struct_inband_t buf,
2895 mach_msg_type_number_t *dataCnt )
2896 {
2897 OSObject * obj;
2898 OSData * data;
2899 OSString * str;
2900 OSBoolean * boo;
2901 OSNumber * off;
2902 UInt64 offsetBytes;
2903 unsigned int len = 0;
2904 const void * bytes = 0;
2905 IOReturn ret = kIOReturnSuccess;
2906
2907 CHECK( IORegistryEntry, registry_entry, entry );
2908
2909 #if CONFIG_MACF
2910 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2911 return kIOReturnNotPermitted;
2912 #endif
2913
2914 obj = entry->copyProperty(property_name);
2915 if( !obj)
2916 return( kIOReturnNoResources );
2917
2918 // One day OSData will be a common container base class
2919 // until then...
2920 if( (data = OSDynamicCast( OSData, obj ))) {
2921 len = data->getLength();
2922 bytes = data->getBytesNoCopy();
2923
2924 } else if( (str = OSDynamicCast( OSString, obj ))) {
2925 len = str->getLength() + 1;
2926 bytes = str->getCStringNoCopy();
2927
2928 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2929 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2930 bytes = boo->isTrue() ? "Yes" : "No";
2931
2932 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2933 offsetBytes = off->unsigned64BitValue();
2934 len = off->numberOfBytes();
2935 if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
2936 bytes = &offsetBytes;
2937 #ifdef __BIG_ENDIAN__
2938 bytes = (const void *)
2939 (((uintptr_t) bytes) + (sizeof( UInt64) - len));
2940 #endif
2941
2942 } else
2943 ret = kIOReturnBadArgument;
2944
2945 if( bytes) {
2946 if( *dataCnt < len)
2947 ret = kIOReturnIPCError;
2948 else {
2949 *dataCnt = len;
2950 bcopy( bytes, buf, len );
2951 }
2952 }
2953 obj->release();
2954
2955 return( ret );
2956 }
2957
2958
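/*
 * Illustrative only (not part of this file): the inband "bytes" form above is
 * what IORegistryEntryGetProperty() uses; OSData/OSString/OSBoolean/OSNumber
 * values are flattened into the caller's fixed io_struct_inband_t buffer.
 * A minimal sketch, assuming <IOKit/IOKitLib.h>; "entry" is any
 * io_registry_entry_t already held by the caller:
 *
 *     io_struct_inband_t buf;
 *     uint32_t size = sizeof(buf);
 *     if (KERN_SUCCESS == IORegistryEntryGetProperty(entry, "IOClass", buf, &size))
 *         printf("%.*s\n", (int) size, buf);
 */
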
2959 /* Routine io_registry_entry_get_property */
2960 kern_return_t is_io_registry_entry_get_property(
2961 io_object_t registry_entry,
2962 io_name_t property_name,
2963 io_buf_ptr_t *properties,
2964 mach_msg_type_number_t *propertiesCnt )
2965 {
2966 kern_return_t err;
2967 vm_size_t len;
2968 OSObject * obj;
2969
2970 CHECK( IORegistryEntry, registry_entry, entry );
2971
2972 #if CONFIG_MACF
2973 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2974 return kIOReturnNotPermitted;
2975 #endif
2976
2977 obj = entry->copyProperty(property_name);
2978 if( !obj)
2979 return( kIOReturnNotFound );
2980
2981 OSSerialize * s = OSSerialize::withCapacity(4096);
2982 if( !s) {
2983 obj->release();
2984 return( kIOReturnNoMemory );
2985 }
2986
2987 if( obj->serialize( s )) {
2988 len = s->getLength();
2989 *propertiesCnt = len;
2990 err = copyoutkdata( s->text(), len, properties );
2991
2992 } else
2993 err = kIOReturnUnsupported;
2994
2995 s->release();
2996 obj->release();
2997
2998 return( err );
2999 }
3000
3001 /* Routine io_registry_entry_get_property_recursively */
3002 kern_return_t is_io_registry_entry_get_property_recursively(
3003 io_object_t registry_entry,
3004 io_name_t plane,
3005 io_name_t property_name,
3006 uint32_t options,
3007 io_buf_ptr_t *properties,
3008 mach_msg_type_number_t *propertiesCnt )
3009 {
3010 kern_return_t err;
3011 vm_size_t len;
3012 OSObject * obj;
3013
3014 CHECK( IORegistryEntry, registry_entry, entry );
3015
3016 #if CONFIG_MACF
3017 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3018 return kIOReturnNotPermitted;
3019 #endif
3020
3021 obj = entry->copyProperty( property_name,
3022 IORegistryEntry::getPlane( plane ), options );
3023 if( !obj)
3024 return( kIOReturnNotFound );
3025
3026 OSSerialize * s = OSSerialize::withCapacity(4096);
3027 if( !s) {
3028 obj->release();
3029 return( kIOReturnNoMemory );
3030 }
3031
3032 if( obj->serialize( s )) {
3033 len = s->getLength();
3034 *propertiesCnt = len;
3035 err = copyoutkdata( s->text(), len, properties );
3036
3037 } else
3038 err = kIOReturnUnsupported;
3039
3040 s->release();
3041 obj->release();
3042
3043 return( err );
3044 }
3045
3046 /* Routine io_registry_entry_get_properties */
3047 kern_return_t is_io_registry_entry_get_properties(
3048 io_object_t registry_entry,
3049 io_buf_ptr_t *properties,
3050 mach_msg_type_number_t *propertiesCnt )
3051 {
3052 return (kIOReturnUnsupported);
3053 }
3054
3055 #if CONFIG_MACF
3056
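/*
 * With CONFIG_MACF, properties serialized for user space can be filtered
 * per key: GetPropertiesEditor() is installed as the OSSerialize editor and
 * returns NULL for any top-level value that the MAC policy rejects via
 * mac_iokit_check_get_property(), so rejected values never reach the
 * user-space copy of the dictionary.
 */
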
3057 struct GetPropertiesEditorRef
3058 {
3059 kauth_cred_t cred;
3060 IORegistryEntry * entry;
3061 OSCollection * root;
3062 };
3063
3064 static const OSMetaClassBase *
3065 GetPropertiesEditor(void * reference,
3066 OSSerialize * s,
3067 OSCollection * container,
3068 const OSSymbol * name,
3069 const OSMetaClassBase * value)
3070 {
3071 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3072
3073 if (!ref->root) ref->root = container;
3074 if (ref->root == container)
3075 {
3076 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3077 {
3078 value = 0;
3079 }
3080 }
3081 if (value) value->retain();
3082 return (value);
3083 }
3084
3085 #endif /* CONFIG_MACF */
3086
3087 /* Routine io_registry_entry_get_properties_bin */
3088 kern_return_t is_io_registry_entry_get_properties_bin(
3089 io_object_t registry_entry,
3090 io_buf_ptr_t *properties,
3091 mach_msg_type_number_t *propertiesCnt)
3092 {
3093 kern_return_t err = kIOReturnSuccess;
3094 vm_size_t len;
3095 OSSerialize * s;
3096 OSSerialize::Editor editor = 0;
3097 void * editRef = 0;
3098
3099 CHECK(IORegistryEntry, registry_entry, entry);
3100
3101 #if CONFIG_MACF
3102 GetPropertiesEditorRef ref;
3103 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3104 {
3105 editor = &GetPropertiesEditor;
3106 editRef = &ref;
3107 ref.cred = kauth_cred_get();
3108 ref.entry = entry;
3109 ref.root = 0;
3110 }
3111 #endif
3112
3113 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3114 if (!s) return (kIOReturnNoMemory);
3115
3116 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3117
3118 if (kIOReturnSuccess == err)
3119 {
3120 len = s->getLength();
3121 *propertiesCnt = len;
3122 err = copyoutkdata(s->text(), len, properties);
3123 }
3124 s->release();
3125
3126 return (err);
3127 }
3128
3129 /* Routine io_registry_entry_get_property_bin */
3130 kern_return_t is_io_registry_entry_get_property_bin(
3131 io_object_t registry_entry,
3132 io_name_t plane,
3133 io_name_t property_name,
3134 uint32_t options,
3135 io_buf_ptr_t *properties,
3136 mach_msg_type_number_t *propertiesCnt )
3137 {
3138 kern_return_t err;
3139 vm_size_t len;
3140 OSObject * obj;
3141 const OSSymbol * sym;
3142
3143 CHECK( IORegistryEntry, registry_entry, entry );
3144
3145 #if CONFIG_MACF
3146 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3147 return kIOReturnNotPermitted;
3148 #endif
3149
3150 sym = OSSymbol::withCString(property_name);
3151 if (!sym) return (kIOReturnNoMemory);
3152
3153 if (gIORegistryEntryPropertyKeysKey == sym)
3154 {
3155 obj = entry->copyPropertyKeys();
3156 }
3157 else
3158 {
3159 if ((kIORegistryIterateRecursively & options) && plane[0])
3160 {
3161 obj = entry->copyProperty(property_name,
3162 IORegistryEntry::getPlane(plane), options );
3163 }
3164 else
3165 {
3166 obj = entry->copyProperty(property_name);
3167 }
3168 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3169 }
3170
3171 sym->release();
3172 if (!obj) return (kIOReturnNotFound);
3173
3174 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3175 if( !s) {
3176 obj->release();
3177 return( kIOReturnNoMemory );
3178 }
3179
3180 if( obj->serialize( s )) {
3181 len = s->getLength();
3182 *propertiesCnt = len;
3183 err = copyoutkdata( s->text(), len, properties );
3184
3185 } else err = kIOReturnUnsupported;
3186
3187 s->release();
3188 obj->release();
3189
3190 return( err );
3191 }
3192
3193
3194 /* Routine io_registry_entry_set_properties */
3195 kern_return_t is_io_registry_entry_set_properties
3196 (
3197 io_object_t registry_entry,
3198 io_buf_ptr_t properties,
3199 mach_msg_type_number_t propertiesCnt,
3200 kern_return_t * result)
3201 {
3202 OSObject * obj;
3203 kern_return_t err;
3204 IOReturn res;
3205 vm_offset_t data;
3206 vm_map_offset_t map_data;
3207
3208 CHECK( IORegistryEntry, registry_entry, entry );
3209
3210 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3211 return( kIOReturnMessageTooLarge);
3212
3213 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3214 data = CAST_DOWN(vm_offset_t, map_data);
3215
3216 if( KERN_SUCCESS == err) {
3217
3218 FAKE_STACK_FRAME(entry->getMetaClass());
3219
3220 // must return success after vm_map_copyout() succeeds
3221 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3222 vm_deallocate( kernel_map, data, propertiesCnt );
3223
3224 if (!obj)
3225 res = kIOReturnBadArgument;
3226 #if CONFIG_MACF
3227 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3228 registry_entry, obj))
3229 {
3230 res = kIOReturnNotPermitted;
3231 }
3232 #endif
3233 else
3234 {
3235 res = entry->setProperties( obj );
3236 }
3237
3238 if (obj)
3239 obj->release();
3240
3241 FAKE_STACK_FRAME_END();
3242
3243 } else
3244 res = err;
3245
3246 *result = res;
3247 return( err );
3248 }
3249
3250 /* Routine io_registry_entry_get_child_iterator */
3251 kern_return_t is_io_registry_entry_get_child_iterator(
3252 io_object_t registry_entry,
3253 io_name_t plane,
3254 io_object_t *iterator )
3255 {
3256 CHECK( IORegistryEntry, registry_entry, entry );
3257
3258 *iterator = entry->getChildIterator(
3259 IORegistryEntry::getPlane( plane ));
3260
3261 return( kIOReturnSuccess );
3262 }
3263
3264 /* Routine io_registry_entry_get_parent_iterator */
3265 kern_return_t is_io_registry_entry_get_parent_iterator(
3266 io_object_t registry_entry,
3267 io_name_t plane,
3268 io_object_t *iterator)
3269 {
3270 CHECK( IORegistryEntry, registry_entry, entry );
3271
3272 *iterator = entry->getParentIterator(
3273 IORegistryEntry::getPlane( plane ));
3274
3275 return( kIOReturnSuccess );
3276 }
3277
3278 /* Routine io_service_get_busy_state */
3279 kern_return_t is_io_service_get_busy_state(
3280 io_object_t _service,
3281 uint32_t *busyState )
3282 {
3283 CHECK( IOService, _service, service );
3284
3285 *busyState = service->getBusyState();
3286
3287 return( kIOReturnSuccess );
3288 }
3289
3290 /* Routine io_service_get_state */
3291 kern_return_t is_io_service_get_state(
3292 io_object_t _service,
3293 uint64_t *state,
3294 uint32_t *busy_state,
3295 uint64_t *accumulated_busy_time )
3296 {
3297 CHECK( IOService, _service, service );
3298
3299 *state = service->getState();
3300 *busy_state = service->getBusyState();
3301 *accumulated_busy_time = service->getAccumulatedBusyTime();
3302
3303 return( kIOReturnSuccess );
3304 }
3305
3306 /* Routine io_service_wait_quiet */
3307 kern_return_t is_io_service_wait_quiet(
3308 io_object_t _service,
3309 mach_timespec_t wait_time )
3310 {
3311 uint64_t timeoutNS;
3312
3313 CHECK( IOService, _service, service );
3314
3315 timeoutNS = wait_time.tv_sec;
3316 timeoutNS *= kSecondScale;
3317 timeoutNS += wait_time.tv_nsec;
3318
3319 return( service->waitQuiet(timeoutNS) );
3320 }
3321
3322 /* Routine io_service_request_probe */
3323 kern_return_t is_io_service_request_probe(
3324 io_object_t _service,
3325 uint32_t options )
3326 {
3327 CHECK( IOService, _service, service );
3328
3329 return( service->requestProbe( options ));
3330 }
3331
3332 /* Routine io_service_get_authorization_id */
3333 kern_return_t is_io_service_get_authorization_id(
3334 io_object_t _service,
3335 uint64_t *authorization_id )
3336 {
3337 kern_return_t kr;
3338
3339 CHECK( IOService, _service, service );
3340
3341 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3342 kIOClientPrivilegeAdministrator );
3343 if( kIOReturnSuccess != kr)
3344 return( kr );
3345
3346 *authorization_id = service->getAuthorizationID();
3347
3348 return( kr );
3349 }
3350
3351 /* Routine io_service_set_authorization_id */
3352 kern_return_t is_io_service_set_authorization_id(
3353 io_object_t _service,
3354 uint64_t authorization_id )
3355 {
3356 CHECK( IOService, _service, service );
3357
3358 return( service->setAuthorizationID( authorization_id ) );
3359 }
3360
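/*
 * is_io_service_open_extended() is the kernel side of IOServiceOpen(): it
 * unserializes any inband open properties, records the caller's NDR so that
 * cross-endian clients are refused unless the service or client opts in via
 * kIOUserClientCrossEndianCompatibleKey, and then asks the service for a new
 * IOUserClient with newUserClient().
 */
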
3361 /* Routine io_service_open_extended */
3362 kern_return_t is_io_service_open_extended(
3363 io_object_t _service,
3364 task_t owningTask,
3365 uint32_t connect_type,
3366 NDR_record_t ndr,
3367 io_buf_ptr_t properties,
3368 mach_msg_type_number_t propertiesCnt,
3369 kern_return_t * result,
3370 io_object_t *connection )
3371 {
3372 IOUserClient * client = 0;
3373 kern_return_t err = KERN_SUCCESS;
3374 IOReturn res = kIOReturnSuccess;
3375 OSDictionary * propertiesDict = 0;
3376 bool crossEndian;
3377 bool disallowAccess;
3378
3379 CHECK( IOService, _service, service );
3380
3381 if (!owningTask) return (kIOReturnBadArgument);
3382 assert(owningTask == current_task());
3383 if (owningTask != current_task()) return (kIOReturnBadArgument);
3384
3385 do
3386 {
3387 if (properties)
3388 {
3389 OSObject * obj;
3390 vm_offset_t data;
3391 vm_map_offset_t map_data;
3392
3393 if( propertiesCnt > sizeof(io_struct_inband_t))
3394 return( kIOReturnMessageTooLarge);
3395
3396 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3397 res = err;
3398 data = CAST_DOWN(vm_offset_t, map_data);
3399 if (KERN_SUCCESS == err)
3400 {
3401 // must return success after vm_map_copyout() succeeds
3402 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3403 vm_deallocate( kernel_map, data, propertiesCnt );
3404 propertiesDict = OSDynamicCast(OSDictionary, obj);
3405 if (!propertiesDict)
3406 {
3407 res = kIOReturnBadArgument;
3408 if (obj)
3409 obj->release();
3410 }
3411 }
3412 if (kIOReturnSuccess != res)
3413 break;
3414 }
3415
3416 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3417 if (crossEndian)
3418 {
3419 if (!propertiesDict)
3420 propertiesDict = OSDictionary::withCapacity(4);
3421 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3422 if (data)
3423 {
3424 if (propertiesDict)
3425 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3426 data->release();
3427 }
3428 }
3429
3430 res = service->newUserClient( owningTask, (void *) owningTask,
3431 connect_type, propertiesDict, &client );
3432
3433 if (propertiesDict)
3434 propertiesDict->release();
3435
3436 if (res == kIOReturnSuccess)
3437 {
3438 assert( OSDynamicCast(IOUserClient, client) );
3439
3440 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3441 client->closed = false;
3442
3443 disallowAccess = (crossEndian
3444 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3445 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3446 if (disallowAccess) res = kIOReturnUnsupported;
3447 #if CONFIG_MACF
3448 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3449 res = kIOReturnNotPermitted;
3450 #endif
3451
3452 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3453
3454 if (kIOReturnSuccess != res)
3455 {
3456 IOStatisticsClientCall();
3457 client->clientClose();
3458 client->release();
3459 client = 0;
3460 break;
3461 }
3462 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3463 if (creatorName)
3464 {
3465 client->setProperty(kIOUserClientCreatorKey, creatorName);
3466 creatorName->release();
3467 }
3468 client->setTerminateDefer(service, false);
3469 }
3470 }
3471 while (false);
3472
3473 *connection = client;
3474 *result = res;
3475
3476 return (err);
3477 }
3478
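/*
 * clientClose() must run at most once per connection unless the user client
 * is a shared instance; the atomic swap on client->closed below is what turns
 * a second is_io_service_close() on the same connection into a logged no-op.
 */
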
3479 /* Routine io_service_close */
3480 kern_return_t is_io_service_close(
3481 io_object_t connection )
3482 {
3483 OSSet * mappings;
3484 if ((mappings = OSDynamicCast(OSSet, connection)))
3485 return( kIOReturnSuccess );
3486
3487 CHECK( IOUserClient, connection, client );
3488
3489 IOStatisticsClientCall();
3490
3491 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3492 {
3493 client->clientClose();
3494 }
3495 else
3496 {
3497 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3498 client->getRegistryEntryID(), client->getName());
3499 }
3500
3501 return( kIOReturnSuccess );
3502 }
3503
3504 /* Routine io_connect_get_service */
3505 kern_return_t is_io_connect_get_service(
3506 io_object_t connection,
3507 io_object_t *service )
3508 {
3509 IOService * theService;
3510
3511 CHECK( IOUserClient, connection, client );
3512
3513 theService = client->getService();
3514 if( theService)
3515 theService->retain();
3516
3517 *service = theService;
3518
3519 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3520 }
3521
3522 /* Routine io_connect_set_notification_port */
3523 kern_return_t is_io_connect_set_notification_port(
3524 io_object_t connection,
3525 uint32_t notification_type,
3526 mach_port_t port,
3527 uint32_t reference)
3528 {
3529 CHECK( IOUserClient, connection, client );
3530
3531 IOStatisticsClientCall();
3532 return( client->registerNotificationPort( port, notification_type,
3533 (io_user_reference_t) reference ));
3534 }
3535
3536 /* Routine io_connect_set_notification_port_64 */
3537 kern_return_t is_io_connect_set_notification_port_64(
3538 io_object_t connection,
3539 uint32_t notification_type,
3540 mach_port_t port,
3541 io_user_reference_t reference)
3542 {
3543 CHECK( IOUserClient, connection, client );
3544
3545 IOStatisticsClientCall();
3546 return( client->registerNotificationPort( port, notification_type,
3547 reference ));
3548 }
3549
3550 /* Routine io_connect_map_memory_into_task */
3551 kern_return_t is_io_connect_map_memory_into_task
3552 (
3553 io_connect_t connection,
3554 uint32_t memory_type,
3555 task_t into_task,
3556 mach_vm_address_t *address,
3557 mach_vm_size_t *size,
3558 uint32_t flags
3559 )
3560 {
3561 IOReturn err;
3562 IOMemoryMap * map;
3563
3564 CHECK( IOUserClient, connection, client );
3565
3566 if (!into_task) return (kIOReturnBadArgument);
3567
3568 IOStatisticsClientCall();
3569 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3570
3571 if( map) {
3572 *address = map->getAddress();
3573 if( size)
3574 *size = map->getSize();
3575
3576 if( client->sharedInstance
3577 || (into_task != current_task())) {
3578 // push a name out to the task owning the map,
3579 // so we can clean up maps
3580 mach_port_name_t name __unused =
3581 IOMachPort::makeSendRightForTask(
3582 into_task, map, IKOT_IOKIT_OBJECT );
3583
3584 } else {
3585 // keep it with the user client
3586 IOLockLock( gIOObjectPortLock);
3587 if( 0 == client->mappings)
3588 client->mappings = OSSet::withCapacity(2);
3589 if( client->mappings)
3590 client->mappings->setObject( map);
3591 IOLockUnlock( gIOObjectPortLock);
3592 map->release();
3593 }
3594 err = kIOReturnSuccess;
3595
3596 } else
3597 err = kIOReturnBadArgument;
3598
3599 return( err );
3600 }
3601
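/*
 * Ownership of the IOMemoryMap created above depends on who asked for it:
 * for shared-instance clients, or when mapping into a task other than the
 * caller, a send right naming the map is pushed to the target task so the
 * mapping can be cleaned up with that task; otherwise the map is parked on
 * client->mappings and torn down with the user client.
 */
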
3602 /* Routine io_connect_map_memory */
3603 kern_return_t is_io_connect_map_memory(
3604 io_object_t connect,
3605 uint32_t type,
3606 task_t task,
3607 uint32_t * mapAddr,
3608 uint32_t * mapSize,
3609 uint32_t flags )
3610 {
3611 IOReturn err;
3612 mach_vm_address_t address;
3613 mach_vm_size_t size;
3614
3615 address = SCALAR64(*mapAddr);
3616 size = SCALAR64(*mapSize);
3617
3618 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3619
3620 *mapAddr = SCALAR32(address);
3621 *mapSize = SCALAR32(size);
3622
3623 return (err);
3624 }
3625
3626 } /* extern "C" */
3627
3628 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3629 {
3630 OSIterator * iter;
3631 IOMemoryMap * map = 0;
3632
3633 IOLockLock(gIOObjectPortLock);
3634
3635 iter = OSCollectionIterator::withCollection(mappings);
3636 if(iter)
3637 {
3638 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3639 {
3640 if(mem == map->getMemoryDescriptor())
3641 {
3642 map->retain();
3643 mappings->removeObject(map);
3644 break;
3645 }
3646 }
3647 iter->release();
3648 }
3649
3650 IOLockUnlock(gIOObjectPortLock);
3651
3652 return (map);
3653 }
3654
3655 extern "C" {
3656
3657 /* Routine io_connect_unmap_memory_from_task */
3658 kern_return_t is_io_connect_unmap_memory_from_task
3659 (
3660 io_connect_t connection,
3661 uint32_t memory_type,
3662 task_t from_task,
3663 mach_vm_address_t address)
3664 {
3665 IOReturn err;
3666 IOOptionBits options = 0;
3667 IOMemoryDescriptor * memory;
3668 IOMemoryMap * map;
3669
3670 CHECK( IOUserClient, connection, client );
3671
3672 if (!from_task) return (kIOReturnBadArgument);
3673
3674 IOStatisticsClientCall();
3675 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3676
3677 if( memory && (kIOReturnSuccess == err)) {
3678
3679 options = (options & ~kIOMapUserOptionsMask)
3680 | kIOMapAnywhere | kIOMapReference;
3681
3682 map = memory->createMappingInTask( from_task, address, options );
3683 memory->release();
3684 if( map)
3685 {
3686 IOLockLock( gIOObjectPortLock);
3687 if( client->mappings)
3688 client->mappings->removeObject( map);
3689 IOLockUnlock( gIOObjectPortLock);
3690
3691 mach_port_name_t name = 0;
3692 if (from_task != current_task())
3693 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3694 if (name)
3695 {
3696 map->userClientUnmap();
3697 err = iokit_mod_send_right( from_task, name, -2 );
3698 err = kIOReturnSuccess;
3699 }
3700 else
3701 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3702 if (from_task == current_task())
3703 map->release();
3704 }
3705 else
3706 err = kIOReturnBadArgument;
3707 }
3708
3709 return( err );
3710 }
3711
3712 kern_return_t is_io_connect_unmap_memory(
3713 io_object_t connect,
3714 uint32_t type,
3715 task_t task,
3716 uint32_t mapAddr )
3717 {
3718 IOReturn err;
3719 mach_vm_address_t address;
3720
3721 address = SCALAR64(mapAddr);
3722
3723 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3724
3725 return (err);
3726 }
3727
3728
3729 /* Routine io_connect_add_client */
3730 kern_return_t is_io_connect_add_client(
3731 io_object_t connection,
3732 io_object_t connect_to)
3733 {
3734 CHECK( IOUserClient, connection, client );
3735 CHECK( IOUserClient, connect_to, to );
3736
3737 IOStatisticsClientCall();
3738 return( client->connectClient( to ) );
3739 }
3740
3741
3742 /* Routine io_connect_set_properties */
3743 kern_return_t is_io_connect_set_properties(
3744 io_object_t connection,
3745 io_buf_ptr_t properties,
3746 mach_msg_type_number_t propertiesCnt,
3747 kern_return_t * result)
3748 {
3749 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3750 }
3751
3752 /* Routine io_connect_method_var_output */
3753 kern_return_t is_io_connect_method_var_output
3754 (
3755 io_connect_t connection,
3756 uint32_t selector,
3757 io_scalar_inband64_t scalar_input,
3758 mach_msg_type_number_t scalar_inputCnt,
3759 io_struct_inband_t inband_input,
3760 mach_msg_type_number_t inband_inputCnt,
3761 mach_vm_address_t ool_input,
3762 mach_vm_size_t ool_input_size,
3763 io_struct_inband_t inband_output,
3764 mach_msg_type_number_t *inband_outputCnt,
3765 io_scalar_inband64_t scalar_output,
3766 mach_msg_type_number_t *scalar_outputCnt,
3767 io_buf_ptr_t *var_output,
3768 mach_msg_type_number_t *var_outputCnt
3769 )
3770 {
3771 CHECK( IOUserClient, connection, client );
3772
3773 IOExternalMethodArguments args;
3774 IOReturn ret;
3775 IOMemoryDescriptor * inputMD = 0;
3776 OSObject * structureVariableOutputData = 0;
3777
3778 bzero(&args.__reserved[0], sizeof(args.__reserved));
3779 args.__reservedA = 0;
3780 args.version = kIOExternalMethodArgumentsCurrentVersion;
3781
3782 args.selector = selector;
3783
3784 args.asyncWakePort = MACH_PORT_NULL;
3785 args.asyncReference = 0;
3786 args.asyncReferenceCount = 0;
3787 args.structureVariableOutputData = &structureVariableOutputData;
3788
3789 args.scalarInput = scalar_input;
3790 args.scalarInputCount = scalar_inputCnt;
3791 args.structureInput = inband_input;
3792 args.structureInputSize = inband_inputCnt;
3793
3794 if (ool_input)
3795 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3796 kIODirectionOut, current_task());
3797
3798 args.structureInputDescriptor = inputMD;
3799
3800 args.scalarOutput = scalar_output;
3801 args.scalarOutputCount = *scalar_outputCnt;
3802 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3803 args.structureOutput = inband_output;
3804 args.structureOutputSize = *inband_outputCnt;
3805 args.structureOutputDescriptor = NULL;
3806 args.structureOutputDescriptorSize = 0;
3807
3808 IOStatisticsClientCall();
3809 ret = client->externalMethod( selector, &args );
3810
3811 *scalar_outputCnt = args.scalarOutputCount;
3812 *inband_outputCnt = args.structureOutputSize;
3813
3814 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3815 {
3816 OSSerialize * serialize;
3817 OSData * data;
3818 vm_size_t len;
3819
3820 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3821 {
3822 len = serialize->getLength();
3823 *var_outputCnt = len;
3824 ret = copyoutkdata(serialize->text(), len, var_output);
3825 }
3826 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3827 {
3828 len = data->getLength();
3829 *var_outputCnt = len;
3830 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3831 }
3832 else
3833 {
3834 ret = kIOReturnUnderrun;
3835 }
3836 }
3837
3838 if (inputMD)
3839 inputMD->release();
3840 if (structureVariableOutputData)
3841 structureVariableOutputData->release();
3842
3843 return (ret);
3844 }
3845
3846 /* Routine io_connect_method */
3847 kern_return_t is_io_connect_method
3848 (
3849 io_connect_t connection,
3850 uint32_t selector,
3851 io_scalar_inband64_t scalar_input,
3852 mach_msg_type_number_t scalar_inputCnt,
3853 io_struct_inband_t inband_input,
3854 mach_msg_type_number_t inband_inputCnt,
3855 mach_vm_address_t ool_input,
3856 mach_vm_size_t ool_input_size,
3857 io_struct_inband_t inband_output,
3858 mach_msg_type_number_t *inband_outputCnt,
3859 io_scalar_inband64_t scalar_output,
3860 mach_msg_type_number_t *scalar_outputCnt,
3861 mach_vm_address_t ool_output,
3862 mach_vm_size_t *ool_output_size
3863 )
3864 {
3865 CHECK( IOUserClient, connection, client );
3866
3867 IOExternalMethodArguments args;
3868 IOReturn ret;
3869 IOMemoryDescriptor * inputMD = 0;
3870 IOMemoryDescriptor * outputMD = 0;
3871
3872 bzero(&args.__reserved[0], sizeof(args.__reserved));
3873 args.__reservedA = 0;
3874 args.version = kIOExternalMethodArgumentsCurrentVersion;
3875
3876 args.selector = selector;
3877
3878 args.asyncWakePort = MACH_PORT_NULL;
3879 args.asyncReference = 0;
3880 args.asyncReferenceCount = 0;
3881 args.structureVariableOutputData = 0;
3882
3883 args.scalarInput = scalar_input;
3884 args.scalarInputCount = scalar_inputCnt;
3885 args.structureInput = inband_input;
3886 args.structureInputSize = inband_inputCnt;
3887
3888 if (ool_input)
3889 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3890 kIODirectionOut, current_task());
3891
3892 args.structureInputDescriptor = inputMD;
3893
3894 args.scalarOutput = scalar_output;
3895 args.scalarOutputCount = *scalar_outputCnt;
3896 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3897 args.structureOutput = inband_output;
3898 args.structureOutputSize = *inband_outputCnt;
3899
3900 if (ool_output && ool_output_size)
3901 {
3902 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3903 kIODirectionIn, current_task());
3904 }
3905
3906 args.structureOutputDescriptor = outputMD;
3907 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3908
3909 IOStatisticsClientCall();
3910 ret = client->externalMethod( selector, &args );
3911
3912 *scalar_outputCnt = args.scalarOutputCount;
3913 *inband_outputCnt = args.structureOutputSize;
3914 *ool_output_size = args.structureOutputDescriptorSize;
3915
3916 if (inputMD)
3917 inputMD->release();
3918 if (outputMD)
3919 outputMD->release();
3920
3921 return (ret);
3922 }
3923
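/*
 * Illustrative only (not part of this file): IOConnectCallMethod() and its
 * scalar/struct convenience wrappers in IOKitLib funnel into
 * is_io_connect_method() above, which packs everything into
 * IOExternalMethodArguments for IOUserClient::externalMethod().  A minimal
 * sketch, assuming <IOKit/IOKitLib.h>; "connect" is an io_connect_t from
 * IOServiceOpen(), and selector 0 with one scalar is only a placeholder:
 *
 *     uint64_t in = 42, out = 0;
 *     uint32_t outCnt = 1;
 *     kern_return_t kr = IOConnectCallMethod(connect, 0,
 *                            &in, 1, NULL, 0,            // scalar + struct input
 *                            &out, &outCnt, NULL, NULL); // scalar + struct output
 */
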
3924 /* Routine io_connect_async_method */
3925 kern_return_t is_io_connect_async_method
3926 (
3927 io_connect_t connection,
3928 mach_port_t wake_port,
3929 io_async_ref64_t reference,
3930 mach_msg_type_number_t referenceCnt,
3931 uint32_t selector,
3932 io_scalar_inband64_t scalar_input,
3933 mach_msg_type_number_t scalar_inputCnt,
3934 io_struct_inband_t inband_input,
3935 mach_msg_type_number_t inband_inputCnt,
3936 mach_vm_address_t ool_input,
3937 mach_vm_size_t ool_input_size,
3938 io_struct_inband_t inband_output,
3939 mach_msg_type_number_t *inband_outputCnt,
3940 io_scalar_inband64_t scalar_output,
3941 mach_msg_type_number_t *scalar_outputCnt,
3942 mach_vm_address_t ool_output,
3943 mach_vm_size_t * ool_output_size
3944 )
3945 {
3946 CHECK( IOUserClient, connection, client );
3947
3948 IOExternalMethodArguments args;
3949 IOReturn ret;
3950 IOMemoryDescriptor * inputMD = 0;
3951 IOMemoryDescriptor * outputMD = 0;
3952
3953 bzero(&args.__reserved[0], sizeof(args.__reserved));
3954 args.__reservedA = 0;
3955 args.version = kIOExternalMethodArgumentsCurrentVersion;
3956
3957 reference[0] = (io_user_reference_t) wake_port;
3958 if (vm_map_is_64bit(get_task_map(current_task())))
3959 reference[0] |= kIOUCAsync64Flag;
3960
3961 args.selector = selector;
3962
3963 args.asyncWakePort = wake_port;
3964 args.asyncReference = reference;
3965 args.asyncReferenceCount = referenceCnt;
3966
3967 args.scalarInput = scalar_input;
3968 args.scalarInputCount = scalar_inputCnt;
3969 args.structureInput = inband_input;
3970 args.structureInputSize = inband_inputCnt;
3971
3972 if (ool_input)
3973 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3974 kIODirectionOut, current_task());
3975
3976 args.structureInputDescriptor = inputMD;
3977
3978 args.scalarOutput = scalar_output;
3979 args.scalarOutputCount = *scalar_outputCnt;
3980 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3981 args.structureOutput = inband_output;
3982 args.structureOutputSize = *inband_outputCnt;
3983
3984 if (ool_output)
3985 {
3986 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3987 kIODirectionIn, current_task());
3988 }
3989
3990 args.structureOutputDescriptor = outputMD;
3991 args.structureOutputDescriptorSize = *ool_output_size;
3992
3993 IOStatisticsClientCall();
3994 ret = client->externalMethod( selector, &args );
3995
3996 *inband_outputCnt = args.structureOutputSize;
3997 *ool_output_size = args.structureOutputDescriptorSize;
3998
3999 if (inputMD)
4000 inputMD->release();
4001 if (outputMD)
4002 outputMD->release();
4003
4004 return (ret);
4005 }
4006
4007 /* Routine io_connect_method_scalarI_scalarO */
4008 kern_return_t is_io_connect_method_scalarI_scalarO(
4009 io_object_t connect,
4010 uint32_t index,
4011 io_scalar_inband_t input,
4012 mach_msg_type_number_t inputCount,
4013 io_scalar_inband_t output,
4014 mach_msg_type_number_t * outputCount )
4015 {
4016 IOReturn err;
4017 uint32_t i;
4018 io_scalar_inband64_t _input;
4019 io_scalar_inband64_t _output;
4020
4021 mach_msg_type_number_t struct_outputCnt = 0;
4022 mach_vm_size_t ool_output_size = 0;
4023
4024 bzero(&_output[0], sizeof(_output));
4025 for (i = 0; i < inputCount; i++)
4026 _input[i] = SCALAR64(input[i]);
4027
4028 err = is_io_connect_method(connect, index,
4029 _input, inputCount,
4030 NULL, 0,
4031 0, 0,
4032 NULL, &struct_outputCnt,
4033 _output, outputCount,
4034 0, &ool_output_size);
4035
4036 for (i = 0; i < *outputCount; i++)
4037 output[i] = SCALAR32(_output[i]);
4038
4039 return (err);
4040 }
4041
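/*
 * The shim below adapts the modern 64-bit scalar path onto legacy
 * IOExternalMethod tables: the scalar counts are checked against the method
 * entry, each 64-bit scalar is narrowed with ARG32(), and the member function
 * is invoked with one of the fixed argument shapes the old dispatch supported.
 */
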
4042 kern_return_t shim_io_connect_method_scalarI_scalarO(
4043 IOExternalMethod * method,
4044 IOService * object,
4045 const io_user_scalar_t * input,
4046 mach_msg_type_number_t inputCount,
4047 io_user_scalar_t * output,
4048 mach_msg_type_number_t * outputCount )
4049 {
4050 IOMethod func;
4051 io_scalar_inband_t _output;
4052 IOReturn err;
4053 err = kIOReturnBadArgument;
4054
4055 bzero(&_output[0], sizeof(_output));
4056 do {
4057
4058 if( inputCount != method->count0)
4059 {
4060 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4061 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4062 continue;
4063 }
4064 if( *outputCount != method->count1)
4065 {
4066 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4067 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4068 continue;
4069 }
4070
4071 func = method->func;
4072
4073 switch( inputCount) {
4074
4075 case 6:
4076 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4077 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4078 break;
4079 case 5:
4080 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4081 ARG32(input[3]), ARG32(input[4]),
4082 &_output[0] );
4083 break;
4084 case 4:
4085 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4086 ARG32(input[3]),
4087 &_output[0], &_output[1] );
4088 break;
4089 case 3:
4090 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4091 &_output[0], &_output[1], &_output[2] );
4092 break;
4093 case 2:
4094 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4095 &_output[0], &_output[1], &_output[2],
4096 &_output[3] );
4097 break;
4098 case 1:
4099 err = (object->*func)( ARG32(input[0]),
4100 &_output[0], &_output[1], &_output[2],
4101 &_output[3], &_output[4] );
4102 break;
4103 case 0:
4104 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4105 &_output[3], &_output[4], &_output[5] );
4106 break;
4107
4108 default:
4109 IOLog("%s: Bad method table\n", object->getName());
4110 }
4111 }
4112 while( false);
4113
4114 uint32_t i;
4115 for (i = 0; i < *outputCount; i++)
4116 output[i] = SCALAR32(_output[i]);
4117
4118 return( err);
4119 }
4120
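/* The is_io_async_method_* MIG routines below are the compatibility entry
 * points for the older 32-bit inband interface: they widen the inband scalars
 * and async references to their 64-bit forms (SCALAR64/REF64), forward the
 * call to is_io_connect_async_method(), and narrow any scalar outputs with
 * SCALAR32() on the way back. */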
4121 /* Routine io_async_method_scalarI_scalarO */
4122 kern_return_t is_io_async_method_scalarI_scalarO(
4123 io_object_t connect,
4124 mach_port_t wake_port,
4125 io_async_ref_t reference,
4126 mach_msg_type_number_t referenceCnt,
4127 uint32_t index,
4128 io_scalar_inband_t input,
4129 mach_msg_type_number_t inputCount,
4130 io_scalar_inband_t output,
4131 mach_msg_type_number_t * outputCount )
4132 {
4133 IOReturn err;
4134 uint32_t i;
4135 io_scalar_inband64_t _input;
4136 io_scalar_inband64_t _output;
4137 io_async_ref64_t _reference;
4138
4139 bzero(&_output[0], sizeof(_output));
4140 for (i = 0; i < referenceCnt; i++)
4141 _reference[i] = REF64(reference[i]);
4142
4143 mach_msg_type_number_t struct_outputCnt = 0;
4144 mach_vm_size_t ool_output_size = 0;
4145
4146 for (i = 0; i < inputCount; i++)
4147 _input[i] = SCALAR64(input[i]);
4148
4149 err = is_io_connect_async_method(connect,
4150 wake_port, _reference, referenceCnt,
4151 index,
4152 _input, inputCount,
4153 NULL, 0,
4154 0, 0,
4155 NULL, &struct_outputCnt,
4156 _output, outputCount,
4157 0, &ool_output_size);
4158
4159 for (i = 0; i < *outputCount; i++)
4160 output[i] = SCALAR32(_output[i]);
4161
4162 return (err);
4163 }
4164 /* Routine io_async_method_scalarI_structureO */
4165 kern_return_t is_io_async_method_scalarI_structureO(
4166 io_object_t connect,
4167 mach_port_t wake_port,
4168 io_async_ref_t reference,
4169 mach_msg_type_number_t referenceCnt,
4170 uint32_t index,
4171 io_scalar_inband_t input,
4172 mach_msg_type_number_t inputCount,
4173 io_struct_inband_t output,
4174 mach_msg_type_number_t * outputCount )
4175 {
4176 uint32_t i;
4177 io_scalar_inband64_t _input;
4178 io_async_ref64_t _reference;
4179
4180 for (i = 0; i < referenceCnt; i++)
4181 _reference[i] = REF64(reference[i]);
4182
4183 mach_msg_type_number_t scalar_outputCnt = 0;
4184 mach_vm_size_t ool_output_size = 0;
4185
4186 for (i = 0; i < inputCount; i++)
4187 _input[i] = SCALAR64(input[i]);
4188
4189 return (is_io_connect_async_method(connect,
4190 wake_port, _reference, referenceCnt,
4191 index,
4192 _input, inputCount,
4193 NULL, 0,
4194 0, 0,
4195 output, outputCount,
4196 NULL, &scalar_outputCnt,
4197 0, &ool_output_size));
4198 }
4199
4200 /* Routine io_async_method_scalarI_structureI */
4201 kern_return_t is_io_async_method_scalarI_structureI(
4202 io_connect_t connect,
4203 mach_port_t wake_port,
4204 io_async_ref_t reference,
4205 mach_msg_type_number_t referenceCnt,
4206 uint32_t index,
4207 io_scalar_inband_t input,
4208 mach_msg_type_number_t inputCount,
4209 io_struct_inband_t inputStruct,
4210 mach_msg_type_number_t inputStructCount )
4211 {
4212 uint32_t i;
4213 io_scalar_inband64_t _input;
4214 io_async_ref64_t _reference;
4215
4216 for (i = 0; i < referenceCnt; i++)
4217 _reference[i] = REF64(reference[i]);
4218
4219 mach_msg_type_number_t scalar_outputCnt = 0;
4220 mach_msg_type_number_t inband_outputCnt = 0;
4221 mach_vm_size_t ool_output_size = 0;
4222
4223 for (i = 0; i < inputCount; i++)
4224 _input[i] = SCALAR64(input[i]);
4225
4226 return (is_io_connect_async_method(connect,
4227 wake_port, _reference, referenceCnt,
4228 index,
4229 _input, inputCount,
4230 inputStruct, inputStructCount,
4231 0, 0,
4232 NULL, &inband_outputCnt,
4233 NULL, &scalar_outputCnt,
4234 0, &ool_output_size));
4235 }
4236
4237 /* Routine io_async_method_structureI_structureO */
4238 kern_return_t is_io_async_method_structureI_structureO(
4239 io_object_t connect,
4240 mach_port_t wake_port,
4241 io_async_ref_t reference,
4242 mach_msg_type_number_t referenceCnt,
4243 uint32_t index,
4244 io_struct_inband_t input,
4245 mach_msg_type_number_t inputCount,
4246 io_struct_inband_t output,
4247 mach_msg_type_number_t * outputCount )
4248 {
4249 uint32_t i;
4250 mach_msg_type_number_t scalar_outputCnt = 0;
4251 mach_vm_size_t ool_output_size = 0;
4252 io_async_ref64_t _reference;
4253
4254 for (i = 0; i < referenceCnt; i++)
4255 _reference[i] = REF64(reference[i]);
4256
4257 return (is_io_connect_async_method(connect,
4258 wake_port, _reference, referenceCnt,
4259 index,
4260 NULL, 0,
4261 input, inputCount,
4262 0, 0,
4263 output, outputCount,
4264 NULL, &scalar_outputCnt,
4265 0, &ool_output_size));
4266 }
4267
4268
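/* Async variant of the scalarI_scalarO shim: the 64-bit async references are
 * narrowed with REF32() into an io_async_ref_t and passed as the first
 * argument of the legacy IOAsyncMethod, ahead of the scalar arguments. */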
4269 kern_return_t shim_io_async_method_scalarI_scalarO(
4270 IOExternalAsyncMethod * method,
4271 IOService * object,
4272 mach_port_t asyncWakePort,
4273 io_user_reference_t * asyncReference,
4274 uint32_t asyncReferenceCount,
4275 const io_user_scalar_t * input,
4276 mach_msg_type_number_t inputCount,
4277 io_user_scalar_t * output,
4278 mach_msg_type_number_t * outputCount )
4279 {
4280 IOAsyncMethod func;
4281 uint32_t i;
4282 io_scalar_inband_t _output;
4283 IOReturn err;
4284 io_async_ref_t reference;
4285
4286 bzero(&_output[0], sizeof(_output));
4287 for (i = 0; i < asyncReferenceCount; i++)
4288 reference[i] = REF32(asyncReference[i]);
4289
4290 err = kIOReturnBadArgument;
4291
4292 do {
4293
4294 if( inputCount != method->count0)
4295 {
4296 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4297 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4298 continue;
4299 }
4300 if( *outputCount != method->count1)
4301 {
4302 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4303 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4304 continue;
4305 }
4306
4307 func = method->func;
4308
4309 switch( inputCount) {
4310
4311 case 6:
4312 err = (object->*func)( reference,
4313 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4314 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4315 break;
4316 case 5:
4317 err = (object->*func)( reference,
4318 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4319 ARG32(input[3]), ARG32(input[4]),
4320 &_output[0] );
4321 break;
4322 case 4:
4323 err = (object->*func)( reference,
4324 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4325 ARG32(input[3]),
4326 &_output[0], &_output[1] );
4327 break;
4328 case 3:
4329 err = (object->*func)( reference,
4330 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4331 &_output[0], &_output[1], &_output[2] );
4332 break;
4333 case 2:
4334 err = (object->*func)( reference,
4335 ARG32(input[0]), ARG32(input[1]),
4336 &_output[0], &_output[1], &_output[2],
4337 &_output[3] );
4338 break;
4339 case 1:
4340 err = (object->*func)( reference,
4341 ARG32(input[0]),
4342 &_output[0], &_output[1], &_output[2],
4343 &_output[3], &_output[4] );
4344 break;
4345 case 0:
4346 err = (object->*func)( reference,
4347 &_output[0], &_output[1], &_output[2],
4348 &_output[3], &_output[4], &_output[5] );
4349 break;
4350
4351 default:
4352 IOLog("%s: Bad method table\n", object->getName());
4353 }
4354 }
4355 while( false);
4356
4357 for (i = 0; i < *outputCount; i++)
4358 output[i] = SCALAR32(_output[i]);
4359
4360 return( err);
4361 }
4362
4363
4364 /* Routine io_connect_method_scalarI_structureO */
4365 kern_return_t is_io_connect_method_scalarI_structureO(
4366 io_object_t connect,
4367 uint32_t index,
4368 io_scalar_inband_t input,
4369 mach_msg_type_number_t inputCount,
4370 io_struct_inband_t output,
4371 mach_msg_type_number_t * outputCount )
4372 {
4373 uint32_t i;
4374 io_scalar_inband64_t _input;
4375
4376 mach_msg_type_number_t scalar_outputCnt = 0;
4377 mach_vm_size_t ool_output_size = 0;
4378
4379 for (i = 0; i < inputCount; i++)
4380 _input[i] = SCALAR64(input[i]);
4381
4382 return (is_io_connect_method(connect, index,
4383 _input, inputCount,
4384 NULL, 0,
4385 0, 0,
4386 output, outputCount,
4387 NULL, &scalar_outputCnt,
4388 0, &ool_output_size));
4389 }
4390
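/* Legacy shim for kIOUCScalarIStructO methods: inputCount must match
 * method->count0 exactly, while the structure output size is checked against
 * method->count1 unless the table entry uses kIOUCVariableStructureSize.
 * The five-scalar case passes only the output buffer; with four or fewer
 * scalars the outputCount pointer is passed as well so the method can report
 * the size it actually produced. */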
4391 kern_return_t shim_io_connect_method_scalarI_structureO(
4393 IOExternalMethod * method,
4394 IOService * object,
4395 const io_user_scalar_t * input,
4396 mach_msg_type_number_t inputCount,
4397 io_struct_inband_t output,
4398 IOByteCount * outputCount )
4399 {
4400 IOMethod func;
4401 IOReturn err;
4402
4403 err = kIOReturnBadArgument;
4404
4405 do {
4406 if( inputCount != method->count0)
4407 {
4408 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4409 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4410 continue;
4411 }
4412 if( (kIOUCVariableStructureSize != method->count1)
4413 && (*outputCount != method->count1))
4414 {
4415 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4416 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4417 continue;
4418 }
4419
4420 func = method->func;
4421
4422 switch( inputCount) {
4423
4424 case 5:
4425 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4426 ARG32(input[3]), ARG32(input[4]),
4427 output );
4428 break;
4429 case 4:
4430 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4431 ARG32(input[3]),
4432 output, (void *)outputCount );
4433 break;
4434 case 3:
4435 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4436 output, (void *)outputCount, 0 );
4437 break;
4438 case 2:
4439 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4440 output, (void *)outputCount, 0, 0 );
4441 break;
4442 case 1:
4443 err = (object->*func)( ARG32(input[0]),
4444 output, (void *)outputCount, 0, 0, 0 );
4445 break;
4446 case 0:
4447 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4448 break;
4449
4450 default:
4451 IOLog("%s: Bad method table\n", object->getName());
4452 }
4453 }
4454 while( false);
4455
4456 return( err);
4457 }
4458
4459
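/* Async variant of the scalarI_structureO shim; identical count checks, with
 * the REF32()-narrowed async reference prepended to the argument list. */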
4460 kern_return_t shim_io_async_method_scalarI_structureO(
4461 IOExternalAsyncMethod * method,
4462 IOService * object,
4463 mach_port_t asyncWakePort,
4464 io_user_reference_t * asyncReference,
4465 uint32_t asyncReferenceCount,
4466 const io_user_scalar_t * input,
4467 mach_msg_type_number_t inputCount,
4468 io_struct_inband_t output,
4469 mach_msg_type_number_t * outputCount )
4470 {
4471 IOAsyncMethod func;
4472 uint32_t i;
4473 IOReturn err;
4474 io_async_ref_t reference;
4475
4476 for (i = 0; i < asyncReferenceCount; i++)
4477 reference[i] = REF32(asyncReference[i]);
4478
4479 err = kIOReturnBadArgument;
4480 do {
4481 if( inputCount != method->count0)
4482 {
4483 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4484 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4485 continue;
4486 }
4487 if( (kIOUCVariableStructureSize != method->count1)
4488 && (*outputCount != method->count1))
4489 {
4490 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4491 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4492 continue;
4493 }
4494
4495 func = method->func;
4496
4497 switch( inputCount) {
4498
4499 case 5:
4500 err = (object->*func)( reference,
4501 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4502 ARG32(input[3]), ARG32(input[4]),
4503 output );
4504 break;
4505 case 4:
4506 err = (object->*func)( reference,
4507 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4508 ARG32(input[3]),
4509 output, (void *)outputCount );
4510 break;
4511 case 3:
4512 err = (object->*func)( reference,
4513 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4514 output, (void *)outputCount, 0 );
4515 break;
4516 case 2:
4517 err = (object->*func)( reference,
4518 ARG32(input[0]), ARG32(input[1]),
4519 output, (void *)outputCount, 0, 0 );
4520 break;
4521 case 1:
4522 err = (object->*func)( reference,
4523 ARG32(input[0]),
4524 output, (void *)outputCount, 0, 0, 0 );
4525 break;
4526 case 0:
4527 err = (object->*func)( reference,
4528 output, (void *)outputCount, 0, 0, 0, 0 );
4529 break;
4530
4531 default:
4532 IOLog("%s: Bad method table\n", object->getName());
4533 }
4534 }
4535 while( false);
4536
4537 return( err);
4538 }
4539
4540 /* Routine io_connect_method_scalarI_structureI */
4541 kern_return_t is_io_connect_method_scalarI_structureI(
4542 io_connect_t connect,
4543 uint32_t index,
4544 io_scalar_inband_t input,
4545 mach_msg_type_number_t inputCount,
4546 io_struct_inband_t inputStruct,
4547 mach_msg_type_number_t inputStructCount )
4548 {
4549 uint32_t i;
4550 io_scalar_inband64_t _input;
4551
4552 mach_msg_type_number_t scalar_outputCnt = 0;
4553 mach_msg_type_number_t inband_outputCnt = 0;
4554 mach_vm_size_t ool_output_size = 0;
4555
4556 for (i = 0; i < inputCount; i++)
4557 _input[i] = SCALAR64(input[i]);
4558
4559 return (is_io_connect_method(connect, index,
4560 _input, inputCount,
4561 inputStruct, inputStructCount,
4562 0, 0,
4563 NULL, &inband_outputCnt,
4564 NULL, &scalar_outputCnt,
4565 0, &ool_output_size));
4566 }
4567
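/* Legacy shim for kIOUCScalarIStructI methods: the scalar count must match
 * method->count0, and the input structure size must match method->count1
 * unless the entry declares kIOUCVariableStructureSize. The structure pointer
 * and its byte count are handed to the method in the slots left over after
 * the scalars. */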
4568 kern_return_t shim_io_connect_method_scalarI_structureI(
4569 IOExternalMethod * method,
4570 IOService * object,
4571 const io_user_scalar_t * input,
4572 mach_msg_type_number_t inputCount,
4573 io_struct_inband_t inputStruct,
4574 mach_msg_type_number_t inputStructCount )
4575 {
4576 IOMethod func;
4577 IOReturn err = kIOReturnBadArgument;
4578
4579 do
4580 {
4581 if (inputCount != method->count0)
4582 {
4583 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4584 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4585 continue;
4586 }
4587 if( (kIOUCVariableStructureSize != method->count1)
4588 && (inputStructCount != method->count1))
4589 {
4590 IOLog("%s:%d %s: IOUserClient inputStructCount mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4591 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4592 continue;
4593 }
4594
4595 func = method->func;
4596
4597 switch( inputCount) {
4598
4599 case 5:
4600 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4601 ARG32(input[3]), ARG32(input[4]),
4602 inputStruct );
4603 break;
4604 case 4:
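		/* note: input[2] is passed through untruncated here,
		 * unlike the ARG32()-narrowed scalars around it */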
4605 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4606 ARG32(input[3]),
4607 inputStruct, (void *)(uintptr_t)inputStructCount );
4608 break;
4609 case 3:
4610 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4611 inputStruct, (void *)(uintptr_t)inputStructCount,
4612 0 );
4613 break;
4614 case 2:
4615 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4616 inputStruct, (void *)(uintptr_t)inputStructCount,
4617 0, 0 );
4618 break;
4619 case 1:
4620 err = (object->*func)( ARG32(input[0]),
4621 inputStruct, (void *)(uintptr_t)inputStructCount,
4622 0, 0, 0 );
4623 break;
4624 case 0:
4625 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4626 0, 0, 0, 0 );
4627 break;
4628
4629 default:
4630 IOLog("%s: Bad method table\n", object->getName());
4631 }
4632 }
4633 while (false);
4634
4635 return( err);
4636 }
4637
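/* Async variant of the scalarI_structureI shim, again passing the narrowed
 * async reference as the leading argument. */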
4638 kern_return_t shim_io_async_method_scalarI_structureI(
4639 IOExternalAsyncMethod * method,
4640 IOService * object,
4641 mach_port_t asyncWakePort,
4642 io_user_reference_t * asyncReference,
4643 uint32_t asyncReferenceCount,
4644 const io_user_scalar_t * input,
4645 mach_msg_type_number_t inputCount,
4646 io_struct_inband_t inputStruct,
4647 mach_msg_type_number_t inputStructCount )
4648 {
4649 IOAsyncMethod func;
4650 uint32_t i;
4651 IOReturn err = kIOReturnBadArgument;
4652 io_async_ref_t reference;
4653
4654 for (i = 0; i < asyncReferenceCount; i++)
4655 reference[i] = REF32(asyncReference[i]);
4656
4657 do
4658 {
4659 if (inputCount != method->count0)
4660 {
4661 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4662 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4663 continue;
4664 }
4665 if( (kIOUCVariableStructureSize != method->count1)
4666 && (inputStructCount != method->count1))
4667 {
4668 IOLog("%s:%d %s: IOUserClient inputStructCount mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4669 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4670 continue;
4671 }
4672
4673 func = method->func;
4674
4675 switch( inputCount) {
4676
4677 case 5:
4678 err = (object->*func)( reference,
4679 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4680 ARG32(input[3]), ARG32(input[4]),
4681 inputStruct );
4682 break;
4683 case 4:
4684 err = (object->*func)( reference,
4685 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4686 ARG32(input[3]),
4687 inputStruct, (void *)(uintptr_t)inputStructCount );
4688 break;
4689 case 3:
4690 err = (object->*func)( reference,
4691 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4692 inputStruct, (void *)(uintptr_t)inputStructCount,
4693 0 );
4694 break;
4695 case 2:
4696 err = (object->*func)( reference,
4697 ARG32(input[0]), ARG32(input[1]),
4698 inputStruct, (void *)(uintptr_t)inputStructCount,
4699 0, 0 );
4700 break;
4701 case 1:
4702 err = (object->*func)( reference,
4703 ARG32(input[0]),
4704 inputStruct, (void *)(uintptr_t)inputStructCount,
4705 0, 0, 0 );
4706 break;
4707 case 0:
4708 err = (object->*func)( reference,
4709 inputStruct, (void *)(uintptr_t)inputStructCount,
4710 0, 0, 0, 0 );
4711 break;
4712
4713 default:
4714 IOLog("%s: Bad method table\n", object->getName());
4715 }
4716 }
4717 while (false);
4718
4719 return( err);
4720 }
4721
4722 /* Routine io_connect_method_structureI_structureO */
4723 kern_return_t is_io_connect_method_structureI_structureO(
4724 io_object_t connect,
4725 uint32_t index,
4726 io_struct_inband_t input,
4727 mach_msg_type_number_t inputCount,
4728 io_struct_inband_t output,
4729 mach_msg_type_number_t * outputCount )
4730 {
4731 mach_msg_type_number_t scalar_outputCnt = 0;
4732 mach_vm_size_t ool_output_size = 0;
4733
4734 return (is_io_connect_method(connect, index,
4735 NULL, 0,
4736 input, inputCount,
4737 0, 0,
4738 output, outputCount,
4739 NULL, &scalar_outputCnt,
4740 0, &ool_output_size));
4741 }
4742
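/* Legacy shim for kIOUCStructIStructO methods: both sizes are checked against
 * the table entry (kIOUCVariableStructureSize acts as a wildcard), and
 * method->count1/count0 decide whether the call receives the input buffer
 * only, the output buffer only, or both buffers with their respective size
 * arguments. */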
4743 kern_return_t shim_io_connect_method_structureI_structureO(
4744 IOExternalMethod * method,
4745 IOService * object,
4746 io_struct_inband_t input,
4747 mach_msg_type_number_t inputCount,
4748 io_struct_inband_t output,
4749 IOByteCount * outputCount )
4750 {
4751 IOMethod func;
4752 IOReturn err = kIOReturnBadArgument;
4753
4754 do
4755 {
4756 if( (kIOUCVariableStructureSize != method->count0)
4757 && (inputCount != method->count0))
4758 {
4759 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4760 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4761 continue;
4762 }
4763 if( (kIOUCVariableStructureSize != method->count1)
4764 && (*outputCount != method->count1))
4765 {
4766 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4767 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4768 continue;
4769 }
4770
4771 func = method->func;
4772
4773 if( method->count1) {
4774 if( method->count0) {
4775 err = (object->*func)( input, output,
4776 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4777 } else {
4778 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4779 }
4780 } else {
4781 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4782 }
4783 }
4784 while( false);
4785
4786
4787 return( err);
4788 }
4789
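/* Async variant of the structureI_structureO shim, with the REF32()-narrowed
 * async reference passed first. */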
4790 kern_return_t shim_io_async_method_structureI_structureO(
4791 IOExternalAsyncMethod * method,
4792 IOService * object,
4793 mach_port_t asyncWakePort,
4794 io_user_reference_t * asyncReference,
4795 uint32_t asyncReferenceCount,
4796 io_struct_inband_t input,
4797 mach_msg_type_number_t inputCount,
4798 io_struct_inband_t output,
4799 mach_msg_type_number_t * outputCount )
4800 {
4801 IOAsyncMethod func;
4802 uint32_t i;
4803 IOReturn err;
4804 io_async_ref_t reference;
4805
4806 for (i = 0; i < asyncReferenceCount; i++)
4807 reference[i] = REF32(asyncReference[i]);
4808
4809 err = kIOReturnBadArgument;
4810 do
4811 {
4812 if( (kIOUCVariableStructureSize != method->count0)
4813 && (inputCount != method->count0))
4814 {
4815 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4816 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4817 continue;
4818 }
4819 if( (kIOUCVariableStructureSize != method->count1)
4820 && (*outputCount != method->count1))
4821 {
4822 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4823 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4824 continue;
4825 }
4826
4827 func = method->func;
4828
4829 if( method->count1) {
4830 if( method->count0) {
4831 err = (object->*func)( reference,
4832 input, output,
4833 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4834 } else {
4835 err = (object->*func)( reference,
4836 output, outputCount, 0, 0, 0, 0 );
4837 }
4838 } else {
4839 err = (object->*func)( reference,
4840 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4841 }
4842 }
4843 while( false);
4844
4845 return( err);
4846 }
4847
4848 #if !NO_KEXTD
4849 bool gIOKextdClearedBusy = false;
4850 #endif
4851
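/* The io_catalog_* routines below implement the user-space interface to
 * gIOCatalogue. is_io_catalog_send_data accepts an out-of-line buffer from
 * the caller (typically kextd), copies it into the kernel map, unserializes
 * it with OSUnserializeXML(), and applies it according to 'flag'. Once
 * vm_map_copyout() has succeeded, the routine must return KERN_SUCCESS and
 * report the IOKit status through *result instead. */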
4852 /* Routine io_catalog_send_data */
4853 kern_return_t is_io_catalog_send_data(
4854 mach_port_t master_port,
4855 uint32_t flag,
4856 io_buf_ptr_t inData,
4857 mach_msg_type_number_t inDataCount,
4858 kern_return_t * result)
4859 {
4860 OSObject * obj = 0;
4861 vm_offset_t data;
4862 kern_return_t kr = kIOReturnError;
4863
4864 //printf("io_catalog_send_data called. flag: %d\n", flag);
4865
4866 if( master_port != master_device_port)
4867 return kIOReturnNotPrivileged;
4868
4869 if( (flag != kIOCatalogRemoveKernelLinker &&
4870 flag != kIOCatalogKextdActive &&
4871 flag != kIOCatalogKextdFinishedLaunching) &&
4872 ( !inData || !inDataCount) )
4873 {
4874 return kIOReturnBadArgument;
4875 }
4876
4877 if (inData) {
4878 vm_map_offset_t map_data;
4879
4880 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4881 return( kIOReturnMessageTooLarge);
4882
4883 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4884 data = CAST_DOWN(vm_offset_t, map_data);
4885
4886 if( kr != KERN_SUCCESS)
4887 return kr;
4888
4889 // must return success after vm_map_copyout() succeeds
4890
4891 if( inDataCount ) {
4892 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4893 vm_deallocate( kernel_map, data, inDataCount );
4894 if( !obj) {
4895 *result = kIOReturnNoMemory;
4896 return( KERN_SUCCESS);
4897 }
4898 }
4899 }
4900
4901 switch ( flag ) {
4902 case kIOCatalogResetDrivers:
4903 case kIOCatalogResetDriversNoMatch: {
4904 OSArray * array;
4905
4906 array = OSDynamicCast(OSArray, obj);
4907 if (array) {
4908 if ( !gIOCatalogue->resetAndAddDrivers(array,
4909 flag == kIOCatalogResetDrivers) ) {
4910
4911 kr = kIOReturnError;
4912 }
4913 } else {
4914 kr = kIOReturnBadArgument;
4915 }
4916 }
4917 break;
4918
4919 case kIOCatalogAddDrivers:
4920 case kIOCatalogAddDriversNoMatch: {
4921 OSArray * array;
4922
4923 array = OSDynamicCast(OSArray, obj);
4924 if ( array ) {
4925 if ( !gIOCatalogue->addDrivers( array ,
4926 flag == kIOCatalogAddDrivers) ) {
4927 kr = kIOReturnError;
4928 }
4929 }
4930 else {
4931 kr = kIOReturnBadArgument;
4932 }
4933 }
4934 break;
4935
4936 case kIOCatalogRemoveDrivers:
4937 case kIOCatalogRemoveDriversNoMatch: {
4938 OSDictionary * dict;
4939
4940 dict = OSDynamicCast(OSDictionary, obj);
4941 if ( dict ) {
4942 if ( !gIOCatalogue->removeDrivers( dict,
4943 flag == kIOCatalogRemoveDrivers ) ) {
4944 kr = kIOReturnError;
4945 }
4946 }
4947 else {
4948 kr = kIOReturnBadArgument;
4949 }
4950 }
4951 break;
4952
4953 case kIOCatalogStartMatching: {
4954 OSDictionary * dict;
4955
4956 dict = OSDynamicCast(OSDictionary, obj);
4957 if ( dict ) {
4958 if ( !gIOCatalogue->startMatching( dict ) ) {
4959 kr = kIOReturnError;
4960 }
4961 }
4962 else {
4963 kr = kIOReturnBadArgument;
4964 }
4965 }
4966 break;
4967
4968 case kIOCatalogRemoveKernelLinker:
4969 kr = KERN_NOT_SUPPORTED;
4970 break;
4971
4972 case kIOCatalogKextdActive:
4973 #if !NO_KEXTD
4974 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4975 OSKext::setKextdActive();
4976
4977 /* Dump all nonloaded startup extensions; kextd will now send them
4978 * down on request.
4979 */
4980 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4981 #endif
4982 kr = kIOReturnSuccess;
4983 break;
4984
4985 case kIOCatalogKextdFinishedLaunching: {
4986 #if !NO_KEXTD
4987 if (!gIOKextdClearedBusy) {
4988 IOService * serviceRoot = IOService::getServiceRoot();
4989 if (serviceRoot) {
4990 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4991 serviceRoot->adjustBusy(-1);
4992 gIOKextdClearedBusy = true;
4993 }
4994 }
4995 #endif
4996 kr = kIOReturnSuccess;
4997 }
4998 break;
4999
5000 default:
5001 kr = kIOReturnBadArgument;
5002 break;
5003 }
5004
5005 if (obj) obj->release();
5006
5007 *result = kr;
5008 return( KERN_SUCCESS);
5009 }
5010
5011 /* Routine io_catalog_terminate */
5012 kern_return_t is_io_catalog_terminate(
5013 mach_port_t master_port,
5014 uint32_t flag,
5015 io_name_t name )
5016 {
5017 kern_return_t kr;
5018
5019 if( master_port != master_device_port )
5020 return kIOReturnNotPrivileged;
5021
5022 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5023 kIOClientPrivilegeAdministrator );
5024 if( kIOReturnSuccess != kr)
5025 return( kr );
5026
5027 switch ( flag ) {
5028 #if !defined(SECURE_KERNEL)
5029 case kIOCatalogServiceTerminate:
5030 OSIterator * iter;
5031 IOService * service;
5032
5033 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5034 kIORegistryIterateRecursively);
5035 if ( !iter )
5036 return kIOReturnNoMemory;
5037
5038 do {
5039 iter->reset();
5040 while( (service = (IOService *)iter->getNextObject()) ) {
5041 if( service->metaCast(name)) {
5042 if ( !service->terminate( kIOServiceRequired
5043 | kIOServiceSynchronous) ) {
5044 kr = kIOReturnUnsupported;
5045 break;
5046 }
5047 }
5048 }
5049 } while( !service && !iter->isValid());
5050 iter->release();
5051 break;
5052
5053 case kIOCatalogModuleUnload:
5054 case kIOCatalogModuleTerminate:
5055 kr = gIOCatalogue->terminateDriversForModule(name,
5056 flag == kIOCatalogModuleUnload);
5057 break;
5058 #endif
5059
5060 default:
5061 kr = kIOReturnBadArgument;
5062 break;
5063 }
5064
5065 return( kr );
5066 }
5067
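/* is_io_catalog_get_data serializes the requested catalogue data into an
 * OSSerialize buffer and hands it back as out-of-line memory: the serialized
 * text is copied into a fresh kernel allocation and converted to a
 * vm_map_copy_t with vm_map_copyin() for the reply message. */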
5068 /* Routine io_catalog_get_data */
5069 kern_return_t is_io_catalog_get_data(
5070 mach_port_t master_port,
5071 uint32_t flag,
5072 io_buf_ptr_t *outData,
5073 mach_msg_type_number_t *outDataCount)
5074 {
5075 kern_return_t kr = kIOReturnSuccess;
5076 OSSerialize * s;
5077
5078 if( master_port != master_device_port)
5079 return kIOReturnNotPrivileged;
5080
5081 //printf("io_catalog_get_data called. flag: %d\n", flag);
5082
5083 s = OSSerialize::withCapacity(4096);
5084 if ( !s )
5085 return kIOReturnNoMemory;
5086
5087 kr = gIOCatalogue->serializeData(flag, s);
5088
5089 if ( kr == kIOReturnSuccess ) {
5090 vm_offset_t data;
5091 vm_map_copy_t copy;
5092 vm_size_t size;
5093
5094 size = s->getLength();
5095 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
5096 if ( kr == kIOReturnSuccess ) {
5097 bcopy(s->text(), (void *)data, size);
5098 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5099 (vm_map_size_t)size, true, &copy);
5100 *outData = (char *)copy;
5101 *outDataCount = size;
5102 }
5103 }
5104
5105 s->release();
5106
5107 return kr;
5108 }
5109
5110 /* Routine io_catalog_get_gen_count */
5111 kern_return_t is_io_catalog_get_gen_count(
5112 mach_port_t master_port,
5113 uint32_t *genCount)
5114 {
5115 if( master_port != master_device_port)
5116 return kIOReturnNotPrivileged;
5117
5118 //printf("io_catalog_get_gen_count called.\n");
5119
5120 if ( !genCount )
5121 return kIOReturnBadArgument;
5122
5123 *genCount = gIOCatalogue->getGenerationCount();
5124
5125 return kIOReturnSuccess;
5126 }
5127
5128 /* Routine io_catalog_module_loaded.
5129 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5130 */
5131 kern_return_t is_io_catalog_module_loaded(
5132 mach_port_t master_port,
5133 io_name_t name)
5134 {
5135 if( master_port != master_device_port)
5136 return kIOReturnNotPrivileged;
5137
5138 //printf("io_catalog_module_loaded called. name %s\n", name);
5139
5140 if ( !name )
5141 return kIOReturnBadArgument;
5142
5143 gIOCatalogue->moduleHasLoaded(name);
5144
5145 return kIOReturnSuccess;
5146 }
5147
5148 kern_return_t is_io_catalog_reset(
5149 mach_port_t master_port,
5150 uint32_t flag)
5151 {
5152 if( master_port != master_device_port)
5153 return kIOReturnNotPrivileged;
5154
5155 switch ( flag ) {
5156 case kIOCatalogResetDefault:
5157 gIOCatalogue->reset();
5158 break;
5159
5160 default:
5161 return kIOReturnBadArgument;
5162 }
5163
5164 return kIOReturnSuccess;
5165 }
5166
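/* iokit_user_client_trap is the fast, Mach-trap entry into a user client: it
 * resolves the connect ref in the current task, asks the client for the
 * IOExternalTrap registered at args->index via getTargetAndTrapForIndex(),
 * and invokes it directly with the six pointer-sized trap arguments; no
 * scalar or structure marshalling is performed. User space reaches this path
 * through the IOConnectTrap* wrappers in IOKitLib. */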
5167 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5168 {
5169 kern_return_t result = kIOReturnBadArgument;
5170 IOUserClient *userClient;
5171
5172 if ((userClient = OSDynamicCast(IOUserClient,
5173 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5174 IOExternalTrap *trap;
5175 IOService *target = NULL;
5176
5177 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5178
5179 if (trap && target) {
5180 IOTrap func;
5181
5182 func = trap->func;
5183
5184 if (func) {
5185 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5186 }
5187 }
5188
5189 iokit_remove_connect_reference(userClient);
5190 }
5191
5192 return result;
5193 }
5194
5195 } /* extern "C" */
5196
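/* IOUserClient::externalMethod is the common dispatcher for the MIG paths
 * above. When a dispatch table entry is supplied it validates the scalar
 * counts and structure sizes (kIOUCVariableStructureSize acts as a wildcard)
 * and calls dispatch->function; otherwise it falls back to the legacy
 * IOExternalMethod / IOExternalAsyncMethod tables and the shim_* routines,
 * rejecting out-of-line structures and refusing kIOUCForegroundOnly methods
 * for tasks that are currently GPU denied. */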
5197 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5198 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5199 {
5200 IOReturn err;
5201 IOService * object;
5202 IOByteCount structureOutputSize;
5203
5204 if (dispatch)
5205 {
5206 uint32_t count;
5207 count = dispatch->checkScalarInputCount;
5208 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5209 {
5210 return (kIOReturnBadArgument);
5211 }
5212
5213 count = dispatch->checkStructureInputSize;
5214 if ((kIOUCVariableStructureSize != count)
5215 && (count != ((args->structureInputDescriptor)
5216 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5217 {
5218 return (kIOReturnBadArgument);
5219 }
5220
5221 count = dispatch->checkScalarOutputCount;
5222 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5223 {
5224 return (kIOReturnBadArgument);
5225 }
5226
5227 count = dispatch->checkStructureOutputSize;
5228 if ((kIOUCVariableStructureSize != count)
5229 && (count != ((args->structureOutputDescriptor)
5230 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5231 {
5232 return (kIOReturnBadArgument);
5233 }
5234
5235 if (dispatch->function)
5236 err = (*dispatch->function)(target, reference, args);
5237 else
5238 err = kIOReturnNoCompletion; /* implementor can dispatch */
5239
5240 return (err);
5241 }
5242
5243
5244 // pre-Leopard API's don't do ool structs
5245 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5246 {
5247 err = kIOReturnIPCError;
5248 return (err);
5249 }
5250
5251 structureOutputSize = args->structureOutputSize;
5252
5253 if (args->asyncWakePort)
5254 {
5255 IOExternalAsyncMethod * method;
5256 object = 0;
5257 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5258 return (kIOReturnUnsupported);
5259
5260 if (kIOUCForegroundOnly & method->flags)
5261 {
5262 if (task_is_gpu_denied(current_task()))
5263 return (kIOReturnNotPermitted);
5264 }
5265
5266 switch (method->flags & kIOUCTypeMask)
5267 {
5268 case kIOUCScalarIStructI:
5269 err = shim_io_async_method_scalarI_structureI( method, object,
5270 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5271 args->scalarInput, args->scalarInputCount,
5272 (char *)args->structureInput, args->structureInputSize );
5273 break;
5274
5275 case kIOUCScalarIScalarO:
5276 err = shim_io_async_method_scalarI_scalarO( method, object,
5277 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5278 args->scalarInput, args->scalarInputCount,
5279 args->scalarOutput, &args->scalarOutputCount );
5280 break;
5281
5282 case kIOUCScalarIStructO:
5283 err = shim_io_async_method_scalarI_structureO( method, object,
5284 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5285 args->scalarInput, args->scalarInputCount,
5286 (char *) args->structureOutput, &args->structureOutputSize );
5287 break;
5288
5289
5290 case kIOUCStructIStructO:
5291 err = shim_io_async_method_structureI_structureO( method, object,
5292 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5293 (char *)args->structureInput, args->structureInputSize,
5294 (char *) args->structureOutput, &args->structureOutputSize );
5295 break;
5296
5297 default:
5298 err = kIOReturnBadArgument;
5299 break;
5300 }
5301 }
5302 else
5303 {
5304 IOExternalMethod * method;
5305 object = 0;
5306 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5307 return (kIOReturnUnsupported);
5308
5309 if (kIOUCForegroundOnly & method->flags)
5310 {
5311 if (task_is_gpu_denied(current_task()))
5312 return (kIOReturnNotPermitted);
5313 }
5314
5315 switch (method->flags & kIOUCTypeMask)
5316 {
5317 case kIOUCScalarIStructI:
5318 err = shim_io_connect_method_scalarI_structureI( method, object,
5319 args->scalarInput, args->scalarInputCount,
5320 (char *) args->structureInput, args->structureInputSize );
5321 break;
5322
5323 case kIOUCScalarIScalarO:
5324 err = shim_io_connect_method_scalarI_scalarO( method, object,
5325 args->scalarInput, args->scalarInputCount,
5326 args->scalarOutput, &args->scalarOutputCount );
5327 break;
5328
5329 case kIOUCScalarIStructO:
5330 err = shim_io_connect_method_scalarI_structureO( method, object,
5331 args->scalarInput, args->scalarInputCount,
5332 (char *) args->structureOutput, &structureOutputSize );
5333 break;
5334
5335
5336 case kIOUCStructIStructO:
5337 err = shim_io_connect_method_structureI_structureO( method, object,
5338 (char *) args->structureInput, args->structureInputSize,
5339 (char *) args->structureOutput, &structureOutputSize );
5340 break;
5341
5342 default:
5343 err = kIOReturnBadArgument;
5344 break;
5345 }
5346 }
5347
5348 args->structureOutputSize = structureOutputSize;
5349
5350 return (err);
5351 }
5352
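/* For reference, a minimal user-space sketch of how a call reaches
 * externalMethod(). The class name "MyDriver" and selector 0 are purely
 * illustrative assumptions, not part of this file:
 *
 *   io_connect_t conn;
 *   io_service_t svc = IOServiceGetMatchingService(kIOMasterPortDefault,
 *                          IOServiceMatching("MyDriver"));   // hypothetical class
 *   if (svc && (KERN_SUCCESS == IOServiceOpen(svc, mach_task_self(), 0, &conn))) {
 *       uint64_t in = 42, out = 0;
 *       uint32_t outCnt = 1;
 *       // Arrives in externalMethod() as selector 0 with scalarInputCount == 1.
 *       IOConnectCallScalarMethod(conn, 0, &in, 1, &out, &outCnt);
 *       IOServiceClose(conn);
 *   }
 */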
5353 #if __LP64__
5354 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5355 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5356 #else
5357 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5358 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5359 #endif
5360 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5361 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5362 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5363 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5364 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5365 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5366 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5367 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5368 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5369 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5370 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5371 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5372 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5373 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5374