[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOStatisticsPrivate.h>
41 #include <IOKit/IOTimeStamp.h>
42 #include <IOKit/system.h>
43 #include <libkern/OSDebug.h>
44 #include <sys/proc.h>
45 #include <sys/kauth.h>
46 #include <sys/codesign.h>
47
48 #include <mach/sdt.h>
49
50 #if CONFIG_MACF
51
52 extern "C" {
53 #include <security/mac_framework.h>
54 };
55 #include <sys/kauth.h>
56
57 #define IOMACF_LOG 0
58
59 #endif /* CONFIG_MACF */
60
61 #include <IOKit/assert.h>
62
63 #include "IOServicePrivate.h"
64 #include "IOKitKernelInternal.h"
65
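// Helpers for converting scalar and reference arguments between their
// user-visible io_user_scalar_t / io_user_reference_t representations and
// 32-bit values, widening on the way up and truncating on the way down.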
66 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
67 #define SCALAR32(x) ((uint32_t )x)
68 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
69 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
70 #define REF32(x) ((int)(x))
71
72 enum
73 {
74 kIOUCAsync0Flags = 3ULL,
75 kIOUCAsync64Flag = 1ULL,
76 kIOUCAsyncErrorLoggedFlag = 2ULL
77 };
78
79 #if IOKITSTATS
80
81 #define IOStatisticsRegisterCounter() \
82 do { \
83 reserved->counter = IOStatistics::registerUserClient(this); \
84 } while (0)
85
86 #define IOStatisticsUnregisterCounter() \
87 do { \
88 if (reserved) \
89 IOStatistics::unregisterUserClient(reserved->counter); \
90 } while (0)
91
92 #define IOStatisticsClientCall() \
93 do { \
94 IOStatistics::countUserClientCall(client); \
95 } while (0)
96
97 #else
98
99 #define IOStatisticsRegisterCounter()
100 #define IOStatisticsUnregisterCounter()
101 #define IOStatisticsClientCall()
102
103 #endif /* IOKITSTATS */
104
105 #if DEVELOPMENT || DEBUG
106
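// On DEVELOPMENT/DEBUG builds these macros temporarily replace the saved
// return address in the current stack frame with the supplied pointer
// (callers pass the client's metaclass), apparently so that backtraces taken
// inside externally dispatched calls attribute them to the owning kext;
// FAKE_STACK_FRAME_END restores the original return address.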
107 #define FAKE_STACK_FRAME(a) \
108 const void ** __frameptr; \
109 const void * __retaddr; \
110 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
111 __retaddr = __frameptr[1]; \
112 __frameptr[1] = (a);
113
114 #define FAKE_STACK_FRAME_END() \
115 __frameptr[1] = __retaddr;
116
117 #else /* DEVELOPMENT || DEBUG */
118
119 #define FAKE_STACK_FRAME(a)
120 #define FAKE_STACK_FRAME_END()
121
122 #endif /* DEVELOPMENT || DEBUG */
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 // definitions we should get from osfmk
127
128 //typedef struct ipc_port * ipc_port_t;
129 typedef natural_t ipc_kobject_type_t;
130
131 #define IKOT_IOKIT_SPARE 27
132 #define IKOT_IOKIT_CONNECT 29
133 #define IKOT_IOKIT_OBJECT 30
134
135 extern "C" {
136
137 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
138 ipc_kobject_type_t type );
139
140 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
141
142 extern mach_port_name_t iokit_make_send_right( task_t task,
143 io_object_t obj, ipc_kobject_type_t type );
144
145 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
146
147 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
148
149 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
150
151 extern ipc_port_t master_device_port;
152
153 extern void iokit_retain_port( ipc_port_t port );
154 extern void iokit_release_port( ipc_port_t port );
155 extern void iokit_release_port_send( ipc_port_t port );
156
157 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
158
159 #include <mach/mach_traps.h>
160 #include <vm/vm_map.h>
161
162 } /* extern "C" */
163
164
165 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
166
167 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
168
169 class IOMachPort : public OSObject
170 {
171 OSDeclareDefaultStructors(IOMachPort)
172 public:
173 OSObject * object;
174 ipc_port_t port;
175 UInt32 mscount;
176 UInt8 holdDestroy;
177
178 static IOMachPort * portForObject( OSObject * obj,
179 ipc_kobject_type_t type );
180 static bool noMoreSendersForObject( OSObject * obj,
181 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
182 static void releasePortForObject( OSObject * obj,
183 ipc_kobject_type_t type );
184 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
185
186 static OSDictionary * dictForType( ipc_kobject_type_t type );
187
188 static mach_port_name_t makeSendRightForTask( task_t task,
189 io_object_t obj, ipc_kobject_type_t type );
190
191 virtual void free() APPLE_KEXT_OVERRIDE;
192 };
193
194 #define super OSObject
195 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
196
197 static IOLock * gIOObjectPortLock;
198
199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
200
201 // not in dictForType() for debugging ease
202 static OSDictionary * gIOObjectPorts;
203 static OSDictionary * gIOConnectPorts;
204
205 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
206 {
207 OSDictionary ** dict;
208
209 if( IKOT_IOKIT_OBJECT == type )
210 dict = &gIOObjectPorts;
211 else if( IKOT_IOKIT_CONNECT == type )
212 dict = &gIOConnectPorts;
213 else
214 return( 0 );
215
216 if( 0 == *dict)
217 *dict = OSDictionary::withCapacity( 1 );
218
219 return( *dict );
220 }
221
222 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
223 ipc_kobject_type_t type )
224 {
225 IOMachPort * inst = 0;
226 OSDictionary * dict;
227
228 IOTakeLock( gIOObjectPortLock);
229
230 do {
231
232 dict = dictForType( type );
233 if( !dict)
234 continue;
235
236 if( (inst = (IOMachPort *)
237 dict->getObject( (const OSSymbol *) obj ))) {
238 inst->mscount++;
239 inst->retain();
240 continue;
241 }
242
243 inst = new IOMachPort;
244 if( inst && !inst->init()) {
245 inst = 0;
246 continue;
247 }
248
249 inst->port = iokit_alloc_object_port( obj, type );
250 if( inst->port) {
251 // retains obj
252 dict->setObject( (const OSSymbol *) obj, inst );
253 inst->mscount++;
254
255 } else {
256 inst->release();
257 inst = 0;
258 }
259
260 } while( false );
261
262 IOUnlock( gIOObjectPortLock);
263
264 return( inst );
265 }
266
267 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
268 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
269 {
270 OSDictionary * dict;
271 IOMachPort * machPort;
272 IOUserClient * uc;
273 bool destroyed = true;
274
275 IOTakeLock( gIOObjectPortLock);
276
277 if( (dict = dictForType( type ))) {
278 obj->retain();
279
280 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
281 if( machPort) {
282 destroyed = (machPort->mscount <= *mscount);
283 if (!destroyed) *mscount = machPort->mscount;
284 else
285 {
286 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
287 {
288 uc->noMoreSenders();
289 }
290 dict->removeObject( (const OSSymbol *) obj );
291 }
292 }
293 obj->release();
294 }
295
296 IOUnlock( gIOObjectPortLock);
297
298 return( destroyed );
299 }
300
301 void IOMachPort::releasePortForObject( OSObject * obj,
302 ipc_kobject_type_t type )
303 {
304 OSDictionary * dict;
305 IOMachPort * machPort;
306
307 assert(IKOT_IOKIT_CONNECT != type);
308
309 IOTakeLock( gIOObjectPortLock);
310
311 if( (dict = dictForType( type ))) {
312 obj->retain();
313 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
314 if( machPort && !machPort->holdDestroy)
315 dict->removeObject( (const OSSymbol *) obj );
316 obj->release();
317 }
318
319 IOUnlock( gIOObjectPortLock);
320 }
321
322 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
323 {
324 OSDictionary * dict;
325 IOMachPort * machPort;
326
327 IOLockLock( gIOObjectPortLock );
328
329 if( (dict = dictForType( type ))) {
330 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
331 if( machPort)
332 machPort->holdDestroy = true;
333 }
334
335 IOLockUnlock( gIOObjectPortLock );
336 }
337
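// Tears down the user-visible ports for an object. For an IOUserClient the
// connect port is switched over to the client's mappings object (when one
// exists), apparently so that send rights backing existing IOMemoryMaps stay
// usable after the client itself is destroyed.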
338 void IOUserClient::destroyUserReferences( OSObject * obj )
339 {
340 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
341
342 // panther, 3160200
343 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
344
345 OSDictionary * dict;
346
347 IOTakeLock( gIOObjectPortLock);
348 obj->retain();
349
350 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
351 {
352 IOMachPort * port;
353 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
354 if (port)
355 {
356 IOUserClient * uc;
357 if ((uc = OSDynamicCast(IOUserClient, obj)))
358 {
359 uc->noMoreSenders();
360 if (uc->mappings)
361 {
362 dict->setObject((const OSSymbol *) uc->mappings, port);
363 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
364
365 uc->mappings->release();
366 uc->mappings = 0;
367 }
368 }
369 dict->removeObject( (const OSSymbol *) obj );
370 }
371 }
372 obj->release();
373 IOUnlock( gIOObjectPortLock);
374 }
375
376 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
377 io_object_t obj, ipc_kobject_type_t type )
378 {
379 return( iokit_make_send_right( task, obj, type ));
380 }
381
382 void IOMachPort::free( void )
383 {
384 if( port)
385 iokit_destroy_object_port( port );
386 super::free();
387 }
388
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390
391 class IOUserIterator : public OSIterator
392 {
393 OSDeclareDefaultStructors(IOUserIterator)
394 public:
395 OSObject * userIteratorObject;
396 IOLock * lock;
397
398 static IOUserIterator * withIterator(OSIterator * iter);
399 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
400 virtual void free() APPLE_KEXT_OVERRIDE;
401
402 virtual void reset() APPLE_KEXT_OVERRIDE;
403 virtual bool isValid() APPLE_KEXT_OVERRIDE;
404 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
405 };
406
407 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
408
409 class IOUserNotification : public IOUserIterator
410 {
411 OSDeclareDefaultStructors(IOUserNotification)
412
413 #define holdNotify userIteratorObject
414
415 public:
416
417 virtual void free() APPLE_KEXT_OVERRIDE;
418
419 virtual void setNotification( IONotifier * obj );
420
421 virtual void reset() APPLE_KEXT_OVERRIDE;
422 virtual bool isValid() APPLE_KEXT_OVERRIDE;
423 };
424
425 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
426
427 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
428
429 IOUserIterator *
430 IOUserIterator::withIterator(OSIterator * iter)
431 {
432 IOUserIterator * me;
433
434 if (!iter) return (0);
435
436 me = new IOUserIterator;
437 if (me && !me->init())
438 {
439 me->release();
440 me = 0;
441 }
442 if (!me) return me;
443 me->userIteratorObject = iter;
444
445 return (me);
446 }
447
448 bool
449 IOUserIterator::init( void )
450 {
451 if (!OSObject::init()) return (false);
452
453 lock = IOLockAlloc();
454 if( !lock)
455 return( false );
456
457 return (true);
458 }
459
460 void
461 IOUserIterator::free()
462 {
463 if (userIteratorObject) userIteratorObject->release();
464 if (lock) IOLockFree(lock);
465 OSObject::free();
466 }
467
468 void
469 IOUserIterator::reset()
470 {
471 IOLockLock(lock);
472 assert(OSDynamicCast(OSIterator, userIteratorObject));
473 ((OSIterator *)userIteratorObject)->reset();
474 IOLockUnlock(lock);
475 }
476
477 bool
478 IOUserIterator::isValid()
479 {
480 bool ret;
481
482 IOLockLock(lock);
483 assert(OSDynamicCast(OSIterator, userIteratorObject));
484 ret = ((OSIterator *)userIteratorObject)->isValid();
485 IOLockUnlock(lock);
486
487 return (ret);
488 }
489
490 OSObject *
491 IOUserIterator::getNextObject()
492 {
493 OSObject * ret;
494
495 IOLockLock(lock);
496 assert(OSDynamicCast(OSIterator, userIteratorObject));
497 ret = ((OSIterator *)userIteratorObject)->getNextObject();
498 IOLockUnlock(lock);
499
500 return (ret);
501 }
502
503 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
504 extern "C" {
505
506 // functions called from osfmk/device/iokit_rpc.c
507
508 void
509 iokit_add_reference( io_object_t obj )
510 {
511 if( obj)
512 obj->retain();
513 }
514
515 void
516 iokit_remove_reference( io_object_t obj )
517 {
518 if( obj)
519 obj->release();
520 }
521
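// __ipc counts in-flight IPC references on a user client's connect port.
// Finalization of an inactive client is deferred via __ipcFinal until the
// last such reference is dropped in iokit_remove_connect_reference().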
522 void
523 iokit_add_connect_reference( io_object_t obj )
524 {
525 IOUserClient * uc;
526
527 if (!obj) return;
528
529 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
530
531 obj->retain();
532 }
533
534 void
535 iokit_remove_connect_reference( io_object_t obj )
536 {
537 IOUserClient * uc;
538 bool finalize = false;
539
540 if (!obj) return;
541
542 if ((uc = OSDynamicCast(IOUserClient, obj)))
543 {
544 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
545 {
546 IOLockLock(gIOObjectPortLock);
547 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
548 IOLockUnlock(gIOObjectPortLock);
549 }
550 if (finalize) uc->scheduleFinalize(true);
551 }
552
553 obj->release();
554 }
555
556 bool
557 IOUserClient::finalizeUserReferences(OSObject * obj)
558 {
559 IOUserClient * uc;
560 bool ok = true;
561
562 if ((uc = OSDynamicCast(IOUserClient, obj)))
563 {
564 IOLockLock(gIOObjectPortLock);
565 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
566 IOLockUnlock(gIOObjectPortLock);
567 }
568 return (ok);
569 }
570
571 ipc_port_t
572 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
573 {
574 IOMachPort * machPort;
575 ipc_port_t port;
576
577 if( (machPort = IOMachPort::portForObject( obj, type ))) {
578
579 port = machPort->port;
580 if( port)
581 iokit_retain_port( port );
582
583 machPort->release();
584
585 } else
586 port = NULL;
587
588 return( port );
589 }
590
591 kern_return_t
592 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
593 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
594 {
595 IOUserClient * client;
596 IOMemoryMap * map;
597 IOUserNotification * notify;
598
599 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
600 return( kIOReturnNotReady );
601
602 if( IKOT_IOKIT_CONNECT == type)
603 {
604 if( (client = OSDynamicCast( IOUserClient, obj )))
605 {
606 IOStatisticsClientCall();
607 client->clientDied();
608 }
609 }
610 else if( IKOT_IOKIT_OBJECT == type)
611 {
612 if( (map = OSDynamicCast( IOMemoryMap, obj )))
613 map->taskDied();
614 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
615 notify->setNotification( 0 );
616 }
617
618 return( kIOReturnSuccess );
619 }
620
621 }; /* extern "C" */
622
623 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
624
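// IOServiceUserNotification queues newly discovered services (up to
// kMaxOutstanding) and, while armed, pings the client's notification port
// with a single message; the client drains the queue via getNextObject().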
625 class IOServiceUserNotification : public IOUserNotification
626 {
627 OSDeclareDefaultStructors(IOServiceUserNotification)
628
629 struct PingMsg {
630 mach_msg_header_t msgHdr;
631 OSNotificationHeader64 notifyHeader;
632 };
633
634 enum { kMaxOutstanding = 1024 };
635
636 PingMsg * pingMsg;
637 vm_size_t msgSize;
638 OSArray * newSet;
639 OSObject * lastEntry;
640 bool armed;
641 bool ipcLogged;
642
643 public:
644
645 virtual bool init( mach_port_t port, natural_t type,
646 void * reference, vm_size_t referenceSize,
647 bool clientIs64 );
648 virtual void free() APPLE_KEXT_OVERRIDE;
649
650 static bool _handler( void * target,
651 void * ref, IOService * newService, IONotifier * notifier );
652 virtual bool handler( void * ref, IOService * newService );
653
654 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
655 };
656
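// IOServiceMessageUserNotification forwards IOService interest messages to
// the client's port, embedding a send right for the provider in the message
// body and truncating arguments to kIOUserNotifyMaxMessageSize.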
657 class IOServiceMessageUserNotification : public IOUserNotification
658 {
659 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
660
661 struct PingMsg {
662 mach_msg_header_t msgHdr;
663 mach_msg_body_t msgBody;
664 mach_msg_port_descriptor_t ports[1];
665 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
666 };
667
668 PingMsg * pingMsg;
669 vm_size_t msgSize;
670 uint8_t clientIs64;
671 int owningPID;
672 bool ipcLogged;
673
674 public:
675
676 virtual bool init( mach_port_t port, natural_t type,
677 void * reference, vm_size_t referenceSize,
678 vm_size_t extraSize,
679 bool clientIs64 );
680
681 virtual void free() APPLE_KEXT_OVERRIDE;
682
683 static IOReturn _handler( void * target, void * ref,
684 UInt32 messageType, IOService * provider,
685 void * messageArgument, vm_size_t argSize );
686 virtual IOReturn handler( void * ref,
687 UInt32 messageType, IOService * provider,
688 void * messageArgument, vm_size_t argSize );
689
690 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
691 };
692
693 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
694
695 #undef super
696 #define super IOUserIterator
697 OSDefineMetaClass( IOUserNotification, IOUserIterator )
698 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
699
700 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
701
702 void IOUserNotification::free( void )
703 {
704 if (holdNotify)
705 {
706 assert(OSDynamicCast(IONotifier, holdNotify));
707 ((IONotifier *)holdNotify)->remove();
708 holdNotify = 0;
709 }
710 // can't be in handler now
711
712 super::free();
713 }
714
715
716 void IOUserNotification::setNotification( IONotifier * notify )
717 {
718 OSObject * previousNotify;
719
720 IOLockLock( gIOObjectPortLock);
721
722 previousNotify = holdNotify;
723 holdNotify = notify;
724
725 IOLockUnlock( gIOObjectPortLock);
726
727 if( previousNotify)
728 {
729 assert(OSDynamicCast(IONotifier, previousNotify));
730 ((IONotifier *)previousNotify)->remove();
731 }
732 }
733
734 void IOUserNotification::reset()
735 {
736 // ?
737 }
738
739 bool IOUserNotification::isValid()
740 {
741 return( true );
742 }
743
744 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
745
746 #undef super
747 #define super IOUserNotification
748 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
749
750 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
751
752 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
753 void * reference, vm_size_t referenceSize,
754 bool clientIs64 )
755 {
756 if( !super::init())
757 return( false );
758
759 newSet = OSArray::withCapacity( 1 );
760 if( !newSet)
761 return( false );
762
763 if (referenceSize > sizeof(OSAsyncReference64))
764 return( false );
765
766 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
767 pingMsg = (PingMsg *) IOMalloc( msgSize);
768 if( !pingMsg)
769 return( false );
770
771 bzero( pingMsg, msgSize);
772
773 pingMsg->msgHdr.msgh_remote_port = port;
774 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
775 MACH_MSG_TYPE_COPY_SEND /*remote*/,
776 MACH_MSG_TYPE_MAKE_SEND /*local*/);
777 pingMsg->msgHdr.msgh_size = msgSize;
778 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
779
780 pingMsg->notifyHeader.size = 0;
781 pingMsg->notifyHeader.type = type;
782 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
783
784 return( true );
785 }
786
787 void IOServiceUserNotification::free( void )
788 {
789 PingMsg * _pingMsg;
790 vm_size_t _msgSize;
791 OSArray * _newSet;
792 OSObject * _lastEntry;
793
794 _pingMsg = pingMsg;
795 _msgSize = msgSize;
796 _lastEntry = lastEntry;
797 _newSet = newSet;
798
799 super::free();
800
801 if( _pingMsg && _msgSize) {
802 if (_pingMsg->msgHdr.msgh_remote_port) {
803 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
804 }
805 IOFree(_pingMsg, _msgSize);
806 }
807
808 if( _lastEntry)
809 _lastEntry->release();
810
811 if( _newSet)
812 _newSet->release();
813 }
814
815 bool IOServiceUserNotification::_handler( void * target,
816 void * ref, IOService * newService, IONotifier * notifier )
817 {
818 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
819 }
820
821 bool IOServiceUserNotification::handler( void * ref,
822 IOService * newService )
823 {
824 unsigned int count;
825 kern_return_t kr;
826 ipc_port_t port = NULL;
827 bool sendPing = false;
828
829 IOTakeLock( lock );
830
831 count = newSet->getCount();
832 if( count < kMaxOutstanding) {
833
834 newSet->setObject( newService );
835 if( (sendPing = (armed && (0 == count))))
836 armed = false;
837 }
838
839 IOUnlock( lock );
840
841 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
842 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
843
844 if( sendPing) {
845 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
846 pingMsg->msgHdr.msgh_local_port = port;
847 else
848 pingMsg->msgHdr.msgh_local_port = NULL;
849
850 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
851 pingMsg->msgHdr.msgh_size,
852 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
853 0);
854 if( port)
855 iokit_release_port( port );
856
857 if( (KERN_SUCCESS != kr) && !ipcLogged)
858 {
859 ipcLogged = true;
860 IOLog("%s: mach_msg_send_from_kernel_with_options(0x%x)\n", __PRETTY_FUNCTION__, kr );
861 }
862 }
863
864 return( true );
865 }
866
867 OSObject * IOServiceUserNotification::getNextObject()
868 {
869 unsigned int count;
870 OSObject * result;
871 OSObject * releaseEntry;
872
873 IOLockLock(lock);
874
875 releaseEntry = lastEntry;
876 count = newSet->getCount();
877 if( count ) {
878 result = newSet->getObject( count - 1 );
879 result->retain();
880 newSet->removeObject( count - 1);
881 } else {
882 result = 0;
883 armed = true;
884 }
885 lastEntry = result;
886
887 IOLockUnlock(lock);
888
889 if (releaseEntry) releaseEntry->release();
890
891 return( result );
892 }
893
894 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
895
896 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
897
898 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
899
900 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
901 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
902 bool client64 )
903 {
904 if( !super::init())
905 return( false );
906
907 if (referenceSize > sizeof(OSAsyncReference64))
908 return( false );
909
910 clientIs64 = client64;
911
912 owningPID = proc_selfpid();
913
914 extraSize += sizeof(IOServiceInterestContent64);
915 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
916 pingMsg = (PingMsg *) IOMalloc( msgSize);
917 if( !pingMsg)
918 return( false );
919
920 bzero( pingMsg, msgSize);
921
922 pingMsg->msgHdr.msgh_remote_port = port;
923 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
924 | MACH_MSGH_BITS(
925 MACH_MSG_TYPE_COPY_SEND /*remote*/,
926 MACH_MSG_TYPE_MAKE_SEND /*local*/);
927 pingMsg->msgHdr.msgh_size = msgSize;
928 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
929
930 pingMsg->msgBody.msgh_descriptor_count = 1;
931
932 pingMsg->ports[0].name = 0;
933 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
934 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
935
936 pingMsg->notifyHeader.size = extraSize;
937 pingMsg->notifyHeader.type = type;
938 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
939
940 return( true );
941 }
942
943 void IOServiceMessageUserNotification::free( void )
944 {
945 PingMsg * _pingMsg;
946 vm_size_t _msgSize;
947
948 _pingMsg = pingMsg;
949 _msgSize = msgSize;
950
951 super::free();
952
953 if( _pingMsg && _msgSize) {
954 if (_pingMsg->msgHdr.msgh_remote_port) {
955 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
956 }
957 IOFree( _pingMsg, _msgSize);
958 }
959 }
960
961 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
962 UInt32 messageType, IOService * provider,
963 void * argument, vm_size_t argSize )
964 {
965 return( ((IOServiceMessageUserNotification *) target)->handler(
966 ref, messageType, provider, argument, argSize));
967 }
968
969 IOReturn IOServiceMessageUserNotification::handler( void * ref,
970 UInt32 messageType, IOService * provider,
971 void * messageArgument, vm_size_t callerArgSize )
972 {
973 enum { kLocalMsgSize = 0x100 };
974 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
975 void * allocMsg;
976 kern_return_t kr;
977 vm_size_t argSize;
978 vm_size_t thisMsgSize;
979 ipc_port_t thisPort, providerPort;
980 struct PingMsg * thisMsg;
981 IOServiceInterestContent64 * data;
982
983 if (kIOMessageCopyClientID == messageType)
984 {
985 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
986 return (kIOReturnSuccess);
987 }
988
989 if (callerArgSize == 0)
990 {
991 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
992 else argSize = sizeof(uint32_t);
993 }
994 else
995 {
996 argSize = callerArgSize;
997 if( argSize > kIOUserNotifyMaxMessageSize)
998 argSize = kIOUserNotifyMaxMessageSize;
999 }
1000
1001 // adjust message size for ipc restrictions
1002 natural_t type;
1003 type = pingMsg->notifyHeader.type;
1004 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1005 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1006 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1007
1008 thisMsgSize = msgSize
1009 + sizeof( IOServiceInterestContent64 )
1010 - sizeof( data->messageArgument)
1011 + argSize;
1012
1013 if (thisMsgSize > sizeof(stackMsg))
1014 {
1015 allocMsg = IOMalloc(thisMsgSize);
1016 if (!allocMsg) return (kIOReturnNoMemory);
1017 thisMsg = (typeof(thisMsg)) allocMsg;
1018 }
1019 else
1020 {
1021 allocMsg = 0;
1022 thisMsg = (typeof(thisMsg)) stackMsg;
1023 }
1024
1025 bcopy(pingMsg, thisMsg, msgSize);
1026 thisMsg->notifyHeader.type = type;
1027 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1028 // == pingMsg->notifyHeader.content;
1029 data->messageType = messageType;
1030
1031 if (callerArgSize == 0)
1032 {
1033 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1034 if (!clientIs64)
1035 {
1036 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1037 }
1038 }
1039 else
1040 {
1041 bcopy( messageArgument, data->messageArgument, callerArgSize );
1042 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1043 }
1044
1045 thisMsg->notifyHeader.type = type;
1046 thisMsg->msgHdr.msgh_size = thisMsgSize;
1047
1048 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1049 thisMsg->ports[0].name = providerPort;
1050 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1051 thisMsg->msgHdr.msgh_local_port = thisPort;
1052
1053 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1054 thisMsg->msgHdr.msgh_size,
1055 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1056 0);
1057 if( thisPort)
1058 iokit_release_port( thisPort );
1059 if( providerPort)
1060 iokit_release_port( providerPort );
1061
1062 if (allocMsg)
1063 IOFree(allocMsg, thisMsgSize);
1064
1065 if((KERN_SUCCESS != kr) && !ipcLogged)
1066 {
1067 ipcLogged = true;
1068 IOLog("%s: mach_msg_send_from_kernel_with_options (0x%x)\n", __PRETTY_FUNCTION__, kr );
1069 }
1070
1071 return( kIOReturnSuccess );
1072 }
1073
1074 OSObject * IOServiceMessageUserNotification::getNextObject()
1075 {
1076 return( 0 );
1077 }
1078
1079 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1080
1081 #undef super
1082 #define super IOService
1083 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1084
1085 IOLock * gIOUserClientOwnersLock;
1086
1087 void IOUserClient::initialize( void )
1088 {
1089 gIOObjectPortLock = IOLockAlloc();
1090 gIOUserClientOwnersLock = IOLockAlloc();
1091 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1092 }
1093
1094 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1095 mach_port_t wakePort,
1096 void *callback, void *refcon)
1097 {
1098 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1099 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1100 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1101 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1102 }
1103
1104 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1105 mach_port_t wakePort,
1106 mach_vm_address_t callback, io_user_reference_t refcon)
1107 {
1108 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1109 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1110 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1111 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1112 }
1113
1114 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1115 mach_port_t wakePort,
1116 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1117 {
1118 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1119 if (vm_map_is_64bit(get_task_map(task))) {
1120 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1121 }
1122 }
1123
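// CopyConsoleUser / CopyUserOnConsole search the IOConsoleUsers array on the
// registry root and return a retained dictionary for the matching session,
// or NULL if none is found.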
1124 static OSDictionary * CopyConsoleUser(UInt32 uid)
1125 {
1126 OSArray * array;
1127 OSDictionary * user = 0;
1128
1129 if ((array = OSDynamicCast(OSArray,
1130 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1131 {
1132 for (unsigned int idx = 0;
1133 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1134 idx++) {
1135 OSNumber * num;
1136
1137 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1138 && (uid == num->unsigned32BitValue())) {
1139 user->retain();
1140 break;
1141 }
1142 }
1143 array->release();
1144 }
1145 return user;
1146 }
1147
1148 static OSDictionary * CopyUserOnConsole(void)
1149 {
1150 OSArray * array;
1151 OSDictionary * user = 0;
1152
1153 if ((array = OSDynamicCast(OSArray,
1154 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1155 {
1156 for (unsigned int idx = 0;
1157 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1158 idx++)
1159 {
1160 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1161 {
1162 user->retain();
1163 break;
1164 }
1165 }
1166 array->release();
1167 }
1168 return (user);
1169 }
1170
1171 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1172 IOService * service )
1173 {
1174 proc_t p;
1175
1176 p = (proc_t) get_bsdtask_info(task);
1177 if (p)
1178 {
1179 uint64_t authorizationID;
1180
1181 authorizationID = proc_uniqueid(p);
1182 if (authorizationID)
1183 {
1184 if (service->getAuthorizationID() == authorizationID)
1185 {
1186 return (kIOReturnSuccess);
1187 }
1188 }
1189 }
1190
1191 return (kIOReturnNotPermitted);
1192 }
1193
1194 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1195 const char * privilegeName )
1196 {
1197 kern_return_t kr;
1198 security_token_t token;
1199 mach_msg_type_number_t count;
1200 task_t task;
1201 OSDictionary * user;
1202 bool secureConsole;
1203
1204
1205 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1206 sizeof(kIOClientPrivilegeForeground)))
1207 {
1208 if (task_is_gpu_denied(current_task()))
1209 return (kIOReturnNotPrivileged);
1210 else
1211 return (kIOReturnSuccess);
1212 }
1213
1214 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1215 sizeof(kIOClientPrivilegeConsoleSession)))
1216 {
1217 kauth_cred_t cred;
1218 proc_t p;
1219
1220 task = (task_t) securityToken;
1221 if (!task)
1222 task = current_task();
1223 p = (proc_t) get_bsdtask_info(task);
1224 kr = kIOReturnNotPrivileged;
1225
1226 if (p && (cred = kauth_cred_proc_ref(p)))
1227 {
1228 user = CopyUserOnConsole();
1229 if (user)
1230 {
1231 OSNumber * num;
1232 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1233 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1234 {
1235 kr = kIOReturnSuccess;
1236 }
1237 user->release();
1238 }
1239 kauth_cred_unref(&cred);
1240 }
1241 return (kr);
1242 }
1243
1244 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1245 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1246 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1247 else
1248 task = (task_t)securityToken;
1249
1250 count = TASK_SECURITY_TOKEN_COUNT;
1251 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1252
1253 if (KERN_SUCCESS != kr)
1254 {}
1255 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1256 sizeof(kIOClientPrivilegeAdministrator))) {
1257 if (0 != token.val[0])
1258 kr = kIOReturnNotPrivileged;
1259 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1260 sizeof(kIOClientPrivilegeLocalUser))) {
1261 user = CopyConsoleUser(token.val[0]);
1262 if ( user )
1263 user->release();
1264 else
1265 kr = kIOReturnNotPrivileged;
1266 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1267 sizeof(kIOClientPrivilegeConsoleUser))) {
1268 user = CopyConsoleUser(token.val[0]);
1269 if ( user ) {
1270 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1271 kr = kIOReturnNotPrivileged;
1272 else if ( secureConsole ) {
1273 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1274 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1275 kr = kIOReturnNotPrivileged;
1276 }
1277 user->release();
1278 }
1279 else
1280 kr = kIOReturnNotPrivileged;
1281 } else
1282 kr = kIOReturnUnsupported;
1283
1284 return (kr);
1285 }
1286
1287 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1288 const char * entitlement )
1289 {
1290 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1291
1292 proc_t p = NULL;
1293 pid_t pid = 0;
1294 char procname[MAXCOMLEN + 1] = "";
1295 size_t len = 0;
1296 void *entitlements_blob = NULL;
1297 char *entitlements_data = NULL;
1298 OSObject *entitlements_obj = NULL;
1299 OSDictionary *entitlements = NULL;
1300 OSString *errorString = NULL;
1301 OSObject *value = NULL;
1302
1303 p = (proc_t)get_bsdtask_info(task);
1304 if (p == NULL)
1305 goto fail;
1306 pid = proc_pid(p);
1307 proc_name(pid, procname, (int)sizeof(procname));
1308
1309 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1310 goto fail;
1311
1312 if (len <= offsetof(CS_GenericBlob, data))
1313 goto fail;
1314
1315 /*
1316 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1317 * we'll try to parse in the kernel.
1318 */
1319 len -= offsetof(CS_GenericBlob, data);
1320 if (len > MAX_ENTITLEMENTS_LEN) {
1321 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1322 goto fail;
1323 }
1324
1325 /*
1326 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1327 * what is stored in the entitlements blob. Copy the string and
1328 * terminate it.
1329 */
1330 entitlements_data = (char *)IOMalloc(len + 1);
1331 if (entitlements_data == NULL)
1332 goto fail;
1333 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1334 entitlements_data[len] = '\0';
1335
1336 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1337 if (errorString != NULL) {
1338 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1339 goto fail;
1340 }
1341 if (entitlements_obj == NULL)
1342 goto fail;
1343
1344 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1345 if (entitlements == NULL)
1346 goto fail;
1347
1348 /* Fetch the entitlement value from the dictionary. */
1349 value = entitlements->getObject(entitlement);
1350 if (value != NULL)
1351 value->retain();
1352
1353 fail:
1354 if (entitlements_data != NULL)
1355 IOFree(entitlements_data, len + 1);
1356 if (entitlements_obj != NULL)
1357 entitlements_obj->release();
1358 if (errorString != NULL)
1359 errorString->release();
1360 return value;
1361 }
1362
1363 bool IOUserClient::init()
1364 {
1365 if (getPropertyTable() || super::init())
1366 return reserve();
1367
1368 return false;
1369 }
1370
1371 bool IOUserClient::init(OSDictionary * dictionary)
1372 {
1373 if (getPropertyTable() || super::init(dictionary))
1374 return reserve();
1375
1376 return false;
1377 }
1378
1379 bool IOUserClient::initWithTask(task_t owningTask,
1380 void * securityID,
1381 UInt32 type )
1382 {
1383 if (getPropertyTable() || super::init())
1384 return reserve();
1385
1386 return false;
1387 }
1388
1389 bool IOUserClient::initWithTask(task_t owningTask,
1390 void * securityID,
1391 UInt32 type,
1392 OSDictionary * properties )
1393 {
1394 bool ok;
1395
1396 ok = super::init( properties );
1397 ok &= initWithTask( owningTask, securityID, type );
1398
1399 return( ok );
1400 }
1401
1402 bool IOUserClient::reserve()
1403 {
1404 if(!reserved) {
1405 reserved = IONew(ExpansionData, 1);
1406 if (!reserved) {
1407 return false;
1408 }
1409 }
1410 setTerminateDefer(NULL, true);
1411 IOStatisticsRegisterCounter();
1412
1413 return true;
1414 }
1415
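// IOUserClientOwner links a user client to each task that owns a connection
// to it. Entries are chained on both the client's owners queue and the
// task's io_user_clients queue (under gIOUserClientOwnersLock) so that
// iokit_task_terminate() can close any connections the task left open.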
1416 struct IOUserClientOwner
1417 {
1418 task_t task;
1419 queue_chain_t taskLink;
1420 IOUserClient * uc;
1421 queue_chain_t ucLink;
1422 };
1423
1424 IOReturn
1425 IOUserClient::registerOwner(task_t task)
1426 {
1427 IOUserClientOwner * owner;
1428 IOReturn ret;
1429 bool newOwner;
1430
1431 IOLockLock(gIOUserClientOwnersLock);
1432
1433 newOwner = true;
1434 ret = kIOReturnSuccess;
1435
1436 if (!owners.next) queue_init(&owners);
1437 else
1438 {
1439 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1440 {
1441 if (task != owner->task) continue;
1442 newOwner = false;
1443 break;
1444 }
1445 }
1446 if (newOwner)
1447 {
1448 owner = IONew(IOUserClientOwner, 1);
1449 if (!owner) ret = kIOReturnNoMemory;
1450 else
1451 {
1452 owner->task = task;
1453 owner->uc = this;
1454 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1455 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1456 }
1457 }
1458
1459 IOLockUnlock(gIOUserClientOwnersLock);
1460
1461 return (ret);
1462 }
1463
1464 void
1465 IOUserClient::noMoreSenders(void)
1466 {
1467 IOUserClientOwner * owner;
1468
1469 IOLockLock(gIOUserClientOwnersLock);
1470
1471 if (owners.next)
1472 {
1473 while (!queue_empty(&owners))
1474 {
1475 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1476 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1477 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1478 IODelete(owner, IOUserClientOwner, 1);
1479 }
1480 owners.next = owners.prev = NULL;
1481 }
1482
1483 IOLockUnlock(gIOUserClientOwnersLock);
1484 }
1485
1486 extern "C" kern_return_t
1487 iokit_task_terminate(task_t task)
1488 {
1489 IOUserClientOwner * owner;
1490 IOUserClient * dead;
1491 IOUserClient * uc;
1492 queue_head_t * taskque;
1493
1494 IOLockLock(gIOUserClientOwnersLock);
1495
1496 taskque = task_io_user_clients(task);
1497 dead = NULL;
1498 while (!queue_empty(taskque))
1499 {
1500 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1501 uc = owner->uc;
1502 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1503 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1504 if (queue_empty(&uc->owners))
1505 {
1506 uc->retain();
1507 IOLog("destroying out of band connect for %s\n", uc->getName());
1508 // now using the uc queue head as a singly linked queue,
1509 // leaving .next as NULL to mark it empty
1510 uc->owners.next = NULL;
1511 uc->owners.prev = (queue_entry_t) dead;
1512 dead = uc;
1513 }
1514 IODelete(owner, IOUserClientOwner, 1);
1515 }
1516
1517 IOLockUnlock(gIOUserClientOwnersLock);
1518
1519 while (dead)
1520 {
1521 uc = dead;
1522 dead = (IOUserClient *)(void *) dead->owners.prev;
1523 uc->owners.prev = NULL;
1524 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1525 uc->release();
1526 }
1527
1528 return (KERN_SUCCESS);
1529 }
1530
1531 void IOUserClient::free()
1532 {
1533 if( mappings) mappings->release();
1534
1535 IOStatisticsUnregisterCounter();
1536
1537 assert(!owners.next);
1538 assert(!owners.prev);
1539
1540 if (reserved) IODelete(reserved, ExpansionData, 1);
1541
1542 super::free();
1543 }
1544
1545 IOReturn IOUserClient::clientDied( void )
1546 {
1547 IOReturn ret = kIOReturnNotReady;
1548
1549 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1550 {
1551 ret = clientClose();
1552 }
1553
1554 return (ret);
1555 }
1556
1557 IOReturn IOUserClient::clientClose( void )
1558 {
1559 return( kIOReturnUnsupported );
1560 }
1561
1562 IOService * IOUserClient::getService( void )
1563 {
1564 return( 0 );
1565 }
1566
1567 IOReturn IOUserClient::registerNotificationPort(
1568 mach_port_t /* port */,
1569 UInt32 /* type */,
1570 UInt32 /* refCon */)
1571 {
1572 return( kIOReturnUnsupported);
1573 }
1574
1575 IOReturn IOUserClient::registerNotificationPort(
1576 mach_port_t port,
1577 UInt32 type,
1578 io_user_reference_t refCon)
1579 {
1580 return (registerNotificationPort(port, type, (UInt32) refCon));
1581 }
1582
1583 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1584 semaphore_t * semaphore )
1585 {
1586 return( kIOReturnUnsupported);
1587 }
1588
1589 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1590 {
1591 return( kIOReturnUnsupported);
1592 }
1593
1594 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1595 IOOptionBits * options,
1596 IOMemoryDescriptor ** memory )
1597 {
1598 return( kIOReturnUnsupported);
1599 }
1600
1601 #if !__LP64__
1602 IOMemoryMap * IOUserClient::mapClientMemory(
1603 IOOptionBits type,
1604 task_t task,
1605 IOOptionBits mapFlags,
1606 IOVirtualAddress atAddress )
1607 {
1608 return (NULL);
1609 }
1610 #endif
1611
1612 IOMemoryMap * IOUserClient::mapClientMemory64(
1613 IOOptionBits type,
1614 task_t task,
1615 IOOptionBits mapFlags,
1616 mach_vm_address_t atAddress )
1617 {
1618 IOReturn err;
1619 IOOptionBits options = 0;
1620 IOMemoryDescriptor * memory;
1621 IOMemoryMap * map = 0;
1622
1623 err = clientMemoryForType( (UInt32) type, &options, &memory );
1624
1625 if( memory && (kIOReturnSuccess == err)) {
1626
1627 FAKE_STACK_FRAME(getMetaClass());
1628
1629 options = (options & ~kIOMapUserOptionsMask)
1630 | (mapFlags & kIOMapUserOptionsMask);
1631 map = memory->createMappingInTask( task, atAddress, options );
1632 memory->release();
1633
1634 FAKE_STACK_FRAME_END();
1635 }
1636
1637 return( map );
1638 }
1639
1640 IOReturn IOUserClient::exportObjectToClient(task_t task,
1641 OSObject *obj, io_object_t *clientObj)
1642 {
1643 mach_port_name_t name;
1644
1645 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1646
1647 *(mach_port_name_t *)clientObj = name;
1648 return kIOReturnSuccess;
1649 }
1650
1651 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1652 {
1653 return( 0 );
1654 }
1655
1656 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1657 {
1658 return( 0 );
1659 }
1660
1661 IOExternalTrap * IOUserClient::
1662 getExternalTrapForIndex(UInt32 index)
1663 {
1664 return NULL;
1665 }
1666
1667 #pragma clang diagnostic push
1668 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1669
1670 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1671 // functions can break clients of kexts implementing getExternalMethodForIndex()
1672 IOExternalMethod * IOUserClient::
1673 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1674 {
1675 IOExternalMethod *method = getExternalMethodForIndex(index);
1676
1677 if (method)
1678 *targetP = (IOService *) method->object;
1679
1680 return method;
1681 }
1682
1683 IOExternalAsyncMethod * IOUserClient::
1684 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1685 {
1686 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1687
1688 if (method)
1689 *targetP = (IOService *) method->object;
1690
1691 return method;
1692 }
1693
1694 IOExternalTrap * IOUserClient::
1695 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1696 {
1697 IOExternalTrap *trap = getExternalTrapForIndex(index);
1698
1699 if (trap) {
1700 *targetP = trap->object;
1701 }
1702
1703 return trap;
1704 }
1705 #pragma clang diagnostic pop
1706
1707 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1708 {
1709 mach_port_t port;
1710 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1711
1712 if (MACH_PORT_NULL != port)
1713 iokit_release_port_send(port);
1714
1715 return (kIOReturnSuccess);
1716 }
1717
1718 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1719 {
1720 if (MACH_PORT_NULL != port)
1721 iokit_release_port_send(port);
1722
1723 return (kIOReturnSuccess);
1724 }
1725
1726 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1727 IOReturn result, void *args[], UInt32 numArgs)
1728 {
1729 OSAsyncReference64 reference64;
1730 io_user_reference_t args64[kMaxAsyncArgs];
1731 unsigned int idx;
1732
1733 if (numArgs > kMaxAsyncArgs)
1734 return kIOReturnMessageTooLarge;
1735
1736 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1737 reference64[idx] = REF64(reference[idx]);
1738
1739 for (idx = 0; idx < numArgs; idx++)
1740 args64[idx] = REF64(args[idx]);
1741
1742 return (sendAsyncResult64(reference64, result, args64, numArgs));
1743 }
1744
1745 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1746 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1747 {
1748 return _sendAsyncResult64(reference, result, args, numArgs, options);
1749 }
1750
1751 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1752 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1753 {
1754 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1755 }
1756
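// Builds a 32-bit or 64-bit async completion message, as selected by the
// kIOUCAsync64Flag recorded in the reference, and sends it to the reply port
// stored in reference[0]. With kIOUserNotifyOptionCanDrop the send uses a
// zero timeout so the message may be dropped when the queue is full.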
1757 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1758 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1759 {
1760 struct ReplyMsg
1761 {
1762 mach_msg_header_t msgHdr;
1763 union
1764 {
1765 struct
1766 {
1767 OSNotificationHeader notifyHdr;
1768 IOAsyncCompletionContent asyncContent;
1769 uint32_t args[kMaxAsyncArgs];
1770 } msg32;
1771 struct
1772 {
1773 OSNotificationHeader64 notifyHdr;
1774 IOAsyncCompletionContent asyncContent;
1775 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1776 } msg64;
1777 } m;
1778 };
1779 ReplyMsg replyMsg;
1780 mach_port_t replyPort;
1781 kern_return_t kr;
1782
1783 // If no reply port, do nothing.
1784 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1785 if (replyPort == MACH_PORT_NULL)
1786 return kIOReturnSuccess;
1787
1788 if (numArgs > kMaxAsyncArgs)
1789 return kIOReturnMessageTooLarge;
1790
1791 bzero(&replyMsg, sizeof(replyMsg));
1792 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1793 0 /*local*/);
1794 replyMsg.msgHdr.msgh_remote_port = replyPort;
1795 replyMsg.msgHdr.msgh_local_port = 0;
1796 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1797 if (kIOUCAsync64Flag & reference[0])
1798 {
1799 replyMsg.msgHdr.msgh_size =
1800 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1801 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1802 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1803 + numArgs * sizeof(io_user_reference_t);
1804 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1805 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1806
1807 replyMsg.m.msg64.asyncContent.result = result;
1808 if (numArgs)
1809 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1810 }
1811 else
1812 {
1813 unsigned int idx;
1814
1815 replyMsg.msgHdr.msgh_size =
1816 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1817 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1818
1819 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1820 + numArgs * sizeof(uint32_t);
1821 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1822
1823 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1824 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1825
1826 replyMsg.m.msg32.asyncContent.result = result;
1827
1828 for (idx = 0; idx < numArgs; idx++)
1829 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1830 }
1831
1832 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1833 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1834 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1835 } else {
1836 /* Fail on full queue. */
1837 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1838 replyMsg.msgHdr.msgh_size);
1839 }
1840 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1841 {
1842 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1843 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1844 }
1845 return kr;
1846 }
1847
1848
1849 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1850
1851 extern "C" {
1852
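// CHECK / CHECKLOCKED downcast a MIG-supplied object to the expected class,
// declaring the result as 'out' and returning kIOReturnBadArgument on a type
// mismatch; CHECKLOCKED additionally unwraps an IOUserIterator.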
1853 #define CHECK(cls,obj,out) \
1854 cls * out; \
1855 if( !(out = OSDynamicCast( cls, obj))) \
1856 return( kIOReturnBadArgument )
1857
1858 #define CHECKLOCKED(cls,obj,out) \
1859 IOUserIterator * oIter; \
1860 cls * out; \
1861 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1862 return (kIOReturnBadArgument); \
1863 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1864 return (kIOReturnBadArgument)
1865
1866 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1867
1868 // Create a vm_map_copy_t or kalloc'ed data for memory
1869 // to be copied out. ipc will free after the copyout.
1870
1871 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1872 io_buf_ptr_t * buf )
1873 {
1874 kern_return_t err;
1875 vm_map_copy_t copy;
1876
1877 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1878 false /* src_destroy */, &copy);
1879
1880 assert( err == KERN_SUCCESS );
1881 if( err == KERN_SUCCESS )
1882 *buf = (char *) copy;
1883
1884 return( err );
1885 }
1886
1887 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1888
1889 /* Routine io_server_version */
1890 kern_return_t is_io_server_version(
1891 mach_port_t master_port,
1892 uint64_t *version)
1893 {
1894 *version = IOKIT_SERVER_VERSION;
1895 return (kIOReturnSuccess);
1896 }
1897
1898 /* Routine io_object_get_class */
1899 kern_return_t is_io_object_get_class(
1900 io_object_t object,
1901 io_name_t className )
1902 {
1903 const OSMetaClass* my_obj = NULL;
1904
1905 if( !object)
1906 return( kIOReturnBadArgument );
1907
1908 my_obj = object->getMetaClass();
1909 if (!my_obj) {
1910 return (kIOReturnNotFound);
1911 }
1912
1913 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1914
1915 return( kIOReturnSuccess );
1916 }
1917
1918 /* Routine io_object_get_superclass */
1919 kern_return_t is_io_object_get_superclass(
1920 mach_port_t master_port,
1921 io_name_t obj_name,
1922 io_name_t class_name)
1923 {
1924 const OSMetaClass* my_obj = NULL;
1925 const OSMetaClass* superclass = NULL;
1926 const OSSymbol *my_name = NULL;
1927 const char *my_cstr = NULL;
1928
1929 if (!obj_name || !class_name)
1930 return (kIOReturnBadArgument);
1931
1932 if( master_port != master_device_port)
1933 return( kIOReturnNotPrivileged);
1934
1935 my_name = OSSymbol::withCString(obj_name);
1936
1937 if (my_name) {
1938 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1939 my_name->release();
1940 }
1941 if (my_obj) {
1942 superclass = my_obj->getSuperClass();
1943 }
1944
1945 if (!superclass) {
1946 return( kIOReturnNotFound );
1947 }
1948
1949 my_cstr = superclass->getClassName();
1950
1951 if (my_cstr) {
1952 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1953 return( kIOReturnSuccess );
1954 }
1955 return (kIOReturnNotFound);
1956 }
1957
1958 /* Routine io_object_get_bundle_identifier */
1959 kern_return_t is_io_object_get_bundle_identifier(
1960 mach_port_t master_port,
1961 io_name_t obj_name,
1962 io_name_t bundle_name)
1963 {
1964 const OSMetaClass* my_obj = NULL;
1965 const OSSymbol *my_name = NULL;
1966 const OSSymbol *identifier = NULL;
1967 const char *my_cstr = NULL;
1968
1969 if (!obj_name || !bundle_name)
1970 return (kIOReturnBadArgument);
1971
1972 if( master_port != master_device_port)
1973 return( kIOReturnNotPrivileged);
1974
1975 my_name = OSSymbol::withCString(obj_name);
1976
1977 if (my_name) {
1978 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1979 my_name->release();
1980 }
1981
1982 if (my_obj) {
1983 identifier = my_obj->getKmodName();
1984 }
1985 if (!identifier) {
1986 return( kIOReturnNotFound );
1987 }
1988
1989 my_cstr = identifier->getCStringNoCopy();
1990 if (my_cstr) {
1991 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1992 return( kIOReturnSuccess );
1993 }
1994
1995 return (kIOReturnBadArgument);
1996 }
1997
1998 /* Routine io_object_conforms_to */
1999 kern_return_t is_io_object_conforms_to(
2000 io_object_t object,
2001 io_name_t className,
2002 boolean_t *conforms )
2003 {
2004 if( !object)
2005 return( kIOReturnBadArgument );
2006
2007 *conforms = (0 != object->metaCast( className ));
2008
2009 return( kIOReturnSuccess );
2010 }
2011
2012 /* Routine io_object_get_retain_count */
2013 kern_return_t is_io_object_get_retain_count(
2014 io_object_t object,
2015 uint32_t *retainCount )
2016 {
2017 if( !object)
2018 return( kIOReturnBadArgument );
2019
2020 *retainCount = object->getRetainCount();
2021 return( kIOReturnSuccess );
2022 }
2023
2024 /* Routine io_iterator_next */
2025 kern_return_t is_io_iterator_next(
2026 io_object_t iterator,
2027 io_object_t *object )
2028 {
2029 IOReturn ret;
2030 OSObject * obj;
2031
2032 CHECK( OSIterator, iterator, iter );
2033
2034 obj = iter->getNextObject();
2035 if( obj) {
2036 obj->retain();
2037 *object = obj;
2038 ret = kIOReturnSuccess;
2039 } else
2040 ret = kIOReturnNoDevice;
2041
2042 return (ret);
2043 }
2044
2045 /* Routine io_iterator_reset */
2046 kern_return_t is_io_iterator_reset(
2047 io_object_t iterator )
2048 {
2049 CHECK( OSIterator, iterator, iter );
2050
2051 iter->reset();
2052
2053 return( kIOReturnSuccess );
2054 }
2055
2056 /* Routine io_iterator_is_valid */
2057 kern_return_t is_io_iterator_is_valid(
2058 io_object_t iterator,
2059 boolean_t *is_valid )
2060 {
2061 CHECK( OSIterator, iterator, iter );
2062
2063 *is_valid = iter->isValid();
2064
2065 return( kIOReturnSuccess );
2066 }
2067
2068
2069 static kern_return_t internal_io_service_match_property_table(
2070 io_service_t _service,
2071 const char * matching,
2072 mach_msg_type_number_t matching_size,
2073 boolean_t *matches)
2074 {
2075 CHECK( IOService, _service, service );
2076
2077 kern_return_t kr;
2078 OSObject * obj;
2079 OSDictionary * dict;
2080
2081 assert(matching_size);
2082 obj = OSUnserializeXML(matching, matching_size);
2083
2084 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2085 *matches = service->passiveMatch( dict );
2086 kr = kIOReturnSuccess;
2087 } else
2088 kr = kIOReturnBadArgument;
2089
2090 if( obj)
2091 obj->release();
2092
2093 return( kr );
2094 }
2095
2096 /* Routine io_service_match_property_table */
2097 kern_return_t is_io_service_match_property_table(
2098 io_service_t service,
2099 io_string_t matching,
2100 boolean_t *matches )
2101 {
2102 return (kIOReturnUnsupported);
2103 }
2104
2105
2106 /* Routine io_service_match_property_table_ool */
2107 kern_return_t is_io_service_match_property_table_ool(
2108 io_object_t service,
2109 io_buf_ptr_t matching,
2110 mach_msg_type_number_t matchingCnt,
2111 kern_return_t *result,
2112 boolean_t *matches )
2113 {
2114 kern_return_t kr;
2115 vm_offset_t data;
2116 vm_map_offset_t map_data;
2117
2118 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2119 data = CAST_DOWN(vm_offset_t, map_data);
2120
2121 if( KERN_SUCCESS == kr) {
2122 // must return success after vm_map_copyout() succeeds
2123 *result = internal_io_service_match_property_table(service,
2124 (const char *)data, matchingCnt, matches );
2125 vm_deallocate( kernel_map, data, matchingCnt );
2126 }
2127
2128 return( kr );
2129 }
2130
2131 /* Routine io_service_match_property_table_bin */
2132 kern_return_t is_io_service_match_property_table_bin(
2133 io_object_t service,
2134 io_struct_inband_t matching,
2135 mach_msg_type_number_t matchingCnt,
2136 boolean_t *matches)
2137 {
2138 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2139 }
2140
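/*
 * Helper shared by the get_matching_services variants: requires the caller to
 * hold the master device port, unserializes the matching dictionary, and wraps
 * the resulting IOService iterator in an IOUserIterator for export to user
 * space. Illustrative user-space path (an assumption, not part of this file):
 *
 *   io_iterator_t iter;
 *   IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                                IOServiceMatching("IOMedia"), &iter);
 */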
2141 static kern_return_t internal_io_service_get_matching_services(
2142 mach_port_t master_port,
2143 const char * matching,
2144 mach_msg_type_number_t matching_size,
2145 io_iterator_t *existing )
2146 {
2147 kern_return_t kr;
2148 OSObject * obj;
2149 OSDictionary * dict;
2150
2151 if( master_port != master_device_port)
2152 return( kIOReturnNotPrivileged);
2153
2154 assert(matching_size);
2155 obj = OSUnserializeXML(matching, matching_size);
2156
2157 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2158 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2159 kr = kIOReturnSuccess;
2160 } else
2161 kr = kIOReturnBadArgument;
2162
2163 if( obj)
2164 obj->release();
2165
2166 return( kr );
2167 }
2168
2169 /* Routine io_service_get_matching_services */
2170 kern_return_t is_io_service_get_matching_services(
2171 mach_port_t master_port,
2172 io_string_t matching,
2173 io_iterator_t *existing )
2174 {
2175 return (kIOReturnUnsupported);
2176 }
2177
2178 /* Routine io_service_get_matching_services_ool */
2179 kern_return_t is_io_service_get_matching_services_ool(
2180 mach_port_t master_port,
2181 io_buf_ptr_t matching,
2182 mach_msg_type_number_t matchingCnt,
2183 kern_return_t *result,
2184 io_object_t *existing )
2185 {
2186 kern_return_t kr;
2187 vm_offset_t data;
2188 vm_map_offset_t map_data;
2189
2190 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2191 data = CAST_DOWN(vm_offset_t, map_data);
2192
2193 if( KERN_SUCCESS == kr) {
2194 // must return success after vm_map_copyout() succeeds
2195 // and mig will copy out objects on success
2196 *existing = 0;
2197 *result = internal_io_service_get_matching_services(master_port,
2198 (const char *) data, matchingCnt, existing);
2199 vm_deallocate( kernel_map, data, matchingCnt );
2200 }
2201
2202 return( kr );
2203 }
2204
2205 /* Routine io_service_get_matching_services_bin */
2206 kern_return_t is_io_service_get_matching_services_bin(
2207 mach_port_t master_port,
2208 io_struct_inband_t matching,
2209 mach_msg_type_number_t matchingCnt,
2210 io_object_t *existing)
2211 {
2212 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2213 }
2214
2215
2216 static kern_return_t internal_io_service_get_matching_service(
2217 mach_port_t master_port,
2218 const char * matching,
2219 mach_msg_type_number_t matching_size,
2220 io_service_t *service )
2221 {
2222 kern_return_t kr;
2223 OSObject * obj;
2224 OSDictionary * dict;
2225
2226 if( master_port != master_device_port)
2227 return( kIOReturnNotPrivileged);
2228
2229 assert(matching_size);
2230 obj = OSUnserializeXML(matching, matching_size);
2231
2232 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2233 *service = IOService::copyMatchingService( dict );
2234 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2235 } else
2236 kr = kIOReturnBadArgument;
2237
2238 if( obj)
2239 obj->release();
2240
2241 return( kr );
2242 }
2243
2244 /* Routine io_service_get_matching_service */
2245 kern_return_t is_io_service_get_matching_service(
2246 mach_port_t master_port,
2247 io_string_t matching,
2248 io_service_t *service )
2249 {
2250 return (kIOReturnUnsupported);
2251 }
2252
2253 /* Routine io_service_get_matching_service_ool */
2254 kern_return_t is_io_service_get_matching_service_ool(
2255 mach_port_t master_port,
2256 io_buf_ptr_t matching,
2257 mach_msg_type_number_t matchingCnt,
2258 kern_return_t *result,
2259 io_object_t *service )
2260 {
2261 kern_return_t kr;
2262 vm_offset_t data;
2263 vm_map_offset_t map_data;
2264
2265 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2266 data = CAST_DOWN(vm_offset_t, map_data);
2267
2268 if( KERN_SUCCESS == kr) {
2269 // must return success after vm_map_copyout() succeeds
2270 // and mig will copy out objects on success
2271 *service = 0;
2272 *result = internal_io_service_get_matching_service(master_port,
2273 (const char *) data, matchingCnt, service );
2274 vm_deallocate( kernel_map, data, matchingCnt );
2275 }
2276
2277 return( kr );
2278 }
2279
2280 /* Routine io_service_get_matching_service_bin */
2281 kern_return_t is_io_service_get_matching_service_bin(
2282 mach_port_t master_port,
2283 io_struct_inband_t matching,
2284 mach_msg_type_number_t matchingCnt,
2285 io_object_t *service)
2286 {
2287 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2288 }
2289
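/*
 * Helper shared by the add_notification variants: maps the notification type
 * symbol (publish / first publish / matched / first match / terminated) to the
 * corresponding user message type, wraps the matching dictionary in an
 * IOServiceUserNotification armed with the caller's wake port, and installs it
 * with IOService::addMatchingNotification(). The send right on 'port' travels
 * with the MIG call; it is released here only if the notification object
 * cannot be initialized.
 */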
2290 static kern_return_t internal_io_service_add_notification(
2291 mach_port_t master_port,
2292 io_name_t notification_type,
2293 const char * matching,
2294 size_t matching_size,
2295 mach_port_t port,
2296 void * reference,
2297 vm_size_t referenceSize,
2298 bool client64,
2299 io_object_t * notification )
2300 {
2301 IOServiceUserNotification * userNotify = 0;
2302 IONotifier * notify = 0;
2303 const OSSymbol * sym;
2304 OSDictionary * dict;
2305 IOReturn err;
2306 unsigned long int userMsgType;
2307
2308 if( master_port != master_device_port)
2309 return( kIOReturnNotPrivileged);
2310
2311 do {
2312 err = kIOReturnNoResources;
2313
2314 if( !(sym = OSSymbol::withCString( notification_type )))
2315 err = kIOReturnNoResources;
2316
2317 assert(matching_size);
2318 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2319 if (!dict) {
2320 err = kIOReturnBadArgument;
2321 continue;
2322 }
2323
2324 if( (sym == gIOPublishNotification)
2325 || (sym == gIOFirstPublishNotification))
2326 userMsgType = kIOServicePublishNotificationType;
2327 else if( (sym == gIOMatchedNotification)
2328 || (sym == gIOFirstMatchNotification))
2329 userMsgType = kIOServiceMatchedNotificationType;
2330 else if( sym == gIOTerminatedNotification)
2331 userMsgType = kIOServiceTerminatedNotificationType;
2332 else
2333 userMsgType = kLastIOKitNotificationType;
2334
2335 userNotify = new IOServiceUserNotification;
2336
2337 if( userNotify && !userNotify->init( port, userMsgType,
2338 reference, referenceSize, client64)) {
2339 iokit_release_port_send(port);
2340 userNotify->release();
2341 userNotify = 0;
2342 }
2343 if( !userNotify)
2344 continue;
2345
2346 notify = IOService::addMatchingNotification( sym, dict,
2347 &userNotify->_handler, userNotify );
2348 if( notify) {
2349 *notification = userNotify;
2350 userNotify->setNotification( notify );
2351 err = kIOReturnSuccess;
2352 } else
2353 err = kIOReturnUnsupported;
2354
2355 } while( false );
2356
2357 if( sym)
2358 sym->release();
2359 if( dict)
2360 dict->release();
2361
2362 return( err );
2363 }
2364
2365
2366 /* Routine io_service_add_notification */
2367 kern_return_t is_io_service_add_notification(
2368 mach_port_t master_port,
2369 io_name_t notification_type,
2370 io_string_t matching,
2371 mach_port_t port,
2372 io_async_ref_t reference,
2373 mach_msg_type_number_t referenceCnt,
2374 io_object_t * notification )
2375 {
2376 return (kIOReturnUnsupported);
2377 }
2378
2379 /* Routine io_service_add_notification_64 */
2380 kern_return_t is_io_service_add_notification_64(
2381 mach_port_t master_port,
2382 io_name_t notification_type,
2383 io_string_t matching,
2384 mach_port_t wake_port,
2385 io_async_ref64_t reference,
2386 mach_msg_type_number_t referenceCnt,
2387 io_object_t *notification )
2388 {
2389 return (kIOReturnUnsupported);
2390 }
2391
2392 /* Routine io_service_add_notification_bin */
2393 kern_return_t is_io_service_add_notification_bin
2394 (
2395 mach_port_t master_port,
2396 io_name_t notification_type,
2397 io_struct_inband_t matching,
2398 mach_msg_type_number_t matchingCnt,
2399 mach_port_t wake_port,
2400 io_async_ref_t reference,
2401 mach_msg_type_number_t referenceCnt,
2402 io_object_t *notification)
2403 {
2404 return (internal_io_service_add_notification(master_port, notification_type,
2405 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2406 false, notification));
2407 }
2408
2409 /* Routine io_service_add_notification_bin_64 */
2410 kern_return_t is_io_service_add_notification_bin_64
2411 (
2412 mach_port_t master_port,
2413 io_name_t notification_type,
2414 io_struct_inband_t matching,
2415 mach_msg_type_number_t matchingCnt,
2416 mach_port_t wake_port,
2417 io_async_ref64_t reference,
2418 mach_msg_type_number_t referenceCnt,
2419 io_object_t *notification)
2420 {
2421 return (internal_io_service_add_notification(master_port, notification_type,
2422 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2423 true, notification));
2424 }
2425
2426 static kern_return_t internal_io_service_add_notification_ool(
2427 mach_port_t master_port,
2428 io_name_t notification_type,
2429 io_buf_ptr_t matching,
2430 mach_msg_type_number_t matchingCnt,
2431 mach_port_t wake_port,
2432 void * reference,
2433 vm_size_t referenceSize,
2434 bool client64,
2435 kern_return_t *result,
2436 io_object_t *notification )
2437 {
2438 kern_return_t kr;
2439 vm_offset_t data;
2440 vm_map_offset_t map_data;
2441
2442 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2443 data = CAST_DOWN(vm_offset_t, map_data);
2444
2445 if( KERN_SUCCESS == kr) {
2446 // must return success after vm_map_copyout() succeeds
2447 // and mig will copy out objects on success
2448 *notification = 0;
2449 *result = internal_io_service_add_notification( master_port, notification_type,
2450 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2451 vm_deallocate( kernel_map, data, matchingCnt );
2452 }
2453
2454 return( kr );
2455 }
2456
2457 /* Routine io_service_add_notification_ool */
2458 kern_return_t is_io_service_add_notification_ool(
2459 mach_port_t master_port,
2460 io_name_t notification_type,
2461 io_buf_ptr_t matching,
2462 mach_msg_type_number_t matchingCnt,
2463 mach_port_t wake_port,
2464 io_async_ref_t reference,
2465 mach_msg_type_number_t referenceCnt,
2466 kern_return_t *result,
2467 io_object_t *notification )
2468 {
2469 return (internal_io_service_add_notification_ool(master_port, notification_type,
2470 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2471 false, result, notification));
2472 }
2473
2474 /* Routine io_service_add_notification_ool_64 */
2475 kern_return_t is_io_service_add_notification_ool_64(
2476 mach_port_t master_port,
2477 io_name_t notification_type,
2478 io_buf_ptr_t matching,
2479 mach_msg_type_number_t matchingCnt,
2480 mach_port_t wake_port,
2481 io_async_ref64_t reference,
2482 mach_msg_type_number_t referenceCnt,
2483 kern_return_t *result,
2484 io_object_t *notification )
2485 {
2486 return (internal_io_service_add_notification_ool(master_port, notification_type,
2487 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2488 true, result, notification));
2489 }
2490
2491 /* Routine io_service_add_notification_old */
2492 kern_return_t is_io_service_add_notification_old(
2493 mach_port_t master_port,
2494 io_name_t notification_type,
2495 io_string_t matching,
2496 mach_port_t port,
2497 // for binary compatibility reasons, this must be natural_t for ILP32
2498 natural_t ref,
2499 io_object_t * notification )
2500 {
2501 return( is_io_service_add_notification( master_port, notification_type,
2502 matching, port, &ref, 1, notification ));
2503 }
2504
2505
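/*
 * Helper shared by the add_interest_notification variants: creates an
 * IOServiceMessageUserNotification bound to the caller's wake port and
 * registers it for the given interest type via IOService::registerInterest().
 * As above, the port send right is released only if initialization fails.
 */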
2506 static kern_return_t internal_io_service_add_interest_notification(
2507 io_object_t _service,
2508 io_name_t type_of_interest,
2509 mach_port_t port,
2510 void * reference,
2511 vm_size_t referenceSize,
2512 bool client64,
2513 io_object_t * notification )
2514 {
2515
2516 IOServiceMessageUserNotification * userNotify = 0;
2517 IONotifier * notify = 0;
2518 const OSSymbol * sym;
2519 IOReturn err;
2520
2521 CHECK( IOService, _service, service );
2522
2523 err = kIOReturnNoResources;
2524 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2525
2526 userNotify = new IOServiceMessageUserNotification;
2527
2528 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2529 reference, referenceSize,
2530 kIOUserNotifyMaxMessageSize,
2531 client64 )) {
2532 iokit_release_port_send(port);
2533 userNotify->release();
2534 userNotify = 0;
2535 }
2536 if( !userNotify)
2537 continue;
2538
2539 notify = service->registerInterest( sym,
2540 &userNotify->_handler, userNotify );
2541 if( notify) {
2542 *notification = userNotify;
2543 userNotify->setNotification( notify );
2544 err = kIOReturnSuccess;
2545 } else
2546 err = kIOReturnUnsupported;
2547
2548 sym->release();
2549
2550 } while( false );
2551
2552 return( err );
2553 }
2554
2555 /* Routine io_service_add_interest_notification */
2556 kern_return_t is_io_service_add_interest_notification(
2557 io_object_t service,
2558 io_name_t type_of_interest,
2559 mach_port_t port,
2560 io_async_ref_t reference,
2561 mach_msg_type_number_t referenceCnt,
2562 io_object_t * notification )
2563 {
2564 return (internal_io_service_add_interest_notification(service, type_of_interest,
2565 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2566 }
2567
2568 /* Routine io_service_add_interest_notification_64 */
2569 kern_return_t is_io_service_add_interest_notification_64(
2570 io_object_t service,
2571 io_name_t type_of_interest,
2572 mach_port_t wake_port,
2573 io_async_ref64_t reference,
2574 mach_msg_type_number_t referenceCnt,
2575 io_object_t *notification )
2576 {
2577 return (internal_io_service_add_interest_notification(service, type_of_interest,
2578 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2579 }
2580
2581
2582 /* Routine io_service_acknowledge_notification */
2583 kern_return_t is_io_service_acknowledge_notification(
2584 io_object_t _service,
2585 natural_t notify_ref,
2586 natural_t response )
2587 {
2588 CHECK( IOService, _service, service );
2589
2590 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2591 (IOOptionBits) response ));
2592
2593 }
2594
2595 /* Routine io_connect_get_notification_semaphore */
2596 kern_return_t is_io_connect_get_notification_semaphore(
2597 io_connect_t connection,
2598 natural_t notification_type,
2599 semaphore_t *semaphore )
2600 {
2601 CHECK( IOUserClient, connection, client );
2602
2603 IOStatisticsClientCall();
2604 return( client->getNotificationSemaphore( (UInt32) notification_type,
2605 semaphore ));
2606 }
2607
2608 /* Routine io_registry_get_root_entry */
2609 kern_return_t is_io_registry_get_root_entry(
2610 mach_port_t master_port,
2611 io_object_t *root )
2612 {
2613 IORegistryEntry * entry;
2614
2615 if( master_port != master_device_port)
2616 return( kIOReturnNotPrivileged);
2617
2618 entry = IORegistryEntry::getRegistryRoot();
2619 if( entry)
2620 entry->retain();
2621 *root = entry;
2622
2623 return( kIOReturnSuccess );
2624 }
2625
2626 /* Routine io_registry_create_iterator */
2627 kern_return_t is_io_registry_create_iterator(
2628 mach_port_t master_port,
2629 io_name_t plane,
2630 uint32_t options,
2631 io_object_t *iterator )
2632 {
2633 if( master_port != master_device_port)
2634 return( kIOReturnNotPrivileged);
2635
2636 *iterator = IOUserIterator::withIterator(
2637 IORegistryIterator::iterateOver(
2638 IORegistryEntry::getPlane( plane ), options ));
2639
2640 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2641 }
2642
2643 /* Routine io_registry_entry_create_iterator */
2644 kern_return_t is_io_registry_entry_create_iterator(
2645 io_object_t registry_entry,
2646 io_name_t plane,
2647 uint32_t options,
2648 io_object_t *iterator )
2649 {
2650 CHECK( IORegistryEntry, registry_entry, entry );
2651
2652 *iterator = IOUserIterator::withIterator(
2653 IORegistryIterator::iterateOver( entry,
2654 IORegistryEntry::getPlane( plane ), options ));
2655
2656 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2657 }
2658
2659 /* Routine io_registry_iterator_enter */
2660 kern_return_t is_io_registry_iterator_enter_entry(
2661 io_object_t iterator )
2662 {
2663 CHECKLOCKED( IORegistryIterator, iterator, iter );
2664
2665 IOLockLock(oIter->lock);
2666 iter->enterEntry();
2667 IOLockUnlock(oIter->lock);
2668
2669 return( kIOReturnSuccess );
2670 }
2671
2672 /* Routine io_registry_iterator_exit */
2673 kern_return_t is_io_registry_iterator_exit_entry(
2674 io_object_t iterator )
2675 {
2676 bool didIt;
2677
2678 CHECKLOCKED( IORegistryIterator, iterator, iter );
2679
2680 IOLockLock(oIter->lock);
2681 didIt = iter->exitEntry();
2682 IOLockUnlock(oIter->lock);
2683
2684 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2685 }
2686
2687 /* Routine io_registry_entry_from_path */
2688 kern_return_t is_io_registry_entry_from_path(
2689 mach_port_t master_port,
2690 io_string_t path,
2691 io_object_t *registry_entry )
2692 {
2693 IORegistryEntry * entry;
2694
2695 if( master_port != master_device_port)
2696 return( kIOReturnNotPrivileged);
2697
2698 entry = IORegistryEntry::fromPath( path );
2699
2700 *registry_entry = entry;
2701
2702 return( kIOReturnSuccess );
2703 }
2704
2705
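/*
 * Out-of-line variant of registry path lookup: short paths arrive inband in
 * 'path', longer ones as a vm_map copy in 'path_ool'. The OOL buffer is
 * bounds-checked and must be NUL-terminated. Per MIG convention the routine
 * returns KERN_SUCCESS once the copyout has succeeded and reports the actual
 * lookup status through *result.
 */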
2706 /* Routine io_registry_entry_from_path_ool */
2707 kern_return_t is_io_registry_entry_from_path_ool(
2708 mach_port_t master_port,
2709 io_string_inband_t path,
2710 io_buf_ptr_t path_ool,
2711 mach_msg_type_number_t path_oolCnt,
2712 kern_return_t *result,
2713 io_object_t *registry_entry)
2714 {
2715 IORegistryEntry * entry;
2716 vm_map_offset_t map_data;
2717 const char * cpath;
2718 IOReturn res;
2719 kern_return_t err;
2720
2721 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2722
2723 map_data = 0;
2724 entry = 0;
2725 res = err = KERN_SUCCESS;
2726 if (path[0]) cpath = path;
2727 else
2728 {
2729 if (!path_oolCnt) return(kIOReturnBadArgument);
2730 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2731
2732 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2733 if (KERN_SUCCESS == err)
2734 {
2735 // must return success to MIG after vm_map_copyout() succeeds; the actual status is reported in *result
2736 cpath = CAST_DOWN(const char *, map_data);
2737 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2738 }
2739 }
2740
2741 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2742 {
2743 entry = IORegistryEntry::fromPath(cpath);
2744 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2745 }
2746
2747 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2748
2749 if (KERN_SUCCESS != err) res = err;
2750 *registry_entry = entry;
2751 *result = res;
2752
2753 return (err);
2754 }
2755
2756
2757 /* Routine io_registry_entry_in_plane */
2758 kern_return_t is_io_registry_entry_in_plane(
2759 io_object_t registry_entry,
2760 io_name_t plane,
2761 boolean_t *inPlane )
2762 {
2763 CHECK( IORegistryEntry, registry_entry, entry );
2764
2765 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2766
2767 return( kIOReturnSuccess );
2768 }
2769
2770
2771 /* Routine io_registry_entry_get_path */
2772 kern_return_t is_io_registry_entry_get_path(
2773 io_object_t registry_entry,
2774 io_name_t plane,
2775 io_string_t path )
2776 {
2777 int length;
2778 CHECK( IORegistryEntry, registry_entry, entry );
2779
2780 length = sizeof( io_string_t);
2781 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2782 return( kIOReturnSuccess );
2783 else
2784 return( kIOReturnBadArgument );
2785 }
2786
2787 /* Routine io_registry_entry_get_path_ool */
2788 kern_return_t is_io_registry_entry_get_path_ool(
2789 io_object_t registry_entry,
2790 io_name_t plane,
2791 io_string_inband_t path,
2792 io_buf_ptr_t *path_ool,
2793 mach_msg_type_number_t *path_oolCnt)
2794 {
2795 enum { kMaxPath = 16384 };
2796 IOReturn err;
2797 int length;
2798 char * buf;
2799
2800 CHECK( IORegistryEntry, registry_entry, entry );
2801
2802 *path_ool = NULL;
2803 *path_oolCnt = 0;
2804 length = sizeof(io_string_inband_t);
2805 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2806 else
2807 {
2808 length = kMaxPath;
2809 buf = IONew(char, length);
2810 if (!buf) err = kIOReturnNoMemory;
2811 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2812 else
2813 {
2814 *path_oolCnt = length;
2815 err = copyoutkdata(buf, length, path_ool);
2816 }
2817 if (buf) IODelete(buf, char, kMaxPath);
2818 }
2819
2820 return (err);
2821 }
2822
2823
2824 /* Routine io_registry_entry_get_name */
2825 kern_return_t is_io_registry_entry_get_name(
2826 io_object_t registry_entry,
2827 io_name_t name )
2828 {
2829 CHECK( IORegistryEntry, registry_entry, entry );
2830
2831 strncpy( name, entry->getName(), sizeof( io_name_t));
2832
2833 return( kIOReturnSuccess );
2834 }
2835
2836 /* Routine io_registry_entry_get_name_in_plane */
2837 kern_return_t is_io_registry_entry_get_name_in_plane(
2838 io_object_t registry_entry,
2839 io_name_t planeName,
2840 io_name_t name )
2841 {
2842 const IORegistryPlane * plane;
2843 CHECK( IORegistryEntry, registry_entry, entry );
2844
2845 if( planeName[0])
2846 plane = IORegistryEntry::getPlane( planeName );
2847 else
2848 plane = 0;
2849
2850 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2851
2852 return( kIOReturnSuccess );
2853 }
2854
2855 /* Routine io_registry_entry_get_location_in_plane */
2856 kern_return_t is_io_registry_entry_get_location_in_plane(
2857 io_object_t registry_entry,
2858 io_name_t planeName,
2859 io_name_t location )
2860 {
2861 const IORegistryPlane * plane;
2862 CHECK( IORegistryEntry, registry_entry, entry );
2863
2864 if( planeName[0])
2865 plane = IORegistryEntry::getPlane( planeName );
2866 else
2867 plane = 0;
2868
2869 const char * cstr = entry->getLocation( plane );
2870
2871 if( cstr) {
2872 strncpy( location, cstr, sizeof( io_name_t));
2873 return( kIOReturnSuccess );
2874 } else
2875 return( kIOReturnNotFound );
2876 }
2877
2878 /* Routine io_registry_entry_get_registry_entry_id */
2879 kern_return_t is_io_registry_entry_get_registry_entry_id(
2880 io_object_t registry_entry,
2881 uint64_t *entry_id )
2882 {
2883 CHECK( IORegistryEntry, registry_entry, entry );
2884
2885 *entry_id = entry->getRegistryEntryID();
2886
2887 return (kIOReturnSuccess);
2888 }
2889
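/*
 * Flattens a single property into the caller's inband buffer: OSData as raw
 * bytes, OSString as a NUL-terminated C string, OSBoolean as "Yes"/"No", and
 * OSNumber as its value in numberOfBytes() bytes. Other property types are
 * rejected with kIOReturnBadArgument, and a too-small buffer yields
 * kIOReturnIPCError.
 */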
2890 /* Routine io_registry_entry_get_property_bytes */
2891 kern_return_t is_io_registry_entry_get_property_bytes(
2892 io_object_t registry_entry,
2893 io_name_t property_name,
2894 io_struct_inband_t buf,
2895 mach_msg_type_number_t *dataCnt )
2896 {
2897 OSObject * obj;
2898 OSData * data;
2899 OSString * str;
2900 OSBoolean * boo;
2901 OSNumber * off;
2902 UInt64 offsetBytes;
2903 unsigned int len = 0;
2904 const void * bytes = 0;
2905 IOReturn ret = kIOReturnSuccess;
2906
2907 CHECK( IORegistryEntry, registry_entry, entry );
2908
2909 #if CONFIG_MACF
2910 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2911 return kIOReturnNotPermitted;
2912 #endif
2913
2914 obj = entry->copyProperty(property_name);
2915 if( !obj)
2916 return( kIOReturnNoResources );
2917
2918 // One day OSData will be a common container base class
2919 // until then...
2920 if( (data = OSDynamicCast( OSData, obj ))) {
2921 len = data->getLength();
2922 bytes = data->getBytesNoCopy();
2923
2924 } else if( (str = OSDynamicCast( OSString, obj ))) {
2925 len = str->getLength() + 1;
2926 bytes = str->getCStringNoCopy();
2927
2928 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2929 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2930 bytes = boo->isTrue() ? "Yes" : "No";
2931
2932 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2933 offsetBytes = off->unsigned64BitValue();
2934 len = off->numberOfBytes();
2935 bytes = &offsetBytes;
2936 #ifdef __BIG_ENDIAN__
2937 bytes = (const void *)
2938 (((UInt32) bytes) + (sizeof( UInt64) - len));
2939 #endif
2940
2941 } else
2942 ret = kIOReturnBadArgument;
2943
2944 if( bytes) {
2945 if( *dataCnt < len)
2946 ret = kIOReturnIPCError;
2947 else {
2948 *dataCnt = len;
2949 bcopy( bytes, buf, len );
2950 }
2951 }
2952 obj->release();
2953
2954 return( ret );
2955 }
2956
2957
2958 /* Routine io_registry_entry_get_property */
2959 kern_return_t is_io_registry_entry_get_property(
2960 io_object_t registry_entry,
2961 io_name_t property_name,
2962 io_buf_ptr_t *properties,
2963 mach_msg_type_number_t *propertiesCnt )
2964 {
2965 kern_return_t err;
2966 vm_size_t len;
2967 OSObject * obj;
2968
2969 CHECK( IORegistryEntry, registry_entry, entry );
2970
2971 #if CONFIG_MACF
2972 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2973 return kIOReturnNotPermitted;
2974 #endif
2975
2976 obj = entry->copyProperty(property_name);
2977 if( !obj)
2978 return( kIOReturnNotFound );
2979
2980 OSSerialize * s = OSSerialize::withCapacity(4096);
2981 if( !s) {
2982 obj->release();
2983 return( kIOReturnNoMemory );
2984 }
2985
2986 if( obj->serialize( s )) {
2987 len = s->getLength();
2988 *propertiesCnt = len;
2989 err = copyoutkdata( s->text(), len, properties );
2990
2991 } else
2992 err = kIOReturnUnsupported;
2993
2994 s->release();
2995 obj->release();
2996
2997 return( err );
2998 }
2999
3000 /* Routine io_registry_entry_get_property_recursively */
3001 kern_return_t is_io_registry_entry_get_property_recursively(
3002 io_object_t registry_entry,
3003 io_name_t plane,
3004 io_name_t property_name,
3005 uint32_t options,
3006 io_buf_ptr_t *properties,
3007 mach_msg_type_number_t *propertiesCnt )
3008 {
3009 kern_return_t err;
3010 vm_size_t len;
3011 OSObject * obj;
3012
3013 CHECK( IORegistryEntry, registry_entry, entry );
3014
3015 #if CONFIG_MACF
3016 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3017 return kIOReturnNotPermitted;
3018 #endif
3019
3020 obj = entry->copyProperty( property_name,
3021 IORegistryEntry::getPlane( plane ), options );
3022 if( !obj)
3023 return( kIOReturnNotFound );
3024
3025 OSSerialize * s = OSSerialize::withCapacity(4096);
3026 if( !s) {
3027 obj->release();
3028 return( kIOReturnNoMemory );
3029 }
3030
3031 if( obj->serialize( s )) {
3032 len = s->getLength();
3033 *propertiesCnt = len;
3034 err = copyoutkdata( s->text(), len, properties );
3035
3036 } else
3037 err = kIOReturnUnsupported;
3038
3039 s->release();
3040 obj->release();
3041
3042 return( err );
3043 }
3044
3045 /* Routine io_registry_entry_get_properties */
3046 kern_return_t is_io_registry_entry_get_properties(
3047 io_object_t registry_entry,
3048 io_buf_ptr_t *properties,
3049 mach_msg_type_number_t *propertiesCnt )
3050 {
3051 return (kIOReturnUnsupported);
3052 }
3053
3054 #if CONFIG_MACF
3055
3056 struct GetPropertiesEditorRef
3057 {
3058 kauth_cred_t cred;
3059 IORegistryEntry * entry;
3060 OSCollection * root;
3061 };
3062
3063 static const OSMetaClassBase *
3064 GetPropertiesEditor(void * reference,
3065 OSSerialize * s,
3066 OSCollection * container,
3067 const OSSymbol * name,
3068 const OSMetaClassBase * value)
3069 {
3070 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3071
3072 if (!ref->root) ref->root = container;
3073 if (ref->root == container)
3074 {
3075 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3076 {
3077 value = 0;
3078 }
3079 }
3080 if (value) value->retain();
3081 return (value);
3082 }
3083
3084 #endif /* CONFIG_MACF */
3085
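/*
 * Binary-serialized snapshot of all properties. When the MAC policy requests
 * filtering, GetPropertiesEditor() above vets each top-level key with
 * mac_iokit_check_get_property() and drops disallowed values before they are
 * serialized. Typically reached from user space via
 * IORegistryEntryCreateCFProperties() in IOKitLib; that mapping is an
 * assumption, not something this file spells out.
 */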
3086 /* Routine io_registry_entry_get_properties_bin */
3087 kern_return_t is_io_registry_entry_get_properties_bin(
3088 io_object_t registry_entry,
3089 io_buf_ptr_t *properties,
3090 mach_msg_type_number_t *propertiesCnt)
3091 {
3092 kern_return_t err = kIOReturnSuccess;
3093 vm_size_t len;
3094 OSSerialize * s;
3095 OSSerialize::Editor editor = 0;
3096 void * editRef = 0;
3097
3098 CHECK(IORegistryEntry, registry_entry, entry);
3099
3100 #if CONFIG_MACF
3101 GetPropertiesEditorRef ref;
3102 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3103 {
3104 editor = &GetPropertiesEditor;
3105 editRef = &ref;
3106 ref.cred = kauth_cred_get();
3107 ref.entry = entry;
3108 ref.root = 0;
3109 }
3110 #endif
3111
3112 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3113 if (!s) return (kIOReturnNoMemory);
3114
3115 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3116
3117 if (kIOReturnSuccess == err)
3118 {
3119 len = s->getLength();
3120 *propertiesCnt = len;
3121 err = copyoutkdata(s->text(), len, properties);
3122 }
3123 s->release();
3124
3125 return (err);
3126 }
3127
3128 /* Routine io_registry_entry_get_property_bin */
3129 kern_return_t is_io_registry_entry_get_property_bin(
3130 io_object_t registry_entry,
3131 io_name_t plane,
3132 io_name_t property_name,
3133 uint32_t options,
3134 io_buf_ptr_t *properties,
3135 mach_msg_type_number_t *propertiesCnt )
3136 {
3137 kern_return_t err;
3138 vm_size_t len;
3139 OSObject * obj;
3140 const OSSymbol * sym;
3141
3142 CHECK( IORegistryEntry, registry_entry, entry );
3143
3144 #if CONFIG_MACF
3145 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3146 return kIOReturnNotPermitted;
3147 #endif
3148
3149 sym = OSSymbol::withCString(property_name);
3150 if (!sym) return (kIOReturnNoMemory);
3151
3152 if (gIORegistryEntryPropertyKeysKey == sym)
3153 {
3154 obj = entry->copyPropertyKeys();
3155 }
3156 else
3157 {
3158 if ((kIORegistryIterateRecursively & options) && plane[0])
3159 {
3160 obj = entry->copyProperty(property_name,
3161 IORegistryEntry::getPlane(plane), options );
3162 }
3163 else
3164 {
3165 obj = entry->copyProperty(property_name);
3166 }
3167 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3168 }
3169
3170 sym->release();
3171 if (!obj) return (kIOReturnNotFound);
3172
3173 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3174 if( !s) {
3175 obj->release();
3176 return( kIOReturnNoMemory );
3177 }
3178
3179 if( obj->serialize( s )) {
3180 len = s->getLength();
3181 *propertiesCnt = len;
3182 err = copyoutkdata( s->text(), len, properties );
3183
3184 } else err = kIOReturnUnsupported;
3185
3186 s->release();
3187 obj->release();
3188
3189 return( err );
3190 }
3191
3192
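/*
 * Copies the out-of-line XML property blob into the kernel map, unserializes
 * it, runs the MACF set-properties check, and applies it with
 * IORegistryEntry::setProperties(). Per MIG convention the function returns
 * the copyout status; the IOReturn from the operation itself comes back in
 * *result.
 */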
3193 /* Routine io_registry_entry_set_properties */
3194 kern_return_t is_io_registry_entry_set_properties
3195 (
3196 io_object_t registry_entry,
3197 io_buf_ptr_t properties,
3198 mach_msg_type_number_t propertiesCnt,
3199 kern_return_t * result)
3200 {
3201 OSObject * obj;
3202 kern_return_t err;
3203 IOReturn res;
3204 vm_offset_t data;
3205 vm_map_offset_t map_data;
3206
3207 CHECK( IORegistryEntry, registry_entry, entry );
3208
3209 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3210 return( kIOReturnMessageTooLarge);
3211
3212 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3213 data = CAST_DOWN(vm_offset_t, map_data);
3214
3215 if( KERN_SUCCESS == err) {
3216
3217 FAKE_STACK_FRAME(entry->getMetaClass());
3218
3219 // must return success after vm_map_copyout() succeeds
3220 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3221 vm_deallocate( kernel_map, data, propertiesCnt );
3222
3223 if (!obj)
3224 res = kIOReturnBadArgument;
3225 #if CONFIG_MACF
3226 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3227 registry_entry, obj))
3228 {
3229 res = kIOReturnNotPermitted;
3230 }
3231 #endif
3232 else
3233 {
3234 res = entry->setProperties( obj );
3235 }
3236
3237 if (obj)
3238 obj->release();
3239
3240 FAKE_STACK_FRAME_END();
3241
3242 } else
3243 res = err;
3244
3245 *result = res;
3246 return( err );
3247 }
3248
3249 /* Routine io_registry_entry_get_child_iterator */
3250 kern_return_t is_io_registry_entry_get_child_iterator(
3251 io_object_t registry_entry,
3252 io_name_t plane,
3253 io_object_t *iterator )
3254 {
3255 CHECK( IORegistryEntry, registry_entry, entry );
3256
3257 *iterator = entry->getChildIterator(
3258 IORegistryEntry::getPlane( plane ));
3259
3260 return( kIOReturnSuccess );
3261 }
3262
3263 /* Routine io_registry_entry_get_parent_iterator */
3264 kern_return_t is_io_registry_entry_get_parent_iterator(
3265 io_object_t registry_entry,
3266 io_name_t plane,
3267 io_object_t *iterator)
3268 {
3269 CHECK( IORegistryEntry, registry_entry, entry );
3270
3271 *iterator = entry->getParentIterator(
3272 IORegistryEntry::getPlane( plane ));
3273
3274 return( kIOReturnSuccess );
3275 }
3276
3277 /* Routine io_service_get_busy_state */
3278 kern_return_t is_io_service_get_busy_state(
3279 io_object_t _service,
3280 uint32_t *busyState )
3281 {
3282 CHECK( IOService, _service, service );
3283
3284 *busyState = service->getBusyState();
3285
3286 return( kIOReturnSuccess );
3287 }
3288
3289 /* Routine io_service_get_state */
3290 kern_return_t is_io_service_get_state(
3291 io_object_t _service,
3292 uint64_t *state,
3293 uint32_t *busy_state,
3294 uint64_t *accumulated_busy_time )
3295 {
3296 CHECK( IOService, _service, service );
3297
3298 *state = service->getState();
3299 *busy_state = service->getBusyState();
3300 *accumulated_busy_time = service->getAccumulatedBusyTime();
3301
3302 return( kIOReturnSuccess );
3303 }
3304
3305 /* Routine io_service_wait_quiet */
3306 kern_return_t is_io_service_wait_quiet(
3307 io_object_t _service,
3308 mach_timespec_t wait_time )
3309 {
3310 uint64_t timeoutNS;
3311
3312 CHECK( IOService, _service, service );
3313
3314 timeoutNS = wait_time.tv_sec;
3315 timeoutNS *= kSecondScale;
3316 timeoutNS += wait_time.tv_nsec;
3317
3318 return( service->waitQuiet(timeoutNS) );
3319 }
3320
3321 /* Routine io_service_request_probe */
3322 kern_return_t is_io_service_request_probe(
3323 io_object_t _service,
3324 uint32_t options )
3325 {
3326 CHECK( IOService, _service, service );
3327
3328 return( service->requestProbe( options ));
3329 }
3330
3331 /* Routine io_service_get_authorization_id */
3332 kern_return_t is_io_service_get_authorization_id(
3333 io_object_t _service,
3334 uint64_t *authorization_id )
3335 {
3336 kern_return_t kr;
3337
3338 CHECK( IOService, _service, service );
3339
3340 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3341 kIOClientPrivilegeAdministrator );
3342 if( kIOReturnSuccess != kr)
3343 return( kr );
3344
3345 *authorization_id = service->getAuthorizationID();
3346
3347 return( kr );
3348 }
3349
3350 /* Routine io_service_set_authorization_id */
3351 kern_return_t is_io_service_set_authorization_id(
3352 io_object_t _service,
3353 uint64_t authorization_id )
3354 {
3355 CHECK( IOService, _service, service );
3356
3357 return( service->setAuthorizationID( authorization_id ) );
3358 }
3359
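/*
 * Kernel side of the service open path (the user-space entry point is
 * IOServiceOpen() in IOKitLib - an assumption, not stated in this file).
 * Optionally unserializes a size-limited property dictionary, records the
 * client's NDR when its integer representation differs from the kernel's
 * (cross-endian), asks the provider for a user client via newUserClient(),
 * applies the MACF open check, registers the owning task, and tags the client
 * with the creating process's name. KERN_SUCCESS is returned to MIG while the
 * real status travels in *result.
 */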
3360 /* Routine io_service_open_extended */
3361 kern_return_t is_io_service_open_extended(
3362 io_object_t _service,
3363 task_t owningTask,
3364 uint32_t connect_type,
3365 NDR_record_t ndr,
3366 io_buf_ptr_t properties,
3367 mach_msg_type_number_t propertiesCnt,
3368 kern_return_t * result,
3369 io_object_t *connection )
3370 {
3371 IOUserClient * client = 0;
3372 kern_return_t err = KERN_SUCCESS;
3373 IOReturn res = kIOReturnSuccess;
3374 OSDictionary * propertiesDict = 0;
3375 bool crossEndian;
3376 bool disallowAccess;
3377
3378 CHECK( IOService, _service, service );
3379
3380 if (!owningTask) return (kIOReturnBadArgument);
3381 assert(owningTask == current_task());
3382 if (owningTask != current_task()) return (kIOReturnBadArgument);
3383
3384 do
3385 {
3386 if (properties)
3387 {
3388 OSObject * obj;
3389 vm_offset_t data;
3390 vm_map_offset_t map_data;
3391
3392 if( propertiesCnt > sizeof(io_struct_inband_t))
3393 return( kIOReturnMessageTooLarge);
3394
3395 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3396 res = err;
3397 data = CAST_DOWN(vm_offset_t, map_data);
3398 if (KERN_SUCCESS == err)
3399 {
3400 // must return success after vm_map_copyout() succeeds
3401 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3402 vm_deallocate( kernel_map, data, propertiesCnt );
3403 propertiesDict = OSDynamicCast(OSDictionary, obj);
3404 if (!propertiesDict)
3405 {
3406 res = kIOReturnBadArgument;
3407 if (obj)
3408 obj->release();
3409 }
3410 }
3411 if (kIOReturnSuccess != res)
3412 break;
3413 }
3414
3415 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3416 if (crossEndian)
3417 {
3418 if (!propertiesDict)
3419 propertiesDict = OSDictionary::withCapacity(4);
3420 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3421 if (data)
3422 {
3423 if (propertiesDict)
3424 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3425 data->release();
3426 }
3427 }
3428
3429 res = service->newUserClient( owningTask, (void *) owningTask,
3430 connect_type, propertiesDict, &client );
3431
3432 if (propertiesDict)
3433 propertiesDict->release();
3434
3435 if (res == kIOReturnSuccess)
3436 {
3437 assert( OSDynamicCast(IOUserClient, client) );
3438
3439 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3440 client->closed = false;
3441
3442 disallowAccess = (crossEndian
3443 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3444 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3445 if (disallowAccess) res = kIOReturnUnsupported;
3446 #if CONFIG_MACF
3447 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3448 res = kIOReturnNotPermitted;
3449 #endif
3450
3451 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3452
3453 if (kIOReturnSuccess != res)
3454 {
3455 IOStatisticsClientCall();
3456 client->clientClose();
3457 client->release();
3458 client = 0;
3459 break;
3460 }
3461 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3462 if (creatorName)
3463 {
3464 client->setProperty(kIOUserClientCreatorKey, creatorName);
3465 creatorName->release();
3466 }
3467 client->setTerminateDefer(service, false);
3468 }
3469 }
3470 while (false);
3471
3472 *connection = client;
3473 *result = res;
3474
3475 return (err);
3476 }
3477
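/*
 * Closes a connection. Shared-instance clients may be closed repeatedly; for
 * exclusive clients an atomic compare-and-swap on 'closed' ensures
 * clientClose() runs only once, and duplicate closes are merely logged.
 */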
3478 /* Routine io_service_close */
3479 kern_return_t is_io_service_close(
3480 io_object_t connection )
3481 {
3482 OSSet * mappings;
3483 if ((mappings = OSDynamicCast(OSSet, connection)))
3484 return( kIOReturnSuccess );
3485
3486 CHECK( IOUserClient, connection, client );
3487
3488 IOStatisticsClientCall();
3489
3490 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3491 {
3492 client->clientClose();
3493 }
3494 else
3495 {
3496 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3497 client->getRegistryEntryID(), client->getName());
3498 }
3499
3500 return( kIOReturnSuccess );
3501 }
3502
3503 /* Routine io_connect_get_service */
3504 kern_return_t is_io_connect_get_service(
3505 io_object_t connection,
3506 io_object_t *service )
3507 {
3508 IOService * theService;
3509
3510 CHECK( IOUserClient, connection, client );
3511
3512 theService = client->getService();
3513 if( theService)
3514 theService->retain();
3515
3516 *service = theService;
3517
3518 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3519 }
3520
3521 /* Routine io_connect_set_notification_port */
3522 kern_return_t is_io_connect_set_notification_port(
3523 io_object_t connection,
3524 uint32_t notification_type,
3525 mach_port_t port,
3526 uint32_t reference)
3527 {
3528 CHECK( IOUserClient, connection, client );
3529
3530 IOStatisticsClientCall();
3531 return( client->registerNotificationPort( port, notification_type,
3532 (io_user_reference_t) reference ));
3533 }
3534
3535 /* Routine io_connect_set_notification_port */
3536 kern_return_t is_io_connect_set_notification_port_64(
3537 io_object_t connection,
3538 uint32_t notification_type,
3539 mach_port_t port,
3540 io_user_reference_t reference)
3541 {
3542 CHECK( IOUserClient, connection, client );
3543
3544 IOStatisticsClientCall();
3545 return( client->registerNotificationPort( port, notification_type,
3546 reference ));
3547 }
3548
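/*
 * Maps one of the client's memory types into 'into_task' via
 * mapClientMemory64(). For shared-instance clients, or when mapping into a
 * task other than the caller, a send right is pushed to the owning task so the
 * mapping can be torn down with the port; otherwise the map is parked in
 * client->mappings and stays with the user client.
 */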
3549 /* Routine io_connect_map_memory_into_task */
3550 kern_return_t is_io_connect_map_memory_into_task
3551 (
3552 io_connect_t connection,
3553 uint32_t memory_type,
3554 task_t into_task,
3555 mach_vm_address_t *address,
3556 mach_vm_size_t *size,
3557 uint32_t flags
3558 )
3559 {
3560 IOReturn err;
3561 IOMemoryMap * map;
3562
3563 CHECK( IOUserClient, connection, client );
3564
3565 if (!into_task) return (kIOReturnBadArgument);
3566
3567 IOStatisticsClientCall();
3568 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3569
3570 if( map) {
3571 *address = map->getAddress();
3572 if( size)
3573 *size = map->getSize();
3574
3575 if( client->sharedInstance
3576 || (into_task != current_task())) {
3577 // push a name out to the task owning the map,
3578 // so we can clean up maps
3579 mach_port_name_t name __unused =
3580 IOMachPort::makeSendRightForTask(
3581 into_task, map, IKOT_IOKIT_OBJECT );
3582
3583 } else {
3584 // keep it with the user client
3585 IOLockLock( gIOObjectPortLock);
3586 if( 0 == client->mappings)
3587 client->mappings = OSSet::withCapacity(2);
3588 if( client->mappings)
3589 client->mappings->setObject( map);
3590 IOLockUnlock( gIOObjectPortLock);
3591 map->release();
3592 }
3593 err = kIOReturnSuccess;
3594
3595 } else
3596 err = kIOReturnBadArgument;
3597
3598 return( err );
3599 }
3600
3601 /* Routine io_connect_map_memory */
3602 kern_return_t is_io_connect_map_memory(
3603 io_object_t connect,
3604 uint32_t type,
3605 task_t task,
3606 uint32_t * mapAddr,
3607 uint32_t * mapSize,
3608 uint32_t flags )
3609 {
3610 IOReturn err;
3611 mach_vm_address_t address;
3612 mach_vm_size_t size;
3613
3614 address = SCALAR64(*mapAddr);
3615 size = SCALAR64(*mapSize);
3616
3617 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3618
3619 *mapAddr = SCALAR32(address);
3620 *mapSize = SCALAR32(size);
3621
3622 return (err);
3623 }
3624
3625 } /* extern "C" */
3626
3627 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3628 {
3629 OSIterator * iter;
3630 IOMemoryMap * map = 0;
3631
3632 IOLockLock(gIOObjectPortLock);
3633
3634 iter = OSCollectionIterator::withCollection(mappings);
3635 if(iter)
3636 {
3637 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3638 {
3639 if(mem == map->getMemoryDescriptor())
3640 {
3641 map->retain();
3642 mappings->removeObject(map);
3643 break;
3644 }
3645 }
3646 iter->release();
3647 }
3648
3649 IOLockUnlock(gIOObjectPortLock);
3650
3651 return (map);
3652 }
3653
3654 extern "C" {
3655
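/*
 * Undoes is_io_connect_map_memory_into_task(): looks the mapping up again with
 * a kIOMapReference mapping at the given address, removes it from the client's
 * bookkeeping, and tears down the send right / port association that was
 * keeping the mapping alive in the target task.
 */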
3656 /* Routine io_connect_unmap_memory_from_task */
3657 kern_return_t is_io_connect_unmap_memory_from_task
3658 (
3659 io_connect_t connection,
3660 uint32_t memory_type,
3661 task_t from_task,
3662 mach_vm_address_t address)
3663 {
3664 IOReturn err;
3665 IOOptionBits options = 0;
3666 IOMemoryDescriptor * memory;
3667 IOMemoryMap * map;
3668
3669 CHECK( IOUserClient, connection, client );
3670
3671 if (!from_task) return (kIOReturnBadArgument);
3672
3673 IOStatisticsClientCall();
3674 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3675
3676 if( memory && (kIOReturnSuccess == err)) {
3677
3678 options = (options & ~kIOMapUserOptionsMask)
3679 | kIOMapAnywhere | kIOMapReference;
3680
3681 map = memory->createMappingInTask( from_task, address, options );
3682 memory->release();
3683 if( map)
3684 {
3685 IOLockLock( gIOObjectPortLock);
3686 if( client->mappings)
3687 client->mappings->removeObject( map);
3688 IOLockUnlock( gIOObjectPortLock);
3689
3690 mach_port_name_t name = 0;
3691 if (from_task != current_task())
3692 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3693 if (name)
3694 {
3695 map->userClientUnmap();
3696 err = iokit_mod_send_right( from_task, name, -2 );
3697 err = kIOReturnSuccess;
3698 }
3699 else
3700 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3701 if (from_task == current_task())
3702 map->release();
3703 }
3704 else
3705 err = kIOReturnBadArgument;
3706 }
3707
3708 return( err );
3709 }
3710
3711 kern_return_t is_io_connect_unmap_memory(
3712 io_object_t connect,
3713 uint32_t type,
3714 task_t task,
3715 uint32_t mapAddr )
3716 {
3717 IOReturn err;
3718 mach_vm_address_t address;
3719
3720 address = SCALAR64(mapAddr);
3721
3722 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3723
3724 return (err);
3725 }
3726
3727
3728 /* Routine io_connect_add_client */
3729 kern_return_t is_io_connect_add_client(
3730 io_object_t connection,
3731 io_object_t connect_to)
3732 {
3733 CHECK( IOUserClient, connection, client );
3734 CHECK( IOUserClient, connect_to, to );
3735
3736 IOStatisticsClientCall();
3737 return( client->connectClient( to ) );
3738 }
3739
3740
3741 /* Routine io_connect_set_properties */
3742 kern_return_t is_io_connect_set_properties(
3743 io_object_t connection,
3744 io_buf_ptr_t properties,
3745 mach_msg_type_number_t propertiesCnt,
3746 kern_return_t * result)
3747 {
3748 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3749 }
3750
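/*
 * Variable-sized-output flavor of the external method call: the target method
 * hands back an OSSerialize or OSData through structureVariableOutputData and
 * its contents are copied out to the caller as a vm-allocated buffer. Any
 * other (or missing) output object is reported as kIOReturnUnderrun.
 */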
3751 /* Routine io_connect_method_var_output */
3752 kern_return_t is_io_connect_method_var_output
3753 (
3754 io_connect_t connection,
3755 uint32_t selector,
3756 io_scalar_inband64_t scalar_input,
3757 mach_msg_type_number_t scalar_inputCnt,
3758 io_struct_inband_t inband_input,
3759 mach_msg_type_number_t inband_inputCnt,
3760 mach_vm_address_t ool_input,
3761 mach_vm_size_t ool_input_size,
3762 io_struct_inband_t inband_output,
3763 mach_msg_type_number_t *inband_outputCnt,
3764 io_scalar_inband64_t scalar_output,
3765 mach_msg_type_number_t *scalar_outputCnt,
3766 io_buf_ptr_t *var_output,
3767 mach_msg_type_number_t *var_outputCnt
3768 )
3769 {
3770 CHECK( IOUserClient, connection, client );
3771
3772 IOExternalMethodArguments args;
3773 IOReturn ret;
3774 IOMemoryDescriptor * inputMD = 0;
3775 OSObject * structureVariableOutputData = 0;
3776
3777 bzero(&args.__reserved[0], sizeof(args.__reserved));
3778 args.__reservedA = 0;
3779 args.version = kIOExternalMethodArgumentsCurrentVersion;
3780
3781 args.selector = selector;
3782
3783 args.asyncWakePort = MACH_PORT_NULL;
3784 args.asyncReference = 0;
3785 args.asyncReferenceCount = 0;
3786 args.structureVariableOutputData = &structureVariableOutputData;
3787
3788 args.scalarInput = scalar_input;
3789 args.scalarInputCount = scalar_inputCnt;
3790 args.structureInput = inband_input;
3791 args.structureInputSize = inband_inputCnt;
3792
3793 if (ool_input)
3794 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3795 kIODirectionOut, current_task());
3796
3797 args.structureInputDescriptor = inputMD;
3798
3799 args.scalarOutput = scalar_output;
3800 args.scalarOutputCount = *scalar_outputCnt;
3801 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3802 args.structureOutput = inband_output;
3803 args.structureOutputSize = *inband_outputCnt;
3804 args.structureOutputDescriptor = NULL;
3805 args.structureOutputDescriptorSize = 0;
3806
3807 IOStatisticsClientCall();
3808 ret = client->externalMethod( selector, &args );
3809
3810 *scalar_outputCnt = args.scalarOutputCount;
3811 *inband_outputCnt = args.structureOutputSize;
3812
3813 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3814 {
3815 OSSerialize * serialize;
3816 OSData * data;
3817 vm_size_t len;
3818
3819 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3820 {
3821 len = serialize->getLength();
3822 *var_outputCnt = len;
3823 ret = copyoutkdata(serialize->text(), len, var_output);
3824 }
3825 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3826 {
3827 len = data->getLength();
3828 *var_outputCnt = len;
3829 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3830 }
3831 else
3832 {
3833 ret = kIOReturnUnderrun;
3834 }
3835 }
3836
3837 if (inputMD)
3838 inputMD->release();
3839 if (structureVariableOutputData)
3840 structureVariableOutputData->release();
3841
3842 return (ret);
3843 }
3844
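/*
 * Primary external method entry point: marshals inband scalars/structures and
 * optional out-of-line buffers (wrapped in IOMemoryDescriptors) into
 * IOExternalMethodArguments and dispatches through
 * IOUserClient::externalMethod(). Typically reached from user space via
 * IOConnectCallMethod() and friends in IOKitLib; that mapping is an
 * assumption, not spelled out in this file.
 */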
3845 /* Routine io_connect_method */
3846 kern_return_t is_io_connect_method
3847 (
3848 io_connect_t connection,
3849 uint32_t selector,
3850 io_scalar_inband64_t scalar_input,
3851 mach_msg_type_number_t scalar_inputCnt,
3852 io_struct_inband_t inband_input,
3853 mach_msg_type_number_t inband_inputCnt,
3854 mach_vm_address_t ool_input,
3855 mach_vm_size_t ool_input_size,
3856 io_struct_inband_t inband_output,
3857 mach_msg_type_number_t *inband_outputCnt,
3858 io_scalar_inband64_t scalar_output,
3859 mach_msg_type_number_t *scalar_outputCnt,
3860 mach_vm_address_t ool_output,
3861 mach_vm_size_t *ool_output_size
3862 )
3863 {
3864 CHECK( IOUserClient, connection, client );
3865
3866 IOExternalMethodArguments args;
3867 IOReturn ret;
3868 IOMemoryDescriptor * inputMD = 0;
3869 IOMemoryDescriptor * outputMD = 0;
3870
3871 bzero(&args.__reserved[0], sizeof(args.__reserved));
3872 args.__reservedA = 0;
3873 args.version = kIOExternalMethodArgumentsCurrentVersion;
3874
3875 args.selector = selector;
3876
3877 args.asyncWakePort = MACH_PORT_NULL;
3878 args.asyncReference = 0;
3879 args.asyncReferenceCount = 0;
3880 args.structureVariableOutputData = 0;
3881
3882 args.scalarInput = scalar_input;
3883 args.scalarInputCount = scalar_inputCnt;
3884 args.structureInput = inband_input;
3885 args.structureInputSize = inband_inputCnt;
3886
3887 if (ool_input)
3888 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3889 kIODirectionOut, current_task());
3890
3891 args.structureInputDescriptor = inputMD;
3892
3893 args.scalarOutput = scalar_output;
3894 args.scalarOutputCount = *scalar_outputCnt;
3895 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3896 args.structureOutput = inband_output;
3897 args.structureOutputSize = *inband_outputCnt;
3898
3899 if (ool_output && ool_output_size)
3900 {
3901 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3902 kIODirectionIn, current_task());
3903 }
3904
3905 args.structureOutputDescriptor = outputMD;
3906 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3907
3908 IOStatisticsClientCall();
3909 ret = client->externalMethod( selector, &args );
3910
3911 *scalar_outputCnt = args.scalarOutputCount;
3912 *inband_outputCnt = args.structureOutputSize;
3913 *ool_output_size = args.structureOutputDescriptorSize;
3914
3915 if (inputMD)
3916 inputMD->release();
3917 if (outputMD)
3918 outputMD->release();
3919
3920 return (ret);
3921 }
3922
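/*
 * Async variant: the wake port is packed into reference[0] (tagged with
 * kIOUCAsync64Flag for 64-bit tasks) so completion delivery can find it later,
 * then the call proceeds like is_io_connect_method above.
 */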
3923 /* Routine io_connect_async_method */
3924 kern_return_t is_io_connect_async_method
3925 (
3926 io_connect_t connection,
3927 mach_port_t wake_port,
3928 io_async_ref64_t reference,
3929 mach_msg_type_number_t referenceCnt,
3930 uint32_t selector,
3931 io_scalar_inband64_t scalar_input,
3932 mach_msg_type_number_t scalar_inputCnt,
3933 io_struct_inband_t inband_input,
3934 mach_msg_type_number_t inband_inputCnt,
3935 mach_vm_address_t ool_input,
3936 mach_vm_size_t ool_input_size,
3937 io_struct_inband_t inband_output,
3938 mach_msg_type_number_t *inband_outputCnt,
3939 io_scalar_inband64_t scalar_output,
3940 mach_msg_type_number_t *scalar_outputCnt,
3941 mach_vm_address_t ool_output,
3942 mach_vm_size_t * ool_output_size
3943 )
3944 {
3945 CHECK( IOUserClient, connection, client );
3946
3947 IOExternalMethodArguments args;
3948 IOReturn ret;
3949 IOMemoryDescriptor * inputMD = 0;
3950 IOMemoryDescriptor * outputMD = 0;
3951
3952 bzero(&args.__reserved[0], sizeof(args.__reserved));
3953 args.__reservedA = 0;
3954 args.version = kIOExternalMethodArgumentsCurrentVersion;
3955
3956 reference[0] = (io_user_reference_t) wake_port;
3957 if (vm_map_is_64bit(get_task_map(current_task())))
3958 reference[0] |= kIOUCAsync64Flag;
3959
3960 args.selector = selector;
3961
3962 args.asyncWakePort = wake_port;
3963 args.asyncReference = reference;
3964 args.asyncReferenceCount = referenceCnt;
3965
3966 args.scalarInput = scalar_input;
3967 args.scalarInputCount = scalar_inputCnt;
3968 args.structureInput = inband_input;
3969 args.structureInputSize = inband_inputCnt;
3970
3971 if (ool_input)
3972 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3973 kIODirectionOut, current_task());
3974
3975 args.structureInputDescriptor = inputMD;
3976
3977 args.scalarOutput = scalar_output;
3978 args.scalarOutputCount = *scalar_outputCnt;
3979 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3980 args.structureOutput = inband_output;
3981 args.structureOutputSize = *inband_outputCnt;
3982
3983 if (ool_output)
3984 {
3985 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3986 kIODirectionIn, current_task());
3987 }
3988
3989 args.structureOutputDescriptor = outputMD;
3990 args.structureOutputDescriptorSize = *ool_output_size;
3991
3992 IOStatisticsClientCall();
3993 ret = client->externalMethod( selector, &args );
3994
3995 *inband_outputCnt = args.structureOutputSize;
3996 *ool_output_size = args.structureOutputDescriptorSize;
3997
3998 if (inputMD)
3999 inputMD->release();
4000 if (outputMD)
4001 outputMD->release();
4002
4003 return (ret);
4004 }
4005
4006 /* Routine io_connect_method_scalarI_scalarO */
4007 kern_return_t is_io_connect_method_scalarI_scalarO(
4008 io_object_t connect,
4009 uint32_t index,
4010 io_scalar_inband_t input,
4011 mach_msg_type_number_t inputCount,
4012 io_scalar_inband_t output,
4013 mach_msg_type_number_t * outputCount )
4014 {
4015 IOReturn err;
4016 uint32_t i;
4017 io_scalar_inband64_t _input;
4018 io_scalar_inband64_t _output;
4019
4020 mach_msg_type_number_t struct_outputCnt = 0;
4021 mach_vm_size_t ool_output_size = 0;
4022
4023 bzero(&_output[0], sizeof(_output));
4024 for (i = 0; i < inputCount; i++)
4025 _input[i] = SCALAR64(input[i]);
4026
4027 err = is_io_connect_method(connect, index,
4028 _input, inputCount,
4029 NULL, 0,
4030 0, 0,
4031 NULL, &struct_outputCnt,
4032 _output, outputCount,
4033 0, &ool_output_size);
4034
4035 for (i = 0; i < *outputCount; i++)
4036 output[i] = SCALAR32(_output[i]);
4037
4038 return (err);
4039 }
4040
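/*
 * Legacy shim: validates the scalar counts against the IOExternalMethod table
 * (logging and firing a DTrace probe on mismatch) and dispatches the old-style
 * IOMethod with the right arity, passing inputs through ARG32 and collecting
 * up to six 32-bit outputs.
 */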
4041 kern_return_t shim_io_connect_method_scalarI_scalarO(
4042 IOExternalMethod * method,
4043 IOService * object,
4044 const io_user_scalar_t * input,
4045 mach_msg_type_number_t inputCount,
4046 io_user_scalar_t * output,
4047 mach_msg_type_number_t * outputCount )
4048 {
4049 IOMethod func;
4050 io_scalar_inband_t _output;
4051 IOReturn err;
4052 err = kIOReturnBadArgument;
4053
4054 bzero(&_output[0], sizeof(_output));
4055 do {
4056
4057 if( inputCount != method->count0)
4058 {
4059 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4060 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4061 continue;
4062 }
4063 if( *outputCount != method->count1)
4064 {
4065 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4066 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4067 continue;
4068 }
4069
4070 func = method->func;
4071
4072 switch( inputCount) {
4073
4074 case 6:
4075 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4076 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4077 break;
4078 case 5:
4079 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4080 ARG32(input[3]), ARG32(input[4]),
4081 &_output[0] );
4082 break;
4083 case 4:
4084 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4085 ARG32(input[3]),
4086 &_output[0], &_output[1] );
4087 break;
4088 case 3:
4089 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4090 &_output[0], &_output[1], &_output[2] );
4091 break;
4092 case 2:
4093 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4094 &_output[0], &_output[1], &_output[2],
4095 &_output[3] );
4096 break;
4097 case 1:
4098 err = (object->*func)( ARG32(input[0]),
4099 &_output[0], &_output[1], &_output[2],
4100 &_output[3], &_output[4] );
4101 break;
4102 case 0:
4103 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4104 &_output[3], &_output[4], &_output[5] );
4105 break;
4106
4107 default:
4108 IOLog("%s: Bad method table\n", object->getName());
4109 }
4110 }
4111 while( false);
4112
4113 uint32_t i;
4114 for (i = 0; i < *outputCount; i++)
4115 output[i] = SCALAR32(_output[i]);
4116
4117 return( err);
4118 }
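/*
 * The shim_* routines adapt legacy IOExternalMethod/IOExternalAsyncMethod table
 * entries, whose IOMethod targets take up to six untyped arguments, to the
 * 64-bit scalar marshalling above: the first count0 slots carry inputs
 * truncated with ARG32(), the remaining slots receive pointers into the scalar
 * output buffer, and the do { ... } while (false) wrapper is simply a
 * structured way to bail out early on a count mismatch.
 */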
4119
4120 /* Routine io_async_method_scalarI_scalarO */
4121 kern_return_t is_io_async_method_scalarI_scalarO(
4122 io_object_t connect,
4123 mach_port_t wake_port,
4124 io_async_ref_t reference,
4125 mach_msg_type_number_t referenceCnt,
4126 uint32_t index,
4127 io_scalar_inband_t input,
4128 mach_msg_type_number_t inputCount,
4129 io_scalar_inband_t output,
4130 mach_msg_type_number_t * outputCount )
4131 {
4132 IOReturn err;
4133 uint32_t i;
4134 io_scalar_inband64_t _input;
4135 io_scalar_inband64_t _output;
4136 io_async_ref64_t _reference;
4137
4138 bzero(&_output[0], sizeof(_output));
4139 for (i = 0; i < referenceCnt; i++)
4140 _reference[i] = REF64(reference[i]);
4141
4142 mach_msg_type_number_t struct_outputCnt = 0;
4143 mach_vm_size_t ool_output_size = 0;
4144
4145 for (i = 0; i < inputCount; i++)
4146 _input[i] = SCALAR64(input[i]);
4147
4148 err = is_io_connect_async_method(connect,
4149 wake_port, _reference, referenceCnt,
4150 index,
4151 _input, inputCount,
4152 NULL, 0,
4153 0, 0,
4154 NULL, &struct_outputCnt,
4155 _output, outputCount,
4156 0, &ool_output_size);
4157
4158 for (i = 0; i < *outputCount; i++)
4159 output[i] = SCALAR32(_output[i]);
4160
4161 return (err);
4162 }
4163 /* Routine io_async_method_scalarI_structureO */
4164 kern_return_t is_io_async_method_scalarI_structureO(
4165 io_object_t connect,
4166 mach_port_t wake_port,
4167 io_async_ref_t reference,
4168 mach_msg_type_number_t referenceCnt,
4169 uint32_t index,
4170 io_scalar_inband_t input,
4171 mach_msg_type_number_t inputCount,
4172 io_struct_inband_t output,
4173 mach_msg_type_number_t * outputCount )
4174 {
4175 uint32_t i;
4176 io_scalar_inband64_t _input;
4177 io_async_ref64_t _reference;
4178
4179 for (i = 0; i < referenceCnt; i++)
4180 _reference[i] = REF64(reference[i]);
4181
4182 mach_msg_type_number_t scalar_outputCnt = 0;
4183 mach_vm_size_t ool_output_size = 0;
4184
4185 for (i = 0; i < inputCount; i++)
4186 _input[i] = SCALAR64(input[i]);
4187
4188 return (is_io_connect_async_method(connect,
4189 wake_port, _reference, referenceCnt,
4190 index,
4191 _input, inputCount,
4192 NULL, 0,
4193 0, 0,
4194 output, outputCount,
4195 NULL, &scalar_outputCnt,
4196 0, &ool_output_size));
4197 }
4198
4199 /* Routine io_async_method_scalarI_structureI */
4200 kern_return_t is_io_async_method_scalarI_structureI(
4201 io_connect_t connect,
4202 mach_port_t wake_port,
4203 io_async_ref_t reference,
4204 mach_msg_type_number_t referenceCnt,
4205 uint32_t index,
4206 io_scalar_inband_t input,
4207 mach_msg_type_number_t inputCount,
4208 io_struct_inband_t inputStruct,
4209 mach_msg_type_number_t inputStructCount )
4210 {
4211 uint32_t i;
4212 io_scalar_inband64_t _input;
4213 io_async_ref64_t _reference;
4214
4215 for (i = 0; i < referenceCnt; i++)
4216 _reference[i] = REF64(reference[i]);
4217
4218 mach_msg_type_number_t scalar_outputCnt = 0;
4219 mach_msg_type_number_t inband_outputCnt = 0;
4220 mach_vm_size_t ool_output_size = 0;
4221
4222 for (i = 0; i < inputCount; i++)
4223 _input[i] = SCALAR64(input[i]);
4224
4225 return (is_io_connect_async_method(connect,
4226 wake_port, _reference, referenceCnt,
4227 index,
4228 _input, inputCount,
4229 inputStruct, inputStructCount,
4230 0, 0,
4231 NULL, &inband_outputCnt,
4232 NULL, &scalar_outputCnt,
4233 0, &ool_output_size));
4234 }
4235
4236 /* Routine io_async_method_structureI_structureO */
4237 kern_return_t is_io_async_method_structureI_structureO(
4238 io_object_t connect,
4239 mach_port_t wake_port,
4240 io_async_ref_t reference,
4241 mach_msg_type_number_t referenceCnt,
4242 uint32_t index,
4243 io_struct_inband_t input,
4244 mach_msg_type_number_t inputCount,
4245 io_struct_inband_t output,
4246 mach_msg_type_number_t * outputCount )
4247 {
4248 uint32_t i;
4249 mach_msg_type_number_t scalar_outputCnt = 0;
4250 mach_vm_size_t ool_output_size = 0;
4251 io_async_ref64_t _reference;
4252
4253 for (i = 0; i < referenceCnt; i++)
4254 _reference[i] = REF64(reference[i]);
4255
4256 return (is_io_connect_async_method(connect,
4257 wake_port, _reference, referenceCnt,
4258 index,
4259 NULL, 0,
4260 input, inputCount,
4261 0, 0,
4262 output, outputCount,
4263 NULL, &scalar_outputCnt,
4264 0, &ool_output_size));
4265 }
4266
4267
4268 kern_return_t shim_io_async_method_scalarI_scalarO(
4269 IOExternalAsyncMethod * method,
4270 IOService * object,
4271 mach_port_t asyncWakePort,
4272 io_user_reference_t * asyncReference,
4273 uint32_t asyncReferenceCount,
4274 const io_user_scalar_t * input,
4275 mach_msg_type_number_t inputCount,
4276 io_user_scalar_t * output,
4277 mach_msg_type_number_t * outputCount )
4278 {
4279 IOAsyncMethod func;
4280 uint32_t i;
4281 io_scalar_inband_t _output;
4282 IOReturn err;
4283 io_async_ref_t reference;
4284
4285 bzero(&_output[0], sizeof(_output));
4286 for (i = 0; i < asyncReferenceCount; i++)
4287 reference[i] = REF32(asyncReference[i]);
4288
4289 err = kIOReturnBadArgument;
4290
4291 do {
4292
4293 if( inputCount != method->count0)
4294 {
4295 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4296 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4297 continue;
4298 }
4299 if( *outputCount != method->count1)
4300 {
4301 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4302 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4303 continue;
4304 }
4305
4306 func = method->func;
4307
4308 switch( inputCount) {
4309
4310 case 6:
4311 err = (object->*func)( reference,
4312 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4313 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4314 break;
4315 case 5:
4316 err = (object->*func)( reference,
4317 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4318 ARG32(input[3]), ARG32(input[4]),
4319 &_output[0] );
4320 break;
4321 case 4:
4322 err = (object->*func)( reference,
4323 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4324 ARG32(input[3]),
4325 &_output[0], &_output[1] );
4326 break;
4327 case 3:
4328 err = (object->*func)( reference,
4329 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4330 &_output[0], &_output[1], &_output[2] );
4331 break;
4332 case 2:
4333 err = (object->*func)( reference,
4334 ARG32(input[0]), ARG32(input[1]),
4335 &_output[0], &_output[1], &_output[2],
4336 &_output[3] );
4337 break;
4338 case 1:
4339 err = (object->*func)( reference,
4340 ARG32(input[0]),
4341 &_output[0], &_output[1], &_output[2],
4342 &_output[3], &_output[4] );
4343 break;
4344 case 0:
4345 err = (object->*func)( reference,
4346 &_output[0], &_output[1], &_output[2],
4347 &_output[3], &_output[4], &_output[5] );
4348 break;
4349
4350 default:
4351 IOLog("%s: Bad method table\n", object->getName());
4352 }
4353 }
4354 while( false);
4355
4356 for (i = 0; i < *outputCount; i++)
4357 output[i] = SCALAR32(_output[i]);
4358
4359 return( err);
4360 }
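/*
 * The async shims additionally narrow the caller's io_user_reference_t entries
 * to the legacy io_async_ref_t with REF32() before invoking the IOAsyncMethod,
 * which receives the reference array as its first argument; the driver is then
 * expected to hand that reference back when the operation completes, typically
 * via sendAsyncResult(), so a notification can be delivered on asyncWakePort.
 */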
4361
4362
4363 /* Routine io_connect_method_scalarI_structureO */
4364 kern_return_t is_io_connect_method_scalarI_structureO(
4365 io_object_t connect,
4366 uint32_t index,
4367 io_scalar_inband_t input,
4368 mach_msg_type_number_t inputCount,
4369 io_struct_inband_t output,
4370 mach_msg_type_number_t * outputCount )
4371 {
4372 uint32_t i;
4373 io_scalar_inband64_t _input;
4374
4375 mach_msg_type_number_t scalar_outputCnt = 0;
4376 mach_vm_size_t ool_output_size = 0;
4377
4378 for (i = 0; i < inputCount; i++)
4379 _input[i] = SCALAR64(input[i]);
4380
4381 return (is_io_connect_method(connect, index,
4382 _input, inputCount,
4383 NULL, 0,
4384 0, 0,
4385 output, outputCount,
4386 NULL, &scalar_outputCnt,
4387 0, &ool_output_size));
4388 }
4389
4390 kern_return_t shim_io_connect_method_scalarI_structureO(
4391
4392 IOExternalMethod * method,
4393 IOService * object,
4394 const io_user_scalar_t * input,
4395 mach_msg_type_number_t inputCount,
4396 io_struct_inband_t output,
4397 IOByteCount * outputCount )
4398 {
4399 IOMethod func;
4400 IOReturn err;
4401
4402 err = kIOReturnBadArgument;
4403
4404 do {
4405 if( inputCount != method->count0)
4406 {
4407 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4408 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4409 continue;
4410 }
4411 if( (kIOUCVariableStructureSize != method->count1)
4412 && (*outputCount != method->count1))
4413 {
4414 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4415 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4416 continue;
4417 }
4418
4419 func = method->func;
4420
4421 switch( inputCount) {
4422
4423 case 5:
4424 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4425 ARG32(input[3]), ARG32(input[4]),
4426 output );
4427 break;
4428 case 4:
4429 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4430 ARG32(input[3]),
4431 output, (void *)outputCount );
4432 break;
4433 case 3:
4434 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4435 output, (void *)outputCount, 0 );
4436 break;
4437 case 2:
4438 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4439 output, (void *)outputCount, 0, 0 );
4440 break;
4441 case 1:
4442 err = (object->*func)( ARG32(input[0]),
4443 output, (void *)outputCount, 0, 0, 0 );
4444 break;
4445 case 0:
4446 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4447 break;
4448
4449 default:
4450 IOLog("%s: Bad method table\n", object->getName());
4451 }
4452 }
4453 while( false);
4454
4455 return( err);
4456 }
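/*
 * A count1 of kIOUCVariableStructureSize in the legacy method table marks the
 * output structure as variable-length: the size check above is skipped, and
 * for most input counts the outputCount pointer is threaded through as one of
 * the six untyped arguments so the target can report how many bytes it wrote.
 */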
4457
4458
4459 kern_return_t shim_io_async_method_scalarI_structureO(
4460 IOExternalAsyncMethod * method,
4461 IOService * object,
4462 mach_port_t asyncWakePort,
4463 io_user_reference_t * asyncReference,
4464 uint32_t asyncReferenceCount,
4465 const io_user_scalar_t * input,
4466 mach_msg_type_number_t inputCount,
4467 io_struct_inband_t output,
4468 mach_msg_type_number_t * outputCount )
4469 {
4470 IOAsyncMethod func;
4471 uint32_t i;
4472 IOReturn err;
4473 io_async_ref_t reference;
4474
4475 for (i = 0; i < asyncReferenceCount; i++)
4476 reference[i] = REF32(asyncReference[i]);
4477
4478 err = kIOReturnBadArgument;
4479 do {
4480 if( inputCount != method->count0)
4481 {
4482 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4483 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4484 continue;
4485 }
4486 if( (kIOUCVariableStructureSize != method->count1)
4487 && (*outputCount != method->count1))
4488 {
4489 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4490 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4491 continue;
4492 }
4493
4494 func = method->func;
4495
4496 switch( inputCount) {
4497
4498 case 5:
4499 err = (object->*func)( reference,
4500 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4501 ARG32(input[3]), ARG32(input[4]),
4502 output );
4503 break;
4504 case 4:
4505 err = (object->*func)( reference,
4506 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4507 ARG32(input[3]),
4508 output, (void *)outputCount );
4509 break;
4510 case 3:
4511 err = (object->*func)( reference,
4512 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4513 output, (void *)outputCount, 0 );
4514 break;
4515 case 2:
4516 err = (object->*func)( reference,
4517 ARG32(input[0]), ARG32(input[1]),
4518 output, (void *)outputCount, 0, 0 );
4519 break;
4520 case 1:
4521 err = (object->*func)( reference,
4522 ARG32(input[0]),
4523 output, (void *)outputCount, 0, 0, 0 );
4524 break;
4525 case 0:
4526 err = (object->*func)( reference,
4527 output, (void *)outputCount, 0, 0, 0, 0 );
4528 break;
4529
4530 default:
4531 IOLog("%s: Bad method table\n", object->getName());
4532 }
4533 }
4534 while( false);
4535
4536 return( err);
4537 }
4538
4539 /* Routine io_connect_method_scalarI_structureI */
4540 kern_return_t is_io_connect_method_scalarI_structureI(
4541 io_connect_t connect,
4542 uint32_t index,
4543 io_scalar_inband_t input,
4544 mach_msg_type_number_t inputCount,
4545 io_struct_inband_t inputStruct,
4546 mach_msg_type_number_t inputStructCount )
4547 {
4548 uint32_t i;
4549 io_scalar_inband64_t _input;
4550
4551 mach_msg_type_number_t scalar_outputCnt = 0;
4552 mach_msg_type_number_t inband_outputCnt = 0;
4553 mach_vm_size_t ool_output_size = 0;
4554
4555 for (i = 0; i < inputCount; i++)
4556 _input[i] = SCALAR64(input[i]);
4557
4558 return (is_io_connect_method(connect, index,
4559 _input, inputCount,
4560 inputStruct, inputStructCount,
4561 0, 0,
4562 NULL, &inband_outputCnt,
4563 NULL, &scalar_outputCnt,
4564 0, &ool_output_size));
4565 }
4566
4567 kern_return_t shim_io_connect_method_scalarI_structureI(
4568 IOExternalMethod * method,
4569 IOService * object,
4570 const io_user_scalar_t * input,
4571 mach_msg_type_number_t inputCount,
4572 io_struct_inband_t inputStruct,
4573 mach_msg_type_number_t inputStructCount )
4574 {
4575 IOMethod func;
4576 IOReturn err = kIOReturnBadArgument;
4577
4578 do
4579 {
4580 if (inputCount != method->count0)
4581 {
4582 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4583 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4584 continue;
4585 }
4586 if( (kIOUCVariableStructureSize != method->count1)
4587 && (inputStructCount != method->count1))
4588 {
4589 IOLog("%s:%d %s: IOUserClient inputStructCount mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4590 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4591 continue;
4592 }
4593
4594 func = method->func;
4595
4596 switch( inputCount) {
4597
4598 case 5:
4599 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4600 ARG32(input[3]), ARG32(input[4]),
4601 inputStruct );
4602 break;
4603 case 4:
4604 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4605 ARG32(input[3]),
4606 inputStruct, (void *)(uintptr_t)inputStructCount );
4607 break;
4608 case 3:
4609 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4610 inputStruct, (void *)(uintptr_t)inputStructCount,
4611 0 );
4612 break;
4613 case 2:
4614 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4615 inputStruct, (void *)(uintptr_t)inputStructCount,
4616 0, 0 );
4617 break;
4618 case 1:
4619 err = (object->*func)( ARG32(input[0]),
4620 inputStruct, (void *)(uintptr_t)inputStructCount,
4621 0, 0, 0 );
4622 break;
4623 case 0:
4624 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4625 0, 0, 0, 0 );
4626 break;
4627
4628 default:
4629 IOLog("%s: Bad method table\n", object->getName());
4630 }
4631 }
4632 while (false);
4633
4634 return( err);
4635 }
4636
4637 kern_return_t shim_io_async_method_scalarI_structureI(
4638 IOExternalAsyncMethod * method,
4639 IOService * object,
4640 mach_port_t asyncWakePort,
4641 io_user_reference_t * asyncReference,
4642 uint32_t asyncReferenceCount,
4643 const io_user_scalar_t * input,
4644 mach_msg_type_number_t inputCount,
4645 io_struct_inband_t inputStruct,
4646 mach_msg_type_number_t inputStructCount )
4647 {
4648 IOAsyncMethod func;
4649 uint32_t i;
4650 IOReturn err = kIOReturnBadArgument;
4651 io_async_ref_t reference;
4652
4653 for (i = 0; i < asyncReferenceCount; i++)
4654 reference[i] = REF32(asyncReference[i]);
4655
4656 do
4657 {
4658 if (inputCount != method->count0)
4659 {
4660 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4661 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4662 continue;
4663 }
4664 if( (kIOUCVariableStructureSize != method->count1)
4665 && (inputStructCount != method->count1))
4666 {
4667 IOLog("%s:%d %s: IOUserClient inputStructCount mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4668 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4669 continue;
4670 }
4671
4672 func = method->func;
4673
4674 switch( inputCount) {
4675
4676 case 5:
4677 err = (object->*func)( reference,
4678 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4679 ARG32(input[3]), ARG32(input[4]),
4680 inputStruct );
4681 break;
4682 case 4:
4683 err = (object->*func)( reference,
4684 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4685 ARG32(input[3]),
4686 inputStruct, (void *)(uintptr_t)inputStructCount );
4687 break;
4688 case 3:
4689 err = (object->*func)( reference,
4690 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4691 inputStruct, (void *)(uintptr_t)inputStructCount,
4692 0 );
4693 break;
4694 case 2:
4695 err = (object->*func)( reference,
4696 ARG32(input[0]), ARG32(input[1]),
4697 inputStruct, (void *)(uintptr_t)inputStructCount,
4698 0, 0 );
4699 break;
4700 case 1:
4701 err = (object->*func)( reference,
4702 ARG32(input[0]),
4703 inputStruct, (void *)(uintptr_t)inputStructCount,
4704 0, 0, 0 );
4705 break;
4706 case 0:
4707 err = (object->*func)( reference,
4708 inputStruct, (void *)(uintptr_t)inputStructCount,
4709 0, 0, 0, 0 );
4710 break;
4711
4712 default:
4713 IOLog("%s: Bad method table\n", object->getName());
4714 }
4715 }
4716 while (false);
4717
4718 return( err);
4719 }
4720
4721 /* Routine io_connect_method_structureI_structureO */
4722 kern_return_t is_io_connect_method_structureI_structureO(
4723 io_object_t connect,
4724 uint32_t index,
4725 io_struct_inband_t input,
4726 mach_msg_type_number_t inputCount,
4727 io_struct_inband_t output,
4728 mach_msg_type_number_t * outputCount )
4729 {
4730 mach_msg_type_number_t scalar_outputCnt = 0;
4731 mach_vm_size_t ool_output_size = 0;
4732
4733 return (is_io_connect_method(connect, index,
4734 NULL, 0,
4735 input, inputCount,
4736 0, 0,
4737 output, outputCount,
4738 NULL, &scalar_outputCnt,
4739 0, &ool_output_size));
4740 }
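/*
 * A minimal user-space sketch of the structure-in/structure-out path, assuming
 * a connection from IOServiceOpen(), an illustrative selector of 1, and
 * caller-defined request/reply structs; IOKitLib's IOConnectCallStructMethod()
 * is the modern equivalent of this inband struct exchange:
 *
 *     struct MyRequest { uint32_t op;     } req   = { 7 };
 *     struct MyReply   { uint32_t status; } reply = { 0 };
 *     size_t replySize = sizeof(reply);
 *     kern_return_t kr = IOConnectCallStructMethod(connection, 1,
 *                                                  &req, sizeof(req),
 *                                                  &reply, &replySize);
 */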
4741
4742 kern_return_t shim_io_connect_method_structureI_structureO(
4743 IOExternalMethod * method,
4744 IOService * object,
4745 io_struct_inband_t input,
4746 mach_msg_type_number_t inputCount,
4747 io_struct_inband_t output,
4748 IOByteCount * outputCount )
4749 {
4750 IOMethod func;
4751 IOReturn err = kIOReturnBadArgument;
4752
4753 do
4754 {
4755 if( (kIOUCVariableStructureSize != method->count0)
4756 && (inputCount != method->count0))
4757 {
4758 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4759 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4760 continue;
4761 }
4762 if( (kIOUCVariableStructureSize != method->count1)
4763 && (*outputCount != method->count1))
4764 {
4765 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4766 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4767 continue;
4768 }
4769
4770 func = method->func;
4771
4772 if( method->count1) {
4773 if( method->count0) {
4774 err = (object->*func)( input, output,
4775 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4776 } else {
4777 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4778 }
4779 } else {
4780 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4781 }
4782 }
4783 while( false);
4784
4785
4786 return( err);
4787 }
4788
4789 kern_return_t shim_io_async_method_structureI_structureO(
4790 IOExternalAsyncMethod * method,
4791 IOService * object,
4792 mach_port_t asyncWakePort,
4793 io_user_reference_t * asyncReference,
4794 uint32_t asyncReferenceCount,
4795 io_struct_inband_t input,
4796 mach_msg_type_number_t inputCount,
4797 io_struct_inband_t output,
4798 mach_msg_type_number_t * outputCount )
4799 {
4800 IOAsyncMethod func;
4801 uint32_t i;
4802 IOReturn err;
4803 io_async_ref_t reference;
4804
4805 for (i = 0; i < asyncReferenceCount; i++)
4806 reference[i] = REF32(asyncReference[i]);
4807
4808 err = kIOReturnBadArgument;
4809 do
4810 {
4811 if( (kIOUCVariableStructureSize != method->count0)
4812 && (inputCount != method->count0))
4813 {
4814 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4815 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4816 continue;
4817 }
4818 if( (kIOUCVariableStructureSize != method->count1)
4819 && (*outputCount != method->count1))
4820 {
4821 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4822 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4823 continue;
4824 }
4825
4826 func = method->func;
4827
4828 if( method->count1) {
4829 if( method->count0) {
4830 err = (object->*func)( reference,
4831 input, output,
4832 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4833 } else {
4834 err = (object->*func)( reference,
4835 output, outputCount, 0, 0, 0, 0 );
4836 }
4837 } else {
4838 err = (object->*func)( reference,
4839 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4840 }
4841 }
4842 while( false);
4843
4844 return( err);
4845 }
4846
4847 #if !NO_KEXTD
4848 bool gIOKextdClearedBusy = false;
4849 #endif
4850
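/*
 * The io_catalog_* routines below back the user-level kext/catalogue interface
 * (historically driven by kextd through IOCatalogueSendData() and related
 * IOKitLib calls).  Every one of them requires the caller to present the
 * master device port, so only privileged clients can reach them.
 */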
4851 /* Routine io_catalog_send_data */
4852 kern_return_t is_io_catalog_send_data(
4853 mach_port_t master_port,
4854 uint32_t flag,
4855 io_buf_ptr_t inData,
4856 mach_msg_type_number_t inDataCount,
4857 kern_return_t * result)
4858 {
4859 OSObject * obj = 0;
4860 vm_offset_t data;
4861 kern_return_t kr = kIOReturnError;
4862
4863 //printf("io_catalog_send_data called. flag: %d\n", flag);
4864
4865 if( master_port != master_device_port)
4866 return kIOReturnNotPrivileged;
4867
4868 if( (flag != kIOCatalogRemoveKernelLinker &&
4869 flag != kIOCatalogKextdActive &&
4870 flag != kIOCatalogKextdFinishedLaunching) &&
4871 ( !inData || !inDataCount) )
4872 {
4873 return kIOReturnBadArgument;
4874 }
4875
4876 if (inData) {
4877 vm_map_offset_t map_data;
4878
4879 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4880 return( kIOReturnMessageTooLarge);
4881
4882 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4883 data = CAST_DOWN(vm_offset_t, map_data);
4884
4885 if( kr != KERN_SUCCESS)
4886 return kr;
4887
4888 // must return success after vm_map_copyout() succeeds
4889
4890 if( inDataCount ) {
4891 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4892 vm_deallocate( kernel_map, data, inDataCount );
4893 if( !obj) {
4894 *result = kIOReturnNoMemory;
4895 return( KERN_SUCCESS);
4896 }
4897 }
4898 }
4899
4900 switch ( flag ) {
4901 case kIOCatalogResetDrivers:
4902 case kIOCatalogResetDriversNoMatch: {
4903 OSArray * array;
4904
4905 array = OSDynamicCast(OSArray, obj);
4906 if (array) {
4907 if ( !gIOCatalogue->resetAndAddDrivers(array,
4908 flag == kIOCatalogResetDrivers) ) {
4909
4910 kr = kIOReturnError;
4911 }
4912 } else {
4913 kr = kIOReturnBadArgument;
4914 }
4915 }
4916 break;
4917
4918 case kIOCatalogAddDrivers:
4919 case kIOCatalogAddDriversNoMatch: {
4920 OSArray * array;
4921
4922 array = OSDynamicCast(OSArray, obj);
4923 if ( array ) {
4924 if ( !gIOCatalogue->addDrivers( array ,
4925 flag == kIOCatalogAddDrivers) ) {
4926 kr = kIOReturnError;
4927 }
4928 }
4929 else {
4930 kr = kIOReturnBadArgument;
4931 }
4932 }
4933 break;
4934
4935 case kIOCatalogRemoveDrivers:
4936 case kIOCatalogRemoveDriversNoMatch: {
4937 OSDictionary * dict;
4938
4939 dict = OSDynamicCast(OSDictionary, obj);
4940 if ( dict ) {
4941 if ( !gIOCatalogue->removeDrivers( dict,
4942 flag == kIOCatalogRemoveDrivers ) ) {
4943 kr = kIOReturnError;
4944 }
4945 }
4946 else {
4947 kr = kIOReturnBadArgument;
4948 }
4949 }
4950 break;
4951
4952 case kIOCatalogStartMatching: {
4953 OSDictionary * dict;
4954
4955 dict = OSDynamicCast(OSDictionary, obj);
4956 if ( dict ) {
4957 if ( !gIOCatalogue->startMatching( dict ) ) {
4958 kr = kIOReturnError;
4959 }
4960 }
4961 else {
4962 kr = kIOReturnBadArgument;
4963 }
4964 }
4965 break;
4966
4967 case kIOCatalogRemoveKernelLinker:
4968 kr = KERN_NOT_SUPPORTED;
4969 break;
4970
4971 case kIOCatalogKextdActive:
4972 #if !NO_KEXTD
4973 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
4974 OSKext::setKextdActive();
4975
4976 /* Dump all nonloaded startup extensions; kextd will now send them
4977 * down on request.
4978 */
4979 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
4980 #endif
4981 kr = kIOReturnSuccess;
4982 break;
4983
4984 case kIOCatalogKextdFinishedLaunching: {
4985 #if !NO_KEXTD
4986 if (!gIOKextdClearedBusy) {
4987 IOService * serviceRoot = IOService::getServiceRoot();
4988 if (serviceRoot) {
4989 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
4990 serviceRoot->adjustBusy(-1);
4991 gIOKextdClearedBusy = true;
4992 }
4993 }
4994 #endif
4995 kr = kIOReturnSuccess;
4996 }
4997 break;
4998
4999 default:
5000 kr = kIOReturnBadArgument;
5001 break;
5002 }
5003
5004 if (obj) obj->release();
5005
5006 *result = kr;
5007 return( KERN_SUCCESS);
5008 }
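/*
 * Note on the return convention above: once vm_map_copyout() has consumed the
 * caller's out-of-line data, the MIG routine itself must report KERN_SUCCESS
 * (the copy object has already been taken over); the real status of the
 * catalogue operation is returned through *result instead.
 */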
5009
5010 /* Routine io_catalog_terminate */
5011 kern_return_t is_io_catalog_terminate(
5012 mach_port_t master_port,
5013 uint32_t flag,
5014 io_name_t name )
5015 {
5016 kern_return_t kr;
5017
5018 if( master_port != master_device_port )
5019 return kIOReturnNotPrivileged;
5020
5021 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5022 kIOClientPrivilegeAdministrator );
5023 if( kIOReturnSuccess != kr)
5024 return( kr );
5025
5026 switch ( flag ) {
5027 #if !defined(SECURE_KERNEL)
5028 case kIOCatalogServiceTerminate:
5029 OSIterator * iter;
5030 IOService * service;
5031
5032 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5033 kIORegistryIterateRecursively);
5034 if ( !iter )
5035 return kIOReturnNoMemory;
5036
5037 do {
5038 iter->reset();
5039 while( (service = (IOService *)iter->getNextObject()) ) {
5040 if( service->metaCast(name)) {
5041 if ( !service->terminate( kIOServiceRequired
5042 | kIOServiceSynchronous) ) {
5043 kr = kIOReturnUnsupported;
5044 break;
5045 }
5046 }
5047 }
5048 } while( !service && !iter->isValid());
5049 iter->release();
5050 break;
5051
5052 case kIOCatalogModuleUnload:
5053 case kIOCatalogModuleTerminate:
5054 kr = gIOCatalogue->terminateDriversForModule(name,
5055 flag == kIOCatalogModuleUnload);
5056 break;
5057 #endif
5058
5059 default:
5060 kr = kIOReturnBadArgument;
5061 break;
5062 }
5063
5064 return( kr );
5065 }
5066
5067 /* Routine io_catalog_get_data */
5068 kern_return_t is_io_catalog_get_data(
5069 mach_port_t master_port,
5070 uint32_t flag,
5071 io_buf_ptr_t *outData,
5072 mach_msg_type_number_t *outDataCount)
5073 {
5074 kern_return_t kr = kIOReturnSuccess;
5075 OSSerialize * s;
5076
5077 if( master_port != master_device_port)
5078 return kIOReturnNotPrivileged;
5079
5080 //printf("io_catalog_get_data called. flag: %d\n", flag);
5081
5082 s = OSSerialize::withCapacity(4096);
5083 if ( !s )
5084 return kIOReturnNoMemory;
5085
5086 kr = gIOCatalogue->serializeData(flag, s);
5087
5088 if ( kr == kIOReturnSuccess ) {
5089 vm_offset_t data;
5090 vm_map_copy_t copy;
5091 vm_size_t size;
5092
5093 size = s->getLength();
5094 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
5095 if ( kr == kIOReturnSuccess ) {
5096 bcopy(s->text(), (void *)data, size);
5097 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5098 (vm_map_size_t)size, true, &copy);
5099 *outData = (char *)copy;
5100 *outDataCount = size;
5101 }
5102 }
5103
5104 s->release();
5105
5106 return kr;
5107 }
5108
5109 /* Routine io_catalog_get_gen_count */
5110 kern_return_t is_io_catalog_get_gen_count(
5111 mach_port_t master_port,
5112 uint32_t *genCount)
5113 {
5114 if( master_port != master_device_port)
5115 return kIOReturnNotPrivileged;
5116
5117 //printf("io_catalog_get_gen_count called.\n");
5118
5119 if ( !genCount )
5120 return kIOReturnBadArgument;
5121
5122 *genCount = gIOCatalogue->getGenerationCount();
5123
5124 return kIOReturnSuccess;
5125 }
5126
5127 /* Routine io_catalog_module_loaded.
5128 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); it does not appear to be used in practice.
5129 */
5130 kern_return_t is_io_catalog_module_loaded(
5131 mach_port_t master_port,
5132 io_name_t name)
5133 {
5134 if( master_port != master_device_port)
5135 return kIOReturnNotPrivileged;
5136
5137 //printf("io_catalog_module_loaded called. name %s\n", name);
5138
5139 if ( !name )
5140 return kIOReturnBadArgument;
5141
5142 gIOCatalogue->moduleHasLoaded(name);
5143
5144 return kIOReturnSuccess;
5145 }
5146
5147 kern_return_t is_io_catalog_reset(
5148 mach_port_t master_port,
5149 uint32_t flag)
5150 {
5151 if( master_port != master_device_port)
5152 return kIOReturnNotPrivileged;
5153
5154 switch ( flag ) {
5155 case kIOCatalogResetDefault:
5156 gIOCatalogue->reset();
5157 break;
5158
5159 default:
5160 return kIOReturnBadArgument;
5161 }
5162
5163 return kIOReturnSuccess;
5164 }
5165
5166 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5167 {
5168 kern_return_t result = kIOReturnBadArgument;
5169 IOUserClient *userClient;
5170
5171 if ((userClient = OSDynamicCast(IOUserClient,
5172 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5173 IOExternalTrap *trap;
5174 IOService *target = NULL;
5175
5176 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5177
5178 if (trap && target) {
5179 IOTrap func;
5180
5181 func = trap->func;
5182
5183 if (func) {
5184 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5185 }
5186 }
5187
5188 iokit_remove_connect_reference(userClient);
5189 }
5190
5191 return result;
5192 }
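/*
 * The trap path bypasses MIG entirely: the Mach trap arguments carry the user
 * client reference, a trap index and up to six machine-word parameters, and
 * getTargetAndTrapForIndex() selects the IOExternalTrap to invoke.  From user
 * space this is reachable through IOKitLib's IOConnectTrap0()..IOConnectTrap6()
 * wrappers; a sketch with an illustrative index of 0 and two arguments:
 *
 *     kern_return_t kr = IOConnectTrap2(connection, 0,
 *                                       (uintptr_t)arg1, (uintptr_t)arg2);
 */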
5193
5194 } /* extern "C" */
5195
5196 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5197 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5198 {
5199 IOReturn err;
5200 IOService * object;
5201 IOByteCount structureOutputSize;
5202
5203 if (dispatch)
5204 {
5205 uint32_t count;
5206 count = dispatch->checkScalarInputCount;
5207 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5208 {
5209 return (kIOReturnBadArgument);
5210 }
5211
5212 count = dispatch->checkStructureInputSize;
5213 if ((kIOUCVariableStructureSize != count)
5214 && (count != ((args->structureInputDescriptor)
5215 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5216 {
5217 return (kIOReturnBadArgument);
5218 }
5219
5220 count = dispatch->checkScalarOutputCount;
5221 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5222 {
5223 return (kIOReturnBadArgument);
5224 }
5225
5226 count = dispatch->checkStructureOutputSize;
5227 if ((kIOUCVariableStructureSize != count)
5228 && (count != ((args->structureOutputDescriptor)
5229 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5230 {
5231 return (kIOReturnBadArgument);
5232 }
5233
5234 if (dispatch->function)
5235 err = (*dispatch->function)(target, reference, args);
5236 else
5237 err = kIOReturnNoCompletion; /* implementor can dispatch */
5238
5239 return (err);
5240 }
5241
5242
5243 // pre-Leopard API's don't do ool structs
5244 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5245 {
5246 err = kIOReturnIPCError;
5247 return (err);
5248 }
5249
5250 structureOutputSize = args->structureOutputSize;
5251
5252 if (args->asyncWakePort)
5253 {
5254 IOExternalAsyncMethod * method;
5255 object = 0;
5256 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5257 return (kIOReturnUnsupported);
5258
5259 if (kIOUCForegroundOnly & method->flags)
5260 {
5261 if (task_is_gpu_denied(current_task()))
5262 return (kIOReturnNotPermitted);
5263 }
5264
5265 switch (method->flags & kIOUCTypeMask)
5266 {
5267 case kIOUCScalarIStructI:
5268 err = shim_io_async_method_scalarI_structureI( method, object,
5269 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5270 args->scalarInput, args->scalarInputCount,
5271 (char *)args->structureInput, args->structureInputSize );
5272 break;
5273
5274 case kIOUCScalarIScalarO:
5275 err = shim_io_async_method_scalarI_scalarO( method, object,
5276 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5277 args->scalarInput, args->scalarInputCount,
5278 args->scalarOutput, &args->scalarOutputCount );
5279 break;
5280
5281 case kIOUCScalarIStructO:
5282 err = shim_io_async_method_scalarI_structureO( method, object,
5283 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5284 args->scalarInput, args->scalarInputCount,
5285 (char *) args->structureOutput, &args->structureOutputSize );
5286 break;
5287
5288
5289 case kIOUCStructIStructO:
5290 err = shim_io_async_method_structureI_structureO( method, object,
5291 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5292 (char *)args->structureInput, args->structureInputSize,
5293 (char *) args->structureOutput, &args->structureOutputSize );
5294 break;
5295
5296 default:
5297 err = kIOReturnBadArgument;
5298 break;
5299 }
5300 }
5301 else
5302 {
5303 IOExternalMethod * method;
5304 object = 0;
5305 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5306 return (kIOReturnUnsupported);
5307
5308 if (kIOUCForegroundOnly & method->flags)
5309 {
5310 if (task_is_gpu_denied(current_task()))
5311 return (kIOReturnNotPermitted);
5312 }
5313
5314 switch (method->flags & kIOUCTypeMask)
5315 {
5316 case kIOUCScalarIStructI:
5317 err = shim_io_connect_method_scalarI_structureI( method, object,
5318 args->scalarInput, args->scalarInputCount,
5319 (char *) args->structureInput, args->structureInputSize );
5320 break;
5321
5322 case kIOUCScalarIScalarO:
5323 err = shim_io_connect_method_scalarI_scalarO( method, object,
5324 args->scalarInput, args->scalarInputCount,
5325 args->scalarOutput, &args->scalarOutputCount );
5326 break;
5327
5328 case kIOUCScalarIStructO:
5329 err = shim_io_connect_method_scalarI_structureO( method, object,
5330 args->scalarInput, args->scalarInputCount,
5331 (char *) args->structureOutput, &structureOutputSize );
5332 break;
5333
5334
5335 case kIOUCStructIStructO:
5336 err = shim_io_connect_method_structureI_structureO( method, object,
5337 (char *) args->structureInput, args->structureInputSize,
5338 (char *) args->structureOutput, &structureOutputSize );
5339 break;
5340
5341 default:
5342 err = kIOReturnBadArgument;
5343 break;
5344 }
5345 }
5346
5347 args->structureOutputSize = structureOutputSize;
5348
5349 return (err);
5350 }
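/*
 * A minimal sketch of the dispatch-table path handled at the top of
 * externalMethod(), assuming a hypothetical subclass MyUserClient with a
 * single method that takes two scalar inputs and returns one scalar:
 *
 *     static IOReturn myMethod(OSObject * target, void * reference,
 *                              IOExternalMethodArguments * args)
 *     {
 *         args->scalarOutput[0] = args->scalarInput[0] + args->scalarInput[1];
 *         return kIOReturnSuccess;
 *     }
 *
 *     static const IOExternalMethodDispatch sMethods[1] = {
 *         // function, scalar in count, struct in size, scalar out count, struct out size
 *         { &myMethod, 2, 0, 1, 0 },
 *     };
 *
 *     IOReturn MyUserClient::externalMethod(uint32_t selector,
 *         IOExternalMethodArguments * args, IOExternalMethodDispatch * dispatch,
 *         OSObject * target, void * reference)
 *     {
 *         if (selector < 1) {
 *             dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *             if (!target) target = this;
 *         }
 *         return IOUserClient::externalMethod(selector, args, dispatch,
 *                                             target, reference);
 *     }
 */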
5351
5352 #if __LP64__
5353 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5354 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5355 #else
5356 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5357 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5358 #endif
5359 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5360 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5361 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5362 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5363 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5364 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5365 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5366 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5367 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5368 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5369 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5370 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5371 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5372 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5373