]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
985e3b201286c38571cd1d2fdd55c97da161fa5d
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/system.h>
44 #include <libkern/OSDebug.h>
45 #include <sys/proc.h>
46 #include <sys/kauth.h>
47 #include <sys/codesign.h>
48
49 #include <mach/sdt.h>
50
51 #if CONFIG_MACF
52
53 extern "C" {
54 #include <security/mac_framework.h>
55 };
56 #include <sys/kauth.h>
57
58 #define IOMACF_LOG 0
59
60 #endif /* CONFIG_MACF */
61
62 #include <IOKit/assert.h>
63
64 #include "IOServicePrivate.h"
65 #include "IOKitKernelInternal.h"
66
67 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
68 #define SCALAR32(x) ((uint32_t )x)
69 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
70 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
71 #define REF32(x) ((int)(x))
72
// Flag bits kept in the low bits of an async reference's reserved slot
// (kIOAsyncReservedIndex); see setAsyncReference64().
enum
{
    kIOUCAsync0Flags          = 3ULL,   // mask covering both flag bits below
    kIOUCAsync64Flag          = 1ULL,   // client task has a 64-bit address map
    kIOUCAsyncErrorLoggedFlag = 2ULL    // a send failure has already been logged
};
79
80 #if IOKITSTATS
81
82 #define IOStatisticsRegisterCounter() \
83 do { \
84 reserved->counter = IOStatistics::registerUserClient(this); \
85 } while (0)
86
87 #define IOStatisticsUnregisterCounter() \
88 do { \
89 if (reserved) \
90 IOStatistics::unregisterUserClient(reserved->counter); \
91 } while (0)
92
93 #define IOStatisticsClientCall() \
94 do { \
95 IOStatistics::countUserClientCall(client); \
96 } while (0)
97
98 #else
99
100 #define IOStatisticsRegisterCounter()
101 #define IOStatisticsUnregisterCounter()
102 #define IOStatisticsClientCall()
103
104 #endif /* IOKITSTATS */
105
106 #if DEVELOPMENT || DEBUG
107
108 #define FAKE_STACK_FRAME(a) \
109 const void ** __frameptr; \
110 const void * __retaddr; \
111 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
112 __retaddr = __frameptr[1]; \
113 __frameptr[1] = (a);
114
115 #define FAKE_STACK_FRAME_END() \
116 __frameptr[1] = __retaddr;
117
118 #else /* DEVELOPMENT || DEBUG */
119
120 #define FAKE_STACK_FRAME(a)
121 #define FAKE_STACK_FRAME_END()
122
123 #endif /* DEVELOPMENT || DEBUG */
124
125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // definitions we should get from osfmk
128
129 //typedef struct ipc_port * ipc_port_t;
130 typedef natural_t ipc_kobject_type_t;
131
132 #define IKOT_IOKIT_SPARE 27
133 #define IKOT_IOKIT_CONNECT 29
134 #define IKOT_IOKIT_OBJECT 30
135
136 extern "C" {
137
138 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
139 ipc_kobject_type_t type );
140
141 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
142
143 extern mach_port_name_t iokit_make_send_right( task_t task,
144 io_object_t obj, ipc_kobject_type_t type );
145
146 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
147
148 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
149
150 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
151
152 extern ipc_port_t master_device_port;
153
154 extern void iokit_retain_port( ipc_port_t port );
155 extern void iokit_release_port( ipc_port_t port );
156 extern void iokit_release_port_send( ipc_port_t port );
157
158 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
159
160 #include <mach/mach_traps.h>
161 #include <vm/vm_map.h>
162
163 } /* extern "C" */
164
165
166 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
167
168 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
169
class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    OSObject *  object;       // the wrapped kernel object this port represents
    ipc_port_t  port;         // the mach port allocated for the object
    UInt32      mscount;      // make-send count handed out for this port
    UInt8       holdDestroy;  // when set, releasePortForObject() leaves the entry alone

    // Look up (or create) the IOMachPort for obj in the per-type table;
    // returns a retained instance, or 0.
    static IOMachPort * portForObject( OSObject * obj,
                                       ipc_kobject_type_t type );
    // Called on a no-senders notification; returns true if the entry was
    // actually removed (mscount caught up), updating *mscount otherwise.
    static bool noMoreSendersForObject( OSObject * obj,
                                        ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    // Drop the table entry for obj unless holdDestroy is set.
    static void releasePortForObject( OSObject * obj,
                                      ipc_kobject_type_t type );
    // Mark obj's port so releasePortForObject() will not remove it.
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    // Per-type lookup table (object vs. connect ports).
    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
                                                  io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};
194
195 #define super OSObject
196 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
197
198 static IOLock * gIOObjectPortLock;
199
200 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
201
202 // not in dictForType() for debugging ease
203 static OSDictionary * gIOObjectPorts;
204 static OSDictionary * gIOConnectPorts;
205
206 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
207 {
208 OSDictionary ** dict;
209
210 if( IKOT_IOKIT_OBJECT == type )
211 dict = &gIOObjectPorts;
212 else if( IKOT_IOKIT_CONNECT == type )
213 dict = &gIOConnectPorts;
214 else
215 return( 0 );
216
217 if( 0 == *dict)
218 *dict = OSDictionary::withCapacity( 1 );
219
220 return( *dict );
221 }
222
223 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
224 ipc_kobject_type_t type )
225 {
226 IOMachPort * inst = 0;
227 OSDictionary * dict;
228
229 IOTakeLock( gIOObjectPortLock);
230
231 do {
232
233 dict = dictForType( type );
234 if( !dict)
235 continue;
236
237 if( (inst = (IOMachPort *)
238 dict->getObject( (const OSSymbol *) obj ))) {
239 inst->mscount++;
240 inst->retain();
241 continue;
242 }
243
244 inst = new IOMachPort;
245 if( inst && !inst->init()) {
246 inst = 0;
247 continue;
248 }
249
250 inst->port = iokit_alloc_object_port( obj, type );
251 if( inst->port) {
252 // retains obj
253 dict->setObject( (const OSSymbol *) obj, inst );
254 inst->mscount++;
255
256 } else {
257 inst->release();
258 inst = 0;
259 }
260
261 } while( false );
262
263 IOUnlock( gIOObjectPortLock);
264
265 return( inst );
266 }
267
// Handle a no-senders notification for obj's port.  Returns true when the
// port table entry was removed (the notification's mscount has caught up
// with the make-send count we handed out); otherwise returns false and
// writes the current make-send count back through *mscount so the caller
// can re-arm the notification.
bool IOMachPort::noMoreSendersForObject( OSObject * obj,
                                ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary * dict;
    IOMachPort *   machPort;
    IOUserClient * uc;
    bool           destroyed = true;  // default when obj has no table entry

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        // keep obj alive across removeObject(), which drops the dict's ref
        obj->retain();

        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort) {
            // stale notification if we made more send rights since it fired
            destroyed = (machPort->mscount <= *mscount);
            if (!destroyed) *mscount = machPort->mscount;
            else
            {
                // connect ports: tell the user client its senders are gone
                if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
                {
                    uc->noMoreSenders();
                }
                dict->removeObject( (const OSSymbol *) obj );
            }
        }
        obj->release();
    }

    IOUnlock( gIOObjectPortLock);

    return( destroyed );
}
301
302 void IOMachPort::releasePortForObject( OSObject * obj,
303 ipc_kobject_type_t type )
304 {
305 OSDictionary * dict;
306 IOMachPort * machPort;
307
308 assert(IKOT_IOKIT_CONNECT != type);
309
310 IOTakeLock( gIOObjectPortLock);
311
312 if( (dict = dictForType( type ))) {
313 obj->retain();
314 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
315 if( machPort && !machPort->holdDestroy)
316 dict->removeObject( (const OSSymbol *) obj );
317 obj->release();
318 }
319
320 IOUnlock( gIOObjectPortLock);
321 }
322
323 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
324 {
325 OSDictionary * dict;
326 IOMachPort * machPort;
327
328 IOLockLock( gIOObjectPortLock );
329
330 if( (dict = dictForType( type ))) {
331 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
332 if( machPort)
333 machPort->holdDestroy = true;
334 }
335
336 IOLockUnlock( gIOObjectPortLock );
337 }
338
// Tear down user-space port references to obj as it goes away.  Object
// ports are dropped outright; the connect-port case is handled inline
// (rather than via releasePortForObject — see the panther note) so that a
// user client's port can be re-keyed onto its mappings object, keeping the
// port alive for outstanding memory mappings.
void IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // panther, 3160200
    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);
    // keep obj alive across removeObject(), which drops the table's ref
    obj->retain();

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
        IOMachPort * port;
        port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if (port)
        {
            IOUserClient * uc;
            if ((uc = OSDynamicCast(IOUserClient, obj)))
            {
                uc->noMoreSenders();
                if (uc->mappings)
                {
                    // re-key the table entry under the mappings object and
                    // repoint the kernel port at it
                    dict->setObject((const OSSymbol *) uc->mappings, port);
                    iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

                    uc->mappings->release();
                    uc->mappings = 0;
                }
            }
            dict->removeObject( (const OSSymbol *) obj );
        }
    }
    obj->release();
    IOUnlock( gIOObjectPortLock);
}
376
// Create a send right for obj's port in the given task's IPC space and
// return its name there.  Thin wrapper over the osfmk entry point.
mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
                                io_object_t obj, ipc_kobject_type_t type )
{
    return( iokit_make_send_right( task, obj, type ));
}
382
383 void IOMachPort::free( void )
384 {
385 if( port)
386 iokit_destroy_object_port( port );
387 super::free();
388 }
389
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
// Lock-protected wrapper around an OSIterator handed out to user space;
// serializes all iterator operations through its own lock.
class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject * userIteratorObject;  // the wrapped iterator (released in free())
    IOLock   * lock;                // guards every access to the wrapped iterator

    // Wrap iter (consumed; released by free()); returns 0 on failure or NULL iter.
    static IOUserIterator * withIterator(OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
407
408 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
409
// Base class for user-space notification objects; reuses the inherited
// userIteratorObject slot (aliased as holdNotify) to hold the IONotifier
// that must be removed when the notification dies.
class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

#define holdNotify      userIteratorObject

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    // Swap in a new notifier, removing any previous one.
    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
425
426 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
427
428 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
429
430 IOUserIterator *
431 IOUserIterator::withIterator(OSIterator * iter)
432 {
433 IOUserIterator * me;
434
435 if (!iter) return (0);
436
437 me = new IOUserIterator;
438 if (me && !me->init())
439 {
440 me->release();
441 me = 0;
442 }
443 if (!me) return me;
444 me->userIteratorObject = iter;
445
446 return (me);
447 }
448
449 bool
450 IOUserIterator::init( void )
451 {
452 if (!OSObject::init()) return (false);
453
454 lock = IOLockAlloc();
455 if( !lock)
456 return( false );
457
458 return (true);
459 }
460
461 void
462 IOUserIterator::free()
463 {
464 if (userIteratorObject) userIteratorObject->release();
465 if (lock) IOLockFree(lock);
466 OSObject::free();
467 }
468
469 void
470 IOUserIterator::reset()
471 {
472 IOLockLock(lock);
473 assert(OSDynamicCast(OSIterator, userIteratorObject));
474 ((OSIterator *)userIteratorObject)->reset();
475 IOLockUnlock(lock);
476 }
477
478 bool
479 IOUserIterator::isValid()
480 {
481 bool ret;
482
483 IOLockLock(lock);
484 assert(OSDynamicCast(OSIterator, userIteratorObject));
485 ret = ((OSIterator *)userIteratorObject)->isValid();
486 IOLockUnlock(lock);
487
488 return (ret);
489 }
490
491 OSObject *
492 IOUserIterator::getNextObject()
493 {
494 OSObject * ret;
495
496 IOLockLock(lock);
497 assert(OSDynamicCast(OSIterator, userIteratorObject));
498 ret = ((OSIterator *)userIteratorObject)->getNextObject();
499 IOLockUnlock(lock);
500
501 return (ret);
502 }
503
504 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
505 extern "C" {
506
507 // functions called from osfmk/device/iokit_rpc.c
508
509 void
510 iokit_add_reference( io_object_t obj )
511 {
512 if( obj)
513 obj->retain();
514 }
515
516 void
517 iokit_remove_reference( io_object_t obj )
518 {
519 if( obj)
520 obj->release();
521 }
522
523 void
524 iokit_add_connect_reference( io_object_t obj )
525 {
526 IOUserClient * uc;
527
528 if (!obj) return;
529
530 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
531
532 obj->retain();
533 }
534
535 void
536 iokit_remove_connect_reference( io_object_t obj )
537 {
538 IOUserClient * uc;
539 bool finalize = false;
540
541 if (!obj) return;
542
543 if ((uc = OSDynamicCast(IOUserClient, obj)))
544 {
545 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
546 {
547 IOLockLock(gIOObjectPortLock);
548 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
549 IOLockUnlock(gIOObjectPortLock);
550 }
551 if (finalize) uc->scheduleFinalize(true);
552 }
553
554 obj->release();
555 }
556
557 bool
558 IOUserClient::finalizeUserReferences(OSObject * obj)
559 {
560 IOUserClient * uc;
561 bool ok = true;
562
563 if ((uc = OSDynamicCast(IOUserClient, obj)))
564 {
565 IOLockLock(gIOObjectPortLock);
566 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
567 IOLockUnlock(gIOObjectPortLock);
568 }
569 return (ok);
570 }
571
572 ipc_port_t
573 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
574 {
575 IOMachPort * machPort;
576 ipc_port_t port;
577
578 if( (machPort = IOMachPort::portForObject( obj, type ))) {
579
580 port = machPort->port;
581 if( port)
582 iokit_retain_port( port );
583
584 machPort->release();
585
586 } else
587 port = NULL;
588
589 return( port );
590 }
591
// No-senders handler called from the IPC layer when the last send right to
// an IOKit port dies.  Returns kIOReturnNotReady (and an updated *mscount)
// when the notification is stale; otherwise notifies the object of its
// client's death.
kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
                ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient *       client;
    IOMemoryMap *        map;
    IOUserNotification * notify;

    // stale notification: more send rights were made since it fired
    if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
        return( kIOReturnNotReady );

    if( IKOT_IOKIT_CONNECT == type)
    {
        if( (client = OSDynamicCast( IOUserClient, obj )))
        {
            IOStatisticsClientCall();
            client->clientDied();
        }
    }
    else if( IKOT_IOKIT_OBJECT == type)
    {
        if( (map = OSDynamicCast( IOMemoryMap, obj )))
            map->taskDied();
        else if( (notify = OSDynamicCast( IOUserNotification, obj )))
            notify->setNotification( 0 );
    }

    return( kIOReturnSuccess );
}
621
622 }; /* extern "C" */
623
624 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
625
// Delivers service matching/termination notifications to a user task by
// sending a pre-built "ping" mach message; matched services queue up in
// newSet until the client drains them via getNextObject().
class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    struct PingMsg {
        mach_msg_header_t       msgHdr;
        OSNotificationHeader64  notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };  // cap on queued, undelivered services

    PingMsg  *  pingMsg;    // preformatted message, reused for every ping
    vm_size_t   msgSize;    // actual allocated size (varies with reference size)
    OSArray  *  newSet;     // services seen but not yet fetched by the client
    OSObject *  lastEntry;  // last object handed out; released on next fetch
    bool        armed;      // true when the next match should send a ping
    bool        ipcLogged;  // rate-limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;

    // Static trampoline registered with the notification system.
    static bool _handler( void * target,
                          void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
657
// Delivers IOService interest messages (general interest / priority
// notifications) to a user task; each message carries the provider's port
// and a copied argument payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
        mach_msg_header_t          msgHdr;
        mach_msg_body_t            msgBody;
        mach_msg_port_descriptor_t ports[1];   // provider's object port
        OSNotificationHeader64     notifyHeader __attribute__ ((packed));
    };

    PingMsg *   pingMsg;     // preformatted message template
    vm_size_t   msgSize;     // allocated template size
    uint8_t     clientIs64;  // client task uses a 64-bit address map
    int         owningPID;   // pid captured at registration (kIOMessageCopyClientID)
    bool        ipcLogged;   // rate-limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       vm_size_t extraSize,
                       bool clientIs64 );

    virtual void free() APPLE_KEXT_OVERRIDE;

    // Static trampoline registered with the interest-notification system.
    static IOReturn _handler( void * target, void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
693
694 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
695
696 #undef super
697 #define super IOUserIterator
698 OSDefineMetaClass( IOUserNotification, IOUserIterator )
699 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
700
701 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
702
// Remove the underlying IONotifier (so the handler cannot fire again)
// before tearing down the rest of the object.
void IOUserNotification::free( void )
{
    if (holdNotify)
    {
        assert(OSDynamicCast(IONotifier, holdNotify));
        ((IONotifier *)holdNotify)->remove();
        holdNotify = 0;
    }
    // can't be in handler now

    super::free();
}
715
716
717 void IOUserNotification::setNotification( IONotifier * notify )
718 {
719 OSObject * previousNotify;
720
721 IOLockLock( gIOObjectPortLock);
722
723 previousNotify = holdNotify;
724 holdNotify = notify;
725
726 IOLockUnlock( gIOObjectPortLock);
727
728 if( previousNotify)
729 {
730 assert(OSDynamicCast(IONotifier, previousNotify));
731 ((IONotifier *)previousNotify)->remove();
732 }
733 }
734
735 void IOUserNotification::reset()
736 {
737 // ?
738 }
739
740 bool IOUserNotification::isValid()
741 {
742 return( true );
743 }
744
745 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
746
747 #undef super
748 #define super IOUserNotification
749 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
750
751 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
752
// Build the reusable ping message for this notification.
//   port          - client's wake port (a send right we now own)
//   type          - notification type stamped into the header
//   reference     - opaque client reference copied into the header
//   referenceSize - must not exceed sizeof(OSAsyncReference64)
// Returns false on bad arguments or allocation failure.
bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
                                      void * reference, vm_size_t referenceSize,
                                      bool clientIs64 )
{
    if( !super::init())
        return( false );

    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
        return( false );

    // reject over-long references before sizing the message with them
    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    // message tail is sized to the actual reference, not the max
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port    = port;
    pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size           = msgSize;
    pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
787
// Teardown.  Fields are snapshotted into locals BEFORE super::free() runs
// (which removes the notifier) so nothing here touches freed state; the
// send right held in the message header is released last.
void IOServiceUserNotification::free( void )
{
    PingMsg   * _pingMsg;
    vm_size_t   _msgSize;
    OSArray   * _newSet;
    OSObject  * _lastEntry;

    _pingMsg   = pingMsg;
    _msgSize   = msgSize;
    _lastEntry = lastEntry;
    _newSet    = newSet;

    super::free();

    if( _pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree(_pingMsg, _msgSize);
    }

    if( _lastEntry)
        _lastEntry->release();

    if( _newSet)
        _newSet->release();
}
815
816 bool IOServiceUserNotification::_handler( void * target,
817 void * ref, IOService * newService, IONotifier * notifier )
818 {
819 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
820 }
821
// Called for each matched service.  Queues the service (up to
// kMaxOutstanding) and pings the client once per drain cycle: a ping is
// sent only when armed and the queue was previously empty; the client
// re-arms by draining via getNextObject().
bool IOServiceUserNotification::handler( void * ref,
                                IOService * newService )
{
    unsigned int    count;
    kern_return_t   kr;
    ipc_port_t      port = NULL;
    bool            sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

        newSet->setObject( newService );
        if( (sendPing = (armed && (0 == count))))
            armed = false;
    }

    IOUnlock( lock );

    // keep the terminated service's port around so the client can still
    // look it up while handling the notification
    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    if( sendPing) {
        if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
            pingMsg->msgHdr.msgh_local_port = port;
        else
            pingMsg->msgHdr.msgh_local_port = NULL;

        kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
                                                     pingMsg->msgHdr.msgh_size,
                                                     (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
                                                     0);
        if( port)
            iokit_release_port( port );

        // log the first send failure only
        if( (KERN_SUCCESS != kr) && !ipcLogged)
        {
            ipcLogged = true;
            IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
        }
    }

    return( true );
}
867
// Pop the most recently queued service (LIFO).  The returned object stays
// retained as lastEntry until the NEXT call, so the client can use it
// without holding its own reference; an empty queue re-arms pings.
OSObject * IOServiceUserNotification::getNextObject()
{
    unsigned int    count;
    OSObject *      result;
    OSObject *      releaseEntry;

    IOLockLock(lock);

    releaseEntry = lastEntry;
    count = newSet->getCount();
    if( count ) {
        // take the last element; retain before removeObject drops the
        // array's reference
        result = newSet->getObject( count - 1 );
        result->retain();
        newSet->removeObject( count - 1);
    } else {
        result = 0;
        armed = true;  // queue drained: next match should ping again
    }
    lastEntry = result;

    IOLockUnlock(lock);

    // release the previously handed-out object outside the lock
    if (releaseEntry) releaseEntry->release();

    return( result );
}
894
895 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
896
897 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
898
899 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
900
// Build the reusable interest-message template.
//   port          - client's wake port (send right we now own)
//   type          - notification type stamped into the header
//   reference     - opaque client reference; size capped at OSAsyncReference64
//   extraSize     - extra payload room recorded in the header
//   client64      - whether the client task is 64-bit (affects argument width)
// Returns false on bad arguments or allocation failure.
bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
                                void * reference, vm_size_t referenceSize, vm_size_t extraSize,
                                bool client64 )
{
    if( !super::init())
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    clientIs64 = client64;

    // captured for kIOMessageCopyClientID replies
    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    // template tail is sized to the actual reference, not the max
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port    = port;
    // complex message: carries the provider's port descriptor
    pingMsg->msgHdr.msgh_bits           = MACH_MSGH_BITS_COMPLEX
                                        |  MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size           = msgSize;
    pingMsg->msgHdr.msgh_id             = kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name              = 0;     // filled per-send with the provider port
    pingMsg->ports[0].disposition       = MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type              = MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size          = extraSize;
    pingMsg->notifyHeader.type          = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
943
944 void IOServiceMessageUserNotification::free( void )
945 {
946 PingMsg * _pingMsg;
947 vm_size_t _msgSize;
948
949 _pingMsg = pingMsg;
950 _msgSize = msgSize;
951
952 super::free();
953
954 if( _pingMsg && _msgSize) {
955 if (_pingMsg->msgHdr.msgh_remote_port) {
956 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
957 }
958 IOFree( _pingMsg, _msgSize);
959 }
960 }
961
962 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
963 UInt32 messageType, IOService * provider,
964 void * argument, vm_size_t argSize )
965 {
966 return( ((IOServiceMessageUserNotification *) target)->handler(
967 ref, messageType, provider, argument, argSize));
968 }
969
// Deliver one interest message to the client.  Builds a per-send message
// (template + IOServiceInterestContent64 payload) on the stack when it
// fits, otherwise heap-allocates; encodes the unpadded argument size into
// the notification type bits so user space can recover it.
IOReturn IOServiceMessageUserNotification::handler( void * ref,
    UInt32 messageType, IOService * provider,
    void * messageArgument, vm_size_t callerArgSize )
{
    enum { kLocalMsgSize = 0x100 };
    uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
    void * allocMsg;
    kern_return_t  kr;
    vm_size_t      argSize;
    vm_size_t      thisMsgSize;
    ipc_port_t     thisPort, providerPort;
    struct PingMsg *            thisMsg;
    IOServiceInterestContent64 * data;

    // special query: reply with the registering process's pid in-place
    if (kIOMessageCopyClientID == messageType)
    {
        *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
        return (kIOReturnSuccess);
    }

    if (callerArgSize == 0)
    {
        // argument is a scalar passed by value; width follows client ABI
        if (clientIs64) argSize = sizeof(data->messageArgument[0]);
        else argSize = sizeof(uint32_t);
    }
    else
    {
        // argument is a buffer; clamp to the allowed maximum
        argSize = callerArgSize;
        if( argSize > kIOUserNotifyMaxMessageSize)
            argSize = kIOUserNotifyMaxMessageSize;
    }

    // adjust message size for ipc restrictions: stash the unpadded size in
    // the type field, then round argSize up to the alignment mask
    natural_t type;
    type = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    thisMsgSize = msgSize
        + sizeof( IOServiceInterestContent64 )
        - sizeof( data->messageArgument)
        + argSize;

    // use the stack buffer when the message fits, else heap-allocate
    if (thisMsgSize > sizeof(stackMsg))
    {
        allocMsg = IOMalloc(thisMsgSize);
        if (!allocMsg) return (kIOReturnNoMemory);
        thisMsg = (typeof(thisMsg)) allocMsg;
    }
    else
    {
        allocMsg = 0;
        thisMsg = (typeof(thisMsg)) stackMsg;
    }

    bcopy(pingMsg, thisMsg, msgSize);
    thisMsg->notifyHeader.type = type;
    data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
    // == pingMsg->notifyHeader.content;
    data->messageType = messageType;

    if (callerArgSize == 0)
    {
        data->messageArgument[0] = (io_user_reference_t) messageArgument;
        if (!clientIs64)
        {
            // 32-bit clients read a 32-bit slot; replicate into both halves
            data->messageArgument[0] |= (data->messageArgument[0] << 32);
        }
    }
    else
    {
        // copy the caller's buffer; zero the alignment padding
        bcopy( messageArgument, data->messageArgument, callerArgSize );
        bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
    }

    thisMsg->notifyHeader.type = type;
    thisMsg->msgHdr.msgh_size = thisMsgSize;

    // attach the provider's port descriptor and our reply port
    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    thisMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    thisMsg->msgHdr.msgh_local_port = thisPort;

    kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
                                                 thisMsg->msgHdr.msgh_size,
                                                 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
                                                 0);
    if( thisPort)
        iokit_release_port( thisPort );
    if( providerPort)
        iokit_release_port( providerPort );

    if (allocMsg)
        IOFree(allocMsg, thisMsgSize);

    // log the first send failure only
    if((KERN_SUCCESS != kr) && !ipcLogged)
    {
        ipcLogged = true;
        IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return( kIOReturnSuccess );
}
1074
1075 OSObject * IOServiceMessageUserNotification::getNextObject()
1076 {
1077 return( 0 );
1078 }
1079
1080 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1081
1082 #undef super
1083 #define super IOService
1084 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1085
1086 IOLock * gIOUserClientOwnersLock;
1087
// One-time class setup: allocate the global locks guarding the port
// tables and the client-owners list.
void IOUserClient::initialize( void )
{
    gIOObjectPortLock = IOLockAlloc();
    gIOUserClientOwnersLock = IOLockAlloc();
    assert(gIOObjectPortLock && gIOUserClientOwnersLock);
}
1094
// Fill a 32-bit async reference: wake port (preserving the low flag bits
// already present in the reserved slot), callback, and refcon.
void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
                                     mach_port_t wakePort,
                                     void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}
1104
// Fill a 64-bit async reference: wake port (preserving the low flag bits
// already present in the reserved slot), callback, and refcon.
void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
                                       mach_port_t wakePort,
                                       mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1114
1115 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1116 mach_port_t wakePort,
1117 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1118 {
1119 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1120 if (vm_map_is_64bit(get_task_map(task))) {
1121 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1122 }
1123 }
1124
1125 static OSDictionary * CopyConsoleUser(UInt32 uid)
1126 {
1127 OSArray * array;
1128 OSDictionary * user = 0;
1129
1130 if ((array = OSDynamicCast(OSArray,
1131 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1132 {
1133 for (unsigned int idx = 0;
1134 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1135 idx++) {
1136 OSNumber * num;
1137
1138 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1139 && (uid == num->unsigned32BitValue())) {
1140 user->retain();
1141 break;
1142 }
1143 }
1144 array->release();
1145 }
1146 return user;
1147 }
1148
1149 static OSDictionary * CopyUserOnConsole(void)
1150 {
1151 OSArray * array;
1152 OSDictionary * user = 0;
1153
1154 if ((array = OSDynamicCast(OSArray,
1155 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1156 {
1157 for (unsigned int idx = 0;
1158 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1159 idx++)
1160 {
1161 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1162 {
1163 user->retain();
1164 break;
1165 }
1166 }
1167 array->release();
1168 }
1169 return (user);
1170 }
1171
1172 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1173 IOService * service )
1174 {
1175 proc_t p;
1176
1177 p = (proc_t) get_bsdtask_info(task);
1178 if (p)
1179 {
1180 uint64_t authorizationID;
1181
1182 authorizationID = proc_uniqueid(p);
1183 if (authorizationID)
1184 {
1185 if (service->getAuthorizationID() == authorizationID)
1186 {
1187 return (kIOReturnSuccess);
1188 }
1189 }
1190 }
1191
1192 return (kIOReturnNotPermitted);
1193 }
1194
// Check whether the caller identified by `securityToken` holds the named
// privilege. The meaning of `securityToken` depends on the privilege:
// a task_t for most checks, an IOUCProcessToken for the secure-console
// check, and it is ignored for the foreground check (current_task() used).
// Returns kIOReturnSuccess when privileged, kIOReturnNotPrivileged when
// not, kIOReturnUnsupported for unknown privilege names.
IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
	const char * privilegeName )
{
	kern_return_t           kr;
	security_token_t        token;
	mach_msg_type_number_t  count;
	task_t                  task;
	OSDictionary *          user;
	bool                    secureConsole;


	// Foreground: purely a property of the current task (GPU access denial).
	if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
		sizeof(kIOClientPrivilegeForeground)))
	{
		if (task_is_gpu_denied(current_task()))
			return (kIOReturnNotPrivileged);
		else
			return (kIOReturnSuccess);
	}

	// Console session: the caller's audit session must match the session
	// of the user currently on the console.
	if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
		sizeof(kIOClientPrivilegeConsoleSession)))
	{
		kauth_cred_t cred;
		proc_t p;

		task = (task_t) securityToken;
		if (!task)
			task = current_task();
		p = (proc_t) get_bsdtask_info(task);
		kr = kIOReturnNotPrivileged;

		if (p && (cred = kauth_cred_proc_ref(p)))
		{
			user = CopyUserOnConsole();
			if (user)
			{
				OSNumber * num;
				if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
				    && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
				{
					kr = kIOReturnSuccess;
				}
				user->release();
			}
			kauth_cred_unref(&cred);
		}
		return (kr);
	}

	// Remaining checks compare the task's kernel security token against
	// the console-user state; secure-console callers pass a process token.
	if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
	    sizeof(kIOClientPrivilegeSecureConsoleProcess))))
		task = (task_t)((IOUCProcessToken *)securityToken)->token;
	else
		task = (task_t)securityToken;

	count = TASK_SECURITY_TOKEN_COUNT;
	kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

	if (KERN_SUCCESS != kr)
	{}
	else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
	    sizeof(kIOClientPrivilegeAdministrator))) {
		// token.val[0] is the caller's UID; only root (0) is administrator.
		if (0 != token.val[0])
			kr = kIOReturnNotPrivileged;
	} else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
	    sizeof(kIOClientPrivilegeLocalUser))) {
		// Local user: UID merely has to own some console session.
		user = CopyConsoleUser(token.val[0]);
		if ( user )
			user->release();
		else
			kr = kIOReturnNotPrivileged;
	} else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
	    sizeof(kIOClientPrivilegeConsoleUser))) {
		// Console user: the session must be on-console; for secure console
		// the secure-input PID must also match the caller's process token.
		user = CopyConsoleUser(token.val[0]);
		if ( user ) {
			if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
				kr = kIOReturnNotPrivileged;
			else if ( secureConsole ) {
				OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
				if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
					kr = kIOReturnNotPrivileged;
			}
			user->release();
		}
		else
			kr = kIOReturnNotPrivileged;
	} else
		kr = kIOReturnUnsupported;

	return (kr);
}
1287
// Look up a single entitlement value for the given task by parsing the
// process's code-signing entitlements blob. Returns a retained OSObject
// (caller releases) or NULL when the process has no entitlements, the
// blob is malformed/oversized, or the key is absent.
OSObject * IOUserClient::copyClientEntitlement( task_t task,
                                                const char * entitlement )
{
#define MAX_ENTITLEMENTS_LEN	(128 * 1024)

	proc_t p = NULL;
	pid_t pid = 0;
	char procname[MAXCOMLEN + 1] = "";
	size_t len = 0;
	void *entitlements_blob = NULL;
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSDictionary *entitlements = NULL;
	OSString *errorString = NULL;
	OSObject *value = NULL;

	p = (proc_t)get_bsdtask_info(task);
	if (p == NULL)
		goto fail;
	pid = proc_pid(p);
	proc_name(pid, procname, (int)sizeof(procname));

	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
		goto fail;

	// The blob must at least contain the CS_GenericBlob header.
	if (len <= offsetof(CS_GenericBlob, data))
		goto fail;

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	// NOTE: from here on `len` is the payload length (header subtracted);
	// the IOFree in the cleanup path relies on this same value.
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob.  Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL)
		goto fail;
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL)
		goto fail;

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL)
		goto fail;

	/* Fetch the entitlement value from the dictionary. */
	value = entitlements->getObject(entitlement);
	if (value != NULL)
		// Extra retain: `value` must outlive the release of its parent
		// dictionary (entitlements_obj) in the cleanup below.
		value->retain();

fail:
	if (entitlements_data != NULL)
		IOFree(entitlements_data, len + 1);
	if (entitlements_obj != NULL)
		entitlements_obj->release();
	if (errorString != NULL)
		errorString->release();
	return value;
}
1363
// Base init: ensure the expansion data is allocated via reserve().
// NOTE(review): the `||` means super::init() is skipped when a property
// table already exists — presumably intentional (re-init of an object
// that was already given a table), but worth confirming; `&&` would be
// the conventional form.
bool IOUserClient::init()
{
	if (getPropertyTable() || super::init())
		return reserve();

	return false;
}
1371
// Dictionary init: same pattern as init() — skip super::init(dictionary)
// when a property table is already present, then allocate expansion data.
// NOTE(review): see init() above regarding the `||` short-circuit.
bool IOUserClient::init(OSDictionary * dictionary)
{
	if (getPropertyTable() || super::init(dictionary))
		return reserve();

	return false;
}
1379
// Task-based init: the base class ignores the owning task, security ID
// and type (subclasses typically record them) and only ensures the
// expansion data exists.
bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type )
{
	if (getPropertyTable() || super::init())
		return reserve();

	return false;
}
1389
1390 bool IOUserClient::initWithTask(task_t owningTask,
1391 void * securityID,
1392 UInt32 type,
1393 OSDictionary * properties )
1394 {
1395 bool ok;
1396
1397 ok = super::init( properties );
1398 ok &= initWithTask( owningTask, securityID, type );
1399
1400 return( ok );
1401 }
1402
1403 bool IOUserClient::reserve()
1404 {
1405 if(!reserved) {
1406 reserved = IONew(ExpansionData, 1);
1407 if (!reserved) {
1408 return false;
1409 }
1410 }
1411 setTerminateDefer(NULL, true);
1412 IOStatisticsRegisterCounter();
1413
1414 return true;
1415 }
1416
// Link record tying one owning task to one user client. Each instance is
// simultaneously on two queues: the task's list of owned user clients
// (taskLink, head from task_io_user_clients()) and the user client's list
// of owners (ucLink, head IOUserClient::owners). Guarded by
// gIOUserClientOwnersLock.
struct IOUserClientOwner
{
    task_t         task;      // owning task
    queue_chain_t  taskLink;  // linkage on the task's owner queue
    IOUserClient * uc;        // the owned user client
    queue_chain_t  ucLink;    // linkage on the client's owner queue
};
1424
1425 IOReturn
1426 IOUserClient::registerOwner(task_t task)
1427 {
1428 IOUserClientOwner * owner;
1429 IOReturn ret;
1430 bool newOwner;
1431
1432 IOLockLock(gIOUserClientOwnersLock);
1433
1434 newOwner = true;
1435 ret = kIOReturnSuccess;
1436
1437 if (!owners.next) queue_init(&owners);
1438 else
1439 {
1440 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1441 {
1442 if (task != owner->task) continue;
1443 newOwner = false;
1444 break;
1445 }
1446 }
1447 if (newOwner)
1448 {
1449 owner = IONew(IOUserClientOwner, 1);
1450 if (!newOwner) ret = kIOReturnNoMemory;
1451 else
1452 {
1453 owner->task = task;
1454 owner->uc = this;
1455 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1456 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1457 }
1458 }
1459
1460 IOLockUnlock(gIOUserClientOwnersLock);
1461
1462 return (ret);
1463 }
1464
// Called when the last send right to this client's port goes away: unlink
// and free every owner record (from both the task-side and client-side
// queues), then reset the queue head to the "never initialized" state
// (next == NULL) expected by registerOwner() and free().
void
IOUserClient::noMoreSenders(void)
{
    IOUserClientOwner * owner;

    IOLockLock(gIOUserClientOwnersLock);

    // owners.next is NULL if registerOwner() was never called.
    if (owners.next)
    {
	while (!queue_empty(&owners))
	{
	    owner = (IOUserClientOwner *)(void *) queue_first(&owners);
	    queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
	    queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
	    IODelete(owner, IOUserClientOwner, 1);
	}
	owners.next = owners.prev = NULL;
    }

    IOLockUnlock(gIOUserClientOwnersLock);
}
1486
// Task-termination hook: detach every user client the dying task owns.
// Clients left with no owners are collected on a temporary singly-linked
// "dead" list (repurposing their queue head fields) and, after the lock
// is dropped, get clientDied() delivered and their extra retain released.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
    IOUserClientOwner * owner;
    IOUserClient      * dead;
    IOUserClient      * uc;
    queue_head_t      * taskque;

    IOLockLock(gIOUserClientOwnersLock);

    taskque = task_io_user_clients(task);
    dead = NULL;
    while (!queue_empty(taskque))
    {
	owner = (IOUserClientOwner *)(void *) queue_first(taskque);
	uc = owner->uc;
	queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
	queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
	if (queue_empty(&uc->owners))
	{
	    // Retain across the unlocked clientDied() call below.
	    uc->retain();
	    IOLog("destroying out of band connect for %s\n", uc->getName());
	    // now using the uc queue head as a singly linked queue,
	    // leaving .next as NULL to mark it empty
	    uc->owners.next = NULL;
	    uc->owners.prev = (queue_entry_t) dead;
	    dead = uc;
	}
	IODelete(owner, IOUserClientOwner, 1);
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    // Deliver death notifications outside the lock.
    while (dead)
    {
	uc = dead;
	dead = (IOUserClient *)(void *) dead->owners.prev;
	uc->owners.prev = NULL;
	// Skip clientDied() if a non-shared client already closed itself.
	if (uc->sharedInstance || !uc->closed) uc->clientDied();
	uc->release();
    }

    return (KERN_SUCCESS);
}
1531
1532 void IOUserClient::free()
1533 {
1534 if( mappings) mappings->release();
1535
1536 IOStatisticsUnregisterCounter();
1537
1538 assert(!owners.next);
1539 assert(!owners.prev);
1540
1541 if (reserved) IODelete(reserved, ExpansionData, 1);
1542
1543 super::free();
1544 }
1545
1546 IOReturn IOUserClient::clientDied( void )
1547 {
1548 IOReturn ret = kIOReturnNotReady;
1549
1550 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1551 {
1552 ret = clientClose();
1553 }
1554
1555 return (ret);
1556 }
1557
1558 IOReturn IOUserClient::clientClose( void )
1559 {
1560 return( kIOReturnUnsupported );
1561 }
1562
1563 IOService * IOUserClient::getService( void )
1564 {
1565 return( 0 );
1566 }
1567
1568 IOReturn IOUserClient::registerNotificationPort(
1569 mach_port_t /* port */,
1570 UInt32 /* type */,
1571 UInt32 /* refCon */)
1572 {
1573 return( kIOReturnUnsupported);
1574 }
1575
1576 IOReturn IOUserClient::registerNotificationPort(
1577 mach_port_t port,
1578 UInt32 type,
1579 io_user_reference_t refCon)
1580 {
1581 return (registerNotificationPort(port, type, (UInt32) refCon));
1582 }
1583
1584 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1585 semaphore_t * semaphore )
1586 {
1587 return( kIOReturnUnsupported);
1588 }
1589
1590 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1591 {
1592 return( kIOReturnUnsupported);
1593 }
1594
1595 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1596 IOOptionBits * options,
1597 IOMemoryDescriptor ** memory )
1598 {
1599 return( kIOReturnUnsupported);
1600 }
1601
#if !__LP64__
// Legacy 32-bit mapping entry point; superseded by mapClientMemory64().
IOMemoryMap * IOUserClient::mapClientMemory(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	IOVirtualAddress atAddress )
{
	return NULL;
}
#endif
1612
// Map the client memory for `type` into `task`. Asks the subclass for a
// descriptor via clientMemoryForType(), overlays the caller's user-map
// options, and creates the mapping. Returns the IOMemoryMap (caller
// owns the reference) or NULL on failure.
IOMemoryMap * IOUserClient::mapClientMemory64(
	IOOptionBits type,
	task_t task,
	IOOptionBits mapFlags,
	mach_vm_address_t atAddress )
{
    IOReturn            err;
    IOOptionBits        options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap *       map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

	// Attribute the mapping to the subclass for tracking purposes.
	FAKE_STACK_FRAME(getMetaClass());

	// Caller-supplied flags only override the user-option bits; the
	// subclass keeps control of the remaining option bits.
	options = (options & ~kIOMapUserOptionsMask)
		| (mapFlags & kIOMapUserOptionsMask);
	map = memory->createMappingInTask( task, atAddress, options );
	// Balance the reference returned by clientMemoryForType().
	memory->release();

	FAKE_STACK_FRAME_END();
    }

    return( map );
}
1640
1641 IOReturn IOUserClient::exportObjectToClient(task_t task,
1642 OSObject *obj, io_object_t *clientObj)
1643 {
1644 mach_port_name_t name;
1645
1646 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1647
1648 *(mach_port_name_t *)clientObj = name;
1649 return kIOReturnSuccess;
1650 }
1651
1652 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1653 {
1654 return( 0 );
1655 }
1656
1657 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1658 {
1659 return( 0 );
1660 }
1661
1662 IOExternalTrap * IOUserClient::
1663 getExternalTrapForIndex(UInt32 index)
1664 {
1665 return NULL;
1666 }
1667
1668 #pragma clang diagnostic push
1669 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1670
1671 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1672 // functions can break clients of kexts implementing getExternalMethodForIndex()
1673 IOExternalMethod * IOUserClient::
1674 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1675 {
1676 IOExternalMethod *method = getExternalMethodForIndex(index);
1677
1678 if (method)
1679 *targetP = (IOService *) method->object;
1680
1681 return method;
1682 }
1683
1684 IOExternalAsyncMethod * IOUserClient::
1685 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1686 {
1687 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1688
1689 if (method)
1690 *targetP = (IOService *) method->object;
1691
1692 return method;
1693 }
1694
1695 IOExternalTrap * IOUserClient::
1696 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1697 {
1698 IOExternalTrap *trap = getExternalTrapForIndex(index);
1699
1700 if (trap) {
1701 *targetP = trap->object;
1702 }
1703
1704 return trap;
1705 }
1706 #pragma clang diagnostic pop
1707
1708 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1709 {
1710 mach_port_t port;
1711 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1712
1713 if (MACH_PORT_NULL != port)
1714 iokit_release_port_send(port);
1715
1716 return (kIOReturnSuccess);
1717 }
1718
1719 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1720 {
1721 if (MACH_PORT_NULL != port)
1722 iokit_release_port_send(port);
1723
1724 return (kIOReturnSuccess);
1725 }
1726
1727 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1728 IOReturn result, void *args[], UInt32 numArgs)
1729 {
1730 OSAsyncReference64 reference64;
1731 io_user_reference_t args64[kMaxAsyncArgs];
1732 unsigned int idx;
1733
1734 if (numArgs > kMaxAsyncArgs)
1735 return kIOReturnMessageTooLarge;
1736
1737 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1738 reference64[idx] = REF64(reference[idx]);
1739
1740 for (idx = 0; idx < numArgs; idx++)
1741 args64[idx] = REF64(args[idx]);
1742
1743 return (sendAsyncResult64(reference64, result, args64, numArgs));
1744 }
1745
1746 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1747 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1748 {
1749 return _sendAsyncResult64(reference, result, args, numArgs, options);
1750 }
1751
1752 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1753 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1754 {
1755 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1756 }
1757
// Build and send the async-completion Mach message to the reply port
// recorded in `reference`. Chooses a 32-bit or 64-bit wire layout based
// on the kIOUCAsync64Flag set by setAsyncReference64(). With
// kIOUserNotifyOptionCanDrop the send may time out on a full queue;
// otherwise a full queue is a hard failure.
IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
	IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    // Wire format: header plus whichever of the two layouts applies.
    struct ReplyMsg
    {
	mach_msg_header_t msgHdr;
	union
	{
	    struct
	    {
		OSNotificationHeader	 notifyHdr;
		IOAsyncCompletionContent asyncContent;
		uint32_t		 args[kMaxAsyncArgs];
	    } msg32;
	    struct
	    {
		OSNotificationHeader64	 notifyHdr;
		IOAsyncCompletionContent asyncContent;
		io_user_reference_t	 args[kMaxAsyncArgs] __attribute__ ((packed));
	    } msg64;
	} m;
    };
    ReplyMsg      replyMsg;
    mach_port_t	  replyPort;
    kern_return_t kr;

    // If no reply port, do nothing.
    replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
    if (replyPort == MACH_PORT_NULL)
	return kIOReturnSuccess;

    if (numArgs > kMaxAsyncArgs)
	return kIOReturnMessageTooLarge;

    bzero(&replyMsg, sizeof(replyMsg));
    replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
					       0 /*local*/);
    replyMsg.msgHdr.msgh_remote_port = replyPort;
    replyMsg.msgHdr.msgh_local_port  = 0;
    replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
    if (kIOUCAsync64Flag & reference[0])
    {
	// 64-bit client: reference and args travel at full width; trim the
	// message size down to only the args actually supplied.
	replyMsg.msgHdr.msgh_size =
	    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
	    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
	replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
					    + numArgs * sizeof(io_user_reference_t);
	replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
	bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

	replyMsg.m.msg64.asyncContent.result = result;
	if (numArgs)
	    bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
    }
    else
    {
	unsigned int idx;

	// 32-bit client: truncate reference entries and args via REF32.
	replyMsg.msgHdr.msgh_size =
	    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
	    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

	replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
					    + numArgs * sizeof(uint32_t);
	replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

	for (idx = 0; idx < kOSAsyncRefCount; idx++)
	    replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);

	replyMsg.m.msg32.asyncContent.result = result;

	for (idx = 0; idx < numArgs; idx++)
	    replyMsg.m.msg32.args[idx] = REF32(args[idx]);
    }

    if ((options & kIOUserNotifyOptionCanDrop) != 0) {
	// Droppable: zero timeout means "give up immediately if queue full".
	kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
	     replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
    } else {
	/* Fail on full queue. */
	kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
	     replyMsg.msgHdr.msgh_size);
    }
    // Log a real send failure once per reference; timeouts are expected
    // for droppable notifications.
    if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
    {
	reference[0] |= kIOUCAsyncErrorLoggedFlag;
	IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
    }
    return kr;
}
1848
1849
1850 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1851
1852 extern "C" {
1853
// CHECK: declare `out` as `cls *`; bail out of the enclosing MIG routine
// with kIOReturnBadArgument unless `obj` dynamic-casts to `cls`.
#define CHECK(cls,obj,out)			\
	cls * out;				\
	if( !(out = OSDynamicCast( cls, obj)))	\
	    return( kIOReturnBadArgument )

// CHECKLOCKED: like CHECK, but `obj` must be an IOUserIterator wrapper;
// unwraps it into `oIter` then casts the wrapped iterator object to `cls`.
#define CHECKLOCKED(cls,obj,out)					\
	IOUserIterator * oIter;						\
	cls * out;							\
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))		\
	    return (kIOReturnBadArgument);				\
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject)))	\
	    return (kIOReturnBadArgument)
1866
1867 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1868
1869 // Create a vm_map_copy_t or kalloc'ed data for memory
1870 // to be copied out. ipc will free after the copyout.
1871
// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.
//
// Wraps `len` bytes of kernel memory at `data` in a vm_map_copy_t for a
// MIG out-of-line reply. On success *buf owns the copy object; the IPC
// layer consumes it after copyout. src_destroy is false, so the source
// kernel memory is left intact.
static kern_return_t copyoutkdata( const void * data, vm_size_t len,
                                   io_buf_ptr_t * buf )
{
    kern_return_t	err;
    vm_map_copy_t	copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
                    false /* src_destroy */, &copy);

    // copyin from kernel_map should not fail for valid kernel memory.
    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
	*buf = (char *) copy;

    return( err );
}
1887
1888 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1889
1890 /* Routine io_server_version */
1891 kern_return_t is_io_server_version(
1892 mach_port_t master_port,
1893 uint64_t *version)
1894 {
1895 *version = IOKIT_SERVER_VERSION;
1896 return (kIOReturnSuccess);
1897 }
1898
1899 /* Routine io_object_get_class */
1900 kern_return_t is_io_object_get_class(
1901 io_object_t object,
1902 io_name_t className )
1903 {
1904 const OSMetaClass* my_obj = NULL;
1905
1906 if( !object)
1907 return( kIOReturnBadArgument );
1908
1909 my_obj = object->getMetaClass();
1910 if (!my_obj) {
1911 return (kIOReturnNotFound);
1912 }
1913
1914 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1915
1916 return( kIOReturnSuccess );
1917 }
1918
1919 /* Routine io_object_get_superclass */
1920 kern_return_t is_io_object_get_superclass(
1921 mach_port_t master_port,
1922 io_name_t obj_name,
1923 io_name_t class_name)
1924 {
1925 const OSMetaClass* my_obj = NULL;
1926 const OSMetaClass* superclass = NULL;
1927 const OSSymbol *my_name = NULL;
1928 const char *my_cstr = NULL;
1929
1930 if (!obj_name || !class_name)
1931 return (kIOReturnBadArgument);
1932
1933 if( master_port != master_device_port)
1934 return( kIOReturnNotPrivileged);
1935
1936 my_name = OSSymbol::withCString(obj_name);
1937
1938 if (my_name) {
1939 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1940 my_name->release();
1941 }
1942 if (my_obj) {
1943 superclass = my_obj->getSuperClass();
1944 }
1945
1946 if (!superclass) {
1947 return( kIOReturnNotFound );
1948 }
1949
1950 my_cstr = superclass->getClassName();
1951
1952 if (my_cstr) {
1953 strlcpy(class_name, my_cstr, sizeof(io_name_t));
1954 return( kIOReturnSuccess );
1955 }
1956 return (kIOReturnNotFound);
1957 }
1958
1959 /* Routine io_object_get_bundle_identifier */
1960 kern_return_t is_io_object_get_bundle_identifier(
1961 mach_port_t master_port,
1962 io_name_t obj_name,
1963 io_name_t bundle_name)
1964 {
1965 const OSMetaClass* my_obj = NULL;
1966 const OSSymbol *my_name = NULL;
1967 const OSSymbol *identifier = NULL;
1968 const char *my_cstr = NULL;
1969
1970 if (!obj_name || !bundle_name)
1971 return (kIOReturnBadArgument);
1972
1973 if( master_port != master_device_port)
1974 return( kIOReturnNotPrivileged);
1975
1976 my_name = OSSymbol::withCString(obj_name);
1977
1978 if (my_name) {
1979 my_obj = OSMetaClass::getMetaClassWithName(my_name);
1980 my_name->release();
1981 }
1982
1983 if (my_obj) {
1984 identifier = my_obj->getKmodName();
1985 }
1986 if (!identifier) {
1987 return( kIOReturnNotFound );
1988 }
1989
1990 my_cstr = identifier->getCStringNoCopy();
1991 if (my_cstr) {
1992 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1993 return( kIOReturnSuccess );
1994 }
1995
1996 return (kIOReturnBadArgument);
1997 }
1998
1999 /* Routine io_object_conforms_to */
2000 kern_return_t is_io_object_conforms_to(
2001 io_object_t object,
2002 io_name_t className,
2003 boolean_t *conforms )
2004 {
2005 if( !object)
2006 return( kIOReturnBadArgument );
2007
2008 *conforms = (0 != object->metaCast( className ));
2009
2010 return( kIOReturnSuccess );
2011 }
2012
2013 /* Routine io_object_get_retain_count */
2014 kern_return_t is_io_object_get_retain_count(
2015 io_object_t object,
2016 uint32_t *retainCount )
2017 {
2018 if( !object)
2019 return( kIOReturnBadArgument );
2020
2021 *retainCount = object->getRetainCount();
2022 return( kIOReturnSuccess );
2023 }
2024
2025 /* Routine io_iterator_next */
2026 kern_return_t is_io_iterator_next(
2027 io_object_t iterator,
2028 io_object_t *object )
2029 {
2030 IOReturn ret;
2031 OSObject * obj;
2032
2033 CHECK( OSIterator, iterator, iter );
2034
2035 obj = iter->getNextObject();
2036 if( obj) {
2037 obj->retain();
2038 *object = obj;
2039 ret = kIOReturnSuccess;
2040 } else
2041 ret = kIOReturnNoDevice;
2042
2043 return (ret);
2044 }
2045
2046 /* Routine io_iterator_reset */
2047 kern_return_t is_io_iterator_reset(
2048 io_object_t iterator )
2049 {
2050 CHECK( OSIterator, iterator, iter );
2051
2052 iter->reset();
2053
2054 return( kIOReturnSuccess );
2055 }
2056
2057 /* Routine io_iterator_is_valid */
2058 kern_return_t is_io_iterator_is_valid(
2059 io_object_t iterator,
2060 boolean_t *is_valid )
2061 {
2062 CHECK( OSIterator, iterator, iter );
2063
2064 *is_valid = iter->isValid();
2065
2066 return( kIOReturnSuccess );
2067 }
2068
2069
2070 static kern_return_t internal_io_service_match_property_table(
2071 io_service_t _service,
2072 const char * matching,
2073 mach_msg_type_number_t matching_size,
2074 boolean_t *matches)
2075 {
2076 CHECK( IOService, _service, service );
2077
2078 kern_return_t kr;
2079 OSObject * obj;
2080 OSDictionary * dict;
2081
2082 assert(matching_size);
2083 obj = OSUnserializeXML(matching, matching_size);
2084
2085 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2086 *matches = service->passiveMatch( dict );
2087 kr = kIOReturnSuccess;
2088 } else
2089 kr = kIOReturnBadArgument;
2090
2091 if( obj)
2092 obj->release();
2093
2094 return( kr );
2095 }
2096
2097 /* Routine io_service_match_property_table */
2098 kern_return_t is_io_service_match_property_table(
2099 io_service_t service,
2100 io_string_t matching,
2101 boolean_t *matches )
2102 {
2103 return (kIOReturnUnsupported);
2104 }
2105
2106
2107 /* Routine io_service_match_property_table_ool */
/* Routine io_service_match_property_table_ool */
// Out-of-line variant: the matching dictionary arrives as a vm_map_copy_t.
// Map it into the kernel, run the in-band matcher, and report its status
// through *result (the routine's own return reflects only the copyout).
kern_return_t is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
    kern_return_t	  kr;
    vm_offset_t 	  data;
    vm_map_offset_t	  map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
	*result = internal_io_service_match_property_table(service,
		(const char *)data, matchingCnt, matches );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2131
2132 /* Routine io_service_match_property_table_bin */
2133 kern_return_t is_io_service_match_property_table_bin(
2134 io_object_t service,
2135 io_struct_inband_t matching,
2136 mach_msg_type_number_t matchingCnt,
2137 boolean_t *matches)
2138 {
2139 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2140 }
2141
2142 static kern_return_t internal_io_service_get_matching_services(
2143 mach_port_t master_port,
2144 const char * matching,
2145 mach_msg_type_number_t matching_size,
2146 io_iterator_t *existing )
2147 {
2148 kern_return_t kr;
2149 OSObject * obj;
2150 OSDictionary * dict;
2151
2152 if( master_port != master_device_port)
2153 return( kIOReturnNotPrivileged);
2154
2155 assert(matching_size);
2156 obj = OSUnserializeXML(matching, matching_size);
2157
2158 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2159 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2160 kr = kIOReturnSuccess;
2161 } else
2162 kr = kIOReturnBadArgument;
2163
2164 if( obj)
2165 obj->release();
2166
2167 return( kr );
2168 }
2169
2170 /* Routine io_service_get_matching_services */
2171 kern_return_t is_io_service_get_matching_services(
2172 mach_port_t master_port,
2173 io_string_t matching,
2174 io_iterator_t *existing )
2175 {
2176 return (kIOReturnUnsupported);
2177 }
2178
2179 /* Routine io_service_get_matching_services_ool */
/* Routine io_service_get_matching_services_ool */
// Out-of-line variant: map the serialized matching dictionary into the
// kernel, run the shared implementation, and report its status through
// *result. The routine's own return reflects only the copyout.
kern_return_t is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
    kern_return_t	kr;
    vm_offset_t 	data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
	// and mig will copy out objects on success
	*existing = 0;
	*result = internal_io_service_get_matching_services(master_port,
			(const char *) data, matchingCnt, existing);
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2205
2206 /* Routine io_service_get_matching_services_bin */
2207 kern_return_t is_io_service_get_matching_services_bin(
2208 mach_port_t master_port,
2209 io_struct_inband_t matching,
2210 mach_msg_type_number_t matchingCnt,
2211 io_object_t *existing)
2212 {
2213 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2214 }
2215
2216
2217 static kern_return_t internal_io_service_get_matching_service(
2218 mach_port_t master_port,
2219 const char * matching,
2220 mach_msg_type_number_t matching_size,
2221 io_service_t *service )
2222 {
2223 kern_return_t kr;
2224 OSObject * obj;
2225 OSDictionary * dict;
2226
2227 if( master_port != master_device_port)
2228 return( kIOReturnNotPrivileged);
2229
2230 assert(matching_size);
2231 obj = OSUnserializeXML(matching, matching_size);
2232
2233 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2234 *service = IOService::copyMatchingService( dict );
2235 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2236 } else
2237 kr = kIOReturnBadArgument;
2238
2239 if( obj)
2240 obj->release();
2241
2242 return( kr );
2243 }
2244
2245 /* Routine io_service_get_matching_service */
2246 kern_return_t is_io_service_get_matching_service(
2247 mach_port_t master_port,
2248 io_string_t matching,
2249 io_service_t *service )
2250 {
2251 return (kIOReturnUnsupported);
2252 }
2253
/* Routine io_service_get_matching_services_ool */
// Out-of-line variant: map the serialized matching dictionary into the
// kernel, run the shared single-service lookup, and report its status
// through *result. The routine's own return reflects only the copyout.
kern_return_t is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
    kern_return_t	kr;
    vm_offset_t 	data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
	// and mig will copy out objects on success
	*service = 0;
	*result = internal_io_service_get_matching_service(master_port,
			(const char *) data, matchingCnt, service );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2280
2281 /* Routine io_service_get_matching_service_bin */
2282 kern_return_t is_io_service_get_matching_service_bin(
2283 mach_port_t master_port,
2284 io_struct_inband_t matching,
2285 mach_msg_type_number_t matchingCnt,
2286 io_object_t *service)
2287 {
2288 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2289 }
2290
// Shared implementation for the add-notification MIG routines: create an
// IOServiceUserNotification that forwards matching-lifecycle events
// (publish / match / terminate) for services matching `matching` to the
// caller's `port`. Master-port restricted. On success *notification gets
// a retained user-notification object.
static kern_return_t internal_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
    IOServiceUserNotification *	userNotify = 0;
    IONotifier *		notify = 0;
    const OSSymbol *		sym;
    OSDictionary *		dict;
    IOReturn			err;
    unsigned long int		userMsgType;

    if( master_port != master_device_port)
	return( kIOReturnNotPrivileged);

    do {
        err = kIOReturnNoResources;

	// NOTE(review): when the symbol allocation fails this does not
	// `continue`, so execution falls through with sym == NULL and the
	// type comparisons below all miss — confirm this is intended.
        if( !(sym = OSSymbol::withCString( notification_type )))
	    err = kIOReturnNoResources;

	assert(matching_size);
	dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
	if (!dict) {
	    err = kIOReturnBadArgument;
	    // NOTE(review): on this path `port`'s send right is not
	    // released, unlike the init-failure path below — verify whether
	    // the caller or MIG recovers it.
	    continue;
	}

	// Map the notification-type symbol to the message type delivered
	// to user space; unknown symbols get the out-of-range sentinel.
	if( (sym == gIOPublishNotification)
	 || (sym == gIOFirstPublishNotification))
	    userMsgType = kIOServicePublishNotificationType;
	else if( (sym == gIOMatchedNotification)
	      || (sym == gIOFirstMatchNotification))
	    userMsgType = kIOServiceMatchedNotificationType;
	else if( sym == gIOTerminatedNotification)
	    userMsgType = kIOServiceTerminatedNotificationType;
	else
	    userMsgType = kLastIOKitNotificationType;

        userNotify = new IOServiceUserNotification;

        if( userNotify && !userNotify->init( port, userMsgType,
                                             reference, referenceSize, client64)) {
	    // init failed: this path owns the port send right; drop it.
	    iokit_release_port_send(port);
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
	    continue;

        notify = IOService::addMatchingNotification( sym, dict,
                                                     &userNotify->_handler, userNotify );
	if( notify) {
            *notification = userNotify;
	    userNotify->setNotification( notify );
	    err = kIOReturnSuccess;
	} else
	    err = kIOReturnUnsupported;

    } while( false );

    if( sym)
	sym->release();
    if( dict)
	dict->release();

    return( err );
}
2365
2366
2367 /* Routine io_service_add_notification */
2368 kern_return_t is_io_service_add_notification(
2369 mach_port_t master_port,
2370 io_name_t notification_type,
2371 io_string_t matching,
2372 mach_port_t port,
2373 io_async_ref_t reference,
2374 mach_msg_type_number_t referenceCnt,
2375 io_object_t * notification )
2376 {
2377 return (kIOReturnUnsupported);
2378 }
2379
2380 /* Routine io_service_add_notification_64 */
2381 kern_return_t is_io_service_add_notification_64(
2382 mach_port_t master_port,
2383 io_name_t notification_type,
2384 io_string_t matching,
2385 mach_port_t wake_port,
2386 io_async_ref64_t reference,
2387 mach_msg_type_number_t referenceCnt,
2388 io_object_t *notification )
2389 {
2390 return (kIOReturnUnsupported);
2391 }
2392
2393 /* Routine io_service_add_notification_bin */
2394 kern_return_t is_io_service_add_notification_bin
2395 (
2396 mach_port_t master_port,
2397 io_name_t notification_type,
2398 io_struct_inband_t matching,
2399 mach_msg_type_number_t matchingCnt,
2400 mach_port_t wake_port,
2401 io_async_ref_t reference,
2402 mach_msg_type_number_t referenceCnt,
2403 io_object_t *notification)
2404 {
2405 return (internal_io_service_add_notification(master_port, notification_type,
2406 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2407 false, notification));
2408 }
2409
2410 /* Routine io_service_add_notification_bin_64 */
2411 kern_return_t is_io_service_add_notification_bin_64
2412 (
2413 mach_port_t master_port,
2414 io_name_t notification_type,
2415 io_struct_inband_t matching,
2416 mach_msg_type_number_t matchingCnt,
2417 mach_port_t wake_port,
2418 io_async_ref64_t reference,
2419 mach_msg_type_number_t referenceCnt,
2420 io_object_t *notification)
2421 {
2422 return (internal_io_service_add_notification(master_port, notification_type,
2423 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2424 true, notification));
2425 }
2426
/*
 * Common helper for the out-of-line add_notification MIG routines.
 * "matching" arrives as a vm_map_copy_t; it is copied out into the kernel
 * map, handed to internal_io_service_add_notification(), then deallocated.
 *
 * Note the split error reporting: the MIG return value (kr) reflects only
 * the vm_map_copyout(), while the notification status travels via *result.
 * MIG copies out reply objects only when the routine returns KERN_SUCCESS,
 * and vm_map_copyout() consumes the copy object on success, so returning
 * failure after a successful copyout would be wrong on both counts.
 */
static kern_return_t internal_io_service_add_notification_ool(
    mach_port_t master_port,
    io_name_t notification_type,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    mach_port_t wake_port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    kern_return_t *result,
    io_object_t *notification )
{
    kern_return_t kr;
    vm_offset_t data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *notification = 0;
        *result = internal_io_service_add_notification( master_port, notification_type,
            (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
        // Release the kernel mapping of the caller's matching data.
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2457
2458 /* Routine io_service_add_notification_ool */
2459 kern_return_t is_io_service_add_notification_ool(
2460 mach_port_t master_port,
2461 io_name_t notification_type,
2462 io_buf_ptr_t matching,
2463 mach_msg_type_number_t matchingCnt,
2464 mach_port_t wake_port,
2465 io_async_ref_t reference,
2466 mach_msg_type_number_t referenceCnt,
2467 kern_return_t *result,
2468 io_object_t *notification )
2469 {
2470 return (internal_io_service_add_notification_ool(master_port, notification_type,
2471 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2472 false, result, notification));
2473 }
2474
2475 /* Routine io_service_add_notification_ool_64 */
2476 kern_return_t is_io_service_add_notification_ool_64(
2477 mach_port_t master_port,
2478 io_name_t notification_type,
2479 io_buf_ptr_t matching,
2480 mach_msg_type_number_t matchingCnt,
2481 mach_port_t wake_port,
2482 io_async_ref64_t reference,
2483 mach_msg_type_number_t referenceCnt,
2484 kern_return_t *result,
2485 io_object_t *notification )
2486 {
2487 return (internal_io_service_add_notification_ool(master_port, notification_type,
2488 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2489 true, result, notification));
2490 }
2491
2492 /* Routine io_service_add_notification_old */
2493 kern_return_t is_io_service_add_notification_old(
2494 mach_port_t master_port,
2495 io_name_t notification_type,
2496 io_string_t matching,
2497 mach_port_t port,
2498 // for binary compatibility reasons, this must be natural_t for ILP32
2499 natural_t ref,
2500 io_object_t * notification )
2501 {
2502 return( is_io_service_add_notification( master_port, notification_type,
2503 matching, port, &ref, 1, notification ));
2504 }
2505
2506
2507 static kern_return_t internal_io_service_add_interest_notification(
2508 io_object_t _service,
2509 io_name_t type_of_interest,
2510 mach_port_t port,
2511 void * reference,
2512 vm_size_t referenceSize,
2513 bool client64,
2514 io_object_t * notification )
2515 {
2516
2517 IOServiceMessageUserNotification * userNotify = 0;
2518 IONotifier * notify = 0;
2519 const OSSymbol * sym;
2520 IOReturn err;
2521
2522 CHECK( IOService, _service, service );
2523
2524 err = kIOReturnNoResources;
2525 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2526
2527 userNotify = new IOServiceMessageUserNotification;
2528
2529 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2530 reference, referenceSize,
2531 kIOUserNotifyMaxMessageSize,
2532 client64 )) {
2533 iokit_release_port_send(port);
2534 userNotify->release();
2535 userNotify = 0;
2536 }
2537 if( !userNotify)
2538 continue;
2539
2540 notify = service->registerInterest( sym,
2541 &userNotify->_handler, userNotify );
2542 if( notify) {
2543 *notification = userNotify;
2544 userNotify->setNotification( notify );
2545 err = kIOReturnSuccess;
2546 } else
2547 err = kIOReturnUnsupported;
2548
2549 sym->release();
2550
2551 } while( false );
2552
2553 return( err );
2554 }
2555
2556 /* Routine io_service_add_message_notification */
2557 kern_return_t is_io_service_add_interest_notification(
2558 io_object_t service,
2559 io_name_t type_of_interest,
2560 mach_port_t port,
2561 io_async_ref_t reference,
2562 mach_msg_type_number_t referenceCnt,
2563 io_object_t * notification )
2564 {
2565 return (internal_io_service_add_interest_notification(service, type_of_interest,
2566 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2567 }
2568
2569 /* Routine io_service_add_interest_notification_64 */
2570 kern_return_t is_io_service_add_interest_notification_64(
2571 io_object_t service,
2572 io_name_t type_of_interest,
2573 mach_port_t wake_port,
2574 io_async_ref64_t reference,
2575 mach_msg_type_number_t referenceCnt,
2576 io_object_t *notification )
2577 {
2578 return (internal_io_service_add_interest_notification(service, type_of_interest,
2579 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2580 }
2581
2582
2583 /* Routine io_service_acknowledge_notification */
2584 kern_return_t is_io_service_acknowledge_notification(
2585 io_object_t _service,
2586 natural_t notify_ref,
2587 natural_t response )
2588 {
2589 CHECK( IOService, _service, service );
2590
2591 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2592 (IOOptionBits) response ));
2593
2594 }
2595
2596 /* Routine io_connect_get_semaphore */
2597 kern_return_t is_io_connect_get_notification_semaphore(
2598 io_connect_t connection,
2599 natural_t notification_type,
2600 semaphore_t *semaphore )
2601 {
2602 CHECK( IOUserClient, connection, client );
2603
2604 IOStatisticsClientCall();
2605 return( client->getNotificationSemaphore( (UInt32) notification_type,
2606 semaphore ));
2607 }
2608
2609 /* Routine io_registry_get_root_entry */
2610 kern_return_t is_io_registry_get_root_entry(
2611 mach_port_t master_port,
2612 io_object_t *root )
2613 {
2614 IORegistryEntry * entry;
2615
2616 if( master_port != master_device_port)
2617 return( kIOReturnNotPrivileged);
2618
2619 entry = IORegistryEntry::getRegistryRoot();
2620 if( entry)
2621 entry->retain();
2622 *root = entry;
2623
2624 return( kIOReturnSuccess );
2625 }
2626
2627 /* Routine io_registry_create_iterator */
2628 kern_return_t is_io_registry_create_iterator(
2629 mach_port_t master_port,
2630 io_name_t plane,
2631 uint32_t options,
2632 io_object_t *iterator )
2633 {
2634 if( master_port != master_device_port)
2635 return( kIOReturnNotPrivileged);
2636
2637 *iterator = IOUserIterator::withIterator(
2638 IORegistryIterator::iterateOver(
2639 IORegistryEntry::getPlane( plane ), options ));
2640
2641 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2642 }
2643
2644 /* Routine io_registry_entry_create_iterator */
2645 kern_return_t is_io_registry_entry_create_iterator(
2646 io_object_t registry_entry,
2647 io_name_t plane,
2648 uint32_t options,
2649 io_object_t *iterator )
2650 {
2651 CHECK( IORegistryEntry, registry_entry, entry );
2652
2653 *iterator = IOUserIterator::withIterator(
2654 IORegistryIterator::iterateOver( entry,
2655 IORegistryEntry::getPlane( plane ), options ));
2656
2657 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2658 }
2659
/* Routine io_registry_iterator_enter */
kern_return_t is_io_registry_iterator_enter_entry(
    io_object_t iterator )
{
    // CHECKLOCKED validates the object type and introduces both "iter"
    // (the IORegistryIterator) and "oIter" (the wrapper object whose lock
    // serializes user-space iteration calls).
    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    iter->enterEntry();
    IOLockUnlock(oIter->lock);

    return( kIOReturnSuccess );
}
2672
/* Routine io_registry_iterator_exit */
kern_return_t is_io_registry_iterator_exit_entry(
    io_object_t iterator )
{
    bool didIt;

    // CHECKLOCKED validates the object type and introduces both "iter"
    // (the IORegistryIterator) and "oIter" (the wrapper object whose lock
    // serializes user-space iteration calls).
    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    didIt = iter->exitEntry();
    IOLockUnlock(oIter->lock);

    // exitEntry() returning false means there was no level to pop.
    return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
}
2687
2688 /* Routine io_registry_entry_from_path */
2689 kern_return_t is_io_registry_entry_from_path(
2690 mach_port_t master_port,
2691 io_string_t path,
2692 io_object_t *registry_entry )
2693 {
2694 IORegistryEntry * entry;
2695
2696 if( master_port != master_device_port)
2697 return( kIOReturnNotPrivileged);
2698
2699 entry = IORegistryEntry::fromPath( path );
2700
2701 *registry_entry = entry;
2702
2703 return( kIOReturnSuccess );
2704 }
2705
2706
/* Routine io_registry_entry_from_path */
/*
 * Out-of-line variant: the path arrives either inband (preferred when
 * non-empty) or as a vm_map_copy_t in path_ool. Errors from the copyout
 * itself are returned as the MIG status (err); lookup errors travel via
 * *result, because MIG only copies out reply objects on KERN_SUCCESS.
 */
kern_return_t is_io_registry_entry_from_path_ool(
    mach_port_t master_port,
    io_string_inband_t path,
    io_buf_ptr_t path_ool,
    mach_msg_type_number_t path_oolCnt,
    kern_return_t *result,
    io_object_t *registry_entry)
{
    IORegistryEntry * entry;
    vm_map_offset_t map_data;
    const char * cpath;
    IOReturn res;
    kern_return_t err;

    if (master_port != master_device_port) return(kIOReturnNotPrivileged);

    map_data = 0;
    entry = 0;
    res = err = KERN_SUCCESS;
    // An inband path takes precedence over the OOL buffer.
    if (path[0]) cpath = path;
    else
    {
        if (!path_oolCnt) return(kIOReturnBadArgument);
        // Cap the OOL path length to bound the kernel mapping.
        if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

        err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
        if (KERN_SUCCESS == err)
        {
            // must return success to mig after vm_map_copyout() succeeds, so result is actual
            cpath = CAST_DOWN(const char *, map_data);
            // Reject an OOL path that is not NUL-terminated.
            if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
        }
    }

    if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
    {
        entry = IORegistryEntry::fromPath(cpath);
        res = entry ? kIOReturnSuccess : kIOReturnNotFound;
    }

    // Drop the kernel mapping of the OOL path, if one was created.
    if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);

    if (KERN_SUCCESS != err) res = err;
    *registry_entry = entry;
    *result = res;

    return (err);
}
2756
2757
2758 /* Routine io_registry_entry_in_plane */
2759 kern_return_t is_io_registry_entry_in_plane(
2760 io_object_t registry_entry,
2761 io_name_t plane,
2762 boolean_t *inPlane )
2763 {
2764 CHECK( IORegistryEntry, registry_entry, entry );
2765
2766 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2767
2768 return( kIOReturnSuccess );
2769 }
2770
2771
2772 /* Routine io_registry_entry_get_path */
2773 kern_return_t is_io_registry_entry_get_path(
2774 io_object_t registry_entry,
2775 io_name_t plane,
2776 io_string_t path )
2777 {
2778 int length;
2779 CHECK( IORegistryEntry, registry_entry, entry );
2780
2781 length = sizeof( io_string_t);
2782 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2783 return( kIOReturnSuccess );
2784 else
2785 return( kIOReturnBadArgument );
2786 }
2787
/* Routine io_registry_entry_get_path */
/*
 * Two-tier path retrieval: first try the fixed-size inband buffer; when
 * the path does not fit, retry into a kMaxPath-byte kernel buffer and ship
 * it to the caller out-of-line via copyoutkdata().
 */
kern_return_t is_io_registry_entry_get_path_ool(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_inband_t path,
    io_buf_ptr_t *path_ool,
    mach_msg_type_number_t *path_oolCnt)
{
    enum { kMaxPath = 16384 };
    IOReturn err;
    int length;
    char * buf;

    CHECK( IORegistryEntry, registry_entry, entry );

    *path_ool = NULL;
    *path_oolCnt = 0;
    length = sizeof(io_string_inband_t);
    // Fast path: the path fits inband.
    if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
    else
    {
        length = kMaxPath;
        buf = IONew(char, length);
        if (!buf) err = kIOReturnNoMemory;
        else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
        else
        {
            // getPath() updated "length" to the actual path size.
            *path_oolCnt = length;
            err = copyoutkdata(buf, length, path_ool);
        }
        // Free with the allocation size (kMaxPath), not the shortened length.
        if (buf) IODelete(buf, char, kMaxPath);
    }

    return (err);
}
2823
2824
/* Routine io_registry_entry_get_name */
kern_return_t is_io_registry_entry_get_name(
    io_object_t registry_entry,
    io_name_t name )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    // strncpy zero-fills the remainder of the fixed-size reply buffer,
    // which avoids copying stale kernel bytes out to user space.
    // NOTE(review): it does not NUL-terminate if the name is >=
    // sizeof(io_name_t) bytes — presumably registry names are shorter;
    // confirm before relying on termination.
    strncpy( name, entry->getName(), sizeof( io_name_t));

    return( kIOReturnSuccess );
}
2836
/* Routine io_registry_entry_get_name_in_plane */
kern_return_t is_io_registry_entry_get_name_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t name )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    // An empty plane name selects the entry's plane-independent name
    // (getName() is called with plane == 0).
    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    // strncpy zero-fills the rest of the fixed-size reply buffer; see the
    // termination caveat in is_io_registry_entry_get_name().
    strncpy( name, entry->getName( plane), sizeof( io_name_t));

    return( kIOReturnSuccess );
}
2855
2856 /* Routine io_registry_entry_get_location_in_plane */
2857 kern_return_t is_io_registry_entry_get_location_in_plane(
2858 io_object_t registry_entry,
2859 io_name_t planeName,
2860 io_name_t location )
2861 {
2862 const IORegistryPlane * plane;
2863 CHECK( IORegistryEntry, registry_entry, entry );
2864
2865 if( planeName[0])
2866 plane = IORegistryEntry::getPlane( planeName );
2867 else
2868 plane = 0;
2869
2870 const char * cstr = entry->getLocation( plane );
2871
2872 if( cstr) {
2873 strncpy( location, cstr, sizeof( io_name_t));
2874 return( kIOReturnSuccess );
2875 } else
2876 return( kIOReturnNotFound );
2877 }
2878
2879 /* Routine io_registry_entry_get_registry_entry_id */
2880 kern_return_t is_io_registry_entry_get_registry_entry_id(
2881 io_object_t registry_entry,
2882 uint64_t *entry_id )
2883 {
2884 CHECK( IORegistryEntry, registry_entry, entry );
2885
2886 *entry_id = entry->getRegistryEntryID();
2887
2888 return (kIOReturnSuccess);
2889 }
2890
/* Routine io_registry_entry_get_property */
/*
 * Returns a property's raw bytes inband. Supports OSData, OSString,
 * OSBoolean and OSNumber; anything else yields kIOReturnBadArgument.
 * If the caller's buffer is too small the routine fails with
 * kIOReturnIPCError (it does not truncate).
 */
kern_return_t is_io_registry_entry_get_property_bytes(
    io_object_t registry_entry,
    io_name_t property_name,
    io_struct_inband_t buf,
    mach_msg_type_number_t *dataCnt )
{
    OSObject * obj;
    OSData * data;
    OSString * str;
    OSBoolean * boo;
    OSNumber * off;
    UInt64 offsetBytes;
    unsigned int len = 0;
    const void * bytes = 0;
    IOReturn ret = kIOReturnSuccess;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy may veto access to individual properties.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNoResources );

    // One day OSData will be a common container base class
    // until then...
    if( (data = OSDynamicCast( OSData, obj ))) {
        len = data->getLength();
        bytes = data->getBytesNoCopy();
        // Non-serializable data is treated as empty rather than disclosed.
        if (!data->isSerializable()) len = 0;

    } else if( (str = OSDynamicCast( OSString, obj ))) {
        // +1 to include the NUL terminator in the copied bytes.
        len = str->getLength() + 1;
        bytes = str->getCStringNoCopy();

    } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
        // Booleans are rendered as the literal strings "Yes" / "No".
        len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
        bytes = boo->isTrue() ? "Yes" : "No";

    } else if( (off = OSDynamicCast( OSNumber, obj ))) {
        // Copy only the number's significant bytes, capped at 8.
        offsetBytes = off->unsigned64BitValue();
        len = off->numberOfBytes();
        if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
        bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
        // On big-endian the significant bytes sit at the high end of the
        // 64-bit value; advance the pointer accordingly.
        bytes = (const void *)
            (((UInt32) bytes) + (sizeof( UInt64) - len));
#endif

    } else
        ret = kIOReturnBadArgument;

    if( bytes) {
        if( *dataCnt < len)
            ret = kIOReturnIPCError;
        else {
            *dataCnt = len;
            bcopy( bytes, buf, len );
        }
    }
    obj->release();

    return( ret );
}
2959
2960
2961 /* Routine io_registry_entry_get_property */
2962 kern_return_t is_io_registry_entry_get_property(
2963 io_object_t registry_entry,
2964 io_name_t property_name,
2965 io_buf_ptr_t *properties,
2966 mach_msg_type_number_t *propertiesCnt )
2967 {
2968 kern_return_t err;
2969 vm_size_t len;
2970 OSObject * obj;
2971
2972 CHECK( IORegistryEntry, registry_entry, entry );
2973
2974 #if CONFIG_MACF
2975 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2976 return kIOReturnNotPermitted;
2977 #endif
2978
2979 obj = entry->copyProperty(property_name);
2980 if( !obj)
2981 return( kIOReturnNotFound );
2982
2983 OSSerialize * s = OSSerialize::withCapacity(4096);
2984 if( !s) {
2985 obj->release();
2986 return( kIOReturnNoMemory );
2987 }
2988
2989 if( obj->serialize( s )) {
2990 len = s->getLength();
2991 *propertiesCnt = len;
2992 err = copyoutkdata( s->text(), len, properties );
2993
2994 } else
2995 err = kIOReturnUnsupported;
2996
2997 s->release();
2998 obj->release();
2999
3000 return( err );
3001 }
3002
3003 /* Routine io_registry_entry_get_property_recursively */
3004 kern_return_t is_io_registry_entry_get_property_recursively(
3005 io_object_t registry_entry,
3006 io_name_t plane,
3007 io_name_t property_name,
3008 uint32_t options,
3009 io_buf_ptr_t *properties,
3010 mach_msg_type_number_t *propertiesCnt )
3011 {
3012 kern_return_t err;
3013 vm_size_t len;
3014 OSObject * obj;
3015
3016 CHECK( IORegistryEntry, registry_entry, entry );
3017
3018 #if CONFIG_MACF
3019 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3020 return kIOReturnNotPermitted;
3021 #endif
3022
3023 obj = entry->copyProperty( property_name,
3024 IORegistryEntry::getPlane( plane ), options );
3025 if( !obj)
3026 return( kIOReturnNotFound );
3027
3028 OSSerialize * s = OSSerialize::withCapacity(4096);
3029 if( !s) {
3030 obj->release();
3031 return( kIOReturnNoMemory );
3032 }
3033
3034 if( obj->serialize( s )) {
3035 len = s->getLength();
3036 *propertiesCnt = len;
3037 err = copyoutkdata( s->text(), len, properties );
3038
3039 } else
3040 err = kIOReturnUnsupported;
3041
3042 s->release();
3043 obj->release();
3044
3045 return( err );
3046 }
3047
3048 /* Routine io_registry_entry_get_properties */
3049 kern_return_t is_io_registry_entry_get_properties(
3050 io_object_t registry_entry,
3051 io_buf_ptr_t *properties,
3052 mach_msg_type_number_t *propertiesCnt )
3053 {
3054 return (kIOReturnUnsupported);
3055 }
3056
3057 #if CONFIG_MACF
3058
// Context passed to the GetPropertiesEditor serializer callback below.
struct GetPropertiesEditorRef
{
    kauth_cred_t cred;          // credential of the calling task
    IORegistryEntry * entry;    // entry whose properties are being serialized
    OSCollection * root;        // first container seen; only its direct keys are MAC-checked
};
3065
/*
 * OSSerialize editor callback used by is_io_registry_entry_get_properties_bin().
 * Filters each top-level property through the MAC get_property check and
 * drops vetoed values from the serialized output. Nested collections are
 * not re-checked: only keys of the root container are filtered.
 */
static const OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
    GetPropertiesEditorRef * ref = (typeof(ref)) reference;

    // The first container visited is recorded as the root dictionary.
    if (!ref->root) ref->root = container;
    if (ref->root == container)
    {
        if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
        {
            value = 0;  // vetoed: property is omitted from the output
        }
    }
    // Return a retained value (or NULL) to the serializer.
    if (value) value->retain();
    return (value);
}
3086
3087 #endif /* CONFIG_MACF */
3088
/* Routine io_registry_entry_get_properties */
/*
 * Binary-serialized bulk property retrieval. When the MAC policy requests
 * filtering, a GetPropertiesEditor callback is attached to the serializer
 * so vetoed properties are dropped per-key; "ref" must outlive the
 * serialization, which it does since s is released in this frame.
 */
kern_return_t is_io_registry_entry_get_properties_bin(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt)
{
    kern_return_t err = kIOReturnSuccess;
    vm_size_t len;
    OSSerialize * s;
    OSSerialize::Editor editor = 0;
    void * editRef = 0;

    CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
    GetPropertiesEditorRef ref;
    if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
    {
        // Policy wants per-property filtering: wire up the editor callback.
        editor = &GetPropertiesEditor;
        editRef = &ref;
        ref.cred = kauth_cred_get();
        ref.entry = entry;
        ref.root = 0;
    }
#endif

    s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
    if (!s) return (kIOReturnNoMemory);

    if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;

    if (kIOReturnSuccess == err)
    {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata(s->text(), len, properties);
    }
    s->release();

    return (err);
}
3130
/* Routine io_registry_entry_get_property_bin */
/*
 * Binary-serialized single-property retrieval. Special cases:
 *  - the gIORegistryEntryPropertyKeysKey pseudo-property returns the set
 *    of property keys instead of a value;
 *  - properties listed in gIORemoveOnReadProperties are removed from the
 *    entry once read.
 */
kern_return_t is_io_registry_entry_get_property_bin(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t len;
    OSObject * obj;
    const OSSymbol * sym;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy may veto access to individual properties.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    sym = OSSymbol::withCString(property_name);
    if (!sym) return (kIOReturnNoMemory);

    if (gIORegistryEntryPropertyKeysKey == sym)
    {
        // Pseudo-property: return the list of keys, not a value.
        obj = entry->copyPropertyKeys();
    }
    else
    {
        // Recursive lookups require a plane to walk.
        if ((kIORegistryIterateRecursively & options) && plane[0])
        {
            obj = entry->copyProperty(property_name,
                IORegistryEntry::getPlane(plane), options );
        }
        else
        {
            obj = entry->copyProperty(property_name);
        }
        // Read-once properties are consumed by this read.
        if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
    }

    sym->release();
    if (!obj) return (kIOReturnNotFound);

    OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );

    } else err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}
3194
3195
/* Routine io_registry_entry_set_properties */
/*
 * Applies caller-supplied XML-serialized properties to a registry entry.
 * The payload arrives as a vm_map_copy_t; as with the other OOL routines
 * the MIG status (err) covers only the copyout, and the set-properties
 * outcome is reported through *result.
 */
kern_return_t is_io_registry_entry_set_properties
(
    io_object_t registry_entry,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result)
{
    OSObject * obj;
    kern_return_t err;
    IOReturn res;
    vm_offset_t data;
    vm_map_offset_t map_data;

    CHECK( IORegistryEntry, registry_entry, entry );

    // Bound the payload before mapping it into the kernel.
    if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
        return( kIOReturnMessageTooLarge);

    err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == err) {

        FAKE_STACK_FRAME(entry->getMetaClass());

        // must return success after vm_map_copyout() succeeds
        obj = OSUnserializeXML( (const char *) data, propertiesCnt );
        vm_deallocate( kernel_map, data, propertiesCnt );

        if (!obj)
            res = kIOReturnBadArgument;
#if CONFIG_MACF
        // MAC policy may veto setting these properties.
        else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
            registry_entry, obj))
        {
            res = kIOReturnNotPermitted;
        }
#endif
        else
        {
            res = entry->setProperties( obj );
        }

        if (obj)
            obj->release();

        FAKE_STACK_FRAME_END();

    } else
        res = err;

    *result = res;
    return( err );
}
3251
3252 /* Routine io_registry_entry_get_child_iterator */
3253 kern_return_t is_io_registry_entry_get_child_iterator(
3254 io_object_t registry_entry,
3255 io_name_t plane,
3256 io_object_t *iterator )
3257 {
3258 CHECK( IORegistryEntry, registry_entry, entry );
3259
3260 *iterator = entry->getChildIterator(
3261 IORegistryEntry::getPlane( plane ));
3262
3263 return( kIOReturnSuccess );
3264 }
3265
3266 /* Routine io_registry_entry_get_parent_iterator */
3267 kern_return_t is_io_registry_entry_get_parent_iterator(
3268 io_object_t registry_entry,
3269 io_name_t plane,
3270 io_object_t *iterator)
3271 {
3272 CHECK( IORegistryEntry, registry_entry, entry );
3273
3274 *iterator = entry->getParentIterator(
3275 IORegistryEntry::getPlane( plane ));
3276
3277 return( kIOReturnSuccess );
3278 }
3279
3280 /* Routine io_service_get_busy_state */
3281 kern_return_t is_io_service_get_busy_state(
3282 io_object_t _service,
3283 uint32_t *busyState )
3284 {
3285 CHECK( IOService, _service, service );
3286
3287 *busyState = service->getBusyState();
3288
3289 return( kIOReturnSuccess );
3290 }
3291
3292 /* Routine io_service_get_state */
3293 kern_return_t is_io_service_get_state(
3294 io_object_t _service,
3295 uint64_t *state,
3296 uint32_t *busy_state,
3297 uint64_t *accumulated_busy_time )
3298 {
3299 CHECK( IOService, _service, service );
3300
3301 *state = service->getState();
3302 *busy_state = service->getBusyState();
3303 *accumulated_busy_time = service->getAccumulatedBusyTime();
3304
3305 return( kIOReturnSuccess );
3306 }
3307
3308 /* Routine io_service_wait_quiet */
3309 kern_return_t is_io_service_wait_quiet(
3310 io_object_t _service,
3311 mach_timespec_t wait_time )
3312 {
3313 uint64_t timeoutNS;
3314
3315 CHECK( IOService, _service, service );
3316
3317 timeoutNS = wait_time.tv_sec;
3318 timeoutNS *= kSecondScale;
3319 timeoutNS += wait_time.tv_nsec;
3320
3321 return( service->waitQuiet(timeoutNS) );
3322 }
3323
3324 /* Routine io_service_request_probe */
3325 kern_return_t is_io_service_request_probe(
3326 io_object_t _service,
3327 uint32_t options )
3328 {
3329 CHECK( IOService, _service, service );
3330
3331 return( service->requestProbe( options ));
3332 }
3333
3334 /* Routine io_service_get_authorization_id */
3335 kern_return_t is_io_service_get_authorization_id(
3336 io_object_t _service,
3337 uint64_t *authorization_id )
3338 {
3339 kern_return_t kr;
3340
3341 CHECK( IOService, _service, service );
3342
3343 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3344 kIOClientPrivilegeAdministrator );
3345 if( kIOReturnSuccess != kr)
3346 return( kr );
3347
3348 *authorization_id = service->getAuthorizationID();
3349
3350 return( kr );
3351 }
3352
3353 /* Routine io_service_set_authorization_id */
3354 kern_return_t is_io_service_set_authorization_id(
3355 io_object_t _service,
3356 uint64_t authorization_id )
3357 {
3358 CHECK( IOService, _service, service );
3359
3360 return( service->setAuthorizationID( authorization_id ) );
3361 }
3362
/* Routine io_service_open_ndr */
/*
 * Opens a user-client connection on a service. The NDR record lets the
 * kernel detect a cross-endian caller; when detected, the NDR is attached
 * to the connection properties and the open is refused unless either the
 * service or the client advertises kIOUserClientCrossEndianCompatibleKey.
 * The connection status travels via *result; the MIG status (err) stays
 * KERN_SUCCESS on this path.
 */
kern_return_t is_io_service_open_extended(
    io_object_t _service,
    task_t owningTask,
    uint32_t connect_type,
    NDR_record_t ndr,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result,
    io_object_t *connection )
{
    IOUserClient * client = 0;
    kern_return_t err = KERN_SUCCESS;
    IOReturn res = kIOReturnSuccess;
    OSDictionary * propertiesDict = 0;
    bool crossEndian;
    bool disallowAccess;

    CHECK( IOService, _service, service );

    // Only the calling task may open a connection on its own behalf.
    if (!owningTask) return (kIOReturnBadArgument);
    assert(owningTask == current_task());
    if (owningTask != current_task()) return (kIOReturnBadArgument);

    do
    {
        // Caller-supplied open properties are not currently accepted.
        if (properties) return (kIOReturnUnsupported);
#if 0
        {
            OSObject * obj;
            vm_offset_t data;
            vm_map_offset_t map_data;

            if( propertiesCnt > sizeof(io_struct_inband_t))
                return( kIOReturnMessageTooLarge);

            err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
            res = err;
            data = CAST_DOWN(vm_offset_t, map_data);
            if (KERN_SUCCESS == err)
            {
                // must return success after vm_map_copyout() succeeds
                obj = OSUnserializeXML( (const char *) data, propertiesCnt );
                vm_deallocate( kernel_map, data, propertiesCnt );
                propertiesDict = OSDynamicCast(OSDictionary, obj);
                if (!propertiesDict)
                {
                    res = kIOReturnBadArgument;
                    if (obj)
                        obj->release();
                }
            }
            if (kIOReturnSuccess != res)
                break;
        }
#endif
        // A caller whose integer representation differs from the kernel's
        // is cross-endian; record its NDR in the connection properties.
        crossEndian = (ndr.int_rep != NDR_record.int_rep);
        if (crossEndian)
        {
            if (!propertiesDict)
                propertiesDict = OSDictionary::withCapacity(4);
            OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
            if (data)
            {
                if (propertiesDict)
                    propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
                data->release();
            }
        }

        res = service->newUserClient( owningTask, (void *) owningTask,
            connect_type, propertiesDict, &client );

        if (propertiesDict)
            propertiesDict->release();

        if (res == kIOReturnSuccess)
        {
            assert( OSDynamicCast(IOUserClient, client) );

            client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
            client->closed = false;

            // Cross-endian opens are refused unless explicitly supported.
            disallowAccess = (crossEndian
              && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
              && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
            if (disallowAccess) res = kIOReturnUnsupported;
#if CONFIG_MACF
            // MAC policy may veto the open.
            else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
                res = kIOReturnNotPermitted;
#endif

            if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);

            // On any failure after creation, close and drop the client.
            if (kIOReturnSuccess != res)
            {
                IOStatisticsClientCall();
                client->clientClose();
                client->release();
                client = 0;
                break;
            }
            // Tag the connection with the creating process for diagnostics.
            OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
            if (creatorName)
            {
                client->setProperty(kIOUserClientCreatorKey, creatorName);
                creatorName->release();
            }
            client->setTerminateDefer(service, false);
        }
    }
    while (false);

    *connection = client;
    *result = res;

    return (err);
}
3481
/* Routine io_service_close */
kern_return_t is_io_service_close(
    io_object_t connection )
{
    // An OSSet here denotes a memory-mapping placeholder object, not a
    // real user client; closing it is a no-op.
    OSSet * mappings;
    if ((mappings = OSDynamicCast(OSSet, connection)))
        return( kIOReturnSuccess );

    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();

    // Shared-instance clients may be closed repeatedly; otherwise the
    // atomic 0->1 swap of "closed" guarantees clientClose() runs at most
    // once per connection, even with racing callers.
    if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
    {
        client->clientClose();
    }
    else
    {
        // Duplicate close: log and ignore rather than double-closing.
        IOLog("ignored is_io_service_close(0x%qx,%s)\n",
            client->getRegistryEntryID(), client->getName());
    }

    return( kIOReturnSuccess );
}
3506
3507 /* Routine io_connect_get_service */
3508 kern_return_t is_io_connect_get_service(
3509 io_object_t connection,
3510 io_object_t *service )
3511 {
3512 IOService * theService;
3513
3514 CHECK( IOUserClient, connection, client );
3515
3516 theService = client->getService();
3517 if( theService)
3518 theService->retain();
3519
3520 *service = theService;
3521
3522 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3523 }
3524
3525 /* Routine io_connect_set_notification_port */
3526 kern_return_t is_io_connect_set_notification_port(
3527 io_object_t connection,
3528 uint32_t notification_type,
3529 mach_port_t port,
3530 uint32_t reference)
3531 {
3532 CHECK( IOUserClient, connection, client );
3533
3534 IOStatisticsClientCall();
3535 return( client->registerNotificationPort( port, notification_type,
3536 (io_user_reference_t) reference ));
3537 }
3538
3539 /* Routine io_connect_set_notification_port */
3540 kern_return_t is_io_connect_set_notification_port_64(
3541 io_object_t connection,
3542 uint32_t notification_type,
3543 mach_port_t port,
3544 io_user_reference_t reference)
3545 {
3546 CHECK( IOUserClient, connection, client );
3547
3548 IOStatisticsClientCall();
3549 return( client->registerNotificationPort( port, notification_type,
3550 reference ));
3551 }
3552
/* Routine io_connect_map_memory_into_task */
//
// Maps the client memory identified by memory_type into into_task's address
// space via mapClientMemory64, returning the mapped address and size.
// Ownership of the resulting IOMemoryMap depends on who owns the mapping:
// for shared instances or cross-task maps a send right is pushed to the
// owning task; otherwise the map is retained in the client's mappings set.
kern_return_t is_io_connect_map_memory_into_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t into_task,
    mach_vm_address_t *address,
    mach_vm_size_t *size,
    uint32_t flags
)
{
    IOReturn err;
    IOMemoryMap * map;

    CHECK( IOUserClient, connection, client );

    if (!into_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    // *address is an in/out parameter: the caller's requested address is
    // passed in, and the actual mapped address is written back below.
    map = client->mapClientMemory64( memory_type, into_task, flags, *address );

    if( map) {
        *address = map->getAddress();
        if( size)
            *size = map->getSize();

        if( client->sharedInstance
            || (into_task != current_task())) {
            // push a name out to the task owning the map,
            // so we can clean up maps
            // (the send right holds the map reference from here on;
            // the name itself is intentionally unused)
            mach_port_name_t name __unused =
                IOMachPort::makeSendRightForTask(
                    into_task, map, IKOT_IOKIT_OBJECT );

        } else {
            // keep it with the user client
            IOLockLock( gIOObjectPortLock);
            if( 0 == client->mappings)
                client->mappings = OSSet::withCapacity(2);
            if( client->mappings)
                client->mappings->setObject( map);
            IOLockUnlock( gIOObjectPortLock);
            // The set (if it exists) retained the map; drop the reference
            // returned by mapClientMemory64.
            map->release();
        }
        err = kIOReturnSuccess;

    } else
        err = kIOReturnBadArgument;

    return( err );
}
3604
3605 /* Routine is_io_connect_map_memory */
3606 kern_return_t is_io_connect_map_memory(
3607 io_object_t connect,
3608 uint32_t type,
3609 task_t task,
3610 uint32_t * mapAddr,
3611 uint32_t * mapSize,
3612 uint32_t flags )
3613 {
3614 IOReturn err;
3615 mach_vm_address_t address;
3616 mach_vm_size_t size;
3617
3618 address = SCALAR64(*mapAddr);
3619 size = SCALAR64(*mapSize);
3620
3621 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3622
3623 *mapAddr = SCALAR32(address);
3624 *mapSize = SCALAR32(size);
3625
3626 return (err);
3627 }
3628
3629 } /* extern "C" */
3630
// Finds and removes the first IOMemoryMap in this client's mappings set
// whose backing descriptor is 'mem'. Returns the map with an extra retain
// (caller must release), or 0 if no matching map is found.
// gIOObjectPortLock guards the mappings set, matching the locking in
// is_io_connect_map_memory_into_task / is_io_connect_unmap_memory_from_task.
IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
    OSIterator * iter;
    IOMemoryMap * map = 0;

    IOLockLock(gIOObjectPortLock);

    // OSCollectionIterator tolerates a NULL collection only via the NULL
    // check below; 'mappings' may be 0 if nothing was ever mapped.
    iter = OSCollectionIterator::withCollection(mappings);
    if(iter)
    {
        while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
        {
            if(mem == map->getMemoryDescriptor())
            {
                // Retain before removal: the set drops its reference when
                // the object is removed.
                map->retain();
                mappings->removeObject(map);
                break;
            }
        }
        iter->release();
    }

    IOLockUnlock(gIOObjectPortLock);

    return (map);
}
3657
3658 extern "C" {
3659
/* Routine io_connect_unmap_memory_from_task */
//
// Undoes a mapping made by is_io_connect_map_memory_into_task. Looks up the
// client memory for memory_type, re-derives the existing map at 'address'
// with kIOMapReference, then tears down both the mapping and whichever
// reference-holding mechanism (port send right or client mappings set) was
// used when the map was created.
kern_return_t is_io_connect_unmap_memory_from_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t from_task,
    mach_vm_address_t address)
{
    IOReturn err;
    IOOptionBits options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap * map;

    CHECK( IOUserClient, connection, client );

    if (!from_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        // kIOMapReference makes createMappingInTask look up the existing
        // mapping at 'address' rather than create a new one; preserve the
        // driver's option bits outside the user-options mask.
        options = (options & ~kIOMapUserOptionsMask)
            | kIOMapAnywhere | kIOMapReference;

        map = memory->createMappingInTask( from_task, address, options );
        // clientMemoryForType returned a referenced descriptor; done with it.
        memory->release();
        if( map)
        {
            IOLockLock( gIOObjectPortLock);
            if( client->mappings)
                client->mappings->removeObject( map);
            IOLockUnlock( gIOObjectPortLock);

            mach_port_name_t name = 0;
            if (from_task != current_task())
                name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
            if (name)
            {
                map->userClientUnmap();
                // Drop the two send-right references (the one just made plus
                // the one created at map time). NOTE(review): the result of
                // iokit_mod_send_right is deliberately discarded — err is
                // overwritten with success on the next line.
                err = iokit_mod_send_right( from_task, name, -2 );
                err = kIOReturnSuccess;
            }
            else
                IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
            if (from_task == current_task())
                // Balance the reference returned by createMappingInTask for
                // the same-task case (the mappings set held its own ref).
                map->release();
        }
        else
            err = kIOReturnBadArgument;
    }

    return( err );
}
3714
3715 kern_return_t is_io_connect_unmap_memory(
3716 io_object_t connect,
3717 uint32_t type,
3718 task_t task,
3719 uint32_t mapAddr )
3720 {
3721 IOReturn err;
3722 mach_vm_address_t address;
3723
3724 address = SCALAR64(mapAddr);
3725
3726 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
3727
3728 return (err);
3729 }
3730
3731
3732 /* Routine io_connect_add_client */
3733 kern_return_t is_io_connect_add_client(
3734 io_object_t connection,
3735 io_object_t connect_to)
3736 {
3737 CHECK( IOUserClient, connection, client );
3738 CHECK( IOUserClient, connect_to, to );
3739
3740 IOStatisticsClientCall();
3741 return( client->connectClient( to ) );
3742 }
3743
3744
3745 /* Routine io_connect_set_properties */
3746 kern_return_t is_io_connect_set_properties(
3747 io_object_t connection,
3748 io_buf_ptr_t properties,
3749 mach_msg_type_number_t propertiesCnt,
3750 kern_return_t * result)
3751 {
3752 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3753 }
3754
/* Routine io_user_client_method */
//
// Variant of the external-method trap whose structure output is
// variable-length: the method deposits an OSSerialize or OSData in
// structureVariableOutputData, which is copied out to user space via
// copyoutkdata as the var_output buffer.
kern_return_t is_io_connect_method_var_output
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    io_buf_ptr_t *var_output,
    mach_msg_type_number_t *var_outputCnt
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    OSObject *           structureVariableOutputData = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    // No async completion on this path.
    args.asyncWakePort = MACH_PORT_NULL;
    args.asyncReference = 0;
    args.asyncReferenceCount = 0;
    args.structureVariableOutputData = &structureVariableOutputData;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    // An out-of-line input no larger than the inband buffer is malformed:
    // OOL is only used when the payload does not fit inband.
    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    // Zero the scalar output slots so unset entries never leak stack data.
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;
    args.structureOutputDescriptor = NULL;
    args.structureOutputDescriptorSize = 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;

    if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
    {
        OSSerialize * serialize;
        OSData      * data;
        vm_size_t     len;

        // The method may return either serialized data or a raw OSData blob;
        // anything else (including nothing) is reported as an underrun.
        if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
        {
            len = serialize->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(serialize->text(), len, var_output);
        }
        else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
        {
            len = data->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
        }
        else
        {
            ret = kIOReturnUnderrun;
        }
    }

    if (inputMD)
        inputMD->release();
    if (structureVariableOutputData)
        structureVariableOutputData->release();

    return (ret);
}
3851
/* Routine io_user_client_method */
//
// Synchronous external-method trap. Marshals the MIG inband/out-of-line
// buffers into an IOExternalMethodArguments and dispatches to the client's
// externalMethod(). OOL buffers are wrapped in IOMemoryDescriptors mapped
// from the calling task.
kern_return_t is_io_connect_method
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    mach_vm_address_t ool_output,
    mach_vm_size_t *ool_output_size
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    IOMemoryDescriptor * outputMD = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    // Synchronous call: no async wake port or reference.
    args.asyncWakePort = MACH_PORT_NULL;
    args.asyncReference = 0;
    args.asyncReferenceCount = 0;
    args.structureVariableOutputData = 0;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    // OOL payloads that would fit in the inband buffer are malformed —
    // user space only sends OOL when the data exceeds inband capacity.
    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    // Map the input OOL buffer copy-on-write so the method reads a stable
    // snapshot even if user space concurrently modifies it.
    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    // Zero the scalar output slots so unset entries never leak kernel data.
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;

    if (ool_output && ool_output_size)
    {
        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
                                                        kIODirectionIn, current_task());
    }

    args.structureOutputDescriptor = outputMD;
    args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    // Report back the counts the method actually produced.
    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;
    *ool_output_size = args.structureOutputDescriptorSize;

    if (inputMD)
        inputMD->release();
    if (outputMD)
        outputMD->release();

    return (ret);
}
3933
3934 /* Routine io_async_user_client_method */
3935 kern_return_t is_io_connect_async_method
3936 (
3937 io_connect_t connection,
3938 mach_port_t wake_port,
3939 io_async_ref64_t reference,
3940 mach_msg_type_number_t referenceCnt,
3941 uint32_t selector,
3942 io_scalar_inband64_t scalar_input,
3943 mach_msg_type_number_t scalar_inputCnt,
3944 io_struct_inband_t inband_input,
3945 mach_msg_type_number_t inband_inputCnt,
3946 mach_vm_address_t ool_input,
3947 mach_vm_size_t ool_input_size,
3948 io_struct_inband_t inband_output,
3949 mach_msg_type_number_t *inband_outputCnt,
3950 io_scalar_inband64_t scalar_output,
3951 mach_msg_type_number_t *scalar_outputCnt,
3952 mach_vm_address_t ool_output,
3953 mach_vm_size_t * ool_output_size
3954 )
3955 {
3956 CHECK( IOUserClient, connection, client );
3957
3958 IOExternalMethodArguments args;
3959 IOReturn ret;
3960 IOMemoryDescriptor * inputMD = 0;
3961 IOMemoryDescriptor * outputMD = 0;
3962
3963 bzero(&args.__reserved[0], sizeof(args.__reserved));
3964 args.__reservedA = 0;
3965 args.version = kIOExternalMethodArgumentsCurrentVersion;
3966
3967 reference[0] = (io_user_reference_t) wake_port;
3968 if (vm_map_is_64bit(get_task_map(current_task())))
3969 reference[0] |= kIOUCAsync64Flag;
3970
3971 args.selector = selector;
3972
3973 args.asyncWakePort = wake_port;
3974 args.asyncReference = reference;
3975 args.asyncReferenceCount = referenceCnt;
3976
3977 args.scalarInput = scalar_input;
3978 args.scalarInputCount = scalar_inputCnt;
3979 args.structureInput = inband_input;
3980 args.structureInputSize = inband_inputCnt;
3981
3982 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3983 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3984
3985 if (ool_input)
3986 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3987 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3988 current_task());
3989
3990 args.structureInputDescriptor = inputMD;
3991
3992 args.scalarOutput = scalar_output;
3993 args.scalarOutputCount = *scalar_outputCnt;
3994 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3995 args.structureOutput = inband_output;
3996 args.structureOutputSize = *inband_outputCnt;
3997
3998 if (ool_output)
3999 {
4000 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4001 kIODirectionIn, current_task());
4002 }
4003
4004 args.structureOutputDescriptor = outputMD;
4005 args.structureOutputDescriptorSize = *ool_output_size;
4006
4007 IOStatisticsClientCall();
4008 ret = client->externalMethod( selector, &args );
4009
4010 *inband_outputCnt = args.structureOutputSize;
4011 *ool_output_size = args.structureOutputDescriptorSize;
4012
4013 if (inputMD)
4014 inputMD->release();
4015 if (outputMD)
4016 outputMD->release();
4017
4018 return (ret);
4019 }
4020
4021 /* Routine io_connect_method_scalarI_scalarO */
4022 kern_return_t is_io_connect_method_scalarI_scalarO(
4023 io_object_t connect,
4024 uint32_t index,
4025 io_scalar_inband_t input,
4026 mach_msg_type_number_t inputCount,
4027 io_scalar_inband_t output,
4028 mach_msg_type_number_t * outputCount )
4029 {
4030 IOReturn err;
4031 uint32_t i;
4032 io_scalar_inband64_t _input;
4033 io_scalar_inband64_t _output;
4034
4035 mach_msg_type_number_t struct_outputCnt = 0;
4036 mach_vm_size_t ool_output_size = 0;
4037
4038 bzero(&_output[0], sizeof(_output));
4039 for (i = 0; i < inputCount; i++)
4040 _input[i] = SCALAR64(input[i]);
4041
4042 err = is_io_connect_method(connect, index,
4043 _input, inputCount,
4044 NULL, 0,
4045 0, 0,
4046 NULL, &struct_outputCnt,
4047 _output, outputCount,
4048 0, &ool_output_size);
4049
4050 for (i = 0; i < *outputCount; i++)
4051 output[i] = SCALAR32(_output[i]);
4052
4053 return (err);
4054 }
4055
// Bridges the modern external-method path onto a legacy IOExternalMethod
// table entry taking up to 6 scalar inputs and producing up to 6 scalar
// outputs. The switch packs inputs and output slots into the fixed-arity
// IOMethod member-function-pointer call.
kern_return_t shim_io_connect_method_scalarI_scalarO(
    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOMethod func;
    io_scalar_inband_t _output;
    IOReturn err;
    err = kIOReturnBadArgument;

    // Zero the staging buffer so unused output slots are deterministic.
    bzero(&_output[0], sizeof(_output));
    do {

        // The legacy table records exact scalar counts; reject mismatches.
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        // Legacy IOMethods always take 6 argument slots: inputs first
        // (narrowed via ARG32), remaining slots filled with output pointers.
        switch( inputCount) {

            case 6:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       &_output[0] );
                break;
            case 4:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3] );
                break;
            case 1:
                err = (object->*func)( ARG32(input[0]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)( &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    // Narrow the staged outputs into the caller's buffer.
    uint32_t i;
    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
4134
4135 /* Routine io_async_method_scalarI_scalarO */
4136 kern_return_t is_io_async_method_scalarI_scalarO(
4137 io_object_t connect,
4138 mach_port_t wake_port,
4139 io_async_ref_t reference,
4140 mach_msg_type_number_t referenceCnt,
4141 uint32_t index,
4142 io_scalar_inband_t input,
4143 mach_msg_type_number_t inputCount,
4144 io_scalar_inband_t output,
4145 mach_msg_type_number_t * outputCount )
4146 {
4147 IOReturn err;
4148 uint32_t i;
4149 io_scalar_inband64_t _input;
4150 io_scalar_inband64_t _output;
4151 io_async_ref64_t _reference;
4152
4153 bzero(&_output[0], sizeof(_output));
4154 for (i = 0; i < referenceCnt; i++)
4155 _reference[i] = REF64(reference[i]);
4156
4157 mach_msg_type_number_t struct_outputCnt = 0;
4158 mach_vm_size_t ool_output_size = 0;
4159
4160 for (i = 0; i < inputCount; i++)
4161 _input[i] = SCALAR64(input[i]);
4162
4163 err = is_io_connect_async_method(connect,
4164 wake_port, _reference, referenceCnt,
4165 index,
4166 _input, inputCount,
4167 NULL, 0,
4168 0, 0,
4169 NULL, &struct_outputCnt,
4170 _output, outputCount,
4171 0, &ool_output_size);
4172
4173 for (i = 0; i < *outputCount; i++)
4174 output[i] = SCALAR32(_output[i]);
4175
4176 return (err);
4177 }
4178 /* Routine io_async_method_scalarI_structureO */
4179 kern_return_t is_io_async_method_scalarI_structureO(
4180 io_object_t connect,
4181 mach_port_t wake_port,
4182 io_async_ref_t reference,
4183 mach_msg_type_number_t referenceCnt,
4184 uint32_t index,
4185 io_scalar_inband_t input,
4186 mach_msg_type_number_t inputCount,
4187 io_struct_inband_t output,
4188 mach_msg_type_number_t * outputCount )
4189 {
4190 uint32_t i;
4191 io_scalar_inband64_t _input;
4192 io_async_ref64_t _reference;
4193
4194 for (i = 0; i < referenceCnt; i++)
4195 _reference[i] = REF64(reference[i]);
4196
4197 mach_msg_type_number_t scalar_outputCnt = 0;
4198 mach_vm_size_t ool_output_size = 0;
4199
4200 for (i = 0; i < inputCount; i++)
4201 _input[i] = SCALAR64(input[i]);
4202
4203 return (is_io_connect_async_method(connect,
4204 wake_port, _reference, referenceCnt,
4205 index,
4206 _input, inputCount,
4207 NULL, 0,
4208 0, 0,
4209 output, outputCount,
4210 NULL, &scalar_outputCnt,
4211 0, &ool_output_size));
4212 }
4213
4214 /* Routine io_async_method_scalarI_structureI */
4215 kern_return_t is_io_async_method_scalarI_structureI(
4216 io_connect_t connect,
4217 mach_port_t wake_port,
4218 io_async_ref_t reference,
4219 mach_msg_type_number_t referenceCnt,
4220 uint32_t index,
4221 io_scalar_inband_t input,
4222 mach_msg_type_number_t inputCount,
4223 io_struct_inband_t inputStruct,
4224 mach_msg_type_number_t inputStructCount )
4225 {
4226 uint32_t i;
4227 io_scalar_inband64_t _input;
4228 io_async_ref64_t _reference;
4229
4230 for (i = 0; i < referenceCnt; i++)
4231 _reference[i] = REF64(reference[i]);
4232
4233 mach_msg_type_number_t scalar_outputCnt = 0;
4234 mach_msg_type_number_t inband_outputCnt = 0;
4235 mach_vm_size_t ool_output_size = 0;
4236
4237 for (i = 0; i < inputCount; i++)
4238 _input[i] = SCALAR64(input[i]);
4239
4240 return (is_io_connect_async_method(connect,
4241 wake_port, _reference, referenceCnt,
4242 index,
4243 _input, inputCount,
4244 inputStruct, inputStructCount,
4245 0, 0,
4246 NULL, &inband_outputCnt,
4247 NULL, &scalar_outputCnt,
4248 0, &ool_output_size));
4249 }
4250
4251 /* Routine io_async_method_structureI_structureO */
4252 kern_return_t is_io_async_method_structureI_structureO(
4253 io_object_t connect,
4254 mach_port_t wake_port,
4255 io_async_ref_t reference,
4256 mach_msg_type_number_t referenceCnt,
4257 uint32_t index,
4258 io_struct_inband_t input,
4259 mach_msg_type_number_t inputCount,
4260 io_struct_inband_t output,
4261 mach_msg_type_number_t * outputCount )
4262 {
4263 uint32_t i;
4264 mach_msg_type_number_t scalar_outputCnt = 0;
4265 mach_vm_size_t ool_output_size = 0;
4266 io_async_ref64_t _reference;
4267
4268 for (i = 0; i < referenceCnt; i++)
4269 _reference[i] = REF64(reference[i]);
4270
4271 return (is_io_connect_async_method(connect,
4272 wake_port, _reference, referenceCnt,
4273 index,
4274 NULL, 0,
4275 input, inputCount,
4276 0, 0,
4277 output, outputCount,
4278 NULL, &scalar_outputCnt,
4279 0, &ool_output_size));
4280 }
4281
4282
// Bridges the modern async external-method path onto a legacy
// IOExternalAsyncMethod table entry: narrows the async reference and scalar
// inputs to 32-bit, dispatches through the fixed-arity IOAsyncMethod pointer,
// then narrows the staged scalar outputs into the caller's buffer.
kern_return_t shim_io_async_method_scalarI_scalarO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    io_scalar_inband_t _output;
    IOReturn err;
    io_async_ref_t reference;

    // Zero the staging buffer so unused output slots are deterministic,
    // then narrow the async reference for the legacy ABI.
    bzero(&_output[0], sizeof(_output));
    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;

    do {

        // The legacy table records exact scalar counts; reject mismatches.
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        // Legacy IOAsyncMethods take the reference plus 6 argument slots:
        // inputs first (narrowed via ARG32), then output slot pointers.
        switch( inputCount) {

            case 6:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       &_output[0] );
                break;
            case 4:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3] );
                break;
            case 1:
                err = (object->*func)( reference,
                                       ARG32(input[0]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)( reference,
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    // Narrow the staged outputs into the caller's buffer.
    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
4376
4377
4378 /* Routine io_connect_method_scalarI_structureO */
4379 kern_return_t is_io_connect_method_scalarI_structureO(
4380 io_object_t connect,
4381 uint32_t index,
4382 io_scalar_inband_t input,
4383 mach_msg_type_number_t inputCount,
4384 io_struct_inband_t output,
4385 mach_msg_type_number_t * outputCount )
4386 {
4387 uint32_t i;
4388 io_scalar_inband64_t _input;
4389
4390 mach_msg_type_number_t scalar_outputCnt = 0;
4391 mach_vm_size_t ool_output_size = 0;
4392
4393 for (i = 0; i < inputCount; i++)
4394 _input[i] = SCALAR64(input[i]);
4395
4396 return (is_io_connect_method(connect, index,
4397 _input, inputCount,
4398 NULL, 0,
4399 0, 0,
4400 output, outputCount,
4401 NULL, &scalar_outputCnt,
4402 0, &ool_output_size));
4403 }
4404
// Bridges the modern external-method path onto a legacy IOExternalMethod
// entry taking up to 5 scalar inputs and producing a structure output.
// count1 may be kIOUCVariableStructureSize, meaning the output size is
// negotiated at call time rather than fixed in the table.
kern_return_t shim_io_connect_method_scalarI_structureO(

    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err;

    err = kIOReturnBadArgument;

    do {
        // The legacy table records exact counts; reject mismatches
        // (a variable-size output bypasses the size check).
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        // Inputs fill the leading slots (narrowed via ARG32); the output
        // buffer and its size pointer fill the next slots. The size pointer
        // is passed as void* per the untyped legacy IOMethod ABI — presumably
        // only methods that take a size use that slot (case 5 omits it,
        // implying a fixed-size output for 5-input methods).
        switch( inputCount) {

            case 5:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        output );
                break;
            case 4:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
                                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)(  ARG32(input[0]),
                                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)(  output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
4472
4473
// Async counterpart of shim_io_connect_method_scalarI_structureO: bridges
// the modern async path onto a legacy IOExternalAsyncMethod entry with up to
// 5 scalar inputs and a structure output. The narrowed async reference is
// passed as the first argument per the legacy ABI.
kern_return_t shim_io_async_method_scalarI_structureO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    IOReturn err;
    io_async_ref_t reference;

    // Narrow the 64-bit async reference to the legacy 32-bit layout.
    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    do {
        // The legacy table records exact counts; reject mismatches
        // (a variable-size output bypasses the size check).
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        // Inputs fill the leading slots (narrowed via ARG32); the output
        // buffer and its size pointer (as void*, per the untyped legacy ABI)
        // fill the next slots. Case 5 omits the size pointer, implying a
        // fixed-size output for 5-input methods.
        switch( inputCount) {

            case 5:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        output );
                break;
            case 4:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]),
                                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)(  reference,
                                        ARG32(input[0]),
                                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)(  reference,
                                        output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
4553
4554 /* Routine io_connect_method_scalarI_structureI */
4555 kern_return_t is_io_connect_method_scalarI_structureI(
4556 io_connect_t connect,
4557 uint32_t index,
4558 io_scalar_inband_t input,
4559 mach_msg_type_number_t inputCount,
4560 io_struct_inband_t inputStruct,
4561 mach_msg_type_number_t inputStructCount )
4562 {
4563 uint32_t i;
4564 io_scalar_inband64_t _input;
4565
4566 mach_msg_type_number_t scalar_outputCnt = 0;
4567 mach_msg_type_number_t inband_outputCnt = 0;
4568 mach_vm_size_t ool_output_size = 0;
4569
4570 for (i = 0; i < inputCount; i++)
4571 _input[i] = SCALAR64(input[i]);
4572
4573 return (is_io_connect_method(connect, index,
4574 _input, inputCount,
4575 inputStruct, inputStructCount,
4576 0, 0,
4577 NULL, &inband_outputCnt,
4578 NULL, &scalar_outputCnt,
4579 0, &ool_output_size));
4580 }
4581
4582 kern_return_t shim_io_connect_method_scalarI_structureI(
4583 IOExternalMethod * method,
4584 IOService * object,
4585 const io_user_scalar_t * input,
4586 mach_msg_type_number_t inputCount,
4587 io_struct_inband_t inputStruct,
4588 mach_msg_type_number_t inputStructCount )
4589 {
4590 IOMethod func;
4591 IOReturn err = kIOReturnBadArgument;
4592
4593 do
4594 {
4595 if (inputCount != method->count0)
4596 {
4597 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4598 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4599 continue;
4600 }
4601 if( (kIOUCVariableStructureSize != method->count1)
4602 && (inputStructCount != method->count1))
4603 {
4604 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4605 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4606 continue;
4607 }
4608
4609 func = method->func;
4610
4611 switch( inputCount) {
4612
4613 case 5:
4614 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4615 ARG32(input[3]), ARG32(input[4]),
4616 inputStruct );
4617 break;
4618 case 4:
4619 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4620 ARG32(input[3]),
4621 inputStruct, (void *)(uintptr_t)inputStructCount );
4622 break;
4623 case 3:
4624 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4625 inputStruct, (void *)(uintptr_t)inputStructCount,
4626 0 );
4627 break;
4628 case 2:
4629 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4630 inputStruct, (void *)(uintptr_t)inputStructCount,
4631 0, 0 );
4632 break;
4633 case 1:
4634 err = (object->*func)( ARG32(input[0]),
4635 inputStruct, (void *)(uintptr_t)inputStructCount,
4636 0, 0, 0 );
4637 break;
4638 case 0:
4639 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4640 0, 0, 0, 0 );
4641 break;
4642
4643 default:
4644 IOLog("%s: Bad method table\n", object->getName());
4645 }
4646 }
4647 while (false);
4648
4649 return( err);
4650 }
4651
// Shim for pre-Leopard asynchronous external methods (IOExternalAsyncMethod
// table) taking scalar inputs plus an inband input structure.  Narrows each
// 64-bit user scalar with ARG32 and dispatches through the IOAsyncMethod
// member-function pointer, padding unused argument slots with 0.
kern_return_t shim_io_async_method_scalarI_structureI(
    IOExternalAsyncMethod * method,
    IOService *             object,
    mach_port_t             asyncWakePort,
    io_user_reference_t *   asyncReference,
    uint32_t                asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t  inputCount,
    io_struct_inband_t      inputStruct,
    mach_msg_type_number_t  inputStructCount )
{
    IOAsyncMethod	func;
    uint32_t		i;
    IOReturn		err = kIOReturnBadArgument;
    io_async_ref_t	reference;

    // Repack the caller's async reference into the legacy 32-bit
    // io_async_ref_t form expected by old method implementations.
    for (i = 0; i < asyncReferenceCount; i++)
	reference[i] = REF32(asyncReference[i]);

    // do/while(false): "continue" on a validation failure exits with the
    // default kIOReturnBadArgument after logging the mismatch.
    do
    {
	// Scalar count must match the method table exactly.
	if (inputCount != method->count0)
	{
	    IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
	    continue;
	}
	// Struct size must match unless the table declares it variable.
	if( (kIOUCVariableStructureSize != method->count1)
	    && (inputStructCount != method->count1))
	{
	    IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
	    continue;
	}

	func = method->func;

	// Dispatch on the scalar count; the async reference is always the
	// first argument, followed by the scalars, then the struct pointer
	// and (except for count 5) its size.
	switch( inputCount) {

	    case 5:
		err = (object->*func)(	reference,
					ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
					ARG32(input[3]), ARG32(input[4]),
					inputStruct );
		break;
	    case 4:
		err = (object->*func)(	reference,
					ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
					ARG32(input[3]),
					inputStruct, (void *)(uintptr_t)inputStructCount );
		break;
	    case 3:
		err = (object->*func)(	reference,
					ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
					inputStruct, (void *)(uintptr_t)inputStructCount,
					0 );
		break;
	    case 2:
		err = (object->*func)(	reference,
					ARG32(input[0]), ARG32(input[1]),
					inputStruct, (void *)(uintptr_t)inputStructCount,
					0, 0 );
		break;
	    case 1:
		err = (object->*func)(	reference,
					ARG32(input[0]),
					inputStruct, (void *)(uintptr_t)inputStructCount,
					0, 0, 0 );
		break;
	    case 0:
		err = (object->*func)(	reference,
					inputStruct, (void *)(uintptr_t)inputStructCount,
					0, 0, 0, 0 );
		break;

	    default:
		IOLog("%s: Bad method table\n", object->getName());
	}
    }
    while (false);

    return( err);
}
4735
4736 /* Routine io_connect_method_structureI_structureO */
4737 kern_return_t is_io_connect_method_structureI_structureO(
4738 io_object_t connect,
4739 uint32_t index,
4740 io_struct_inband_t input,
4741 mach_msg_type_number_t inputCount,
4742 io_struct_inband_t output,
4743 mach_msg_type_number_t * outputCount )
4744 {
4745 mach_msg_type_number_t scalar_outputCnt = 0;
4746 mach_vm_size_t ool_output_size = 0;
4747
4748 return (is_io_connect_method(connect, index,
4749 NULL, 0,
4750 input, inputCount,
4751 0, 0,
4752 output, outputCount,
4753 NULL, &scalar_outputCnt,
4754 0, &ool_output_size));
4755 }
4756
// Shim for pre-Leopard synchronous struct-in/struct-out external methods.
// Validates both sizes against the IOExternalMethod table, then invokes the
// IOMethod member-function pointer with an argument layout chosen by which
// of the table's sizes are non-zero.
kern_return_t shim_io_connect_method_structureI_structureO(
    IOExternalMethod *  method,
    IOService *         object,
    io_struct_inband_t  input,
    mach_msg_type_number_t  inputCount,
    io_struct_inband_t  output,
    IOByteCount *       outputCount )
{
    IOMethod		func;
    IOReturn		err = kIOReturnBadArgument;

    // do/while(false): "continue" on a validation failure exits with the
    // default kIOReturnBadArgument after logging the mismatch.
    do
    {
	// Input size must match unless declared variable in the table.
	if( (kIOUCVariableStructureSize != method->count0)
	    && (inputCount != method->count0))
	{
	    IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
	    continue;
	}
	// Output size likewise.
	if( (kIOUCVariableStructureSize != method->count1)
	    && (*outputCount != method->count1))
	{
	    IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
	    continue;
	}

	func = method->func;

	if( method->count1) {
	    if( method->count0) {
		// Both input and output structs present.
		err = (object->*func)( input, output,
					(void *)(uintptr_t)inputCount, outputCount, 0, 0 );
	    } else {
		// Output struct only.
		err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
	    }
	} else {
		// Input struct only.
		err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
	}
    }
    while( false);


    return( err);
}
4803
// Shim for pre-Leopard asynchronous struct-in/struct-out external methods.
// Same size validation and argument layout as the synchronous shim above,
// with the repacked async reference prepended as the first argument.
kern_return_t shim_io_async_method_structureI_structureO(
    IOExternalAsyncMethod * method,
    IOService *             object,
    mach_port_t             asyncWakePort,
    io_user_reference_t *   asyncReference,
    uint32_t                asyncReferenceCount,
    io_struct_inband_t      input,
    mach_msg_type_number_t  inputCount,
    io_struct_inband_t      output,
    mach_msg_type_number_t *  outputCount )
{
    IOAsyncMethod	func;
    uint32_t            i;
    IOReturn		err;
    io_async_ref_t	reference;

    // Repack the caller's async reference into the legacy 32-bit form.
    for (i = 0; i < asyncReferenceCount; i++)
	reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    // do/while(false): "continue" on a validation failure exits with
    // kIOReturnBadArgument after logging the mismatch.
    do
    {
	// Input size must match unless declared variable in the table.
	if( (kIOUCVariableStructureSize != method->count0)
	    && (inputCount != method->count0))
	{
	    IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
	    continue;
	}
	// Output size likewise.
	if( (kIOUCVariableStructureSize != method->count1)
	    && (*outputCount != method->count1))
	{
	    IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
	    continue;
	}

	func = method->func;

	// Argument layout depends on which struct sizes are non-zero.
	if( method->count1) {
	    if( method->count0) {
		// Both input and output structs present.
		err = (object->*func)( reference,
				       input, output,
				       (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
	    } else {
		// Output struct only.
		err = (object->*func)( reference,
				       output, outputCount, 0, 0, 0, 0 );
	    }
	} else {
		// Input struct only.
		err = (object->*func)( reference,
				       input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
	}
    }
    while( false);

    return( err);
}
4861
#if !NO_KEXTD
// One-shot flag: set the first time a kIOCatalogKextdFinishedLaunching
// request drops the busy count held on the service root, so the count is
// never decremented twice (see is_io_catalog_send_data below).
bool gIOKextdClearedBusy = false;
#endif
4865
/* Routine io_catalog_send_data */
// MIG handler: accepts serialized driver-personality data (or a control
// flag with no payload) from user space and applies it to gIOCatalogue.
// Note the Mach IPC ownership rule: once vm_map_copyout() succeeds, the
// routine must return KERN_SUCCESS and report failures via *result.
kern_return_t is_io_catalog_send_data(
        mach_port_t		master_port,
        uint32_t                flag,
        io_buf_ptr_t 		inData,
        mach_msg_type_number_t 	inDataCount,
        kern_return_t *		result)
{
#if NO_KEXTD
    return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
    OSObject * obj = 0;
    vm_offset_t data;
    kern_return_t kr = kIOReturnError;

    //printf("io_catalog_send_data called. flag: %d\n", flag);

    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    // Only the kextd-control flags may legally arrive without a payload.
    if( (flag != kIOCatalogRemoveKernelLinker &&
            flag != kIOCatalogKextdActive &&
            flag != kIOCatalogKextdFinishedLaunching) &&
        ( !inData || !inDataCount) )
    {
        return kIOReturnBadArgument;
    }

    if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
    {
        OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
        IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
        OSSafeReleaseNULL(taskName);
        // For now, fake success to not break applications relying on this function succeeding.
        // See <rdar://problem/32554970> for more details.
        return kIOReturnSuccess;
    }

    if (inData) {
        vm_map_offset_t map_data;

        // Cap the incoming payload size.
        if( inDataCount > sizeof(io_struct_inband_t) * 1024)
            return( kIOReturnMessageTooLarge);

        kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
        data = CAST_DOWN(vm_offset_t, map_data);

        if( kr != KERN_SUCCESS)
            return kr;

        // must return success after vm_map_copyout() succeeds

        if( inDataCount ) {
            // Deserialize the payload, then free the copied-in pages.
            obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
            vm_deallocate( kernel_map, data, inDataCount );
            if( !obj) {
                // IPC transfer already consumed: report failure in *result.
                *result = kIOReturnNoMemory;
                return( KERN_SUCCESS);
            }
        }
    }

    // Dispatch on the requested catalogue operation; the expected payload
    // type (OSArray vs OSDictionary) depends on the flag.
    switch ( flag ) {
        case kIOCatalogResetDrivers:
        case kIOCatalogResetDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if (array) {
                    if ( !gIOCatalogue->resetAndAddDrivers(array,
                        flag == kIOCatalogResetDrivers) ) {

                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogAddDrivers:
        case kIOCatalogAddDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if ( array ) {
                    if ( !gIOCatalogue->addDrivers( array ,
                                          flag == kIOCatalogAddDrivers) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveDrivers:
        case kIOCatalogRemoveDriversNoMatch: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->removeDrivers( dict,
                                          flag == kIOCatalogRemoveDrivers ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogStartMatching: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->startMatching( dict ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveKernelLinker:
            // Legacy operation, no longer supported.
            kr = KERN_NOT_SUPPORTED;
            break;

        case kIOCatalogKextdActive:
#if !NO_KEXTD
            IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
            OSKext::setKextdActive();

            /* Dump all nonloaded startup extensions; kextd will now send them
             * down on request.
             */
            OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
            kr = kIOReturnSuccess;
            break;

        case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
                // Drop the service root's artificial busy count exactly once;
                // gIOKextdClearedBusy guards against repeat requests.
                if (!gIOKextdClearedBusy) {
                    IOService * serviceRoot = IOService::getServiceRoot();
                    if (serviceRoot) {
                        IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
                        serviceRoot->adjustBusy(-1);
                        gIOKextdClearedBusy = true;
                    }
                }
#endif
                kr = kIOReturnSuccess;
            }
            break;

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    if (obj) obj->release();

    // Operation status travels in *result; the MIG-level return is success.
    *result = kr;
    return( KERN_SUCCESS);
#endif /* NO_KEXTD */
}
5038
/* Routine io_catalog_terminate */
// MIG handler: terminates services (or driver instances of a module) by
// name.  Requires the master device port and administrator privilege.
kern_return_t is_io_catalog_terminate(
	mach_port_t		master_port,
	uint32_t		flag,
	io_name_t		name )
{
    kern_return_t	   kr;

    if( master_port != master_device_port )
        return kIOReturnNotPrivileged;

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                            kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    switch ( flag ) {
#if !defined(SECURE_KERNEL)
        case kIOCatalogServiceTerminate:
            OSIterator *	iter;
            IOService *		service;

            iter = IORegistryIterator::iterateOver(gIOServicePlane,
                                          kIORegistryIterateRecursively);
            if ( !iter )
                return kIOReturnNoMemory;

            // Synchronously terminate every registry service whose class
            // matches 'name'.  If the iterator is invalidated mid-walk
            // (registry changed underneath it), reset and retry; a failed
            // terminate() breaks out with service != NULL, ending the loop.
            do {
                iter->reset();
                while( (service = (IOService *)iter->getNextObject()) ) {
                    if( service->metaCast(name)) {
                        if ( !service->terminate( kIOServiceRequired
                                                | kIOServiceSynchronous) ) {
                            kr = kIOReturnUnsupported;
                            break;
                        }
                    }
                }
            } while( !service && !iter->isValid());
            iter->release();
            break;

        case kIOCatalogModuleUnload:
        case kIOCatalogModuleTerminate:
            kr = gIOCatalogue->terminateDriversForModule(name,
                                                flag == kIOCatalogModuleUnload);
            break;
#endif

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    return( kr );
}
5095
5096 /* Routine io_catalog_get_data */
5097 kern_return_t is_io_catalog_get_data(
5098 mach_port_t master_port,
5099 uint32_t flag,
5100 io_buf_ptr_t *outData,
5101 mach_msg_type_number_t *outDataCount)
5102 {
5103 kern_return_t kr = kIOReturnSuccess;
5104 OSSerialize * s;
5105
5106 if( master_port != master_device_port)
5107 return kIOReturnNotPrivileged;
5108
5109 //printf("io_catalog_get_data called. flag: %d\n", flag);
5110
5111 s = OSSerialize::withCapacity(4096);
5112 if ( !s )
5113 return kIOReturnNoMemory;
5114
5115 kr = gIOCatalogue->serializeData(flag, s);
5116
5117 if ( kr == kIOReturnSuccess ) {
5118 vm_offset_t data;
5119 vm_map_copy_t copy;
5120 vm_size_t size;
5121
5122 size = s->getLength();
5123 kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE);
5124 if ( kr == kIOReturnSuccess ) {
5125 bcopy(s->text(), (void *)data, size);
5126 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5127 (vm_map_size_t)size, true, &copy);
5128 *outData = (char *)copy;
5129 *outDataCount = size;
5130 }
5131 }
5132
5133 s->release();
5134
5135 return kr;
5136 }
5137
5138 /* Routine io_catalog_get_gen_count */
5139 kern_return_t is_io_catalog_get_gen_count(
5140 mach_port_t master_port,
5141 uint32_t *genCount)
5142 {
5143 if( master_port != master_device_port)
5144 return kIOReturnNotPrivileged;
5145
5146 //printf("io_catalog_get_gen_count called.\n");
5147
5148 if ( !genCount )
5149 return kIOReturnBadArgument;
5150
5151 *genCount = gIOCatalogue->getGenerationCount();
5152
5153 return kIOReturnSuccess;
5154 }
5155
5156 /* Routine io_catalog_module_loaded.
5157 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5158 */
5159 kern_return_t is_io_catalog_module_loaded(
5160 mach_port_t master_port,
5161 io_name_t name)
5162 {
5163 if( master_port != master_device_port)
5164 return kIOReturnNotPrivileged;
5165
5166 //printf("io_catalog_module_loaded called. name %s\n", name);
5167
5168 if ( !name )
5169 return kIOReturnBadArgument;
5170
5171 gIOCatalogue->moduleHasLoaded(name);
5172
5173 return kIOReturnSuccess;
5174 }
5175
5176 kern_return_t is_io_catalog_reset(
5177 mach_port_t master_port,
5178 uint32_t flag)
5179 {
5180 if( master_port != master_device_port)
5181 return kIOReturnNotPrivileged;
5182
5183 switch ( flag ) {
5184 case kIOCatalogResetDefault:
5185 gIOCatalogue->reset();
5186 break;
5187
5188 default:
5189 return kIOReturnBadArgument;
5190 }
5191
5192 return kIOReturnSuccess;
5193 }
5194
// Fast-path Mach trap into an IOUserClient: resolves the caller's user
// client reference, looks up the trap entry for args->index, and calls it
// with the six raw trap arguments.  The connect reference taken by the
// lookup is dropped before returning.
kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
    kern_return_t result = kIOReturnBadArgument;
    IOUserClient *userClient;

    if ((userClient = OSDynamicCast(IOUserClient,
            iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
        IOExternalTrap *trap;
        IOService *target = NULL;

        trap = userClient->getTargetAndTrapForIndex(&target, args->index);

        if (trap && target) {
            IOTrap func;

            func = trap->func;

            if (func) {
                result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
            }
        }

        // Balance the reference taken by iokit_lookup_connect_ref_current_task().
        iokit_remove_connect_reference(userClient);
    }

    return result;
}
5222
5223 } /* extern "C" */
5224
// Central external-method dispatcher.  Two paths:
//  1. If a dispatch table entry is supplied, validate all four argument
//     counts/sizes against it and call dispatch->function directly.
//  2. Otherwise fall back to the legacy (pre-Leopard) IOExternalMethod /
//     IOExternalAsyncMethod tables via the shim_* helpers above, selecting
//     the shim by the method's kIOUCTypeMask flags.
IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
					IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
	uint32_t count;
	// Each check is skipped when the table declares the count variable
	// (kIOUCVariableStructureSize); otherwise it must match exactly.
	count = dispatch->checkScalarInputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkStructureInputSize;
	if ((kIOUCVariableStructureSize != count) 
	    && (count != ((args->structureInputDescriptor) 
			    ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkScalarOutputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkStructureOutputSize;
	if ((kIOUCVariableStructureSize != count) 
	    && (count != ((args->structureOutputDescriptor) 
			    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
	{
	    return (kIOReturnBadArgument);
	}

	if (dispatch->function)
	    err = (*dispatch->function)(target, reference, args);
	else
	    err = kIOReturnNoCompletion;	/* implementator can dispatch */

	return (err);
    }


    // pre-Leopard API's don't do ool structs
    if (args->structureInputDescriptor || args->structureOutputDescriptor)
    {
	err = kIOReturnIPCError;
	return (err);
    }

    // Sync StructO shims write through this local; it is copied back to
    // args->structureOutputSize at the end.
    structureOutputSize = args->structureOutputSize;

    if (args->asyncWakePort)
    {
	// Asynchronous legacy path.
	IOExternalAsyncMethod * method;
	object = 0;
	if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
	    return (kIOReturnUnsupported);

	if (kIOUCForegroundOnly & method->flags)
	{
	    if (task_is_gpu_denied(current_task()))
		return (kIOReturnNotPermitted);
	}

	switch (method->flags & kIOUCTypeMask)
	{
	    case kIOUCScalarIStructI:
		err = shim_io_async_method_scalarI_structureI( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					(char *)args->structureInput, args->structureInputSize );
		break;

	    case kIOUCScalarIScalarO:
		err = shim_io_async_method_scalarI_scalarO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					args->scalarOutput, &args->scalarOutputCount );
		break;

	    case kIOUCScalarIStructO:
		err = shim_io_async_method_scalarI_structureO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureOutput, &args->structureOutputSize );
		break;


	    case kIOUCStructIStructO:
		err = shim_io_async_method_structureI_structureO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					(char *)args->structureInput, args->structureInputSize,
					(char *) args->structureOutput, &args->structureOutputSize );
		break;

	    default:
		err = kIOReturnBadArgument;
		break;
	}
    }
    else
    {
	// Synchronous legacy path.
	IOExternalMethod *	method;
	object = 0;
	if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
	    return (kIOReturnUnsupported);

	if (kIOUCForegroundOnly & method->flags)
	{
	    if (task_is_gpu_denied(current_task()))
		return (kIOReturnNotPermitted);
	}

	switch (method->flags & kIOUCTypeMask)
	{
	    case kIOUCScalarIStructI:
		err = shim_io_connect_method_scalarI_structureI( method, object,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureInput, args->structureInputSize );
		break;

	    case kIOUCScalarIScalarO:
		err = shim_io_connect_method_scalarI_scalarO( method, object,
					args->scalarInput, args->scalarInputCount,
					args->scalarOutput, &args->scalarOutputCount );
		break;

	    case kIOUCScalarIStructO:
		err = shim_io_connect_method_scalarI_structureO( method, object,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureOutput, &structureOutputSize );
		break;


	    case kIOUCStructIStructO:
		err = shim_io_connect_method_structureI_structureO( method, object,
					(char *) args->structureInput, args->structureInputSize,
					(char *) args->structureOutput, &structureOutputSize );
		break;

	    default:
		err = kIOReturnBadArgument;
		break;
	}
    }

    args->structureOutputSize = structureOutputSize;

    return (err);
}
5380
#if __LP64__
// LP64: the first two reserved slots are marked unused.
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
// 32-bit: slots 0 and 1 are consumed -- presumably by later-added virtual
// methods kept ABI-compatible via the padding (confirm against IOUserClient.h).
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
// Remaining reserved vtable padding slots for future expansion.
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);
5402