1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/system.h>
44 #include <libkern/OSDebug.h>
45 #include <sys/proc.h>
46 #include <sys/kauth.h>
47 #include <sys/codesign.h>
48
49 #include <mach/sdt.h>
50
51 #if CONFIG_MACF
52
53 extern "C" {
54 #include <security/mac_framework.h>
55 };
56 #include <sys/kauth.h>
57
58 #define IOMACF_LOG 0
59
60 #endif /* CONFIG_MACF */
61
62 #include <IOKit/assert.h>
63
64 #include "IOServicePrivate.h"
65 #include "IOKitKernelInternal.h"
66
67 #define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
68 #define SCALAR32(x) ((uint32_t )x)
69 #define ARG32(x) ((void *)(uintptr_t)SCALAR32(x))
70 #define REF64(x) ((io_user_reference_t)((UInt64)(x)))
71 #define REF32(x) ((int)(x))
72
73 enum
74 {
75 kIOUCAsync0Flags = 3ULL,
76 kIOUCAsync64Flag = 1ULL,
77 kIOUCAsyncErrorLoggedFlag = 2ULL
78 };
79
80 #if IOKITSTATS
81
82 #define IOStatisticsRegisterCounter() \
83 do { \
84 reserved->counter = IOStatistics::registerUserClient(this); \
85 } while (0)
86
87 #define IOStatisticsUnregisterCounter() \
88 do { \
89 if (reserved) \
90 IOStatistics::unregisterUserClient(reserved->counter); \
91 } while (0)
92
93 #define IOStatisticsClientCall() \
94 do { \
95 IOStatistics::countUserClientCall(client); \
96 } while (0)
97
98 #else
99
100 #define IOStatisticsRegisterCounter()
101 #define IOStatisticsUnregisterCounter()
102 #define IOStatisticsClientCall()
103
104 #endif /* IOKITSTATS */
105
106 #if DEVELOPMENT || DEBUG
107
108 #define FAKE_STACK_FRAME(a) \
109 const void ** __frameptr; \
110 const void * __retaddr; \
111 __frameptr = (typeof(__frameptr)) __builtin_frame_address(0); \
112 __retaddr = __frameptr[1]; \
113 __frameptr[1] = (a);
114
115 #define FAKE_STACK_FRAME_END() \
116 __frameptr[1] = __retaddr;
117
118 #else /* DEVELOPMENT || DEBUG */
119
120 #define FAKE_STACK_FRAME(a)
121 #define FAKE_STACK_FRAME_END()
122
123 #endif /* DEVELOPMENT || DEBUG */
124
125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // definitions we should get from osfmk
128
129 //typedef struct ipc_port * ipc_port_t;
130 typedef natural_t ipc_kobject_type_t;
131
132 #define IKOT_IOKIT_SPARE 27
133 #define IKOT_IOKIT_CONNECT 29
134 #define IKOT_IOKIT_OBJECT 30
135
136 extern "C" {
137
138 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
139 ipc_kobject_type_t type );
140
141 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
142
143 extern mach_port_name_t iokit_make_send_right( task_t task,
144 io_object_t obj, ipc_kobject_type_t type );
145
146 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
147
148 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
149
150 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
151
152 extern ipc_port_t master_device_port;
153
154 extern void iokit_retain_port( ipc_port_t port );
155 extern void iokit_release_port( ipc_port_t port );
156 extern void iokit_release_port_send( ipc_port_t port );
157
158 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
159
160 #include <mach/mach_traps.h>
161 #include <vm/vm_map.h>
162
163 } /* extern "C" */
164
165
166 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
167
168 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
169
170 class IOMachPort : public OSObject
171 {
172 OSDeclareDefaultStructors(IOMachPort)
173 public:
174 OSObject * object;
175 ipc_port_t port;
176 UInt32 mscount;
177 UInt8 holdDestroy;
178
179 static IOMachPort * portForObject( OSObject * obj,
180 ipc_kobject_type_t type );
181 static bool noMoreSendersForObject( OSObject * obj,
182 ipc_kobject_type_t type, mach_port_mscount_t * mscount );
183 static void releasePortForObject( OSObject * obj,
184 ipc_kobject_type_t type );
185 static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );
186
187 static OSDictionary * dictForType( ipc_kobject_type_t type );
188
189 static mach_port_name_t makeSendRightForTask( task_t task,
190 io_object_t obj, ipc_kobject_type_t type );
191
192 virtual void free() APPLE_KEXT_OVERRIDE;
193 };
194
195 #define super OSObject
196 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
197
198 static IOLock * gIOObjectPortLock;
199
200 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
201
202 // not in dictForType() for debugging ease
203 static OSDictionary * gIOObjectPorts;
204 static OSDictionary * gIOConnectPorts;
205
206 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
207 {
208 OSDictionary ** dict;
209
210 if( IKOT_IOKIT_OBJECT == type )
211 dict = &gIOObjectPorts;
212 else if( IKOT_IOKIT_CONNECT == type )
213 dict = &gIOConnectPorts;
214 else
215 return( 0 );
216
217 if( 0 == *dict)
218 *dict = OSDictionary::withCapacity( 1 );
219
220 return( *dict );
221 }
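// The object and connect ports live in two lazily created global dictionaries
// (kept out of dictForType() itself only for debugging ease, per the note above).
// The raw OSObject pointer is the key; the cast to OSSymbol * merely satisfies
// the OSDictionary interface.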
222
223 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
224 ipc_kobject_type_t type )
225 {
226 IOMachPort * inst = 0;
227 OSDictionary * dict;
228
229 IOTakeLock( gIOObjectPortLock);
230
231 do {
232
233 dict = dictForType( type );
234 if( !dict)
235 continue;
236
237 if( (inst = (IOMachPort *)
238 dict->getObject( (const OSSymbol *) obj ))) {
239 inst->mscount++;
240 inst->retain();
241 continue;
242 }
243
244 inst = new IOMachPort;
245 if( inst && !inst->init()) {
246 inst = 0;
247 continue;
248 }
249
250 inst->port = iokit_alloc_object_port( obj, type );
251 if( inst->port) {
252 // retains obj
253 dict->setObject( (const OSSymbol *) obj, inst );
254 inst->mscount++;
255
256 } else {
257 inst->release();
258 inst = 0;
259 }
260
261 } while( false );
262
263 IOUnlock( gIOObjectPortLock);
264
265 return( inst );
266 }
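// Returns a retained IOMachPort for obj, creating the kobject port on first use.
// The wrapper's mscount is bumped on each lookup/creation so that stale
// no-senders notifications can be detected later in noMoreSendersForObject().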
267
268 bool IOMachPort::noMoreSendersForObject( OSObject * obj,
269 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
270 {
271 OSDictionary * dict;
272 IOMachPort * machPort;
273 IOUserClient * uc;
274 bool destroyed = true;
275
276 IOTakeLock( gIOObjectPortLock);
277
278 if( (dict = dictForType( type ))) {
279 obj->retain();
280
281 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
282 if( machPort) {
283 destroyed = (machPort->mscount <= *mscount);
284 if (!destroyed) *mscount = machPort->mscount;
285 else
286 {
287 if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
288 {
289 uc->noMoreSenders();
290 }
291 dict->removeObject( (const OSSymbol *) obj );
292 }
293 }
294 obj->release();
295 }
296
297 IOUnlock( gIOObjectPortLock);
298
299 return( destroyed );
300 }
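// Called when a no-senders notification arrives for the port. If the notification's
// make-send count is behind the wrapper's (new rights were minted since it was
// generated), the caller gets the current count back and the port survives;
// otherwise the wrapper is removed (its free() destroys the port) and, for connect
// ports, the user client's owner list is torn down first via noMoreSenders().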
301
302 void IOMachPort::releasePortForObject( OSObject * obj,
303 ipc_kobject_type_t type )
304 {
305 OSDictionary * dict;
306 IOMachPort * machPort;
307
308 assert(IKOT_IOKIT_CONNECT != type);
309
310 IOTakeLock( gIOObjectPortLock);
311
312 if( (dict = dictForType( type ))) {
313 obj->retain();
314 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
315 if( machPort && !machPort->holdDestroy)
316 dict->removeObject( (const OSSymbol *) obj );
317 obj->release();
318 }
319
320 IOUnlock( gIOObjectPortLock);
321 }
322
323 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
324 {
325 OSDictionary * dict;
326 IOMachPort * machPort;
327
328 IOLockLock( gIOObjectPortLock );
329
330 if( (dict = dictForType( type ))) {
331 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
332 if( machPort)
333 machPort->holdDestroy = true;
334 }
335
336 IOLockUnlock( gIOObjectPortLock );
337 }
338
339 void IOUserClient::destroyUserReferences( OSObject * obj )
340 {
341 IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );
342
343 // panther, 3160200
344 // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );
345
346 OSDictionary * dict;
347
348 IOTakeLock( gIOObjectPortLock);
349 obj->retain();
350
351 if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
352 {
353 IOMachPort * port;
354 port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
355 if (port)
356 {
357 IOUserClient * uc;
358 if ((uc = OSDynamicCast(IOUserClient, obj)))
359 {
360 uc->noMoreSenders();
361 if (uc->mappings)
362 {
363 dict->setObject((const OSSymbol *) uc->mappings, port);
364 iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);
365
366 uc->mappings->release();
367 uc->mappings = 0;
368 }
369 }
370 dict->removeObject( (const OSSymbol *) obj );
371 }
372 }
373 obj->release();
374 IOUnlock( gIOObjectPortLock);
375 }
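// Drops the user-visible ports for an object that is going away. For a user client
// that still has outstanding memory mappings, the connect port is not destroyed
// here: it is re-keyed to the client's mappings collection (and the port's kobject
// switched to match), evidently so rights already held against the connection stay
// valid until the mappings themselves are released.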
376
377 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
378 io_object_t obj, ipc_kobject_type_t type )
379 {
380 return( iokit_make_send_right( task, obj, type ));
381 }
382
383 void IOMachPort::free( void )
384 {
385 if( port)
386 iokit_destroy_object_port( port );
387 super::free();
388 }
389
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
392 class IOUserIterator : public OSIterator
393 {
394 OSDeclareDefaultStructors(IOUserIterator)
395 public:
396 OSObject * userIteratorObject;
397 IOLock * lock;
398
399 static IOUserIterator * withIterator(OSIterator * iter);
400 virtual bool init( void ) APPLE_KEXT_OVERRIDE;
401 virtual void free() APPLE_KEXT_OVERRIDE;
402
403 virtual void reset() APPLE_KEXT_OVERRIDE;
404 virtual bool isValid() APPLE_KEXT_OVERRIDE;
405 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
406 };
407
408 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
409
410 class IOUserNotification : public IOUserIterator
411 {
412 OSDeclareDefaultStructors(IOUserNotification)
413
414 #define holdNotify userIteratorObject
415
416 public:
417
418 virtual void free() APPLE_KEXT_OVERRIDE;
419
420 virtual void setNotification( IONotifier * obj );
421
422 virtual void reset() APPLE_KEXT_OVERRIDE;
423 virtual bool isValid() APPLE_KEXT_OVERRIDE;
424 };
425
426 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
427
428 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
429
430 IOUserIterator *
431 IOUserIterator::withIterator(OSIterator * iter)
432 {
433 IOUserIterator * me;
434
435 if (!iter) return (0);
436
437 me = new IOUserIterator;
438 if (me && !me->init())
439 {
440 me->release();
441 me = 0;
442 }
443 if (!me) return me;
444 me->userIteratorObject = iter;
445
446 return (me);
447 }
448
449 bool
450 IOUserIterator::init( void )
451 {
452 if (!OSObject::init()) return (false);
453
454 lock = IOLockAlloc();
455 if( !lock)
456 return( false );
457
458 return (true);
459 }
460
461 void
462 IOUserIterator::free()
463 {
464 if (userIteratorObject) userIteratorObject->release();
465 if (lock) IOLockFree(lock);
466 OSObject::free();
467 }
468
469 void
470 IOUserIterator::reset()
471 {
472 IOLockLock(lock);
473 assert(OSDynamicCast(OSIterator, userIteratorObject));
474 ((OSIterator *)userIteratorObject)->reset();
475 IOLockUnlock(lock);
476 }
477
478 bool
479 IOUserIterator::isValid()
480 {
481 bool ret;
482
483 IOLockLock(lock);
484 assert(OSDynamicCast(OSIterator, userIteratorObject));
485 ret = ((OSIterator *)userIteratorObject)->isValid();
486 IOLockUnlock(lock);
487
488 return (ret);
489 }
490
491 OSObject *
492 IOUserIterator::getNextObject()
493 {
494 OSObject * ret;
495
496 IOLockLock(lock);
497 assert(OSDynamicCast(OSIterator, userIteratorObject));
498 ret = ((OSIterator *)userIteratorObject)->getNextObject();
499 IOLockUnlock(lock);
500
501 return (ret);
502 }
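// IOUserIterator wraps a kernel OSIterator handed out to user space and serializes
// reset/isValid/getNextObject with its own lock, so concurrent MIG requests against
// the same iterator object cannot race the underlying iterator.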
503
504 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
505 extern "C" {
506
507 // functions called from osfmk/device/iokit_rpc.c
508
509 void
510 iokit_add_reference( io_object_t obj )
511 {
512 if( obj)
513 obj->retain();
514 }
515
516 void
517 iokit_remove_reference( io_object_t obj )
518 {
519 if( obj)
520 obj->release();
521 }
522
523 void
524 iokit_add_connect_reference( io_object_t obj )
525 {
526 IOUserClient * uc;
527
528 if (!obj) return;
529
530 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
531
532 obj->retain();
533 }
534
535 void
536 iokit_remove_connect_reference( io_object_t obj )
537 {
538 IOUserClient * uc;
539 bool finalize = false;
540
541 if (!obj) return;
542
543 if ((uc = OSDynamicCast(IOUserClient, obj)))
544 {
545 if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
546 {
547 IOLockLock(gIOObjectPortLock);
548 if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
549 IOLockUnlock(gIOObjectPortLock);
550 }
551 if (finalize) uc->scheduleFinalize(true);
552 }
553
554 obj->release();
555 }
556
557 bool
558 IOUserClient::finalizeUserReferences(OSObject * obj)
559 {
560 IOUserClient * uc;
561 bool ok = true;
562
563 if ((uc = OSDynamicCast(IOUserClient, obj)))
564 {
565 IOLockLock(gIOObjectPortLock);
566 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
567 IOLockUnlock(gIOObjectPortLock);
568 }
569 return (ok);
570 }
571
572 ipc_port_t
573 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
574 {
575 IOMachPort * machPort;
576 ipc_port_t port;
577
578 if( (machPort = IOMachPort::portForObject( obj, type ))) {
579
580 port = machPort->port;
581 if( port)
582 iokit_retain_port( port );
583
584 machPort->release();
585
586 } else
587 port = NULL;
588
589 return( port );
590 }
591
592 kern_return_t
593 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
594 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
595 {
596 IOUserClient * client;
597 IOMemoryMap * map;
598 IOUserNotification * notify;
599
600 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
601 return( kIOReturnNotReady );
602
603 if( IKOT_IOKIT_CONNECT == type)
604 {
605 if( (client = OSDynamicCast( IOUserClient, obj )))
606 {
607 IOStatisticsClientCall();
608 client->clientDied();
609 }
610 }
611 else if( IKOT_IOKIT_OBJECT == type)
612 {
613 if( (map = OSDynamicCast( IOMemoryMap, obj )))
614 map->taskDied();
615 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
616 notify->setNotification( 0 );
617 }
618
619 return( kIOReturnSuccess );
620 }
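// iokit_client_died() is the no-senders entry point from the osfmk side (see
// iokit_rpc.c). Only once noMoreSendersForObject() confirms the last send right is
// really gone does it dispatch: clientDied() for connections, taskDied() for memory
// maps, or clearing the notifier for user notifications.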
621
622 }; /* extern "C" */
623
624 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
625
626 class IOServiceUserNotification : public IOUserNotification
627 {
628 OSDeclareDefaultStructors(IOServiceUserNotification)
629
630 struct PingMsg {
631 mach_msg_header_t msgHdr;
632 OSNotificationHeader64 notifyHeader;
633 };
634
635 enum { kMaxOutstanding = 1024 };
636
637 PingMsg * pingMsg;
638 vm_size_t msgSize;
639 OSArray * newSet;
640 OSObject * lastEntry;
641 bool armed;
642 bool ipcLogged;
643
644 public:
645
646 virtual bool init( mach_port_t port, natural_t type,
647 void * reference, vm_size_t referenceSize,
648 bool clientIs64 );
649 virtual void free() APPLE_KEXT_OVERRIDE;
650 void invalidatePort(void);
651
652 static bool _handler( void * target,
653 void * ref, IOService * newService, IONotifier * notifier );
654 virtual bool handler( void * ref, IOService * newService );
655
656 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
657 };
658
659 class IOServiceMessageUserNotification : public IOUserNotification
660 {
661 OSDeclareDefaultStructors(IOServiceMessageUserNotification)
662
663 struct PingMsg {
664 mach_msg_header_t msgHdr;
665 mach_msg_body_t msgBody;
666 mach_msg_port_descriptor_t ports[1];
667 OSNotificationHeader64 notifyHeader __attribute__ ((packed));
668 };
669
670 PingMsg * pingMsg;
671 vm_size_t msgSize;
672 uint8_t clientIs64;
673 int owningPID;
674 bool ipcLogged;
675
676 public:
677
678 virtual bool init( mach_port_t port, natural_t type,
679 void * reference, vm_size_t referenceSize,
680 vm_size_t extraSize,
681 bool clientIs64 );
682
683 virtual void free() APPLE_KEXT_OVERRIDE;
684 void invalidatePort(void);
685
686 static IOReturn _handler( void * target, void * ref,
687 UInt32 messageType, IOService * provider,
688 void * messageArgument, vm_size_t argSize );
689 virtual IOReturn handler( void * ref,
690 UInt32 messageType, IOService * provider,
691 void * messageArgument, vm_size_t argSize );
692
693 virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
694 };
695
696 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
697
698 #undef super
699 #define super IOUserIterator
700 OSDefineMetaClass( IOUserNotification, IOUserIterator )
701 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
702
703 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
704
705 void IOUserNotification::free( void )
706 {
707 if (holdNotify)
708 {
709 assert(OSDynamicCast(IONotifier, holdNotify));
710 ((IONotifier *)holdNotify)->remove();
711 holdNotify = 0;
712 }
713 // can't be in handler now
714
715 super::free();
716 }
717
718
719 void IOUserNotification::setNotification( IONotifier * notify )
720 {
721 OSObject * previousNotify;
722
723 IOLockLock( gIOObjectPortLock);
724
725 previousNotify = holdNotify;
726 holdNotify = notify;
727
728 IOLockUnlock( gIOObjectPortLock);
729
730 if( previousNotify)
731 {
732 assert(OSDynamicCast(IONotifier, previousNotify));
733 ((IONotifier *)previousNotify)->remove();
734 }
735 }
736
737 void IOUserNotification::reset()
738 {
739 // ?
740 }
741
742 bool IOUserNotification::isValid()
743 {
744 return( true );
745 }
746
747 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
748
749 #undef super
750 #define super IOUserNotification
751 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
752
753 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
754
755 bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
756 void * reference, vm_size_t referenceSize,
757 bool clientIs64 )
758 {
759 if( !super::init())
760 return( false );
761
762 newSet = OSArray::withCapacity( 1 );
763 if( !newSet)
764 return( false );
765
766 if (referenceSize > sizeof(OSAsyncReference64))
767 return( false );
768
769 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
770 pingMsg = (PingMsg *) IOMalloc( msgSize);
771 if( !pingMsg)
772 return( false );
773
774 bzero( pingMsg, msgSize);
775
776 pingMsg->msgHdr.msgh_remote_port = port;
777 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS(
778 MACH_MSG_TYPE_COPY_SEND /*remote*/,
779 MACH_MSG_TYPE_MAKE_SEND /*local*/);
780 pingMsg->msgHdr.msgh_size = msgSize;
781 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
782
783 pingMsg->notifyHeader.size = 0;
784 pingMsg->notifyHeader.type = type;
785 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
786
787 return( true );
788 }
789
790 void IOServiceUserNotification::invalidatePort(void)
791 {
792 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
793 }
794
795 void IOServiceUserNotification::free( void )
796 {
797 PingMsg * _pingMsg;
798 vm_size_t _msgSize;
799 OSArray * _newSet;
800 OSObject * _lastEntry;
801
802 _pingMsg = pingMsg;
803 _msgSize = msgSize;
804 _lastEntry = lastEntry;
805 _newSet = newSet;
806
807 super::free();
808
809 if( _pingMsg && _msgSize) {
810 if (_pingMsg->msgHdr.msgh_remote_port) {
811 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
812 }
813 IOFree(_pingMsg, _msgSize);
814 }
815
816 if( _lastEntry)
817 _lastEntry->release();
818
819 if( _newSet)
820 _newSet->release();
821 }
822
823 bool IOServiceUserNotification::_handler( void * target,
824 void * ref, IOService * newService, IONotifier * notifier )
825 {
826 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
827 }
828
829 bool IOServiceUserNotification::handler( void * ref,
830 IOService * newService )
831 {
832 unsigned int count;
833 kern_return_t kr;
834 ipc_port_t port = NULL;
835 bool sendPing = false;
836
837 IOTakeLock( lock );
838
839 count = newSet->getCount();
840 if( count < kMaxOutstanding) {
841
842 newSet->setObject( newService );
843 if( (sendPing = (armed && (0 == count))))
844 armed = false;
845 }
846
847 IOUnlock( lock );
848
849 if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
850 IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );
851
852 if( sendPing) {
853 if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
854 pingMsg->msgHdr.msgh_local_port = port;
855 else
856 pingMsg->msgHdr.msgh_local_port = NULL;
857
858 kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
859 pingMsg->msgHdr.msgh_size,
860 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
861 0);
862 if( port)
863 iokit_release_port( port );
864
865 if( (KERN_SUCCESS != kr) && !ipcLogged)
866 {
867 ipcLogged = true;
868 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
869 }
870 }
871
872 return( true );
873 }
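// Matched services are queued in newSet (bounded by kMaxOutstanding) and a single
// ping message is sent to the client only when the queue goes from empty to
// non-empty while "armed"; getNextObject() below re-arms once the client has
// drained the queue, so at most one message is outstanding per batch.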
874
875 OSObject * IOServiceUserNotification::getNextObject()
876 {
877 unsigned int count;
878 OSObject * result;
879 OSObject * releaseEntry;
880
881 IOLockLock(lock);
882
883 releaseEntry = lastEntry;
884 count = newSet->getCount();
885 if( count ) {
886 result = newSet->getObject( count - 1 );
887 result->retain();
888 newSet->removeObject( count - 1);
889 } else {
890 result = 0;
891 armed = true;
892 }
893 lastEntry = result;
894
895 IOLockUnlock(lock);
896
897 if (releaseEntry) releaseEntry->release();
898
899 return( result );
900 }
901
902 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
903
904 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
905
906 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
907
908 bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
909 void * reference, vm_size_t referenceSize, vm_size_t extraSize,
910 bool client64 )
911 {
912 if( !super::init())
913 return( false );
914
915 if (referenceSize > sizeof(OSAsyncReference64))
916 return( false );
917
918 clientIs64 = client64;
919
920 owningPID = proc_selfpid();
921
922 extraSize += sizeof(IOServiceInterestContent64);
923 msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
924 pingMsg = (PingMsg *) IOMalloc( msgSize);
925 if( !pingMsg)
926 return( false );
927
928 bzero( pingMsg, msgSize);
929
930 pingMsg->msgHdr.msgh_remote_port = port;
931 pingMsg->msgHdr.msgh_bits = MACH_MSGH_BITS_COMPLEX
932 | MACH_MSGH_BITS(
933 MACH_MSG_TYPE_COPY_SEND /*remote*/,
934 MACH_MSG_TYPE_MAKE_SEND /*local*/);
935 pingMsg->msgHdr.msgh_size = msgSize;
936 pingMsg->msgHdr.msgh_id = kOSNotificationMessageID;
937
938 pingMsg->msgBody.msgh_descriptor_count = 1;
939
940 pingMsg->ports[0].name = 0;
941 pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
942 pingMsg->ports[0].type = MACH_MSG_PORT_DESCRIPTOR;
943
944 pingMsg->notifyHeader.size = extraSize;
945 pingMsg->notifyHeader.type = type;
946 bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );
947
948 return( true );
949 }
950
951 void IOServiceMessageUserNotification::invalidatePort(void)
952 {
953 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
954 }
955
956 void IOServiceMessageUserNotification::free( void )
957 {
958 PingMsg * _pingMsg;
959 vm_size_t _msgSize;
960
961 _pingMsg = pingMsg;
962 _msgSize = msgSize;
963
964 super::free();
965
966 if( _pingMsg && _msgSize) {
967 if (_pingMsg->msgHdr.msgh_remote_port) {
968 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
969 }
970 IOFree( _pingMsg, _msgSize);
971 }
972 }
973
974 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
975 UInt32 messageType, IOService * provider,
976 void * argument, vm_size_t argSize )
977 {
978 return( ((IOServiceMessageUserNotification *) target)->handler(
979 ref, messageType, provider, argument, argSize));
980 }
981
982 IOReturn IOServiceMessageUserNotification::handler( void * ref,
983 UInt32 messageType, IOService * provider,
984 void * messageArgument, vm_size_t callerArgSize )
985 {
986 enum { kLocalMsgSize = 0x100 };
987 uint64_t stackMsg[kLocalMsgSize / sizeof(uint64_t)];
988 void * allocMsg;
989 kern_return_t kr;
990 vm_size_t argSize;
991 vm_size_t thisMsgSize;
992 ipc_port_t thisPort, providerPort;
993 struct PingMsg * thisMsg;
994 IOServiceInterestContent64 * data;
995
996 if (kIOMessageCopyClientID == messageType)
997 {
998 *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
999 return (kIOReturnSuccess);
1000 }
1001
1002 if (callerArgSize == 0)
1003 {
1004 if (clientIs64) argSize = sizeof(data->messageArgument[0]);
1005 else argSize = sizeof(uint32_t);
1006 }
1007 else
1008 {
1009 argSize = callerArgSize;
1010 if( argSize > kIOUserNotifyMaxMessageSize)
1011 argSize = kIOUserNotifyMaxMessageSize;
1012 }
1013
1014 // adjust message size for ipc restrictions
1015 natural_t type;
1016 type = pingMsg->notifyHeader.type;
1017 type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
1018 type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
1019 argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;
1020
1021 thisMsgSize = msgSize
1022 + sizeof( IOServiceInterestContent64 )
1023 - sizeof( data->messageArgument)
1024 + argSize;
1025
1026 if (thisMsgSize > sizeof(stackMsg))
1027 {
1028 allocMsg = IOMalloc(thisMsgSize);
1029 if (!allocMsg) return (kIOReturnNoMemory);
1030 thisMsg = (typeof(thisMsg)) allocMsg;
1031 }
1032 else
1033 {
1034 allocMsg = 0;
1035 thisMsg = (typeof(thisMsg)) stackMsg;
1036 }
1037
1038 bcopy(pingMsg, thisMsg, msgSize);
1039 thisMsg->notifyHeader.type = type;
1040 data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
1041 // == pingMsg->notifyHeader.content;
1042 data->messageType = messageType;
1043
1044 if (callerArgSize == 0)
1045 {
1046 data->messageArgument[0] = (io_user_reference_t) messageArgument;
1047 if (!clientIs64)
1048 {
1049 data->messageArgument[0] |= (data->messageArgument[0] << 32);
1050 }
1051 }
1052 else
1053 {
1054 bcopy( messageArgument, data->messageArgument, callerArgSize );
1055 bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
1056 }
1057
1058 thisMsg->notifyHeader.type = type;
1059 thisMsg->msgHdr.msgh_size = thisMsgSize;
1060
1061 providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
1062 thisMsg->ports[0].name = providerPort;
1063 thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
1064 thisMsg->msgHdr.msgh_local_port = thisPort;
1065
1066 kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
1067 thisMsg->msgHdr.msgh_size,
1068 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
1069 0);
1070 if( thisPort)
1071 iokit_release_port( thisPort );
1072 if( providerPort)
1073 iokit_release_port( providerPort );
1074
1075 if (allocMsg)
1076 IOFree(allocMsg, thisMsgSize);
1077
1078 if((KERN_SUCCESS != kr) && !ipcLogged)
1079 {
1080 ipcLogged = true;
1081 IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
1082 }
1083
1084 return( kIOReturnSuccess );
1085 }
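// Interest messages are assembled in a small on-stack buffer when they fit and
// heap-allocated otherwise. The low bits of the notification type carry the
// original (unrounded) argument length (kIOKitNoticationMsgSizeMask bits), while
// the payload itself is rounded up to an aligned size for the IPC copy.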
1086
1087 OSObject * IOServiceMessageUserNotification::getNextObject()
1088 {
1089 return( 0 );
1090 }
1091
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093
1094 #undef super
1095 #define super IOService
1096 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1097
1098 IOLock * gIOUserClientOwnersLock;
1099
1100 void IOUserClient::initialize( void )
1101 {
1102 gIOObjectPortLock = IOLockAlloc();
1103 gIOUserClientOwnersLock = IOLockAlloc();
1104 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1105 }
1106
1107 void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
1108 mach_port_t wakePort,
1109 void *callback, void *refcon)
1110 {
1111 asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort)
1112 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1113 asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback;
1114 asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
1115 }
1116
1117 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1118 mach_port_t wakePort,
1119 mach_vm_address_t callback, io_user_reference_t refcon)
1120 {
1121 asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort)
1122 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
1123 asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback;
1124 asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
1125 }
1126
1127 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1128 mach_port_t wakePort,
1129 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1130 {
1131 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1132 if (vm_map_is_64bit(get_task_map(task))) {
1133 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1134 }
1135 }
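// The async reference packs its flags into the low bits of slot 0
// (kIOUCAsync0Flags), alongside the wake port, with the callback and refcon in the
// following slots. The task-aware variant additionally sets kIOUCAsync64Flag for
// 64-bit address spaces so _sendAsyncResult64() uses the 64-bit reply layout.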
1136
1137 static OSDictionary * CopyConsoleUser(UInt32 uid)
1138 {
1139 OSArray * array;
1140 OSDictionary * user = 0;
1141
1142 if ((array = OSDynamicCast(OSArray,
1143 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1144 {
1145 for (unsigned int idx = 0;
1146 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1147 idx++) {
1148 OSNumber * num;
1149
1150 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1151 && (uid == num->unsigned32BitValue())) {
1152 user->retain();
1153 break;
1154 }
1155 }
1156 array->release();
1157 }
1158 return user;
1159 }
1160
1161 static OSDictionary * CopyUserOnConsole(void)
1162 {
1163 OSArray * array;
1164 OSDictionary * user = 0;
1165
1166 if ((array = OSDynamicCast(OSArray,
1167 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1168 {
1169 for (unsigned int idx = 0;
1170 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1171 idx++)
1172 {
1173 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1174 {
1175 user->retain();
1176 break;
1177 }
1178 }
1179 array->release();
1180 }
1181 return (user);
1182 }
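// Both helpers walk the IOConsoleUsers array published on the registry root and
// return a retained session dictionary (the caller releases it), either for a
// specific uid or for whichever session is currently on the console.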
1183
1184 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1185 IOService * service )
1186 {
1187 proc_t p;
1188
1189 p = (proc_t) get_bsdtask_info(task);
1190 if (p)
1191 {
1192 uint64_t authorizationID;
1193
1194 authorizationID = proc_uniqueid(p);
1195 if (authorizationID)
1196 {
1197 if (service->getAuthorizationID() == authorizationID)
1198 {
1199 return (kIOReturnSuccess);
1200 }
1201 }
1202 }
1203
1204 return (kIOReturnNotPermitted);
1205 }
1206
1207 IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
1208 const char * privilegeName )
1209 {
1210 kern_return_t kr;
1211 security_token_t token;
1212 mach_msg_type_number_t count;
1213 task_t task;
1214 OSDictionary * user;
1215 bool secureConsole;
1216
1217
1218 if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
1219 sizeof(kIOClientPrivilegeForeground)))
1220 {
1221 if (task_is_gpu_denied(current_task()))
1222 return (kIOReturnNotPrivileged);
1223 else
1224 return (kIOReturnSuccess);
1225 }
1226
1227 if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
1228 sizeof(kIOClientPrivilegeConsoleSession)))
1229 {
1230 kauth_cred_t cred;
1231 proc_t p;
1232
1233 task = (task_t) securityToken;
1234 if (!task)
1235 task = current_task();
1236 p = (proc_t) get_bsdtask_info(task);
1237 kr = kIOReturnNotPrivileged;
1238
1239 if (p && (cred = kauth_cred_proc_ref(p)))
1240 {
1241 user = CopyUserOnConsole();
1242 if (user)
1243 {
1244 OSNumber * num;
1245 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
1246 && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
1247 {
1248 kr = kIOReturnSuccess;
1249 }
1250 user->release();
1251 }
1252 kauth_cred_unref(&cred);
1253 }
1254 return (kr);
1255 }
1256
1257 if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
1258 sizeof(kIOClientPrivilegeSecureConsoleProcess))))
1259 task = (task_t)((IOUCProcessToken *)securityToken)->token;
1260 else
1261 task = (task_t)securityToken;
1262
1263 count = TASK_SECURITY_TOKEN_COUNT;
1264 kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );
1265
1266 if (KERN_SUCCESS != kr)
1267 {}
1268 else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
1269 sizeof(kIOClientPrivilegeAdministrator))) {
1270 if (0 != token.val[0])
1271 kr = kIOReturnNotPrivileged;
1272 } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
1273 sizeof(kIOClientPrivilegeLocalUser))) {
1274 user = CopyConsoleUser(token.val[0]);
1275 if ( user )
1276 user->release();
1277 else
1278 kr = kIOReturnNotPrivileged;
1279 } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
1280 sizeof(kIOClientPrivilegeConsoleUser))) {
1281 user = CopyConsoleUser(token.val[0]);
1282 if ( user ) {
1283 if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
1284 kr = kIOReturnNotPrivileged;
1285 else if ( secureConsole ) {
1286 OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
1287 if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
1288 kr = kIOReturnNotPrivileged;
1289 }
1290 user->release();
1291 }
1292 else
1293 kr = kIOReturnNotPrivileged;
1294 } else
1295 kr = kIOReturnUnsupported;
1296
1297 return (kr);
1298 }
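// The recognized privilege names map onto distinct checks: foreground/GPU access,
// membership in the on-console audit session, an administrator (uid 0) security
// token, a local/console user from the IOConsoleUsers array, or the secure-input
// process, whose securityToken is an IOUCProcessToken rather than a task_t.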
1299
1300 OSObject * IOUserClient::copyClientEntitlement( task_t task,
1301 const char * entitlement )
1302 {
1303 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1304
1305 proc_t p = NULL;
1306 pid_t pid = 0;
1307 char procname[MAXCOMLEN + 1] = "";
1308 size_t len = 0;
1309 void *entitlements_blob = NULL;
1310 char *entitlements_data = NULL;
1311 OSObject *entitlements_obj = NULL;
1312 OSDictionary *entitlements = NULL;
1313 OSString *errorString = NULL;
1314 OSObject *value = NULL;
1315
1316 p = (proc_t)get_bsdtask_info(task);
1317 if (p == NULL)
1318 goto fail;
1319 pid = proc_pid(p);
1320 proc_name(pid, procname, (int)sizeof(procname));
1321
1322 if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
1323 goto fail;
1324
1325 if (len <= offsetof(CS_GenericBlob, data))
1326 goto fail;
1327
1328 /*
1329 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1330 * we'll try to parse in the kernel.
1331 */
1332 len -= offsetof(CS_GenericBlob, data);
1333 if (len > MAX_ENTITLEMENTS_LEN) {
1334 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
1335 goto fail;
1336 }
1337
1338 /*
1339 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1340 * what is stored in the entitlements blob. Copy the string and
1341 * terminate it.
1342 */
1343 entitlements_data = (char *)IOMalloc(len + 1);
1344 if (entitlements_data == NULL)
1345 goto fail;
1346 memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
1347 entitlements_data[len] = '\0';
1348
1349 entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
1350 if (errorString != NULL) {
1351 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
1352 goto fail;
1353 }
1354 if (entitlements_obj == NULL)
1355 goto fail;
1356
1357 entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
1358 if (entitlements == NULL)
1359 goto fail;
1360
1361 /* Fetch the entitlement value from the dictionary. */
1362 value = entitlements->getObject(entitlement);
1363 if (value != NULL)
1364 value->retain();
1365
1366 fail:
1367 if (entitlements_data != NULL)
1368 IOFree(entitlements_data, len + 1);
1369 if (entitlements_obj != NULL)
1370 entitlements_obj->release();
1371 if (errorString != NULL)
1372 errorString->release();
1373 return value;
1374 }
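// Illustrative use (the entitlement name here is hypothetical, not from this file):
// a driver deciding whether to accept a client might do
//
//   OSObject * ent = IOUserClient::copyClientEntitlement(owningTask,
//                                      "com.example.driver.allow-user-client");
//   bool allowed = (ent == kOSBooleanTrue);
//   OSSafeReleaseNULL(ent);
//
// The returned object is retained, so callers must release it.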
1375
1376 bool IOUserClient::init()
1377 {
1378 if (getPropertyTable() || super::init())
1379 return reserve();
1380
1381 return false;
1382 }
1383
1384 bool IOUserClient::init(OSDictionary * dictionary)
1385 {
1386 if (getPropertyTable() || super::init(dictionary))
1387 return reserve();
1388
1389 return false;
1390 }
1391
1392 bool IOUserClient::initWithTask(task_t owningTask,
1393 void * securityID,
1394 UInt32 type )
1395 {
1396 if (getPropertyTable() || super::init())
1397 return reserve();
1398
1399 return false;
1400 }
1401
1402 bool IOUserClient::initWithTask(task_t owningTask,
1403 void * securityID,
1404 UInt32 type,
1405 OSDictionary * properties )
1406 {
1407 bool ok;
1408
1409 ok = super::init( properties );
1410 ok &= initWithTask( owningTask, securityID, type );
1411
1412 return( ok );
1413 }
1414
1415 bool IOUserClient::reserve()
1416 {
1417 if(!reserved) {
1418 reserved = IONew(ExpansionData, 1);
1419 if (!reserved) {
1420 return false;
1421 }
1422 }
1423 setTerminateDefer(NULL, true);
1424 IOStatisticsRegisterCounter();
1425
1426 return true;
1427 }
1428
1429 struct IOUserClientOwner
1430 {
1431 task_t task;
1432 queue_chain_t taskLink;
1433 IOUserClient * uc;
1434 queue_chain_t ucLink;
1435 };
1436
1437 IOReturn
1438 IOUserClient::registerOwner(task_t task)
1439 {
1440 IOUserClientOwner * owner;
1441 IOReturn ret;
1442 bool newOwner;
1443
1444 IOLockLock(gIOUserClientOwnersLock);
1445
1446 newOwner = true;
1447 ret = kIOReturnSuccess;
1448
1449 if (!owners.next) queue_init(&owners);
1450 else
1451 {
1452 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1453 {
1454 if (task != owner->task) continue;
1455 newOwner = false;
1456 break;
1457 }
1458 }
1459 if (newOwner)
1460 {
1461 owner = IONew(IOUserClientOwner, 1);
1462         if (!owner) ret = kIOReturnNoMemory;
1463 else
1464 {
1465 owner->task = task;
1466 owner->uc = this;
1467 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1468 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1469 }
1470 }
1471
1472 IOLockUnlock(gIOUserClientOwnersLock);
1473
1474 return (ret);
1475 }
1476
1477 void
1478 IOUserClient::noMoreSenders(void)
1479 {
1480 IOUserClientOwner * owner;
1481
1482 IOLockLock(gIOUserClientOwnersLock);
1483
1484 if (owners.next)
1485 {
1486 while (!queue_empty(&owners))
1487 {
1488 owner = (IOUserClientOwner *)(void *) queue_first(&owners);
1489 queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
1490 queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
1491 IODelete(owner, IOUserClientOwner, 1);
1492 }
1493 owners.next = owners.prev = NULL;
1494 }
1495
1496 IOLockUnlock(gIOUserClientOwnersLock);
1497 }
1498
1499 extern "C" kern_return_t
1500 iokit_task_terminate(task_t task)
1501 {
1502 IOUserClientOwner * owner;
1503 IOUserClient * dead;
1504 IOUserClient * uc;
1505 queue_head_t * taskque;
1506
1507 IOLockLock(gIOUserClientOwnersLock);
1508
1509 taskque = task_io_user_clients(task);
1510 dead = NULL;
1511 while (!queue_empty(taskque))
1512 {
1513 owner = (IOUserClientOwner *)(void *) queue_first(taskque);
1514 uc = owner->uc;
1515 queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
1516 queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
1517 if (queue_empty(&uc->owners))
1518 {
1519 uc->retain();
1520 IOLog("destroying out of band connect for %s\n", uc->getName());
1521 // now using the uc queue head as a singly linked queue,
1522 // leaving .next as NULL to mark it empty
1523 uc->owners.next = NULL;
1524 uc->owners.prev = (queue_entry_t) dead;
1525 dead = uc;
1526 }
1527 IODelete(owner, IOUserClientOwner, 1);
1528 }
1529
1530 IOLockUnlock(gIOUserClientOwnersLock);
1531
1532 while (dead)
1533 {
1534 uc = dead;
1535 dead = (IOUserClient *)(void *) dead->owners.prev;
1536 uc->owners.prev = NULL;
1537 if (uc->sharedInstance || !uc->closed) uc->clientDied();
1538 uc->release();
1539 }
1540
1541 return (KERN_SUCCESS);
1542 }
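// When a task exits, any user clients it solely owned are unlinked here and then
// have clientDied() delivered outside the lock. The per-client owners queue head is
// reused as a singly linked "dead" list (next == NULL marks it empty, prev chains
// to the next dead client), as noted in the code above.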
1543
1544 void IOUserClient::free()
1545 {
1546 if( mappings) mappings->release();
1547
1548 IOStatisticsUnregisterCounter();
1549
1550 assert(!owners.next);
1551 assert(!owners.prev);
1552
1553 if (reserved) IODelete(reserved, ExpansionData, 1);
1554
1555 super::free();
1556 }
1557
1558 IOReturn IOUserClient::clientDied( void )
1559 {
1560 IOReturn ret = kIOReturnNotReady;
1561
1562 if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
1563 {
1564 ret = clientClose();
1565 }
1566
1567 return (ret);
1568 }
1569
1570 IOReturn IOUserClient::clientClose( void )
1571 {
1572 return( kIOReturnUnsupported );
1573 }
1574
1575 IOService * IOUserClient::getService( void )
1576 {
1577 return( 0 );
1578 }
1579
1580 IOReturn IOUserClient::registerNotificationPort(
1581 mach_port_t /* port */,
1582 UInt32 /* type */,
1583 UInt32 /* refCon */)
1584 {
1585 return( kIOReturnUnsupported);
1586 }
1587
1588 IOReturn IOUserClient::registerNotificationPort(
1589 mach_port_t port,
1590 UInt32 type,
1591 io_user_reference_t refCon)
1592 {
1593 return (registerNotificationPort(port, type, (UInt32) refCon));
1594 }
1595
1596 IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
1597 semaphore_t * semaphore )
1598 {
1599 return( kIOReturnUnsupported);
1600 }
1601
1602 IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
1603 {
1604 return( kIOReturnUnsupported);
1605 }
1606
1607 IOReturn IOUserClient::clientMemoryForType( UInt32 type,
1608 IOOptionBits * options,
1609 IOMemoryDescriptor ** memory )
1610 {
1611 return( kIOReturnUnsupported);
1612 }
1613
1614 #if !__LP64__
1615 IOMemoryMap * IOUserClient::mapClientMemory(
1616 IOOptionBits type,
1617 task_t task,
1618 IOOptionBits mapFlags,
1619 IOVirtualAddress atAddress )
1620 {
1621 return (NULL);
1622 }
1623 #endif
1624
1625 IOMemoryMap * IOUserClient::mapClientMemory64(
1626 IOOptionBits type,
1627 task_t task,
1628 IOOptionBits mapFlags,
1629 mach_vm_address_t atAddress )
1630 {
1631 IOReturn err;
1632 IOOptionBits options = 0;
1633 IOMemoryDescriptor * memory = 0;
1634 IOMemoryMap * map = 0;
1635
1636 err = clientMemoryForType( (UInt32) type, &options, &memory );
1637
1638 if( memory && (kIOReturnSuccess == err)) {
1639
1640 FAKE_STACK_FRAME(getMetaClass());
1641
1642 options = (options & ~kIOMapUserOptionsMask)
1643 | (mapFlags & kIOMapUserOptionsMask);
1644 map = memory->createMappingInTask( task, atAddress, options );
1645 memory->release();
1646
1647 FAKE_STACK_FRAME_END();
1648 }
1649
1650 return( map );
1651 }
1652
1653 IOReturn IOUserClient::exportObjectToClient(task_t task,
1654 OSObject *obj, io_object_t *clientObj)
1655 {
1656 mach_port_name_t name;
1657
1658 name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );
1659
1660 *(mach_port_name_t *)clientObj = name;
1661 return kIOReturnSuccess;
1662 }
1663
1664 IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
1665 {
1666 return( 0 );
1667 }
1668
1669 IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
1670 {
1671 return( 0 );
1672 }
1673
1674 IOExternalTrap * IOUserClient::
1675 getExternalTrapForIndex(UInt32 index)
1676 {
1677 return NULL;
1678 }
1679
1680 #pragma clang diagnostic push
1681 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1682
1683 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1684 // functions can break clients of kexts implementing getExternalMethodForIndex()
1685 IOExternalMethod * IOUserClient::
1686 getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
1687 {
1688 IOExternalMethod *method = getExternalMethodForIndex(index);
1689
1690 if (method)
1691 *targetP = (IOService *) method->object;
1692
1693 return method;
1694 }
1695
1696 IOExternalAsyncMethod * IOUserClient::
1697 getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
1698 {
1699 IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);
1700
1701 if (method)
1702 *targetP = (IOService *) method->object;
1703
1704 return method;
1705 }
1706
1707 IOExternalTrap * IOUserClient::
1708 getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
1709 {
1710 IOExternalTrap *trap = getExternalTrapForIndex(index);
1711
1712 if (trap) {
1713 *targetP = trap->object;
1714 }
1715
1716 return trap;
1717 }
1718 #pragma clang diagnostic pop
1719
1720 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1721 {
1722 mach_port_t port;
1723 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1724
1725 if (MACH_PORT_NULL != port)
1726 iokit_release_port_send(port);
1727
1728 return (kIOReturnSuccess);
1729 }
1730
1731 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1732 {
1733 if (MACH_PORT_NULL != port)
1734 iokit_release_port_send(port);
1735
1736 return (kIOReturnSuccess);
1737 }
1738
1739 IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
1740 IOReturn result, void *args[], UInt32 numArgs)
1741 {
1742 OSAsyncReference64 reference64;
1743 io_user_reference_t args64[kMaxAsyncArgs];
1744 unsigned int idx;
1745
1746 if (numArgs > kMaxAsyncArgs)
1747 return kIOReturnMessageTooLarge;
1748
1749 for (idx = 0; idx < kOSAsyncRef64Count; idx++)
1750 reference64[idx] = REF64(reference[idx]);
1751
1752 for (idx = 0; idx < numArgs; idx++)
1753 args64[idx] = REF64(args[idx]);
1754
1755 return (sendAsyncResult64(reference64, result, args64, numArgs));
1756 }
1757
1758 IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
1759 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1760 {
1761 return _sendAsyncResult64(reference, result, args, numArgs, options);
1762 }
1763
1764 IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
1765 IOReturn result, io_user_reference_t args[], UInt32 numArgs)
1766 {
1767 return _sendAsyncResult64(reference, result, args, numArgs, 0);
1768 }
1769
1770 IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
1771 IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
1772 {
1773 struct ReplyMsg
1774 {
1775 mach_msg_header_t msgHdr;
1776 union
1777 {
1778 struct
1779 {
1780 OSNotificationHeader notifyHdr;
1781 IOAsyncCompletionContent asyncContent;
1782 uint32_t args[kMaxAsyncArgs];
1783 } msg32;
1784 struct
1785 {
1786 OSNotificationHeader64 notifyHdr;
1787 IOAsyncCompletionContent asyncContent;
1788 io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed));
1789 } msg64;
1790 } m;
1791 };
1792 ReplyMsg replyMsg;
1793 mach_port_t replyPort;
1794 kern_return_t kr;
1795
1796 // If no reply port, do nothing.
1797 replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1798 if (replyPort == MACH_PORT_NULL)
1799 return kIOReturnSuccess;
1800
1801 if (numArgs > kMaxAsyncArgs)
1802 return kIOReturnMessageTooLarge;
1803
1804 bzero(&replyMsg, sizeof(replyMsg));
1805 replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
1806 0 /*local*/);
1807 replyMsg.msgHdr.msgh_remote_port = replyPort;
1808 replyMsg.msgHdr.msgh_local_port = 0;
1809 replyMsg.msgHdr.msgh_id = kOSNotificationMessageID;
1810 if (kIOUCAsync64Flag & reference[0])
1811 {
1812 replyMsg.msgHdr.msgh_size =
1813 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
1814 - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
1815 replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1816 + numArgs * sizeof(io_user_reference_t);
1817 replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
1818 bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));
1819
1820 replyMsg.m.msg64.asyncContent.result = result;
1821 if (numArgs)
1822 bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
1823 }
1824 else
1825 {
1826 unsigned int idx;
1827
1828 replyMsg.msgHdr.msgh_size =
1829 sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
1830 - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);
1831
1832 replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
1833 + numArgs * sizeof(uint32_t);
1834 replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;
1835
1836 for (idx = 0; idx < kOSAsyncRefCount; idx++)
1837 replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);
1838
1839 replyMsg.m.msg32.asyncContent.result = result;
1840
1841 for (idx = 0; idx < numArgs; idx++)
1842 replyMsg.m.msg32.args[idx] = REF32(args[idx]);
1843 }
1844
1845 if ((options & kIOUserNotifyOptionCanDrop) != 0) {
1846 kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
1847 replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
1848 } else {
1849 /* Fail on full queue. */
1850 kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
1851 replyMsg.msgHdr.msgh_size);
1852 }
1853 if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
1854 {
1855 reference[0] |= kIOUCAsyncErrorLoggedFlag;
1856 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
1857 }
1858 return kr;
1859 }
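// Async completions are formatted to match the caller's address space: if
// kIOUCAsync64Flag is set in the reference (see setAsyncReference64 with a task),
// the 64-bit msg64 layout is used, otherwise the reference and arguments are
// truncated into the 32-bit msg32 layout. kIOUserNotifyOptionCanDrop sends with
// MACH_SEND_TIMEOUT so the reply may be dropped on a full queue; failures other
// than timeouts are logged once per reference (kIOUCAsyncErrorLoggedFlag).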
1860
1861
1862 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1863
1864 extern "C" {
1865
1866 #define CHECK(cls,obj,out) \
1867 cls * out; \
1868 if( !(out = OSDynamicCast( cls, obj))) \
1869 return( kIOReturnBadArgument )
1870
1871 #define CHECKLOCKED(cls,obj,out) \
1872 IOUserIterator * oIter; \
1873 cls * out; \
1874 if( !(oIter = OSDynamicCast(IOUserIterator, obj))) \
1875 return (kIOReturnBadArgument); \
1876 if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
1877 return (kIOReturnBadArgument)
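// CHECK downcasts a MIG-supplied io_object_t to the expected class or fails the
// call with kIOReturnBadArgument; CHECKLOCKED additionally unwraps the
// IOUserIterator container used for objects handed out to user space, leaving the
// caller both oIter (for its lock) and the wrapped object.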
1878
1879 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1880
1881 // Create a vm_map_copy_t or kalloc'ed data for memory
1882 // to be copied out. ipc will free after the copyout.
1883
1884 static kern_return_t copyoutkdata( const void * data, vm_size_t len,
1885 io_buf_ptr_t * buf )
1886 {
1887 kern_return_t err;
1888 vm_map_copy_t copy;
1889
1890 err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
1891 false /* src_destroy */, &copy);
1892
1893 assert( err == KERN_SUCCESS );
1894 if( err == KERN_SUCCESS )
1895 *buf = (char *) copy;
1896
1897 return( err );
1898 }
1899
1900 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1901
1902 /* Routine io_server_version */
1903 kern_return_t is_io_server_version(
1904 mach_port_t master_port,
1905 uint64_t *version)
1906 {
1907 *version = IOKIT_SERVER_VERSION;
1908 return (kIOReturnSuccess);
1909 }
1910
1911 /* Routine io_object_get_class */
1912 kern_return_t is_io_object_get_class(
1913 io_object_t object,
1914 io_name_t className )
1915 {
1916 const OSMetaClass* my_obj = NULL;
1917
1918 if( !object)
1919 return( kIOReturnBadArgument );
1920
1921 my_obj = object->getMetaClass();
1922 if (!my_obj) {
1923 return (kIOReturnNotFound);
1924 }
1925
1926 strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));
1927
1928 return( kIOReturnSuccess );
1929 }
1930
1931 /* Routine io_object_get_superclass */
1932 kern_return_t is_io_object_get_superclass(
1933 mach_port_t master_port,
1934 io_name_t obj_name,
1935 io_name_t class_name)
1936 {
1937 IOReturn ret;
1938 const OSMetaClass * meta;
1939 const OSMetaClass * super;
1940 const OSSymbol * name;
1941 const char * cstr;
1942
1943 if (!obj_name || !class_name) return (kIOReturnBadArgument);
1944 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1945
1946 ret = kIOReturnNotFound;
1947 meta = 0;
1948 do
1949 {
1950 name = OSSymbol::withCString(obj_name);
1951 if (!name) break;
1952 meta = OSMetaClass::copyMetaClassWithName(name);
1953 if (!meta) break;
1954 super = meta->getSuperClass();
1955 if (!super) break;
1956 cstr = super->getClassName();
1957 if (!cstr) break;
1958 strlcpy(class_name, cstr, sizeof(io_name_t));
1959 ret = kIOReturnSuccess;
1960 }
1961 while (false);
1962
1963 OSSafeReleaseNULL(name);
1964 if (meta) meta->releaseMetaClass();
1965
1966 return (ret);
1967 }
1968
1969 /* Routine io_object_get_bundle_identifier */
1970 kern_return_t is_io_object_get_bundle_identifier(
1971 mach_port_t master_port,
1972 io_name_t obj_name,
1973 io_name_t bundle_name)
1974 {
1975 IOReturn ret;
1976 const OSMetaClass * meta;
1977 const OSSymbol * name;
1978 const OSSymbol * identifier;
1979 const char * cstr;
1980
1981 if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
1982 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1983
1984 ret = kIOReturnNotFound;
1985 meta = 0;
1986 do
1987 {
1988 name = OSSymbol::withCString(obj_name);
1989 if (!name) break;
1990 meta = OSMetaClass::copyMetaClassWithName(name);
1991 if (!meta) break;
1992 identifier = meta->getKmodName();
1993 if (!identifier) break;
1994 cstr = identifier->getCStringNoCopy();
1995 if (!cstr) break;
1996 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1997 ret = kIOReturnSuccess;
1998 }
1999 while (false);
2000
2001 OSSafeReleaseNULL(name);
2002 if (meta) meta->releaseMetaClass();
2003
2004 return (ret);
2005 }
2006
2007 /* Routine io_object_conforms_to */
2008 kern_return_t is_io_object_conforms_to(
2009 io_object_t object,
2010 io_name_t className,
2011 boolean_t *conforms )
2012 {
2013 if( !object)
2014 return( kIOReturnBadArgument );
2015
2016 *conforms = (0 != object->metaCast( className ));
2017
2018 return( kIOReturnSuccess );
2019 }
2020
2021 /* Routine io_object_get_retain_count */
2022 kern_return_t is_io_object_get_retain_count(
2023 io_object_t object,
2024 uint32_t *retainCount )
2025 {
2026 if( !object)
2027 return( kIOReturnBadArgument );
2028
2029 *retainCount = object->getRetainCount();
2030 return( kIOReturnSuccess );
2031 }
2032
2033 /* Routine io_iterator_next */
2034 kern_return_t is_io_iterator_next(
2035 io_object_t iterator,
2036 io_object_t *object )
2037 {
2038 IOReturn ret;
2039 OSObject * obj;
2040
2041 CHECK( OSIterator, iterator, iter );
2042
2043 obj = iter->getNextObject();
2044 if( obj) {
2045 obj->retain();
2046 *object = obj;
2047 ret = kIOReturnSuccess;
2048 } else
2049 ret = kIOReturnNoDevice;
2050
2051 return (ret);
2052 }
2053
2054 /* Routine io_iterator_reset */
2055 kern_return_t is_io_iterator_reset(
2056 io_object_t iterator )
2057 {
2058 CHECK( OSIterator, iterator, iter );
2059
2060 iter->reset();
2061
2062 return( kIOReturnSuccess );
2063 }
2064
2065 /* Routine io_iterator_is_valid */
2066 kern_return_t is_io_iterator_is_valid(
2067 io_object_t iterator,
2068 boolean_t *is_valid )
2069 {
2070 CHECK( OSIterator, iterator, iter );
2071
2072 *is_valid = iter->isValid();
2073
2074 return( kIOReturnSuccess );
2075 }
2076
2077
2078 static kern_return_t internal_io_service_match_property_table(
2079 io_service_t _service,
2080 const char * matching,
2081 mach_msg_type_number_t matching_size,
2082 boolean_t *matches)
2083 {
2084 CHECK( IOService, _service, service );
2085
2086 kern_return_t kr;
2087 OSObject * obj;
2088 OSDictionary * dict;
2089
2090 assert(matching_size);
2091 obj = OSUnserializeXML(matching, matching_size);
2092
2093 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2094 *matches = service->passiveMatch( dict );
2095 kr = kIOReturnSuccess;
2096 } else
2097 kr = kIOReturnBadArgument;
2098
2099 if( obj)
2100 obj->release();
2101
2102 return( kr );
2103 }
2104
2105 /* Routine io_service_match_property_table */
2106 kern_return_t is_io_service_match_property_table(
2107 io_service_t service,
2108 io_string_t matching,
2109 boolean_t *matches )
2110 {
2111 return (kIOReturnUnsupported);
2112 }
2113
2114
2115 /* Routine io_service_match_property_table_ool */
2116 kern_return_t is_io_service_match_property_table_ool(
2117 io_object_t service,
2118 io_buf_ptr_t matching,
2119 mach_msg_type_number_t matchingCnt,
2120 kern_return_t *result,
2121 boolean_t *matches )
2122 {
2123 kern_return_t kr;
2124 vm_offset_t data;
2125 vm_map_offset_t map_data;
2126
2127 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2128 data = CAST_DOWN(vm_offset_t, map_data);
2129
2130 if( KERN_SUCCESS == kr) {
2131 // must return success after vm_map_copyout() succeeds
2132 *result = internal_io_service_match_property_table(service,
2133 (const char *)data, matchingCnt, matches );
2134 vm_deallocate( kernel_map, data, matchingCnt );
2135 }
2136
2137 return( kr );
2138 }
2139
2140 /* Routine io_service_match_property_table_bin */
2141 kern_return_t is_io_service_match_property_table_bin(
2142 io_object_t service,
2143 io_struct_inband_t matching,
2144 mach_msg_type_number_t matchingCnt,
2145 boolean_t *matches)
2146 {
2147 return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
2148 }
2149
2150 static kern_return_t internal_io_service_get_matching_services(
2151 mach_port_t master_port,
2152 const char * matching,
2153 mach_msg_type_number_t matching_size,
2154 io_iterator_t *existing )
2155 {
2156 kern_return_t kr;
2157 OSObject * obj;
2158 OSDictionary * dict;
2159
2160 if( master_port != master_device_port)
2161 return( kIOReturnNotPrivileged);
2162
2163 assert(matching_size);
2164 obj = OSUnserializeXML(matching, matching_size);
2165
2166 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2167 *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
2168 kr = kIOReturnSuccess;
2169 } else
2170 kr = kIOReturnBadArgument;
2171
2172 if( obj)
2173 obj->release();
2174
2175 return( kr );
2176 }
2177
2178 /* Routine io_service_get_matching_services */
2179 kern_return_t is_io_service_get_matching_services(
2180 mach_port_t master_port,
2181 io_string_t matching,
2182 io_iterator_t *existing )
2183 {
2184 return (kIOReturnUnsupported);
2185 }
2186
2187 /* Routine io_service_get_matching_services_ool */
2188 kern_return_t is_io_service_get_matching_services_ool(
2189 mach_port_t master_port,
2190 io_buf_ptr_t matching,
2191 mach_msg_type_number_t matchingCnt,
2192 kern_return_t *result,
2193 io_object_t *existing )
2194 {
2195 kern_return_t kr;
2196 vm_offset_t data;
2197 vm_map_offset_t map_data;
2198
2199 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2200 data = CAST_DOWN(vm_offset_t, map_data);
2201
2202 if( KERN_SUCCESS == kr) {
2203 // must return success after vm_map_copyout() succeeds
2204 // and mig will copy out objects on success
2205 *existing = 0;
2206 *result = internal_io_service_get_matching_services(master_port,
2207 (const char *) data, matchingCnt, existing);
2208 vm_deallocate( kernel_map, data, matchingCnt );
2209 }
2210
2211 return( kr );
2212 }
2213
2214 /* Routine io_service_get_matching_services_bin */
2215 kern_return_t is_io_service_get_matching_services_bin(
2216 mach_port_t master_port,
2217 io_struct_inband_t matching,
2218 mach_msg_type_number_t matchingCnt,
2219 io_object_t *existing)
2220 {
2221 return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
2222 }
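/*
 * A minimal user-space sketch of how the matching routines above are typically
 * reached through IOKit.framework; the function name and the broad "IOService"
 * matching class are illustrative only.
 *
 *   #include <stdio.h>
 *   #include <IOKit/IOKitLib.h>
 *
 *   static void list_matching_services(void)
 *   {
 *       io_iterator_t iter = IO_OBJECT_NULL;
 *
 *       // IOServiceGetMatchingServices() serializes the dictionary and consumes
 *       // its reference; the kernel side unserializes it and builds the iterator.
 *       if (IOServiceGetMatchingServices(kIOMasterPortDefault,
 *                                        IOServiceMatching("IOService"),
 *                                        &iter) != KERN_SUCCESS)
 *           return;
 *
 *       io_service_t service;
 *       while ((service = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
 *           io_name_t name;
 *           if (IORegistryEntryGetName(service, name) == KERN_SUCCESS)
 *               printf("%s\n", name);
 *           IOObjectRelease(service);
 *       }
 *       IOObjectRelease(iter);
 *   }
 */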
2223
2224
2225 static kern_return_t internal_io_service_get_matching_service(
2226 mach_port_t master_port,
2227 const char * matching,
2228 mach_msg_type_number_t matching_size,
2229 io_service_t *service )
2230 {
2231 kern_return_t kr;
2232 OSObject * obj;
2233 OSDictionary * dict;
2234
2235 if( master_port != master_device_port)
2236 return( kIOReturnNotPrivileged);
2237
2238 assert(matching_size);
2239 obj = OSUnserializeXML(matching, matching_size);
2240
2241 if( (dict = OSDynamicCast( OSDictionary, obj))) {
2242 *service = IOService::copyMatchingService( dict );
2243 kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
2244 } else
2245 kr = kIOReturnBadArgument;
2246
2247 if( obj)
2248 obj->release();
2249
2250 return( kr );
2251 }
2252
2253 /* Routine io_service_get_matching_service */
2254 kern_return_t is_io_service_get_matching_service(
2255 mach_port_t master_port,
2256 io_string_t matching,
2257 io_service_t *service )
2258 {
2259 return (kIOReturnUnsupported);
2260 }
2261
2262 /* Routine io_service_get_matching_service_ool */
2263 kern_return_t is_io_service_get_matching_service_ool(
2264 mach_port_t master_port,
2265 io_buf_ptr_t matching,
2266 mach_msg_type_number_t matchingCnt,
2267 kern_return_t *result,
2268 io_object_t *service )
2269 {
2270 kern_return_t kr;
2271 vm_offset_t data;
2272 vm_map_offset_t map_data;
2273
2274 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2275 data = CAST_DOWN(vm_offset_t, map_data);
2276
2277 if( KERN_SUCCESS == kr) {
2278 // must return success after vm_map_copyout() succeeds
2279 // and mig will copy out objects on success
2280 *service = 0;
2281 *result = internal_io_service_get_matching_service(master_port,
2282 (const char *) data, matchingCnt, service );
2283 vm_deallocate( kernel_map, data, matchingCnt );
2284 }
2285
2286 return( kr );
2287 }
2288
2289 /* Routine io_service_get_matching_service_bin */
2290 kern_return_t is_io_service_get_matching_service_bin(
2291 mach_port_t master_port,
2292 io_struct_inband_t matching,
2293 mach_msg_type_number_t matchingCnt,
2294 io_object_t *service)
2295 {
2296 return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
2297 }
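/*
 * Note on the variants above: the io_string_t entry points now return
 * kIOReturnUnsupported, the _bin variant carries the serialized matching
 * dictionary inband, and the _ool variant carries it out-of-line for
 * dictionaries too large for the inband limit. All of them funnel into the
 * internal_io_service_get_matching_service*() helpers.
 */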
2298
2299 static kern_return_t internal_io_service_add_notification(
2300 mach_port_t master_port,
2301 io_name_t notification_type,
2302 const char * matching,
2303 size_t matching_size,
2304 mach_port_t port,
2305 void * reference,
2306 vm_size_t referenceSize,
2307 bool client64,
2308 io_object_t * notification )
2309 {
2310 IOServiceUserNotification * userNotify = 0;
2311 IONotifier * notify = 0;
2312 const OSSymbol * sym;
2313 OSDictionary * dict = 0;
2314 IOReturn err;
2315 unsigned long int userMsgType;
2316
2317 if( master_port != master_device_port)
2318 return( kIOReturnNotPrivileged);
2319
2320 do {
2321 err = kIOReturnNoResources;
2322
2323 if( !(sym = OSSymbol::withCString( notification_type ))) {
2324 err = kIOReturnNoResources; continue; }
2325
2326 assert(matching_size);
2327 dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
2328 if (!dict) {
2329 err = kIOReturnBadArgument;
2330 continue;
2331 }
2332
2333 if( (sym == gIOPublishNotification)
2334 || (sym == gIOFirstPublishNotification))
2335 userMsgType = kIOServicePublishNotificationType;
2336 else if( (sym == gIOMatchedNotification)
2337 || (sym == gIOFirstMatchNotification))
2338 userMsgType = kIOServiceMatchedNotificationType;
2339 else if ((sym == gIOTerminatedNotification)
2340 || (sym == gIOWillTerminateNotification))
2341 userMsgType = kIOServiceTerminatedNotificationType;
2342 else
2343 userMsgType = kLastIOKitNotificationType;
2344
2345 userNotify = new IOServiceUserNotification;
2346
2347 if( userNotify && !userNotify->init( port, userMsgType,
2348 reference, referenceSize, client64)) {
2349 userNotify->release();
2350 userNotify = 0;
2351 }
2352 if( !userNotify)
2353 continue;
2354
2355 notify = IOService::addMatchingNotification( sym, dict,
2356 &userNotify->_handler, userNotify );
2357 if( notify) {
2358 *notification = userNotify;
2359 userNotify->setNotification( notify );
2360 err = kIOReturnSuccess;
2361 } else
2362 err = kIOReturnUnsupported;
2363
2364 } while( false );
2365
2366 if ((kIOReturnSuccess != err) && userNotify)
2367 {
2368 userNotify->invalidatePort();
2369 userNotify->release();
2370 userNotify = 0;
2371 }
2372
2373 if( sym)
2374 sym->release();
2375 if( dict)
2376 dict->release();
2377
2378 return( err );
2379 }
2380
2381
2382 /* Routine io_service_add_notification */
2383 kern_return_t is_io_service_add_notification(
2384 mach_port_t master_port,
2385 io_name_t notification_type,
2386 io_string_t matching,
2387 mach_port_t port,
2388 io_async_ref_t reference,
2389 mach_msg_type_number_t referenceCnt,
2390 io_object_t * notification )
2391 {
2392 return (kIOReturnUnsupported);
2393 }
2394
2395 /* Routine io_service_add_notification_64 */
2396 kern_return_t is_io_service_add_notification_64(
2397 mach_port_t master_port,
2398 io_name_t notification_type,
2399 io_string_t matching,
2400 mach_port_t wake_port,
2401 io_async_ref64_t reference,
2402 mach_msg_type_number_t referenceCnt,
2403 io_object_t *notification )
2404 {
2405 return (kIOReturnUnsupported);
2406 }
2407
2408 /* Routine io_service_add_notification_bin */
2409 kern_return_t is_io_service_add_notification_bin
2410 (
2411 mach_port_t master_port,
2412 io_name_t notification_type,
2413 io_struct_inband_t matching,
2414 mach_msg_type_number_t matchingCnt,
2415 mach_port_t wake_port,
2416 io_async_ref_t reference,
2417 mach_msg_type_number_t referenceCnt,
2418 io_object_t *notification)
2419 {
2420 return (internal_io_service_add_notification(master_port, notification_type,
2421 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2422 false, notification));
2423 }
2424
2425 /* Routine io_service_add_notification_bin_64 */
2426 kern_return_t is_io_service_add_notification_bin_64
2427 (
2428 mach_port_t master_port,
2429 io_name_t notification_type,
2430 io_struct_inband_t matching,
2431 mach_msg_type_number_t matchingCnt,
2432 mach_port_t wake_port,
2433 io_async_ref64_t reference,
2434 mach_msg_type_number_t referenceCnt,
2435 io_object_t *notification)
2436 {
2437 return (internal_io_service_add_notification(master_port, notification_type,
2438 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2439 true, notification));
2440 }
2441
2442 static kern_return_t internal_io_service_add_notification_ool(
2443 mach_port_t master_port,
2444 io_name_t notification_type,
2445 io_buf_ptr_t matching,
2446 mach_msg_type_number_t matchingCnt,
2447 mach_port_t wake_port,
2448 void * reference,
2449 vm_size_t referenceSize,
2450 bool client64,
2451 kern_return_t *result,
2452 io_object_t *notification )
2453 {
2454 kern_return_t kr;
2455 vm_offset_t data;
2456 vm_map_offset_t map_data;
2457
2458 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
2459 data = CAST_DOWN(vm_offset_t, map_data);
2460
2461 if( KERN_SUCCESS == kr) {
2462 // must return success after vm_map_copyout() succeeds
2463 // and mig will copy out objects on success
2464 *notification = 0;
2465 *result = internal_io_service_add_notification( master_port, notification_type,
2466 (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
2467 vm_deallocate( kernel_map, data, matchingCnt );
2468 }
2469
2470 return( kr );
2471 }
2472
2473 /* Routine io_service_add_notification_ool */
2474 kern_return_t is_io_service_add_notification_ool(
2475 mach_port_t master_port,
2476 io_name_t notification_type,
2477 io_buf_ptr_t matching,
2478 mach_msg_type_number_t matchingCnt,
2479 mach_port_t wake_port,
2480 io_async_ref_t reference,
2481 mach_msg_type_number_t referenceCnt,
2482 kern_return_t *result,
2483 io_object_t *notification )
2484 {
2485 return (internal_io_service_add_notification_ool(master_port, notification_type,
2486 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2487 false, result, notification));
2488 }
2489
2490 /* Routine io_service_add_notification_ool_64 */
2491 kern_return_t is_io_service_add_notification_ool_64(
2492 mach_port_t master_port,
2493 io_name_t notification_type,
2494 io_buf_ptr_t matching,
2495 mach_msg_type_number_t matchingCnt,
2496 mach_port_t wake_port,
2497 io_async_ref64_t reference,
2498 mach_msg_type_number_t referenceCnt,
2499 kern_return_t *result,
2500 io_object_t *notification )
2501 {
2502 return (internal_io_service_add_notification_ool(master_port, notification_type,
2503 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2504 true, result, notification));
2505 }
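/*
 * A minimal user-space sketch of arming a matching notification, which is
 * delivered through the is_io_service_add_notification_* entry points above;
 * the callback and the broad "IOService" matching class are illustrative.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static void matched_cb(void *refcon, io_iterator_t iter)
 *   {
 *       io_service_t s;
 *       while ((s = IOIteratorNext(iter)) != IO_OBJECT_NULL)
 *           IOObjectRelease(s);                    // drain to re-arm
 *   }
 *
 *   static void arm_first_match(void)
 *   {
 *       IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
 *       io_iterator_t         iter = IO_OBJECT_NULL;
 *
 *       CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *                          IONotificationPortGetRunLoopSource(port),
 *                          kCFRunLoopDefaultMode);
 *       IOServiceAddMatchingNotification(port, kIOFirstMatchNotification,
 *                                        IOServiceMatching("IOService"),
 *                                        matched_cb, NULL, &iter);
 *       matched_cb(NULL, iter);                    // drain once to arm
 *   }
 */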
2506
2507 /* Routine io_service_add_notification_old */
2508 kern_return_t is_io_service_add_notification_old(
2509 mach_port_t master_port,
2510 io_name_t notification_type,
2511 io_string_t matching,
2512 mach_port_t port,
2513 // for binary compatibility reasons, this must be natural_t for ILP32
2514 natural_t ref,
2515 io_object_t * notification )
2516 {
2517 return( is_io_service_add_notification( master_port, notification_type,
2518 matching, port, &ref, 1, notification ));
2519 }
2520
2521
2522 static kern_return_t internal_io_service_add_interest_notification(
2523 io_object_t _service,
2524 io_name_t type_of_interest,
2525 mach_port_t port,
2526 void * reference,
2527 vm_size_t referenceSize,
2528 bool client64,
2529 io_object_t * notification )
2530 {
2531
2532 IOServiceMessageUserNotification * userNotify = 0;
2533 IONotifier * notify = 0;
2534 const OSSymbol * sym;
2535 IOReturn err;
2536
2537 CHECK( IOService, _service, service );
2538
2539 err = kIOReturnNoResources;
2540 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2541
2542 userNotify = new IOServiceMessageUserNotification;
2543
2544 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2545 reference, referenceSize,
2546 kIOUserNotifyMaxMessageSize,
2547 client64 )) {
2548 userNotify->release();
2549 userNotify = 0;
2550 }
2551 if( !userNotify)
2552 continue;
2553
2554 notify = service->registerInterest( sym,
2555 &userNotify->_handler, userNotify );
2556 if( notify) {
2557 *notification = userNotify;
2558 userNotify->setNotification( notify );
2559 err = kIOReturnSuccess;
2560 } else
2561 err = kIOReturnUnsupported;
2562
2563 sym->release();
2564
2565 } while( false );
2566
2567 if ((kIOReturnSuccess != err) && userNotify)
2568 {
2569 userNotify->invalidatePort();
2570 userNotify->release();
2571 userNotify = 0;
2572 }
2573
2574 return( err );
2575 }
2576
2577 /* Routine io_service_add_interest_notification */
2578 kern_return_t is_io_service_add_interest_notification(
2579 io_object_t service,
2580 io_name_t type_of_interest,
2581 mach_port_t port,
2582 io_async_ref_t reference,
2583 mach_msg_type_number_t referenceCnt,
2584 io_object_t * notification )
2585 {
2586 return (internal_io_service_add_interest_notification(service, type_of_interest,
2587 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2588 }
2589
2590 /* Routine io_service_add_interest_notification_64 */
2591 kern_return_t is_io_service_add_interest_notification_64(
2592 io_object_t service,
2593 io_name_t type_of_interest,
2594 mach_port_t wake_port,
2595 io_async_ref64_t reference,
2596 mach_msg_type_number_t referenceCnt,
2597 io_object_t *notification )
2598 {
2599 return (internal_io_service_add_interest_notification(service, type_of_interest,
2600 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2601 }
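/*
 * A minimal user-space sketch of registering for interest notifications,
 * which arrives in internal_io_service_add_interest_notification() above;
 * the callback, refcon and function names are illustrative. The notification
 * port is kept alive for the life of the registration.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static void interest_cb(void *refcon, io_service_t service,
 *                           natural_t messageType, void *messageArgument)
 *   {
 *       // e.g. react to kIOMessageServiceIsTerminated
 *   }
 *
 *   static io_object_t watch_service(io_service_t service)
 *   {
 *       IONotificationPortRef port = IONotificationPortCreate(kIOMasterPortDefault);
 *       io_object_t           note = IO_OBJECT_NULL;
 *
 *       CFRunLoopAddSource(CFRunLoopGetCurrent(),
 *                          IONotificationPortGetRunLoopSource(port),
 *                          kCFRunLoopDefaultMode);
 *       IOServiceAddInterestNotification(port, service, kIOGeneralInterest,
 *                                        interest_cb, NULL, &note);
 *       return note;   // release with IOObjectRelease() to tear down
 *   }
 */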
2602
2603
2604 /* Routine io_service_acknowledge_notification */
2605 kern_return_t is_io_service_acknowledge_notification(
2606 io_object_t _service,
2607 natural_t notify_ref,
2608 natural_t response )
2609 {
2610 CHECK( IOService, _service, service );
2611
2612 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2613 (IOOptionBits) response ));
2614
2615 }
2616
2617 /* Routine io_connect_get_notification_semaphore */
2618 kern_return_t is_io_connect_get_notification_semaphore(
2619 io_connect_t connection,
2620 natural_t notification_type,
2621 semaphore_t *semaphore )
2622 {
2623 CHECK( IOUserClient, connection, client );
2624
2625 IOStatisticsClientCall();
2626 return( client->getNotificationSemaphore( (UInt32) notification_type,
2627 semaphore ));
2628 }
2629
2630 /* Routine io_registry_get_root_entry */
2631 kern_return_t is_io_registry_get_root_entry(
2632 mach_port_t master_port,
2633 io_object_t *root )
2634 {
2635 IORegistryEntry * entry;
2636
2637 if( master_port != master_device_port)
2638 return( kIOReturnNotPrivileged);
2639
2640 entry = IORegistryEntry::getRegistryRoot();
2641 if( entry)
2642 entry->retain();
2643 *root = entry;
2644
2645 return( kIOReturnSuccess );
2646 }
2647
2648 /* Routine io_registry_create_iterator */
2649 kern_return_t is_io_registry_create_iterator(
2650 mach_port_t master_port,
2651 io_name_t plane,
2652 uint32_t options,
2653 io_object_t *iterator )
2654 {
2655 if( master_port != master_device_port)
2656 return( kIOReturnNotPrivileged);
2657
2658 *iterator = IOUserIterator::withIterator(
2659 IORegistryIterator::iterateOver(
2660 IORegistryEntry::getPlane( plane ), options ));
2661
2662 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2663 }
2664
2665 /* Routine io_registry_entry_create_iterator */
2666 kern_return_t is_io_registry_entry_create_iterator(
2667 io_object_t registry_entry,
2668 io_name_t plane,
2669 uint32_t options,
2670 io_object_t *iterator )
2671 {
2672 CHECK( IORegistryEntry, registry_entry, entry );
2673
2674 *iterator = IOUserIterator::withIterator(
2675 IORegistryIterator::iterateOver( entry,
2676 IORegistryEntry::getPlane( plane ), options ));
2677
2678 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2679 }
2680
2681 /* Routine io_registry_iterator_enter */
2682 kern_return_t is_io_registry_iterator_enter_entry(
2683 io_object_t iterator )
2684 {
2685 CHECKLOCKED( IORegistryIterator, iterator, iter );
2686
2687 IOLockLock(oIter->lock);
2688 iter->enterEntry();
2689 IOLockUnlock(oIter->lock);
2690
2691 return( kIOReturnSuccess );
2692 }
2693
2694 /* Routine io_registry_iterator_exit */
2695 kern_return_t is_io_registry_iterator_exit_entry(
2696 io_object_t iterator )
2697 {
2698 bool didIt;
2699
2700 CHECKLOCKED( IORegistryIterator, iterator, iter );
2701
2702 IOLockLock(oIter->lock);
2703 didIt = iter->exitEntry();
2704 IOLockUnlock(oIter->lock);
2705
2706 return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
2707 }
2708
2709 /* Routine io_registry_entry_from_path */
2710 kern_return_t is_io_registry_entry_from_path(
2711 mach_port_t master_port,
2712 io_string_t path,
2713 io_object_t *registry_entry )
2714 {
2715 IORegistryEntry * entry;
2716
2717 if( master_port != master_device_port)
2718 return( kIOReturnNotPrivileged);
2719
2720 entry = IORegistryEntry::fromPath( path );
2721
2722 *registry_entry = entry;
2723
2724 return( kIOReturnSuccess );
2725 }
2726
2727
2728 /* Routine io_registry_entry_from_path_ool */
2729 kern_return_t is_io_registry_entry_from_path_ool(
2730 mach_port_t master_port,
2731 io_string_inband_t path,
2732 io_buf_ptr_t path_ool,
2733 mach_msg_type_number_t path_oolCnt,
2734 kern_return_t *result,
2735 io_object_t *registry_entry)
2736 {
2737 IORegistryEntry * entry;
2738 vm_map_offset_t map_data;
2739 const char * cpath;
2740 IOReturn res;
2741 kern_return_t err;
2742
2743 if (master_port != master_device_port) return(kIOReturnNotPrivileged);
2744
2745 map_data = 0;
2746 entry = 0;
2747 res = err = KERN_SUCCESS;
2748 if (path[0]) cpath = path;
2749 else
2750 {
2751 if (!path_oolCnt) return(kIOReturnBadArgument);
2752 if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);
2753
2754 err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
2755 if (KERN_SUCCESS == err)
2756 {
2757 // must return success to mig after vm_map_copyout() succeeds; the actual status is reported via *result
2758 cpath = CAST_DOWN(const char *, map_data);
2759 if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
2760 }
2761 }
2762
2763 if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
2764 {
2765 entry = IORegistryEntry::fromPath(cpath);
2766 res = entry ? kIOReturnSuccess : kIOReturnNotFound;
2767 }
2768
2769 if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);
2770
2771 if (KERN_SUCCESS != err) res = err;
2772 *registry_entry = entry;
2773 *result = res;
2774
2775 return (err);
2776 }
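/*
 * A small user-space sketch of the path lookup above; IORegistryEntryFromPath()
 * takes a plane-prefixed path such as "IOService:/" and hands back a retained
 * registry entry, or a zero object when the path does not resolve.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   io_registry_entry_t root =
 *       IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
 *   if (root != IO_OBJECT_NULL) {
 *       // ... inspect the entry ...
 *       IOObjectRelease(root);
 *   }
 */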
2777
2778
2779 /* Routine io_registry_entry_in_plane */
2780 kern_return_t is_io_registry_entry_in_plane(
2781 io_object_t registry_entry,
2782 io_name_t plane,
2783 boolean_t *inPlane )
2784 {
2785 CHECK( IORegistryEntry, registry_entry, entry );
2786
2787 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2788
2789 return( kIOReturnSuccess );
2790 }
2791
2792
2793 /* Routine io_registry_entry_get_path */
2794 kern_return_t is_io_registry_entry_get_path(
2795 io_object_t registry_entry,
2796 io_name_t plane,
2797 io_string_t path )
2798 {
2799 int length;
2800 CHECK( IORegistryEntry, registry_entry, entry );
2801
2802 length = sizeof( io_string_t);
2803 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2804 return( kIOReturnSuccess );
2805 else
2806 return( kIOReturnBadArgument );
2807 }
2808
2809 /* Routine io_registry_entry_get_path_ool */
2810 kern_return_t is_io_registry_entry_get_path_ool(
2811 io_object_t registry_entry,
2812 io_name_t plane,
2813 io_string_inband_t path,
2814 io_buf_ptr_t *path_ool,
2815 mach_msg_type_number_t *path_oolCnt)
2816 {
2817 enum { kMaxPath = 16384 };
2818 IOReturn err;
2819 int length;
2820 char * buf;
2821
2822 CHECK( IORegistryEntry, registry_entry, entry );
2823
2824 *path_ool = NULL;
2825 *path_oolCnt = 0;
2826 length = sizeof(io_string_inband_t);
2827 if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
2828 else
2829 {
2830 length = kMaxPath;
2831 buf = IONew(char, length);
2832 if (!buf) err = kIOReturnNoMemory;
2833 else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
2834 else
2835 {
2836 *path_oolCnt = length;
2837 err = copyoutkdata(buf, length, path_ool);
2838 }
2839 if (buf) IODelete(buf, char, kMaxPath);
2840 }
2841
2842 return (err);
2843 }
2844
2845
2846 /* Routine io_registry_entry_get_name */
2847 kern_return_t is_io_registry_entry_get_name(
2848 io_object_t registry_entry,
2849 io_name_t name )
2850 {
2851 CHECK( IORegistryEntry, registry_entry, entry );
2852
2853 strlcpy( name, entry->getName(), sizeof( io_name_t));
2854
2855 return( kIOReturnSuccess );
2856 }
2857
2858 /* Routine io_registry_entry_get_name_in_plane */
2859 kern_return_t is_io_registry_entry_get_name_in_plane(
2860 io_object_t registry_entry,
2861 io_name_t planeName,
2862 io_name_t name )
2863 {
2864 const IORegistryPlane * plane;
2865 CHECK( IORegistryEntry, registry_entry, entry );
2866
2867 if( planeName[0])
2868 plane = IORegistryEntry::getPlane( planeName );
2869 else
2870 plane = 0;
2871
2872 strlcpy( name, entry->getName( plane), sizeof( io_name_t));
2873
2874 return( kIOReturnSuccess );
2875 }
2876
2877 /* Routine io_registry_entry_get_location_in_plane */
2878 kern_return_t is_io_registry_entry_get_location_in_plane(
2879 io_object_t registry_entry,
2880 io_name_t planeName,
2881 io_name_t location )
2882 {
2883 const IORegistryPlane * plane;
2884 CHECK( IORegistryEntry, registry_entry, entry );
2885
2886 if( planeName[0])
2887 plane = IORegistryEntry::getPlane( planeName );
2888 else
2889 plane = 0;
2890
2891 const char * cstr = entry->getLocation( plane );
2892
2893 if( cstr) {
2894 strlcpy( location, cstr, sizeof( io_name_t));
2895 return( kIOReturnSuccess );
2896 } else
2897 return( kIOReturnNotFound );
2898 }
2899
2900 /* Routine io_registry_entry_get_registry_entry_id */
2901 kern_return_t is_io_registry_entry_get_registry_entry_id(
2902 io_object_t registry_entry,
2903 uint64_t *entry_id )
2904 {
2905 CHECK( IORegistryEntry, registry_entry, entry );
2906
2907 *entry_id = entry->getRegistryEntryID();
2908
2909 return (kIOReturnSuccess);
2910 }
2911
2912 /* Routine io_registry_entry_get_property_bytes */
2913 kern_return_t is_io_registry_entry_get_property_bytes(
2914 io_object_t registry_entry,
2915 io_name_t property_name,
2916 io_struct_inband_t buf,
2917 mach_msg_type_number_t *dataCnt )
2918 {
2919 OSObject * obj;
2920 OSData * data;
2921 OSString * str;
2922 OSBoolean * boo;
2923 OSNumber * off;
2924 UInt64 offsetBytes;
2925 unsigned int len = 0;
2926 const void * bytes = 0;
2927 IOReturn ret = kIOReturnSuccess;
2928
2929 CHECK( IORegistryEntry, registry_entry, entry );
2930
2931 #if CONFIG_MACF
2932 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2933 return kIOReturnNotPermitted;
2934 #endif
2935
2936 obj = entry->copyProperty(property_name);
2937 if( !obj)
2938 return( kIOReturnNoResources );
2939
2940 // One day OSData will be a common container base class
2941 // until then...
2942 if( (data = OSDynamicCast( OSData, obj ))) {
2943 len = data->getLength();
2944 bytes = data->getBytesNoCopy();
2945 if (!data->isSerializable()) len = 0;
2946
2947 } else if( (str = OSDynamicCast( OSString, obj ))) {
2948 len = str->getLength() + 1;
2949 bytes = str->getCStringNoCopy();
2950
2951 } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
2952 len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
2953 bytes = boo->isTrue() ? "Yes" : "No";
2954
2955 } else if( (off = OSDynamicCast( OSNumber, obj ))) {
2956 offsetBytes = off->unsigned64BitValue();
2957 len = off->numberOfBytes();
2958 if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
2959 bytes = &offsetBytes;
2960 #ifdef __BIG_ENDIAN__
2961 bytes = (const void *)
2962 (((uintptr_t) bytes) + (sizeof( UInt64) - len));
2963 #endif
2964
2965 } else
2966 ret = kIOReturnBadArgument;
2967
2968 if( bytes) {
2969 if( *dataCnt < len)
2970 ret = kIOReturnIPCError;
2971 else {
2972 *dataCnt = len;
2973 bcopy( bytes, buf, len );
2974 }
2975 }
2976 obj->release();
2977
2978 return( ret );
2979 }
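/*
 * Summary of the flattening above: OSData is copied verbatim (or skipped when
 * not serializable), OSString as a NUL-terminated C string, OSBoolean as the
 * literal "Yes"/"No", and OSNumber as its low-order bytes. A caller buffer
 * smaller than the flattened value fails with kIOReturnIPCError rather than
 * being truncated.
 */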
2980
2981
2982 /* Routine io_registry_entry_get_property */
2983 kern_return_t is_io_registry_entry_get_property(
2984 io_object_t registry_entry,
2985 io_name_t property_name,
2986 io_buf_ptr_t *properties,
2987 mach_msg_type_number_t *propertiesCnt )
2988 {
2989 kern_return_t err;
2990 vm_size_t len;
2991 OSObject * obj;
2992
2993 CHECK( IORegistryEntry, registry_entry, entry );
2994
2995 #if CONFIG_MACF
2996 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
2997 return kIOReturnNotPermitted;
2998 #endif
2999
3000 obj = entry->copyProperty(property_name);
3001 if( !obj)
3002 return( kIOReturnNotFound );
3003
3004 OSSerialize * s = OSSerialize::withCapacity(4096);
3005 if( !s) {
3006 obj->release();
3007 return( kIOReturnNoMemory );
3008 }
3009
3010 if( obj->serialize( s )) {
3011 len = s->getLength();
3012 *propertiesCnt = len;
3013 err = copyoutkdata( s->text(), len, properties );
3014
3015 } else
3016 err = kIOReturnUnsupported;
3017
3018 s->release();
3019 obj->release();
3020
3021 return( err );
3022 }
3023
3024 /* Routine io_registry_entry_get_property_recursively */
3025 kern_return_t is_io_registry_entry_get_property_recursively(
3026 io_object_t registry_entry,
3027 io_name_t plane,
3028 io_name_t property_name,
3029 uint32_t options,
3030 io_buf_ptr_t *properties,
3031 mach_msg_type_number_t *propertiesCnt )
3032 {
3033 kern_return_t err;
3034 vm_size_t len;
3035 OSObject * obj;
3036
3037 CHECK( IORegistryEntry, registry_entry, entry );
3038
3039 #if CONFIG_MACF
3040 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3041 return kIOReturnNotPermitted;
3042 #endif
3043
3044 obj = entry->copyProperty( property_name,
3045 IORegistryEntry::getPlane( plane ), options );
3046 if( !obj)
3047 return( kIOReturnNotFound );
3048
3049 OSSerialize * s = OSSerialize::withCapacity(4096);
3050 if( !s) {
3051 obj->release();
3052 return( kIOReturnNoMemory );
3053 }
3054
3055 if( obj->serialize( s )) {
3056 len = s->getLength();
3057 *propertiesCnt = len;
3058 err = copyoutkdata( s->text(), len, properties );
3059
3060 } else
3061 err = kIOReturnUnsupported;
3062
3063 s->release();
3064 obj->release();
3065
3066 return( err );
3067 }
3068
3069 /* Routine io_registry_entry_get_properties */
3070 kern_return_t is_io_registry_entry_get_properties(
3071 io_object_t registry_entry,
3072 io_buf_ptr_t *properties,
3073 mach_msg_type_number_t *propertiesCnt )
3074 {
3075 return (kIOReturnUnsupported);
3076 }
3077
3078 #if CONFIG_MACF
3079
3080 struct GetPropertiesEditorRef
3081 {
3082 kauth_cred_t cred;
3083 IORegistryEntry * entry;
3084 OSCollection * root;
3085 };
3086
3087 static const OSMetaClassBase *
3088 GetPropertiesEditor(void * reference,
3089 OSSerialize * s,
3090 OSCollection * container,
3091 const OSSymbol * name,
3092 const OSMetaClassBase * value)
3093 {
3094 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3095
3096 if (!ref->root) ref->root = container;
3097 if (ref->root == container)
3098 {
3099 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3100 {
3101 value = 0;
3102 }
3103 }
3104 if (value) value->retain();
3105 return (value);
3106 }
3107
3108 #endif /* CONFIG_MACF */
3109
3110 /* Routine io_registry_entry_get_properties_bin */
3111 kern_return_t is_io_registry_entry_get_properties_bin(
3112 io_object_t registry_entry,
3113 io_buf_ptr_t *properties,
3114 mach_msg_type_number_t *propertiesCnt)
3115 {
3116 kern_return_t err = kIOReturnSuccess;
3117 vm_size_t len;
3118 OSSerialize * s;
3119 OSSerialize::Editor editor = 0;
3120 void * editRef = 0;
3121
3122 CHECK(IORegistryEntry, registry_entry, entry);
3123
3124 #if CONFIG_MACF
3125 GetPropertiesEditorRef ref;
3126 if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
3127 {
3128 editor = &GetPropertiesEditor;
3129 editRef = &ref;
3130 ref.cred = kauth_cred_get();
3131 ref.entry = entry;
3132 ref.root = 0;
3133 }
3134 #endif
3135
3136 s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
3137 if (!s) return (kIOReturnNoMemory);
3138
3139 if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;
3140
3141 if (kIOReturnSuccess == err)
3142 {
3143 len = s->getLength();
3144 *propertiesCnt = len;
3145 err = copyoutkdata(s->text(), len, properties);
3146 }
3147 s->release();
3148
3149 return (err);
3150 }
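/*
 * A minimal user-space sketch of fetching the (possibly MAC-filtered) property
 * table produced above; the wrapper name is illustrative.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static CFMutableDictionaryRef copy_properties(io_registry_entry_t entry)
 *   {
 *       CFMutableDictionaryRef props = NULL;
 *       if (IORegistryEntryCreateCFProperties(entry, &props,
 *                                             kCFAllocatorDefault,
 *                                             kNilOptions) != KERN_SUCCESS)
 *           return NULL;
 *       return props;          // caller releases with CFRelease()
 *   }
 */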
3151
3152 /* Routine io_registry_entry_get_property_bin */
3153 kern_return_t is_io_registry_entry_get_property_bin(
3154 io_object_t registry_entry,
3155 io_name_t plane,
3156 io_name_t property_name,
3157 uint32_t options,
3158 io_buf_ptr_t *properties,
3159 mach_msg_type_number_t *propertiesCnt )
3160 {
3161 kern_return_t err;
3162 vm_size_t len;
3163 OSObject * obj;
3164 const OSSymbol * sym;
3165
3166 CHECK( IORegistryEntry, registry_entry, entry );
3167
3168 #if CONFIG_MACF
3169 if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
3170 return kIOReturnNotPermitted;
3171 #endif
3172
3173 sym = OSSymbol::withCString(property_name);
3174 if (!sym) return (kIOReturnNoMemory);
3175
3176 if (gIORegistryEntryPropertyKeysKey == sym)
3177 {
3178 obj = entry->copyPropertyKeys();
3179 }
3180 else
3181 {
3182 if ((kIORegistryIterateRecursively & options) && plane[0])
3183 {
3184 obj = entry->copyProperty(property_name,
3185 IORegistryEntry::getPlane(plane), options );
3186 }
3187 else
3188 {
3189 obj = entry->copyProperty(property_name);
3190 }
3191 if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
3192 }
3193
3194 sym->release();
3195 if (!obj) return (kIOReturnNotFound);
3196
3197 OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
3198 if( !s) {
3199 obj->release();
3200 return( kIOReturnNoMemory );
3201 }
3202
3203 if( obj->serialize( s )) {
3204 len = s->getLength();
3205 *propertiesCnt = len;
3206 err = copyoutkdata( s->text(), len, properties );
3207
3208 } else err = kIOReturnUnsupported;
3209
3210 s->release();
3211 obj->release();
3212
3213 return( err );
3214 }
3215
3216
3217 /* Routine io_registry_entry_set_properties */
3218 kern_return_t is_io_registry_entry_set_properties
3219 (
3220 io_object_t registry_entry,
3221 io_buf_ptr_t properties,
3222 mach_msg_type_number_t propertiesCnt,
3223 kern_return_t * result)
3224 {
3225 OSObject * obj;
3226 kern_return_t err;
3227 IOReturn res;
3228 vm_offset_t data;
3229 vm_map_offset_t map_data;
3230
3231 CHECK( IORegistryEntry, registry_entry, entry );
3232
3233 if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
3234 return( kIOReturnMessageTooLarge);
3235
3236 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3237 data = CAST_DOWN(vm_offset_t, map_data);
3238
3239 if( KERN_SUCCESS == err) {
3240
3241 FAKE_STACK_FRAME(entry->getMetaClass());
3242
3243 // must return success after vm_map_copyout() succeeds
3244 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3245 vm_deallocate( kernel_map, data, propertiesCnt );
3246
3247 if (!obj)
3248 res = kIOReturnBadArgument;
3249 #if CONFIG_MACF
3250 else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
3251 registry_entry, obj))
3252 {
3253 res = kIOReturnNotPermitted;
3254 }
3255 #endif
3256 else
3257 {
3258 res = entry->setProperties( obj );
3259 }
3260
3261 if (obj)
3262 obj->release();
3263
3264 FAKE_STACK_FRAME_END();
3265
3266 } else
3267 res = err;
3268
3269 *result = res;
3270 return( err );
3271 }
3272
3273 /* Routine io_registry_entry_get_child_iterator */
3274 kern_return_t is_io_registry_entry_get_child_iterator(
3275 io_object_t registry_entry,
3276 io_name_t plane,
3277 io_object_t *iterator )
3278 {
3279 CHECK( IORegistryEntry, registry_entry, entry );
3280
3281 *iterator = entry->getChildIterator(
3282 IORegistryEntry::getPlane( plane ));
3283
3284 return( kIOReturnSuccess );
3285 }
3286
3287 /* Routine io_registry_entry_get_parent_iterator */
3288 kern_return_t is_io_registry_entry_get_parent_iterator(
3289 io_object_t registry_entry,
3290 io_name_t plane,
3291 io_object_t *iterator)
3292 {
3293 CHECK( IORegistryEntry, registry_entry, entry );
3294
3295 *iterator = entry->getParentIterator(
3296 IORegistryEntry::getPlane( plane ));
3297
3298 return( kIOReturnSuccess );
3299 }
3300
3301 /* Routine io_service_get_busy_state */
3302 kern_return_t is_io_service_get_busy_state(
3303 io_object_t _service,
3304 uint32_t *busyState )
3305 {
3306 CHECK( IOService, _service, service );
3307
3308 *busyState = service->getBusyState();
3309
3310 return( kIOReturnSuccess );
3311 }
3312
3313 /* Routine io_service_get_state */
3314 kern_return_t is_io_service_get_state(
3315 io_object_t _service,
3316 uint64_t *state,
3317 uint32_t *busy_state,
3318 uint64_t *accumulated_busy_time )
3319 {
3320 CHECK( IOService, _service, service );
3321
3322 *state = service->getState();
3323 *busy_state = service->getBusyState();
3324 *accumulated_busy_time = service->getAccumulatedBusyTime();
3325
3326 return( kIOReturnSuccess );
3327 }
3328
3329 /* Routine io_service_wait_quiet */
3330 kern_return_t is_io_service_wait_quiet(
3331 io_object_t _service,
3332 mach_timespec_t wait_time )
3333 {
3334 uint64_t timeoutNS;
3335
3336 CHECK( IOService, _service, service );
3337
3338 timeoutNS = wait_time.tv_sec;
3339 timeoutNS *= kSecondScale;
3340 timeoutNS += wait_time.tv_nsec;
3341
3342 return( service->waitQuiet(timeoutNS) );
3343 }
3344
3345 /* Routine io_service_request_probe */
3346 kern_return_t is_io_service_request_probe(
3347 io_object_t _service,
3348 uint32_t options )
3349 {
3350 CHECK( IOService, _service, service );
3351
3352 return( service->requestProbe( options ));
3353 }
3354
3355 /* Routine io_service_get_authorization_id */
3356 kern_return_t is_io_service_get_authorization_id(
3357 io_object_t _service,
3358 uint64_t *authorization_id )
3359 {
3360 kern_return_t kr;
3361
3362 CHECK( IOService, _service, service );
3363
3364 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3365 kIOClientPrivilegeAdministrator );
3366 if( kIOReturnSuccess != kr)
3367 return( kr );
3368
3369 *authorization_id = service->getAuthorizationID();
3370
3371 return( kr );
3372 }
3373
3374 /* Routine io_service_set_authorization_id */
3375 kern_return_t is_io_service_set_authorization_id(
3376 io_object_t _service,
3377 uint64_t authorization_id )
3378 {
3379 CHECK( IOService, _service, service );
3380
3381 return( service->setAuthorizationID( authorization_id ) );
3382 }
3383
3384 /* Routine io_service_open_extended */
3385 kern_return_t is_io_service_open_extended(
3386 io_object_t _service,
3387 task_t owningTask,
3388 uint32_t connect_type,
3389 NDR_record_t ndr,
3390 io_buf_ptr_t properties,
3391 mach_msg_type_number_t propertiesCnt,
3392 kern_return_t * result,
3393 io_object_t *connection )
3394 {
3395 IOUserClient * client = 0;
3396 kern_return_t err = KERN_SUCCESS;
3397 IOReturn res = kIOReturnSuccess;
3398 OSDictionary * propertiesDict = 0;
3399 bool crossEndian;
3400 bool disallowAccess;
3401
3402 CHECK( IOService, _service, service );
3403
3404 if (!owningTask) return (kIOReturnBadArgument);
3405 assert(owningTask == current_task());
3406 if (owningTask != current_task()) return (kIOReturnBadArgument);
3407
3408 do
3409 {
3410 if (properties) return (kIOReturnUnsupported);
3411 #if 0
3412 {
3413 OSObject * obj;
3414 vm_offset_t data;
3415 vm_map_offset_t map_data;
3416
3417 if( propertiesCnt > sizeof(io_struct_inband_t))
3418 return( kIOReturnMessageTooLarge);
3419
3420 err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
3421 res = err;
3422 data = CAST_DOWN(vm_offset_t, map_data);
3423 if (KERN_SUCCESS == err)
3424 {
3425 // must return success after vm_map_copyout() succeeds
3426 obj = OSUnserializeXML( (const char *) data, propertiesCnt );
3427 vm_deallocate( kernel_map, data, propertiesCnt );
3428 propertiesDict = OSDynamicCast(OSDictionary, obj);
3429 if (!propertiesDict)
3430 {
3431 res = kIOReturnBadArgument;
3432 if (obj)
3433 obj->release();
3434 }
3435 }
3436 if (kIOReturnSuccess != res)
3437 break;
3438 }
3439 #endif
3440 crossEndian = (ndr.int_rep != NDR_record.int_rep);
3441 if (crossEndian)
3442 {
3443 if (!propertiesDict)
3444 propertiesDict = OSDictionary::withCapacity(4);
3445 OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
3446 if (data)
3447 {
3448 if (propertiesDict)
3449 propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
3450 data->release();
3451 }
3452 }
3453
3454 res = service->newUserClient( owningTask, (void *) owningTask,
3455 connect_type, propertiesDict, &client );
3456
3457 if (propertiesDict)
3458 propertiesDict->release();
3459
3460 if (res == kIOReturnSuccess)
3461 {
3462 assert( OSDynamicCast(IOUserClient, client) );
3463
3464 client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
3465 client->closed = false;
3466
3467 disallowAccess = (crossEndian
3468 && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
3469 && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
3470 if (disallowAccess) res = kIOReturnUnsupported;
3471 #if CONFIG_MACF
3472 else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
3473 res = kIOReturnNotPermitted;
3474 #endif
3475
3476 if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);
3477
3478 if (kIOReturnSuccess != res)
3479 {
3480 IOStatisticsClientCall();
3481 client->clientClose();
3482 client->release();
3483 client = 0;
3484 break;
3485 }
3486 OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
3487 if (creatorName)
3488 {
3489 client->setProperty(kIOUserClientCreatorKey, creatorName);
3490 creatorName->release();
3491 }
3492 client->setTerminateDefer(service, false);
3493 }
3494 }
3495 while (false);
3496
3497 *connection = client;
3498 *result = res;
3499
3500 return (err);
3501 }
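/*
 * A minimal user-space sketch of opening a connection; IOServiceOpen() lands
 * in is_io_service_open_extended() above, and the connection type (0 here,
 * purely a placeholder) is interpreted by the driver's newUserClient().
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static io_connect_t open_client(io_service_t service)
 *   {
 *       io_connect_t connect = IO_OBJECT_NULL;
 *       if (IOServiceOpen(service, mach_task_self(), 0, &connect) != KERN_SUCCESS)
 *           return IO_OBJECT_NULL;
 *       // balance with IOServiceClose(connect), which reaches is_io_service_close()
 *       return connect;
 *   }
 */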
3502
3503 /* Routine io_service_close */
3504 kern_return_t is_io_service_close(
3505 io_object_t connection )
3506 {
3507 OSSet * mappings;
3508 if ((mappings = OSDynamicCast(OSSet, connection)))
3509 return( kIOReturnSuccess );
3510
3511 CHECK( IOUserClient, connection, client );
3512
3513 IOStatisticsClientCall();
3514
3515 if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
3516 {
3517 client->clientClose();
3518 }
3519 else
3520 {
3521 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3522 client->getRegistryEntryID(), client->getName());
3523 }
3524
3525 return( kIOReturnSuccess );
3526 }
3527
3528 /* Routine io_connect_get_service */
3529 kern_return_t is_io_connect_get_service(
3530 io_object_t connection,
3531 io_object_t *service )
3532 {
3533 IOService * theService;
3534
3535 CHECK( IOUserClient, connection, client );
3536
3537 theService = client->getService();
3538 if( theService)
3539 theService->retain();
3540
3541 *service = theService;
3542
3543 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3544 }
3545
3546 /* Routine io_connect_set_notification_port */
3547 kern_return_t is_io_connect_set_notification_port(
3548 io_object_t connection,
3549 uint32_t notification_type,
3550 mach_port_t port,
3551 uint32_t reference)
3552 {
3553 CHECK( IOUserClient, connection, client );
3554
3555 IOStatisticsClientCall();
3556 return( client->registerNotificationPort( port, notification_type,
3557 (io_user_reference_t) reference ));
3558 }
3559
3560 /* Routine io_connect_set_notification_port_64 */
3561 kern_return_t is_io_connect_set_notification_port_64(
3562 io_object_t connection,
3563 uint32_t notification_type,
3564 mach_port_t port,
3565 io_user_reference_t reference)
3566 {
3567 CHECK( IOUserClient, connection, client );
3568
3569 IOStatisticsClientCall();
3570 return( client->registerNotificationPort( port, notification_type,
3571 reference ));
3572 }
3573
3574 /* Routine io_connect_map_memory_into_task */
3575 kern_return_t is_io_connect_map_memory_into_task
3576 (
3577 io_connect_t connection,
3578 uint32_t memory_type,
3579 task_t into_task,
3580 mach_vm_address_t *address,
3581 mach_vm_size_t *size,
3582 uint32_t flags
3583 )
3584 {
3585 IOReturn err;
3586 IOMemoryMap * map;
3587
3588 CHECK( IOUserClient, connection, client );
3589
3590 if (!into_task) return (kIOReturnBadArgument);
3591
3592 IOStatisticsClientCall();
3593 map = client->mapClientMemory64( memory_type, into_task, flags, *address );
3594
3595 if( map) {
3596 *address = map->getAddress();
3597 if( size)
3598 *size = map->getSize();
3599
3600 if( client->sharedInstance
3601 || (into_task != current_task())) {
3602 // push a name out to the task owning the map,
3603 // so we can clean up maps
3604 mach_port_name_t name __unused =
3605 IOMachPort::makeSendRightForTask(
3606 into_task, map, IKOT_IOKIT_OBJECT );
3607
3608 } else {
3609 // keep it with the user client
3610 IOLockLock( gIOObjectPortLock);
3611 if( 0 == client->mappings)
3612 client->mappings = OSSet::withCapacity(2);
3613 if( client->mappings)
3614 client->mappings->setObject( map);
3615 IOLockUnlock( gIOObjectPortLock);
3616 map->release();
3617 }
3618 err = kIOReturnSuccess;
3619
3620 } else
3621 err = kIOReturnBadArgument;
3622
3623 return( err );
3624 }
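/*
 * A minimal user-space sketch of mapping client memory through the routine
 * above; the memory type (0) is a driver-defined placeholder.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static void *map_client_memory(io_connect_t connect, mach_vm_size_t *size)
 *   {
 *       mach_vm_address_t addr = 0;
 *       if (IOConnectMapMemory64(connect, 0, mach_task_self(),
 *                                &addr, size, kIOMapAnywhere) != KERN_SUCCESS)
 *           return NULL;
 *       // undo later with IOConnectUnmapMemory64(connect, 0, mach_task_self(), addr)
 *       return (void *)(uintptr_t)addr;
 *   }
 */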
3625
3626 /* Routine is_io_connect_map_memory */
3627 kern_return_t is_io_connect_map_memory(
3628 io_object_t connect,
3629 uint32_t type,
3630 task_t task,
3631 uint32_t * mapAddr,
3632 uint32_t * mapSize,
3633 uint32_t flags )
3634 {
3635 IOReturn err;
3636 mach_vm_address_t address;
3637 mach_vm_size_t size;
3638
3639 address = SCALAR64(*mapAddr);
3640 size = SCALAR64(*mapSize);
3641
3642 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3643
3644 *mapAddr = SCALAR32(address);
3645 *mapSize = SCALAR32(size);
3646
3647 return (err);
3648 }
3649
3650 } /* extern "C" */
3651
3652 IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
3653 {
3654 OSIterator * iter;
3655 IOMemoryMap * map = 0;
3656
3657 IOLockLock(gIOObjectPortLock);
3658
3659 iter = OSCollectionIterator::withCollection(mappings);
3660 if(iter)
3661 {
3662 while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
3663 {
3664 if(mem == map->getMemoryDescriptor())
3665 {
3666 map->retain();
3667 mappings->removeObject(map);
3668 break;
3669 }
3670 }
3671 iter->release();
3672 }
3673
3674 IOLockUnlock(gIOObjectPortLock);
3675
3676 return (map);
3677 }
3678
3679 extern "C" {
3680
3681 /* Routine io_connect_unmap_memory_from_task */
3682 kern_return_t is_io_connect_unmap_memory_from_task
3683 (
3684 io_connect_t connection,
3685 uint32_t memory_type,
3686 task_t from_task,
3687 mach_vm_address_t address)
3688 {
3689 IOReturn err;
3690 IOOptionBits options = 0;
3691 IOMemoryDescriptor * memory = 0;
3692 IOMemoryMap * map;
3693
3694 CHECK( IOUserClient, connection, client );
3695
3696 if (!from_task) return (kIOReturnBadArgument);
3697
3698 IOStatisticsClientCall();
3699 err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );
3700
3701 if( memory && (kIOReturnSuccess == err)) {
3702
3703 options = (options & ~kIOMapUserOptionsMask)
3704 | kIOMapAnywhere | kIOMapReference;
3705
3706 map = memory->createMappingInTask( from_task, address, options );
3707 memory->release();
3708 if( map)
3709 {
3710 IOLockLock( gIOObjectPortLock);
3711 if( client->mappings)
3712 client->mappings->removeObject( map);
3713 IOLockUnlock( gIOObjectPortLock);
3714
3715 mach_port_name_t name = 0;
3716 if (from_task != current_task())
3717 name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
3718 if (name)
3719 {
3720 map->userClientUnmap();
3721 err = iokit_mod_send_right( from_task, name, -2 );
3722 err = kIOReturnSuccess;
3723 }
3724 else
3725 IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
3726 if (from_task == current_task())
3727 map->release();
3728 }
3729 else
3730 err = kIOReturnBadArgument;
3731 }
3732
3733 return( err );
3734 }
3735
3736 kern_return_t is_io_connect_unmap_memory(
3737 io_object_t connect,
3738 uint32_t type,
3739 task_t task,
3740 uint32_t mapAddr )
3741 {
3742 IOReturn err;
3743 mach_vm_address_t address;
3744
3745 address = SCALAR64(mapAddr);
3746
3747 err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
3748
3749 return (err);
3750 }
3751
3752
3753 /* Routine io_connect_add_client */
3754 kern_return_t is_io_connect_add_client(
3755 io_object_t connection,
3756 io_object_t connect_to)
3757 {
3758 CHECK( IOUserClient, connection, client );
3759 CHECK( IOUserClient, connect_to, to );
3760
3761 IOStatisticsClientCall();
3762 return( client->connectClient( to ) );
3763 }
3764
3765
3766 /* Routine io_connect_set_properties */
3767 kern_return_t is_io_connect_set_properties(
3768 io_object_t connection,
3769 io_buf_ptr_t properties,
3770 mach_msg_type_number_t propertiesCnt,
3771 kern_return_t * result)
3772 {
3773 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3774 }
3775
3776 /* Routine io_connect_method_var_output */
3777 kern_return_t is_io_connect_method_var_output
3778 (
3779 io_connect_t connection,
3780 uint32_t selector,
3781 io_scalar_inband64_t scalar_input,
3782 mach_msg_type_number_t scalar_inputCnt,
3783 io_struct_inband_t inband_input,
3784 mach_msg_type_number_t inband_inputCnt,
3785 mach_vm_address_t ool_input,
3786 mach_vm_size_t ool_input_size,
3787 io_struct_inband_t inband_output,
3788 mach_msg_type_number_t *inband_outputCnt,
3789 io_scalar_inband64_t scalar_output,
3790 mach_msg_type_number_t *scalar_outputCnt,
3791 io_buf_ptr_t *var_output,
3792 mach_msg_type_number_t *var_outputCnt
3793 )
3794 {
3795 CHECK( IOUserClient, connection, client );
3796
3797 IOExternalMethodArguments args;
3798 IOReturn ret;
3799 IOMemoryDescriptor * inputMD = 0;
3800 OSObject * structureVariableOutputData = 0;
3801
3802 bzero(&args.__reserved[0], sizeof(args.__reserved));
3803 args.__reservedA = 0;
3804 args.version = kIOExternalMethodArgumentsCurrentVersion;
3805
3806 args.selector = selector;
3807
3808 args.asyncWakePort = MACH_PORT_NULL;
3809 args.asyncReference = 0;
3810 args.asyncReferenceCount = 0;
3811 args.structureVariableOutputData = &structureVariableOutputData;
3812
3813 args.scalarInput = scalar_input;
3814 args.scalarInputCount = scalar_inputCnt;
3815 args.structureInput = inband_input;
3816 args.structureInputSize = inband_inputCnt;
3817
3818 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3819
3820 if (ool_input)
3821 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3822 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3823 current_task());
3824
3825 args.structureInputDescriptor = inputMD;
3826
3827 args.scalarOutput = scalar_output;
3828 args.scalarOutputCount = *scalar_outputCnt;
3829 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3830 args.structureOutput = inband_output;
3831 args.structureOutputSize = *inband_outputCnt;
3832 args.structureOutputDescriptor = NULL;
3833 args.structureOutputDescriptorSize = 0;
3834
3835 IOStatisticsClientCall();
3836 ret = client->externalMethod( selector, &args );
3837
3838 *scalar_outputCnt = args.scalarOutputCount;
3839 *inband_outputCnt = args.structureOutputSize;
3840
3841 if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
3842 {
3843 OSSerialize * serialize;
3844 OSData * data;
3845 vm_size_t len;
3846
3847 if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
3848 {
3849 len = serialize->getLength();
3850 *var_outputCnt = len;
3851 ret = copyoutkdata(serialize->text(), len, var_output);
3852 }
3853 else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
3854 {
3855 len = data->getLength();
3856 *var_outputCnt = len;
3857 ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
3858 }
3859 else
3860 {
3861 ret = kIOReturnUnderrun;
3862 }
3863 }
3864
3865 if (inputMD)
3866 inputMD->release();
3867 if (structureVariableOutputData)
3868 structureVariableOutputData->release();
3869
3870 return (ret);
3871 }
3872
3873 /* Routine io_connect_method */
3874 kern_return_t is_io_connect_method
3875 (
3876 io_connect_t connection,
3877 uint32_t selector,
3878 io_scalar_inband64_t scalar_input,
3879 mach_msg_type_number_t scalar_inputCnt,
3880 io_struct_inband_t inband_input,
3881 mach_msg_type_number_t inband_inputCnt,
3882 mach_vm_address_t ool_input,
3883 mach_vm_size_t ool_input_size,
3884 io_struct_inband_t inband_output,
3885 mach_msg_type_number_t *inband_outputCnt,
3886 io_scalar_inband64_t scalar_output,
3887 mach_msg_type_number_t *scalar_outputCnt,
3888 mach_vm_address_t ool_output,
3889 mach_vm_size_t *ool_output_size
3890 )
3891 {
3892 CHECK( IOUserClient, connection, client );
3893
3894 IOExternalMethodArguments args;
3895 IOReturn ret;
3896 IOMemoryDescriptor * inputMD = 0;
3897 IOMemoryDescriptor * outputMD = 0;
3898
3899 bzero(&args.__reserved[0], sizeof(args.__reserved));
3900 args.__reservedA = 0;
3901 args.version = kIOExternalMethodArgumentsCurrentVersion;
3902
3903 args.selector = selector;
3904
3905 args.asyncWakePort = MACH_PORT_NULL;
3906 args.asyncReference = 0;
3907 args.asyncReferenceCount = 0;
3908 args.structureVariableOutputData = 0;
3909
3910 args.scalarInput = scalar_input;
3911 args.scalarInputCount = scalar_inputCnt;
3912 args.structureInput = inband_input;
3913 args.structureInputSize = inband_inputCnt;
3914
3915 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3916 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
3917
3918 if (ool_input)
3919 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
3920 kIODirectionOut | kIOMemoryMapCopyOnWrite,
3921 current_task());
3922
3923 args.structureInputDescriptor = inputMD;
3924
3925 args.scalarOutput = scalar_output;
3926 args.scalarOutputCount = *scalar_outputCnt;
3927 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
3928 args.structureOutput = inband_output;
3929 args.structureOutputSize = *inband_outputCnt;
3930
3931 if (ool_output && ool_output_size)
3932 {
3933 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
3934 kIODirectionIn, current_task());
3935 }
3936
3937 args.structureOutputDescriptor = outputMD;
3938 args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
3939
3940 IOStatisticsClientCall();
3941 ret = client->externalMethod( selector, &args );
3942
3943 *scalar_outputCnt = args.scalarOutputCount;
3944 *inband_outputCnt = args.structureOutputSize;
3945 *ool_output_size = args.structureOutputDescriptorSize;
3946
3947 if (inputMD)
3948 inputMD->release();
3949 if (outputMD)
3950 outputMD->release();
3951
3952 return (ret);
3953 }
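/*
 * A minimal user-space sketch of driving the external-method path above;
 * IOConnectCallScalarMethod() typically funnels into is_io_connect_method()
 * with the struct and out-of-line buffers unused. The selector (0) and the
 * meaning of the scalars are entirely driver-specific placeholders.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   static kern_return_t call_selector_zero(io_connect_t connect, uint64_t arg)
 *   {
 *       uint64_t out    = 0;
 *       uint32_t outCnt = 1;
 *       return IOConnectCallScalarMethod(connect, 0, &arg, 1, &out, &outCnt);
 *   }
 */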
3954
3955 /* Routine io_connect_async_method */
3956 kern_return_t is_io_connect_async_method
3957 (
3958 io_connect_t connection,
3959 mach_port_t wake_port,
3960 io_async_ref64_t reference,
3961 mach_msg_type_number_t referenceCnt,
3962 uint32_t selector,
3963 io_scalar_inband64_t scalar_input,
3964 mach_msg_type_number_t scalar_inputCnt,
3965 io_struct_inband_t inband_input,
3966 mach_msg_type_number_t inband_inputCnt,
3967 mach_vm_address_t ool_input,
3968 mach_vm_size_t ool_input_size,
3969 io_struct_inband_t inband_output,
3970 mach_msg_type_number_t *inband_outputCnt,
3971 io_scalar_inband64_t scalar_output,
3972 mach_msg_type_number_t *scalar_outputCnt,
3973 mach_vm_address_t ool_output,
3974 mach_vm_size_t * ool_output_size
3975 )
3976 {
3977 CHECK( IOUserClient, connection, client );
3978
3979 IOExternalMethodArguments args;
3980 IOReturn ret;
3981 IOMemoryDescriptor * inputMD = 0;
3982 IOMemoryDescriptor * outputMD = 0;
3983
3984 bzero(&args.__reserved[0], sizeof(args.__reserved));
3985 args.__reservedA = 0;
3986 args.version = kIOExternalMethodArgumentsCurrentVersion;
3987
3988 reference[0] = (io_user_reference_t) wake_port;
3989 if (vm_map_is_64bit(get_task_map(current_task())))
3990 reference[0] |= kIOUCAsync64Flag;
3991
3992 args.selector = selector;
3993
3994 args.asyncWakePort = wake_port;
3995 args.asyncReference = reference;
3996 args.asyncReferenceCount = referenceCnt;
3997
3998 args.structureVariableOutputData = 0;
3999
4000 args.scalarInput = scalar_input;
4001 args.scalarInputCount = scalar_inputCnt;
4002 args.structureInput = inband_input;
4003 args.structureInputSize = inband_inputCnt;
4004
4005 if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4006 if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
4007
4008 if (ool_input)
4009 inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
4010 kIODirectionOut | kIOMemoryMapCopyOnWrite,
4011 current_task());
4012
4013 args.structureInputDescriptor = inputMD;
4014
4015 args.scalarOutput = scalar_output;
4016 args.scalarOutputCount = *scalar_outputCnt;
4017 bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
4018 args.structureOutput = inband_output;
4019 args.structureOutputSize = *inband_outputCnt;
4020
4021 if (ool_output)
4022 {
4023 outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
4024 kIODirectionIn, current_task());
4025 }
4026
4027 args.structureOutputDescriptor = outputMD;
4028 args.structureOutputDescriptorSize = *ool_output_size;
4029
4030 IOStatisticsClientCall();
4031 ret = client->externalMethod( selector, &args );
4032
4033 *inband_outputCnt = args.structureOutputSize;
4034 *ool_output_size = args.structureOutputDescriptorSize;
4035
4036 if (inputMD)
4037 inputMD->release();
4038 if (outputMD)
4039 outputMD->release();
4040
4041 return (ret);
4042 }
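/*
 * Note on the async variant above: reference[0] is overwritten with the wake
 * port, tagged with kIOUCAsync64Flag for 64-bit callers, so that the
 * completion path can later recover the port and the reply width from the
 * stored async reference alone.
 */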
4043
4044 /* Routine io_connect_method_scalarI_scalarO */
4045 kern_return_t is_io_connect_method_scalarI_scalarO(
4046 io_object_t connect,
4047 uint32_t index,
4048 io_scalar_inband_t input,
4049 mach_msg_type_number_t inputCount,
4050 io_scalar_inband_t output,
4051 mach_msg_type_number_t * outputCount )
4052 {
4053 IOReturn err;
4054 uint32_t i;
4055 io_scalar_inband64_t _input;
4056 io_scalar_inband64_t _output;
4057
4058 mach_msg_type_number_t struct_outputCnt = 0;
4059 mach_vm_size_t ool_output_size = 0;
4060
4061 bzero(&_output[0], sizeof(_output));
4062 for (i = 0; i < inputCount; i++)
4063 _input[i] = SCALAR64(input[i]);
4064
4065 err = is_io_connect_method(connect, index,
4066 _input, inputCount,
4067 NULL, 0,
4068 0, 0,
4069 NULL, &struct_outputCnt,
4070 _output, outputCount,
4071 0, &ool_output_size);
4072
4073 for (i = 0; i < *outputCount; i++)
4074 output[i] = SCALAR32(_output[i]);
4075
4076 return (err);
4077 }
4078
4079 kern_return_t shim_io_connect_method_scalarI_scalarO(
4080 IOExternalMethod * method,
4081 IOService * object,
4082 const io_user_scalar_t * input,
4083 mach_msg_type_number_t inputCount,
4084 io_user_scalar_t * output,
4085 mach_msg_type_number_t * outputCount )
4086 {
4087 IOMethod func;
4088 io_scalar_inband_t _output;
4089 IOReturn err;
4090 err = kIOReturnBadArgument;
4091
4092 bzero(&_output[0], sizeof(_output));
4093 do {
4094
4095 if( inputCount != method->count0)
4096 {
4097 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4098 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4099 continue;
4100 }
4101 if( *outputCount != method->count1)
4102 {
4103 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4104 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4105 continue;
4106 }
4107
4108 func = method->func;
4109
4110 switch( inputCount) {
4111
4112 case 6:
4113 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4114 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4115 break;
4116 case 5:
4117 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4118 ARG32(input[3]), ARG32(input[4]),
4119 &_output[0] );
4120 break;
4121 case 4:
4122 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4123 ARG32(input[3]),
4124 &_output[0], &_output[1] );
4125 break;
4126 case 3:
4127 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4128 &_output[0], &_output[1], &_output[2] );
4129 break;
4130 case 2:
4131 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4132 &_output[0], &_output[1], &_output[2],
4133 &_output[3] );
4134 break;
4135 case 1:
4136 err = (object->*func)( ARG32(input[0]),
4137 &_output[0], &_output[1], &_output[2],
4138 &_output[3], &_output[4] );
4139 break;
4140 case 0:
4141 err = (object->*func)( &_output[0], &_output[1], &_output[2],
4142 &_output[3], &_output[4], &_output[5] );
4143 break;
4144
4145 default:
4146 IOLog("%s: Bad method table\n", object->getName());
4147 }
4148 }
4149 while( false);
4150
4151 uint32_t i;
4152 for (i = 0; i < *outputCount; i++)
4153 output[i] = SCALAR32(_output[i]);
4154
4155 return( err);
4156 }
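
/*
 * Illustrative sketch (not part of this file): the kind of legacy method table
 * the shim above dispatches. A pre-Leopard style user client returns entries
 * like this from getTargetAndMethodForIndex(); count0/count1 are the scalar
 * input/output counts checked against inputCount and *outputCount above. The
 * class name and selector layout are hypothetical.
 */
#if 0 /* example only */
IOExternalMethod *
MyLegacyUserClient::getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
	/* selector 0: two scalar inputs, one scalar output (the shim's "case 2" above) */
	static const IOExternalMethod sMethods[] = {
	    {
		NULL,					/* object: unused, *targetP is set below */
		(IOMethod) &MyLegacyUserClient::doAdd,	/* func */
		kIOUCScalarIScalarO,			/* flags */
		2,					/* count0: scalar inputs */
		1					/* count1: scalar outputs */
	    },
	};

	if (index >= (sizeof(sMethods) / sizeof(sMethods[0]))) return NULL;
	*targetP = this;
	return (IOExternalMethod *) &sMethods[index];
}
#endif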
4157
4158 /* Routine io_async_method_scalarI_scalarO */
4159 kern_return_t is_io_async_method_scalarI_scalarO(
4160 io_object_t connect,
4161 mach_port_t wake_port,
4162 io_async_ref_t reference,
4163 mach_msg_type_number_t referenceCnt,
4164 uint32_t index,
4165 io_scalar_inband_t input,
4166 mach_msg_type_number_t inputCount,
4167 io_scalar_inband_t output,
4168 mach_msg_type_number_t * outputCount )
4169 {
4170 IOReturn err;
4171 uint32_t i;
4172 io_scalar_inband64_t _input;
4173 io_scalar_inband64_t _output;
4174 io_async_ref64_t _reference;
4175
4176 bzero(&_output[0], sizeof(_output));
4177 for (i = 0; i < referenceCnt; i++)
4178 _reference[i] = REF64(reference[i]);
4179
4180 mach_msg_type_number_t struct_outputCnt = 0;
4181 mach_vm_size_t ool_output_size = 0;
4182
4183 for (i = 0; i < inputCount; i++)
4184 _input[i] = SCALAR64(input[i]);
4185
4186 err = is_io_connect_async_method(connect,
4187 wake_port, _reference, referenceCnt,
4188 index,
4189 _input, inputCount,
4190 NULL, 0,
4191 0, 0,
4192 NULL, &struct_outputCnt,
4193 _output, outputCount,
4194 0, &ool_output_size);
4195
4196 for (i = 0; i < *outputCount; i++)
4197 output[i] = SCALAR32(_output[i]);
4198
4199 return (err);
4200 }
4201 /* Routine io_async_method_scalarI_structureO */
4202 kern_return_t is_io_async_method_scalarI_structureO(
4203 io_object_t connect,
4204 mach_port_t wake_port,
4205 io_async_ref_t reference,
4206 mach_msg_type_number_t referenceCnt,
4207 uint32_t index,
4208 io_scalar_inband_t input,
4209 mach_msg_type_number_t inputCount,
4210 io_struct_inband_t output,
4211 mach_msg_type_number_t * outputCount )
4212 {
4213 uint32_t i;
4214 io_scalar_inband64_t _input;
4215 io_async_ref64_t _reference;
4216
4217 for (i = 0; i < referenceCnt; i++)
4218 _reference[i] = REF64(reference[i]);
4219
4220 mach_msg_type_number_t scalar_outputCnt = 0;
4221 mach_vm_size_t ool_output_size = 0;
4222
4223 for (i = 0; i < inputCount; i++)
4224 _input[i] = SCALAR64(input[i]);
4225
4226 return (is_io_connect_async_method(connect,
4227 wake_port, _reference, referenceCnt,
4228 index,
4229 _input, inputCount,
4230 NULL, 0,
4231 0, 0,
4232 output, outputCount,
4233 NULL, &scalar_outputCnt,
4234 0, &ool_output_size));
4235 }
4236
4237 /* Routine io_async_method_scalarI_structureI */
4238 kern_return_t is_io_async_method_scalarI_structureI(
4239 io_connect_t connect,
4240 mach_port_t wake_port,
4241 io_async_ref_t reference,
4242 mach_msg_type_number_t referenceCnt,
4243 uint32_t index,
4244 io_scalar_inband_t input,
4245 mach_msg_type_number_t inputCount,
4246 io_struct_inband_t inputStruct,
4247 mach_msg_type_number_t inputStructCount )
4248 {
4249 uint32_t i;
4250 io_scalar_inband64_t _input;
4251 io_async_ref64_t _reference;
4252
4253 for (i = 0; i < referenceCnt; i++)
4254 _reference[i] = REF64(reference[i]);
4255
4256 mach_msg_type_number_t scalar_outputCnt = 0;
4257 mach_msg_type_number_t inband_outputCnt = 0;
4258 mach_vm_size_t ool_output_size = 0;
4259
4260 for (i = 0; i < inputCount; i++)
4261 _input[i] = SCALAR64(input[i]);
4262
4263 return (is_io_connect_async_method(connect,
4264 wake_port, _reference, referenceCnt,
4265 index,
4266 _input, inputCount,
4267 inputStruct, inputStructCount,
4268 0, 0,
4269 NULL, &inband_outputCnt,
4270 NULL, &scalar_outputCnt,
4271 0, &ool_output_size));
4272 }
4273
4274 /* Routine io_async_method_structureI_structureO */
4275 kern_return_t is_io_async_method_structureI_structureO(
4276 io_object_t connect,
4277 mach_port_t wake_port,
4278 io_async_ref_t reference,
4279 mach_msg_type_number_t referenceCnt,
4280 uint32_t index,
4281 io_struct_inband_t input,
4282 mach_msg_type_number_t inputCount,
4283 io_struct_inband_t output,
4284 mach_msg_type_number_t * outputCount )
4285 {
4286 uint32_t i;
4287 mach_msg_type_number_t scalar_outputCnt = 0;
4288 mach_vm_size_t ool_output_size = 0;
4289 io_async_ref64_t _reference;
4290
4291 for (i = 0; i < referenceCnt; i++)
4292 _reference[i] = REF64(reference[i]);
4293
4294 return (is_io_connect_async_method(connect,
4295 wake_port, _reference, referenceCnt,
4296 index,
4297 NULL, 0,
4298 input, inputCount,
4299 0, 0,
4300 output, outputCount,
4301 NULL, &scalar_outputCnt,
4302 0, &ool_output_size));
4303 }
4304
4305
4306 kern_return_t shim_io_async_method_scalarI_scalarO(
4307 IOExternalAsyncMethod * method,
4308 IOService * object,
4309 mach_port_t asyncWakePort,
4310 io_user_reference_t * asyncReference,
4311 uint32_t asyncReferenceCount,
4312 const io_user_scalar_t * input,
4313 mach_msg_type_number_t inputCount,
4314 io_user_scalar_t * output,
4315 mach_msg_type_number_t * outputCount )
4316 {
4317 IOAsyncMethod func;
4318 uint32_t i;
4319 io_scalar_inband_t _output;
4320 IOReturn err;
4321 io_async_ref_t reference;
4322
4323 bzero(&_output[0], sizeof(_output));
4324 for (i = 0; i < asyncReferenceCount; i++)
4325 reference[i] = REF32(asyncReference[i]);
4326
4327 err = kIOReturnBadArgument;
4328
4329 do {
4330
4331 if( inputCount != method->count0)
4332 {
4333 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4334 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4335 continue;
4336 }
4337 if( *outputCount != method->count1)
4338 {
4339 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
4340 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4341 continue;
4342 }
4343
4344 func = method->func;
4345
4346 switch( inputCount) {
4347
4348 case 6:
4349 err = (object->*func)( reference,
4350 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4351 ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
4352 break;
4353 case 5:
4354 err = (object->*func)( reference,
4355 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4356 ARG32(input[3]), ARG32(input[4]),
4357 &_output[0] );
4358 break;
4359 case 4:
4360 err = (object->*func)( reference,
4361 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4362 ARG32(input[3]),
4363 &_output[0], &_output[1] );
4364 break;
4365 case 3:
4366 err = (object->*func)( reference,
4367 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4368 &_output[0], &_output[1], &_output[2] );
4369 break;
4370 case 2:
4371 err = (object->*func)( reference,
4372 ARG32(input[0]), ARG32(input[1]),
4373 &_output[0], &_output[1], &_output[2],
4374 &_output[3] );
4375 break;
4376 case 1:
4377 err = (object->*func)( reference,
4378 ARG32(input[0]),
4379 &_output[0], &_output[1], &_output[2],
4380 &_output[3], &_output[4] );
4381 break;
4382 case 0:
4383 err = (object->*func)( reference,
4384 &_output[0], &_output[1], &_output[2],
4385 &_output[3], &_output[4], &_output[5] );
4386 break;
4387
4388 default:
4389 IOLog("%s: Bad method table\n", object->getName());
4390 }
4391 }
4392 while( false);
4393
4394 for (i = 0; i < *outputCount; i++)
4395 output[i] = SCALAR32(_output[i]);
4396
4397 return( err);
4398 }
4399
4400
4401 /* Routine io_connect_method_scalarI_structureO */
4402 kern_return_t is_io_connect_method_scalarI_structureO(
4403 io_object_t connect,
4404 uint32_t index,
4405 io_scalar_inband_t input,
4406 mach_msg_type_number_t inputCount,
4407 io_struct_inband_t output,
4408 mach_msg_type_number_t * outputCount )
4409 {
4410 uint32_t i;
4411 io_scalar_inband64_t _input;
4412
4413 mach_msg_type_number_t scalar_outputCnt = 0;
4414 mach_vm_size_t ool_output_size = 0;
4415
4416 for (i = 0; i < inputCount; i++)
4417 _input[i] = SCALAR64(input[i]);
4418
4419 return (is_io_connect_method(connect, index,
4420 _input, inputCount,
4421 NULL, 0,
4422 0, 0,
4423 output, outputCount,
4424 NULL, &scalar_outputCnt,
4425 0, &ool_output_size));
4426 }
4427
4428 kern_return_t shim_io_connect_method_scalarI_structureO(
4429
4430 IOExternalMethod * method,
4431 IOService * object,
4432 const io_user_scalar_t * input,
4433 mach_msg_type_number_t inputCount,
4434 io_struct_inband_t output,
4435 IOByteCount * outputCount )
4436 {
4437 IOMethod func;
4438 IOReturn err;
4439
4440 err = kIOReturnBadArgument;
4441
4442 do {
4443 if( inputCount != method->count0)
4444 {
4445 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4446 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4447 continue;
4448 }
4449 if( (kIOUCVariableStructureSize != method->count1)
4450 && (*outputCount != method->count1))
4451 {
4452 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4453 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4454 continue;
4455 }
4456
4457 func = method->func;
4458
4459 switch( inputCount) {
4460
4461 case 5:
4462 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4463 ARG32(input[3]), ARG32(input[4]),
4464 output );
4465 break;
4466 case 4:
4467 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4468 ARG32(input[3]),
4469 output, (void *)outputCount );
4470 break;
4471 case 3:
4472 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4473 output, (void *)outputCount, 0 );
4474 break;
4475 case 2:
4476 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4477 output, (void *)outputCount, 0, 0 );
4478 break;
4479 case 1:
4480 err = (object->*func)( ARG32(input[0]),
4481 output, (void *)outputCount, 0, 0, 0 );
4482 break;
4483 case 0:
4484 err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
4485 break;
4486
4487 default:
4488 IOLog("%s: Bad method table\n", object->getName());
4489 }
4490 }
4491 while( false);
4492
4493 return( err);
4494 }
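
/*
 * Illustrative sketch (not part of this file): a legacy scalarI_structureO entry
 * with a variable-sized result. When count1 is kIOUCVariableStructureSize the
 * size check above is skipped, and the handler receives the caller's buffer size
 * through the (void *)outputCount slot, updating it to the number of bytes
 * actually produced. Names and payload are hypothetical.
 */
#if 0 /* example only */
static const IOExternalMethod kGetInfoMethod = {
	NULL,						/* object */
	(IOMethod) &MyLegacyUserClient::getInfo,	/* func */
	kIOUCScalarIStructO,				/* flags */
	1,						/* count0: one scalar input */
	kIOUCVariableStructureSize			/* count1: variable output size */
};

/* Handler shape matching the shim's "case 1" dispatch above. */
IOReturn
MyLegacyUserClient::getInfo(void *which, void *output, void *outputSize,
    void *, void *, void *)
{
	IOByteCount *sizeInOut = (IOByteCount *) outputSize;	/* in: buffer size, out: bytes written */

	(void) which;						/* selector-specific argument, unused here */
	if (*sizeInOut < sizeof(uint32_t)) return kIOReturnNoSpace;
	*(uint32_t *) output = 42;				/* hypothetical payload */
	*sizeInOut = sizeof(uint32_t);
	return kIOReturnSuccess;
}
#endif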
4495
4496
4497 kern_return_t shim_io_async_method_scalarI_structureO(
4498 IOExternalAsyncMethod * method,
4499 IOService * object,
4500 mach_port_t asyncWakePort,
4501 io_user_reference_t * asyncReference,
4502 uint32_t asyncReferenceCount,
4503 const io_user_scalar_t * input,
4504 mach_msg_type_number_t inputCount,
4505 io_struct_inband_t output,
4506 mach_msg_type_number_t * outputCount )
4507 {
4508 IOAsyncMethod func;
4509 uint32_t i;
4510 IOReturn err;
4511 io_async_ref_t reference;
4512
4513 for (i = 0; i < asyncReferenceCount; i++)
4514 reference[i] = REF32(asyncReference[i]);
4515
4516 err = kIOReturnBadArgument;
4517 do {
4518 if( inputCount != method->count0)
4519 {
4520 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4521 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4522 continue;
4523 }
4524 if( (kIOUCVariableStructureSize != method->count1)
4525 && (*outputCount != method->count1))
4526 {
4527 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4528 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4529 continue;
4530 }
4531
4532 func = method->func;
4533
4534 switch( inputCount) {
4535
4536 case 5:
4537 err = (object->*func)( reference,
4538 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4539 ARG32(input[3]), ARG32(input[4]),
4540 output );
4541 break;
4542 case 4:
4543 err = (object->*func)( reference,
4544 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4545 ARG32(input[3]),
4546 output, (void *)outputCount );
4547 break;
4548 case 3:
4549 err = (object->*func)( reference,
4550 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4551 output, (void *)outputCount, 0 );
4552 break;
4553 case 2:
4554 err = (object->*func)( reference,
4555 ARG32(input[0]), ARG32(input[1]),
4556 output, (void *)outputCount, 0, 0 );
4557 break;
4558 case 1:
4559 err = (object->*func)( reference,
4560 ARG32(input[0]),
4561 output, (void *)outputCount, 0, 0, 0 );
4562 break;
4563 case 0:
4564 err = (object->*func)( reference,
4565 output, (void *)outputCount, 0, 0, 0, 0 );
4566 break;
4567
4568 default:
4569 IOLog("%s: Bad method table\n", object->getName());
4570 }
4571 }
4572 while( false);
4573
4574 return( err);
4575 }
4576
4577 /* Routine io_connect_method_scalarI_structureI */
4578 kern_return_t is_io_connect_method_scalarI_structureI(
4579 io_connect_t connect,
4580 uint32_t index,
4581 io_scalar_inband_t input,
4582 mach_msg_type_number_t inputCount,
4583 io_struct_inband_t inputStruct,
4584 mach_msg_type_number_t inputStructCount )
4585 {
4586 uint32_t i;
4587 io_scalar_inband64_t _input;
4588
4589 mach_msg_type_number_t scalar_outputCnt = 0;
4590 mach_msg_type_number_t inband_outputCnt = 0;
4591 mach_vm_size_t ool_output_size = 0;
4592
4593 for (i = 0; i < inputCount; i++)
4594 _input[i] = SCALAR64(input[i]);
4595
4596 return (is_io_connect_method(connect, index,
4597 _input, inputCount,
4598 inputStruct, inputStructCount,
4599 0, 0,
4600 NULL, &inband_outputCnt,
4601 NULL, &scalar_outputCnt,
4602 0, &ool_output_size));
4603 }
4604
4605 kern_return_t shim_io_connect_method_scalarI_structureI(
4606 IOExternalMethod * method,
4607 IOService * object,
4608 const io_user_scalar_t * input,
4609 mach_msg_type_number_t inputCount,
4610 io_struct_inband_t inputStruct,
4611 mach_msg_type_number_t inputStructCount )
4612 {
4613 IOMethod func;
4614 IOReturn err = kIOReturnBadArgument;
4615
4616 do
4617 {
4618 if (inputCount != method->count0)
4619 {
4620 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4621 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4622 continue;
4623 }
4624 if( (kIOUCVariableStructureSize != method->count1)
4625 && (inputStructCount != method->count1))
4626 {
4627 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4628 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4629 continue;
4630 }
4631
4632 func = method->func;
4633
4634 switch( inputCount) {
4635
4636 case 5:
4637 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4638 ARG32(input[3]), ARG32(input[4]),
4639 inputStruct );
4640 break;
4641 case 4:
4642 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4643 ARG32(input[3]),
4644 inputStruct, (void *)(uintptr_t)inputStructCount );
4645 break;
4646 case 3:
4647 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4648 inputStruct, (void *)(uintptr_t)inputStructCount,
4649 0 );
4650 break;
4651 case 2:
4652 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4653 inputStruct, (void *)(uintptr_t)inputStructCount,
4654 0, 0 );
4655 break;
4656 case 1:
4657 err = (object->*func)( ARG32(input[0]),
4658 inputStruct, (void *)(uintptr_t)inputStructCount,
4659 0, 0, 0 );
4660 break;
4661 case 0:
4662 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4663 0, 0, 0, 0 );
4664 break;
4665
4666 default:
4667 IOLog("%s: Bad method table\n", object->getName());
4668 }
4669 }
4670 while (false);
4671
4672 return( err);
4673 }
4674
4675 kern_return_t shim_io_async_method_scalarI_structureI(
4676 IOExternalAsyncMethod * method,
4677 IOService * object,
4678 mach_port_t asyncWakePort,
4679 io_user_reference_t * asyncReference,
4680 uint32_t asyncReferenceCount,
4681 const io_user_scalar_t * input,
4682 mach_msg_type_number_t inputCount,
4683 io_struct_inband_t inputStruct,
4684 mach_msg_type_number_t inputStructCount )
4685 {
4686 IOAsyncMethod func;
4687 uint32_t i;
4688 IOReturn err = kIOReturnBadArgument;
4689 io_async_ref_t reference;
4690
4691 for (i = 0; i < asyncReferenceCount; i++)
4692 reference[i] = REF32(asyncReference[i]);
4693
4694 do
4695 {
4696 if (inputCount != method->count0)
4697 {
4698 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4699 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4700 continue;
4701 }
4702 if( (kIOUCVariableStructureSize != method->count1)
4703 && (inputStructCount != method->count1))
4704 {
4705 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4706 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4707 continue;
4708 }
4709
4710 func = method->func;
4711
4712 switch( inputCount) {
4713
4714 case 5:
4715 err = (object->*func)( reference,
4716 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4717 ARG32(input[3]), ARG32(input[4]),
4718 inputStruct );
4719 break;
4720 case 4:
4721 err = (object->*func)( reference,
4722 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4723 ARG32(input[3]),
4724 inputStruct, (void *)(uintptr_t)inputStructCount );
4725 break;
4726 case 3:
4727 err = (object->*func)( reference,
4728 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4729 inputStruct, (void *)(uintptr_t)inputStructCount,
4730 0 );
4731 break;
4732 case 2:
4733 err = (object->*func)( reference,
4734 ARG32(input[0]), ARG32(input[1]),
4735 inputStruct, (void *)(uintptr_t)inputStructCount,
4736 0, 0 );
4737 break;
4738 case 1:
4739 err = (object->*func)( reference,
4740 ARG32(input[0]),
4741 inputStruct, (void *)(uintptr_t)inputStructCount,
4742 0, 0, 0 );
4743 break;
4744 case 0:
4745 err = (object->*func)( reference,
4746 inputStruct, (void *)(uintptr_t)inputStructCount,
4747 0, 0, 0, 0 );
4748 break;
4749
4750 default:
4751 IOLog("%s: Bad method table\n", object->getName());
4752 }
4753 }
4754 while (false);
4755
4756 return( err);
4757 }
4758
4759 /* Routine io_connect_method_structureI_structureO */
4760 kern_return_t is_io_connect_method_structureI_structureO(
4761 io_object_t connect,
4762 uint32_t index,
4763 io_struct_inband_t input,
4764 mach_msg_type_number_t inputCount,
4765 io_struct_inband_t output,
4766 mach_msg_type_number_t * outputCount )
4767 {
4768 mach_msg_type_number_t scalar_outputCnt = 0;
4769 mach_vm_size_t ool_output_size = 0;
4770
4771 return (is_io_connect_method(connect, index,
4772 NULL, 0,
4773 input, inputCount,
4774 0, 0,
4775 output, outputCount,
4776 NULL, &scalar_outputCnt,
4777 0, &ool_output_size));
4778 }
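
/*
 * Illustrative sketch (not part of this file): the user-space counterpart of the
 * inband structure path above. IOConnectCallStructMethod() carries both buffers
 * inline in the MIG message as long as they fit in io_struct_inband_t; larger
 * buffers are expected to go through the out-of-line path instead. The selector
 * and struct layouts below are hypothetical.
 */
#if 0 /* example only */
#include <IOKit/IOKitLib.h>

struct example_request  { uint32_t op; uint32_t flags; };
struct example_response { uint64_t value; };

static kern_return_t
example_struct_call(io_connect_t connect)
{
	struct example_request  req  = { 1, 0 };
	struct example_response resp = { 0 };
	size_t respSize = sizeof(resp);

	return IOConnectCallStructMethod(connect, 0 /* selector */,
	    &req, sizeof(req), &resp, &respSize);
}
#endif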
4779
4780 kern_return_t shim_io_connect_method_structureI_structureO(
4781 IOExternalMethod * method,
4782 IOService * object,
4783 io_struct_inband_t input,
4784 mach_msg_type_number_t inputCount,
4785 io_struct_inband_t output,
4786 IOByteCount * outputCount )
4787 {
4788 IOMethod func;
4789 IOReturn err = kIOReturnBadArgument;
4790
4791 do
4792 {
4793 if( (kIOUCVariableStructureSize != method->count0)
4794 && (inputCount != method->count0))
4795 {
4796 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4797 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4798 continue;
4799 }
4800 if( (kIOUCVariableStructureSize != method->count1)
4801 && (*outputCount != method->count1))
4802 {
4803 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4804 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4805 continue;
4806 }
4807
4808 func = method->func;
4809
4810 if( method->count1) {
4811 if( method->count0) {
4812 err = (object->*func)( input, output,
4813 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4814 } else {
4815 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4816 }
4817 } else {
4818 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4819 }
4820 }
4821 while( false);
4822
4823
4824 return( err);
4825 }
4826
4827 kern_return_t shim_io_async_method_structureI_structureO(
4828 IOExternalAsyncMethod * method,
4829 IOService * object,
4830 mach_port_t asyncWakePort,
4831 io_user_reference_t * asyncReference,
4832 uint32_t asyncReferenceCount,
4833 io_struct_inband_t input,
4834 mach_msg_type_number_t inputCount,
4835 io_struct_inband_t output,
4836 mach_msg_type_number_t * outputCount )
4837 {
4838 IOAsyncMethod func;
4839 uint32_t i;
4840 IOReturn err;
4841 io_async_ref_t reference;
4842
4843 for (i = 0; i < asyncReferenceCount; i++)
4844 reference[i] = REF32(asyncReference[i]);
4845
4846 err = kIOReturnBadArgument;
4847 do
4848 {
4849 if( (kIOUCVariableStructureSize != method->count0)
4850 && (inputCount != method->count0))
4851 {
4852 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4853 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4854 continue;
4855 }
4856 if( (kIOUCVariableStructureSize != method->count1)
4857 && (*outputCount != method->count1))
4858 {
4859 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4860 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4861 continue;
4862 }
4863
4864 func = method->func;
4865
4866 if( method->count1) {
4867 if( method->count0) {
4868 err = (object->*func)( reference,
4869 input, output,
4870 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4871 } else {
4872 err = (object->*func)( reference,
4873 output, outputCount, 0, 0, 0, 0 );
4874 }
4875 } else {
4876 err = (object->*func)( reference,
4877 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4878 }
4879 }
4880 while( false);
4881
4882 return( err);
4883 }
4884
4885 #if !NO_KEXTD
4886 bool gIOKextdClearedBusy = false;
4887 #endif
4888
4889 /* Routine io_catalog_send_data */
4890 kern_return_t is_io_catalog_send_data(
4891 mach_port_t master_port,
4892 uint32_t flag,
4893 io_buf_ptr_t inData,
4894 mach_msg_type_number_t inDataCount,
4895 kern_return_t * result)
4896 {
4897 #if NO_KEXTD
4898 return kIOReturnNotPrivileged;
4899 #else /* NO_KEXTD */
4900 OSObject * obj = 0;
4901 vm_offset_t data;
4902 kern_return_t kr = kIOReturnError;
4903
4904 //printf("io_catalog_send_data called. flag: %d\n", flag);
4905
4906 if( master_port != master_device_port)
4907 return kIOReturnNotPrivileged;
4908
4909 if( (flag != kIOCatalogRemoveKernelLinker &&
4910 flag != kIOCatalogKextdActive &&
4911 flag != kIOCatalogKextdFinishedLaunching) &&
4912 ( !inData || !inDataCount) )
4913 {
4914 return kIOReturnBadArgument;
4915 }
4916
4917 if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
4918 {
4919 OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
4920 IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
4921 OSSafeReleaseNULL(taskName);
4922 // For now, fake success to not break applications relying on this function succeeding.
4923 // See <rdar://problem/32554970> for more details.
4924 return kIOReturnSuccess;
4925 }
4926
4927 if (inData) {
4928 vm_map_offset_t map_data;
4929
4930 if( inDataCount > sizeof(io_struct_inband_t) * 1024)
4931 return( kIOReturnMessageTooLarge);
4932
4933 kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
4934 data = CAST_DOWN(vm_offset_t, map_data);
4935
4936 if( kr != KERN_SUCCESS)
4937 return kr;
4938
4939 // must return success after vm_map_copyout() succeeds
4940
4941 if( inDataCount ) {
4942 obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
4943 vm_deallocate( kernel_map, data, inDataCount );
4944 if( !obj) {
4945 *result = kIOReturnNoMemory;
4946 return( KERN_SUCCESS);
4947 }
4948 }
4949 }
4950
4951 switch ( flag ) {
4952 case kIOCatalogResetDrivers:
4953 case kIOCatalogResetDriversNoMatch: {
4954 OSArray * array;
4955
4956 array = OSDynamicCast(OSArray, obj);
4957 if (array) {
4958 if ( !gIOCatalogue->resetAndAddDrivers(array,
4959 flag == kIOCatalogResetDrivers) ) {
4960
4961 kr = kIOReturnError;
4962 }
4963 } else {
4964 kr = kIOReturnBadArgument;
4965 }
4966 }
4967 break;
4968
4969 case kIOCatalogAddDrivers:
4970 case kIOCatalogAddDriversNoMatch: {
4971 OSArray * array;
4972
4973 array = OSDynamicCast(OSArray, obj);
4974 if ( array ) {
4975 if ( !gIOCatalogue->addDrivers( array ,
4976 flag == kIOCatalogAddDrivers) ) {
4977 kr = kIOReturnError;
4978 }
4979 }
4980 else {
4981 kr = kIOReturnBadArgument;
4982 }
4983 }
4984 break;
4985
4986 case kIOCatalogRemoveDrivers:
4987 case kIOCatalogRemoveDriversNoMatch: {
4988 OSDictionary * dict;
4989
4990 dict = OSDynamicCast(OSDictionary, obj);
4991 if ( dict ) {
4992 if ( !gIOCatalogue->removeDrivers( dict,
4993 flag == kIOCatalogRemoveDrivers ) ) {
4994 kr = kIOReturnError;
4995 }
4996 }
4997 else {
4998 kr = kIOReturnBadArgument;
4999 }
5000 }
5001 break;
5002
5003 case kIOCatalogStartMatching: {
5004 OSDictionary * dict;
5005
5006 dict = OSDynamicCast(OSDictionary, obj);
5007 if ( dict ) {
5008 if ( !gIOCatalogue->startMatching( dict ) ) {
5009 kr = kIOReturnError;
5010 }
5011 }
5012 else {
5013 kr = kIOReturnBadArgument;
5014 }
5015 }
5016 break;
5017
5018 case kIOCatalogRemoveKernelLinker:
5019 kr = KERN_NOT_SUPPORTED;
5020 break;
5021
5022 case kIOCatalogKextdActive:
5023 #if !NO_KEXTD
5024 IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
5025 OSKext::setKextdActive();
5026
5027 /* Dump all nonloaded startup extensions; kextd will now send them
5028 * down on request.
5029 */
5030 OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
5031 #endif
5032 kr = kIOReturnSuccess;
5033 break;
5034
5035 case kIOCatalogKextdFinishedLaunching: {
5036 #if !NO_KEXTD
5037 if (!gIOKextdClearedBusy) {
5038 IOService * serviceRoot = IOService::getServiceRoot();
5039 if (serviceRoot) {
5040 IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
5041 serviceRoot->adjustBusy(-1);
5042 gIOKextdClearedBusy = true;
5043 }
5044 }
5045 #endif
5046 kr = kIOReturnSuccess;
5047 }
5048 break;
5049
5050 default:
5051 kr = kIOReturnBadArgument;
5052 break;
5053 }
5054
5055 if (obj) obj->release();
5056
5057 *result = kr;
5058 return( KERN_SUCCESS);
5059 #endif /* NO_KEXTD */
5060 }
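
/*
 * Illustrative sketch (not part of this file): user space typically reaches this
 * routine through IOCatalogueSendData() in IOKitLib, passing an XML-serialized
 * personality array or matching dictionary. As the checks above show, the caller
 * must present the master device port and, on recent systems, hold the
 * com.apple.rootless.kext-management entitlement; unentitled callers currently
 * get a logged, faked success. The flag constants are the kIOCatalog* values
 * handled in the switch above; the personality content below is hypothetical.
 */
#if 0 /* example only */
#include <IOKit/IOKitLib.h>
#include <IOKit/IOCFSerialize.h>

static kern_return_t
example_start_matching(CFDictionaryRef matchingDict)
{
	kern_return_t kr;
	CFDataRef xml = IOCFSerialize(matchingDict, 0);

	if (!xml) return kIOReturnNoMemory;

	kr = IOCatalogueSendData(kIOMasterPortDefault, kIOCatalogStartMatching,
	    (const char *) CFDataGetBytePtr(xml),
	    (uint32_t) CFDataGetLength(xml));

	CFRelease(xml);
	return kr;
}
#endif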
5061
5062 /* Routine io_catalog_terminate */
5063 kern_return_t is_io_catalog_terminate(
5064 mach_port_t master_port,
5065 uint32_t flag,
5066 io_name_t name )
5067 {
5068 kern_return_t kr;
5069
5070 if( master_port != master_device_port )
5071 return kIOReturnNotPrivileged;
5072
5073 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
5074 kIOClientPrivilegeAdministrator );
5075 if( kIOReturnSuccess != kr)
5076 return( kr );
5077
5078 switch ( flag ) {
5079 #if !defined(SECURE_KERNEL)
5080 case kIOCatalogServiceTerminate:
5081 OSIterator * iter;
5082 IOService * service;
5083
5084 iter = IORegistryIterator::iterateOver(gIOServicePlane,
5085 kIORegistryIterateRecursively);
5086 if ( !iter )
5087 return kIOReturnNoMemory;
5088
5089 do {
5090 iter->reset();
5091 while( (service = (IOService *)iter->getNextObject()) ) {
5092 if( service->metaCast(name)) {
5093 if ( !service->terminate( kIOServiceRequired
5094 | kIOServiceSynchronous) ) {
5095 kr = kIOReturnUnsupported;
5096 break;
5097 }
5098 }
5099 }
5100 } while( !service && !iter->isValid());
5101 iter->release();
5102 break;
5103
5104 case kIOCatalogModuleUnload:
5105 case kIOCatalogModuleTerminate:
5106 kr = gIOCatalogue->terminateDriversForModule(name,
5107 flag == kIOCatalogModuleUnload);
5108 break;
5109 #endif
5110
5111 default:
5112 kr = kIOReturnBadArgument;
5113 break;
5114 }
5115
5116 return( kr );
5117 }
5118
5119 /* Routine io_catalog_get_data */
5120 kern_return_t is_io_catalog_get_data(
5121 mach_port_t master_port,
5122 uint32_t flag,
5123 io_buf_ptr_t *outData,
5124 mach_msg_type_number_t *outDataCount)
5125 {
5126 kern_return_t kr = kIOReturnSuccess;
5127 OSSerialize * s;
5128
5129 if( master_port != master_device_port)
5130 return kIOReturnNotPrivileged;
5131
5132 //printf("io_catalog_get_data called. flag: %d\n", flag);
5133
5134 s = OSSerialize::withCapacity(4096);
5135 if ( !s )
5136 return kIOReturnNoMemory;
5137
5138 kr = gIOCatalogue->serializeData(flag, s);
5139
5140 if ( kr == kIOReturnSuccess ) {
5141 vm_offset_t data;
5142 vm_map_copy_t copy;
5143 vm_size_t size;
5144
5145 size = s->getLength();
5146 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5147 if ( kr == kIOReturnSuccess ) {
5148 bcopy(s->text(), (void *)data, size);
5149 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5150 (vm_map_size_t)size, true, &copy);
5151 *outData = (char *)copy;
5152 *outDataCount = size;
5153 }
5154 }
5155
5156 s->release();
5157
5158 return kr;
5159 }
5160
5161 /* Routine io_catalog_get_gen_count */
5162 kern_return_t is_io_catalog_get_gen_count(
5163 mach_port_t master_port,
5164 uint32_t *genCount)
5165 {
5166 if( master_port != master_device_port)
5167 return kIOReturnNotPrivileged;
5168
5169 //printf("io_catalog_get_gen_count called.\n");
5170
5171 if ( !genCount )
5172 return kIOReturnBadArgument;
5173
5174 *genCount = gIOCatalogue->getGenerationCount();
5175
5176 return kIOReturnSuccess;
5177 }
5178
5179 /* Routine io_catalog_module_loaded.
5180 * Invoked from IOKitLib's IOCatalogueModuleLoaded(); appears to be unused.
5181 */
5182 kern_return_t is_io_catalog_module_loaded(
5183 mach_port_t master_port,
5184 io_name_t name)
5185 {
5186 if( master_port != master_device_port)
5187 return kIOReturnNotPrivileged;
5188
5189 //printf("io_catalog_module_loaded called. name %s\n", name);
5190
5191 if ( !name )
5192 return kIOReturnBadArgument;
5193
5194 gIOCatalogue->moduleHasLoaded(name);
5195
5196 return kIOReturnSuccess;
5197 }
5198
5199 kern_return_t is_io_catalog_reset(
5200 mach_port_t master_port,
5201 uint32_t flag)
5202 {
5203 if( master_port != master_device_port)
5204 return kIOReturnNotPrivileged;
5205
5206 switch ( flag ) {
5207 case kIOCatalogResetDefault:
5208 gIOCatalogue->reset();
5209 break;
5210
5211 default:
5212 return kIOReturnBadArgument;
5213 }
5214
5215 return kIOReturnSuccess;
5216 }
5217
5218 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5219 {
5220 kern_return_t result = kIOReturnBadArgument;
5221 IOUserClient *userClient;
5222
5223 if ((userClient = OSDynamicCast(IOUserClient,
5224 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5225 IOExternalTrap *trap;
5226 IOService *target = NULL;
5227
5228 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5229
5230 if (trap && target) {
5231 IOTrap func;
5232
5233 func = trap->func;
5234
5235 if (func) {
5236 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5237 }
5238 }
5239
5240 iokit_remove_connect_reference(userClient);
5241 }
5242
5243 return result;
5244 }
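
/*
 * Illustrative sketch (not part of this file): the two halves of the trap path
 * above. A user client exposes traps by overriding getTargetAndTrapForIndex(),
 * and user space invokes them with IOConnectTrap0()..IOConnectTrap6() from
 * IOKitLib; unused trailing arguments typically arrive as zero. Names are
 * hypothetical.
 */
#if 0 /* example only */
/* Kernel side */
IOExternalTrap *
MyTrapUserClient::getTargetAndTrapForIndex(IOService **targetP, UInt32 index)
{
	static const IOExternalTrap sTraps[] = {
	    { NULL, (IOTrap) &MyTrapUserClient::fastPath },	/* index 0 */
	};

	if (index >= (sizeof(sTraps) / sizeof(sTraps[0]))) return NULL;
	*targetP = this;
	return (IOExternalTrap *) &sTraps[index];
}

/* User side */
static kern_return_t
example_trap(io_connect_t connect, uintptr_t arg)
{
	return IOConnectTrap1(connect, 0 /* index */, arg);
}
#endif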
5245
5246 } /* extern "C" */
5247
5248 IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
5249 IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
5250 {
5251 IOReturn err;
5252 IOService * object;
5253 IOByteCount structureOutputSize;
5254
5255 if (dispatch)
5256 {
5257 uint32_t count;
5258 count = dispatch->checkScalarInputCount;
5259 if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
5260 {
5261 return (kIOReturnBadArgument);
5262 }
5263
5264 count = dispatch->checkStructureInputSize;
5265 if ((kIOUCVariableStructureSize != count)
5266 && (count != ((args->structureInputDescriptor)
5267 ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
5268 {
5269 return (kIOReturnBadArgument);
5270 }
5271
5272 count = dispatch->checkScalarOutputCount;
5273 if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
5274 {
5275 return (kIOReturnBadArgument);
5276 }
5277
5278 count = dispatch->checkStructureOutputSize;
5279 if ((kIOUCVariableStructureSize != count)
5280 && (count != ((args->structureOutputDescriptor)
5281 ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
5282 {
5283 return (kIOReturnBadArgument);
5284 }
5285
5286 if (dispatch->function)
5287 err = (*dispatch->function)(target, reference, args);
5288 else
5289 err = kIOReturnNoCompletion; /* implementor can dispatch */
5290
5291 return (err);
5292 }
5293
5294
5295 // pre-Leopard APIs don't do ool structs
5296 if (args->structureInputDescriptor || args->structureOutputDescriptor)
5297 {
5298 err = kIOReturnIPCError;
5299 return (err);
5300 }
5301
5302 structureOutputSize = args->structureOutputSize;
5303
5304 if (args->asyncWakePort)
5305 {
5306 IOExternalAsyncMethod * method;
5307 object = 0;
5308 if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
5309 return (kIOReturnUnsupported);
5310
5311 if (kIOUCForegroundOnly & method->flags)
5312 {
5313 if (task_is_gpu_denied(current_task()))
5314 return (kIOReturnNotPermitted);
5315 }
5316
5317 switch (method->flags & kIOUCTypeMask)
5318 {
5319 case kIOUCScalarIStructI:
5320 err = shim_io_async_method_scalarI_structureI( method, object,
5321 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5322 args->scalarInput, args->scalarInputCount,
5323 (char *)args->structureInput, args->structureInputSize );
5324 break;
5325
5326 case kIOUCScalarIScalarO:
5327 err = shim_io_async_method_scalarI_scalarO( method, object,
5328 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5329 args->scalarInput, args->scalarInputCount,
5330 args->scalarOutput, &args->scalarOutputCount );
5331 break;
5332
5333 case kIOUCScalarIStructO:
5334 err = shim_io_async_method_scalarI_structureO( method, object,
5335 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5336 args->scalarInput, args->scalarInputCount,
5337 (char *) args->structureOutput, &args->structureOutputSize );
5338 break;
5339
5340
5341 case kIOUCStructIStructO:
5342 err = shim_io_async_method_structureI_structureO( method, object,
5343 args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
5344 (char *)args->structureInput, args->structureInputSize,
5345 (char *) args->structureOutput, &args->structureOutputSize );
5346 break;
5347
5348 default:
5349 err = kIOReturnBadArgument;
5350 break;
5351 }
5352 }
5353 else
5354 {
5355 IOExternalMethod * method;
5356 object = 0;
5357 if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
5358 return (kIOReturnUnsupported);
5359
5360 if (kIOUCForegroundOnly & method->flags)
5361 {
5362 if (task_is_gpu_denied(current_task()))
5363 return (kIOReturnNotPermitted);
5364 }
5365
5366 switch (method->flags & kIOUCTypeMask)
5367 {
5368 case kIOUCScalarIStructI:
5369 err = shim_io_connect_method_scalarI_structureI( method, object,
5370 args->scalarInput, args->scalarInputCount,
5371 (char *) args->structureInput, args->structureInputSize );
5372 break;
5373
5374 case kIOUCScalarIScalarO:
5375 err = shim_io_connect_method_scalarI_scalarO( method, object,
5376 args->scalarInput, args->scalarInputCount,
5377 args->scalarOutput, &args->scalarOutputCount );
5378 break;
5379
5380 case kIOUCScalarIStructO:
5381 err = shim_io_connect_method_scalarI_structureO( method, object,
5382 args->scalarInput, args->scalarInputCount,
5383 (char *) args->structureOutput, &structureOutputSize );
5384 break;
5385
5386
5387 case kIOUCStructIStructO:
5388 err = shim_io_connect_method_structureI_structureO( method, object,
5389 (char *) args->structureInput, args->structureInputSize,
5390 (char *) args->structureOutput, &structureOutputSize );
5391 break;
5392
5393 default:
5394 err = kIOReturnBadArgument;
5395 break;
5396 }
5397 }
5398
5399 args->structureOutputSize = structureOutputSize;
5400
5401 return (err);
5402 }
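
/*
 * Illustrative sketch (not part of this file): how a modern user client is
 * expected to feed the dispatch-table path at the top of externalMethod().
 * The subclass selects a dispatch entry for the selector and defers to the
 * superclass, which performs the four count/size checks visible above before
 * invoking the static handler. Class, method and count values are hypothetical.
 */
#if 0 /* example only */
IOReturn
MyUserClient::externalMethod(uint32_t selector, IOExternalMethodArguments *args,
    IOExternalMethodDispatch *dispatch, OSObject *target, void *reference)
{
	static const IOExternalMethodDispatch sMethods[] = {
	    /* function, scalar in, struct in, scalar out, struct out */
	    { &MyUserClient::sDoAdd, 2, 0, 1, 0 },	/* selector 0 */
	};

	if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
		dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
		if (!target) target = this;
	}
	return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
}

/* Static handler: adds the two scalar inputs checked for above. */
IOReturn
MyUserClient::sDoAdd(OSObject *target, void *reference, IOExternalMethodArguments *args)
{
	args->scalarOutput[0] = args->scalarInput[0] + args->scalarInput[1];
	return kIOReturnSuccess;
}
#endif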
5403
5404 #if __LP64__
5405 OSMetaClassDefineReservedUnused(IOUserClient, 0);
5406 OSMetaClassDefineReservedUnused(IOUserClient, 1);
5407 #else
5408 OSMetaClassDefineReservedUsed(IOUserClient, 0);
5409 OSMetaClassDefineReservedUsed(IOUserClient, 1);
5410 #endif
5411 OSMetaClassDefineReservedUnused(IOUserClient, 2);
5412 OSMetaClassDefineReservedUnused(IOUserClient, 3);
5413 OSMetaClassDefineReservedUnused(IOUserClient, 4);
5414 OSMetaClassDefineReservedUnused(IOUserClient, 5);
5415 OSMetaClassDefineReservedUnused(IOUserClient, 6);
5416 OSMetaClassDefineReservedUnused(IOUserClient, 7);
5417 OSMetaClassDefineReservedUnused(IOUserClient, 8);
5418 OSMetaClassDefineReservedUnused(IOUserClient, 9);
5419 OSMetaClassDefineReservedUnused(IOUserClient, 10);
5420 OSMetaClassDefineReservedUnused(IOUserClient, 11);
5421 OSMetaClassDefineReservedUnused(IOUserClient, 12);
5422 OSMetaClassDefineReservedUnused(IOUserClient, 13);
5423 OSMetaClassDefineReservedUnused(IOUserClient, 14);
5424 OSMetaClassDefineReservedUnused(IOUserClient, 15);
5425