]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IOUserClient.cpp
6a3fc814b113beef0832682348598ed16ae60df8
[apple/xnu.git] / iokit / Kernel / IOUserClient.cpp
1 /*
2 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <libkern/c++/OSKext.h>
31 #include <IOKit/IOKitServer.h>
32 #include <IOKit/IOKitKeysPrivate.h>
33 #include <IOKit/IOUserClient.h>
34 #include <IOKit/IOService.h>
35 #include <IOKit/IORegistryEntry.h>
36 #include <IOKit/IOCatalogue.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOBufferMemoryDescriptor.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOBSD.h>
41 #include <IOKit/IOStatisticsPrivate.h>
42 #include <IOKit/IOTimeStamp.h>
43 #include <IOKit/system.h>
44 #include <libkern/OSDebug.h>
45 #include <sys/proc.h>
46 #include <sys/kauth.h>
47 #include <sys/codesign.h>
48
49 #include <mach/sdt.h>
50
51 #if CONFIG_MACF
52
53 extern "C" {
54 #include <security/mac_framework.h>
55 };
56 #include <sys/kauth.h>
57
58 #define IOMACF_LOG 0
59
60 #endif /* CONFIG_MACF */
61
62 #include <IOKit/assert.h>
63
64 #include "IOServicePrivate.h"
65 #include "IOKitKernelInternal.h"
66
// Marshalling helpers for user-client scalar arguments.
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))   // zero-extend a 32-bit value to a 64-bit user scalar
#define SCALAR32(x) ((uint32_t )x)                          // truncate a user scalar to 32 bits
#define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))        // 32-bit scalar passed through a void* slot
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))    // widen to a 64-bit user reference
#define REF32(x)    ((int)(x))                              // narrow a reference for 32-bit clients

// Flag bits kept in the low bits of asyncRef[kIOAsyncReservedIndex]
// alongside the wake port (see setAsyncReference64 below).
enum
{
    kIOUCAsync0Flags = 3ULL,            // mask covering both flag bits
    kIOUCAsync64Flag = 1ULL,            // owning task has a 64-bit address map
    kIOUCAsyncErrorLoggedFlag = 2ULL    // per name: an error was already logged for this ref -- not set in this chunk, confirm
};
79
#if IOKITSTATS

// Bookkeeping hooks for per-user-client IOStatistics accounting.
// Compiled to no-ops when IOKITSTATS is disabled (below).

#define IOStatisticsRegisterCounter() \
do { \
    reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
    if (reserved) \
        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
    IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
105
#if DEVELOPMENT || DEBUG

// Debug-only: temporarily replace this frame's saved return address with
// (a) so tools that walk the stack attribute the call to that address.
// Must be paired with FAKE_STACK_FRAME_END() in the same frame, which
// restores the real return address before returning.
#define FAKE_STACK_FRAME(a)	\
	const void ** __frameptr;	\
	const void  * __retaddr;	\
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);	\
	__retaddr = __frameptr[1];	\
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END()	\
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */
124
125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // definitions we should get from osfmk
128
129 //typedef struct ipc_port * ipc_port_t;
130 typedef natural_t ipc_kobject_type_t;
131
132 #define IKOT_IOKIT_SPARE 27
133 #define IKOT_IOKIT_CONNECT 29
134 #define IKOT_IOKIT_OBJECT 30
135
136 extern "C" {
137
138 extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
139 ipc_kobject_type_t type );
140
141 extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
142
143 extern mach_port_name_t iokit_make_send_right( task_t task,
144 io_object_t obj, ipc_kobject_type_t type );
145
146 extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
147
148 extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
149
150 extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
151
152 extern ipc_port_t master_device_port;
153
154 extern void iokit_retain_port( ipc_port_t port );
155 extern void iokit_release_port( ipc_port_t port );
156 extern void iokit_release_port_send( ipc_port_t port );
157
158 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
159
160 #include <mach/mach_traps.h>
161 #include <vm/vm_map.h>
162
163 } /* extern "C" */
164
165
166 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
167
168 // IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
169
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.
// Instances live in the global gIOObjectPorts / gIOConnectPorts
// dictionaries keyed by the object pointer; all access is serialized by
// gIOObjectPortLock.
class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    OSObject *	object;         // NOTE(review): not assigned in this chunk -- confirm use elsewhere
    ipc_port_t	port;           // kernel ipc port allocated for the object
    UInt32      mscount;        // make-send count, compared on no-senders notifications
    UInt8       holdDestroy;    // when set, releasePortForObject leaves the entry alive

    // Look up (bumping mscount) or create the port wrapper for obj.
    static IOMachPort * portForObject( OSObject * obj,
				ipc_kobject_type_t type );
    // Handle a no-senders notification; true if the entry was removed.
    static bool noMoreSendersForObject( OSObject * obj,
				ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    // Drop the dictionary entry for obj unless holdDestroy is set.
    static void releasePortForObject( OSObject * obj,
				ipc_kobject_type_t type );
    // Mark obj's port to survive releasePortForObject.
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    // Global object->IOMachPort dictionary for the given port type.
    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
				io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};
194
195 #define super OSObject
196 OSDefineMetaClassAndStructors(IOMachPort, OSObject)
197
198 static IOLock * gIOObjectPortLock;
199
200 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
201
202 // not in dictForType() for debugging ease
203 static OSDictionary * gIOObjectPorts;
204 static OSDictionary * gIOConnectPorts;
205
206 OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
207 {
208 OSDictionary ** dict;
209
210 if( IKOT_IOKIT_OBJECT == type )
211 dict = &gIOObjectPorts;
212 else if( IKOT_IOKIT_CONNECT == type )
213 dict = &gIOConnectPorts;
214 else
215 return( 0 );
216
217 if( 0 == *dict)
218 *dict = OSDictionary::withCapacity( 1 );
219
220 return( *dict );
221 }
222
223 IOMachPort * IOMachPort::portForObject ( OSObject * obj,
224 ipc_kobject_type_t type )
225 {
226 IOMachPort * inst = 0;
227 OSDictionary * dict;
228
229 IOTakeLock( gIOObjectPortLock);
230
231 do {
232
233 dict = dictForType( type );
234 if( !dict)
235 continue;
236
237 if( (inst = (IOMachPort *)
238 dict->getObject( (const OSSymbol *) obj ))) {
239 inst->mscount++;
240 inst->retain();
241 continue;
242 }
243
244 inst = new IOMachPort;
245 if( inst && !inst->init()) {
246 inst = 0;
247 continue;
248 }
249
250 inst->port = iokit_alloc_object_port( obj, type );
251 if( inst->port) {
252 // retains obj
253 dict->setObject( (const OSSymbol *) obj, inst );
254 inst->mscount++;
255
256 } else {
257 inst->release();
258 inst = 0;
259 }
260
261 } while( false );
262
263 IOUnlock( gIOObjectPortLock);
264
265 return( inst );
266 }
267
/*
 * Handle a no-senders notification for obj's port of the given type.
 * Returns true when the port entry was removed (or no entry existed),
 * false when new send rights were created since the notification fired;
 * in that case *mscount is updated to the current make-send count so
 * the caller can re-arm the notification.
 */
bool IOMachPort::noMoreSendersForObject( OSObject * obj,
				ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary *	dict;
    IOMachPort *	machPort;
    IOUserClient *      uc;
    bool		destroyed = true;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {
        obj->retain();          // keep obj alive across the dictionary removal

	machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if( machPort) {
            // Notification is stale if more sends were made since it fired.
	    destroyed = (machPort->mscount <= *mscount);
	    if (!destroyed) *mscount = machPort->mscount;
	    else
	    {
                // For connection ports, tell the user client its last
                // send right is gone before dropping the dictionary's ref.
	        if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
		{
		    uc->noMoreSenders();
		}
	        dict->removeObject( (const OSSymbol *) obj );
	    }
	}
	obj->release();
    }

    IOUnlock( gIOObjectPortLock);

    return( destroyed );
}
301
302 void IOMachPort::releasePortForObject( OSObject * obj,
303 ipc_kobject_type_t type )
304 {
305 OSDictionary * dict;
306 IOMachPort * machPort;
307
308 assert(IKOT_IOKIT_CONNECT != type);
309
310 IOTakeLock( gIOObjectPortLock);
311
312 if( (dict = dictForType( type ))) {
313 obj->retain();
314 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
315 if( machPort && !machPort->holdDestroy)
316 dict->removeObject( (const OSSymbol *) obj );
317 obj->release();
318 }
319
320 IOUnlock( gIOObjectPortLock);
321 }
322
323 void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
324 {
325 OSDictionary * dict;
326 IOMachPort * machPort;
327
328 IOLockLock( gIOObjectPortLock );
329
330 if( (dict = dictForType( type ))) {
331 machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
332 if( machPort)
333 machPort->holdDestroy = true;
334 }
335
336 IOLockUnlock( gIOObjectPortLock );
337 }
338
/*
 * Tear down user-space references to obj: drop its object port entry,
 * and for user clients, detach the connect port.  If the client still
 * has live memory mappings, the connect port is re-keyed onto the
 * mappings object (iokit_switch_object_port) so the mappings stay valid
 * until their own ports die.
 */
void IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // panther, 3160200
    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);
    obj->retain();      // keep obj alive while it is pulled from the dict

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
	IOMachPort * port;
	port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if (port)
	{
	    IOUserClient * uc;
	    if ((uc = OSDynamicCast(IOUserClient, obj)))
	    {
		uc->noMoreSenders();
		if (uc->mappings)
		{
		    // Hand the connect port over to the mappings object:
		    // re-key the dictionary entry and switch the kobject.
		    dict->setObject((const OSSymbol *) uc->mappings, port);
		    iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

		    uc->mappings->release();
		    uc->mappings = 0;
		}
	    }
	    dict->removeObject( (const OSSymbol *) obj );
	}
    }
    obj->release();
    IOUnlock( gIOObjectPortLock);
}
376
377 mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
378 io_object_t obj, ipc_kobject_type_t type )
379 {
380 return( iokit_make_send_right( task, obj, type ));
381 }
382
383 void IOMachPort::free( void )
384 {
385 if( port)
386 iokit_destroy_object_port( port );
387 super::free();
388 }
389
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
// Lock-protected wrapper around an OSIterator handed out to user space;
// each OSIterator call is serialized on the instance's lock.
class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject	*	userIteratorObject;     // wrapped iterator (released in free)
    IOLock      *	lock;                   // serializes reset/isValid/getNextObject

    // Wrap iter; consumes ownership on success, NULL on failure.
    static IOUserIterator * withIterator(OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
407
408 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
409
// Base class for user-space notification objects; reuses the inherited
// userIteratorObject slot (aliased as holdNotify) to hold the IONotifier
// keeping the notification armed.
class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

// The inherited iterator slot stores the IONotifier instead.
#define holdNotify	userIteratorObject

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    // Swap in a new notifier; the previous one (if any) is removed.
    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};
425
426 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
427
428 OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )
429
430 IOUserIterator *
431 IOUserIterator::withIterator(OSIterator * iter)
432 {
433 IOUserIterator * me;
434
435 if (!iter) return (0);
436
437 me = new IOUserIterator;
438 if (me && !me->init())
439 {
440 me->release();
441 me = 0;
442 }
443 if (!me) return me;
444 me->userIteratorObject = iter;
445
446 return (me);
447 }
448
449 bool
450 IOUserIterator::init( void )
451 {
452 if (!OSObject::init()) return (false);
453
454 lock = IOLockAlloc();
455 if( !lock)
456 return( false );
457
458 return (true);
459 }
460
461 void
462 IOUserIterator::free()
463 {
464 if (userIteratorObject) userIteratorObject->release();
465 if (lock) IOLockFree(lock);
466 OSObject::free();
467 }
468
469 void
470 IOUserIterator::reset()
471 {
472 IOLockLock(lock);
473 assert(OSDynamicCast(OSIterator, userIteratorObject));
474 ((OSIterator *)userIteratorObject)->reset();
475 IOLockUnlock(lock);
476 }
477
478 bool
479 IOUserIterator::isValid()
480 {
481 bool ret;
482
483 IOLockLock(lock);
484 assert(OSDynamicCast(OSIterator, userIteratorObject));
485 ret = ((OSIterator *)userIteratorObject)->isValid();
486 IOLockUnlock(lock);
487
488 return (ret);
489 }
490
491 OSObject *
492 IOUserIterator::getNextObject()
493 {
494 OSObject * ret;
495
496 IOLockLock(lock);
497 assert(OSDynamicCast(OSIterator, userIteratorObject));
498 ret = ((OSIterator *)userIteratorObject)->getNextObject();
499 IOLockUnlock(lock);
500
501 return (ret);
502 }
503
504 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
505 extern "C" {
506
507 // functions called from osfmk/device/iokit_rpc.c
508
509 void
510 iokit_add_reference( io_object_t obj )
511 {
512 if( obj)
513 obj->retain();
514 }
515
516 void
517 iokit_remove_reference( io_object_t obj )
518 {
519 if( obj)
520 obj->release();
521 }
522
523 void
524 iokit_add_connect_reference( io_object_t obj )
525 {
526 IOUserClient * uc;
527
528 if (!obj) return;
529
530 if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);
531
532 obj->retain();
533 }
534
/*
 * Drop a connect-port reference on obj.  When the last __ipc reference
 * of an inactive IOUserClient is released, and finalization was deferred
 * (see finalizeUserReferences), schedule the deferred finalize now.
 */
void
iokit_remove_connect_reference( io_object_t obj )
{
    IOUserClient * uc;
    bool           finalize = false;

    if (!obj) return;

    if ((uc = OSDynamicCast(IOUserClient, obj)))
    {
	if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
	{
	    // Last ipc ref just went away on an inactive client: claim the
	    // deferred-finalize flag under the lock so only one caller acts.
	    IOLockLock(gIOObjectPortLock);
	    if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
	    IOLockUnlock(gIOObjectPortLock);
	}
	if (finalize) uc->scheduleFinalize(true);
    }

    obj->release();
}
556
557 bool
558 IOUserClient::finalizeUserReferences(OSObject * obj)
559 {
560 IOUserClient * uc;
561 bool ok = true;
562
563 if ((uc = OSDynamicCast(IOUserClient, obj)))
564 {
565 IOLockLock(gIOObjectPortLock);
566 if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
567 IOLockUnlock(gIOObjectPortLock);
568 }
569 return (ok);
570 }
571
572 ipc_port_t
573 iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
574 {
575 IOMachPort * machPort;
576 ipc_port_t port;
577
578 if( (machPort = IOMachPort::portForObject( obj, type ))) {
579
580 port = machPort->port;
581 if( port)
582 iokit_retain_port( port );
583
584 machPort->release();
585
586 } else
587 port = NULL;
588
589 return( port );
590 }
591
592 kern_return_t
593 iokit_client_died( io_object_t obj, ipc_port_t /* port */,
594 ipc_kobject_type_t type, mach_port_mscount_t * mscount )
595 {
596 IOUserClient * client;
597 IOMemoryMap * map;
598 IOUserNotification * notify;
599
600 if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
601 return( kIOReturnNotReady );
602
603 if( IKOT_IOKIT_CONNECT == type)
604 {
605 if( (client = OSDynamicCast( IOUserClient, obj )))
606 {
607 IOStatisticsClientCall();
608 client->clientDied();
609 }
610 }
611 else if( IKOT_IOKIT_OBJECT == type)
612 {
613 if( (map = OSDynamicCast( IOMemoryMap, obj )))
614 map->taskDied();
615 else if( (notify = OSDynamicCast( IOUserNotification, obj )))
616 notify->setNotification( 0 );
617 }
618
619 return( kIOReturnSuccess );
620 }
621
622 }; /* extern "C" */
623
624 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
625
// Delivers service matched/terminated notifications to a user-space
// port: new services are queued in newSet and the client is pinged once
// (armed/disarmed) until it drains the queue via getNextObject().
class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    // Wire format of the ping sent to the client's wake port.
    struct PingMsg {
        mach_msg_header_t		msgHdr;
        OSNotificationHeader64	notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };    // cap on queued services

    PingMsg	*	pingMsg;        // preformatted message template
    vm_size_t	msgSize;        // actual size (reference is variable length)
    OSArray 	*	newSet;         // services not yet fetched by the client
    OSObject	*	lastEntry;      // last object handed out (released on next fetch)
    bool		armed;          // true when the next arrival should ping
    bool		ipcLogged;      // limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
		       bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    // Trampoline + instance handler invoked on each matching service.
    static bool _handler( void * target,
                          void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
658
// Delivers IOService interest messages (general interest notifications)
// to a user-space port; each message carries a port descriptor for the
// provider plus a variable-size argument payload.
class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    // Wire format: complex mach message with one port descriptor.
    struct PingMsg {
        mach_msg_header_t		msgHdr;
	mach_msg_body_t		msgBody;
	mach_msg_port_descriptor_t	ports[1];
        OSNotificationHeader64	notifyHeader __attribute__ ((packed));
    };

    PingMsg *		pingMsg;        // preformatted message template
    vm_size_t		msgSize;        // template size (reference is variable length)
    uint8_t		clientIs64;     // client ABI: 64-bit argument slots
    int			owningPID;      // pid captured at init (kIOMessageCopyClientID)
    bool		ipcLogged;      // limits send-failure logging to once

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
		       vm_size_t extraSize,
		       bool clientIs64 );

    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    // Trampoline + instance handler invoked for each interest message.
    static IOReturn _handler( void * target, void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
695
696 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
697
698 #undef super
699 #define super IOUserIterator
700 OSDefineMetaClass( IOUserNotification, IOUserIterator )
701 OSDefineAbstractStructors( IOUserNotification, IOUserIterator )
702
703 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
704
/*
 * Remove the armed IONotifier (if any) before tearing down the base
 * class; after remove() returns the handler can no longer be running,
 * so the rest of teardown is safe.
 */
void IOUserNotification::free( void )
{
    if (holdNotify)
    {
	assert(OSDynamicCast(IONotifier, holdNotify));
	((IONotifier *)holdNotify)->remove();
	holdNotify = 0;     // prevent the base free() from releasing it as an iterator
    }
    // can't be in handler now

    super::free();
}
717
718
719 void IOUserNotification::setNotification( IONotifier * notify )
720 {
721 OSObject * previousNotify;
722
723 IOLockLock( gIOObjectPortLock);
724
725 previousNotify = holdNotify;
726 holdNotify = notify;
727
728 IOLockUnlock( gIOObjectPortLock);
729
730 if( previousNotify)
731 {
732 assert(OSDynamicCast(IONotifier, previousNotify));
733 ((IONotifier *)previousNotify)->remove();
734 }
735 }
736
void IOUserNotification::reset()
{
    // Intentionally a no-op (original comment: "?") -- presumably there
    // is nothing meaningful to rewind for a notification; confirm.
}
741
bool IOUserNotification::isValid()
{
    // Notifications are always reported valid; no underlying state is consulted.
    return( true );
}
746
747 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
748
749 #undef super
750 #define super IOUserNotification
751 OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)
752
753 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
754
/*
 * Build the preformatted ping message for the client's wake port.
 * referenceSize is client-supplied and bounded by OSAsyncReference64;
 * the message is sized to carry exactly that many reference bytes.
 * Returns false on bad size or allocation failure (partially-allocated
 * state is cleaned up in free()).
 */
bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
                                      void * reference, vm_size_t referenceSize,
				      bool clientIs64 )
{
    if( !super::init())
        return( false );

    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
        return( false );

    // Reject oversized reference blobs (bounds the bcopy below).
    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    // Header is declared with the max reference; shrink to actual size.
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port	= port;
    pingMsg->msgHdr.msgh_bits 		= MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size 		= msgSize;
    pingMsg->msgHdr.msgh_id		= kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
789
790 void IOServiceUserNotification::invalidatePort(void)
791 {
792 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
793 }
794
/*
 * Snapshot the instance fields before super::free() runs (it tears down
 * base-class state), then release the client's send right, the message
 * buffer, and any queued objects afterward.
 */
void IOServiceUserNotification::free( void )
{
    PingMsg   *	_pingMsg;
    vm_size_t	_msgSize;
    OSArray   *	_newSet;
    OSObject  *	_lastEntry;

    _pingMsg   = pingMsg;
    _msgSize   = msgSize;
    _lastEntry = lastEntry;
    _newSet    = newSet;

    super::free();

    if( _pingMsg && _msgSize) {
	if (_pingMsg->msgHdr.msgh_remote_port) {
	    // Drop the send right held on the client's wake port.
	    iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
	}
        IOFree(_pingMsg, _msgSize);
    }

    if( _lastEntry)
        _lastEntry->release();

    if( _newSet)
        _newSet->release();
}
822
823 bool IOServiceUserNotification::_handler( void * target,
824 void * ref, IOService * newService, IONotifier * notifier )
825 {
826 return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
827 }
828
/*
 * Queue newService for the client and, if this is the first arrival
 * since the client last drained the queue (armed && empty), ping the
 * client's wake port.  Services beyond kMaxOutstanding are dropped.
 * Always returns true to keep the notification installed.
 */
bool IOServiceUserNotification::handler( void * ref,
                                         IOService * newService )
{
    unsigned int	count;
    kern_return_t	kr;
    ipc_port_t		port = NULL;
    bool		sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

        newSet->setObject( newService );
        // Ping only on the first arrival into an empty, armed queue.
        if( (sendPing = (armed && (0 == count))))
            armed = false;
    }

    IOUnlock( lock );

    // Keep the dying service's object port alive until the client has
    // had a chance to look at it.
    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    if( sendPing) {
	if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
	    pingMsg->msgHdr.msgh_local_port = port;
	else
	    pingMsg->msgHdr.msgh_local_port = NULL;

	kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
						     pingMsg->msgHdr.msgh_size,
						     (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
						     0);
	if( port)
	    iokit_release_port( port );

        // Log a failed send once per instance to avoid log spam.
	if( (KERN_SUCCESS != kr) && !ipcLogged)
	{
	    ipcLogged = true;
	    IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
        }
    }

    return( true );
}
874
/*
 * Pop the most recently queued service (LIFO) for the client.  The
 * returned object stays retained as lastEntry until the next call, so
 * the client's borrowed reference remains valid; an empty queue re-arms
 * the ping and returns NULL.
 */
OSObject * IOServiceUserNotification::getNextObject()
{
    unsigned int	count;
    OSObject *		result;
    OSObject *		releaseEntry;

    IOLockLock(lock);

    releaseEntry = lastEntry;
    count = newSet->getCount();
    if( count ) {
        result = newSet->getObject( count - 1 );
        result->retain();       // keep alive after removal from the array
        newSet->removeObject( count - 1);
    } else {
        result = 0;
        armed = true;           // next arrival should ping again
    }
    lastEntry = result;

    IOLockUnlock(lock);

    // Release the previous entry outside the lock.
    if (releaseEntry) releaseEntry->release();

    return( result );
}
901
902 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
903
904 OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)
905
906 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
907
/*
 * Build the preformatted complex message template used for interest
 * notifications: one port descriptor (filled per-send with the provider
 * port) plus a variable-size reference.  referenceSize is bounded by
 * OSAsyncReference64.  Returns false on bad size or allocation failure.
 */
bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
				void * reference, vm_size_t referenceSize, vm_size_t extraSize,
				bool client64 )
{
    if( !super::init())
        return( false );

    // Reject oversized reference blobs (bounds the bcopy below).
    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    clientIs64 = client64;

    // Captured now; reported later for kIOMessageCopyClientID.
    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    // Header declares the max reference; shrink to the actual size.
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port	= port;
    pingMsg->msgHdr.msgh_bits 		= MACH_MSGH_BITS_COMPLEX
					|  MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size 		= msgSize;
    pingMsg->msgHdr.msgh_id		= kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    // Provider port descriptor; name is filled in at send time.
    pingMsg->ports[0].name 		= 0;
    pingMsg->ports[0].disposition 	= MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type 		= MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size 		= extraSize;
    pingMsg->notifyHeader.type 		= type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
950
951 void IOServiceMessageUserNotification::invalidatePort(void)
952 {
953 if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
954 }
955
956 void IOServiceMessageUserNotification::free( void )
957 {
958 PingMsg * _pingMsg;
959 vm_size_t _msgSize;
960
961 _pingMsg = pingMsg;
962 _msgSize = msgSize;
963
964 super::free();
965
966 if( _pingMsg && _msgSize) {
967 if (_pingMsg->msgHdr.msgh_remote_port) {
968 iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
969 }
970 IOFree( _pingMsg, _msgSize);
971 }
972 }
973
974 IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
975 UInt32 messageType, IOService * provider,
976 void * argument, vm_size_t argSize )
977 {
978 return( ((IOServiceMessageUserNotification *) target)->handler(
979 ref, messageType, provider, argument, argSize));
980 }
981
/*
 * Deliver one interest message to the client: clone the preformatted
 * template, append an IOServiceInterestContent64 payload sized for the
 * caller's argument, attach the provider's port, and send.  Messages
 * small enough use a stack buffer; larger ones are heap-allocated.
 * A failed send is logged once per instance; the return value is
 * kIOReturnSuccess regardless of send outcome (only allocation fails).
 */
IOReturn IOServiceMessageUserNotification::handler( void * ref,
                UInt32 messageType, IOService * provider,
                void * messageArgument, vm_size_t callerArgSize )
{
    enum 			 { kLocalMsgSize = 0x100 };
    uint64_t		 stackMsg[kLocalMsgSize / sizeof(uint64_t)];
    void *		 allocMsg;
    kern_return_t	 kr;
    vm_size_t		 argSize;
    vm_size_t		 thisMsgSize;
    ipc_port_t		 thisPort, providerPort;
    struct PingMsg *	 thisMsg;
    IOServiceInterestContent64 * data;

    // Special query: hand back the owning pid, no message is sent.
    if (kIOMessageCopyClientID == messageType)
    {
        *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
        return (kIOReturnSuccess);
    }

    if (callerArgSize == 0)
    {
        // No payload buffer: the argument is passed by value, sized for
        // the client's ABI (one 64-bit slot, or 32 bits for 32-bit clients).
	if (clientIs64) argSize = sizeof(data->messageArgument[0]);
	else            argSize = sizeof(uint32_t);
    }
    else
    {
        // Clamp client payloads to the notification message limit.
        if( callerArgSize > kIOUserNotifyMaxMessageSize)
            callerArgSize = kIOUserNotifyMaxMessageSize;
	argSize = callerArgSize;
    }

    // adjust message size for ipc restrictions
    // Encode the unrounded payload size into the notify type field, then
    // round argSize up to the ipc alignment mask.
    natural_t type;
    type = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    // Template + content header (minus its declared argument slot) + payload.
    thisMsgSize = msgSize
		+ sizeof( IOServiceInterestContent64 )
		- sizeof( data->messageArgument)
		+ argSize;

    if (thisMsgSize > sizeof(stackMsg))
    {
	allocMsg = IOMalloc(thisMsgSize);
	if (!allocMsg) return (kIOReturnNoMemory);
	thisMsg = (typeof(thisMsg)) allocMsg;
    }
    else
    {
	allocMsg = 0;
	thisMsg = (typeof(thisMsg)) stackMsg;
    }

    bcopy(pingMsg, thisMsg, msgSize);
    thisMsg->notifyHeader.type = type;
    data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
    // == pingMsg->notifyHeader.content;
    data->messageType = messageType;

    if (callerArgSize == 0)
    {
	data->messageArgument[0] = (io_user_reference_t) messageArgument;
	if (!clientIs64)
	{
            // 32-bit clients read the value from the high word as well.
	    data->messageArgument[0] |= (data->messageArgument[0] << 32);
	}
    }
    else
    {
	bcopy( messageArgument, data->messageArgument, callerArgSize );
        // Zero the alignment padding added by the round-up above.
	bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
    }

    thisMsg->notifyHeader.type = type;
    thisMsg->msgHdr.msgh_size = thisMsgSize;

    // Attach the provider's object port and our own port as descriptors.
    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    thisMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    thisMsg->msgHdr.msgh_local_port = thisPort;

    kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
						 thisMsg->msgHdr.msgh_size,
						 (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
						 0);
    if( thisPort)
	iokit_release_port( thisPort );
    if( providerPort)
	iokit_release_port( providerPort );

    if (allocMsg)
	IOFree(allocMsg, thisMsgSize);

    // Log a failed send once per instance to avoid log spam.
    if((KERN_SUCCESS != kr) && !ipcLogged)
    {
	ipcLogged = true;
	IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return( kIOReturnSuccess );
}
1086
1087 OSObject * IOServiceMessageUserNotification::getNextObject()
1088 {
1089 return( 0 );
1090 }
1091
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093
1094 #undef super
1095 #define super IOService
1096 OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )
1097
1098 IOLock * gIOUserClientOwnersLock;
1099
1100 void IOUserClient::initialize( void )
1101 {
1102 gIOObjectPortLock = IOLockAlloc();
1103 gIOUserClientOwnersLock = IOLockAlloc();
1104 assert(gIOObjectPortLock && gIOUserClientOwnersLock);
1105 }
1106
/*
 * Fill a legacy (pointer-width) async reference: wake port in the
 * reserved slot (preserving the low flag bits already stored there),
 * plus the user callback and refcon.
 */
void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
                                     mach_port_t wakePort,
                                     void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
					 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}
1116
/*
 * Fill a 64-bit async reference: wake port in the reserved slot
 * (preserving the low flag bits already stored there), plus the user
 * callback address and refcon.
 */
void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
                                       mach_port_t wakePort,
                                       mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
					 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}
1126
1127 void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
1128 mach_port_t wakePort,
1129 mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
1130 {
1131 setAsyncReference64(asyncRef, wakePort, callback, refcon);
1132 if (vm_map_is_64bit(get_task_map(task))) {
1133 asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
1134 }
1135 }
1136
1137 static OSDictionary * CopyConsoleUser(UInt32 uid)
1138 {
1139 OSArray * array;
1140 OSDictionary * user = 0;
1141
1142 if ((array = OSDynamicCast(OSArray,
1143 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1144 {
1145 for (unsigned int idx = 0;
1146 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1147 idx++) {
1148 OSNumber * num;
1149
1150 if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
1151 && (uid == num->unsigned32BitValue())) {
1152 user->retain();
1153 break;
1154 }
1155 }
1156 array->release();
1157 }
1158 return user;
1159 }
1160
1161 static OSDictionary * CopyUserOnConsole(void)
1162 {
1163 OSArray * array;
1164 OSDictionary * user = 0;
1165
1166 if ((array = OSDynamicCast(OSArray,
1167 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
1168 {
1169 for (unsigned int idx = 0;
1170 (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
1171 idx++)
1172 {
1173 if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
1174 {
1175 user->retain();
1176 break;
1177 }
1178 }
1179 array->release();
1180 }
1181 return (user);
1182 }
1183
1184 IOReturn IOUserClient::clientHasAuthorization( task_t task,
1185 IOService * service )
1186 {
1187 proc_t p;
1188
1189 p = (proc_t) get_bsdtask_info(task);
1190 if (p)
1191 {
1192 uint64_t authorizationID;
1193
1194 authorizationID = proc_uniqueid(p);
1195 if (authorizationID)
1196 {
1197 if (service->getAuthorizationID() == authorizationID)
1198 {
1199 return (kIOReturnSuccess);
1200 }
1201 }
1202 }
1203
1204 return (kIOReturnNotPermitted);
1205 }
1206
// Check whether the caller identified by 'securityToken' holds the named
// privilege. 'securityToken' is interpreted differently per privilege:
// a task_t for most checks, or an IOUCProcessToken* for the secure-console
// check. Returns kIOReturnSuccess, kIOReturnNotPrivileged, or
// kIOReturnUnsupported for unknown privilege names.
IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
                                            const char * privilegeName )
{
    kern_return_t           kr;
    security_token_t        token;
    mach_msg_type_number_t  count;
    task_t                  task;
    OSDictionary *          user;
    bool                    secureConsole;


    // Foreground privilege: granted unless the current task is GPU-denied.
    if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
                sizeof(kIOClientPrivilegeForeground)))
    {
        if (task_is_gpu_denied(current_task()))
            return (kIOReturnNotPrivileged);
        else
            return (kIOReturnSuccess);
    }

    // Console-session privilege: the caller's audit session id must match
    // the audit session of the user currently on the console.
    if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
                  sizeof(kIOClientPrivilegeConsoleSession)))
    {
        kauth_cred_t cred;
        proc_t       p;

        task = (task_t) securityToken;
        if (!task)
            task = current_task();
        p = (proc_t) get_bsdtask_info(task);
        kr = kIOReturnNotPrivileged;

        if (p && (cred = kauth_cred_proc_ref(p)))
        {
            user = CopyUserOnConsole();
            if (user)
            {
                OSNumber * num;
                if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
                  && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
                {
                    kr = kIOReturnSuccess;
                }
                user->release();
            }
            kauth_cred_unref(&cred);
        }
        return (kr);
    }

    // Remaining checks need the task's security token. For the secure-console
    // check the token is wrapped in an IOUCProcessToken.
    if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
            sizeof(kIOClientPrivilegeSecureConsoleProcess))))
        task = (task_t)((IOUCProcessToken *)securityToken)->token;
    else
        task = (task_t)securityToken;

    count = TASK_SECURITY_TOKEN_COUNT;
    kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

    if (KERN_SUCCESS != kr)
    {}                                           // propagate task_info failure
    else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
                      sizeof(kIOClientPrivilegeAdministrator))) {
        // Administrator: the effective uid in the security token must be root.
        if (0 != token.val[0])
            kr = kIOReturnNotPrivileged;
    } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
                        sizeof(kIOClientPrivilegeLocalUser))) {
        // Local user: uid must have a console session (on console or not).
        user = CopyConsoleUser(token.val[0]);
        if ( user )
            user->release();
        else
            kr = kIOReturnNotPrivileged;
    } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
                                         sizeof(kIOClientPrivilegeConsoleUser))) {
        // Console user: uid's session must currently be on the console; for
        // the secure variant the secure-input pid must also match the caller.
        user = CopyConsoleUser(token.val[0]);
        if ( user ) {
            if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
                kr = kIOReturnNotPrivileged;
            else if ( secureConsole ) {
                OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
                if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
                    kr = kIOReturnNotPrivileged;
            }
            user->release();
        }
        else
            kr = kIOReturnNotPrivileged;
    } else
        kr = kIOReturnUnsupported;

    return (kr);
}
1299
// Look up one entitlement value from the code-signing blob of 'task'.
// Returns the value retained (caller releases), or NULL if the task has no
// BSD proc, no entitlements, the blob is oversized/unparseable, or the key
// is absent. Parsing happens in-kernel, so the blob size is capped.
OSObject * IOUserClient::copyClientEntitlement( task_t task,
                                                const char * entitlement )
{
#define MAX_ENTITLEMENTS_LEN    (128 * 1024)

    proc_t p = NULL;
    pid_t pid = 0;
    char procname[MAXCOMLEN + 1] = "";
    size_t len = 0;
    void *entitlements_blob = NULL;
    char *entitlements_data = NULL;
    OSObject *entitlements_obj = NULL;
    OSDictionary *entitlements = NULL;
    OSString *errorString = NULL;
    OSObject *value = NULL;

    p = (proc_t)get_bsdtask_info(task);
    if (p == NULL)
        goto fail;
    pid = proc_pid(p);
    proc_name(pid, procname, (int)sizeof(procname));

    if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
        goto fail;

    // Blob must be larger than its header for any payload to exist.
    if (len <= offsetof(CS_GenericBlob, data))
        goto fail;

    /*
     * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
     * we'll try to parse in the kernel.
     */
    len -= offsetof(CS_GenericBlob, data);
    if (len > MAX_ENTITLEMENTS_LEN) {
        IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
        goto fail;
    }

    /*
     * OSUnserializeXML() expects a nul-terminated string, but that isn't
     * what is stored in the entitlements blob.  Copy the string and
     * terminate it.
     */
    entitlements_data = (char *)IOMalloc(len + 1);
    if (entitlements_data == NULL)
        goto fail;
    memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
    entitlements_data[len] = '\0';

    entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
    if (errorString != NULL) {
        IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
        goto fail;
    }
    if (entitlements_obj == NULL)
        goto fail;

    entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
    if (entitlements == NULL)
        goto fail;

    /* Fetch the entitlement value from the dictionary. */
    value = entitlements->getObject(entitlement);
    if (value != NULL)
        value->retain();            // caller owns the returned reference

fail:
    // Single cleanup path: 'len' still holds the adjusted payload size used
    // for the IOMalloc above, so IOFree matches the allocation.
    if (entitlements_data != NULL)
        IOFree(entitlements_data, len + 1);
    if (entitlements_obj != NULL)
        entitlements_obj->release();
    if (errorString != NULL)
        errorString->release();
    return value;
}
1375
// Basic initialization; allocates the expansion data via reserve().
// NOTE(review): the '||' short-circuits super::init() when a property table
// already exists — looks intentional-but-odd upstream behavior; confirm
// before changing.
bool IOUserClient::init()
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}
1383
// Initialization with a property dictionary; same short-circuit pattern as
// init() above (super::init(dictionary) is skipped if a property table
// already exists).
bool IOUserClient::init(OSDictionary * dictionary)
{
    if (getPropertyTable() || super::init(dictionary))
        return reserve();

    return false;
}
1391
// Designated user-client initializer. The base class ignores the owning
// task, security id and connection type; subclasses override to record them.
bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type )
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}
1401
// Property-dictionary variant: runs super::init(properties) and then the
// 3-argument initWithTask(); both must succeed.
bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type,
                                OSDictionary * properties )
{
    bool ok;

    ok = super::init( properties );
    ok &= initWithTask( owningTask, securityID, type );

    return( ok );
}
1414
1415 bool IOUserClient::reserve()
1416 {
1417 if(!reserved) {
1418 reserved = IONew(ExpansionData, 1);
1419 if (!reserved) {
1420 return false;
1421 }
1422 }
1423 setTerminateDefer(NULL, true);
1424 IOStatisticsRegisterCounter();
1425
1426 return true;
1427 }
1428
// One (task, user client) ownership record. Each record is linked onto two
// lists: the task's list of owned clients and the client's list of owning
// tasks, so either side can enumerate the other.
struct IOUserClientOwner
{
    task_t         task;      // owning task
    queue_chain_t  taskLink;  // linkage on task_io_user_clients(task)
    IOUserClient * uc;        // owned user client
    queue_chain_t  ucLink;    // linkage on uc->owners
};
1436
1437 IOReturn
1438 IOUserClient::registerOwner(task_t task)
1439 {
1440 IOUserClientOwner * owner;
1441 IOReturn ret;
1442 bool newOwner;
1443
1444 IOLockLock(gIOUserClientOwnersLock);
1445
1446 newOwner = true;
1447 ret = kIOReturnSuccess;
1448
1449 if (!owners.next) queue_init(&owners);
1450 else
1451 {
1452 queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
1453 {
1454 if (task != owner->task) continue;
1455 newOwner = false;
1456 break;
1457 }
1458 }
1459 if (newOwner)
1460 {
1461 owner = IONew(IOUserClientOwner, 1);
1462 if (!newOwner) ret = kIOReturnNoMemory;
1463 else
1464 {
1465 owner->task = task;
1466 owner->uc = this;
1467 queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
1468 queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
1469 }
1470 }
1471
1472 IOLockUnlock(gIOUserClientOwnersLock);
1473
1474 return (ret);
1475 }
1476
// Called when no send rights to the client's port remain: unlink and free
// every owner record from both the client's and the tasks' lists, then mark
// the 'owners' queue head as uninitialized (next == NULL) so free() can
// assert on it.
void
IOUserClient::noMoreSenders(void)
{
    IOUserClientOwner * owner;

    IOLockLock(gIOUserClientOwnersLock);

    if (owners.next)
    {
        while (!queue_empty(&owners))
        {
            owner = (IOUserClientOwner *)(void *) queue_first(&owners);
            queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
            queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
            IODelete(owner, IOUserClientOwner, 1);
        }
        owners.next = owners.prev = NULL;
    }

    IOLockUnlock(gIOUserClientOwnersLock);
}
1498
// Task-termination hook: detach every user client owned by 'task'. Clients
// whose owner list becomes empty are collected (under the lock) onto a
// singly-linked "dead" list threaded through their own queue head, then
// clientDied()/released after the lock is dropped, so client teardown never
// runs while holding gIOUserClientOwnersLock.
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
    IOUserClientOwner * owner;
    IOUserClient      * dead;
    IOUserClient      * uc;
    queue_head_t      * taskque;

    IOLockLock(gIOUserClientOwnersLock);

    taskque = task_io_user_clients(task);
    dead = NULL;
    while (!queue_empty(taskque))
    {
        owner = (IOUserClientOwner *)(void *) queue_first(taskque);
        uc = owner->uc;
        queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
        queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
        if (queue_empty(&uc->owners))
        {
            uc->retain();
            IOLog("destroying out of band connect for %s\n", uc->getName());
            // now using the uc queue head as a singly linked queue,
            // leaving .next as NULL to mark it empty
            uc->owners.next = NULL;
            uc->owners.prev = (queue_entry_t) dead;
            dead = uc;
        }
        IODelete(owner, IOUserClientOwner, 1);
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    // Walk the collected list outside the lock; restore .prev to NULL so the
    // free()-time asserts on the queue head hold.
    while (dead)
    {
        uc = dead;
        dead = (IOUserClient *)(void *) dead->owners.prev;
        uc->owners.prev = NULL;
        if (uc->sharedInstance || !uc->closed) uc->clientDied();
        uc->release();
    }

    return (KERN_SUCCESS);
}
1543
// Destructor-equivalent: release the mappings set, unregister statistics,
// and free the expansion data. The asserts require that all owner records
// were already torn down (noMoreSenders / iokit_task_terminate).
void IOUserClient::free()
{
    if( mappings) mappings->release();

    IOStatisticsUnregisterCounter();

    assert(!owners.next);
    assert(!owners.prev);

    if (reserved) IODelete(reserved, ExpansionData, 1);

    super::free();
}
1557
// Invoked when the owning task dies. The atomic compare-and-swap on 'closed'
// guarantees clientClose() runs at most once for non-shared clients even if
// clientDied() races with an explicit close.
IOReturn IOUserClient::clientDied( void )
{
    IOReturn ret = kIOReturnNotReady;

    if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
    {
        ret = clientClose();
    }

    return (ret);
}
1569
// Default close handler; subclasses override to release client resources.
IOReturn IOUserClient::clientClose( void )
{
    return( kIOReturnUnsupported );
}
1574
// Default provider accessor; subclasses override to return their provider.
IOService * IOUserClient::getService( void )
{
    return( 0 );
}
1579
// Default (32-bit refCon) notification-port registration; override point.
IOReturn IOUserClient::registerNotificationPort(
        mach_port_t     /* port */,
        UInt32          /* type */,
        UInt32          /* refCon */)
{
    return( kIOReturnUnsupported);
}
1587
// 64-bit refCon variant; default implementation truncates the refCon and
// forwards to the 32-bit overload above.
IOReturn IOUserClient::registerNotificationPort(
        mach_port_t port,
        UInt32      type,
        io_user_reference_t refCon)
{
    return (registerNotificationPort(port, type, (UInt32) refCon));
}
1595
// Default semaphore accessor for notification delivery; override point.
IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
                                    semaphore_t * semaphore )
{
    return( kIOReturnUnsupported);
}
1601
// Default cross-connection handler (io_connect_add_client); override point.
IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
{
    return( kIOReturnUnsupported);
}
1606
// Default shared-memory provider; subclasses override to hand back a memory
// descriptor for the given type (used by mapClientMemory64 below).
IOReturn IOUserClient::clientMemoryForType( UInt32 type,
                        IOOptionBits * options,
                        IOMemoryDescriptor ** memory )
{
    return( kIOReturnUnsupported);
}
1613
#if !__LP64__
// Legacy 32-bit mapping entry point; superseded by mapClientMemory64.
IOMemoryMap * IOUserClient::mapClientMemory(
    IOOptionBits        type,
    task_t              task,
    IOOptionBits        mapFlags,
    IOVirtualAddress    atAddress )
{
    return (NULL);
}
#endif
1624
// Map the client memory for 'type' into 'task'. Fetches the descriptor from
// clientMemoryForType(), merges the caller's user-mapping flags into the
// options, creates the mapping and drops the descriptor reference returned
// by the subclass. Returns NULL on any failure.
IOMemoryMap * IOUserClient::mapClientMemory64(
    IOOptionBits        type,
    task_t              task,
    IOOptionBits        mapFlags,
    mach_vm_address_t   atAddress )
{
    IOReturn            err;
    IOOptionBits        options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap *       map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        FAKE_STACK_FRAME(getMetaClass());

        // Only the user-controllable option bits come from mapFlags; the
        // rest are whatever clientMemoryForType() set.
        options = (options & ~kIOMapUserOptionsMask)
                | (mapFlags & kIOMapUserOptionsMask);
        map = memory->createMappingInTask( task, atAddress, options );
        memory->release();

        FAKE_STACK_FRAME_END();
    }

    return( map );
}
1652
// Hand 'obj' to user space: create a send right in 'task' for the object and
// store the resulting port name through 'clientObj'.
IOReturn IOUserClient::exportObjectToClient(task_t task,
            OSObject *obj, io_object_t *clientObj)
{
    mach_port_name_t    name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

    *(mach_port_name_t *)clientObj = name;
    return kIOReturnSuccess;
}
1663
// Legacy method-dispatch table lookup; subclasses override (deprecated path).
IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}
1668
// Legacy async-method table lookup; subclasses override (deprecated path).
IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}
1673
// Trap-dispatch table lookup; subclasses override to expose mach traps.
IOExternalTrap * IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
    return NULL;
}
1679
1680 #pragma clang diagnostic push
1681 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1682
1683 // Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
1684 // functions can break clients of kexts implementing getExternalMethodForIndex()
// Resolve the legacy external method for 'index' and report its target
// object (defaults to the method table entry's 'object' field).
IOExternalMethod * IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
    IOExternalMethod *method = getExternalMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}
1695
// Async counterpart of getTargetAndMethodForIndex().
IOExternalAsyncMethod * IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}
1706
// Resolve the external trap for 'index' and report its target object.
IOExternalTrap * IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalTrap *trap = getExternalTrapForIndex(index);

    if (trap) {
        *targetP = trap->object;
    }

    return trap;
}
1718 #pragma clang diagnostic pop
1719
1720 IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
1721 {
1722 mach_port_t port;
1723 port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
1724
1725 if (MACH_PORT_NULL != port)
1726 iokit_release_port_send(port);
1727
1728 return (kIOReturnSuccess);
1729 }
1730
1731 IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
1732 {
1733 if (MACH_PORT_NULL != port)
1734 iokit_release_port_send(port);
1735
1736 return (kIOReturnSuccess);
1737 }
1738
// Legacy (32-bit reference) async completion: widen the reference and args
// to 64-bit form and forward to sendAsyncResult64().
IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
                                       IOReturn result, void *args[], UInt32 numArgs)
{
    OSAsyncReference64  reference64;
    io_user_reference_t args64[kMaxAsyncArgs];
    unsigned int        idx;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    for (idx = 0; idx < kOSAsyncRef64Count; idx++)
        reference64[idx] = REF64(reference[idx]);

    for (idx = 0; idx < numArgs; idx++)
        args64[idx] = REF64(args[idx]);

    return (sendAsyncResult64(reference64, result, args64, numArgs));
}
1757
// Async completion with delivery options (e.g. kIOUserNotifyOptionCanDrop).
IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
        IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    return _sendAsyncResult64(reference, result, args, numArgs, options);
}
1763
// Async completion with default options (message send fails on full queue).
IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
        IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
    return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
1769
// Build and send the async completion Mach message to the reply port stored
// in reference[0]. The message payload is formatted in either 64-bit or
// 32-bit layout depending on the kIOUCAsync64Flag recorded at
// setAsyncReference64() time, and is truncated to the number of arguments
// actually supplied.
IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
        IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    // Wire format of the reply: header plus one of two payload layouts.
    struct ReplyMsg
    {
        mach_msg_header_t msgHdr;
        union
        {
            struct
            {
                OSNotificationHeader     notifyHdr;
                IOAsyncCompletionContent asyncContent;
                uint32_t                 args[kMaxAsyncArgs];
            } msg32;
            struct
            {
                OSNotificationHeader64   notifyHdr;
                IOAsyncCompletionContent asyncContent;
                io_user_reference_t      args[kMaxAsyncArgs] __attribute__ ((packed));
            } msg64;
        } m;
    };
    ReplyMsg      replyMsg;
    mach_port_t   replyPort;
    kern_return_t kr;

    // If no reply port, do nothing.
    replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
    if (replyPort == MACH_PORT_NULL)
        return kIOReturnSuccess;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    bzero(&replyMsg, sizeof(replyMsg));
    replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                               0 /*local*/);
    replyMsg.msgHdr.msgh_remote_port = replyPort;
    replyMsg.msgHdr.msgh_local_port  = 0;
    replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
    if (kIOUCAsync64Flag & reference[0])
    {
        // 64-bit client: reference and args are copied through unchanged;
        // the message is shortened by the unused argument slots.
        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
            - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
                                        + numArgs * sizeof(io_user_reference_t);
        replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
        bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

        replyMsg.m.msg64.asyncContent.result = result;
        if (numArgs)
            bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
    }
    else
    {
        unsigned int idx;

        // 32-bit client: reference and each argument are narrowed via REF32.
        replyMsg.msgHdr.msgh_size =
            sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
            - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

        replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
                                        + numArgs * sizeof(uint32_t);
        replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

        for (idx = 0; idx < kOSAsyncRefCount; idx++)
            replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);

        replyMsg.m.msg32.asyncContent.result = result;

        for (idx = 0; idx < numArgs; idx++)
            replyMsg.m.msg32.args[idx] = REF32(args[idx]);
    }

    if ((options & kIOUserNotifyOptionCanDrop) != 0) {
        // Best-effort delivery: allow the message to be dropped on timeout.
        kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
             replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
    } else {
        /* Fail on full queue. */
        kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
             replyMsg.msgHdr.msgh_size);
    }
    // Log the first delivery failure per reference only (flag is sticky).
    if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
    {
        reference[0] |= kIOUCAsyncErrorLoggedFlag;
        IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
    }
    return kr;
}
1860
1861
1862 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1863
1864 extern "C" {
1865
// Declare 'out' as a 'cls *' downcast of 'obj'; return kIOReturnBadArgument
// from the enclosing function when the dynamic cast fails.
#define CHECK(cls,obj,out)                      \
    cls * out;                                  \
    if( !(out = OSDynamicCast( cls, obj)))      \
        return( kIOReturnBadArgument )

// Like CHECK, but 'obj' must be an IOUserIterator wrapper; 'out' is the
// wrapped iterator object downcast to 'cls *'. Also declares 'oIter'.
#define CHECKLOCKED(cls,obj,out)                        \
    IOUserIterator * oIter;                             \
    cls * out;                                          \
    if( !(oIter = OSDynamicCast(IOUserIterator, obj)))  \
        return (kIOReturnBadArgument);                  \
    if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
        return (kIOReturnBadArgument)
1878
1879 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1880
1881 // Create a vm_map_copy_t or kalloc'ed data for memory
1882 // to be copied out. ipc will free after the copyout.
1883
// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.
// Wraps kernel memory in a vm_map_copy_t without destroying the source;
// on success *buf owns the copy object.
static kern_return_t copyoutkdata( const void * data, vm_size_t len,
                                    io_buf_ptr_t * buf )
{
    kern_return_t err;
    vm_map_copy_t copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
                    false /* src_destroy */, &copy);

    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
        *buf = (char *) copy;

    return( err );
}
1899
1900 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1901
1902 /* Routine io_server_version */
/* Routine io_server_version */
// Report the IOKit MIG interface version so user space can detect mismatch.
kern_return_t is_io_server_version(
	mach_port_t master_port,
	uint64_t *version)
{
    *version = IOKIT_SERVER_VERSION;
    return (kIOReturnSuccess);
}
1910
1911 /* Routine io_object_get_class */
/* Routine io_object_get_class */
// Copy the object's (most-derived) class name into 'className'.
kern_return_t is_io_object_get_class(
    io_object_t object,
    io_name_t className )
{
    const OSMetaClass* my_obj = NULL;

    if( !object)
        return( kIOReturnBadArgument );

    my_obj = object->getMetaClass();
    if (!my_obj) {
        return (kIOReturnNotFound);
    }

    strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));

    return( kIOReturnSuccess );
}
1930
1931 /* Routine io_object_get_superclass */
/* Routine io_object_get_superclass */
// Look up the named class in the metaclass registry and copy its immediate
// superclass name into 'class_name'. Requires the master device port.
kern_return_t is_io_object_get_superclass(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t class_name)
{
    IOReturn            ret;
    const OSMetaClass * meta;
    const OSMetaClass * super;
    const OSSymbol    * name;
    const char        * cstr;

    if (!obj_name || !class_name) return (kIOReturnBadArgument);
    if (master_port != master_device_port) return( kIOReturnNotPrivileged);

    ret = kIOReturnNotFound;
    meta = 0;
    do
    {
        name = OSSymbol::withCString(obj_name);
        if (!name) break;
        meta = OSMetaClass::copyMetaClassWithName(name);
        if (!meta) break;
        super = meta->getSuperClass();
        if (!super) break;
        cstr = super->getClassName();
        if (!cstr) break;
        strlcpy(class_name, cstr, sizeof(io_name_t));
        ret = kIOReturnSuccess;
    }
    while (false);

    OSSafeReleaseNULL(name);
    if (meta) meta->releaseMetaClass();

    return (ret);
}
1968
1969 /* Routine io_object_get_bundle_identifier */
1970 kern_return_t is_io_object_get_bundle_identifier(
1971 mach_port_t master_port,
1972 io_name_t obj_name,
1973 io_name_t bundle_name)
1974 {
1975 IOReturn ret;
1976 const OSMetaClass * meta;
1977 const OSSymbol * name;
1978 const OSSymbol * identifier;
1979 const char * cstr;
1980
1981 if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
1982 if (master_port != master_device_port) return( kIOReturnNotPrivileged);
1983
1984 ret = kIOReturnNotFound;
1985 meta = 0;
1986 do
1987 {
1988 name = OSSymbol::withCString(obj_name);
1989 if (!name) break;
1990 meta = OSMetaClass::copyMetaClassWithName(name);
1991 if (!meta) break;
1992 identifier = meta->getKmodName();
1993 if (!identifier) break;
1994 cstr = identifier->getCStringNoCopy();
1995 if (!cstr) break;
1996 strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
1997 ret = kIOReturnSuccess;
1998 }
1999 while (false);
2000
2001 OSSafeReleaseNULL(name);
2002 if (meta) meta->releaseMetaClass();
2003
2004 return (ret);
2005 }
2006
2007 /* Routine io_object_conforms_to */
/* Routine io_object_conforms_to */
// Set *conforms when the object can be cast to the named class.
kern_return_t is_io_object_conforms_to(
	io_object_t object,
	io_name_t className,
	boolean_t *conforms )
{
    if( !object)
        return( kIOReturnBadArgument );

    *conforms = (0 != object->metaCast( className ));

    return( kIOReturnSuccess );
}
2020
2021 /* Routine io_object_get_retain_count */
2022 kern_return_t is_io_object_get_retain_count(
2023 io_object_t object,
2024 uint32_t *retainCount )
2025 {
2026 if( !object)
2027 return( kIOReturnBadArgument );
2028
2029 *retainCount = object->getRetainCount();
2030 return( kIOReturnSuccess );
2031 }
2032
2033 /* Routine io_iterator_next */
/* Routine io_iterator_next */
// Advance the iterator; returns the next object retained through *object,
// or kIOReturnNoDevice when exhausted.
kern_return_t is_io_iterator_next(
	io_object_t iterator,
	io_object_t *object )
{
    IOReturn   ret;
    OSObject * obj;

    CHECK( OSIterator, iterator, iter );

    obj = iter->getNextObject();
    if( obj) {
        obj->retain();          // reference consumed by MIG on copy-out
        *object = obj;
        ret = kIOReturnSuccess;
    } else
        ret = kIOReturnNoDevice;

    return (ret);
}
2053
2054 /* Routine io_iterator_reset */
/* Routine io_iterator_reset */
// Rewind the iterator to its first element.
kern_return_t is_io_iterator_reset(
	io_object_t iterator )
{
    CHECK( OSIterator, iterator, iter );

    iter->reset();

    return( kIOReturnSuccess );
}
2064
2065 /* Routine io_iterator_is_valid */
/* Routine io_iterator_is_valid */
// Report whether the iterated collection changed since iteration started.
kern_return_t is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
    CHECK( OSIterator, iterator, iter );

    *is_valid = iter->isValid();

    return( kIOReturnSuccess );
}
2076
2077
// Shared implementation for the match_property_table MIG variants:
// unserialize the matching dictionary and ask the service whether it
// passively matches it.
static kern_return_t internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
    CHECK( IOService, _service, service );

    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *matches = service->passiveMatch( dict );
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}
2104
2105 /* Routine io_service_match_property_table */
/* Routine io_service_match_property_table */
// In-line string variant: retired; callers must use the _ool/_bin variants.
kern_return_t is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
    return (kIOReturnUnsupported);
}
2113
2114
2115 /* Routine io_service_match_property_table_ool */
/* Routine io_service_match_property_table_ool */
// Out-of-line variant: copy the matching data out of the vm_map_copy_t,
// run the shared matcher, and report its status through *result.
kern_return_t is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_match_property_table(service,
                (const char *)data, matchingCnt, matches );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2139
2140 /* Routine io_service_match_property_table_bin */
/* Routine io_service_match_property_table_bin */
// In-band (inband struct) variant; forwards directly to the shared matcher.
kern_return_t is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
    return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
}
2149
// Shared implementation for get_matching_services: unserialize the matching
// dictionary and wrap the resulting service iterator for user space.
// Requires the master device port.
static kern_return_t internal_io_service_get_matching_services(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_iterator_t *existing )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}
2177
2178 /* Routine io_service_get_matching_services */
/* Routine io_service_get_matching_services */
// In-line string variant: retired; callers must use the _ool/_bin variants.
kern_return_t is_io_service_get_matching_services(
	mach_port_t master_port,
	io_string_t matching,
	io_iterator_t *existing )
{
    return (kIOReturnUnsupported);
}
2186
2187 /* Routine io_service_get_matching_services_ool */
/* Routine io_service_get_matching_services_ool */
// Out-of-line variant: copy out the matching data, run the shared lookup,
// report its status via *result; MIG copies *existing out on success.
kern_return_t is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *existing = 0;
        *result = internal_io_service_get_matching_services(master_port,
                    (const char *) data, matchingCnt, existing);
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2213
2214 /* Routine io_service_get_matching_services_bin */
/* Routine io_service_get_matching_services_bin */
// In-band variant; forwards directly to the shared lookup.
kern_return_t is_io_service_get_matching_services_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
    return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
}
2223
2224
// Shared implementation for get_matching_service: like the plural form, but
// returns the single first matching service (retained via copyMatchingService).
static kern_return_t internal_io_service_get_matching_service(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *service = IOService::copyMatchingService( dict );
        kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}
2252
2253 /* Routine io_service_get_matching_service */
/* Routine io_service_get_matching_service */
// In-line string variant: retired; callers must use the _ool/_bin variants.
kern_return_t is_io_service_get_matching_service(
	mach_port_t master_port,
	io_string_t matching,
	io_service_t *service )
{
    return (kIOReturnUnsupported);
}
2261
2262 /* Routine io_service_get_matching_services_ool */
/* Routine io_service_get_matching_services_ool */
// Out-of-line single-service variant: copy out the matching data, run the
// shared lookup, report its status via *result.
kern_return_t is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *service = 0;
        *result = internal_io_service_get_matching_service(master_port,
                    (const char *) data, matchingCnt, service );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
2288
2289 /* Routine io_service_get_matching_service_bin */
/* Routine io_service_get_matching_service_bin */
// In-band single-service variant; forwards directly to the shared lookup.
kern_return_t is_io_service_get_matching_service_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
    return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
}
2298
// Shared implementation for the add_notification MIG variants: create an
// IOServiceUserNotification that forwards matching-service events of
// 'notification_type' to the caller's Mach port, and install it via
// IOService::addMatchingNotification(). 'client64' selects the width of the
// async reference copied back to the client. Requires the master device port.
static kern_return_t internal_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
    IOServiceUserNotification * userNotify = 0;
    IONotifier *                notify = 0;
    const OSSymbol *            sym;
    OSDictionary *              dict;
    IOReturn                    err;
    unsigned long int           userMsgType;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    do {
        err = kIOReturnNoResources;

        // Cap the serialized matching data parsed in-kernel.
        if (matching_size > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

        // NOTE(review): a NULL 'sym' only sets err and falls through rather
        // than bailing out; the type comparisons below then all fail and
        // userMsgType becomes kLastIOKitNotificationType — confirm intended.
        if( !(sym = OSSymbol::withCString( notification_type )))
            err = kIOReturnNoResources;

        assert(matching_size);
        dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
        if (!dict) {
            err = kIOReturnBadArgument;
            continue;
        }

        // Map the registered notification type symbol onto the message type
        // delivered to the client port.
        if( (sym == gIOPublishNotification)
         || (sym == gIOFirstPublishNotification))
            userMsgType = kIOServicePublishNotificationType;
        else if( (sym == gIOMatchedNotification)
              || (sym == gIOFirstMatchNotification))
            userMsgType = kIOServiceMatchedNotificationType;
        else if ((sym == gIOTerminatedNotification)
              || (sym == gIOWillTerminateNotification))
            userMsgType = kIOServiceTerminatedNotificationType;
        else
            userMsgType = kLastIOKitNotificationType;

        userNotify = new IOServiceUserNotification;

        if( userNotify && !userNotify->init( port, userMsgType,
                                             reference, referenceSize, client64)) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = IOService::addMatchingNotification( sym, dict,
                                                     &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

    } while( false );

    // On failure, tear down the partially constructed notification object.
    if ((kIOReturnSuccess != err) && userNotify)
    {
        userNotify->invalidatePort();
        userNotify->release();
        userNotify = 0;
    }

    if( sym)
        sym->release();
    if( dict)
        dict->release();

    return( err );
}
2382
2383
2384 /* Routine io_service_add_notification */
2385 kern_return_t is_io_service_add_notification(
2386 mach_port_t master_port,
2387 io_name_t notification_type,
2388 io_string_t matching,
2389 mach_port_t port,
2390 io_async_ref_t reference,
2391 mach_msg_type_number_t referenceCnt,
2392 io_object_t * notification )
2393 {
2394 return (kIOReturnUnsupported);
2395 }
2396
2397 /* Routine io_service_add_notification_64 */
2398 kern_return_t is_io_service_add_notification_64(
2399 mach_port_t master_port,
2400 io_name_t notification_type,
2401 io_string_t matching,
2402 mach_port_t wake_port,
2403 io_async_ref64_t reference,
2404 mach_msg_type_number_t referenceCnt,
2405 io_object_t *notification )
2406 {
2407 return (kIOReturnUnsupported);
2408 }
2409
2410 /* Routine io_service_add_notification_bin */
2411 kern_return_t is_io_service_add_notification_bin
2412 (
2413 mach_port_t master_port,
2414 io_name_t notification_type,
2415 io_struct_inband_t matching,
2416 mach_msg_type_number_t matchingCnt,
2417 mach_port_t wake_port,
2418 io_async_ref_t reference,
2419 mach_msg_type_number_t referenceCnt,
2420 io_object_t *notification)
2421 {
2422 return (internal_io_service_add_notification(master_port, notification_type,
2423 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2424 false, notification));
2425 }
2426
2427 /* Routine io_service_add_notification_bin_64 */
2428 kern_return_t is_io_service_add_notification_bin_64
2429 (
2430 mach_port_t master_port,
2431 io_name_t notification_type,
2432 io_struct_inband_t matching,
2433 mach_msg_type_number_t matchingCnt,
2434 mach_port_t wake_port,
2435 io_async_ref64_t reference,
2436 mach_msg_type_number_t referenceCnt,
2437 io_object_t *notification)
2438 {
2439 return (internal_io_service_add_notification(master_port, notification_type,
2440 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2441 true, notification));
2442 }
2443
// Out-of-line variant of the add_notification path.  'matching' arrives as a
// vm_map_copy_t produced by MIG and is consumed by vm_map_copyout() below.
// The function return reports only the copyout status (once copyout succeeds
// we must return KERN_SUCCESS so MIG copies out the reply objects); the real
// notification result travels in '*result'.
static kern_return_t internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
	kern_return_t kr;
	vm_offset_t data;
	vm_map_offset_t map_data;

	// Consumes the copy object; on success the data is mapped into kernel_map
	// and must be vm_deallocate()d when done.
	kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
	data = CAST_DOWN(vm_offset_t, map_data);

	if( KERN_SUCCESS == kr) {
		// must return success after vm_map_copyout() succeeds
		// and mig will copy out objects on success
		*notification = 0;
		*result = internal_io_service_add_notification( master_port, notification_type,
		    (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
		vm_deallocate( kernel_map, data, matchingCnt );
	}

	return( kr );
}
2474
2475 /* Routine io_service_add_notification_ool */
2476 kern_return_t is_io_service_add_notification_ool(
2477 mach_port_t master_port,
2478 io_name_t notification_type,
2479 io_buf_ptr_t matching,
2480 mach_msg_type_number_t matchingCnt,
2481 mach_port_t wake_port,
2482 io_async_ref_t reference,
2483 mach_msg_type_number_t referenceCnt,
2484 kern_return_t *result,
2485 io_object_t *notification )
2486 {
2487 return (internal_io_service_add_notification_ool(master_port, notification_type,
2488 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
2489 false, result, notification));
2490 }
2491
2492 /* Routine io_service_add_notification_ool_64 */
2493 kern_return_t is_io_service_add_notification_ool_64(
2494 mach_port_t master_port,
2495 io_name_t notification_type,
2496 io_buf_ptr_t matching,
2497 mach_msg_type_number_t matchingCnt,
2498 mach_port_t wake_port,
2499 io_async_ref64_t reference,
2500 mach_msg_type_number_t referenceCnt,
2501 kern_return_t *result,
2502 io_object_t *notification )
2503 {
2504 return (internal_io_service_add_notification_ool(master_port, notification_type,
2505 matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
2506 true, result, notification));
2507 }
2508
2509 /* Routine io_service_add_notification_old */
2510 kern_return_t is_io_service_add_notification_old(
2511 mach_port_t master_port,
2512 io_name_t notification_type,
2513 io_string_t matching,
2514 mach_port_t port,
2515 // for binary compatibility reasons, this must be natural_t for ILP32
2516 natural_t ref,
2517 io_object_t * notification )
2518 {
2519 return( is_io_service_add_notification( master_port, notification_type,
2520 matching, port, &ref, 1, notification ));
2521 }
2522
2523
2524 static kern_return_t internal_io_service_add_interest_notification(
2525 io_object_t _service,
2526 io_name_t type_of_interest,
2527 mach_port_t port,
2528 void * reference,
2529 vm_size_t referenceSize,
2530 bool client64,
2531 io_object_t * notification )
2532 {
2533
2534 IOServiceMessageUserNotification * userNotify = 0;
2535 IONotifier * notify = 0;
2536 const OSSymbol * sym;
2537 IOReturn err;
2538
2539 CHECK( IOService, _service, service );
2540
2541 err = kIOReturnNoResources;
2542 if( (sym = OSSymbol::withCString( type_of_interest ))) do {
2543
2544 userNotify = new IOServiceMessageUserNotification;
2545
2546 if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
2547 reference, referenceSize,
2548 kIOUserNotifyMaxMessageSize,
2549 client64 )) {
2550 userNotify->release();
2551 userNotify = 0;
2552 }
2553 if( !userNotify)
2554 continue;
2555
2556 notify = service->registerInterest( sym,
2557 &userNotify->_handler, userNotify );
2558 if( notify) {
2559 *notification = userNotify;
2560 userNotify->setNotification( notify );
2561 err = kIOReturnSuccess;
2562 } else
2563 err = kIOReturnUnsupported;
2564
2565 sym->release();
2566
2567 } while( false );
2568
2569 if ((kIOReturnSuccess != err) && userNotify)
2570 {
2571 userNotify->invalidatePort();
2572 userNotify->release();
2573 userNotify = 0;
2574 }
2575
2576 return( err );
2577 }
2578
2579 /* Routine io_service_add_message_notification */
2580 kern_return_t is_io_service_add_interest_notification(
2581 io_object_t service,
2582 io_name_t type_of_interest,
2583 mach_port_t port,
2584 io_async_ref_t reference,
2585 mach_msg_type_number_t referenceCnt,
2586 io_object_t * notification )
2587 {
2588 return (internal_io_service_add_interest_notification(service, type_of_interest,
2589 port, &reference[0], sizeof(io_async_ref_t), false, notification));
2590 }
2591
2592 /* Routine io_service_add_interest_notification_64 */
2593 kern_return_t is_io_service_add_interest_notification_64(
2594 io_object_t service,
2595 io_name_t type_of_interest,
2596 mach_port_t wake_port,
2597 io_async_ref64_t reference,
2598 mach_msg_type_number_t referenceCnt,
2599 io_object_t *notification )
2600 {
2601 return (internal_io_service_add_interest_notification(service, type_of_interest,
2602 wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
2603 }
2604
2605
2606 /* Routine io_service_acknowledge_notification */
2607 kern_return_t is_io_service_acknowledge_notification(
2608 io_object_t _service,
2609 natural_t notify_ref,
2610 natural_t response )
2611 {
2612 CHECK( IOService, _service, service );
2613
2614 return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
2615 (IOOptionBits) response ));
2616
2617 }
2618
2619 /* Routine io_connect_get_semaphore */
2620 kern_return_t is_io_connect_get_notification_semaphore(
2621 io_connect_t connection,
2622 natural_t notification_type,
2623 semaphore_t *semaphore )
2624 {
2625 CHECK( IOUserClient, connection, client );
2626
2627 IOStatisticsClientCall();
2628 return( client->getNotificationSemaphore( (UInt32) notification_type,
2629 semaphore ));
2630 }
2631
2632 /* Routine io_registry_get_root_entry */
2633 kern_return_t is_io_registry_get_root_entry(
2634 mach_port_t master_port,
2635 io_object_t *root )
2636 {
2637 IORegistryEntry * entry;
2638
2639 if( master_port != master_device_port)
2640 return( kIOReturnNotPrivileged);
2641
2642 entry = IORegistryEntry::getRegistryRoot();
2643 if( entry)
2644 entry->retain();
2645 *root = entry;
2646
2647 return( kIOReturnSuccess );
2648 }
2649
2650 /* Routine io_registry_create_iterator */
2651 kern_return_t is_io_registry_create_iterator(
2652 mach_port_t master_port,
2653 io_name_t plane,
2654 uint32_t options,
2655 io_object_t *iterator )
2656 {
2657 if( master_port != master_device_port)
2658 return( kIOReturnNotPrivileged);
2659
2660 *iterator = IOUserIterator::withIterator(
2661 IORegistryIterator::iterateOver(
2662 IORegistryEntry::getPlane( plane ), options ));
2663
2664 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2665 }
2666
2667 /* Routine io_registry_entry_create_iterator */
2668 kern_return_t is_io_registry_entry_create_iterator(
2669 io_object_t registry_entry,
2670 io_name_t plane,
2671 uint32_t options,
2672 io_object_t *iterator )
2673 {
2674 CHECK( IORegistryEntry, registry_entry, entry );
2675
2676 *iterator = IOUserIterator::withIterator(
2677 IORegistryIterator::iterateOver( entry,
2678 IORegistryEntry::getPlane( plane ), options ));
2679
2680 return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
2681 }
2682
/* Routine io_registry_iterator_enter */
kern_return_t is_io_registry_iterator_enter_entry(
	io_object_t iterator )
{
	// CHECKLOCKED validates the object and declares both 'iter' (the typed
	// IORegistryIterator) and 'oIter' (the wrapper whose 'lock' serializes
	// user-space access) -- NOTE(review): both names come from the macro;
	// confirm against its definition.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Descend into the current entry's children under the iterator lock.
	IOLockLock(oIter->lock);
	iter->enterEntry();
	IOLockUnlock(oIter->lock);

	return( kIOReturnSuccess );
}
2695
/* Routine io_registry_iterator_exit */
kern_return_t is_io_registry_iterator_exit_entry(
	io_object_t iterator )
{
	bool didIt;

	// CHECKLOCKED declares 'iter' and 'oIter'; see enter_entry above --
	// NOTE(review): names come from the macro; confirm against its definition.
	CHECKLOCKED( IORegistryIterator, iterator, iter );

	// Ascend one level under the iterator lock; fails when already at the
	// iteration root.
	IOLockLock(oIter->lock);
	didIt = iter->exitEntry();
	IOLockUnlock(oIter->lock);

	return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
}
2710
2711 /* Routine io_registry_entry_from_path */
2712 kern_return_t is_io_registry_entry_from_path(
2713 mach_port_t master_port,
2714 io_string_t path,
2715 io_object_t *registry_entry )
2716 {
2717 IORegistryEntry * entry;
2718
2719 if( master_port != master_device_port)
2720 return( kIOReturnNotPrivileged);
2721
2722 entry = IORegistryEntry::fromPath( path );
2723
2724 *registry_entry = entry;
2725
2726 return( kIOReturnSuccess );
2727 }
2728
2729
/* Routine io_registry_entry_from_path */
// Looks up a registry entry by path.  Short paths arrive in-band in 'path';
// when that string is empty the path is taken from the out-of-line buffer
// 'path_ool' (a MIG vm_map_copy_t).  'err' tracks copyout/transport status
// (the MIG return), while 'res' carries the lookup result in '*result'.
kern_return_t is_io_registry_entry_from_path_ool(
	mach_port_t master_port,
	io_string_inband_t path,
	io_buf_ptr_t path_ool,
	mach_msg_type_number_t path_oolCnt,
	kern_return_t *result,
	io_object_t *registry_entry)
{
	IORegistryEntry * entry;
	vm_map_offset_t map_data;
	const char * cpath;
	IOReturn res;
	kern_return_t err;

	if (master_port != master_device_port) return(kIOReturnNotPrivileged);

	map_data = 0;
	entry = 0;
	res = err = KERN_SUCCESS;
	// Prefer the in-band path when present; fall back to the OOL buffer.
	if (path[0]) cpath = path;
	else
	{
		if (!path_oolCnt) return(kIOReturnBadArgument);
		// Bound the OOL buffer to a sane maximum before mapping it.
		if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

		err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
		if (KERN_SUCCESS == err)
		{
			// must return success to mig after vm_map_copyout() succeeds, so result is actual
			cpath = CAST_DOWN(const char *, map_data);
			// Reject a buffer whose final byte is not the NUL terminator.
			if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
		}
	}

	if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
	{
		entry = IORegistryEntry::fromPath(cpath);
		res = entry ? kIOReturnSuccess : kIOReturnNotFound;
	}

	// Unmap the copied-out path data, if we mapped any.
	if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);

	if (KERN_SUCCESS != err) res = err;
	*registry_entry = entry;	// entry reference (if any) passes to the caller
	*result = res;

	return (err);
}
2779
2780
2781 /* Routine io_registry_entry_in_plane */
2782 kern_return_t is_io_registry_entry_in_plane(
2783 io_object_t registry_entry,
2784 io_name_t plane,
2785 boolean_t *inPlane )
2786 {
2787 CHECK( IORegistryEntry, registry_entry, entry );
2788
2789 *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));
2790
2791 return( kIOReturnSuccess );
2792 }
2793
2794
2795 /* Routine io_registry_entry_get_path */
2796 kern_return_t is_io_registry_entry_get_path(
2797 io_object_t registry_entry,
2798 io_name_t plane,
2799 io_string_t path )
2800 {
2801 int length;
2802 CHECK( IORegistryEntry, registry_entry, entry );
2803
2804 length = sizeof( io_string_t);
2805 if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
2806 return( kIOReturnSuccess );
2807 else
2808 return( kIOReturnBadArgument );
2809 }
2810
/* Routine io_registry_entry_get_path */
// Renders the entry's path.  Tries the in-band buffer first; when the path is
// too long, renders into a kMaxPath heap buffer and returns it out-of-line
// via '*path_ool' / '*path_oolCnt'.
kern_return_t is_io_registry_entry_get_path_ool(
	io_object_t registry_entry,
	io_name_t plane,
	io_string_inband_t path,
	io_buf_ptr_t *path_ool,
	mach_msg_type_number_t *path_oolCnt)
{
	enum { kMaxPath = 16384 };
	IOReturn err;
	int length;
	char * buf;

	CHECK( IORegistryEntry, registry_entry, entry );

	*path_ool = NULL;
	*path_oolCnt = 0;
	// Fast path: render directly into the in-band reply buffer.
	length = sizeof(io_string_inband_t);
	if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
	else
	{
		// Slow path: retry into a large heap buffer and ship it OOL.
		length = kMaxPath;
		buf = IONew(char, length);
		if (!buf) err = kIOReturnNoMemory;
		else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
		else
		{
			*path_oolCnt = length;
			// NOTE(review): assumes copyoutkdata copies (does not consume)
			// 'buf' -- the IODelete below frees it unconditionally.
			err = copyoutkdata(buf, length, path_ool);
		}
		// Free the full kMaxPath allocation even though getPath() may have
		// shrunk 'length'.
		if (buf) IODelete(buf, char, kMaxPath);
	}

	return (err);
}
2846
2847
2848 /* Routine io_registry_entry_get_name */
2849 kern_return_t is_io_registry_entry_get_name(
2850 io_object_t registry_entry,
2851 io_name_t name )
2852 {
2853 CHECK( IORegistryEntry, registry_entry, entry );
2854
2855 strncpy( name, entry->getName(), sizeof( io_name_t));
2856
2857 return( kIOReturnSuccess );
2858 }
2859
2860 /* Routine io_registry_entry_get_name_in_plane */
2861 kern_return_t is_io_registry_entry_get_name_in_plane(
2862 io_object_t registry_entry,
2863 io_name_t planeName,
2864 io_name_t name )
2865 {
2866 const IORegistryPlane * plane;
2867 CHECK( IORegistryEntry, registry_entry, entry );
2868
2869 if( planeName[0])
2870 plane = IORegistryEntry::getPlane( planeName );
2871 else
2872 plane = 0;
2873
2874 strncpy( name, entry->getName( plane), sizeof( io_name_t));
2875
2876 return( kIOReturnSuccess );
2877 }
2878
2879 /* Routine io_registry_entry_get_location_in_plane */
2880 kern_return_t is_io_registry_entry_get_location_in_plane(
2881 io_object_t registry_entry,
2882 io_name_t planeName,
2883 io_name_t location )
2884 {
2885 const IORegistryPlane * plane;
2886 CHECK( IORegistryEntry, registry_entry, entry );
2887
2888 if( planeName[0])
2889 plane = IORegistryEntry::getPlane( planeName );
2890 else
2891 plane = 0;
2892
2893 const char * cstr = entry->getLocation( plane );
2894
2895 if( cstr) {
2896 strncpy( location, cstr, sizeof( io_name_t));
2897 return( kIOReturnSuccess );
2898 } else
2899 return( kIOReturnNotFound );
2900 }
2901
2902 /* Routine io_registry_entry_get_registry_entry_id */
2903 kern_return_t is_io_registry_entry_get_registry_entry_id(
2904 io_object_t registry_entry,
2905 uint64_t *entry_id )
2906 {
2907 CHECK( IORegistryEntry, registry_entry, entry );
2908
2909 *entry_id = entry->getRegistryEntryID();
2910
2911 return (kIOReturnSuccess);
2912 }
2913
/* Routine io_registry_entry_get_property */
// Returns a property's raw bytes in the in-band reply buffer.  Supports
// OSData, OSString, OSBoolean and OSNumber; other property types yield
// kIOReturnBadArgument.  '*dataCnt' is in/out: buffer capacity on entry,
// bytes written on success.
kern_return_t is_io_registry_entry_get_property_bytes(
	io_object_t registry_entry,
	io_name_t property_name,
	io_struct_inband_t buf,
	mach_msg_type_number_t *dataCnt )
{
	OSObject * obj;
	OSData * data;
	OSString * str;
	OSBoolean * boo;
	OSNumber * off;
	UInt64 offsetBytes;
	unsigned int len = 0;
	const void * bytes = 0;
	IOReturn ret = kIOReturnSuccess;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny reading individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
		return kIOReturnNotPermitted;
#endif

	obj = entry->copyProperty(property_name);
	if( !obj)
		return( kIOReturnNoResources );

	// One day OSData will be a common container base class
	// until then...
	if( (data = OSDynamicCast( OSData, obj ))) {
		len = data->getLength();
		bytes = data->getBytesNoCopy();
		// Non-serializable data is withheld: report zero bytes.
		if (!data->isSerializable()) len = 0;

	} else if( (str = OSDynamicCast( OSString, obj ))) {
		// Include the NUL terminator in the byte count.
		len = str->getLength() + 1;
		bytes = str->getCStringNoCopy();

	} else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
		// Booleans are rendered as the literal strings "Yes"/"No".
		len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
		bytes = boo->isTrue() ? "Yes" : "No";

	} else if( (off = OSDynamicCast( OSNumber, obj ))) {
		// Numbers are returned little-end-first as their natural byte
		// width, capped at 8 bytes.
		offsetBytes = off->unsigned64BitValue();
		len = off->numberOfBytes();
		if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
		bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
		// On big-endian, point at the low-order bytes of the UInt64.
		bytes = (const void *)
		    (((UInt32) bytes) + (sizeof( UInt64) - len));
#endif

	} else
		ret = kIOReturnBadArgument;

	if( bytes) {
		// Caller's buffer must be large enough for the whole value.
		if( *dataCnt < len)
			ret = kIOReturnIPCError;
		else {
			*dataCnt = len;
			bcopy( bytes, buf, len );
		}
	}
	obj->release();

	return( ret );
}
2982
2983
/* Routine io_registry_entry_get_property */
// Serializes a single property as XML and returns it out-of-line.
// '*properties' is produced by copyoutkdata for MIG to hand to the caller.
kern_return_t is_io_registry_entry_get_property(
	io_object_t registry_entry,
	io_name_t property_name,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny reading individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
		return kIOReturnNotPermitted;
#endif

	obj = entry->copyProperty(property_name);
	if( !obj)
		return( kIOReturnNotFound );

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if( !s) {
		obj->release();
		return( kIOReturnNoMemory );
	}

	if( obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the serialized text out for MIG delivery.
		err = copyoutkdata( s->text(), len, properties );

	} else
		err = kIOReturnUnsupported;

	s->release();
	obj->release();

	return( err );
}
3025
/* Routine io_registry_entry_get_property_recursively */
// Like is_io_registry_entry_get_property, but the lookup may recurse through
// the given plane according to 'options' (kIORegistryIterate* flags).
kern_return_t is_io_registry_entry_get_property_recursively(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny reading individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
		return kIOReturnNotPermitted;
#endif

	obj = entry->copyProperty( property_name,
	    IORegistryEntry::getPlane( plane ), options );
	if( !obj)
		return( kIOReturnNotFound );

	OSSerialize * s = OSSerialize::withCapacity(4096);
	if( !s) {
		obj->release();
		return( kIOReturnNoMemory );
	}

	if( obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the serialized text out for MIG delivery.
		err = copyoutkdata( s->text(), len, properties );

	} else
		err = kIOReturnUnsupported;

	s->release();
	obj->release();

	return( err );
}
3070
3071 /* Routine io_registry_entry_get_properties */
3072 kern_return_t is_io_registry_entry_get_properties(
3073 io_object_t registry_entry,
3074 io_buf_ptr_t *properties,
3075 mach_msg_type_number_t *propertiesCnt )
3076 {
3077 return (kIOReturnUnsupported);
3078 }
3079
3080 #if CONFIG_MACF
3081
// State shared with GetPropertiesEditor() while serializing an entry's
// property table under MAC filtering.
struct GetPropertiesEditorRef
{
	kauth_cred_t cred;		// credential the MAC policy is evaluated against
	IORegistryEntry * entry;	// entry whose properties are being serialized
	OSCollection * root;		// outermost collection seen; set on first callback
};
3088
3089 static const OSMetaClassBase *
3090 GetPropertiesEditor(void * reference,
3091 OSSerialize * s,
3092 OSCollection * container,
3093 const OSSymbol * name,
3094 const OSMetaClassBase * value)
3095 {
3096 GetPropertiesEditorRef * ref = (typeof(ref)) reference;
3097
3098 if (!ref->root) ref->root = container;
3099 if (ref->root == container)
3100 {
3101 if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
3102 {
3103 value = 0;
3104 }
3105 }
3106 if (value) value->retain();
3107 return (value);
3108 }
3109
3110 #endif /* CONFIG_MACF */
3111
/* Routine io_registry_entry_get_properties */
// Serializes the entry's whole property table in the binary format and
// returns it out-of-line.  Under CONFIG_MACF, properties may be filtered
// per-name through GetPropertiesEditor.
kern_return_t is_io_registry_entry_get_properties_bin(
	io_object_t registry_entry,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt)
{
	kern_return_t	   err = kIOReturnSuccess;
	vm_size_t		   len;
	OSSerialize          * s;
	OSSerialize::Editor    editor = 0;
	void                 * editRef = 0;

	CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
	// 'ref' lives on this stack frame; it must outlive the serialization
	// below, which it does since serializeProperties() runs synchronously.
	GetPropertiesEditorRef ref;
	if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
	{
		editor = &GetPropertiesEditor;
		editRef = &ref;
		ref.cred = kauth_cred_get();
		ref.entry = entry;
		ref.root = 0;
	}
#endif

	s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
	if (!s) return (kIOReturnNoMemory);

	if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;

	if (kIOReturnSuccess == err)
	{
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the serialized blob out for MIG delivery.
		err = copyoutkdata(s->text(), len, properties);
	}
	s->release();

	return (err);
}
3153
/* Routine io_registry_entry_get_property_bin */
// Binary-format single-property fetch.  Special property names:
// gIORegistryEntryPropertyKeysKey returns the list of property keys;
// names in gIORemoveOnReadProperties are removed from the entry after a
// successful read (read-once semantics).
kern_return_t is_io_registry_entry_get_property_bin(
	io_object_t registry_entry,
	io_name_t plane,
	io_name_t property_name,
	uint32_t options,
	io_buf_ptr_t *properties,
	mach_msg_type_number_t *propertiesCnt )
{
	kern_return_t err;
	vm_size_t len;
	OSObject * obj;
	const OSSymbol * sym;

	CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
	// MAC policy may deny reading individual properties.
	if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
		return kIOReturnNotPermitted;
#endif

	sym = OSSymbol::withCString(property_name);
	if (!sym) return (kIOReturnNoMemory);

	if (gIORegistryEntryPropertyKeysKey == sym)
	{
		// Synthetic property: the set of keys rather than a value.
		obj = entry->copyPropertyKeys();
	}
	else
	{
		// Recursive lookup only when requested and a plane is named.
		if ((kIORegistryIterateRecursively & options) && plane[0])
		{
			obj = entry->copyProperty(property_name,
			    IORegistryEntry::getPlane(plane), options );
		}
		else
		{
			obj = entry->copyProperty(property_name);
		}
		// Read-once properties are deleted after a successful copy.
		if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
	}

	sym->release();
	if (!obj) return (kIOReturnNotFound);

	OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
	if( !s) {
		obj->release();
		return( kIOReturnNoMemory );
	}

	if( obj->serialize( s )) {
		len = s->getLength();
		*propertiesCnt = len;
		// Copy the serialized blob out for MIG delivery.
		err = copyoutkdata( s->text(), len, properties );

	} else err = kIOReturnUnsupported;

	s->release();
	obj->release();

	return( err );
}
3217
3218
/* Routine io_registry_entry_set_properties */
// Applies a serialized (XML) property dictionary to the entry.  The data
// arrives as a MIG vm_map_copy_t; after vm_map_copyout() succeeds the
// function must return KERN_SUCCESS to MIG, so the real outcome travels in
// '*result'.
kern_return_t is_io_registry_entry_set_properties
(
	io_object_t registry_entry,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result)
{
	OSObject * obj;
	kern_return_t err;
	IOReturn res;
	vm_offset_t data;
	vm_map_offset_t map_data;

	CHECK( IORegistryEntry, registry_entry, entry );

	// Bound the unserializer input to what MIG could legitimately send.
	if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
		return( kIOReturnMessageTooLarge);

	// Consumes the copy object on success.
	err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
	data = CAST_DOWN(vm_offset_t, map_data);

	if( KERN_SUCCESS == err) {

		FAKE_STACK_FRAME(entry->getMetaClass());

		// must return success after vm_map_copyout() succeeds
		obj = OSUnserializeXML( (const char *) data, propertiesCnt );
		vm_deallocate( kernel_map, data, propertiesCnt );

		if (!obj)
			res = kIOReturnBadArgument;
#if CONFIG_MACF
		// MAC policy gets a chance to veto the whole update.
		else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
		    registry_entry, obj))
		{
			res = kIOReturnNotPermitted;
		}
#endif
		else
		{
			res = entry->setProperties( obj );
		}

		if (obj)
			obj->release();

		FAKE_STACK_FRAME_END();

	} else
		res = err;

	*result = res;
	return( err );
}
3274
3275 /* Routine io_registry_entry_get_child_iterator */
3276 kern_return_t is_io_registry_entry_get_child_iterator(
3277 io_object_t registry_entry,
3278 io_name_t plane,
3279 io_object_t *iterator )
3280 {
3281 CHECK( IORegistryEntry, registry_entry, entry );
3282
3283 *iterator = entry->getChildIterator(
3284 IORegistryEntry::getPlane( plane ));
3285
3286 return( kIOReturnSuccess );
3287 }
3288
3289 /* Routine io_registry_entry_get_parent_iterator */
3290 kern_return_t is_io_registry_entry_get_parent_iterator(
3291 io_object_t registry_entry,
3292 io_name_t plane,
3293 io_object_t *iterator)
3294 {
3295 CHECK( IORegistryEntry, registry_entry, entry );
3296
3297 *iterator = entry->getParentIterator(
3298 IORegistryEntry::getPlane( plane ));
3299
3300 return( kIOReturnSuccess );
3301 }
3302
3303 /* Routine io_service_get_busy_state */
3304 kern_return_t is_io_service_get_busy_state(
3305 io_object_t _service,
3306 uint32_t *busyState )
3307 {
3308 CHECK( IOService, _service, service );
3309
3310 *busyState = service->getBusyState();
3311
3312 return( kIOReturnSuccess );
3313 }
3314
3315 /* Routine io_service_get_state */
3316 kern_return_t is_io_service_get_state(
3317 io_object_t _service,
3318 uint64_t *state,
3319 uint32_t *busy_state,
3320 uint64_t *accumulated_busy_time )
3321 {
3322 CHECK( IOService, _service, service );
3323
3324 *state = service->getState();
3325 *busy_state = service->getBusyState();
3326 *accumulated_busy_time = service->getAccumulatedBusyTime();
3327
3328 return( kIOReturnSuccess );
3329 }
3330
3331 /* Routine io_service_wait_quiet */
3332 kern_return_t is_io_service_wait_quiet(
3333 io_object_t _service,
3334 mach_timespec_t wait_time )
3335 {
3336 uint64_t timeoutNS;
3337
3338 CHECK( IOService, _service, service );
3339
3340 timeoutNS = wait_time.tv_sec;
3341 timeoutNS *= kSecondScale;
3342 timeoutNS += wait_time.tv_nsec;
3343
3344 return( service->waitQuiet(timeoutNS) );
3345 }
3346
3347 /* Routine io_service_request_probe */
3348 kern_return_t is_io_service_request_probe(
3349 io_object_t _service,
3350 uint32_t options )
3351 {
3352 CHECK( IOService, _service, service );
3353
3354 return( service->requestProbe( options ));
3355 }
3356
3357 /* Routine io_service_get_authorization_id */
3358 kern_return_t is_io_service_get_authorization_id(
3359 io_object_t _service,
3360 uint64_t *authorization_id )
3361 {
3362 kern_return_t kr;
3363
3364 CHECK( IOService, _service, service );
3365
3366 kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
3367 kIOClientPrivilegeAdministrator );
3368 if( kIOReturnSuccess != kr)
3369 return( kr );
3370
3371 *authorization_id = service->getAuthorizationID();
3372
3373 return( kr );
3374 }
3375
3376 /* Routine io_service_set_authorization_id */
3377 kern_return_t is_io_service_set_authorization_id(
3378 io_object_t _service,
3379 uint64_t authorization_id )
3380 {
3381 CHECK( IOService, _service, service );
3382
3383 return( service->setAuthorizationID( authorization_id ) );
3384 }
3385
/* Routine io_service_open_ndr */
// Opens a user-client connection on a service.  'ndr' carries the caller's
// data representation; a cross-endian caller is only allowed when the
// service or client opts in via kIOUserClientCrossEndianCompatibleKey.
// The open result travels in '*result'; the function return is the MIG
// transport status.
kern_return_t is_io_service_open_extended(
	io_object_t _service,
	task_t owningTask,
	uint32_t connect_type,
	NDR_record_t ndr,
	io_buf_ptr_t properties,
	mach_msg_type_number_t propertiesCnt,
	kern_return_t * result,
	io_object_t *connection )
{
	IOUserClient * client = 0;
	kern_return_t err = KERN_SUCCESS;
	IOReturn res = kIOReturnSuccess;
	OSDictionary * propertiesDict = 0;
	bool crossEndian;
	bool disallowAccess;

	CHECK( IOService, _service, service );

	// The owning task must be the caller itself.
	if (!owningTask) return (kIOReturnBadArgument);
	assert(owningTask == current_task());
	if (owningTask != current_task()) return (kIOReturnBadArgument);

	do
	{
	    // Passing open-time properties is currently disabled (see #if 0
	    // below for the retained, inactive implementation).
	    if (properties) return (kIOReturnUnsupported);
#if 0
	    {
		OSObject *	    obj;
		vm_offset_t 	    data;
		vm_map_offset_t	    map_data;

		if( propertiesCnt > sizeof(io_struct_inband_t))
		    return( kIOReturnMessageTooLarge);

		err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
		res = err;
		data = CAST_DOWN(vm_offset_t, map_data);
		if (KERN_SUCCESS == err)
		{
		    // must return success after vm_map_copyout() succeeds
		    obj = OSUnserializeXML( (const char *) data, propertiesCnt );
		    vm_deallocate( kernel_map, data, propertiesCnt );
		    propertiesDict = OSDynamicCast(OSDictionary, obj);
		    if (!propertiesDict)
		    {
			res = kIOReturnBadArgument;
			if (obj)
			    obj->release();
		    }
		}
		if (kIOReturnSuccess != res)
		    break;
	    }
#endif
	    // Detect an integer-representation mismatch between caller and
	    // kernel; record the caller's NDR on the client if so.
	    crossEndian = (ndr.int_rep != NDR_record.int_rep);
	    if (crossEndian)
	    {
		if (!propertiesDict)
		    propertiesDict = OSDictionary::withCapacity(4);
		OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
		if (data)
		{
		    if (propertiesDict)
			propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
		    data->release();
		}
	    }

	    res = service->newUserClient( owningTask, (void *) owningTask,
		    connect_type, propertiesDict, &client );

	    if (propertiesDict)
		propertiesDict->release();

	    if (res == kIOReturnSuccess)
	    {
		assert( OSDynamicCast(IOUserClient, client) );

		client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
		client->closed = false;

		// Deny cross-endian callers unless the service or client has
		// explicitly declared compatibility.
		disallowAccess = (crossEndian
		    && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
		    && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
		if (disallowAccess) res = kIOReturnUnsupported;
#if CONFIG_MACF
		// MAC policy gets the final say on the open.
		else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
		    res = kIOReturnNotPermitted;
#endif

		if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);

		if (kIOReturnSuccess != res)
		{
		    // Open denied after creation: close and drop the client.
		    IOStatisticsClientCall();
		    client->clientClose();
		    client->release();
		    client = 0;
		    break;
		}
		// Tag the client with the creating process for diagnostics.
		OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
		if (creatorName)
		{
		    client->setProperty(kIOUserClientCreatorKey, creatorName);
		    creatorName->release();
		}
		client->setTerminateDefer(service, false);
	    }
	}
	while (false);

	*connection = client;
	*result = res;

	return (err);
}
3504
/* Routine io_service_close */
kern_return_t is_io_service_close(
    io_object_t connection )
{
    OSSet * mappings;

    // A bare OSSet can arrive here when the "connection" object is really
    // the set of memory mappings handed out for a client; there is no user
    // client to close in that case, so just succeed.
    if ((mappings = OSDynamicCast(OSSet, connection)))
        return( kIOReturnSuccess );

    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();

    // Shared-instance clients may be closed any number of times. Otherwise
    // the atomic 0 -> 1 swap of 'closed' guarantees clientClose() runs at
    // most once even if user space sends duplicate close requests.
    if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
    {
        client->clientClose();
    }
    else
    {
        // Duplicate close on a non-shared client: log and ignore.
        IOLog("ignored is_io_service_close(0x%qx,%s)\n",
              client->getRegistryEntryID(), client->getName());
    }

    return( kIOReturnSuccess );
}
3529
3530 /* Routine io_connect_get_service */
3531 kern_return_t is_io_connect_get_service(
3532 io_object_t connection,
3533 io_object_t *service )
3534 {
3535 IOService * theService;
3536
3537 CHECK( IOUserClient, connection, client );
3538
3539 theService = client->getService();
3540 if( theService)
3541 theService->retain();
3542
3543 *service = theService;
3544
3545 return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
3546 }
3547
3548 /* Routine io_connect_set_notification_port */
3549 kern_return_t is_io_connect_set_notification_port(
3550 io_object_t connection,
3551 uint32_t notification_type,
3552 mach_port_t port,
3553 uint32_t reference)
3554 {
3555 CHECK( IOUserClient, connection, client );
3556
3557 IOStatisticsClientCall();
3558 return( client->registerNotificationPort( port, notification_type,
3559 (io_user_reference_t) reference ));
3560 }
3561
3562 /* Routine io_connect_set_notification_port */
3563 kern_return_t is_io_connect_set_notification_port_64(
3564 io_object_t connection,
3565 uint32_t notification_type,
3566 mach_port_t port,
3567 io_user_reference_t reference)
3568 {
3569 CHECK( IOUserClient, connection, client );
3570
3571 IOStatisticsClientCall();
3572 return( client->registerNotificationPort( port, notification_type,
3573 reference ));
3574 }
3575
/* Routine io_connect_map_memory_into_task */
kern_return_t is_io_connect_map_memory_into_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t into_task,
    mach_vm_address_t *address,
    mach_vm_size_t *size,
    uint32_t flags
)
{
    IOReturn err;
    IOMemoryMap * map;

    CHECK( IOUserClient, connection, client );

    if (!into_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    // Ask the client to map the typed memory into the target task; *address
    // carries the caller's requested address in (may be 0 with kIOMapAnywhere
    // behavior decided by flags — the client implementation interprets it).
    map = client->mapClientMemory64( memory_type, into_task, flags, *address );

    if( map) {
        *address = map->getAddress();
        if( size)
            *size = map->getSize();

        if( client->sharedInstance
            || (into_task != current_task())) {
            // push a name out to the task owning the map,
            // so we can clean up maps
            mach_port_name_t name __unused =
                IOMachPort::makeSendRightForTask(
                                    into_task, map, IKOT_IOKIT_OBJECT );

        } else {
            // keep it with the user client: track the map in client->mappings
            // (lazily created) so a later unmap can find and drop it.
            IOLockLock( gIOObjectPortLock);
            if( 0 == client->mappings)
                client->mappings = OSSet::withCapacity(2);
            if( client->mappings)
                client->mappings->setObject( map);
            IOLockUnlock( gIOObjectPortLock);
            // The set (if created) now holds the reference; drop ours.
            map->release();
        }
        err = kIOReturnSuccess;

    } else
        err = kIOReturnBadArgument;

    return( err );
}
3627
3628 /* Routine is_io_connect_map_memory */
3629 kern_return_t is_io_connect_map_memory(
3630 io_object_t connect,
3631 uint32_t type,
3632 task_t task,
3633 uint32_t * mapAddr,
3634 uint32_t * mapSize,
3635 uint32_t flags )
3636 {
3637 IOReturn err;
3638 mach_vm_address_t address;
3639 mach_vm_size_t size;
3640
3641 address = SCALAR64(*mapAddr);
3642 size = SCALAR64(*mapSize);
3643
3644 err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);
3645
3646 *mapAddr = SCALAR32(address);
3647 *mapSize = SCALAR32(size);
3648
3649 return (err);
3650 }
3651
3652 } /* extern "C" */
3653
// Find and remove the tracked IOMemoryMap backed by 'mem' from this client's
// 'mappings' set. Returns the map with an extra retain (caller must release),
// or NULL if no tracked mapping uses that descriptor.
IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
    OSIterator * iter;
    IOMemoryMap * map = 0;

    IOLockLock(gIOObjectPortLock);

    // Iterate a snapshot of the set; removing inside a plain set walk would
    // invalidate it, so the retain + removeObject happens before breaking out.
    iter = OSCollectionIterator::withCollection(mappings);
    if(iter)
    {
        while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
        {
            if(mem == map->getMemoryDescriptor())
            {
                // Keep the map alive past the set's release of it.
                map->retain();
                mappings->removeObject(map);
                break;
            }
        }
        iter->release();
    }

    IOLockUnlock(gIOObjectPortLock);

    return (map);
}
3680
3681 extern "C" {
3682
/* Routine io_connect_unmap_memory_from_task */
kern_return_t is_io_connect_unmap_memory_from_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t from_task,
    mach_vm_address_t address)
{
    IOReturn err;
    IOOptionBits options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap * map;

    CHECK( IOUserClient, connection, client );

    if (!from_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    // Ask the client which descriptor backs this memory type.
    err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        // kIOMapReference makes createMappingInTask() look up the existing
        // mapping at 'address' rather than creating a new one.
        options = (options & ~kIOMapUserOptionsMask)
                | kIOMapAnywhere | kIOMapReference;

        map = memory->createMappingInTask( from_task, address, options );
        memory->release();
        if( map)
        {
            // Stop tracking the map on the client (if it was kept there by
            // is_io_connect_map_memory_into_task for the same-task case).
            IOLockLock( gIOObjectPortLock);
            if( client->mappings)
                client->mappings->removeObject( map);
            IOLockUnlock( gIOObjectPortLock);

            mach_port_name_t name = 0;
            // Cross-task maps were published as a send right in the owning
            // task; recover that name so the right can be dropped below.
            if (from_task != current_task())
                name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
            if (name)
            {
                map->userClientUnmap();
                // Drop both the right just made and the original (-2).
                err = iokit_mod_send_right( from_task, name, -2 );
                err = kIOReturnSuccess;
            }
            else
                IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
            if (from_task == current_task())
                map->release();
        }
        else
            err = kIOReturnBadArgument;
    }

    return( err );
}
3737
3738 kern_return_t is_io_connect_unmap_memory(
3739 io_object_t connect,
3740 uint32_t type,
3741 task_t task,
3742 uint32_t mapAddr )
3743 {
3744 IOReturn err;
3745 mach_vm_address_t address;
3746
3747 address = SCALAR64(mapAddr);
3748
3749 err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);
3750
3751 return (err);
3752 }
3753
3754
3755 /* Routine io_connect_add_client */
3756 kern_return_t is_io_connect_add_client(
3757 io_object_t connection,
3758 io_object_t connect_to)
3759 {
3760 CHECK( IOUserClient, connection, client );
3761 CHECK( IOUserClient, connect_to, to );
3762
3763 IOStatisticsClientCall();
3764 return( client->connectClient( to ) );
3765 }
3766
3767
3768 /* Routine io_connect_set_properties */
3769 kern_return_t is_io_connect_set_properties(
3770 io_object_t connection,
3771 io_buf_ptr_t properties,
3772 mach_msg_type_number_t propertiesCnt,
3773 kern_return_t * result)
3774 {
3775 return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
3776 }
3777
/* Routine io_user_client_method */
// Synchronous external-method call whose structure output is returned as a
// variable-length out-of-line buffer (var_output), produced by the method
// via args.structureVariableOutputData (an OSSerialize or OSData).
kern_return_t is_io_connect_method_var_output
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    io_buf_ptr_t *var_output,
    mach_msg_type_number_t *var_outputCnt
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    OSObject * structureVariableOutputData = 0;

    // Build the unified argument block for externalMethod().
    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    // Synchronous call: no async wake port or reference.
    args.asyncWakePort = MACH_PORT_NULL;
    args.asyncReference = 0;
    args.asyncReferenceCount = 0;
    // Where the method deposits its variable-length output object.
    args.structureVariableOutputData = &structureVariableOutputData;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    // Out-of-line input smaller than the inband limit should have been sent
    // inband; treat it as malformed IPC.
    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    // Wrap the caller's out-of-line input in a copy-on-write descriptor so
    // the method sees a stable snapshot of the user buffer.
    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    // Zero the scalar output slots so unset entries are never returned
    // uninitialized.
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;
    args.structureOutputDescriptor = NULL;
    args.structureOutputDescriptorSize = 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    // Report how much of each output the method actually produced.
    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;

    if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
    {
        OSSerialize * serialize;
        OSData * data;
        vm_size_t len;

        // Copy the variable-length output (serialized text or raw bytes)
        // out to user space; kIOReturnUnderrun if the method produced
        // neither form.
        if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
        {
            len = serialize->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(serialize->text(), len, var_output);
        }
        else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
        {
            len = data->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
        }
        else
        {
            ret = kIOReturnUnderrun;
        }
    }

    if (inputMD)
        inputMD->release();
    if (structureVariableOutputData)
        structureVariableOutputData->release();

    return (ret);
}
3874
/* Routine io_user_client_method */
// Synchronous external-method call: marshals inband/out-of-line inputs and
// outputs into an IOExternalMethodArguments block and dispatches to the
// client's externalMethod().
kern_return_t is_io_connect_method
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    mach_vm_address_t ool_output,
    mach_vm_size_t *ool_output_size
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    IOMemoryDescriptor * outputMD = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    // Synchronous call: no async wake port or reference.
    args.asyncWakePort = MACH_PORT_NULL;
    args.asyncReference = 0;
    args.asyncReferenceCount = 0;
    args.structureVariableOutputData = 0;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    // Out-of-line buffers no larger than the inband limit should have been
    // sent inband; reject them as malformed IPC.
    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    // Wrap the out-of-line input in a copy-on-write descriptor so the
    // method sees a stable snapshot of the user buffer.
    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    // Zero scalar output slots so unset entries are never returned
    // uninitialized.
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;

    if (ool_output && ool_output_size)
    {
        // kIODirectionIn: the method writes into this user buffer.
        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
                                                        kIODirectionIn, current_task());
    }

    args.structureOutputDescriptor = outputMD;
    args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    // Report how much of each output the method actually produced.
    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;
    *ool_output_size = args.structureOutputDescriptorSize;

    if (inputMD)
        inputMD->release();
    if (outputMD)
        outputMD->release();

    return (ret);
}
3956
/* Routine io_async_user_client_method */
// Asynchronous external-method call: like is_io_connect_method, but carries
// a wake port and async reference array for completion notification.
kern_return_t is_io_connect_async_method
(
    io_connect_t connection,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    mach_vm_address_t ool_output,
    mach_vm_size_t * ool_output_size
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    IOMemoryDescriptor * outputMD = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    // Slot 0 of the async reference encodes the wake port; the 64-bit flag
    // records whether the calling task's map is 64-bit so the completion
    // path knows how to interpret the reference.
    reference[0] = (io_user_reference_t) wake_port;
    if (vm_map_is_64bit(get_task_map(current_task())))
        reference[0] |= kIOUCAsync64Flag;

    args.selector = selector;

    args.asyncWakePort = wake_port;
    args.asyncReference = reference;
    args.asyncReferenceCount = referenceCnt;

    args.structureVariableOutputData = 0;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    // Out-of-line buffers no larger than the inband limit should have been
    // sent inband; reject them as malformed IPC.
    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    // Zero scalar output slots so unset entries are never returned
    // uninitialized.
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;

    if (ool_output)
    {
        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
                                                        kIODirectionIn, current_task());
    }

    args.structureOutputDescriptor = outputMD;
    // NOTE(review): *ool_output_size is dereferenced unconditionally here,
    // while the sync variant guards with (ool_output_size ? ... : 0) —
    // presumably MIG always supplies the pointer; confirm.
    args.structureOutputDescriptorSize = *ool_output_size;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    // NOTE(review): unlike is_io_connect_method, *scalar_outputCnt is not
    // written back from args.scalarOutputCount after the call — callers keep
    // their requested count; confirm this asymmetry is intentional.
    *inband_outputCnt = args.structureOutputSize;
    *ool_output_size = args.structureOutputDescriptorSize;

    if (inputMD)
        inputMD->release();
    if (outputMD)
        outputMD->release();

    return (ret);
}
4045
4046 /* Routine io_connect_method_scalarI_scalarO */
4047 kern_return_t is_io_connect_method_scalarI_scalarO(
4048 io_object_t connect,
4049 uint32_t index,
4050 io_scalar_inband_t input,
4051 mach_msg_type_number_t inputCount,
4052 io_scalar_inband_t output,
4053 mach_msg_type_number_t * outputCount )
4054 {
4055 IOReturn err;
4056 uint32_t i;
4057 io_scalar_inband64_t _input;
4058 io_scalar_inband64_t _output;
4059
4060 mach_msg_type_number_t struct_outputCnt = 0;
4061 mach_vm_size_t ool_output_size = 0;
4062
4063 bzero(&_output[0], sizeof(_output));
4064 for (i = 0; i < inputCount; i++)
4065 _input[i] = SCALAR64(input[i]);
4066
4067 err = is_io_connect_method(connect, index,
4068 _input, inputCount,
4069 NULL, 0,
4070 0, 0,
4071 NULL, &struct_outputCnt,
4072 _output, outputCount,
4073 0, &ool_output_size);
4074
4075 for (i = 0; i < *outputCount; i++)
4076 output[i] = SCALAR32(_output[i]);
4077
4078 return (err);
4079 }
4080
// Adapter for the legacy IOExternalMethod (scalar-in / scalar-out) calling
// convention: validates the caller's counts against the method-table entry,
// then dispatches on inputCount, passing ARG32-narrowed inputs first and
// pointers to output slots in the remaining of the six argument positions.
kern_return_t shim_io_connect_method_scalarI_scalarO(
    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOMethod func;
    io_scalar_inband_t _output;
    IOReturn err;
    err = kIOReturnBadArgument;

    bzero(&_output[0], sizeof(_output));
    do {

        // Both counts must match the method-table entry exactly.
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 6:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        &_output[0] );
                break;
            case 4:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
                                        &_output[0], &_output[1], &_output[2],
                                        &_output[3] );
                break;
            case 1:
                err = (object->*func)(  ARG32(input[0]),
                                        &_output[0], &_output[1], &_output[2],
                                        &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)(  &_output[0], &_output[1], &_output[2],
                                        &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    // Narrow the collected 32-bit-compatible outputs back to the caller.
    uint32_t i;
    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
4159
4160 /* Routine io_async_method_scalarI_scalarO */
4161 kern_return_t is_io_async_method_scalarI_scalarO(
4162 io_object_t connect,
4163 mach_port_t wake_port,
4164 io_async_ref_t reference,
4165 mach_msg_type_number_t referenceCnt,
4166 uint32_t index,
4167 io_scalar_inband_t input,
4168 mach_msg_type_number_t inputCount,
4169 io_scalar_inband_t output,
4170 mach_msg_type_number_t * outputCount )
4171 {
4172 IOReturn err;
4173 uint32_t i;
4174 io_scalar_inband64_t _input;
4175 io_scalar_inband64_t _output;
4176 io_async_ref64_t _reference;
4177
4178 bzero(&_output[0], sizeof(_output));
4179 for (i = 0; i < referenceCnt; i++)
4180 _reference[i] = REF64(reference[i]);
4181
4182 mach_msg_type_number_t struct_outputCnt = 0;
4183 mach_vm_size_t ool_output_size = 0;
4184
4185 for (i = 0; i < inputCount; i++)
4186 _input[i] = SCALAR64(input[i]);
4187
4188 err = is_io_connect_async_method(connect,
4189 wake_port, _reference, referenceCnt,
4190 index,
4191 _input, inputCount,
4192 NULL, 0,
4193 0, 0,
4194 NULL, &struct_outputCnt,
4195 _output, outputCount,
4196 0, &ool_output_size);
4197
4198 for (i = 0; i < *outputCount; i++)
4199 output[i] = SCALAR32(_output[i]);
4200
4201 return (err);
4202 }
4203 /* Routine io_async_method_scalarI_structureO */
4204 kern_return_t is_io_async_method_scalarI_structureO(
4205 io_object_t connect,
4206 mach_port_t wake_port,
4207 io_async_ref_t reference,
4208 mach_msg_type_number_t referenceCnt,
4209 uint32_t index,
4210 io_scalar_inband_t input,
4211 mach_msg_type_number_t inputCount,
4212 io_struct_inband_t output,
4213 mach_msg_type_number_t * outputCount )
4214 {
4215 uint32_t i;
4216 io_scalar_inband64_t _input;
4217 io_async_ref64_t _reference;
4218
4219 for (i = 0; i < referenceCnt; i++)
4220 _reference[i] = REF64(reference[i]);
4221
4222 mach_msg_type_number_t scalar_outputCnt = 0;
4223 mach_vm_size_t ool_output_size = 0;
4224
4225 for (i = 0; i < inputCount; i++)
4226 _input[i] = SCALAR64(input[i]);
4227
4228 return (is_io_connect_async_method(connect,
4229 wake_port, _reference, referenceCnt,
4230 index,
4231 _input, inputCount,
4232 NULL, 0,
4233 0, 0,
4234 output, outputCount,
4235 NULL, &scalar_outputCnt,
4236 0, &ool_output_size));
4237 }
4238
4239 /* Routine io_async_method_scalarI_structureI */
4240 kern_return_t is_io_async_method_scalarI_structureI(
4241 io_connect_t connect,
4242 mach_port_t wake_port,
4243 io_async_ref_t reference,
4244 mach_msg_type_number_t referenceCnt,
4245 uint32_t index,
4246 io_scalar_inband_t input,
4247 mach_msg_type_number_t inputCount,
4248 io_struct_inband_t inputStruct,
4249 mach_msg_type_number_t inputStructCount )
4250 {
4251 uint32_t i;
4252 io_scalar_inband64_t _input;
4253 io_async_ref64_t _reference;
4254
4255 for (i = 0; i < referenceCnt; i++)
4256 _reference[i] = REF64(reference[i]);
4257
4258 mach_msg_type_number_t scalar_outputCnt = 0;
4259 mach_msg_type_number_t inband_outputCnt = 0;
4260 mach_vm_size_t ool_output_size = 0;
4261
4262 for (i = 0; i < inputCount; i++)
4263 _input[i] = SCALAR64(input[i]);
4264
4265 return (is_io_connect_async_method(connect,
4266 wake_port, _reference, referenceCnt,
4267 index,
4268 _input, inputCount,
4269 inputStruct, inputStructCount,
4270 0, 0,
4271 NULL, &inband_outputCnt,
4272 NULL, &scalar_outputCnt,
4273 0, &ool_output_size));
4274 }
4275
4276 /* Routine io_async_method_structureI_structureO */
4277 kern_return_t is_io_async_method_structureI_structureO(
4278 io_object_t connect,
4279 mach_port_t wake_port,
4280 io_async_ref_t reference,
4281 mach_msg_type_number_t referenceCnt,
4282 uint32_t index,
4283 io_struct_inband_t input,
4284 mach_msg_type_number_t inputCount,
4285 io_struct_inband_t output,
4286 mach_msg_type_number_t * outputCount )
4287 {
4288 uint32_t i;
4289 mach_msg_type_number_t scalar_outputCnt = 0;
4290 mach_vm_size_t ool_output_size = 0;
4291 io_async_ref64_t _reference;
4292
4293 for (i = 0; i < referenceCnt; i++)
4294 _reference[i] = REF64(reference[i]);
4295
4296 return (is_io_connect_async_method(connect,
4297 wake_port, _reference, referenceCnt,
4298 index,
4299 NULL, 0,
4300 input, inputCount,
4301 0, 0,
4302 output, outputCount,
4303 NULL, &scalar_outputCnt,
4304 0, &ool_output_size));
4305 }
4306
4307
// Adapter for the legacy IOExternalAsyncMethod (scalar-in / scalar-out)
// convention: narrows the async reference to 32-bit form, validates counts
// against the method-table entry, then dispatches on inputCount with the
// reference as the leading argument and output-slot pointers filling the
// remaining of the six positions.
kern_return_t shim_io_async_method_scalarI_scalarO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    io_scalar_inband_t _output;
    IOReturn err;
    io_async_ref_t reference;

    bzero(&_output[0], sizeof(_output));
    // Narrow the 64-bit async reference for the legacy method signature.
    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;

    do {

        // Both counts must match the method-table entry exactly.
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 6:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        &_output[0] );
                break;
            case 4:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]),
                                        &_output[0], &_output[1], &_output[2],
                                        &_output[3] );
                break;
            case 1:
                err = (object->*func)(  reference,
                                        ARG32(input[0]),
                                        &_output[0], &_output[1], &_output[2],
                                        &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)(  reference,
                                        &_output[0], &_output[1], &_output[2],
                                        &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    // Narrow collected outputs back to the caller's 32-bit slots.
    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
4401
4402
4403 /* Routine io_connect_method_scalarI_structureO */
4404 kern_return_t is_io_connect_method_scalarI_structureO(
4405 io_object_t connect,
4406 uint32_t index,
4407 io_scalar_inband_t input,
4408 mach_msg_type_number_t inputCount,
4409 io_struct_inband_t output,
4410 mach_msg_type_number_t * outputCount )
4411 {
4412 uint32_t i;
4413 io_scalar_inband64_t _input;
4414
4415 mach_msg_type_number_t scalar_outputCnt = 0;
4416 mach_vm_size_t ool_output_size = 0;
4417
4418 for (i = 0; i < inputCount; i++)
4419 _input[i] = SCALAR64(input[i]);
4420
4421 return (is_io_connect_method(connect, index,
4422 _input, inputCount,
4423 NULL, 0,
4424 0, 0,
4425 output, outputCount,
4426 NULL, &scalar_outputCnt,
4427 0, &ool_output_size));
4428 }
4429
// Adapter for the legacy IOExternalMethod (scalar-in / structure-out)
// convention: validates counts (count1 may be kIOUCVariableStructureSize
// for variable-length output), then dispatches on inputCount. Per the
// legacy ABI, the output size is passed to the method as an untyped
// (void *)outputCount argument when fewer than five scalars are used.
kern_return_t shim_io_connect_method_scalarI_structureO(

    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err;

    err = kIOReturnBadArgument;

    do {
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        // Fixed-size outputs must match exactly; variable-size entries skip
        // the check.
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        output );
                break;
            case 4:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)(  ARG32(input[0]), ARG32(input[1]),
                                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)(  ARG32(input[0]),
                                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)(  output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
4497
4498
// Adapter for the legacy IOExternalAsyncMethod (scalar-in / structure-out)
// convention: narrows the async reference, validates counts (count1 may be
// kIOUCVariableStructureSize for variable-length output), then dispatches
// on inputCount with the reference leading and the output buffer/size
// filling the trailing untyped argument slots.
kern_return_t shim_io_async_method_scalarI_structureO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod func;
    uint32_t i;
    IOReturn err;
    io_async_ref_t reference;

    // Narrow the 64-bit async reference for the legacy method signature.
    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    do {
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        // Fixed-size outputs must match exactly; variable-size entries skip
        // the check.
        if( (kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]), ARG32(input[4]),
                                        output );
                break;
            case 4:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        ARG32(input[3]),
                                        output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                        output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)(  reference,
                                        ARG32(input[0]), ARG32(input[1]),
                                        output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)(  reference,
                                        ARG32(input[0]),
                                        output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)(  reference,
                                        output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
4578
4579 /* Routine io_connect_method_scalarI_structureI */
4580 kern_return_t is_io_connect_method_scalarI_structureI(
4581 io_connect_t connect,
4582 uint32_t index,
4583 io_scalar_inband_t input,
4584 mach_msg_type_number_t inputCount,
4585 io_struct_inband_t inputStruct,
4586 mach_msg_type_number_t inputStructCount )
4587 {
4588 uint32_t i;
4589 io_scalar_inband64_t _input;
4590
4591 mach_msg_type_number_t scalar_outputCnt = 0;
4592 mach_msg_type_number_t inband_outputCnt = 0;
4593 mach_vm_size_t ool_output_size = 0;
4594
4595 for (i = 0; i < inputCount; i++)
4596 _input[i] = SCALAR64(input[i]);
4597
4598 return (is_io_connect_method(connect, index,
4599 _input, inputCount,
4600 inputStruct, inputStructCount,
4601 0, 0,
4602 NULL, &inband_outputCnt,
4603 NULL, &scalar_outputCnt,
4604 0, &ool_output_size));
4605 }
4606
4607 kern_return_t shim_io_connect_method_scalarI_structureI(
4608 IOExternalMethod * method,
4609 IOService * object,
4610 const io_user_scalar_t * input,
4611 mach_msg_type_number_t inputCount,
4612 io_struct_inband_t inputStruct,
4613 mach_msg_type_number_t inputStructCount )
4614 {
4615 IOMethod func;
4616 IOReturn err = kIOReturnBadArgument;
4617
4618 do
4619 {
4620 if (inputCount != method->count0)
4621 {
4622 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4623 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4624 continue;
4625 }
4626 if( (kIOUCVariableStructureSize != method->count1)
4627 && (inputStructCount != method->count1))
4628 {
4629 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4630 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4631 continue;
4632 }
4633
4634 func = method->func;
4635
4636 switch( inputCount) {
4637
4638 case 5:
4639 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4640 ARG32(input[3]), ARG32(input[4]),
4641 inputStruct );
4642 break;
4643 case 4:
4644 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
4645 ARG32(input[3]),
4646 inputStruct, (void *)(uintptr_t)inputStructCount );
4647 break;
4648 case 3:
4649 err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4650 inputStruct, (void *)(uintptr_t)inputStructCount,
4651 0 );
4652 break;
4653 case 2:
4654 err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
4655 inputStruct, (void *)(uintptr_t)inputStructCount,
4656 0, 0 );
4657 break;
4658 case 1:
4659 err = (object->*func)( ARG32(input[0]),
4660 inputStruct, (void *)(uintptr_t)inputStructCount,
4661 0, 0, 0 );
4662 break;
4663 case 0:
4664 err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
4665 0, 0, 0, 0 );
4666 break;
4667
4668 default:
4669 IOLog("%s: Bad method table\n", object->getName());
4670 }
4671 }
4672 while (false);
4673
4674 return( err);
4675 }
4676
4677 kern_return_t shim_io_async_method_scalarI_structureI(
4678 IOExternalAsyncMethod * method,
4679 IOService * object,
4680 mach_port_t asyncWakePort,
4681 io_user_reference_t * asyncReference,
4682 uint32_t asyncReferenceCount,
4683 const io_user_scalar_t * input,
4684 mach_msg_type_number_t inputCount,
4685 io_struct_inband_t inputStruct,
4686 mach_msg_type_number_t inputStructCount )
4687 {
4688 IOAsyncMethod func;
4689 uint32_t i;
4690 IOReturn err = kIOReturnBadArgument;
4691 io_async_ref_t reference;
4692
4693 for (i = 0; i < asyncReferenceCount; i++)
4694 reference[i] = REF32(asyncReference[i]);
4695
4696 do
4697 {
4698 if (inputCount != method->count0)
4699 {
4700 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
4701 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4702 continue;
4703 }
4704 if( (kIOUCVariableStructureSize != method->count1)
4705 && (inputStructCount != method->count1))
4706 {
4707 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4708 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
4709 continue;
4710 }
4711
4712 func = method->func;
4713
4714 switch( inputCount) {
4715
4716 case 5:
4717 err = (object->*func)( reference,
4718 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4719 ARG32(input[3]), ARG32(input[4]),
4720 inputStruct );
4721 break;
4722 case 4:
4723 err = (object->*func)( reference,
4724 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4725 ARG32(input[3]),
4726 inputStruct, (void *)(uintptr_t)inputStructCount );
4727 break;
4728 case 3:
4729 err = (object->*func)( reference,
4730 ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
4731 inputStruct, (void *)(uintptr_t)inputStructCount,
4732 0 );
4733 break;
4734 case 2:
4735 err = (object->*func)( reference,
4736 ARG32(input[0]), ARG32(input[1]),
4737 inputStruct, (void *)(uintptr_t)inputStructCount,
4738 0, 0 );
4739 break;
4740 case 1:
4741 err = (object->*func)( reference,
4742 ARG32(input[0]),
4743 inputStruct, (void *)(uintptr_t)inputStructCount,
4744 0, 0, 0 );
4745 break;
4746 case 0:
4747 err = (object->*func)( reference,
4748 inputStruct, (void *)(uintptr_t)inputStructCount,
4749 0, 0, 0, 0 );
4750 break;
4751
4752 default:
4753 IOLog("%s: Bad method table\n", object->getName());
4754 }
4755 }
4756 while (false);
4757
4758 return( err);
4759 }
4760
4761 /* Routine io_connect_method_structureI_structureO */
4762 kern_return_t is_io_connect_method_structureI_structureO(
4763 io_object_t connect,
4764 uint32_t index,
4765 io_struct_inband_t input,
4766 mach_msg_type_number_t inputCount,
4767 io_struct_inband_t output,
4768 mach_msg_type_number_t * outputCount )
4769 {
4770 mach_msg_type_number_t scalar_outputCnt = 0;
4771 mach_vm_size_t ool_output_size = 0;
4772
4773 return (is_io_connect_method(connect, index,
4774 NULL, 0,
4775 input, inputCount,
4776 0, 0,
4777 output, outputCount,
4778 NULL, &scalar_outputCnt,
4779 0, &ool_output_size));
4780 }
4781
4782 kern_return_t shim_io_connect_method_structureI_structureO(
4783 IOExternalMethod * method,
4784 IOService * object,
4785 io_struct_inband_t input,
4786 mach_msg_type_number_t inputCount,
4787 io_struct_inband_t output,
4788 IOByteCount * outputCount )
4789 {
4790 IOMethod func;
4791 IOReturn err = kIOReturnBadArgument;
4792
4793 do
4794 {
4795 if( (kIOUCVariableStructureSize != method->count0)
4796 && (inputCount != method->count0))
4797 {
4798 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4799 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4800 continue;
4801 }
4802 if( (kIOUCVariableStructureSize != method->count1)
4803 && (*outputCount != method->count1))
4804 {
4805 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4806 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4807 continue;
4808 }
4809
4810 func = method->func;
4811
4812 if( method->count1) {
4813 if( method->count0) {
4814 err = (object->*func)( input, output,
4815 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4816 } else {
4817 err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
4818 }
4819 } else {
4820 err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4821 }
4822 }
4823 while( false);
4824
4825
4826 return( err);
4827 }
4828
4829 kern_return_t shim_io_async_method_structureI_structureO(
4830 IOExternalAsyncMethod * method,
4831 IOService * object,
4832 mach_port_t asyncWakePort,
4833 io_user_reference_t * asyncReference,
4834 uint32_t asyncReferenceCount,
4835 io_struct_inband_t input,
4836 mach_msg_type_number_t inputCount,
4837 io_struct_inband_t output,
4838 mach_msg_type_number_t * outputCount )
4839 {
4840 IOAsyncMethod func;
4841 uint32_t i;
4842 IOReturn err;
4843 io_async_ref_t reference;
4844
4845 for (i = 0; i < asyncReferenceCount; i++)
4846 reference[i] = REF32(asyncReference[i]);
4847
4848 err = kIOReturnBadArgument;
4849 do
4850 {
4851 if( (kIOUCVariableStructureSize != method->count0)
4852 && (inputCount != method->count0))
4853 {
4854 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
4855 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
4856 continue;
4857 }
4858 if( (kIOUCVariableStructureSize != method->count1)
4859 && (*outputCount != method->count1))
4860 {
4861 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
4862 DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
4863 continue;
4864 }
4865
4866 func = method->func;
4867
4868 if( method->count1) {
4869 if( method->count0) {
4870 err = (object->*func)( reference,
4871 input, output,
4872 (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
4873 } else {
4874 err = (object->*func)( reference,
4875 output, outputCount, 0, 0, 0, 0 );
4876 }
4877 } else {
4878 err = (object->*func)( reference,
4879 input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
4880 }
4881 }
4882 while( false);
4883
4884 return( err);
4885 }
4886
#if !NO_KEXTD
// Set once, when kextd reports kIOCatalogKextdFinishedLaunching and the
// service root's busy count has been dropped (see is_io_catalog_send_data).
bool gIOKextdClearedBusy = false;
#endif
4890
4891 /* Routine io_catalog_send_data */
/*
 * MIG handler: receive serialized driver personalities or a catalogue
 * control flag from user space (kextd) and apply it to gIOCatalogue.
 *
 * master_port  - must be the master device port.
 * flag         - kIOCatalog* operation selector.
 * inData       - optional out-of-line vm_map_copy_t containing serialized XML.
 * inDataCount  - byte length of inData.
 * result       - out: the IOReturn of the catalogue operation.
 *
 * Returns a kern_return_t for the MIG transport itself; once the inData
 * copy object has been consumed this must be KERN_SUCCESS, with the real
 * status delivered through *result.
 */
kern_return_t is_io_catalog_send_data(
        mach_port_t		master_port,
        uint32_t                flag,
        io_buf_ptr_t 		inData,
        mach_msg_type_number_t 	inDataCount,
        kern_return_t *		result)
{
#if NO_KEXTD
    return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
    OSObject * obj = 0;
    vm_offset_t data;
    kern_return_t kr = kIOReturnError;

    //printf("io_catalog_send_data called. flag: %d\n", flag);

    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    // Only the kextd-lifecycle / linker flags may legally arrive without a
    // payload; every other operation requires serialized data.
    if( (flag != kIOCatalogRemoveKernelLinker && 
            flag != kIOCatalogKextdActive &&
            flag != kIOCatalogKextdFinishedLaunching) && 
        ( !inData || !inDataCount) ) 
    {
        return kIOReturnBadArgument;
    }

    if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
    {
        OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
        IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
        OSSafeReleaseNULL(taskName);
        // For now, fake success to not break applications relying on this function succeeding.
        // See <rdar://problem/32554970> for more details.
        return kIOReturnSuccess;
    }

    if (inData) {
        vm_map_offset_t map_data;

        // Cap the payload size before touching the copy object.
        if( inDataCount > sizeof(io_struct_inband_t) * 1024)
            return( kIOReturnMessageTooLarge);
        
        kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
		data = CAST_DOWN(vm_offset_t, map_data);

        if( kr != KERN_SUCCESS)
            return kr;

        // must return success after vm_map_copyout() succeeds

        if( inDataCount ) {
            // Unserialize the personalities/dictionary, then release the
            // copied-out kernel mapping regardless of the outcome.
            obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
	    vm_deallocate( kernel_map, data, inDataCount );
            if( !obj) {
                *result = kIOReturnNoMemory;
                return( KERN_SUCCESS);
            }
        }
    }

    switch ( flag ) {
        case kIOCatalogResetDrivers:
        case kIOCatalogResetDriversNoMatch: {
                OSArray * array;

                // Replace the whole personality set; optionally rematch.
                array = OSDynamicCast(OSArray, obj);
                if (array) {
                    if ( !gIOCatalogue->resetAndAddDrivers(array, 
                        flag == kIOCatalogResetDrivers) ) {

                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogAddDrivers: 
        case kIOCatalogAddDriversNoMatch: {
                OSArray * array;

                // Add personalities; optionally start matching on them.
                array = OSDynamicCast(OSArray, obj);
                if ( array ) {
                    if ( !gIOCatalogue->addDrivers( array , 
                                          flag == kIOCatalogAddDrivers) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveDrivers: 
        case kIOCatalogRemoveDriversNoMatch: {
                OSDictionary * dict;

                // Remove personalities matching the dictionary.
                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->removeDrivers( dict, 
                                          flag == kIOCatalogRemoveDrivers ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogStartMatching: {
                OSDictionary * dict;

                // Kick off matching for services described by the dictionary.
                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->startMatching( dict ) ) {
                        kr = kIOReturnError;
                    }
                }
                else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveKernelLinker:
            // Obsolete operation; the in-kernel linker is long gone.
            kr = KERN_NOT_SUPPORTED;
            break;

        case kIOCatalogKextdActive:
#if !NO_KEXTD
            IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
            OSKext::setKextdActive();

            /* Dump all nonloaded startup extensions; kextd will now send them
             * down on request.
             */
            OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
#endif
            kr = kIOReturnSuccess;
            break;

        case kIOCatalogKextdFinishedLaunching: {
#if !NO_KEXTD
                // Drop the boot-time busy hold on the service root exactly
                // once, when kextd first reports it has finished launching.
                if (!gIOKextdClearedBusy) {
                    IOService * serviceRoot = IOService::getServiceRoot();
                    if (serviceRoot) {
                        IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
                        serviceRoot->adjustBusy(-1);
                        gIOKextdClearedBusy = true;
                    }
                }
#endif
                kr = kIOReturnSuccess;
            }
            break;

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    if (obj) obj->release();
    
    *result = kr;
    return( KERN_SUCCESS);
#endif /* NO_KEXTD */
}
5063
5064 /* Routine io_catalog_terminate */
/*
 * MIG handler: terminate running services or remove driver personalities
 * by class/module name.  Requires both the master device port and
 * administrator privilege.
 */
kern_return_t is_io_catalog_terminate(
	mach_port_t		master_port,
	uint32_t		flag,
        io_name_t		name )
{
    kern_return_t	   kr;

    if( master_port != master_device_port )
        return kIOReturnNotPrivileged;

    // Administrator privilege is required in addition to the master port.
    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                            kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    switch ( flag ) {
#if !defined(SECURE_KERNEL)
        case kIOCatalogServiceTerminate:
            OSIterator *	iter;
            IOService *		service;

            // Walk the entire service plane and synchronously terminate
            // every service whose class matches 'name'.
            iter = IORegistryIterator::iterateOver(gIOServicePlane,
                                          kIORegistryIterateRecursively);
            if ( !iter )
                return kIOReturnNoMemory;

            do {
                iter->reset();
                while( (service = (IOService *)iter->getNextObject()) ) {
                    if( service->metaCast(name)) {
                        if ( !service->terminate( kIOServiceRequired
                                                | kIOServiceSynchronous) ) {
                            kr = kIOReturnUnsupported;
                            break;
                        }
                    }
                }
            // Restart the walk if the registry changed and invalidated the
            // iterator before we reached the end.
            } while( !service && !iter->isValid());
            iter->release();
            break;

        case kIOCatalogModuleUnload:
        case kIOCatalogModuleTerminate:
            // Drop the module's driver personalities; for ModuleUnload also
            // request the module itself be unloaded.
            kr = gIOCatalogue->terminateDriversForModule(name,
                                        flag == kIOCatalogModuleUnload);
            break;
#endif

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    return( kr );
}
5120
5121 /* Routine io_catalog_get_data */
5122 kern_return_t is_io_catalog_get_data(
5123 mach_port_t master_port,
5124 uint32_t flag,
5125 io_buf_ptr_t *outData,
5126 mach_msg_type_number_t *outDataCount)
5127 {
5128 kern_return_t kr = kIOReturnSuccess;
5129 OSSerialize * s;
5130
5131 if( master_port != master_device_port)
5132 return kIOReturnNotPrivileged;
5133
5134 //printf("io_catalog_get_data called. flag: %d\n", flag);
5135
5136 s = OSSerialize::withCapacity(4096);
5137 if ( !s )
5138 return kIOReturnNoMemory;
5139
5140 kr = gIOCatalogue->serializeData(flag, s);
5141
5142 if ( kr == kIOReturnSuccess ) {
5143 vm_offset_t data;
5144 vm_map_copy_t copy;
5145 vm_size_t size;
5146
5147 size = s->getLength();
5148 kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
5149 if ( kr == kIOReturnSuccess ) {
5150 bcopy(s->text(), (void *)data, size);
5151 kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
5152 (vm_map_size_t)size, true, &copy);
5153 *outData = (char *)copy;
5154 *outDataCount = size;
5155 }
5156 }
5157
5158 s->release();
5159
5160 return kr;
5161 }
5162
5163 /* Routine io_catalog_get_gen_count */
5164 kern_return_t is_io_catalog_get_gen_count(
5165 mach_port_t master_port,
5166 uint32_t *genCount)
5167 {
5168 if( master_port != master_device_port)
5169 return kIOReturnNotPrivileged;
5170
5171 //printf("io_catalog_get_gen_count called.\n");
5172
5173 if ( !genCount )
5174 return kIOReturnBadArgument;
5175
5176 *genCount = gIOCatalogue->getGenerationCount();
5177
5178 return kIOReturnSuccess;
5179 }
5180
5181 /* Routine io_catalog_module_loaded.
5182 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
5183 */
5184 kern_return_t is_io_catalog_module_loaded(
5185 mach_port_t master_port,
5186 io_name_t name)
5187 {
5188 if( master_port != master_device_port)
5189 return kIOReturnNotPrivileged;
5190
5191 //printf("io_catalog_module_loaded called. name %s\n", name);
5192
5193 if ( !name )
5194 return kIOReturnBadArgument;
5195
5196 gIOCatalogue->moduleHasLoaded(name);
5197
5198 return kIOReturnSuccess;
5199 }
5200
5201 kern_return_t is_io_catalog_reset(
5202 mach_port_t master_port,
5203 uint32_t flag)
5204 {
5205 if( master_port != master_device_port)
5206 return kIOReturnNotPrivileged;
5207
5208 switch ( flag ) {
5209 case kIOCatalogResetDefault:
5210 gIOCatalogue->reset();
5211 break;
5212
5213 default:
5214 return kIOReturnBadArgument;
5215 }
5216
5217 return kIOReturnSuccess;
5218 }
5219
5220 kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
5221 {
5222 kern_return_t result = kIOReturnBadArgument;
5223 IOUserClient *userClient;
5224
5225 if ((userClient = OSDynamicCast(IOUserClient,
5226 iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
5227 IOExternalTrap *trap;
5228 IOService *target = NULL;
5229
5230 trap = userClient->getTargetAndTrapForIndex(&target, args->index);
5231
5232 if (trap && target) {
5233 IOTrap func;
5234
5235 func = trap->func;
5236
5237 if (func) {
5238 result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
5239 }
5240 }
5241
5242 iokit_remove_connect_reference(userClient);
5243 }
5244
5245 return result;
5246 }
5247
5248 } /* extern "C" */
5249
/*
 * Default external-method dispatcher.
 *
 * If a dispatch descriptor is supplied (modern, Leopard-and-later path),
 * validate the scalar/structure input and output sizes against it and call
 * the descriptor's function.  Otherwise fall back to the pre-Leopard method
 * tables via getTargetAndMethodForIndex / getAsyncTargetAndMethodForIndex
 * and the shim_* adapters, selected by the method's kIOUCTypeMask flags.
 */
IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
					IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
	// Each check accepts either an exact match or a descriptor marked
	// kIOUCVariableStructureSize (variable size/count allowed).
	uint32_t count;
	count = dispatch->checkScalarInputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkStructureInputSize;
	if ((kIOUCVariableStructureSize != count) 
	    && (count != ((args->structureInputDescriptor) 
			    ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkScalarOutputCount;
	if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
	{
	    return (kIOReturnBadArgument);
	}

	count = dispatch->checkStructureOutputSize;
	if ((kIOUCVariableStructureSize != count) 
	    && (count != ((args->structureOutputDescriptor) 
			    ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
	{
	    return (kIOReturnBadArgument);
	}

	if (dispatch->function)
	    err = (*dispatch->function)(target, reference, args);
	else
	    err = kIOReturnNoCompletion;	/* implementator can dispatch */

	return (err);
    }


    // pre-Leopard API's don't do ool structs
    if (args->structureInputDescriptor || args->structureOutputDescriptor)
    {
       err = kIOReturnIPCError;
       return (err);
    }

    // Captured here and written back at the end; the synchronous
    // structure-output shims update this local via an IOByteCount pointer.
    structureOutputSize = args->structureOutputSize;

    if (args->asyncWakePort)
    {
	IOExternalAsyncMethod *	method;
	object = 0;
	if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object )
	    return (kIOReturnUnsupported);

	if (kIOUCForegroundOnly & method->flags)
	{
	    // Foreground-only methods are refused for GPU-denied tasks.
	    if (task_is_gpu_denied(current_task()))
		return (kIOReturnNotPermitted);
	}

	switch (method->flags & kIOUCTypeMask)
	{
	    case kIOUCScalarIStructI:
		err = shim_io_async_method_scalarI_structureI( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					(char *)args->structureInput, args->structureInputSize );
		break;

	    case kIOUCScalarIScalarO:
		err = shim_io_async_method_scalarI_scalarO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					args->scalarOutput, &args->scalarOutputCount );
		break;

	    case kIOUCScalarIStructO:
		// NOTE(review): these async shims write the returned size
		// through &args->structureOutputSize, but the assignment at
		// the bottom of this function then restores the value
		// captured before dispatch — confirm this is intended.
		err = shim_io_async_method_scalarI_structureO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureOutput, &args->structureOutputSize );
		break;


	    case kIOUCStructIStructO:
		err = shim_io_async_method_structureI_structureO( method, object,
					args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
					(char *)args->structureInput, args->structureInputSize,
					(char *) args->structureOutput, &args->structureOutputSize );
		break;

	    default:
		err = kIOReturnBadArgument;
		break;
	}
    }
    else
    {
	IOExternalMethod *	method;
	object = 0;
	if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object )
	    return (kIOReturnUnsupported);

	if (kIOUCForegroundOnly & method->flags)
	{
	    // Foreground-only methods are refused for GPU-denied tasks.
	    if (task_is_gpu_denied(current_task()))
		return (kIOReturnNotPermitted);
	}

	switch (method->flags & kIOUCTypeMask)
	{
	    case kIOUCScalarIStructI:
		err = shim_io_connect_method_scalarI_structureI( method, object,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureInput, args->structureInputSize );
		break;

	    case kIOUCScalarIScalarO:
		err = shim_io_connect_method_scalarI_scalarO( method, object,
					args->scalarInput, args->scalarInputCount,
					args->scalarOutput, &args->scalarOutputCount );
		break;

	    case kIOUCScalarIStructO:
		err = shim_io_connect_method_scalarI_structureO( method, object,
					args->scalarInput, args->scalarInputCount,
					(char *) args->structureOutput, &structureOutputSize );
		break;


	    case kIOUCStructIStructO:
		err = shim_io_connect_method_structureI_structureO( method, object,
					(char *) args->structureInput, args->structureInputSize,
					(char *) args->structureOutput, &structureOutputSize );
		break;

	    default:
		err = kIOReturnBadArgument;
		break;
	}
    }

    args->structureOutputSize = structureOutputSize;

    return (err);
}
5405
// Reserved vtable slots for binary-compatible future expansion of
// IOUserClient.  On 32-bit (!__LP64__) slots 0 and 1 are already consumed
// by externalMethod / registerNotificationPort overloads.
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);
5427