/*
 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <libkern/c++/OSKext.h>
#include <IOKit/IOKitServer.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include <IOKit/IORegistryEntry.h>
#include <IOKit/IOCatalogue.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOBSD.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/IODeviceTreeSupport.h>
#include <IOKit/system.h>
#include <libkern/OSDebug.h>

#include <sys/kauth.h>
#include <sys/codesign.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <sys/kauth.h>
#endif /* CONFIG_MACF */

#include <IOKit/assert.h>

#include "IOServicePrivate.h"
#include "IOKitKernelInternal.h"
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
#define REF32(x)    ((int)(x))

enum
{
    kIOUCAsync0Flags          = 3ULL,
    kIOUCAsync64Flag          = 1ULL,
    kIOUCAsyncErrorLoggedFlag = 2ULL
};
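// The flag values above are packed into the low bits of
// asyncRef[kIOAsyncReservedIndex] alongside the wake port; see
// setAsyncReference64() and _sendAsyncResult64() below, which mask them
// off with ~kIOUCAsync0Flags before using the port value.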
#if IOKITSTATS

#define IOStatisticsRegisterCounter() \
do { \
    reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
    if (reserved) \
        IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
    IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
#if DEVELOPMENT || DEBUG

#define FAKE_STACK_FRAME(a)                                             \
    const void ** __frameptr;                                           \
    const void * __retaddr;                                             \
    __frameptr = (typeof(__frameptr)) __builtin_frame_address(0);       \
    __retaddr = __frameptr[1];                                          \
    __frameptr[1] = (a);

#define FAKE_STACK_FRAME_END()                                          \
    __frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <mach/mach_traps.h>
#include <vm/vm_map.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.

class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    ipc_port_t  port;
    UInt32      mscount;
    UInt8       holdDestroy;

    static IOMachPort * portForObject( OSObject * obj,
                                       ipc_kobject_type_t type );
    static bool noMoreSendersForObject( OSObject * obj,
                                        ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    static void releasePortForObject( OSObject * obj,
                                      ipc_kobject_type_t type );
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
                                                  io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};

#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)

static IOLock * gIOObjectPortLock;
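// gIOObjectPortLock serializes every lookup and mutation of the per-type
// port dictionaries below (gIOObjectPorts / gIOConnectPorts /
// gIOIdentifierPorts), and also guards the IOUserClient __ipc / __ipcFinal
// bookkeeping used during no-senders teardown.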
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// not in dictForType() for debugging ease
static OSDictionary * gIOObjectPorts;
static OSDictionary * gIOConnectPorts;
static OSDictionary * gIOIdentifierPorts;

OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
{
    OSDictionary ** dict;

    switch (type)
    {
        case IKOT_IOKIT_OBJECT:
            dict = &gIOObjectPorts;
            break;
        case IKOT_IOKIT_CONNECT:
            dict = &gIOConnectPorts;
            break;
        case IKOT_IOKIT_IDENT:
            dict = &gIOIdentifierPorts;
            break;
        default:
            panic("dictForType %d", type);
            dict = NULL;
            break;
    }

    if( 0 == *dict)
        *dict = OSDictionary::withCapacity( 1 );

    return( *dict );
}
IOMachPort * IOMachPort::portForObject ( OSObject * obj,
                                         ipc_kobject_type_t type )
{
    IOMachPort *   inst = 0;
    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);

    do {

        dict = dictForType( type );
        if( !dict)
            continue;

        if( (inst = (IOMachPort *)
                dict->getObject( (const OSSymbol *) obj ))) {
            inst->mscount++;
            inst->retain();
            continue;
        }

        inst = new IOMachPort;
        if( inst && !inst->init()) {
            inst = 0;
            continue;
        }

        inst->port = iokit_alloc_object_port( obj, type );
        if( inst->port) {
            // retains obj
            dict->setObject( (const OSSymbol *) obj, inst );
            inst->mscount++;
        } else {
            inst->release();
            inst = 0;
        }

    } while( false );

    IOUnlock( gIOObjectPortLock);

    return( inst );
}
bool IOMachPort::noMoreSendersForObject( OSObject * obj,
                                         ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary * dict;
    IOMachPort *   machPort;
    IOUserClient * uc;
    bool           destroyed = true;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {

        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort) {

            destroyed = (machPort->mscount <= *mscount);
            if (!destroyed) *mscount = machPort->mscount;
            else
            {
                if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
                {
                    uc->noMoreSenders();
                }
                dict->removeObject( (const OSSymbol *) obj );
            }
        }
    }

    IOUnlock( gIOObjectPortLock);

    return( destroyed );
}
void IOMachPort::releasePortForObject( OSObject * obj,
                                       ipc_kobject_type_t type )
{
    OSDictionary * dict;
    IOMachPort *   machPort;

    assert(IKOT_IOKIT_CONNECT != type);

    IOTakeLock( gIOObjectPortLock);

    if( (dict = dictForType( type ))) {

        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort && !machPort->holdDestroy)
            dict->removeObject( (const OSSymbol *) obj );
    }

    IOUnlock( gIOObjectPortLock);
}

void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
{
    OSDictionary * dict;
    IOMachPort *   machPort;

    IOLockLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
        machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if( machPort)
            machPort->holdDestroy = true;
    }

    IOLockUnlock( gIOObjectPortLock );
}
void IOUserClient::destroyUserReferences( OSObject * obj )
{
    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    OSDictionary * dict;

    IOTakeLock( gIOObjectPortLock);

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
        IOMachPort *   port;
        IOUserClient * uc;

        port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
        if (port)
        {
            if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
            {
                dict->setObject((const OSSymbol *) uc->mappings, port);
                iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

                uc->mappings->release();
                uc->mappings = 0;
            }
            dict->removeObject( (const OSSymbol *) obj );
        }
    }

    IOUnlock( gIOObjectPortLock);
}
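// Note: for IKOT_IOKIT_CONNECT ports the routine above does not simply drop
// the entry. When the dying user client still has a live 'mappings' object,
// the send right is re-keyed onto that object with iokit_switch_object_port(),
// so existing memory mappings keep a valid backing port after the client goes
// away.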
mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
                                                   io_object_t obj, ipc_kobject_type_t type )
{
    return( iokit_make_send_right( task, obj, type ));
}

void IOMachPort::free( void )
{
    if( port)
        iokit_destroy_object_port( port );
    super::free();
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject * userIteratorObject;
    IOLock   * lock;

    static IOUserIterator * withIterator(OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    virtual OSObject * copyNextObject();
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

#define holdNotify userIteratorObject

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
    IOUserIterator * me;

    if (!iter) return (0);

    me = new IOUserIterator;
    if (me && !me->init())
    {
        me->release();
        me = 0;
    }
    if (!me) return (me);
    me->userIteratorObject = iter;

    return (me);
}

bool
IOUserIterator::init( void )
{
    if (!OSObject::init()) return (false);

    lock = IOLockAlloc();
    if (!lock) return (false);

    return (true);
}

void
IOUserIterator::free()
{
    if (userIteratorObject) userIteratorObject->release();
    if (lock) IOLockFree(lock);
    OSObject::free();
}

void
IOUserIterator::reset()
{
    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ((OSIterator *)userIteratorObject)->reset();
    IOLockUnlock(lock);
}

bool
IOUserIterator::isValid()
{
    bool ret;

    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ret = ((OSIterator *)userIteratorObject)->isValid();
    IOLockUnlock(lock);

    return (ret);
}

OSObject *
IOUserIterator::getNextObject()
{
    assert(false);
    return (NULL);
}

OSObject *
IOUserIterator::copyNextObject()
{
    OSObject * ret = NULL;

    IOLockLock(lock);
    if (userIteratorObject) {
        ret = ((OSIterator *)userIteratorObject)->getNextObject();
        if (ret) ret->retain();
    }
    IOLockUnlock(lock);

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// functions called from osfmk/device/iokit_rpc.c

void
iokit_add_reference( io_object_t obj, ipc_kobject_type_t type )
{
    IOUserClient * uc;

    if (!obj) return;

    if ((IKOT_IOKIT_CONNECT == type)
      && (uc = OSDynamicCast(IOUserClient, obj)))
    {
        OSIncrementAtomic(&uc->__ipc);
    }

    obj->retain();
}

void
iokit_remove_reference( io_object_t obj )
{
    if( obj)
        obj->release();
}

void
iokit_remove_connect_reference( io_object_t obj )
{
    IOUserClient * uc;
    bool finalize = false;

    if (!obj) return;

    if ((uc = OSDynamicCast(IOUserClient, obj)))
    {
        if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
        {
            IOLockLock(gIOObjectPortLock);
            if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
            IOLockUnlock(gIOObjectPortLock);
        }
        if (finalize) uc->scheduleFinalize(true);
    }

    obj->release();
}

bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
    IOUserClient * uc;
    bool           ok = true;

    if ((uc = OSDynamicCast(IOUserClient, obj)))
    {
        IOLockLock(gIOObjectPortLock);
        if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
        IOLockUnlock(gIOObjectPortLock);
    }
    return (ok);
}
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
    IOMachPort * machPort;
    ipc_port_t   port;

    if( (machPort = IOMachPort::portForObject( obj, type ))) {

        port = machPort->port;
        if( port)
            iokit_retain_port( port );

        machPort->release();

    } else
        port = NULL;

    return( port );
}

kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
                   ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient *       client;
    IOMemoryMap *        map;
    IOUserNotification * notify;

    if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
        return( kIOReturnNotReady );

    if( IKOT_IOKIT_CONNECT == type)
    {
        if( (client = OSDynamicCast( IOUserClient, obj )))
        {
            IOStatisticsClientCall();
            IOLockLock(client->lock);
            client->clientDied();
            IOLockUnlock(client->lock);
        }
    }
    else if( IKOT_IOKIT_OBJECT == type)
    {
        if( (map = OSDynamicCast( IOMemoryMap, obj )))
            map->taggedRelease(obj);
        else if( (notify = OSDynamicCast( IOUserNotification, obj )))
            notify->setNotification( 0 );
    }

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
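// The two classes below carry service (publish/terminate) and interest
// (message) notifications to user space. Each one pre-builds a Mach "ping"
// message at init() time and re-sends it from its handler; see
// IOServiceUserNotification::handler() and
// IOServiceMessageUserNotification::handler() further down.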
class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    struct PingMsg {
        mach_msg_header_t       msgHdr;
        OSNotificationHeader64  notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };

    PingMsg   * pingMsg;
    vm_size_t   msgSize;
    OSArray   * newSet;
    bool        armed;
    bool        ipcLogged;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    static bool _handler( void * target,
                          void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};

class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
        mach_msg_header_t           msgHdr;
        mach_msg_body_t             msgBody;
        mach_msg_port_descriptor_t  ports[1];
        OSNotificationHeader64      notifyHeader __attribute__ ((packed));
    };

    PingMsg   * pingMsg;
    vm_size_t   msgSize;
    uint8_t     clientIs64;
    int         owningPID;
    bool        ipcLogged;

public:

    virtual bool init( mach_port_t port, natural_t type,
                       void * reference, vm_size_t referenceSize,
                       vm_size_t extraSize,
                       bool clientIs64 );

    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    static IOReturn _handler( void * target, void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
                              UInt32 messageType, IOService * provider,
                              void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
    virtual OSObject * copyNextObject() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef  super
#define super IOUserIterator
OSDefineMetaClass( IOUserNotification, IOUserIterator )
OSDefineAbstractStructors( IOUserNotification, IOUserIterator )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOUserNotification::free( void )
{
    if (holdNotify)
    {
        assert(OSDynamicCast(IONotifier, holdNotify));
        ((IONotifier *)holdNotify)->remove();
        holdNotify = 0;
    }
    // can't be in handler now

    super::free();
}

void IOUserNotification::setNotification( IONotifier * notify )
{
    OSObject * previousNotify;

    IOLockLock( gIOObjectPortLock );

    previousNotify = holdNotify;
    holdNotify = notify;

    IOLockUnlock( gIOObjectPortLock );

    if( previousNotify)
    {
        assert(OSDynamicCast(IONotifier, previousNotify));
        ((IONotifier *)previousNotify)->remove();
    }
}

void IOUserNotification::reset()
{
    // ?
}

bool IOUserNotification::isValid()
{
    return( true );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef  super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
                                      void * reference, vm_size_t referenceSize,
                                      bool clientIs64 )
{
    if( !super::init())
        return( false );

    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits        = MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size        = msgSize;
    pingMsg->msgHdr.msgh_id          = kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}

void IOServiceUserNotification::invalidatePort(void)
{
    if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
}
void IOServiceUserNotification::free( void )
{
    PingMsg   * _pingMsg;
    vm_size_t   _msgSize;
    OSArray   * _newSet;

    _pingMsg = pingMsg;
    _msgSize = msgSize;
    _newSet  = newSet;

    super::free();

    if( _pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree(_pingMsg, _msgSize);
    }

    if( _newSet)
        _newSet->release();
}
bool IOServiceUserNotification::_handler( void * target,
                                          void * ref, IOService * newService, IONotifier * notifier )
{
    return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
}

bool IOServiceUserNotification::handler( void * ref,
                                         IOService * newService )
{
    unsigned int  count;
    kern_return_t kr;
    ipc_port_t    port = NULL;
    bool          sendPing = false;

    IOLockLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

        newSet->setObject( newService );
        if( (sendPing = (armed && (0 == count))))
            armed = false;
    }

    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
        IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    IOLockUnlock( lock );

    if( sendPing) {
        if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
            pingMsg->msgHdr.msgh_local_port = port;
        else
            pingMsg->msgHdr.msgh_local_port = NULL;

        kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
                        pingMsg->msgHdr.msgh_size,
                        (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
                        0);
        if( port)
            iokit_release_port( port );

        if( (KERN_SUCCESS != kr) && !ipcLogged)
        {
            ipcLogged = true;
            IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
        }
    }

    return( true );
}

OSObject * IOServiceUserNotification::getNextObject()
{
    assert(false);
    return (NULL);
}

OSObject * IOServiceUserNotification::copyNextObject()
{
    unsigned int count;
    OSObject *   result;

    IOLockLock(lock);

    count = newSet->getCount();
    if( count ) {
        result = newSet->getObject( count - 1 );
        result->retain();
        newSet->removeObject( count - 1);
    } else {
        result = 0;
        armed = true;
    }

    IOLockUnlock(lock);

    return (result);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
                                             void * reference, vm_size_t referenceSize, vm_size_t extraSize,
                                             bool client64 )
{
    if( !super::init())
        return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
        return( false );

    clientIs64 = client64;

    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
        return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port = port;
    pingMsg->msgHdr.msgh_bits        = MACH_MSGH_BITS_COMPLEX
                                     | MACH_MSGH_BITS(
                                            MACH_MSG_TYPE_COPY_SEND /*remote*/,
                                            MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size        = msgSize;
    pingMsg->msgHdr.msgh_id          = kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name        = 0;
    pingMsg->ports[0].disposition = MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type        = MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size = extraSize;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
void IOServiceMessageUserNotification::invalidatePort(void)
{
    if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
}

void IOServiceMessageUserNotification::free( void )
{
    PingMsg   * _pingMsg;
    vm_size_t   _msgSize;

    _pingMsg = pingMsg;
    _msgSize = msgSize;

    super::free();

    if( _pingMsg && _msgSize) {
        if (_pingMsg->msgHdr.msgh_remote_port) {
            iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
        }
        IOFree( _pingMsg, _msgSize);
    }
}
IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
                                                     UInt32 messageType, IOService * provider,
                                                     void * argument, vm_size_t argSize )
{
    return( ((IOServiceMessageUserNotification *) target)->handler(
                                ref, messageType, provider, argument, argSize));
}

IOReturn IOServiceMessageUserNotification::handler( void * ref,
                                                    UInt32 messageType, IOService * provider,
                                                    void * messageArgument, vm_size_t callerArgSize )
{
    enum                         { kLocalMsgSize = 0x100 };
    uint64_t                     stackMsg[kLocalMsgSize / sizeof(uint64_t)];
    void *                       allocMsg;
    kern_return_t                kr;
    vm_size_t                    argSize;
    vm_size_t                    thisMsgSize;
    ipc_port_t                   thisPort, providerPort;
    struct PingMsg *             thisMsg;
    IOServiceInterestContent64 * data;

    if (kIOMessageCopyClientID == messageType)
    {
        *((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
        return (kIOReturnSuccess);
    }

    if (callerArgSize == 0)
    {
        if (clientIs64) argSize = sizeof(data->messageArgument[0]);
        else            argSize = sizeof(uint32_t);
    }
    else
    {
        if( callerArgSize > kIOUserNotifyMaxMessageSize)
            callerArgSize = kIOUserNotifyMaxMessageSize;
        argSize = callerArgSize;
    }

    // adjust message size for ipc restrictions
    natural_t type;
    type  = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    thisMsgSize = msgSize
                + sizeof( IOServiceInterestContent64 )
                - sizeof( data->messageArgument)
                + argSize;

    if (thisMsgSize > sizeof(stackMsg))
    {
        allocMsg = IOMalloc(thisMsgSize);
        if (!allocMsg) return (kIOReturnNoMemory);
        thisMsg = (typeof(thisMsg)) allocMsg;
    }
    else
    {
        allocMsg = 0;
        thisMsg = (typeof(thisMsg)) stackMsg;
    }

    bcopy(pingMsg, thisMsg, msgSize);
    thisMsg->notifyHeader.type = type;
    data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
        // == pingMsg->notifyHeader.content;
    data->messageType = messageType;

    if (callerArgSize == 0)
    {
        data->messageArgument[0] = (io_user_reference_t) messageArgument;
        if (!clientIs64)
        {
            data->messageArgument[0] |= (data->messageArgument[0] << 32);
        }
    }
    else
    {
        bcopy( messageArgument, data->messageArgument, callerArgSize );
        bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
    }

    thisMsg->notifyHeader.type = type;
    thisMsg->msgHdr.msgh_size  = thisMsgSize;

    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    thisMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    thisMsg->msgHdr.msgh_local_port = thisPort;

    kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
                    thisMsg->msgHdr.msgh_size,
                    (MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
                    0);
    if( thisPort)
        iokit_release_port( thisPort );
    if( providerPort)
        iokit_release_port( providerPort );

    if (allocMsg)
        IOFree(allocMsg, thisMsgSize);

    if((KERN_SUCCESS != kr) && !ipcLogged)
    {
        ipcLogged = true;
        IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return( kIOReturnSuccess );
}

OSObject * IOServiceMessageUserNotification::getNextObject()
{
    return( 0 );
}

OSObject * IOServiceMessageUserNotification::copyNextObject()
{
    return( NULL );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef  super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

IOLock * gIOUserClientOwnersLock;

void IOUserClient::initialize( void )
{
    gIOObjectPortLock       = IOLockAlloc();
    gIOUserClientOwnersLock = IOLockAlloc();
    assert(gIOObjectPortLock && gIOUserClientOwnersLock);
}
void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
                                     mach_port_t wakePort,
                                     void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
                                       mach_port_t wakePort,
                                       mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
                                         | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
                                       mach_port_t wakePort,
                                       mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
    setAsyncReference64(asyncRef, wakePort, callback, refcon);
    if (vm_map_is_64bit(get_task_map(task))) {
        asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
    }
}
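/*
 * Usage sketch (illustrative only, not code from this file): the MIG dispatch
 * routines fill in an OSAsyncReference64 with setAsyncReference64() when a
 * client invokes an async selector; a driver typically keeps a copy of that
 * reference and later completes the request, for example:
 *
 *     io_user_reference_t retArgs[1] = { byteCount };   // hypothetical value
 *     sendAsyncResult64(savedAsyncRef, kIOReturnSuccess, retArgs, 1);
 *
 * 'savedAsyncRef' and 'byteCount' are placeholder names for this sketch.
 */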
static OSDictionary * CopyConsoleUser(UInt32 uid)
{
    OSArray *      array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
            IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
             (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
             idx++)
        {
            OSNumber * num;

            if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey)))
              && (uid == num->unsigned32BitValue())) {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return (user);
}

static OSDictionary * CopyUserOnConsole(void)
{
    OSArray *      array;
    OSDictionary * user = 0;

    if ((array = OSDynamicCast(OSArray,
            IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey))))
    {
        for (unsigned int idx = 0;
             (user = OSDynamicCast(OSDictionary, array->getObject(idx)));
             idx++)
        {
            if (kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
            {
                user->retain();
                break;
            }
        }
        array->release();
    }
    return (user);
}
IOReturn IOUserClient::clientHasAuthorization( task_t task,
                                               IOService * service )
{
    proc_t p;

    p = (proc_t) get_bsdtask_info(task);
    if (p)
    {
        uint64_t authorizationID;

        authorizationID = proc_uniqueid(p);
        if (authorizationID)
        {
            if (service->getAuthorizationID() == authorizationID)
            {
                return (kIOReturnSuccess);
            }
        }
    }

    return (kIOReturnNotPermitted);
}
IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
                                           const char * privilegeName )
{
    kern_return_t          kr;
    security_token_t       token;
    mach_msg_type_number_t count;
    task_t                 task;
    OSDictionary *         user;
    bool                   secureConsole;

    if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
                 sizeof(kIOClientPrivilegeForeground)))
    {
        if (task_is_gpu_denied(current_task()))
            return (kIOReturnNotPrivileged);
        else
            return (kIOReturnSuccess);
    }

    if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
                 sizeof(kIOClientPrivilegeConsoleSession)))
    {
        kauth_cred_t cred;
        proc_t       p;

        task = (task_t) securityToken;
        if (!task)
            task = current_task();
        p = (proc_t) get_bsdtask_info(task);
        kr = kIOReturnNotPrivileged;

        if (p && (cred = kauth_cred_proc_ref(p)))
        {
            user = CopyUserOnConsole();
            if (user)
            {
                OSNumber * num;
                if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
                  && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
                {
                    kr = kIOReturnSuccess;
                }
                user->release();
            }
            kauth_cred_unref(&cred);
        }
        return (kr);
    }

    if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
                                  sizeof(kIOClientPrivilegeSecureConsoleProcess))))
        task = (task_t)((IOUCProcessToken *)securityToken)->token;
    else
        task = (task_t)securityToken;

    count = TASK_SECURITY_TOKEN_COUNT;
    kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

    if (KERN_SUCCESS != kr)
    {}
    else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
                      sizeof(kIOClientPrivilegeAdministrator))) {
        if (0 != token.val[0])
            kr = kIOReturnNotPrivileged;
    } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
                        sizeof(kIOClientPrivilegeLocalUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user )
            user->release();
        else
            kr = kIOReturnNotPrivileged;
    } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
                                         sizeof(kIOClientPrivilegeConsoleUser))) {
        user = CopyConsoleUser(token.val[0]);
        if ( user ) {
            if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
                kr = kIOReturnNotPrivileged;
            else if ( secureConsole ) {
                OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
                if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
                    kr = kIOReturnNotPrivileged;
            }
            user->release();
        }
        else
            kr = kIOReturnNotPrivileged;
    } else
        kr = kIOReturnUnsupported;

    return (kr);
}
* IOUserClient::copyClientEntitlement( task_t task
,
1291 const char * entitlement
)
1293 #define MAX_ENTITLEMENTS_LEN (128 * 1024)
1297 char procname
[MAXCOMLEN
+ 1] = "";
1299 void *entitlements_blob
= NULL
;
1300 char *entitlements_data
= NULL
;
1301 OSObject
*entitlements_obj
= NULL
;
1302 OSDictionary
*entitlements
= NULL
;
1303 OSString
*errorString
= NULL
;
1304 OSObject
*value
= NULL
;
1306 p
= (proc_t
)get_bsdtask_info(task
);
1310 proc_name(pid
, procname
, (int)sizeof(procname
));
1312 if (cs_entitlements_blob_get(p
, &entitlements_blob
, &len
) != 0)
1315 if (len
<= offsetof(CS_GenericBlob
, data
))
1319 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
1320 * we'll try to parse in the kernel.
1322 len
-= offsetof(CS_GenericBlob
, data
);
1323 if (len
> MAX_ENTITLEMENTS_LEN
) {
1324 IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname
, pid
, len
, MAX_ENTITLEMENTS_LEN
);
1329 * OSUnserializeXML() expects a nul-terminated string, but that isn't
1330 * what is stored in the entitlements blob. Copy the string and
1333 entitlements_data
= (char *)IOMalloc(len
+ 1);
1334 if (entitlements_data
== NULL
)
1336 memcpy(entitlements_data
, ((CS_GenericBlob
*)entitlements_blob
)->data
, len
);
1337 entitlements_data
[len
] = '\0';
1339 entitlements_obj
= OSUnserializeXML(entitlements_data
, len
+ 1, &errorString
);
1340 if (errorString
!= NULL
) {
1341 IOLog("failed to parse entitlements for %s[%u]: %s\n", procname
, pid
, errorString
->getCStringNoCopy());
1344 if (entitlements_obj
== NULL
)
1347 entitlements
= OSDynamicCast(OSDictionary
, entitlements_obj
);
1348 if (entitlements
== NULL
)
1351 /* Fetch the entitlement value from the dictionary. */
1352 value
= entitlements
->getObject(entitlement
);
1357 if (entitlements_data
!= NULL
)
1358 IOFree(entitlements_data
, len
+ 1);
1359 if (entitlements_obj
!= NULL
)
1360 entitlements_obj
->release();
1361 if (errorString
!= NULL
)
1362 errorString
->release();
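/*
 * Usage sketch (illustrative, not from this file): a user client can gate an
 * external method on an entitlement, e.g.:
 *
 *     OSObject * obj = IOUserClient::copyClientEntitlement(owningTask,
 *                          "com.example.driver.allow");   // hypothetical key
 *     bool allowed = (obj == kOSBooleanTrue);
 *     OSSafeReleaseNULL(obj);
 *
 * The returned object is retained, so the caller must release it, as the
 * value->retain() on the success path above implies.
 */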
bool IOUserClient::init()
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::init(OSDictionary * dictionary)
{
    if (getPropertyTable() || super::init(dictionary))
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type )
{
    if (getPropertyTable() || super::init())
        return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
                                void * securityID,
                                UInt32 type,
                                OSDictionary * properties )
{
    bool ok;

    ok = super::init( properties );
    ok &= initWithTask( owningTask, securityID, type );

    return( ok );
}

bool IOUserClient::reserve()
{
    if (!reserved) {
        reserved = IONew(ExpansionData, 1);
        if (!reserved) {
            return false;
        }
    }
    setTerminateDefer(NULL, true);
    IOStatisticsRegisterCounter();

    return true;
}
struct IOUserClientOwner
{
    task_t         task;
    queue_chain_t  taskLink;
    IOUserClient * uc;
    queue_chain_t  ucLink;
};

IOReturn
IOUserClient::registerOwner(task_t task)
{
    IOUserClientOwner * owner;
    IOReturn            ret;
    bool                newOwner;

    IOLockLock(gIOUserClientOwnersLock);

    newOwner = true;
    ret = kIOReturnSuccess;

    if (!owners.next) queue_init(&owners);
    else
    {
        queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
        {
            if (task != owner->task) continue;
            newOwner = false;
            break;
        }
    }
    if (newOwner)
    {
        owner = IONew(IOUserClientOwner, 1);
        if (!owner) ret = kIOReturnNoMemory;
        else
        {
            owner->task = task;
            owner->uc   = this;
            queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
            queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
        }
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    return (ret);
}
void
IOUserClient::noMoreSenders(void)
{
    IOUserClientOwner * owner;

    IOLockLock(gIOUserClientOwnersLock);

    if (owners.next)
    {
        while (!queue_empty(&owners))
        {
            owner = (IOUserClientOwner *)(void *) queue_first(&owners);
            queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
            queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
            IODelete(owner, IOUserClientOwner, 1);
        }
        owners.next = owners.prev = NULL;
    }

    IOLockUnlock(gIOUserClientOwnersLock);
}
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
    IOUserClientOwner * owner;
    IOUserClient      * dead;
    IOUserClient      * uc;
    queue_head_t      * taskque;

    IOLockLock(gIOUserClientOwnersLock);

    taskque = task_io_user_clients(task);
    dead = NULL;
    while (!queue_empty(taskque))
    {
        owner = (IOUserClientOwner *)(void *) queue_first(taskque);
        uc = owner->uc;
        queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
        queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
        if (queue_empty(&uc->owners))
        {
            uc->retain();
            IOLog("destroying out of band connect for %s\n", uc->getName());
            // now using the uc queue head as a singly linked queue,
            // leaving .next as NULL to mark it empty
            uc->owners.next = NULL;
            uc->owners.prev = (queue_entry_t) dead;
            dead = uc;
        }
        IODelete(owner, IOUserClientOwner, 1);
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    while (dead)
    {
        uc = dead;
        dead = (IOUserClient *)(void *) dead->owners.prev;
        uc->owners.prev = NULL;
        if (uc->sharedInstance || !uc->closed) uc->clientDied();
        uc->release();
    }

    return (KERN_SUCCESS);
}
void IOUserClient::free()
{
    if( mappings) mappings->release();
    if (lock) IOLockFree(lock);

    IOStatisticsUnregisterCounter();

    assert(!owners.next);
    assert(!owners.prev);

    if (reserved) IODelete(reserved, ExpansionData, 1);

    super::free();
}
IOUserClient::clientDied( void )
1551 IOReturn ret
= kIOReturnNotReady
;
1553 if (sharedInstance
|| OSCompareAndSwap8(0, 1, &closed
))
1555 ret
= clientClose();
1561 IOReturn
IOUserClient::clientClose( void )
1563 return( kIOReturnUnsupported
);
1566 IOService
* IOUserClient::getService( void )
1571 IOReturn
IOUserClient::registerNotificationPort(
1572 mach_port_t
/* port */,
1574 UInt32
/* refCon */)
1576 return( kIOReturnUnsupported
);
1579 IOReturn
IOUserClient::registerNotificationPort(
1582 io_user_reference_t refCon
)
1584 return (registerNotificationPort(port
, type
, (UInt32
) refCon
));
1587 IOReturn
IOUserClient::getNotificationSemaphore( UInt32 notification_type
,
1588 semaphore_t
* semaphore
)
1590 return( kIOReturnUnsupported
);
1593 IOReturn
IOUserClient::connectClient( IOUserClient
* /* client */ )
1595 return( kIOReturnUnsupported
);
IOReturn IOUserClient::clientMemoryForType( UInt32 type,
                                            IOOptionBits * options,
                                            IOMemoryDescriptor ** memory )
{
    return( kIOReturnUnsupported );
}

IOMemoryMap * IOUserClient::mapClientMemory(
        IOOptionBits     type,
        task_t           task,
        IOOptionBits     mapFlags,
        IOVirtualAddress atAddress )
{
    return (NULL);
}

IOMemoryMap * IOUserClient::mapClientMemory64(
        IOOptionBits      type,
        task_t            task,
        IOOptionBits      mapFlags,
        mach_vm_address_t atAddress )
{
    IOReturn             err;
    IOOptionBits         options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap *        map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        FAKE_STACK_FRAME(getMetaClass());

        options = (options & ~kIOMapUserOptionsMask)
                | (mapFlags & kIOMapUserOptionsMask);
        map = memory->createMappingInTask( task, atAddress, options );
        memory->release();

        FAKE_STACK_FRAME_END();
    }

    return( map );
}
IOReturn IOUserClient::exportObjectToClient(task_t task,
                                            OSObject *obj, io_object_t *clientObj)
{
    mach_port_name_t name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

    *(mach_port_name_t *)clientObj = name;

    if (obj) obj->release();

    return kIOReturnSuccess;
}

IOReturn IOUserClient::copyPortNameForObjectInTask(task_t task,
                                                   OSObject *obj, mach_port_name_t * port_name)
{
    mach_port_name_t name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_IDENT );

    *(mach_port_name_t *) port_name = name;

    return kIOReturnSuccess;
}

IOReturn IOUserClient::copyObjectForPortNameInTask(task_t task, mach_port_name_t port_name,
                                                   OSObject **obj)
{
    OSObject * object;

    object = iokit_lookup_object_with_port_name(port_name, IKOT_IOKIT_IDENT, task);

    *obj = object;

    return (object ? kIOReturnSuccess : kIOReturnIPCError);
}

IOReturn IOUserClient::adjustPortNameReferencesInTask(task_t task, mach_port_name_t port_name, mach_port_delta_t delta)
{
    return (iokit_mod_send_right(task, port_name, delta));
}
IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalTrap * IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
    return NULL;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
// functions can break clients of kexts implementing getExternalMethodForIndex()
IOExternalMethod * IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
    IOExternalMethod *method = getExternalMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalAsyncMethod * IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

    if (method)
        *targetP = (IOService *) method->object;

    return method;
}

IOExternalTrap * IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalTrap *trap = getExternalTrapForIndex(index);

    if (trap) {
        *targetP = trap->object;
    }

    return trap;
}
#pragma clang diagnostic pop
IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
{
    mach_port_t port;

    port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);

    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
{
    if (MACH_PORT_NULL != port)
        iokit_release_port_send(port);

    return (kIOReturnSuccess);
}
IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
                                       IOReturn result, void *args[], UInt32 numArgs)
{
    OSAsyncReference64  reference64;
    io_user_reference_t args64[kMaxAsyncArgs];
    unsigned int        idx;

    if (numArgs > kMaxAsyncArgs)
        return kIOReturnMessageTooLarge;

    for (idx = 0; idx < kOSAsyncRef64Count; idx++)
        reference64[idx] = REF64(reference[idx]);

    for (idx = 0; idx < numArgs; idx++)
        args64[idx] = REF64(args[idx]);

    return (sendAsyncResult64(reference64, result, args64, numArgs));
}

IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
        IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    return _sendAsyncResult64(reference, result, args, numArgs, options);
}

IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
        IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
    return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference
,
1794 IOReturn result
, io_user_reference_t args
[], UInt32 numArgs
, IOOptionBits options
)
1798 mach_msg_header_t msgHdr
;
1803 OSNotificationHeader notifyHdr
;
1804 IOAsyncCompletionContent asyncContent
;
1805 uint32_t args
[kMaxAsyncArgs
];
1809 OSNotificationHeader64 notifyHdr
;
1810 IOAsyncCompletionContent asyncContent
;
1811 io_user_reference_t args
[kMaxAsyncArgs
] __attribute__ ((packed
));
1816 mach_port_t replyPort
;
1819 // If no reply port, do nothing.
1820 replyPort
= (mach_port_t
) (reference
[0] & ~kIOUCAsync0Flags
);
1821 if (replyPort
== MACH_PORT_NULL
)
1822 return kIOReturnSuccess
;
1824 if (numArgs
> kMaxAsyncArgs
)
1825 return kIOReturnMessageTooLarge
;
1827 bzero(&replyMsg
, sizeof(replyMsg
));
1828 replyMsg
.msgHdr
.msgh_bits
= MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND
/*remote*/,
1830 replyMsg
.msgHdr
.msgh_remote_port
= replyPort
;
1831 replyMsg
.msgHdr
.msgh_local_port
= 0;
1832 replyMsg
.msgHdr
.msgh_id
= kOSNotificationMessageID
;
1833 if (kIOUCAsync64Flag
& reference
[0])
1835 replyMsg
.msgHdr
.msgh_size
=
1836 sizeof(replyMsg
.msgHdr
) + sizeof(replyMsg
.m
.msg64
)
1837 - (kMaxAsyncArgs
- numArgs
) * sizeof(io_user_reference_t
);
1838 replyMsg
.m
.msg64
.notifyHdr
.size
= sizeof(IOAsyncCompletionContent
)
1839 + numArgs
* sizeof(io_user_reference_t
);
1840 replyMsg
.m
.msg64
.notifyHdr
.type
= kIOAsyncCompletionNotificationType
;
1841 bcopy(reference
, replyMsg
.m
.msg64
.notifyHdr
.reference
, sizeof(OSAsyncReference64
));
1843 replyMsg
.m
.msg64
.asyncContent
.result
= result
;
1845 bcopy(args
, replyMsg
.m
.msg64
.args
, numArgs
* sizeof(io_user_reference_t
));
1851 replyMsg
.msgHdr
.msgh_size
=
1852 sizeof(replyMsg
.msgHdr
) + sizeof(replyMsg
.m
.msg32
)
1853 - (kMaxAsyncArgs
- numArgs
) * sizeof(uint32_t);
1855 replyMsg
.m
.msg32
.notifyHdr
.size
= sizeof(IOAsyncCompletionContent
)
1856 + numArgs
* sizeof(uint32_t);
1857 replyMsg
.m
.msg32
.notifyHdr
.type
= kIOAsyncCompletionNotificationType
;
1859 for (idx
= 0; idx
< kOSAsyncRefCount
; idx
++)
1860 replyMsg
.m
.msg32
.notifyHdr
.reference
[idx
] = REF32(reference
[idx
]);
1862 replyMsg
.m
.msg32
.asyncContent
.result
= result
;
1864 for (idx
= 0; idx
< numArgs
; idx
++)
1865 replyMsg
.m
.msg32
.args
[idx
] = REF32(args
[idx
]);
1868 if ((options
& kIOUserNotifyOptionCanDrop
) != 0) {
1869 kr
= mach_msg_send_from_kernel_with_options( &replyMsg
.msgHdr
,
1870 replyMsg
.msgHdr
.msgh_size
, MACH_SEND_TIMEOUT
, MACH_MSG_TIMEOUT_NONE
);
1872 /* Fail on full queue. */
1873 kr
= mach_msg_send_from_kernel_proper( &replyMsg
.msgHdr
,
1874 replyMsg
.msgHdr
.msgh_size
);
1876 if ((KERN_SUCCESS
!= kr
) && (MACH_SEND_TIMED_OUT
!= kr
) && !(kIOUCAsyncErrorLoggedFlag
& reference
[0]))
1878 reference
[0] |= kIOUCAsyncErrorLoggedFlag
;
1879 IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__
, kr
);
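// The reply above takes one of two layouts, selected by the kIOUCAsync64Flag
// bit stashed in reference[0]: a 64-bit payload (io_user_reference_t args)
// for clients that registered a 64-bit async reference, and a 32-bit payload
// for legacy clients, with the message size trimmed to the actual argument
// count in both cases.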
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define CHECK(cls,obj,out)                                      \
    cls * out;                                                  \
    if( !(out = OSDynamicCast( cls, obj)))                      \
        return( kIOReturnBadArgument )

#define CHECKLOCKED(cls,obj,out)                                \
    IOUserIterator * oIter;                                     \
    cls * out;                                                  \
    if( !(oIter = OSDynamicCast(IOUserIterator, obj)))          \
        return (kIOReturnBadArgument);                          \
    if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
        return (kIOReturnBadArgument)
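// CHECK declares and initializes 'out' by down-casting 'obj', returning
// kIOReturnBadArgument on a type mismatch; for example the MIG routines below
// use "CHECK( IOService, _service, service );" to validate incoming object
// ports. CHECKLOCKED does the same through an IOUserIterator wrapper.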
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.

static kern_return_t copyoutkdata( const void * data, vm_size_t len,
                                   io_buf_ptr_t * buf )
{
    kern_return_t err;
    vm_map_copy_t copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
                         false /* src_destroy */, &copy);

    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
        *buf = (char *) copy;

    return( err );
}
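// On success the caller hands the returned vm_map_copy_t to MIG as an
// out-of-line buffer; the IPC layer consumes (frees) it during copyout, so
// nothing needs to be deallocated here on the success path.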
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/* Routine io_server_version */
kern_return_t is_io_server_version(
        mach_port_t master_port,
        uint64_t *version)
{
    *version = IOKIT_SERVER_VERSION;
    return (kIOReturnSuccess);
}
/* Routine io_object_get_class */
kern_return_t is_io_object_get_class(
        io_object_t object,
        io_name_t className )
{
    const OSMetaClass * my_obj = NULL;

    if( !object)
        return( kIOReturnBadArgument );

    my_obj = object->getMetaClass();
    if (!my_obj) {
        return (kIOReturnNotFound);
    }

    strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));

    return( kIOReturnSuccess );
}
/* Routine io_object_get_superclass */
kern_return_t is_io_object_get_superclass(
        mach_port_t master_port,
        io_name_t obj_name,
        io_name_t class_name)
{
    IOReturn            ret;
    const OSMetaClass * meta;
    const OSMetaClass * super;
    const OSSymbol    * name;
    const char        * cstr;

    if (!obj_name || !class_name)           return (kIOReturnBadArgument);
    if (master_port != master_device_port)  return( kIOReturnNotPrivileged);

    ret = kIOReturnNotFound;
    meta = 0;
    do
    {
        name = OSSymbol::withCString(obj_name);
        if (!name) break;
        meta = OSMetaClass::copyMetaClassWithName(name);
        if (!meta) break;
        super = meta->getSuperClass();
        if (!super) break;
        cstr = super->getClassName();
        if (!cstr) break;
        strlcpy(class_name, cstr, sizeof(io_name_t));
        ret = kIOReturnSuccess;
    }
    while (false);

    OSSafeReleaseNULL(name);
    if (meta) meta->releaseMetaClass();

    return (ret);
}
/* Routine io_object_get_bundle_identifier */
kern_return_t is_io_object_get_bundle_identifier(
        mach_port_t master_port,
        io_name_t obj_name,
        io_name_t bundle_name)
{
    IOReturn            ret;
    const OSMetaClass * meta;
    const OSSymbol    * name;
    const OSSymbol    * identifier;
    const char        * cstr;

    if (!obj_name || !bundle_name)          return (kIOReturnBadArgument);
    if (master_port != master_device_port)  return( kIOReturnNotPrivileged);

    ret = kIOReturnNotFound;
    meta = 0;
    do
    {
        name = OSSymbol::withCString(obj_name);
        if (!name) break;
        meta = OSMetaClass::copyMetaClassWithName(name);
        if (!meta) break;
        identifier = meta->getKmodName();
        if (!identifier) break;
        cstr = identifier->getCStringNoCopy();
        if (!cstr) break;
        strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
        ret = kIOReturnSuccess;
    }
    while (false);

    OSSafeReleaseNULL(name);
    if (meta) meta->releaseMetaClass();

    return (ret);
}
/* Routine io_object_conforms_to */
kern_return_t is_io_object_conforms_to(
        io_object_t object,
        io_name_t className,
        boolean_t *conforms )
{
    if( !object)
        return( kIOReturnBadArgument );

    *conforms = (0 != object->metaCast( className ));

    return( kIOReturnSuccess );
}

/* Routine io_object_get_retain_count */
kern_return_t is_io_object_get_retain_count(
        io_object_t object,
        uint32_t *retainCount )
{
    if( !object)
        return( kIOReturnBadArgument );

    *retainCount = object->getRetainCount();
    return( kIOReturnSuccess );
}
/* Routine io_iterator_next */
kern_return_t is_io_iterator_next(
        io_object_t iterator,
        io_object_t *object )
{
    IOReturn         ret;
    OSObject *       obj;
    OSIterator *     iter;
    IOUserIterator * uiter;

    if ((uiter = OSDynamicCast(IOUserIterator, iterator)))
    {
        obj = uiter->copyNextObject();
    }
    else if ((iter = OSDynamicCast(OSIterator, iterator)))
    {
        obj = iter->getNextObject();
        if (obj) obj->retain();
    }
    else
    {
        return( kIOReturnBadArgument );
    }

    if( obj) {
        *object = obj;
        ret = kIOReturnSuccess;
    } else
        ret = kIOReturnNoDevice;

    return (ret);
}
/* Routine io_iterator_reset */
kern_return_t is_io_iterator_reset(
        io_object_t iterator )
{
    CHECK( OSIterator, iterator, iter );

    iter->reset();

    return( kIOReturnSuccess );
}

/* Routine io_iterator_is_valid */
kern_return_t is_io_iterator_is_valid(
        io_object_t iterator,
        boolean_t *is_valid )
{
    CHECK( OSIterator, iterator, iter );

    *is_valid = iter->isValid();

    return( kIOReturnSuccess );
}
static kern_return_t internal_io_service_match_property_table(
        io_service_t _service,
        const char * matching,
        mach_msg_type_number_t matching_size,
        boolean_t *matches)
{
    CHECK( IOService, _service, service );

    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *matches = service->passiveMatch( dict );
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_match_property_table */
kern_return_t is_io_service_match_property_table(
        io_service_t service,
        io_string_t matching,
        boolean_t *matches )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_match_property_table_ool */
kern_return_t is_io_service_match_property_table_ool(
        io_object_t service,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        kern_return_t *result,
        boolean_t *matches )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        *result = internal_io_service_match_property_table(service,
                        (const char *)data, matchingCnt, matches );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_match_property_table_bin */
kern_return_t is_io_service_match_property_table_bin(
        io_object_t service,
        io_struct_inband_t matching,
        mach_msg_type_number_t matchingCnt,
        boolean_t *matches)
{
    return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
}
static kern_return_t internal_io_service_get_matching_services(
        mach_port_t master_port,
        const char * matching,
        mach_msg_type_number_t matching_size,
        io_iterator_t *existing )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
        kr = kIOReturnSuccess;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_get_matching_services */
kern_return_t is_io_service_get_matching_services(
        mach_port_t master_port,
        io_string_t matching,
        io_iterator_t *existing )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_services_ool(
        mach_port_t master_port,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        kern_return_t *result,
        io_object_t *existing )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *existing = 0;
        *result = internal_io_service_get_matching_services(master_port,
                        (const char *) data, matchingCnt, existing);
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_get_matching_services_bin */
kern_return_t is_io_service_get_matching_services_bin(
        mach_port_t master_port,
        io_struct_inband_t matching,
        mach_msg_type_number_t matchingCnt,
        io_object_t *existing)
{
    return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
}
static kern_return_t internal_io_service_get_matching_service(
        mach_port_t master_port,
        const char * matching,
        mach_msg_type_number_t matching_size,
        io_service_t *service )
{
    kern_return_t  kr;
    OSObject *     obj;
    OSDictionary * dict;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
        *service = IOService::copyMatchingService( dict );
        kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
    } else
        kr = kIOReturnBadArgument;

    if( obj)
        obj->release();

    return( kr );
}

/* Routine io_service_get_matching_service */
kern_return_t is_io_service_get_matching_service(
        mach_port_t master_port,
        io_string_t matching,
        io_service_t *service )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_service_ool(
        mach_port_t master_port,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        kern_return_t *result,
        io_object_t *service )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *service = 0;
        *result = internal_io_service_get_matching_service(master_port,
                        (const char *) data, matchingCnt, service );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}

/* Routine io_service_get_matching_service_bin */
kern_return_t is_io_service_get_matching_service_bin(
        mach_port_t master_port,
        io_struct_inband_t matching,
        mach_msg_type_number_t matchingCnt,
        io_object_t *service)
{
    return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
}
static kern_return_t internal_io_service_add_notification(
        mach_port_t master_port,
        io_name_t notification_type,
        const char * matching,
        size_t matching_size,
        mach_port_t port,
        void * reference,
        vm_size_t referenceSize,
        bool client64,
        io_object_t * notification )
{
    IOServiceUserNotification * userNotify = 0;
    IONotifier *                notify = 0;
    const OSSymbol *            sym;
    OSDictionary *              dict;
    IOReturn                    err;
    unsigned long int           userMsgType;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    do {
        err = kIOReturnNoResources;

        if (matching_size > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

        if( !(sym = OSSymbol::withCString( notification_type )))
            err = kIOReturnNoResources;

        assert(matching_size);
        dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
        if( !dict) {
            err = kIOReturnBadArgument;
            continue;
        }

        if( (sym == gIOPublishNotification)
         || (sym == gIOFirstPublishNotification))
            userMsgType = kIOServicePublishNotificationType;
        else if( (sym == gIOMatchedNotification)
              || (sym == gIOFirstMatchNotification))
            userMsgType = kIOServiceMatchedNotificationType;
        else if ((sym == gIOTerminatedNotification)
              || (sym == gIOWillTerminateNotification))
            userMsgType = kIOServiceTerminatedNotificationType;
        else
            userMsgType = kLastIOKitNotificationType;

        userNotify = new IOServiceUserNotification;

        if( userNotify && !userNotify->init( port, userMsgType,
                                             reference, referenceSize, client64)) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = IOService::addMatchingNotification( sym, dict,
                                                     &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

    } while( false );

    if ((kIOReturnSuccess != err) && userNotify)
    {
        userNotify->invalidatePort();
        userNotify->release();
        userNotify = 0;
    }

    if( sym)
        sym->release();
    if( dict)
        dict->release();

    return( err );
}
2420 kern_return_t
is_io_service_add_notification(
2421 mach_port_t master_port
,
2422 io_name_t notification_type
,
2423 io_string_t matching
,
2425 io_async_ref_t reference
,
2426 mach_msg_type_number_t referenceCnt
,
2427 io_object_t
* notification
)
2429 return (kIOReturnUnsupported
);
2432 /* Routine io_service_add_notification_64 */
2433 kern_return_t
is_io_service_add_notification_64(
2434 mach_port_t master_port
,
2435 io_name_t notification_type
,
2436 io_string_t matching
,
2437 mach_port_t wake_port
,
2438 io_async_ref64_t reference
,
2439 mach_msg_type_number_t referenceCnt
,
2440 io_object_t
*notification
)
2442 return (kIOReturnUnsupported
);
/* Routine io_service_add_notification_bin */
kern_return_t is_io_service_add_notification_bin
(
        mach_port_t master_port,
        io_name_t notification_type,
        io_struct_inband_t matching,
        mach_msg_type_number_t matchingCnt,
        mach_port_t wake_port,
        io_async_ref_t reference,
        mach_msg_type_number_t referenceCnt,
        io_object_t *notification )
{
    return (internal_io_service_add_notification(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
                false, notification));
}

/* Routine io_service_add_notification_bin_64 */
kern_return_t is_io_service_add_notification_bin_64
(
        mach_port_t master_port,
        io_name_t notification_type,
        io_struct_inband_t matching,
        mach_msg_type_number_t matchingCnt,
        mach_port_t wake_port,
        io_async_ref64_t reference,
        mach_msg_type_number_t referenceCnt,
        io_object_t *notification )
{
    return (internal_io_service_add_notification(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
                true, notification));
}
static kern_return_t internal_io_service_add_notification_ool(
        mach_port_t master_port,
        io_name_t notification_type,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        mach_port_t wake_port,
        void * reference,
        vm_size_t referenceSize,
        bool client64,
        kern_return_t *result,
        io_object_t *notification )
{
    kern_return_t   kr;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *notification = 0;
        *result = internal_io_service_add_notification( master_port, notification_type,
                        (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
/* Routine io_service_add_notification_ool */
kern_return_t is_io_service_add_notification_ool(
        mach_port_t master_port,
        io_name_t notification_type,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        mach_port_t wake_port,
        io_async_ref_t reference,
        mach_msg_type_number_t referenceCnt,
        kern_return_t *result,
        io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
                false, result, notification));
}

/* Routine io_service_add_notification_ool_64 */
kern_return_t is_io_service_add_notification_ool_64(
        mach_port_t master_port,
        io_name_t notification_type,
        io_buf_ptr_t matching,
        mach_msg_type_number_t matchingCnt,
        mach_port_t wake_port,
        io_async_ref64_t reference,
        mach_msg_type_number_t referenceCnt,
        kern_return_t *result,
        io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
                matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
                true, result, notification));
}
/* Routine io_service_add_notification_old */
kern_return_t
is_io_service_add_notification_old(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    // for binary compatibility reasons, this must be natural_t for ILP32
    natural_t ref,
    io_object_t * notification )
{
    return( is_io_service_add_notification( master_port, notification_type,
            matching, port, &ref, 1, notification ));
}
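/*
 * Interest notifications: the helper below wraps a caller-supplied wake port
 * in an IOServiceMessageUserNotification and attaches it to a single service
 * with IOService::registerInterest().  If anything fails after the port has
 * been consumed, the port is invalidated and the notification object released
 * so no send right leaks.
 */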
static kern_return_t internal_io_service_add_interest_notification(
    io_object_t _service,
    io_name_t type_of_interest,
    mach_port_t port,
    io_user_reference_t * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{
    IOServiceMessageUserNotification *  userNotify = 0;
    IONotifier *                        notify = 0;
    const OSSymbol *                    sym;
    IOReturn                            err;

    CHECK( IOService, _service, service );

    err = kIOReturnNoResources;
    if( (sym = OSSymbol::withCString( type_of_interest ))) do {

        userNotify = new IOServiceMessageUserNotification;

        if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
                                             reference, referenceSize,
                                             kIOUserNotifyMaxMessageSize,
                                             client64 )) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = service->registerInterest( sym,
                                            &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

    } while( false );

    if ((kIOReturnSuccess != err) && userNotify)
    {
        userNotify->invalidatePort();
        userNotify->release();
    }

    if( sym)
        sym->release();

    return( err );
}
/* Routine io_service_add_message_notification */
kern_return_t
is_io_service_add_interest_notification(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t * notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                port, &reference[0], sizeof(io_async_ref_t), false, notification));
}

/* Routine io_service_add_interest_notification_64 */
kern_return_t
is_io_service_add_interest_notification_64(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t *notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
}
/* Routine io_service_acknowledge_notification */
kern_return_t
is_io_service_acknowledge_notification(
    io_object_t _service,
    natural_t notify_ref,
    natural_t response )
{
    CHECK( IOService, _service, service );

    return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
                                              (IOOptionBits) response ));
}

/* Routine io_connect_get_semaphore */
kern_return_t
is_io_connect_get_notification_semaphore(
    io_connect_t connection,
    natural_t notification_type,
    semaphore_t *semaphore )
{
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return( client->getNotificationSemaphore( (UInt32) notification_type,
                                              semaphore ));
}
/* Routine io_registry_get_root_entry */
kern_return_t
is_io_registry_get_root_entry(
    mach_port_t master_port,
    io_object_t *root )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::getRegistryRoot();
    entry->retain();
    *root = entry;

    return( kIOReturnSuccess);
}

/* Routine io_registry_create_iterator */
kern_return_t
is_io_registry_create_iterator(
    mach_port_t master_port,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    *iterator = IOUserIterator::withIterator(
        IORegistryIterator::iterateOver(
                IORegistryEntry::getPlane( plane ), options ));

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}
/* Routine io_registry_entry_create_iterator */
kern_return_t
is_io_registry_entry_create_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = IOUserIterator::withIterator(
        IORegistryIterator::iterateOver( entry,
                IORegistryEntry::getPlane( plane ), options ));

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}

/* Routine io_registry_iterator_enter */
kern_return_t
is_io_registry_iterator_enter_entry(
    io_object_t iterator )
{
    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    iter->enterEntry();
    IOLockUnlock(oIter->lock);

    return( kIOReturnSuccess );
}

/* Routine io_registry_iterator_exit */
kern_return_t
is_io_registry_iterator_exit_entry(
    io_object_t iterator )
{
    bool didIt;

    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    didIt = iter->exitEntry();
    IOLockUnlock(oIter->lock);

    return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
}
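/*
 * Registry path lookup.  The inband variant takes the path in an io_string_t;
 * the _ool variant also accepts an out-of-line buffer for longer paths.  Both
 * require the caller to hold the master device port.  Illustrative user-space
 * sketch (IOKit.framework, not part of this file):
 *
 *   io_registry_entry_t root =
 *       IORegistryEntryFromPath(kIOMasterPortDefault, "IOService:/");
 */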
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path(
    mach_port_t master_port,
    io_string_t path,
    io_object_t *registry_entry )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::fromPath( path );

    *registry_entry = entry;

    return( kIOReturnSuccess );
}
/* Routine io_registry_entry_from_path */
kern_return_t
is_io_registry_entry_from_path_ool(
    mach_port_t master_port,
    io_string_inband_t path,
    io_buf_ptr_t path_ool,
    mach_msg_type_number_t path_oolCnt,
    kern_return_t *result,
    io_object_t *registry_entry)
{
    IORegistryEntry * entry;
    vm_map_offset_t   map_data;
    const char *      cpath;
    IOReturn          res;
    kern_return_t     err;

    if (master_port != master_device_port) return(kIOReturnNotPrivileged);

    map_data = 0;
    entry    = 0;
    res = err = KERN_SUCCESS;
    if (path[0]) cpath = path;
    else
    {
        if (!path_oolCnt)                                      return(kIOReturnBadArgument);
        if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

        err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
        if (KERN_SUCCESS == err)
        {
            // must return success to mig after vm_map_copyout() succeeds, so result is actual
            cpath = CAST_DOWN(const char *, map_data);
            if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
        }
    }

    if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
    {
        entry = IORegistryEntry::fromPath(cpath);
        res = entry ? kIOReturnSuccess : kIOReturnNotFound;
    }

    if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);

    if (KERN_SUCCESS != err) res = err;
    *registry_entry = entry;
    *result = res;

    return (err);
}
/* Routine io_registry_entry_in_plane */
kern_return_t
is_io_registry_entry_in_plane(
    io_object_t registry_entry,
    io_name_t plane,
    boolean_t *inPlane )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_t path )
{
    int length;

    CHECK( IORegistryEntry, registry_entry, entry );

    length = sizeof( io_string_t);
    if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
        return( kIOReturnSuccess );
    else
        return( kIOReturnBadArgument );
}
/* Routine io_registry_entry_get_path */
kern_return_t
is_io_registry_entry_get_path_ool(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_inband_t path,
    io_buf_ptr_t *path_ool,
    mach_msg_type_number_t *path_oolCnt)
{
    enum   { kMaxPath = 16384 };
    IOReturn err;
    int      length;
    char   * buf;

    CHECK( IORegistryEntry, registry_entry, entry );

    *path_ool    = NULL;
    *path_oolCnt = 0;
    length = sizeof(io_string_inband_t);
    if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
    else
    {
        length = kMaxPath;
        buf = IONew(char, length);
        if (!buf) err = kIOReturnNoMemory;
        else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
        else
        {
            *path_oolCnt = length;
            err = copyoutkdata(buf, length, path_ool);
        }
        if (buf) IODelete(buf, char, kMaxPath);
    }

    return (err);
}
/* Routine io_registry_entry_get_name */
kern_return_t
is_io_registry_entry_get_name(
    io_object_t registry_entry,
    io_name_t name )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    strncpy( name, entry->getName(), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_name_in_plane */
kern_return_t
is_io_registry_entry_get_name_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t name )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    strncpy( name, entry->getName( plane), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_location_in_plane */
kern_return_t
is_io_registry_entry_get_location_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t location )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    const char * cstr = entry->getLocation( plane );

    if( cstr) {
        strncpy( location, cstr, sizeof( io_name_t));
        return( kIOReturnSuccess );
    } else
        return( kIOReturnNotFound );
}

/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t
is_io_registry_entry_get_registry_entry_id(
    io_object_t registry_entry,
    uint64_t *entry_id )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *entry_id = entry->getRegistryEntryID();

    return (kIOReturnSuccess);
}
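/*
 * Property getters.  Each routine below consults the MAC hook
 * mac_iokit_check_get_property() (when CONFIG_MACF is enabled) before any
 * property data is serialized back to user space, so a policy module can
 * veto individual reads per credential.
 */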
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property_bytes(
    io_object_t registry_entry,
    io_name_t property_name,
    io_struct_inband_t buf,
    mach_msg_type_number_t *dataCnt )
{
    OSObject *   obj;
    OSData *     data;
    OSString *   str;
    OSBoolean *  boo;
    OSNumber *   off;
    UInt64       offsetBytes;
    unsigned int len = 0;
    const void * bytes = 0;
    IOReturn     ret = kIOReturnSuccess;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNoResources );

    // One day OSData will be a common container base class
    // until then...
    if( (data = OSDynamicCast( OSData, obj ))) {
        len = data->getLength();
        bytes = data->getBytesNoCopy();
        if (!data->isSerializable()) len = 0;

    } else if( (str = OSDynamicCast( OSString, obj ))) {
        len = str->getLength() + 1;
        bytes = str->getCStringNoCopy();

    } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
        len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
        bytes = boo->isTrue() ? "Yes" : "No";

    } else if( (off = OSDynamicCast( OSNumber, obj ))) {
        offsetBytes = off->unsigned64BitValue();
        len = off->numberOfBytes();
        if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
        bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
        bytes = (const void *)
                (((UInt32) bytes) + (sizeof( UInt64) - len));
#endif

    } else
        ret = kIOReturnBadArgument;

    if( bytes) {
        if( *dataCnt < len)
            ret = kIOReturnIPCError;
        else {
            *dataCnt = len;
            bcopy( bytes, buf, len );
        }
    }
    obj->release();

    return( ret );
}
/* Routine io_registry_entry_get_property */
kern_return_t
is_io_registry_entry_get_property(
    io_object_t registry_entry,
    io_name_t property_name,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNotFound );

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );
    } else
        err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}

/* Routine io_registry_entry_get_property_recursively */
kern_return_t
is_io_registry_entry_get_property_recursively(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty( property_name,
                               IORegistryEntry::getPlane( plane ), options );
    if( !obj)
        return( kIOReturnNotFound );

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );
    } else
        err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    return (kIOReturnUnsupported);
}

#if CONFIG_MACF

struct GetPropertiesEditorRef
{
    kauth_cred_t      cred;
    IORegistryEntry * entry;
    OSCollection    * root;
};

static const OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
                    OSSerialize           * s,
                    OSCollection          * container,
                    const OSSymbol        * name,
                    const OSMetaClassBase * value)
{
    GetPropertiesEditorRef * ref = (typeof(ref)) reference;

    if (!ref->root) ref->root = container;
    if (ref->root == container)
    {
        if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
            value = 0;
    }
    if (value) value->retain();

    return (value);
}

#endif /* CONFIG_MACF */
/* Routine io_registry_entry_get_properties */
kern_return_t
is_io_registry_entry_get_properties_bin(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt)
{
    kern_return_t          err = kIOReturnSuccess;
    vm_size_t              len;
    OSSerialize          * s;
    OSSerialize::Editor    editor = 0;
    void                 * editRef = 0;

    CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
    GetPropertiesEditorRef ref;
    if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
    {
        editor    = &GetPropertiesEditor;
        editRef   = &ref;
        ref.cred  = kauth_cred_get();
        ref.entry = entry;
        ref.root  = 0;
    }
#endif

    s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
    if (!s) return (kIOReturnNoMemory);

    if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;

    if (kIOReturnSuccess == err)
    {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata(s->text(), len, properties);
    }
    s->release();

    return (err);
}
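/*
 * Binary single-property fetch.  Unlike the XML paths above, this routine
 * serializes with OSSerialize::binaryWithCapacity(), treats
 * gIORegistryEntryPropertyKeysKey specially (returning only the key list via
 * copyPropertyKeys()), and removes any property listed in
 * gIORemoveOnReadProperties once it has been read.
 */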
/* Routine io_registry_entry_get_property_bin */
kern_return_t
is_io_registry_entry_get_property_bin(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint64_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t    err;
    vm_size_t        len;
    OSObject *       obj;
    const OSSymbol * sym;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    sym = OSSymbol::withCString(property_name);
    if (!sym) return (kIOReturnNoMemory);

    if (gIORegistryEntryPropertyKeysKey == sym)
    {
        obj = entry->copyPropertyKeys();
    }
    else
    {
        if ((kIORegistryIterateRecursively & options) && plane[0])
        {
            obj = entry->copyProperty(property_name,
                                      IORegistryEntry::getPlane(plane), options);
        }
        else
        {
            obj = entry->copyProperty(property_name);
        }
        if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
    }

    sym->release();
    if (!obj) return (kIOReturnNotFound);

    OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );
    } else err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}
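/*
 * Setting properties: the serialized dictionary arrives as an out-of-line
 * vm_map copy.  Note the ordering constraint repeated throughout this file:
 * once vm_map_copyout() has succeeded the MIG routine must report success
 * and return the real status through *result, since the copy object has
 * already been consumed.
 */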
/* Routine io_registry_entry_set_properties */
kern_return_t is_io_registry_entry_set_properties
(
    io_object_t registry_entry,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result)
{
    OSObject *      obj;
    kern_return_t   err;
    IOReturn        res;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    CHECK( IORegistryEntry, registry_entry, entry );

    if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
        return( kIOReturnMessageTooLarge);

    err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == err) {

        FAKE_STACK_FRAME(entry->getMetaClass());

        // must return success after vm_map_copyout() succeeds
        obj = OSUnserializeXML( (const char *) data, propertiesCnt );
        vm_deallocate( kernel_map, data, propertiesCnt );

        if (!obj)
            res = kIOReturnBadArgument;
#if CONFIG_MACF
        else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
                                                     registry_entry, obj))
            res = kIOReturnNotPermitted;
#endif
        else
            res = entry->setProperties( obj );

        if (obj)
            obj->release();

        FAKE_STACK_FRAME_END();

    } else
        res = err;

    *result = res;
    return( err );
}
/* Routine io_registry_entry_get_child_iterator */
kern_return_t
is_io_registry_entry_get_child_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = IOUserIterator::withIterator(entry->getChildIterator(
        IORegistryEntry::getPlane( plane )));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_parent_iterator */
kern_return_t
is_io_registry_entry_get_parent_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    io_object_t *iterator)
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = IOUserIterator::withIterator(entry->getParentIterator(
        IORegistryEntry::getPlane( plane )));

    return( kIOReturnSuccess );
}

/* Routine io_service_get_busy_state */
kern_return_t
is_io_service_get_busy_state(
    io_object_t _service,
    uint32_t *busyState )
{
    CHECK( IOService, _service, service );

    *busyState = service->getBusyState();

    return( kIOReturnSuccess );
}

/* Routine io_service_get_state */
kern_return_t
is_io_service_get_state(
    io_object_t _service,
    uint64_t *state,
    uint32_t *busy_state,
    uint64_t *accumulated_busy_time )
{
    CHECK( IOService, _service, service );

    *state = service->getState();
    *busy_state = service->getBusyState();
    *accumulated_busy_time = service->getAccumulatedBusyTime();

    return( kIOReturnSuccess );
}
/* Routine io_service_wait_quiet */
kern_return_t
is_io_service_wait_quiet(
    io_object_t _service,
    mach_timespec_t wait_time )
{
    uint64_t timeoutNS;

    CHECK( IOService, _service, service );

    timeoutNS = wait_time.tv_sec;
    timeoutNS *= kSecondScale;
    timeoutNS += wait_time.tv_nsec;

    return( service->waitQuiet(timeoutNS) );
}

/* Routine io_service_request_probe */
kern_return_t
is_io_service_request_probe(
    io_object_t _service,
    uint32_t options )
{
    CHECK( IOService, _service, service );

    return( service->requestProbe( options ));
}

/* Routine io_service_get_authorization_id */
kern_return_t
is_io_service_get_authorization_id(
    io_object_t _service,
    uint64_t *authorization_id )
{
    kern_return_t kr;

    CHECK( IOService, _service, service );

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                           kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    *authorization_id = service->getAuthorizationID();

    return( kr );
}

/* Routine io_service_set_authorization_id */
kern_return_t
is_io_service_set_authorization_id(
    io_object_t _service,
    uint64_t authorization_id )
{
    CHECK( IOService, _service, service );

    return( service->setAuthorizationID( authorization_id ) );
}
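/*
 * io_service_open_extended below is the kernel side of IOServiceOpen(): it
 * asks the provider for a connection via IOService::newUserClient(), tags the
 * new IOUserClient with its creator, applies the cross-endian compatibility
 * and MAC open checks, and registers the owning task before returning the
 * connection object.
 */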
3421 /* Routine io_service_open_ndr */
3422 kern_return_t
is_io_service_open_extended(
3423 io_object_t _service
,
3425 uint32_t connect_type
,
3427 io_buf_ptr_t properties
,
3428 mach_msg_type_number_t propertiesCnt
,
3429 kern_return_t
* result
,
3430 io_object_t
*connection
)
3432 IOUserClient
* client
= 0;
3433 kern_return_t err
= KERN_SUCCESS
;
3434 IOReturn res
= kIOReturnSuccess
;
3435 OSDictionary
* propertiesDict
= 0;
3437 bool disallowAccess
;
3439 CHECK( IOService
, _service
, service
);
3441 if (!owningTask
) return (kIOReturnBadArgument
);
3442 assert(owningTask
== current_task());
3443 if (owningTask
!= current_task()) return (kIOReturnBadArgument
);
3447 if (properties
) return (kIOReturnUnsupported
);
3452 vm_map_offset_t map_data
;
3454 if( propertiesCnt
> sizeof(io_struct_inband_t
))
3455 return( kIOReturnMessageTooLarge
);
3457 err
= vm_map_copyout( kernel_map
, &map_data
, (vm_map_copy_t
) properties
);
3459 data
= CAST_DOWN(vm_offset_t
, map_data
);
3460 if (KERN_SUCCESS
== err
)
3462 // must return success after vm_map_copyout() succeeds
3463 obj
= OSUnserializeXML( (const char *) data
, propertiesCnt
);
3464 vm_deallocate( kernel_map
, data
, propertiesCnt
);
3465 propertiesDict
= OSDynamicCast(OSDictionary
, obj
);
3466 if (!propertiesDict
)
3468 res
= kIOReturnBadArgument
;
3473 if (kIOReturnSuccess
!= res
)
3477 crossEndian
= (ndr
.int_rep
!= NDR_record
.int_rep
);
3480 if (!propertiesDict
)
3481 propertiesDict
= OSDictionary::withCapacity(4);
3482 OSData
* data
= OSData::withBytes(&ndr
, sizeof(ndr
));
3486 propertiesDict
->setObject(kIOUserClientCrossEndianKey
, data
);
3491 res
= service
->newUserClient( owningTask
, (void *) owningTask
,
3492 connect_type
, propertiesDict
, &client
);
3495 propertiesDict
->release();
3497 if (res
== kIOReturnSuccess
)
3499 assert( OSDynamicCast(IOUserClient
, client
) );
3501 client
->sharedInstance
= (0 != client
->getProperty(kIOUserClientSharedInstanceKey
));
3502 client
->closed
= false;
3503 client
->lock
= IOLockAlloc();
3505 disallowAccess
= (crossEndian
3506 && (kOSBooleanTrue
!= service
->getProperty(kIOUserClientCrossEndianCompatibleKey
))
3507 && (kOSBooleanTrue
!= client
->getProperty(kIOUserClientCrossEndianCompatibleKey
)));
3508 if (disallowAccess
) res
= kIOReturnUnsupported
;
3510 else if (0 != mac_iokit_check_open(kauth_cred_get(), client
, connect_type
))
3511 res
= kIOReturnNotPermitted
;
3514 if (kIOReturnSuccess
== res
) res
= client
->registerOwner(owningTask
);
3516 if (kIOReturnSuccess
!= res
)
3518 IOStatisticsClientCall();
3519 client
->clientClose();
3524 OSString
* creatorName
= IOCopyLogNameForPID(proc_selfpid());
3527 client
->setProperty(kIOUserClientCreatorKey
, creatorName
);
3528 creatorName
->release();
3530 client
->setTerminateDefer(service
, false);
3535 *connection
= client
;
3541 /* Routine io_service_close */
3542 kern_return_t
is_io_service_close(
3543 io_object_t connection
)
3546 if ((mappings
= OSDynamicCast(OSSet
, connection
)))
3547 return( kIOReturnSuccess
);
3549 CHECK( IOUserClient
, connection
, client
);
3551 IOStatisticsClientCall();
3553 if (client
->sharedInstance
|| OSCompareAndSwap8(0, 1, &client
->closed
))
3555 IOLockLock(client
->lock
);
3556 client
->clientClose();
3557 IOLockUnlock(client
->lock
);
3561 IOLog("ignored is_io_service_close(0x%qx,%s)\n",
3562 client
->getRegistryEntryID(), client
->getName());
3565 return( kIOReturnSuccess
);
3568 /* Routine io_connect_get_service */
3569 kern_return_t
is_io_connect_get_service(
3570 io_object_t connection
,
3571 io_object_t
*service
)
3573 IOService
* theService
;
3575 CHECK( IOUserClient
, connection
, client
);
3577 theService
= client
->getService();
3579 theService
->retain();
3581 *service
= theService
;
3583 return( theService
? kIOReturnSuccess
: kIOReturnUnsupported
);
3586 /* Routine io_connect_set_notification_port */
3587 kern_return_t
is_io_connect_set_notification_port(
3588 io_object_t connection
,
3589 uint32_t notification_type
,
3594 CHECK( IOUserClient
, connection
, client
);
3596 IOStatisticsClientCall();
3597 IOLockLock(client
->lock
);
3598 ret
= client
->registerNotificationPort( port
, notification_type
,
3599 (io_user_reference_t
) reference
);
3600 IOLockUnlock(client
->lock
);
3604 /* Routine io_connect_set_notification_port */
3605 kern_return_t
is_io_connect_set_notification_port_64(
3606 io_object_t connection
,
3607 uint32_t notification_type
,
3609 io_user_reference_t reference
)
3612 CHECK( IOUserClient
, connection
, client
);
3614 IOStatisticsClientCall();
3615 IOLockLock(client
->lock
);
3616 ret
= client
->registerNotificationPort( port
, notification_type
,
3618 IOLockUnlock(client
->lock
);
3622 /* Routine io_connect_map_memory_into_task */
3623 kern_return_t is_io_connect_map_memory_into_task
3625 io_connect_t connection
,
3626 uint32_t memory_type
,
3628 mach_vm_address_t
*address
,
3629 mach_vm_size_t
*size
,
3636 CHECK( IOUserClient
, connection
, client
);
3638 if (!into_task
) return (kIOReturnBadArgument
);
3640 IOStatisticsClientCall();
3641 map
= client
->mapClientMemory64( memory_type
, into_task
, flags
, *address
);
3644 *address
= map
->getAddress();
3646 *size
= map
->getSize();
3648 if( client
->sharedInstance
3649 || (into_task
!= current_task())) {
3650 // push a name out to the task owning the map,
3651 // so we can clean up maps
3652 mach_port_name_t name __unused
=
3653 IOMachPort::makeSendRightForTask(
3654 into_task
, map
, IKOT_IOKIT_OBJECT
);
3658 // keep it with the user client
3659 IOLockLock( gIOObjectPortLock
);
3660 if( 0 == client
->mappings
)
3661 client
->mappings
= OSSet::withCapacity(2);
3662 if( client
->mappings
)
3663 client
->mappings
->setObject( map
);
3664 IOLockUnlock( gIOObjectPortLock
);
3667 err
= kIOReturnSuccess
;
3670 err
= kIOReturnBadArgument
;
3675 /* Routine is_io_connect_map_memory */
3676 kern_return_t
is_io_connect_map_memory(
3677 io_object_t connect
,
3685 mach_vm_address_t address
;
3686 mach_vm_size_t size
;
3688 address
= SCALAR64(*mapAddr
);
3689 size
= SCALAR64(*mapSize
);
3691 err
= is_io_connect_map_memory_into_task(connect
, type
, task
, &address
, &size
, flags
);
3693 *mapAddr
= SCALAR32(address
);
3694 *mapSize
= SCALAR32(size
);
3701 IOMemoryMap
* IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor
* mem
)
3704 IOMemoryMap
* map
= 0;
3706 IOLockLock(gIOObjectPortLock
);
3708 iter
= OSCollectionIterator::withCollection(mappings
);
3711 while ((map
= OSDynamicCast(IOMemoryMap
, iter
->getNextObject())))
3713 if(mem
== map
->getMemoryDescriptor())
3716 mappings
->removeObject(map
);
3723 IOLockUnlock(gIOObjectPortLock
);
3730 /* Routine io_connect_unmap_memory_from_task */
3731 kern_return_t is_io_connect_unmap_memory_from_task
3733 io_connect_t connection
,
3734 uint32_t memory_type
,
3736 mach_vm_address_t address
)
3739 IOOptionBits options
= 0;
3740 IOMemoryDescriptor
* memory
= 0;
3743 CHECK( IOUserClient
, connection
, client
);
3745 if (!from_task
) return (kIOReturnBadArgument
);
3747 IOStatisticsClientCall();
3748 err
= client
->clientMemoryForType( (UInt32
) memory_type
, &options
, &memory
);
3750 if( memory
&& (kIOReturnSuccess
== err
)) {
3752 options
= (options
& ~kIOMapUserOptionsMask
)
3753 | kIOMapAnywhere
| kIOMapReference
;
3755 map
= memory
->createMappingInTask( from_task
, address
, options
);
3759 IOLockLock( gIOObjectPortLock
);
3760 if( client
->mappings
)
3761 client
->mappings
->removeObject( map
);
3762 IOLockUnlock( gIOObjectPortLock
);
3764 mach_port_name_t name
= 0;
3765 if (from_task
!= current_task())
3767 name
= IOMachPort::makeSendRightForTask( from_task
, map
, IKOT_IOKIT_OBJECT
);
3773 map
->userClientUnmap();
3774 err
= iokit_mod_send_right( from_task
, name
, -2 );
3775 err
= kIOReturnSuccess
;
3778 IOMachPort::releasePortForObject( map
, IKOT_IOKIT_OBJECT
);
3779 if (from_task
== current_task())
3783 err
= kIOReturnBadArgument
;
3789 kern_return_t
is_io_connect_unmap_memory(
3790 io_object_t connect
,
3796 mach_vm_address_t address
;
3798 address
= SCALAR64(mapAddr
);
3800 err
= is_io_connect_unmap_memory_from_task(connect
, type
, task
, mapAddr
);
3806 /* Routine io_connect_add_client */
3807 kern_return_t
is_io_connect_add_client(
3808 io_object_t connection
,
3809 io_object_t connect_to
)
3811 CHECK( IOUserClient
, connection
, client
);
3812 CHECK( IOUserClient
, connect_to
, to
);
3814 IOStatisticsClientCall();
3815 return( client
->connectClient( to
) );
3819 /* Routine io_connect_set_properties */
3820 kern_return_t
is_io_connect_set_properties(
3821 io_object_t connection
,
3822 io_buf_ptr_t properties
,
3823 mach_msg_type_number_t propertiesCnt
,
3824 kern_return_t
* result
)
3826 return( is_io_registry_entry_set_properties( connection
, properties
, propertiesCnt
, result
));
3829 /* Routine io_user_client_method */
3830 kern_return_t is_io_connect_method_var_output
3832 io_connect_t connection
,
3834 io_scalar_inband64_t scalar_input
,
3835 mach_msg_type_number_t scalar_inputCnt
,
3836 io_struct_inband_t inband_input
,
3837 mach_msg_type_number_t inband_inputCnt
,
3838 mach_vm_address_t ool_input
,
3839 mach_vm_size_t ool_input_size
,
3840 io_struct_inband_t inband_output
,
3841 mach_msg_type_number_t
*inband_outputCnt
,
3842 io_scalar_inband64_t scalar_output
,
3843 mach_msg_type_number_t
*scalar_outputCnt
,
3844 io_buf_ptr_t
*var_output
,
3845 mach_msg_type_number_t
*var_outputCnt
3848 CHECK( IOUserClient
, connection
, client
);
3850 IOExternalMethodArguments args
;
3852 IOMemoryDescriptor
* inputMD
= 0;
3853 OSObject
* structureVariableOutputData
= 0;
3855 bzero(&args
.__reserved
[0], sizeof(args
.__reserved
));
3856 args
.__reservedA
= 0;
3857 args
.version
= kIOExternalMethodArgumentsCurrentVersion
;
3859 args
.selector
= selector
;
3861 args
.asyncWakePort
= MACH_PORT_NULL
;
3862 args
.asyncReference
= 0;
3863 args
.asyncReferenceCount
= 0;
3864 args
.structureVariableOutputData
= &structureVariableOutputData
;
3866 args
.scalarInput
= scalar_input
;
3867 args
.scalarInputCount
= scalar_inputCnt
;
3868 args
.structureInput
= inband_input
;
3869 args
.structureInputSize
= inband_inputCnt
;
3871 if (ool_input
&& (ool_input_size
<= sizeof(io_struct_inband_t
))) return (kIOReturnIPCError
);
3874 inputMD
= IOMemoryDescriptor::withAddressRange(ool_input
, ool_input_size
,
3875 kIODirectionOut
| kIOMemoryMapCopyOnWrite
,
3878 args
.structureInputDescriptor
= inputMD
;
3880 args
.scalarOutput
= scalar_output
;
3881 args
.scalarOutputCount
= *scalar_outputCnt
;
3882 bzero(&scalar_output
[0], *scalar_outputCnt
* sizeof(scalar_output
[0]));
3883 args
.structureOutput
= inband_output
;
3884 args
.structureOutputSize
= *inband_outputCnt
;
3885 args
.structureOutputDescriptor
= NULL
;
3886 args
.structureOutputDescriptorSize
= 0;
3888 IOStatisticsClientCall();
3889 ret
= client
->externalMethod( selector
, &args
);
3891 *scalar_outputCnt
= args
.scalarOutputCount
;
3892 *inband_outputCnt
= args
.structureOutputSize
;
3894 if (var_outputCnt
&& var_output
&& (kIOReturnSuccess
== ret
))
3896 OSSerialize
* serialize
;
3900 if ((serialize
= OSDynamicCast(OSSerialize
, structureVariableOutputData
)))
3902 len
= serialize
->getLength();
3903 *var_outputCnt
= len
;
3904 ret
= copyoutkdata(serialize
->text(), len
, var_output
);
3906 else if ((data
= OSDynamicCast(OSData
, structureVariableOutputData
)))
3908 len
= data
->getLength();
3909 *var_outputCnt
= len
;
3910 ret
= copyoutkdata(data
->getBytesNoCopy(), len
, var_output
);
3914 ret
= kIOReturnUnderrun
;
3920 if (structureVariableOutputData
)
3921 structureVariableOutputData
->release();
3926 /* Routine io_user_client_method */
3927 kern_return_t is_io_connect_method
3929 io_connect_t connection
,
3931 io_scalar_inband64_t scalar_input
,
3932 mach_msg_type_number_t scalar_inputCnt
,
3933 io_struct_inband_t inband_input
,
3934 mach_msg_type_number_t inband_inputCnt
,
3935 mach_vm_address_t ool_input
,
3936 mach_vm_size_t ool_input_size
,
3937 io_struct_inband_t inband_output
,
3938 mach_msg_type_number_t
*inband_outputCnt
,
3939 io_scalar_inband64_t scalar_output
,
3940 mach_msg_type_number_t
*scalar_outputCnt
,
3941 mach_vm_address_t ool_output
,
3942 mach_vm_size_t
*ool_output_size
3945 CHECK( IOUserClient
, connection
, client
);
3947 IOExternalMethodArguments args
;
3949 IOMemoryDescriptor
* inputMD
= 0;
3950 IOMemoryDescriptor
* outputMD
= 0;
3952 bzero(&args
.__reserved
[0], sizeof(args
.__reserved
));
3953 args
.__reservedA
= 0;
3954 args
.version
= kIOExternalMethodArgumentsCurrentVersion
;
3956 args
.selector
= selector
;
3958 args
.asyncWakePort
= MACH_PORT_NULL
;
3959 args
.asyncReference
= 0;
3960 args
.asyncReferenceCount
= 0;
3961 args
.structureVariableOutputData
= 0;
3963 args
.scalarInput
= scalar_input
;
3964 args
.scalarInputCount
= scalar_inputCnt
;
3965 args
.structureInput
= inband_input
;
3966 args
.structureInputSize
= inband_inputCnt
;
3968 if (ool_input
&& (ool_input_size
<= sizeof(io_struct_inband_t
))) return (kIOReturnIPCError
);
3969 if (ool_output
&& (*ool_output_size
<= sizeof(io_struct_inband_t
))) return (kIOReturnIPCError
);
3972 inputMD
= IOMemoryDescriptor::withAddressRange(ool_input
, ool_input_size
,
3973 kIODirectionOut
| kIOMemoryMapCopyOnWrite
,
3976 args
.structureInputDescriptor
= inputMD
;
3978 args
.scalarOutput
= scalar_output
;
3979 args
.scalarOutputCount
= *scalar_outputCnt
;
3980 bzero(&scalar_output
[0], *scalar_outputCnt
* sizeof(scalar_output
[0]));
3981 args
.structureOutput
= inband_output
;
3982 args
.structureOutputSize
= *inband_outputCnt
;
3984 if (ool_output
&& ool_output_size
)
3986 outputMD
= IOMemoryDescriptor::withAddressRange(ool_output
, *ool_output_size
,
3987 kIODirectionIn
, current_task());
3990 args
.structureOutputDescriptor
= outputMD
;
3991 args
.structureOutputDescriptorSize
= ool_output_size
? *ool_output_size
: 0;
3993 IOStatisticsClientCall();
3994 ret
= client
->externalMethod( selector
, &args
);
3996 *scalar_outputCnt
= args
.scalarOutputCount
;
3997 *inband_outputCnt
= args
.structureOutputSize
;
3998 *ool_output_size
= args
.structureOutputDescriptorSize
;
4003 outputMD
->release();
4008 /* Routine io_async_user_client_method */
4009 kern_return_t is_io_connect_async_method
4011 io_connect_t connection
,
4012 mach_port_t wake_port
,
4013 io_async_ref64_t reference
,
4014 mach_msg_type_number_t referenceCnt
,
4016 io_scalar_inband64_t scalar_input
,
4017 mach_msg_type_number_t scalar_inputCnt
,
4018 io_struct_inband_t inband_input
,
4019 mach_msg_type_number_t inband_inputCnt
,
4020 mach_vm_address_t ool_input
,
4021 mach_vm_size_t ool_input_size
,
4022 io_struct_inband_t inband_output
,
4023 mach_msg_type_number_t
*inband_outputCnt
,
4024 io_scalar_inband64_t scalar_output
,
4025 mach_msg_type_number_t
*scalar_outputCnt
,
4026 mach_vm_address_t ool_output
,
4027 mach_vm_size_t
* ool_output_size
4030 CHECK( IOUserClient
, connection
, client
);
4032 IOExternalMethodArguments args
;
4034 IOMemoryDescriptor
* inputMD
= 0;
4035 IOMemoryDescriptor
* outputMD
= 0;
4037 bzero(&args
.__reserved
[0], sizeof(args
.__reserved
));
4038 args
.__reservedA
= 0;
4039 args
.version
= kIOExternalMethodArgumentsCurrentVersion
;
4041 reference
[0] = (io_user_reference_t
) wake_port
;
4042 if (vm_map_is_64bit(get_task_map(current_task())))
4043 reference
[0] |= kIOUCAsync64Flag
;
4045 args
.selector
= selector
;
4047 args
.asyncWakePort
= wake_port
;
4048 args
.asyncReference
= reference
;
4049 args
.asyncReferenceCount
= referenceCnt
;
4051 args
.structureVariableOutputData
= 0;
4053 args
.scalarInput
= scalar_input
;
4054 args
.scalarInputCount
= scalar_inputCnt
;
4055 args
.structureInput
= inband_input
;
4056 args
.structureInputSize
= inband_inputCnt
;
4058 if (ool_input
&& (ool_input_size
<= sizeof(io_struct_inband_t
))) return (kIOReturnIPCError
);
4059 if (ool_output
&& (*ool_output_size
<= sizeof(io_struct_inband_t
))) return (kIOReturnIPCError
);
4062 inputMD
= IOMemoryDescriptor::withAddressRange(ool_input
, ool_input_size
,
4063 kIODirectionOut
| kIOMemoryMapCopyOnWrite
,
4066 args
.structureInputDescriptor
= inputMD
;
4068 args
.scalarOutput
= scalar_output
;
4069 args
.scalarOutputCount
= *scalar_outputCnt
;
4070 bzero(&scalar_output
[0], *scalar_outputCnt
* sizeof(scalar_output
[0]));
4071 args
.structureOutput
= inband_output
;
4072 args
.structureOutputSize
= *inband_outputCnt
;
4076 outputMD
= IOMemoryDescriptor::withAddressRange(ool_output
, *ool_output_size
,
4077 kIODirectionIn
, current_task());
4080 args
.structureOutputDescriptor
= outputMD
;
4081 args
.structureOutputDescriptorSize
= *ool_output_size
;
4083 IOStatisticsClientCall();
4084 ret
= client
->externalMethod( selector
, &args
);
4086 *inband_outputCnt
= args
.structureOutputSize
;
4087 *ool_output_size
= args
.structureOutputDescriptorSize
;
4092 outputMD
->release();
4097 /* Routine io_connect_method_scalarI_scalarO */
4098 kern_return_t
is_io_connect_method_scalarI_scalarO(
4099 io_object_t connect
,
4101 io_scalar_inband_t input
,
4102 mach_msg_type_number_t inputCount
,
4103 io_scalar_inband_t output
,
4104 mach_msg_type_number_t
* outputCount
)
4108 io_scalar_inband64_t _input
;
4109 io_scalar_inband64_t _output
;
4111 mach_msg_type_number_t struct_outputCnt
= 0;
4112 mach_vm_size_t ool_output_size
= 0;
4114 bzero(&_output
[0], sizeof(_output
));
4115 for (i
= 0; i
< inputCount
; i
++)
4116 _input
[i
] = SCALAR64(input
[i
]);
4118 err
= is_io_connect_method(connect
, index
,
4122 NULL
, &struct_outputCnt
,
4123 _output
, outputCount
,
4124 0, &ool_output_size
);
4126 for (i
= 0; i
< *outputCount
; i
++)
4127 output
[i
] = SCALAR32(_output
[i
]);
4132 kern_return_t
shim_io_connect_method_scalarI_scalarO(
4133 IOExternalMethod
* method
,
4135 const io_user_scalar_t
* input
,
4136 mach_msg_type_number_t inputCount
,
4137 io_user_scalar_t
* output
,
4138 mach_msg_type_number_t
* outputCount
)
4141 io_scalar_inband_t _output
;
4143 err
= kIOReturnBadArgument
;
4145 bzero(&_output
[0], sizeof(_output
));
4148 if( inputCount
!= method
->count0
)
4150 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4151 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4154 if( *outputCount
!= method
->count1
)
4156 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)*outputCount
, (uint64_t)method
->count1
);
4157 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)*outputCount
, uint64_t, (uint64_t)method
->count1
);
4161 func
= method
->func
;
4163 switch( inputCount
) {
4166 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4167 ARG32(input
[3]), ARG32(input
[4]), ARG32(input
[5]) );
4170 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4171 ARG32(input
[3]), ARG32(input
[4]),
4175 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4177 &_output
[0], &_output
[1] );
4180 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4181 &_output
[0], &_output
[1], &_output
[2] );
4184 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]),
4185 &_output
[0], &_output
[1], &_output
[2],
4189 err
= (object
->*func
)( ARG32(input
[0]),
4190 &_output
[0], &_output
[1], &_output
[2],
4191 &_output
[3], &_output
[4] );
4194 err
= (object
->*func
)( &_output
[0], &_output
[1], &_output
[2],
4195 &_output
[3], &_output
[4], &_output
[5] );
4199 IOLog("%s: Bad method table\n", object
->getName());
4205 for (i
= 0; i
< *outputCount
; i
++)
4206 output
[i
] = SCALAR32(_output
[i
]);
4211 /* Routine io_async_method_scalarI_scalarO */
4212 kern_return_t
is_io_async_method_scalarI_scalarO(
4213 io_object_t connect
,
4214 mach_port_t wake_port
,
4215 io_async_ref_t reference
,
4216 mach_msg_type_number_t referenceCnt
,
4218 io_scalar_inband_t input
,
4219 mach_msg_type_number_t inputCount
,
4220 io_scalar_inband_t output
,
4221 mach_msg_type_number_t
* outputCount
)
4225 io_scalar_inband64_t _input
;
4226 io_scalar_inband64_t _output
;
4227 io_async_ref64_t _reference
;
4229 bzero(&_output
[0], sizeof(_output
));
4230 for (i
= 0; i
< referenceCnt
; i
++)
4231 _reference
[i
] = REF64(reference
[i
]);
4233 mach_msg_type_number_t struct_outputCnt
= 0;
4234 mach_vm_size_t ool_output_size
= 0;
4236 for (i
= 0; i
< inputCount
; i
++)
4237 _input
[i
] = SCALAR64(input
[i
]);
4239 err
= is_io_connect_async_method(connect
,
4240 wake_port
, _reference
, referenceCnt
,
4245 NULL
, &struct_outputCnt
,
4246 _output
, outputCount
,
4247 0, &ool_output_size
);
4249 for (i
= 0; i
< *outputCount
; i
++)
4250 output
[i
] = SCALAR32(_output
[i
]);
4254 /* Routine io_async_method_scalarI_structureO */
4255 kern_return_t
is_io_async_method_scalarI_structureO(
4256 io_object_t connect
,
4257 mach_port_t wake_port
,
4258 io_async_ref_t reference
,
4259 mach_msg_type_number_t referenceCnt
,
4261 io_scalar_inband_t input
,
4262 mach_msg_type_number_t inputCount
,
4263 io_struct_inband_t output
,
4264 mach_msg_type_number_t
* outputCount
)
4267 io_scalar_inband64_t _input
;
4268 io_async_ref64_t _reference
;
4270 for (i
= 0; i
< referenceCnt
; i
++)
4271 _reference
[i
] = REF64(reference
[i
]);
4273 mach_msg_type_number_t scalar_outputCnt
= 0;
4274 mach_vm_size_t ool_output_size
= 0;
4276 for (i
= 0; i
< inputCount
; i
++)
4277 _input
[i
] = SCALAR64(input
[i
]);
4279 return (is_io_connect_async_method(connect
,
4280 wake_port
, _reference
, referenceCnt
,
4285 output
, outputCount
,
4286 NULL
, &scalar_outputCnt
,
4287 0, &ool_output_size
));
4290 /* Routine io_async_method_scalarI_structureI */
4291 kern_return_t
is_io_async_method_scalarI_structureI(
4292 io_connect_t connect
,
4293 mach_port_t wake_port
,
4294 io_async_ref_t reference
,
4295 mach_msg_type_number_t referenceCnt
,
4297 io_scalar_inband_t input
,
4298 mach_msg_type_number_t inputCount
,
4299 io_struct_inband_t inputStruct
,
4300 mach_msg_type_number_t inputStructCount
)
4303 io_scalar_inband64_t _input
;
4304 io_async_ref64_t _reference
;
4306 for (i
= 0; i
< referenceCnt
; i
++)
4307 _reference
[i
] = REF64(reference
[i
]);
4309 mach_msg_type_number_t scalar_outputCnt
= 0;
4310 mach_msg_type_number_t inband_outputCnt
= 0;
4311 mach_vm_size_t ool_output_size
= 0;
4313 for (i
= 0; i
< inputCount
; i
++)
4314 _input
[i
] = SCALAR64(input
[i
]);
4316 return (is_io_connect_async_method(connect
,
4317 wake_port
, _reference
, referenceCnt
,
4320 inputStruct
, inputStructCount
,
4322 NULL
, &inband_outputCnt
,
4323 NULL
, &scalar_outputCnt
,
4324 0, &ool_output_size
));
4327 /* Routine io_async_method_structureI_structureO */
4328 kern_return_t
is_io_async_method_structureI_structureO(
4329 io_object_t connect
,
4330 mach_port_t wake_port
,
4331 io_async_ref_t reference
,
4332 mach_msg_type_number_t referenceCnt
,
4334 io_struct_inband_t input
,
4335 mach_msg_type_number_t inputCount
,
4336 io_struct_inband_t output
,
4337 mach_msg_type_number_t
* outputCount
)
4340 mach_msg_type_number_t scalar_outputCnt
= 0;
4341 mach_vm_size_t ool_output_size
= 0;
4342 io_async_ref64_t _reference
;
4344 for (i
= 0; i
< referenceCnt
; i
++)
4345 _reference
[i
] = REF64(reference
[i
]);
4347 return (is_io_connect_async_method(connect
,
4348 wake_port
, _reference
, referenceCnt
,
4353 output
, outputCount
,
4354 NULL
, &scalar_outputCnt
,
4355 0, &ool_output_size
));
4359 kern_return_t
shim_io_async_method_scalarI_scalarO(
4360 IOExternalAsyncMethod
* method
,
4362 mach_port_t asyncWakePort
,
4363 io_user_reference_t
* asyncReference
,
4364 uint32_t asyncReferenceCount
,
4365 const io_user_scalar_t
* input
,
4366 mach_msg_type_number_t inputCount
,
4367 io_user_scalar_t
* output
,
4368 mach_msg_type_number_t
* outputCount
)
4372 io_scalar_inband_t _output
;
4374 io_async_ref_t reference
;
4376 bzero(&_output
[0], sizeof(_output
));
4377 for (i
= 0; i
< asyncReferenceCount
; i
++)
4378 reference
[i
] = REF32(asyncReference
[i
]);
4380 err
= kIOReturnBadArgument
;
4384 if( inputCount
!= method
->count0
)
4386 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4387 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4390 if( *outputCount
!= method
->count1
)
4392 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)*outputCount
, (uint64_t)method
->count1
);
4393 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)*outputCount
, uint64_t, (uint64_t)method
->count1
);
4397 func
= method
->func
;
4399 switch( inputCount
) {
4402 err
= (object
->*func
)( reference
,
4403 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4404 ARG32(input
[3]), ARG32(input
[4]), ARG32(input
[5]) );
4407 err
= (object
->*func
)( reference
,
4408 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4409 ARG32(input
[3]), ARG32(input
[4]),
4413 err
= (object
->*func
)( reference
,
4414 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4416 &_output
[0], &_output
[1] );
4419 err
= (object
->*func
)( reference
,
4420 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4421 &_output
[0], &_output
[1], &_output
[2] );
4424 err
= (object
->*func
)( reference
,
4425 ARG32(input
[0]), ARG32(input
[1]),
4426 &_output
[0], &_output
[1], &_output
[2],
4430 err
= (object
->*func
)( reference
,
4432 &_output
[0], &_output
[1], &_output
[2],
4433 &_output
[3], &_output
[4] );
4436 err
= (object
->*func
)( reference
,
4437 &_output
[0], &_output
[1], &_output
[2],
4438 &_output
[3], &_output
[4], &_output
[5] );
4442 IOLog("%s: Bad method table\n", object
->getName());
4447 for (i
= 0; i
< *outputCount
; i
++)
4448 output
[i
] = SCALAR32(_output
[i
]);
4454 /* Routine io_connect_method_scalarI_structureO */
4455 kern_return_t
is_io_connect_method_scalarI_structureO(
4456 io_object_t connect
,
4458 io_scalar_inband_t input
,
4459 mach_msg_type_number_t inputCount
,
4460 io_struct_inband_t output
,
4461 mach_msg_type_number_t
* outputCount
)
4464 io_scalar_inband64_t _input
;
4466 mach_msg_type_number_t scalar_outputCnt
= 0;
4467 mach_vm_size_t ool_output_size
= 0;
4469 for (i
= 0; i
< inputCount
; i
++)
4470 _input
[i
] = SCALAR64(input
[i
]);
4472 return (is_io_connect_method(connect
, index
,
4476 output
, outputCount
,
4477 NULL
, &scalar_outputCnt
,
4478 0, &ool_output_size
));
4481 kern_return_t
shim_io_connect_method_scalarI_structureO(
4483 IOExternalMethod
* method
,
4485 const io_user_scalar_t
* input
,
4486 mach_msg_type_number_t inputCount
,
4487 io_struct_inband_t output
,
4488 IOByteCount
* outputCount
)
4493 err
= kIOReturnBadArgument
;
4496 if( inputCount
!= method
->count0
)
4498 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4499 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4502 if( (kIOUCVariableStructureSize
!= method
->count1
)
4503 && (*outputCount
!= method
->count1
))
4505 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)*outputCount
, (uint64_t)method
->count1
, (uint64_t)kIOUCVariableStructureSize
);
4506 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)*outputCount
, uint64_t, (uint64_t)method
->count1
);
4510 func
= method
->func
;
4512 switch( inputCount
) {
4515 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4516 ARG32(input
[3]), ARG32(input
[4]),
4520 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4522 output
, (void *)outputCount
);
4525 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4526 output
, (void *)outputCount
, 0 );
4529 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]),
4530 output
, (void *)outputCount
, 0, 0 );
4533 err
= (object
->*func
)( ARG32(input
[0]),
4534 output
, (void *)outputCount
, 0, 0, 0 );
4537 err
= (object
->*func
)( output
, (void *)outputCount
, 0, 0, 0, 0 );
4541 IOLog("%s: Bad method table\n", object
->getName());
4550 kern_return_t
shim_io_async_method_scalarI_structureO(
4551 IOExternalAsyncMethod
* method
,
4553 mach_port_t asyncWakePort
,
4554 io_user_reference_t
* asyncReference
,
4555 uint32_t asyncReferenceCount
,
4556 const io_user_scalar_t
* input
,
4557 mach_msg_type_number_t inputCount
,
4558 io_struct_inband_t output
,
4559 mach_msg_type_number_t
* outputCount
)
4564 io_async_ref_t reference
;
4566 for (i
= 0; i
< asyncReferenceCount
; i
++)
4567 reference
[i
] = REF32(asyncReference
[i
]);
4569 err
= kIOReturnBadArgument
;
4571 if( inputCount
!= method
->count0
)
4573 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4574 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4577 if( (kIOUCVariableStructureSize
!= method
->count1
)
4578 && (*outputCount
!= method
->count1
))
4580 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)*outputCount
, (uint64_t)method
->count1
, (uint64_t)kIOUCVariableStructureSize
);
4581 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)*outputCount
, uint64_t, (uint64_t)method
->count1
);
4585 func
= method
->func
;
4587 switch( inputCount
) {
4590 err
= (object
->*func
)( reference
,
4591 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4592 ARG32(input
[3]), ARG32(input
[4]),
4596 err
= (object
->*func
)( reference
,
4597 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4599 output
, (void *)outputCount
);
4602 err
= (object
->*func
)( reference
,
4603 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4604 output
, (void *)outputCount
, 0 );
4607 err
= (object
->*func
)( reference
,
4608 ARG32(input
[0]), ARG32(input
[1]),
4609 output
, (void *)outputCount
, 0, 0 );
4612 err
= (object
->*func
)( reference
,
4614 output
, (void *)outputCount
, 0, 0, 0 );
4617 err
= (object
->*func
)( reference
,
4618 output
, (void *)outputCount
, 0, 0, 0, 0 );
4622 IOLog("%s: Bad method table\n", object
->getName());
4630 /* Routine io_connect_method_scalarI_structureI */
4631 kern_return_t
is_io_connect_method_scalarI_structureI(
4632 io_connect_t connect
,
4634 io_scalar_inband_t input
,
4635 mach_msg_type_number_t inputCount
,
4636 io_struct_inband_t inputStruct
,
4637 mach_msg_type_number_t inputStructCount
)
4640 io_scalar_inband64_t _input
;
4642 mach_msg_type_number_t scalar_outputCnt
= 0;
4643 mach_msg_type_number_t inband_outputCnt
= 0;
4644 mach_vm_size_t ool_output_size
= 0;
4646 for (i
= 0; i
< inputCount
; i
++)
4647 _input
[i
] = SCALAR64(input
[i
]);
4649 return (is_io_connect_method(connect
, index
,
4651 inputStruct
, inputStructCount
,
4653 NULL
, &inband_outputCnt
,
4654 NULL
, &scalar_outputCnt
,
4655 0, &ool_output_size
));
4658 kern_return_t
shim_io_connect_method_scalarI_structureI(
4659 IOExternalMethod
* method
,
4661 const io_user_scalar_t
* input
,
4662 mach_msg_type_number_t inputCount
,
4663 io_struct_inband_t inputStruct
,
4664 mach_msg_type_number_t inputStructCount
)
4667 IOReturn err
= kIOReturnBadArgument
;
4671 if (inputCount
!= method
->count0
)
4673 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4674 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4677 if( (kIOUCVariableStructureSize
!= method
->count1
)
4678 && (inputStructCount
!= method
->count1
))
4680 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputStructCount
, (uint64_t)method
->count1
, (uint64_t)kIOUCVariableStructureSize
);
4681 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputStructCount
, uint64_t, (uint64_t)method
->count1
);
4685 func
= method
->func
;
4687 switch( inputCount
) {
4690 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4691 ARG32(input
[3]), ARG32(input
[4]),
4695 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), (void *) input
[2],
4697 inputStruct
, (void *)(uintptr_t)inputStructCount
);
4700 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4701 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4705 err
= (object
->*func
)( ARG32(input
[0]), ARG32(input
[1]),
4706 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4710 err
= (object
->*func
)( ARG32(input
[0]),
4711 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4715 err
= (object
->*func
)( inputStruct
, (void *)(uintptr_t)inputStructCount
,
4720 IOLog("%s: Bad method table\n", object
->getName());
4728 kern_return_t
shim_io_async_method_scalarI_structureI(
4729 IOExternalAsyncMethod
* method
,
4731 mach_port_t asyncWakePort
,
4732 io_user_reference_t
* asyncReference
,
4733 uint32_t asyncReferenceCount
,
4734 const io_user_scalar_t
* input
,
4735 mach_msg_type_number_t inputCount
,
4736 io_struct_inband_t inputStruct
,
4737 mach_msg_type_number_t inputStructCount
)
4741 IOReturn err
= kIOReturnBadArgument
;
4742 io_async_ref_t reference
;
4744 for (i
= 0; i
< asyncReferenceCount
; i
++)
4745 reference
[i
] = REF32(asyncReference
[i
]);
4749 if (inputCount
!= method
->count0
)
4751 IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputCount
, (uint64_t)method
->count0
);
4752 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputCount
, uint64_t, (uint64_t)method
->count0
);
4755 if( (kIOUCVariableStructureSize
!= method
->count1
)
4756 && (inputStructCount
!= method
->count1
))
4758 IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__
, __LINE__
, object
->getName(), (uint64_t)inputStructCount
, (uint64_t)method
->count1
, (uint64_t)kIOUCVariableStructureSize
);
4759 DTRACE_IO2(iokit_count_mismatch
, uint64_t, (uint64_t)inputStructCount
, uint64_t, (uint64_t)method
->count1
);
4763 func
= method
->func
;
4765 switch( inputCount
) {
4768 err
= (object
->*func
)( reference
,
4769 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4770 ARG32(input
[3]), ARG32(input
[4]),
4774 err
= (object
->*func
)( reference
,
4775 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4777 inputStruct
, (void *)(uintptr_t)inputStructCount
);
4780 err
= (object
->*func
)( reference
,
4781 ARG32(input
[0]), ARG32(input
[1]), ARG32(input
[2]),
4782 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4786 err
= (object
->*func
)( reference
,
4787 ARG32(input
[0]), ARG32(input
[1]),
4788 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4792 err
= (object
->*func
)( reference
,
4794 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4798 err
= (object
->*func
)( reference
,
4799 inputStruct
, (void *)(uintptr_t)inputStructCount
,
4804 IOLog("%s: Bad method table\n", object
->getName());
4812 /* Routine io_connect_method_structureI_structureO */
4813 kern_return_t
is_io_connect_method_structureI_structureO(
4814 io_object_t connect
,
4816 io_struct_inband_t input
,
4817 mach_msg_type_number_t inputCount
,
4818 io_struct_inband_t output
,
4819 mach_msg_type_number_t
* outputCount
)
4821 mach_msg_type_number_t scalar_outputCnt
= 0;
4822 mach_vm_size_t ool_output_size
= 0;
4824 return (is_io_connect_method(connect
, index
,
4828 output
, outputCount
,
4829 NULL
, &scalar_outputCnt
,
4830 0, &ool_output_size
));
kern_return_t shim_io_connect_method_structureI_structureO(
    IOExternalMethod *       method,
    IOService *              object,
    io_struct_inband_t       input,
    mach_msg_type_number_t   inputCount,
    io_struct_inband_t       output,
    IOByteCount *            outputCount )
{
    IOMethod    func;
    IOReturn    err = kIOReturnBadArgument;

    do
    {
        if ((kIOUCVariableStructureSize != method->count0)
            && (inputCount != method->count0))
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if ((kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        if (method->count1) {
            if (method->count0) {
                err = (object->*func)( input, output,
                                       (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
            } else {
                err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
            }
        } else {
            err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
        }
    }
    while (false);

    return (err);
}
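
/*
 * Illustrative sketch only (an assumption, not part of this file): a legacy
 * synchronous method table entry that would be routed through the
 * struct-in/struct-out shim above. MyLegacyUserClient, setConfig,
 * MyConfigRequest, and MyConfigReply are hypothetical names; when both count0
 * and count1 are non-zero the shim passes (input, output, inputCount,
 * outputCount) to the handler.
 *
 *     static const IOExternalMethod sMethods[] = {
 *         {   // selector 0
 *             NULL,                                       // target resolved at dispatch time
 *             (IOMethod) &MyLegacyUserClient::setConfig,  // hypothetical handler
 *             kIOUCStructIStructO,
 *             sizeof(MyConfigRequest),                    // count0: struct input size
 *             sizeof(MyConfigReply)                       // count1: struct output size
 *         }
 *     };
 */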
kern_return_t shim_io_async_method_structureI_structureO(
    IOExternalAsyncMethod *  method,
    IOService *              object,
    mach_port_t              asyncWakePort,
    io_user_reference_t *    asyncReference,
    uint32_t                 asyncReferenceCount,
    io_struct_inband_t       input,
    mach_msg_type_number_t   inputCount,
    io_struct_inband_t       output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod   func;
    uint32_t        i;
    IOReturn        err;
    io_async_ref_t  reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;
    do
    {
        if ((kIOUCVariableStructureSize != method->count0)
            && (inputCount != method->count0))
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if ((kIOUCVariableStructureSize != method->count1)
            && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        if (method->count1) {
            if (method->count0) {
                err = (object->*func)( reference,
                                       input, output,
                                       (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
            } else {
                err = (object->*func)( reference,
                                       output, outputCount, 0, 0, 0, 0 );
            }
        } else {
            err = (object->*func)( reference,
                                   input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
        }
    }
    while (false);

    return (err);
}
bool gIOKextdClearedBusy = false;

/* Routine io_catalog_send_data */
kern_return_t is_io_catalog_send_data(
    mach_port_t              master_port,
    uint32_t                 flag,
    io_buf_ptr_t             inData,
    mach_msg_type_number_t   inDataCount,
    kern_return_t *          result)
{
#if NO_KEXTD
    return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
    OSObject * obj = 0;
    vm_offset_t data;
    kern_return_t kr = kIOReturnError;

    //printf("io_catalog_send_data called. flag: %d\n", flag);

    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    if ((flag != kIOCatalogRemoveKernelLinker &&
         flag != kIOCatalogKextdActive &&
         flag != kIOCatalogKextdFinishedLaunching) &&
        (!inData || !inDataCount))
    {
        return kIOReturnBadArgument;
    }

    if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-secure-management"))
    {
        OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
        IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
        OSSafeReleaseNULL(taskName);
        // For now, fake success to not break applications relying on this function succeeding.
        // See <rdar://problem/32554970> for more details.
        return kIOReturnSuccess;
    }

    if (inData) {
        vm_map_offset_t map_data;

        if (inDataCount > sizeof(io_struct_inband_t) * 1024)
            return (kIOReturnMessageTooLarge);

        kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
        data = CAST_DOWN(vm_offset_t, map_data);

        if (kr != KERN_SUCCESS)
            return kr;

        // must return success after vm_map_copyout() succeeds

        if (inDataCount) {
            obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
            vm_deallocate( kernel_map, data, inDataCount );
            if (!obj) {
                *result = kIOReturnNoMemory;
                return (KERN_SUCCESS);
            }
        }
    }

    switch (flag) {
        case kIOCatalogResetDrivers:
        case kIOCatalogResetDriversNoMatch: {
            OSArray * array;

            array = OSDynamicCast(OSArray, obj);
            if (array) {
                if (!gIOCatalogue->resetAndAddDrivers(array,
                        flag == kIOCatalogResetDrivers)) {
                    kr = kIOReturnError;
                }
            } else {
                kr = kIOReturnBadArgument;
            }
            break;
        }

        case kIOCatalogAddDrivers:
        case kIOCatalogAddDriversNoMatch: {
            OSArray * array;

            array = OSDynamicCast(OSArray, obj);
            if (array) {
                if (!gIOCatalogue->addDrivers(array,
                        flag == kIOCatalogAddDrivers)) {
                    kr = kIOReturnError;
                }
            } else {
                kr = kIOReturnBadArgument;
            }
            break;
        }

        case kIOCatalogRemoveDrivers:
        case kIOCatalogRemoveDriversNoMatch: {
            OSDictionary * dict;

            dict = OSDynamicCast(OSDictionary, obj);
            if (dict) {
                if (!gIOCatalogue->removeDrivers(dict,
                        flag == kIOCatalogRemoveDrivers)) {
                    kr = kIOReturnError;
                }
            } else {
                kr = kIOReturnBadArgument;
            }
            break;
        }

        case kIOCatalogStartMatching: {
            OSDictionary * dict;

            dict = OSDynamicCast(OSDictionary, obj);
            if (dict) {
                if (!gIOCatalogue->startMatching(dict)) {
                    kr = kIOReturnError;
                }
            } else {
                kr = kIOReturnBadArgument;
            }
            break;
        }

        case kIOCatalogRemoveKernelLinker:
            kr = KERN_NOT_SUPPORTED;
            break;

        case kIOCatalogKextdActive:
            IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
            OSKext::setKextdActive();

            /* Dump all nonloaded startup extensions; kextd will now send them
             * down on request.
             */
            OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
            kr = kIOReturnSuccess;
            break;

        case kIOCatalogKextdFinishedLaunching: {
            if (!gIOKextdClearedBusy) {
                IOService * serviceRoot = IOService::getServiceRoot();
                if (serviceRoot) {
                    IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
                    serviceRoot->adjustBusy(-1);
                    gIOKextdClearedBusy = true;
                }
            }
            kr = kIOReturnSuccess;
            break;
        }

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    if (obj) obj->release();

    *result = kr;
    return (KERN_SUCCESS);
#endif /* NO_KEXTD */
}
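
/*
 * Illustrative user-space sketch only (an assumption, not part of this file):
 * is_io_catalog_send_data is normally reached through IOKitLib's
 * IOCatalogueSendData(). The buffer is an XML-serialized collection, for
 * example an array of driver personalities for kIOCatalogAddDrivers. Note the
 * entitlement check above: callers lacking
 * com.apple.rootless.kext-secure-management currently receive a faked
 * kIOReturnSuccess with no catalogue change.
 *
 *     const char *xml = "<array></array>";   // hypothetical serialized personalities
 *     kern_return_t kr = IOCatalogueSendData(kIOMasterPortDefault,
 *                                            kIOCatalogAddDrivers,
 *                                            xml,
 *                                            (uint32_t)(strlen(xml) + 1));
 */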
/* Routine io_catalog_terminate */
kern_return_t is_io_catalog_terminate(
    mach_port_t    master_port,
    uint32_t       flag,
    io_name_t      name)
{
    kern_return_t kr;

    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                           kIOClientPrivilegeAdministrator );
    if (kIOReturnSuccess != kr)
        return (kr);

    switch (flag) {
#if !defined(SECURE_KERNEL)
        case kIOCatalogServiceTerminate: {
            OSIterator * iter;
            IOService *  service;

            iter = IORegistryIterator::iterateOver(gIOServicePlane,
                                                   kIORegistryIterateRecursively);
            if (!iter)
                return kIOReturnNoMemory;

            do {
                iter->reset();
                while ((service = (IOService *)iter->getNextObject())) {
                    if (service->metaCast(name)) {
                        if (!service->terminate( kIOServiceRequired
                                               | kIOServiceSynchronous)) {
                            kr = kIOReturnUnsupported;
                            break;
                        }
                    }
                }
            } while (!service && !iter->isValid());
            iter->release();
            break;
        }

        case kIOCatalogModuleUnload:
        case kIOCatalogModuleTerminate:
            kr = gIOCatalogue->terminateDriversForModule(name,
                                        flag == kIOCatalogModuleUnload);
            break;
#endif

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    return (kr);
}
/* Routine io_catalog_get_data */
kern_return_t is_io_catalog_get_data(
    mach_port_t              master_port,
    uint32_t                 flag,
    io_buf_ptr_t            *outData,
    mach_msg_type_number_t  *outDataCount)
{
    kern_return_t kr = kIOReturnSuccess;
    OSSerialize * s;

    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_get_data called. flag: %d\n", flag);

    s = OSSerialize::withCapacity(4096);
    if (!s)
        return kIOReturnNoMemory;

    kr = gIOCatalogue->serializeData(flag, s);

    if (kr == kIOReturnSuccess) {
        vm_offset_t   data;
        vm_map_copy_t copy;
        vm_size_t     size;

        size = s->getLength();
        kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
        if (kr == kIOReturnSuccess) {
            bcopy(s->text(), (void *)data, size);
            kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
                               (vm_map_size_t)size, true, &copy);
            *outData = (char *)copy;
            *outDataCount = size;
        }
    }

    s->release();

    return kr;
}
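
/*
 * Illustrative user-space sketch only (an assumption, not part of this file):
 * the serialized catalogue produced above is typically fetched with IOKitLib's
 * IOCatalogueGetData(); the buffer is vm-copied into the caller's task and
 * should be released with vm_deallocate() when no longer needed.
 *
 *     char *buf = NULL;
 *     uint32_t size = 0;
 *     kern_return_t kr = IOCatalogueGetData(kIOMasterPortDefault,
 *                                           kIOCatalogGetContents, &buf, &size);
 *     if (kr == KERN_SUCCESS && buf) {
 *         // ... parse the serialized catalogue ...
 *         vm_deallocate(mach_task_self(), (vm_address_t)buf, size);
 *     }
 */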
/* Routine io_catalog_get_gen_count */
kern_return_t is_io_catalog_get_gen_count(
    mach_port_t    master_port,
    uint32_t      *genCount)
{
    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_get_gen_count called.\n");

    if (!genCount)
        return kIOReturnBadArgument;

    *genCount = gIOCatalogue->getGenerationCount();

    return kIOReturnSuccess;
}
/* Routine io_catalog_module_loaded.
 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
 */
kern_return_t is_io_catalog_module_loaded(
    mach_port_t    master_port,
    io_name_t      name)
{
    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_module_loaded called. name %s\n", name);

    if (!name)
        return kIOReturnBadArgument;

    gIOCatalogue->moduleHasLoaded(name);

    return kIOReturnSuccess;
}
kern_return_t is_io_catalog_reset(
    mach_port_t    master_port,
    uint32_t       flag)
{
    if (master_port != master_device_port)
        return kIOReturnNotPrivileged;

    switch (flag) {
        case kIOCatalogResetDefault:
            gIOCatalogue->reset();
            break;

        default:
            return kIOReturnBadArgument;
    }

    return kIOReturnSuccess;
}
kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
    kern_return_t result = kIOReturnBadArgument;
    IOUserClient *userClient;

    if ((userClient = OSDynamicCast(IOUserClient,
            iokit_lookup_connect_ref_current_task((mach_port_name_t)(uintptr_t)args->userClientRef)))) {
        IOExternalTrap *trap;
        IOService *target = NULL;

        trap = userClient->getTargetAndTrapForIndex(&target, args->index);

        if (trap && target) {
            IOTrap func;

            func = trap->func;

            if (func) {
                result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6);
            }
        }

        iokit_remove_connect_reference(userClient);
    }

    return result;
}
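
/*
 * Illustrative user-space sketch only (an assumption, not part of this file):
 * the trap above is the fast path behind IOKitLib's IOConnectTrap* family. A
 * client that has opened an io_connect_t to a user client whose
 * getTargetAndTrapForIndex() returns a trap for the given index could call:
 *
 *     uintptr_t p1 = 0, p2 = 0, p3 = 0, p4 = 0, p5 = 0, p6 = 0;
 *     kern_return_t kr = IOConnectTrap6(connect, 0,   // trap index 0
 *                                       p1, p2, p3, p4, p5, p6);
 */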
/* Routine io_device_tree_entry_exists_with_name */
kern_return_t is_io_device_tree_entry_exists_with_name(
    mach_port_t    master_port,
    io_name_t      name,
    boolean_t     *exists )
{
    OSCollectionIterator *iter;

    if (master_port != master_device_port)
        return (kIOReturnNotPrivileged);

    iter = IODTFindMatchingEntries(IORegistryEntry::getRegistryRoot(), kIODTRecursive, name);
    *exists = iter && iter->getNextObject();
    OSSafeReleaseNULL(iter);

    return kIOReturnSuccess;
}
IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                       IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
        uint32_t count;

        count = dispatch->checkScalarInputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureInputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureInputDescriptor)
                            ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkScalarOutputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureOutputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureOutputDescriptor)
                            ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
        {
            return (kIOReturnBadArgument);
        }

        if (dispatch->function)
            err = (*dispatch->function)(target, reference, args);
        else
            err = kIOReturnNoCompletion;        /* implementator can dispatch */

        return (err);
    }

    // pre-Leopard API's don't do ool structs
    if (args->structureInputDescriptor || args->structureOutputDescriptor)
    {
        err = kIOReturnIPCError;
        return (err);
    }

    structureOutputSize = args->structureOutputSize;

    if (args->asyncWakePort)
    {
        IOExternalAsyncMethod * method;
        object = 0;
        if (!(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object)
            return (kIOReturnUnsupported);

        if (kIOUCForegroundOnly & method->flags)
        {
            if (task_is_gpu_denied(current_task()))
                return (kIOReturnNotPermitted);
        }

        switch (method->flags & kIOUCTypeMask)
        {
            case kIOUCScalarIStructI:
                err = shim_io_async_method_scalarI_structureI( method, object,
                            args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                            args->scalarInput, args->scalarInputCount,
                            (char *)args->structureInput, args->structureInputSize );
                break;

            case kIOUCScalarIScalarO:
                err = shim_io_async_method_scalarI_scalarO( method, object,
                            args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                            args->scalarInput, args->scalarInputCount,
                            args->scalarOutput, &args->scalarOutputCount );
                break;

            case kIOUCScalarIStructO:
                err = shim_io_async_method_scalarI_structureO( method, object,
                            args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                            args->scalarInput, args->scalarInputCount,
                            (char *) args->structureOutput, &args->structureOutputSize );
                break;

            case kIOUCStructIStructO:
                err = shim_io_async_method_structureI_structureO( method, object,
                            args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                            (char *)args->structureInput, args->structureInputSize,
                            (char *) args->structureOutput, &args->structureOutputSize );
                break;

            default:
                err = kIOReturnBadArgument;
                break;
        }
    }
    else
    {
        IOExternalMethod * method;
        object = 0;
        if (!(method = getTargetAndMethodForIndex(&object, selector)) || !object)
            return (kIOReturnUnsupported);

        if (kIOUCForegroundOnly & method->flags)
        {
            if (task_is_gpu_denied(current_task()))
                return (kIOReturnNotPermitted);
        }

        switch (method->flags & kIOUCTypeMask)
        {
            case kIOUCScalarIStructI:
                err = shim_io_connect_method_scalarI_structureI( method, object,
                            args->scalarInput, args->scalarInputCount,
                            (char *) args->structureInput, args->structureInputSize );
                break;

            case kIOUCScalarIScalarO:
                err = shim_io_connect_method_scalarI_scalarO( method, object,
                            args->scalarInput, args->scalarInputCount,
                            args->scalarOutput, &args->scalarOutputCount );
                break;

            case kIOUCScalarIStructO:
                err = shim_io_connect_method_scalarI_structureO( method, object,
                            args->scalarInput, args->scalarInputCount,
                            (char *) args->structureOutput, &structureOutputSize );
                break;

            case kIOUCStructIStructO:
                err = shim_io_connect_method_structureI_structureO( method, object,
                            (char *) args->structureInput, args->structureInputSize,
                            (char *) args->structureOutput, &structureOutputSize );
                break;

            default:
                err = kIOReturnBadArgument;
                break;
        }

        args->structureOutputSize = structureOutputSize;
    }

    return (err);
}
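
/*
 * Illustrative sketch only (an assumption, not part of this file): the modern
 * way to use the dispatch path above is to override externalMethod() in an
 * IOUserClient subclass and hand the superclass a checked
 * IOExternalMethodDispatch entry; the superclass then validates the scalar
 * counts and struct sizes before calling the handler. MyUserClient and
 * sDoWork are hypothetical names.
 *
 *     IOReturn MyUserClient::externalMethod(uint32_t selector,
 *                                           IOExternalMethodArguments *args,
 *                                           IOExternalMethodDispatch *dispatch,
 *                                           OSObject *target, void *reference)
 *     {
 *         static const IOExternalMethodDispatch sMethods[] = {
 *             // function, scalar in, struct in, scalar out, struct out
 *             { &MyUserClient::sDoWork, 1, kIOUCVariableStructureSize, 1, 0 },
 *         };
 *
 *         if (selector < (sizeof(sMethods) / sizeof(sMethods[0]))) {
 *             dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
 *             if (!target)
 *                 target = this;
 *         }
 *         return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *     }
 */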
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);