/*
 * Copyright (c) 1998-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <libkern/c++/OSKext.h>
#include <IOKit/IOKitServer.h>
#include <IOKit/IOKitKeysPrivate.h>
#include <IOKit/IOUserClient.h>
#include <IOKit/IOService.h>
#include <IOKit/IORegistryEntry.h>
#include <IOKit/IOCatalogue.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOBSD.h>
#include <IOKit/IOStatisticsPrivate.h>
#include <IOKit/IOTimeStamp.h>
#include <IOKit/system.h>
#include <libkern/OSDebug.h>

#include <sys/kauth.h>
#include <sys/codesign.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <sys/kauth.h>
#endif /* CONFIG_MACF */

#include <IOKit/assert.h>

#include "IOServicePrivate.h"
#include "IOKitKernelInternal.h"
#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x))
#define SCALAR32(x) ((uint32_t )x)
#define ARG32(x)    ((void *)(uintptr_t)SCALAR32(x))
#define REF64(x)    ((io_user_reference_t)((UInt64)(x)))
#define REF32(x)    ((int)(x))
enum
{
    kIOUCAsync0Flags          = 3ULL,
    kIOUCAsync64Flag          = 1ULL,
    kIOUCAsyncErrorLoggedFlag = 2ULL
};
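// Note (descriptive, not original to this file): these flag bits live in the low
// bits of asyncRef[kIOAsyncReservedIndex], alongside the wake port value. See
// setAsyncReference64() and releaseAsyncReference64() below, which mask them off
// with ~kIOUCAsync0Flags. kIOUCAsync64Flag marks references created for 64-bit
// tasks; kIOUCAsyncErrorLoggedFlag throttles repeated mach_msg failure logging in
// _sendAsyncResult64().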
#if IOKITSTATS

#define IOStatisticsRegisterCounter() \
do { \
	reserved->counter = IOStatistics::registerUserClient(this); \
} while (0)

#define IOStatisticsUnregisterCounter() \
do { \
	if (reserved) \
		IOStatistics::unregisterUserClient(reserved->counter); \
} while (0)

#define IOStatisticsClientCall() \
do { \
	IOStatistics::countUserClientCall(client); \
} while (0)

#else

#define IOStatisticsRegisterCounter()
#define IOStatisticsUnregisterCounter()
#define IOStatisticsClientCall()

#endif /* IOKITSTATS */
#if DEVELOPMENT || DEBUG

#define FAKE_STACK_FRAME(a)						\
	const void ** __frameptr;					\
	const void  * __retaddr;					\
	__frameptr = (typeof(__frameptr)) __builtin_frame_address(0);	\
	__retaddr = __frameptr[1];					\
	__frameptr[1] = (a);

#define FAKE_STACK_FRAME_END()						\
	__frameptr[1] = __retaddr;

#else /* DEVELOPMENT || DEBUG */

#define FAKE_STACK_FRAME(a)
#define FAKE_STACK_FRAME_END()

#endif /* DEVELOPMENT || DEBUG */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// definitions we should get from osfmk

//typedef struct ipc_port * ipc_port_t;
typedef natural_t ipc_kobject_type_t;

#define IKOT_IOKIT_SPARE	27
#define IKOT_IOKIT_CONNECT	29
#define IKOT_IOKIT_OBJECT	30

extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
			ipc_kobject_type_t type );

extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

extern mach_port_name_t iokit_make_send_right( task_t task,
			io_object_t obj, ipc_kobject_type_t type );

extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);

extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);

extern ipc_port_t master_device_port;

extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );

#include <mach/mach_traps.h>
#include <vm/vm_map.h>
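// Descriptive sketch (not original to this file): iokit_alloc_object_port()
// creates a kernel-object port of one of the IKOT_IOKIT_* types for an OSObject,
// iokit_make_send_right() hands a send right for it to a task, and
// iokit_release_port_send()/iokit_destroy_object_port() drop the kernel's
// references when the wrapping IOMachPort goes away. Per the comment above, the
// IKOT_* values and these prototypes mirror definitions owned by osfmk.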
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// IOMachPort maps OSObjects to ports, avoiding adding an ivar to OSObject.

class IOMachPort : public OSObject
{
    OSDeclareDefaultStructors(IOMachPort)
public:
    ipc_port_t		port;
    UInt32		mscount;
    UInt8		holdDestroy;

    static IOMachPort * portForObject( OSObject * obj,
				ipc_kobject_type_t type );
    static bool noMoreSendersForObject( OSObject * obj,
				ipc_kobject_type_t type, mach_port_mscount_t * mscount );
    static void releasePortForObject( OSObject * obj,
				ipc_kobject_type_t type );
    static void setHoldDestroy( OSObject * obj, ipc_kobject_type_t type );

    static OSDictionary * dictForType( ipc_kobject_type_t type );

    static mach_port_name_t makeSendRightForTask( task_t task,
				io_object_t obj, ipc_kobject_type_t type );

    virtual void free() APPLE_KEXT_OVERRIDE;
};

#define super OSObject
OSDefineMetaClassAndStructors(IOMachPort, OSObject)
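// Lifecycle sketch (descriptive comment, based on the methods below):
// portForObject() lazily creates the IOMachPort wrapper and its Mach port the
// first time an object is exported; noMoreSendersForObject() is driven by the
// no-senders notification and tears the entry down once the make-send count is
// accounted for; setHoldDestroy() keeps the entry alive across
// releasePortForObject() so a terminated service's port survives until the
// client has consumed the termination notification.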
static IOLock * gIOObjectPortLock;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// not in dictForType() for debugging ease
static OSDictionary * gIOObjectPorts;
static OSDictionary * gIOConnectPorts;
OSDictionary * IOMachPort::dictForType( ipc_kobject_type_t type )
{
    OSDictionary ** dict;

    if( IKOT_IOKIT_OBJECT == type )
	dict = &gIOObjectPorts;
    else if( IKOT_IOKIT_CONNECT == type )
	dict = &gIOConnectPorts;
    else
	return( 0 );

    if( 0 == *dict)
	*dict = OSDictionary::withCapacity( 1 );

    return( *dict );
}
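// Descriptive note (not original to this file): the per-type dictionaries key
// each entry by the exported object's pointer, cast to (const OSSymbol *) only
// to satisfy OSDictionary's key type; no OSSymbol is ever created for it.
// portForObject() and releasePortForObject() below rely on this identity-keyed
// lookup.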
IOMachPort * IOMachPort::portForObject( OSObject * obj,
				ipc_kobject_type_t type )
{
    IOMachPort *	inst = 0;
    OSDictionary *	dict;

    IOTakeLock( gIOObjectPortLock );

    do {
	dict = dictForType( type );
	if( !dict)
	    continue;

	if( (inst = (IOMachPort *)
		dict->getObject( (const OSSymbol *) obj ))) {
	    inst->mscount++;
	    inst->retain();
	    continue;
	}

	inst = new IOMachPort;
	if( inst && !inst->init()) {
	    inst = 0;
	    continue;
	}

	inst->port = iokit_alloc_object_port( obj, type );
	if( inst->port) {
	    // retains obj
	    dict->setObject( (const OSSymbol *) obj, inst );
	    inst->mscount++;
	} else {
	    inst->release();
	    inst = 0;
	}
    } while( false );

    IOUnlock( gIOObjectPortLock );

    return( inst );
}
bool IOMachPort::noMoreSendersForObject( OSObject * obj,
				ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    OSDictionary *	dict;
    IOMachPort *	machPort;
    IOUserClient *	uc;
    bool		destroyed = true;

    IOTakeLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
	obj->retain();

	machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if( machPort) {
	    destroyed = (machPort->mscount <= *mscount);
	    if (!destroyed) *mscount = machPort->mscount;
	    else {
		if ((IKOT_IOKIT_CONNECT == type) && (uc = OSDynamicCast(IOUserClient, obj)))
		    uc->noMoreSenders();
		dict->removeObject( (const OSSymbol *) obj );
	    }
	}
	obj->release();
    }

    IOUnlock( gIOObjectPortLock );

    return( destroyed );
}
void IOMachPort::releasePortForObject( OSObject * obj,
				ipc_kobject_type_t type )
{
    OSDictionary *	dict;
    IOMachPort *	machPort;

    assert(IKOT_IOKIT_CONNECT != type);

    IOTakeLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
	obj->retain();
	machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if( machPort && !machPort->holdDestroy)
	    dict->removeObject( (const OSSymbol *) obj );
	obj->release();
    }

    IOUnlock( gIOObjectPortLock );
}
void IOMachPort::setHoldDestroy( OSObject * obj, ipc_kobject_type_t type )
{
    OSDictionary *	dict;
    IOMachPort *	machPort;

    IOLockLock( gIOObjectPortLock );

    if( (dict = dictForType( type ))) {
	machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if( machPort)
	    machPort->holdDestroy = true;
    }

    IOLockUnlock( gIOObjectPortLock );
}
void IOUserClient::destroyUserReferences( OSObject * obj )
{
    OSDictionary * dict;
    IOMachPort *   port;
    IOUserClient * uc;

    IOMachPort::releasePortForObject( obj, IKOT_IOKIT_OBJECT );

    // IOMachPort::releasePortForObject( obj, IKOT_IOKIT_CONNECT );

    IOTakeLock( gIOObjectPortLock );
    obj->retain();

    if( (dict = IOMachPort::dictForType( IKOT_IOKIT_CONNECT )))
    {
	port = (IOMachPort *) dict->getObject( (const OSSymbol *) obj );
	if( port)
	{
	    if ((uc = OSDynamicCast(IOUserClient, obj)) && uc->mappings)
	    {
		dict->setObject((const OSSymbol *) uc->mappings, port);
		iokit_switch_object_port(port->port, uc->mappings, IKOT_IOKIT_CONNECT);

		uc->mappings->release();
		uc->mappings = 0;
	    }
	    dict->removeObject( (const OSSymbol *) obj );
	}
    }

    obj->release();
    IOUnlock( gIOObjectPortLock );
}
mach_port_name_t IOMachPort::makeSendRightForTask( task_t task,
				io_object_t obj, ipc_kobject_type_t type )
{
    return( iokit_make_send_right( task, obj, type ));
}

void IOMachPort::free( void )
{
    if( port)
	iokit_destroy_object_port( port );
    super::free();
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserIterator : public OSIterator
{
    OSDeclareDefaultStructors(IOUserIterator)
public:
    OSObject *	userIteratorObject;
    IOLock   *	lock;

    static IOUserIterator * withIterator(OSIterator * iter);
    virtual bool init( void ) APPLE_KEXT_OVERRIDE;
    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class IOUserNotification : public IOUserIterator
{
    OSDeclareDefaultStructors(IOUserNotification)

#define holdNotify	userIteratorObject

public:

    virtual void free() APPLE_KEXT_OVERRIDE;

    virtual void setNotification( IONotifier * obj );

    virtual void reset() APPLE_KEXT_OVERRIDE;
    virtual bool isValid() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClassAndStructors( IOUserIterator, OSIterator )

IOUserIterator *
IOUserIterator::withIterator(OSIterator * iter)
{
    IOUserIterator * me;

    if (!iter) return (0);

    me = new IOUserIterator;
    if (me && !me->init())
    {
	me->release();
	me = 0;
    }
    if (!me) return (me);

    me->userIteratorObject = iter;

    return (me);
}

bool
IOUserIterator::init( void )
{
    if (!OSObject::init()) return (false);

    lock = IOLockAlloc();
    if (!lock) return (false);

    return (true);
}

void
IOUserIterator::free()
{
    if (userIteratorObject) userIteratorObject->release();
    if (lock) IOLockFree(lock);
    OSObject::free();
}

void
IOUserIterator::reset()
{
    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ((OSIterator *)userIteratorObject)->reset();
    IOLockUnlock(lock);
}

bool
IOUserIterator::isValid()
{
    bool ret;

    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ret = ((OSIterator *)userIteratorObject)->isValid();
    IOLockUnlock(lock);

    return (ret);
}

OSObject *
IOUserIterator::getNextObject()
{
    OSObject * ret;

    IOLockLock(lock);
    assert(OSDynamicCast(OSIterator, userIteratorObject));
    ret = ((OSIterator *)userIteratorObject)->getNextObject();
    IOLockUnlock(lock);

    return (ret);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// functions called from osfmk/device/iokit_rpc.c

void
iokit_add_reference( io_object_t obj )
{
    if( obj)
	obj->retain();
}

void
iokit_remove_reference( io_object_t obj )
{
    if( obj)
	obj->release();
}

void
iokit_add_connect_reference( io_object_t obj )
{
    IOUserClient * uc;

    if (!obj) return;

    if ((uc = OSDynamicCast(IOUserClient, obj))) OSIncrementAtomic(&uc->__ipc);

    obj->retain();
}

void
iokit_remove_connect_reference( io_object_t obj )
{
    IOUserClient * uc;
    bool           finalize = false;

    if (!obj) return;

    if ((uc = OSDynamicCast(IOUserClient, obj)))
    {
	if (1 == OSDecrementAtomic(&uc->__ipc) && uc->isInactive())
	{
	    IOLockLock(gIOObjectPortLock);
	    if ((finalize = uc->__ipcFinal)) uc->__ipcFinal = false;
	    IOLockUnlock(gIOObjectPortLock);
	}
	if (finalize) uc->scheduleFinalize(true);
    }

    obj->release();
}

bool
IOUserClient::finalizeUserReferences(OSObject * obj)
{
    IOUserClient * uc;
    bool           ok = true;

    if ((uc = OSDynamicCast(IOUserClient, obj)))
    {
	IOLockLock(gIOObjectPortLock);
	if ((uc->__ipcFinal = (0 != uc->__ipc))) ok = false;
	IOLockUnlock(gIOObjectPortLock);
    }
    return (ok);
}
ipc_port_t
iokit_port_for_object( io_object_t obj, ipc_kobject_type_t type )
{
    IOMachPort *	machPort;
    ipc_port_t		port;

    if( (machPort = IOMachPort::portForObject( obj, type ))) {

	port = machPort->port;
	if( port)
	    iokit_retain_port( port );

	machPort->release();

    } else
	port = NULL;

    return( port );
}

kern_return_t
iokit_client_died( io_object_t obj, ipc_port_t /* port */,
			ipc_kobject_type_t type, mach_port_mscount_t * mscount )
{
    IOUserClient *	 client;
    IOMemoryMap *	 map;
    IOUserNotification * notify;

    if( !IOMachPort::noMoreSendersForObject( obj, type, mscount ))
	return( kIOReturnNotReady );

    if( IKOT_IOKIT_CONNECT == type)
    {
	if( (client = OSDynamicCast( IOUserClient, obj )))
	{
	    IOStatisticsClientCall();
	    client->clientDied();
	}
    }
    else if( IKOT_IOKIT_OBJECT == type)
    {
	if( (map = OSDynamicCast( IOMemoryMap, obj )))
	    map->taskDied();
	else if( (notify = OSDynamicCast( IOUserNotification, obj )))
	    notify->setNotification( 0 );
    }

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class IOServiceUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceUserNotification)

    struct PingMsg {
	mach_msg_header_t	msgHdr;
	OSNotificationHeader64	notifyHeader;
    };

    enum { kMaxOutstanding = 1024 };

    PingMsg   *	pingMsg;
    vm_size_t	msgSize;
    OSArray   *	newSet;
    OSObject  *	lastEntry;
    bool	armed;
    bool	ipcLogged;

public:

    virtual bool init( mach_port_t port, natural_t type,
		       void * reference, vm_size_t referenceSize,
		       bool clientIs64 );
    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    static bool _handler( void * target,
			  void * ref, IOService * newService, IONotifier * notifier );
    virtual bool handler( void * ref, IOService * newService );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};
class IOServiceMessageUserNotification : public IOUserNotification
{
    OSDeclareDefaultStructors(IOServiceMessageUserNotification)

    struct PingMsg {
	mach_msg_header_t		msgHdr;
	mach_msg_body_t			msgBody;
	mach_msg_port_descriptor_t	ports[1];
	OSNotificationHeader64		notifyHeader __attribute__ ((packed));
    };

    PingMsg   *	pingMsg;
    vm_size_t	msgSize;
    uint8_t	clientIs64;
    int		owningPID;
    bool	ipcLogged;

public:

    virtual bool init( mach_port_t port, natural_t type,
		       void * reference, vm_size_t referenceSize,
		       vm_size_t extraSize,
		       bool client64 );

    virtual void free() APPLE_KEXT_OVERRIDE;
    void invalidatePort(void);

    static IOReturn _handler( void * target, void * ref,
			      UInt32 messageType, IOService * provider,
			      void * messageArgument, vm_size_t argSize );
    virtual IOReturn handler( void * ref,
			      UInt32 messageType, IOService * provider,
			      void * messageArgument, vm_size_t argSize );

    virtual OSObject * getNextObject() APPLE_KEXT_OVERRIDE;
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOUserIterator
OSDefineMetaClass( IOUserNotification, IOUserIterator )
OSDefineAbstractStructors( IOUserNotification, IOUserIterator )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOUserNotification::free( void )
{
    if (holdNotify)
    {
	assert(OSDynamicCast(IONotifier, holdNotify));
	((IONotifier *)holdNotify)->remove();
	holdNotify = 0;
    }

    // can't be in handler now

    super::free();
}

void IOUserNotification::setNotification( IONotifier * notify )
{
    OSObject * previousNotify;

    IOLockLock( gIOObjectPortLock );

    previousNotify = holdNotify;
    holdNotify = notify;

    IOLockUnlock( gIOObjectPortLock );

    if( previousNotify)
    {
	assert(OSDynamicCast(IONotifier, previousNotify));
	((IONotifier *)previousNotify)->remove();
    }
}

void IOUserNotification::reset()
{
    // empty
}

bool IOUserNotification::isValid()
{
    return( true );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOUserNotification
OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceUserNotification::init( mach_port_t port, natural_t type,
				      void * reference, vm_size_t referenceSize,
				      bool clientIs64 )
{
    if( !super::init())
	return( false );

    newSet = OSArray::withCapacity( 1 );
    if( !newSet)
	return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
	return( false );

    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
	return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port	= port;
    pingMsg->msgHdr.msgh_bits		= MACH_MSGH_BITS(
					    MACH_MSG_TYPE_COPY_SEND /*remote*/,
					    MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size		= msgSize;
    pingMsg->msgHdr.msgh_id		= kOSNotificationMessageID;

    pingMsg->notifyHeader.size = 0;
    pingMsg->notifyHeader.type = type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
void IOServiceUserNotification::invalidatePort(void)
{
    if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
}

void IOServiceUserNotification::free( void )
{
    PingMsg   *	_pingMsg;
    vm_size_t	_msgSize;
    OSArray   *	_newSet;
    OSObject  *	_lastEntry;

    _pingMsg   = pingMsg;
    _msgSize   = msgSize;
    _newSet    = newSet;
    _lastEntry = lastEntry;

    super::free();

    if( _pingMsg && _msgSize) {
	if (_pingMsg->msgHdr.msgh_remote_port) {
	    iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
	}
	IOFree(_pingMsg, _msgSize);
    }

    if( _lastEntry)
	_lastEntry->release();

    if( _newSet)
	_newSet->release();
}
bool IOServiceUserNotification::_handler( void * target,
				void * ref, IOService * newService, IONotifier * notifier )
{
    return( ((IOServiceUserNotification *) target)->handler( ref, newService ));
}

bool IOServiceUserNotification::handler( void * ref,
				IOService * newService )
{
    unsigned int	count;
    kern_return_t	kr;
    ipc_port_t		port = NULL;
    bool		sendPing = false;

    IOTakeLock( lock );

    count = newSet->getCount();
    if( count < kMaxOutstanding) {

	newSet->setObject( newService );
	if( (sendPing = (armed && (0 == count))))
	    armed = false;
    }

    IOUnlock( lock );

    if( kIOServiceTerminatedNotificationType == pingMsg->notifyHeader.type)
	IOMachPort::setHoldDestroy( newService, IKOT_IOKIT_OBJECT );

    if( sendPing) {
	if( (port = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ) ))
	    pingMsg->msgHdr.msgh_local_port = port;
	else
	    pingMsg->msgHdr.msgh_local_port = NULL;

	kr = mach_msg_send_from_kernel_with_options( &pingMsg->msgHdr,
					pingMsg->msgHdr.msgh_size,
					(MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
					0);
	if( port)
	    iokit_release_port( port );

	if( (KERN_SUCCESS != kr) && !ipcLogged)
	{
	    ipcLogged = true;
	    IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
	}
    }

    return( true );
}
OSObject * IOServiceUserNotification::getNextObject()
{
    unsigned int	count;
    OSObject *		result;
    OSObject *		releaseEntry;

    IOLockLock(lock);

    releaseEntry = lastEntry;
    count = newSet->getCount();
    if( count ) {
	result = newSet->getObject( count - 1 );
	result->retain();
	newSet->removeObject( count - 1);
    } else {
	result = 0;
	armed = true;
    }
    lastEntry = result;

    IOLockUnlock(lock);

    if (releaseEntry) releaseEntry->release();

    return( result );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotification)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type,
				void * reference, vm_size_t referenceSize, vm_size_t extraSize,
				bool client64 )
{
    if( !super::init())
	return( false );

    if (referenceSize > sizeof(OSAsyncReference64))
	return( false );

    clientIs64 = client64;

    owningPID = proc_selfpid();

    extraSize += sizeof(IOServiceInterestContent64);
    msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize;
    pingMsg = (PingMsg *) IOMalloc( msgSize);
    if( !pingMsg)
	return( false );

    bzero( pingMsg, msgSize);

    pingMsg->msgHdr.msgh_remote_port	= port;
    pingMsg->msgHdr.msgh_bits		= MACH_MSGH_BITS_COMPLEX
					|  MACH_MSGH_BITS(
					    MACH_MSG_TYPE_COPY_SEND /*remote*/,
					    MACH_MSG_TYPE_MAKE_SEND /*local*/);
    pingMsg->msgHdr.msgh_size		= msgSize;
    pingMsg->msgHdr.msgh_id		= kOSNotificationMessageID;

    pingMsg->msgBody.msgh_descriptor_count = 1;

    pingMsg->ports[0].name		= 0;
    pingMsg->ports[0].disposition	= MACH_MSG_TYPE_MAKE_SEND;
    pingMsg->ports[0].type		= MACH_MSG_PORT_DESCRIPTOR;

    pingMsg->notifyHeader.size		= extraSize;
    pingMsg->notifyHeader.type		= type;
    bcopy( reference, pingMsg->notifyHeader.reference, referenceSize );

    return( true );
}
void IOServiceMessageUserNotification::invalidatePort(void)
{
    if (pingMsg) pingMsg->msgHdr.msgh_remote_port = MACH_PORT_NULL;
}

void IOServiceMessageUserNotification::free( void )
{
    PingMsg   *	_pingMsg;
    vm_size_t	_msgSize;

    _pingMsg = pingMsg;
    _msgSize = msgSize;

    super::free();

    if( _pingMsg && _msgSize) {
	if (_pingMsg->msgHdr.msgh_remote_port) {
	    iokit_release_port_send(_pingMsg->msgHdr.msgh_remote_port);
	}
	IOFree( _pingMsg, _msgSize);
    }
}
IOReturn IOServiceMessageUserNotification::_handler( void * target, void * ref,
				UInt32 messageType, IOService * provider,
				void * argument, vm_size_t argSize )
{
    return( ((IOServiceMessageUserNotification *) target)->handler(
				ref, messageType, provider, argument, argSize));
}

IOReturn IOServiceMessageUserNotification::handler( void * ref,
				UInt32 messageType, IOService * provider,
				void * messageArgument, vm_size_t callerArgSize )
{
    enum		{ kLocalMsgSize = 0x100 };
    uint64_t		stackMsg[kLocalMsgSize / sizeof(uint64_t)];
    void *		allocMsg;
    kern_return_t	kr;
    vm_size_t		argSize;
    vm_size_t		thisMsgSize;
    ipc_port_t		thisPort, providerPort;
    struct PingMsg *	thisMsg;
    IOServiceInterestContent64 * data;
    natural_t		type;

    if (kIOMessageCopyClientID == messageType)
    {
	*((void **) messageArgument) = OSNumber::withNumber(owningPID, 32);
	return (kIOReturnSuccess);
    }

    if (callerArgSize == 0)
    {
	if (clientIs64) argSize = sizeof(data->messageArgument[0]);
	else            argSize = sizeof(uint32_t);
    }
    else
    {
	argSize = callerArgSize;
	if( argSize > kIOUserNotifyMaxMessageSize)
	    argSize = kIOUserNotifyMaxMessageSize;
    }

    // adjust message size for ipc restrictions
    type = pingMsg->notifyHeader.type;
    type &= ~(kIOKitNoticationMsgSizeMask << kIOKitNoticationTypeSizeAdjShift);
    type |= ((argSize & kIOKitNoticationMsgSizeMask) << kIOKitNoticationTypeSizeAdjShift);
    argSize = (argSize + kIOKitNoticationMsgSizeMask) & ~kIOKitNoticationMsgSizeMask;

    thisMsgSize = msgSize
		+ sizeof( IOServiceInterestContent64 )
		- sizeof( data->messageArgument)
		+ argSize;

    if (thisMsgSize > sizeof(stackMsg))
    {
	allocMsg = IOMalloc(thisMsgSize);
	if (!allocMsg) return (kIOReturnNoMemory);
	thisMsg = (typeof(thisMsg)) allocMsg;
    }
    else
    {
	allocMsg = 0;
	thisMsg = (typeof(thisMsg)) stackMsg;
    }

    bcopy(pingMsg, thisMsg, msgSize);
    thisMsg->notifyHeader.type = type;
    data = (IOServiceInterestContent64 *) (((uint8_t *) thisMsg) + msgSize);
    // == pingMsg->notifyHeader.content;
    data->messageType = messageType;

    if (callerArgSize == 0)
    {
	data->messageArgument[0] = (io_user_reference_t) messageArgument;
	if (!clientIs64)
	{
	    data->messageArgument[0] |= (data->messageArgument[0] << 32);
	}
    }
    else
    {
	bcopy( messageArgument, data->messageArgument, callerArgSize );
	bzero((void *)(((uintptr_t) &data->messageArgument[0]) + callerArgSize), argSize - callerArgSize);
    }

    thisMsg->notifyHeader.type = type;
    thisMsg->msgHdr.msgh_size  = thisMsgSize;

    providerPort = iokit_port_for_object( provider, IKOT_IOKIT_OBJECT );
    thisMsg->ports[0].name = providerPort;
    thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT );
    thisMsg->msgHdr.msgh_local_port = thisPort;

    kr = mach_msg_send_from_kernel_with_options( &thisMsg->msgHdr,
				thisMsg->msgHdr.msgh_size,
				(MACH_SEND_MSG | MACH_SEND_ALWAYS | MACH_SEND_IMPORTANCE),
				0);
    if( thisPort)
	iokit_release_port( thisPort );
    if( providerPort)
	iokit_release_port( providerPort );

    if (allocMsg)
	IOFree(allocMsg, thisMsgSize);

    if((KERN_SUCCESS != kr) && !ipcLogged)
    {
	ipcLogged = true;
	IOLog("%s: mach_msg_send_from_kernel_proper (0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return( kIOReturnSuccess );
}

OSObject * IOServiceMessageUserNotification::getNextObject()
{
    return( 0 );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOService
OSDefineMetaClassAndAbstractStructors( IOUserClient, IOService )

IOLock * gIOUserClientOwnersLock;

void IOUserClient::initialize( void )
{
    gIOObjectPortLock       = IOLockAlloc();
    gIOUserClientOwnersLock = IOLockAlloc();
    assert(gIOObjectPortLock && gIOUserClientOwnersLock);
}
void IOUserClient::setAsyncReference(OSAsyncReference asyncRef,
				     mach_port_t wakePort,
				     void *callback, void *refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((uintptr_t) wakePort)
					 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (uintptr_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
				       mach_port_t wakePort,
				       mach_vm_address_t callback, io_user_reference_t refcon)
{
    asyncRef[kIOAsyncReservedIndex]      = ((io_user_reference_t) wakePort)
					 | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]);
    asyncRef[kIOAsyncCalloutFuncIndex]   = (io_user_reference_t) callback;
    asyncRef[kIOAsyncCalloutRefconIndex] = refcon;
}

void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef,
				       mach_port_t wakePort,
				       mach_vm_address_t callback, io_user_reference_t refcon, task_t task)
{
    setAsyncReference64(asyncRef, wakePort, callback, refcon);
    if (vm_map_is_64bit(get_task_map(task))) {
	asyncRef[kIOAsyncReservedIndex] |= kIOUCAsync64Flag;
    }
}
* CopyConsoleUser(UInt32 uid
)
1140 OSDictionary
* user
= 0;
1142 if ((array
= OSDynamicCast(OSArray
,
1143 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey
))))
1145 for (unsigned int idx
= 0;
1146 (user
= OSDynamicCast(OSDictionary
, array
->getObject(idx
)));
1150 if ((num
= OSDynamicCast(OSNumber
, user
->getObject(gIOConsoleSessionUIDKey
)))
1151 && (uid
== num
->unsigned32BitValue())) {
1161 static OSDictionary
* CopyUserOnConsole(void)
1164 OSDictionary
* user
= 0;
1166 if ((array
= OSDynamicCast(OSArray
,
1167 IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey
))))
1169 for (unsigned int idx
= 0;
1170 (user
= OSDynamicCast(OSDictionary
, array
->getObject(idx
)));
1173 if (kOSBooleanTrue
== user
->getObject(gIOConsoleSessionOnConsoleKey
))
1184 IOReturn
IOUserClient::clientHasAuthorization( task_t task
,
1185 IOService
* service
)
1189 p
= (proc_t
) get_bsdtask_info(task
);
1192 uint64_t authorizationID
;
1194 authorizationID
= proc_uniqueid(p
);
1195 if (authorizationID
)
1197 if (service
->getAuthorizationID() == authorizationID
)
1199 return (kIOReturnSuccess
);
1204 return (kIOReturnNotPermitted
);
IOReturn IOUserClient::clientHasPrivilege( void * securityToken,
					   const char * privilegeName )
{
    kern_return_t		kr;
    security_token_t		token;
    mach_msg_type_number_t	count;
    task_t			task;
    OSDictionary *		user;
    bool			secureConsole;

    if (!strncmp(privilegeName, kIOClientPrivilegeForeground,
		 sizeof(kIOClientPrivilegeForeground)))
    {
	if (task_is_gpu_denied(current_task()))
	    return (kIOReturnNotPrivileged);
	else
	    return (kIOReturnSuccess);
    }

    if (!strncmp(privilegeName, kIOClientPrivilegeConsoleSession,
		 sizeof(kIOClientPrivilegeConsoleSession)))
    {
	kauth_cred_t cred;
	proc_t	     p;

	task = (task_t) securityToken;
	if (!task)
	    task = current_task();
	p = (proc_t) get_bsdtask_info(task);
	kr = kIOReturnNotPrivileged;

	if (p && (cred = kauth_cred_proc_ref(p)))
	{
	    user = CopyUserOnConsole();
	    if (user)
	    {
		OSNumber * num;
		if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionAuditIDKey)))
		  && (cred->cr_audit.as_aia_p->ai_asid == (au_asid_t) num->unsigned32BitValue()))
		{
		    kr = kIOReturnSuccess;
		}
		user->release();
	    }
	    kauth_cred_unref(&cred);
	}
	return (kr);
    }

    if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess,
				  sizeof(kIOClientPrivilegeSecureConsoleProcess))))
	task = (task_t)((IOUCProcessToken *)securityToken)->token;
    else
	task = (task_t)securityToken;

    count = TASK_SECURITY_TOKEN_COUNT;
    kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count );

    if (KERN_SUCCESS != kr)
    {}
    else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator,
		      sizeof(kIOClientPrivilegeAdministrator))) {
	if (0 != token.val[0])
	    kr = kIOReturnNotPrivileged;
    } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser,
			sizeof(kIOClientPrivilegeLocalUser))) {
	user = CopyConsoleUser(token.val[0]);
	if (user)
	    user->release();
	else
	    kr = kIOReturnNotPrivileged;
    } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser,
					 sizeof(kIOClientPrivilegeConsoleUser))) {
	user = CopyConsoleUser(token.val[0]);
	if (user) {
	    if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue)
		kr = kIOReturnNotPrivileged;
	    else if ( secureConsole ) {
		OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey));
		if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid)
		    kr = kIOReturnNotPrivileged;
	    }
	    user->release();
	}
	else
	    kr = kIOReturnNotPrivileged;
    } else
	kr = kIOReturnUnsupported;

    return (kr);
}
OSObject * IOUserClient::copyClientEntitlement( task_t task,
						const char * entitlement )
{
#define MAX_ENTITLEMENTS_LEN	(128 * 1024)

	proc_t p = NULL;
	pid_t pid = 0;
	char procname[MAXCOMLEN + 1] = "";
	size_t len = 0;
	void *entitlements_blob = NULL;
	char *entitlements_data = NULL;
	OSObject *entitlements_obj = NULL;
	OSDictionary *entitlements = NULL;
	OSString *errorString = NULL;
	OSObject *value = NULL;

	p = (proc_t)get_bsdtask_info(task);
	if (p == NULL)
		goto fail;
	pid = proc_pid(p);
	proc_name(pid, procname, (int)sizeof(procname));

	if (cs_entitlements_blob_get(p, &entitlements_blob, &len) != 0)
		goto fail;

	if (len <= offsetof(CS_GenericBlob, data))
		goto fail;

	/*
	 * Per <rdar://problem/11593877>, enforce a limit on the amount of XML
	 * we'll try to parse in the kernel.
	 */
	len -= offsetof(CS_GenericBlob, data);
	if (len > MAX_ENTITLEMENTS_LEN) {
		IOLog("failed to parse entitlements for %s[%u]: %lu bytes of entitlements exceeds maximum of %u\n", procname, pid, len, MAX_ENTITLEMENTS_LEN);
		goto fail;
	}

	/*
	 * OSUnserializeXML() expects a nul-terminated string, but that isn't
	 * what is stored in the entitlements blob.  Copy the string and
	 * terminate it.
	 */
	entitlements_data = (char *)IOMalloc(len + 1);
	if (entitlements_data == NULL)
		goto fail;
	memcpy(entitlements_data, ((CS_GenericBlob *)entitlements_blob)->data, len);
	entitlements_data[len] = '\0';

	entitlements_obj = OSUnserializeXML(entitlements_data, len + 1, &errorString);
	if (errorString != NULL) {
		IOLog("failed to parse entitlements for %s[%u]: %s\n", procname, pid, errorString->getCStringNoCopy());
		goto fail;
	}
	if (entitlements_obj == NULL)
		goto fail;

	entitlements = OSDynamicCast(OSDictionary, entitlements_obj);
	if (entitlements == NULL)
		goto fail;

	/* Fetch the entitlement value from the dictionary. */
	value = entitlements->getObject(entitlement);
	if (value != NULL)
		value->retain();

fail:
	if (entitlements_data != NULL)
		IOFree(entitlements_data, len + 1);
	if (entitlements_obj != NULL)
		entitlements_obj->release();
	if (errorString != NULL)
		errorString->release();
	return value;
}
bool IOUserClient::init()
{
    if (getPropertyTable() || super::init())
	return reserve();

    return false;
}

bool IOUserClient::init(OSDictionary * dictionary)
{
    if (getPropertyTable() || super::init(dictionary))
	return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
				void * securityID,
				UInt32 type )
{
    if (getPropertyTable() || super::init())
	return reserve();

    return false;
}

bool IOUserClient::initWithTask(task_t owningTask,
				void * securityID,
				UInt32 type,
				OSDictionary * properties )
{
    bool ok;

    ok = super::init( properties );
    ok &= initWithTask( owningTask, securityID, type );

    return( ok );
}

bool IOUserClient::reserve()
{
    if (!reserved) {
	reserved = IONew(ExpansionData, 1);
	if (!reserved) {
	    return false;
	}
    }
    setTerminateDefer(NULL, true);
    IOStatisticsRegisterCounter();

    return true;
}
struct IOUserClientOwner
{
    task_t		task;
    queue_chain_t	taskLink;
    IOUserClient *	uc;
    queue_chain_t	ucLink;
};

IOReturn
IOUserClient::registerOwner(task_t task)
{
    IOUserClientOwner * owner;
    IOReturn            ret;
    bool                newOwner;

    IOLockLock(gIOUserClientOwnersLock);

    newOwner = true;
    ret = kIOReturnSuccess;

    if (!owners.next) queue_init(&owners);
    else
    {
	queue_iterate(&owners, owner, IOUserClientOwner *, ucLink)
	{
	    if (task != owner->task) continue;
	    newOwner = false;
	    break;
	}
    }
    if (newOwner)
    {
	owner = IONew(IOUserClientOwner, 1);
	if (!owner) ret = kIOReturnNoMemory;
	else
	{
	    owner->task = task;
	    owner->uc   = this;
	    queue_enter_first(&owners, owner, IOUserClientOwner *, ucLink);
	    queue_enter_first(task_io_user_clients(task), owner, IOUserClientOwner *, taskLink);
	}
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    return (ret);
}
void
IOUserClient::noMoreSenders(void)
{
    IOUserClientOwner * owner;

    IOLockLock(gIOUserClientOwnersLock);

    if (owners.next)
    {
	while (!queue_empty(&owners))
	{
	    owner = (IOUserClientOwner *)(void *) queue_first(&owners);
	    queue_remove(task_io_user_clients(owner->task), owner, IOUserClientOwner *, taskLink);
	    queue_remove(&owners, owner, IOUserClientOwner *, ucLink);
	    IODelete(owner, IOUserClientOwner, 1);
	}
	owners.next = owners.prev = NULL;
    }

    IOLockUnlock(gIOUserClientOwnersLock);
}
extern "C" kern_return_t
iokit_task_terminate(task_t task)
{
    IOUserClientOwner * owner;
    IOUserClient      * dead;
    IOUserClient      * uc;
    queue_head_t      * taskque;

    IOLockLock(gIOUserClientOwnersLock);

    taskque = task_io_user_clients(task);
    dead = NULL;
    while (!queue_empty(taskque))
    {
	owner = (IOUserClientOwner *)(void *) queue_first(taskque);
	uc = owner->uc;
	queue_remove(taskque, owner, IOUserClientOwner *, taskLink);
	queue_remove(&uc->owners, owner, IOUserClientOwner *, ucLink);
	if (queue_empty(&uc->owners))
	{
	    uc->retain();
	    IOLog("destroying out of band connect for %s\n", uc->getName());
	    // now using the uc queue head as a singly linked queue,
	    // leaving .next as NULL to mark it empty
	    uc->owners.next = NULL;
	    uc->owners.prev = (queue_entry_t) dead;
	    dead = uc;
	}
	IODelete(owner, IOUserClientOwner, 1);
    }

    IOLockUnlock(gIOUserClientOwnersLock);

    while (dead)
    {
	uc = dead;
	dead = (IOUserClient *)(void *) dead->owners.prev;
	uc->owners.prev = NULL;
	if (uc->sharedInstance || !uc->closed) uc->clientDied();
	uc->release();
    }

    return (KERN_SUCCESS);
}
void IOUserClient::free()
{
    if( mappings) mappings->release();

    IOStatisticsUnregisterCounter();

    assert(!owners.next);
    assert(!owners.prev);

    if (reserved) IODelete(reserved, ExpansionData, 1);

    super::free();
}

IOReturn IOUserClient::clientDied( void )
{
    IOReturn ret = kIOReturnNotReady;

    if (sharedInstance || OSCompareAndSwap8(0, 1, &closed))
    {
	ret = clientClose();
    }

    return (ret);
}
IOReturn IOUserClient::clientClose( void )
{
    return( kIOReturnUnsupported );
}

IOService * IOUserClient::getService( void )
{
    return( 0 );
}

IOReturn IOUserClient::registerNotificationPort(
		mach_port_t	/* port */,
		UInt32		/* type */,
		UInt32		/* refCon */)
{
    return( kIOReturnUnsupported );
}

IOReturn IOUserClient::registerNotificationPort(
		mach_port_t port,
		UInt32	    type,
		io_user_reference_t refCon)
{
    return (registerNotificationPort(port, type, (UInt32) refCon));
}

IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type,
						 semaphore_t * semaphore )
{
    return( kIOReturnUnsupported );
}

IOReturn IOUserClient::connectClient( IOUserClient * /* client */ )
{
    return( kIOReturnUnsupported );
}

IOReturn IOUserClient::clientMemoryForType( UInt32 type,
					    IOOptionBits * options,
					    IOMemoryDescriptor ** memory )
{
    return( kIOReturnUnsupported );
}
IOMemoryMap * IOUserClient::mapClientMemory(
	IOOptionBits		type,
	task_t			task,
	IOOptionBits		mapFlags,
	IOVirtualAddress	atAddress )
{
    return (NULL);
}

IOMemoryMap * IOUserClient::mapClientMemory64(
	IOOptionBits		type,
	task_t			task,
	IOOptionBits		mapFlags,
	mach_vm_address_t	atAddress )
{
    IOReturn		 err;
    IOOptionBits	 options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap *	 map = 0;

    err = clientMemoryForType( (UInt32) type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

	FAKE_STACK_FRAME(getMetaClass());

	options = (options & ~kIOMapUserOptionsMask)
		| (mapFlags & kIOMapUserOptionsMask);
	map = memory->createMappingInTask( task, atAddress, options );
	memory->release();

	FAKE_STACK_FRAME_END();
    }

    return( map );
}
IOReturn IOUserClient::exportObjectToClient(task_t task,
			OSObject *obj, io_object_t *clientObj)
{
    mach_port_name_t	name;

    name = IOMachPort::makeSendRightForTask( task, obj, IKOT_IOKIT_OBJECT );

    *(mach_port_name_t *)clientObj = name;
    return kIOReturnSuccess;
}
IOExternalMethod * IOUserClient::getExternalMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalAsyncMethod * IOUserClient::getExternalAsyncMethodForIndex( UInt32 /* index */)
{
    return( 0 );
}

IOExternalTrap * IOUserClient::
getExternalTrapForIndex(UInt32 index)
{
    return NULL;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// Suppressing the deprecated-declarations warning. Avoiding the use of deprecated
// functions can break clients of kexts implementing getExternalMethodForIndex()
IOExternalMethod * IOUserClient::
getTargetAndMethodForIndex(IOService **targetP, UInt32 index)
{
    IOExternalMethod *method = getExternalMethodForIndex(index);

    if (method)
	*targetP = (IOService *) method->object;

    return method;
}

IOExternalAsyncMethod * IOUserClient::
getAsyncTargetAndMethodForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalAsyncMethod *method = getExternalAsyncMethodForIndex(index);

    if (method)
	*targetP = (IOService *) method->object;

    return method;
}

IOExternalTrap * IOUserClient::
getTargetAndTrapForIndex(IOService ** targetP, UInt32 index)
{
    IOExternalTrap *trap = getExternalTrapForIndex(index);

    if (trap) {
	*targetP = trap->object;
    }

    return trap;
}
#pragma clang diagnostic pop
IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference)
{
    mach_port_t port;

    port = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);

    if (MACH_PORT_NULL != port)
	iokit_release_port_send(port);

    return (kIOReturnSuccess);
}

IOReturn IOUserClient::releaseNotificationPort(mach_port_t port)
{
    if (MACH_PORT_NULL != port)
	iokit_release_port_send(port);

    return (kIOReturnSuccess);
}
IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference,
				       IOReturn result, void *args[], UInt32 numArgs)
{
    OSAsyncReference64  reference64;
    io_user_reference_t args64[kMaxAsyncArgs];
    unsigned int        idx;

    if (numArgs > kMaxAsyncArgs)
	return kIOReturnMessageTooLarge;

    for (idx = 0; idx < kOSAsyncRef64Count; idx++)
	reference64[idx] = REF64(reference[idx]);

    for (idx = 0; idx < numArgs; idx++)
	args64[idx] = REF64(args[idx]);

    return (sendAsyncResult64(reference64, result, args64, numArgs));
}

IOReturn IOUserClient::sendAsyncResult64WithOptions(OSAsyncReference64 reference,
	IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    return _sendAsyncResult64(reference, result, args, numArgs, options);
}

IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference,
	IOReturn result, io_user_reference_t args[], UInt32 numArgs)
{
    return _sendAsyncResult64(reference, result, args, numArgs, 0);
}
IOReturn IOUserClient::_sendAsyncResult64(OSAsyncReference64 reference,
	IOReturn result, io_user_reference_t args[], UInt32 numArgs, IOOptionBits options)
{
    struct ReplyMsg
    {
	mach_msg_header_t msgHdr;
	union
	{
	    struct
	    {
		OSNotificationHeader	 notifyHdr;
		IOAsyncCompletionContent asyncContent;
		uint32_t		 args[kMaxAsyncArgs];
	    } msg32;
	    struct
	    {
		OSNotificationHeader64	 notifyHdr;
		IOAsyncCompletionContent asyncContent;
		io_user_reference_t	 args[kMaxAsyncArgs] __attribute__ ((packed));
	    } msg64;
	} m;
    };
    ReplyMsg	  replyMsg;
    mach_port_t	  replyPort;
    kern_return_t kr;

    // If no reply port, do nothing.
    replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags);
    if (replyPort == MACH_PORT_NULL)
	return kIOReturnSuccess;

    if (numArgs > kMaxAsyncArgs)
	return kIOReturnMessageTooLarge;

    bzero(&replyMsg, sizeof(replyMsg));
    replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/,
					       0 /*local*/);
    replyMsg.msgHdr.msgh_remote_port = replyPort;
    replyMsg.msgHdr.msgh_local_port  = 0;
    replyMsg.msgHdr.msgh_id          = kOSNotificationMessageID;
    if (kIOUCAsync64Flag & reference[0])
    {
	replyMsg.msgHdr.msgh_size =
	    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64)
	    - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t);
	replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent)
					+ numArgs * sizeof(io_user_reference_t);
	replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType;
	bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64));

	replyMsg.m.msg64.asyncContent.result = result;
	if (numArgs)
	    bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t));
    }
    else
    {
	unsigned int idx;

	replyMsg.msgHdr.msgh_size =
	    sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32)
	    - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t);

	replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent)
					+ numArgs * sizeof(uint32_t);
	replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType;

	for (idx = 0; idx < kOSAsyncRefCount; idx++)
	    replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]);

	replyMsg.m.msg32.asyncContent.result = result;

	for (idx = 0; idx < numArgs; idx++)
	    replyMsg.m.msg32.args[idx] = REF32(args[idx]);
    }

    if ((options & kIOUserNotifyOptionCanDrop) != 0) {
	kr = mach_msg_send_from_kernel_with_options( &replyMsg.msgHdr,
		replyMsg.msgHdr.msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
    } else {
	/* Fail on full queue. */
	kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr,
		replyMsg.msgHdr.msgh_size);
    }
    if ((KERN_SUCCESS != kr) && (MACH_SEND_TIMED_OUT != kr) && !(kIOUCAsyncErrorLoggedFlag & reference[0]))
    {
	reference[0] |= kIOUCAsyncErrorLoggedFlag;
	IOLog("%s: mach_msg_send_from_kernel_proper(0x%x)\n", __PRETTY_FUNCTION__, kr );
    }

    return kr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define CHECK(cls,obj,out)					\
	cls * out;						\
	if( !(out = OSDynamicCast( cls, obj)))			\
	    return( kIOReturnBadArgument )

#define CHECKLOCKED(cls,obj,out)				\
	IOUserIterator * oIter;					\
	cls * out;						\
	if( !(oIter = OSDynamicCast(IOUserIterator, obj)))	\
	    return (kIOReturnBadArgument);			\
	if( !(out = OSDynamicCast(cls, oIter->userIteratorObject))) \
	    return (kIOReturnBadArgument)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
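// Usage sketch (descriptive comment, not original to this file): each MIG
// routine below validates its incoming object with these macros, e.g.
// CHECK( IOService, _service, service ) declares a local 'service',
// OSDynamicCast()s the wired-in object to IOService, and returns
// kIOReturnBadArgument on a type mismatch before any other work is done.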
// Create a vm_map_copy_t or kalloc'ed data for memory
// to be copied out. ipc will free after the copyout.

static kern_return_t copyoutkdata( const void * data, vm_size_t len,
				   io_buf_ptr_t * buf )
{
    kern_return_t	err;
    vm_map_copy_t	copy;

    err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len,
			 false /* src_destroy */, &copy);

    assert( err == KERN_SUCCESS );
    if( err == KERN_SUCCESS )
	*buf = (char *) copy;

    return( err );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Routine io_server_version */
kern_return_t is_io_server_version(
	mach_port_t master_port,
	uint32_t *version)
{
    *version = IOKIT_SERVER_VERSION;
    return (kIOReturnSuccess);
}
/* Routine io_object_get_class */
kern_return_t is_io_object_get_class(
	io_object_t object,
	io_name_t className )
{
    const OSMetaClass * my_obj = NULL;

    if( !object)
	return( kIOReturnBadArgument );

    my_obj = object->getMetaClass();
    if (!my_obj) {
	return (kIOReturnNotFound);
    }

    strlcpy( className, my_obj->getClassName(), sizeof(io_name_t));

    return( kIOReturnSuccess );
}
/* Routine io_object_get_superclass */
kern_return_t is_io_object_get_superclass(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t class_name)
{
    IOReturn            ret;
    const OSMetaClass * meta;
    const OSMetaClass * super;
    const OSSymbol    * name;
    const char        * cstr;

    if (!obj_name || !class_name) return (kIOReturnBadArgument);
    if (master_port != master_device_port) return( kIOReturnNotPrivileged);

    ret = kIOReturnNotFound;
    meta = 0;
    do
    {
	name = OSSymbol::withCString(obj_name);
	if (!name) break;
	meta = OSMetaClass::copyMetaClassWithName(name);
	if (!meta) break;
	super = meta->getSuperClass();
	if (!super) break;
	cstr = super->getClassName();
	if (!cstr) break;
	strlcpy(class_name, cstr, sizeof(io_name_t));
	ret = kIOReturnSuccess;
    }
    while (false);

    OSSafeReleaseNULL(name);
    if (meta) meta->releaseMetaClass();

    return (ret);
}
/* Routine io_object_get_bundle_identifier */
kern_return_t is_io_object_get_bundle_identifier(
	mach_port_t master_port,
	io_name_t obj_name,
	io_name_t bundle_name)
{
    IOReturn            ret;
    const OSMetaClass * meta;
    const OSSymbol    * name;
    const OSSymbol    * identifier;
    const char        * cstr;

    if (!obj_name || !bundle_name) return (kIOReturnBadArgument);
    if (master_port != master_device_port) return( kIOReturnNotPrivileged);

    ret = kIOReturnNotFound;
    meta = 0;
    do
    {
	name = OSSymbol::withCString(obj_name);
	if (!name) break;
	meta = OSMetaClass::copyMetaClassWithName(name);
	if (!meta) break;
	identifier = meta->getKmodName();
	if (!identifier) break;
	cstr = identifier->getCStringNoCopy();
	if (!cstr) break;
	strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t));
	ret = kIOReturnSuccess;
    }
    while (false);

    OSSafeReleaseNULL(name);
    if (meta) meta->releaseMetaClass();

    return (ret);
}
/* Routine io_object_conforms_to */
kern_return_t is_io_object_conforms_to(
	io_object_t object,
	io_name_t className,
	boolean_t *conforms )
{
    if( !object)
	return( kIOReturnBadArgument );

    *conforms = (0 != object->metaCast( className ));

    return( kIOReturnSuccess );
}
/* Routine io_object_get_retain_count */
kern_return_t is_io_object_get_retain_count(
	io_object_t object,
	uint32_t *retainCount )
{
    if( !object)
	return( kIOReturnBadArgument );

    *retainCount = object->getRetainCount();
    return( kIOReturnSuccess );
}
/* Routine io_iterator_next */
kern_return_t is_io_iterator_next(
	io_object_t iterator,
	io_object_t *object )
{
    IOReturn   ret;
    OSObject * obj;

    CHECK( OSIterator, iterator, iter );

    obj = iter->getNextObject();
    if( obj) {
	obj->retain();
	*object = obj;
	ret = kIOReturnSuccess;
    } else
	ret = kIOReturnNoDevice;

    return (ret);
}
/* Routine io_iterator_reset */
kern_return_t is_io_iterator_reset(
	io_object_t iterator )
{
    CHECK( OSIterator, iterator, iter );

    iter->reset();

    return( kIOReturnSuccess );
}
/* Routine io_iterator_is_valid */
kern_return_t is_io_iterator_is_valid(
	io_object_t iterator,
	boolean_t *is_valid )
{
    CHECK( OSIterator, iterator, iter );

    *is_valid = iter->isValid();

    return( kIOReturnSuccess );
}
static kern_return_t internal_io_service_match_property_table(
	io_service_t _service,
	const char * matching,
	mach_msg_type_number_t matching_size,
	boolean_t *matches)
{
    CHECK( IOService, _service, service );

    kern_return_t	kr;
    OSObject *		obj;
    OSDictionary *	dict;

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
	*matches = service->passiveMatch( dict );
	kr = kIOReturnSuccess;
    } else
	kr = kIOReturnBadArgument;

    if( obj)
	obj->release();

    return( kr );
}
/* Routine io_service_match_property_table */
kern_return_t is_io_service_match_property_table(
	io_service_t service,
	io_string_t matching,
	boolean_t *matches )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_match_property_table_ool */
kern_return_t is_io_service_match_property_table_ool(
	io_object_t service,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	boolean_t *matches )
{
    kern_return_t	kr;
    vm_offset_t		data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
	// must return success after vm_map_copyout() succeeds
	*result = internal_io_service_match_property_table(service,
			(const char *)data, matchingCnt, matches );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
/* Routine io_service_match_property_table_bin */
kern_return_t is_io_service_match_property_table_bin(
	io_object_t service,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	boolean_t *matches)
{
    return (internal_io_service_match_property_table(service, matching, matchingCnt, matches));
}
static kern_return_t internal_io_service_get_matching_services(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_iterator_t *existing )
{
    kern_return_t	kr;
    OSObject *		obj;
    OSDictionary *	dict;

    if( master_port != master_device_port)
	return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
	*existing = IOUserIterator::withIterator(IOService::getMatchingServices( dict ));
	kr = kIOReturnSuccess;
    } else
	kr = kIOReturnBadArgument;

    if( obj)
	obj->release();

    return( kr );
}
/* Routine io_service_get_matching_services */
kern_return_t is_io_service_get_matching_services(
	mach_port_t master_port,
	io_string_t matching,
	io_iterator_t *existing )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_get_matching_services_ool */
kern_return_t is_io_service_get_matching_services_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *existing )
{
    kern_return_t	kr;
    vm_offset_t		data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
	// must return success after vm_map_copyout() succeeds
	// and mig will copy out objects on success
	*result = internal_io_service_get_matching_services(master_port,
			(const char *) data, matchingCnt, existing );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
/* Routine io_service_get_matching_services_bin */
kern_return_t is_io_service_get_matching_services_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *existing)
{
    return (internal_io_service_get_matching_services(master_port, matching, matchingCnt, existing));
}
static kern_return_t internal_io_service_get_matching_service(
	mach_port_t master_port,
	const char * matching,
	mach_msg_type_number_t matching_size,
	io_service_t *service )
{
    kern_return_t	kr;
    OSObject *		obj;
    OSDictionary *	dict;

    if( master_port != master_device_port)
	return( kIOReturnNotPrivileged);

    assert(matching_size);
    obj = OSUnserializeXML(matching, matching_size);

    if( (dict = OSDynamicCast( OSDictionary, obj))) {
	*service = IOService::copyMatchingService( dict );
	kr = *service ? kIOReturnSuccess : kIOReturnNotFound;
    } else
	kr = kIOReturnBadArgument;

    if( obj)
	obj->release();

    return( kr );
}
/* Routine io_service_get_matching_service */
kern_return_t is_io_service_get_matching_service(
	mach_port_t master_port,
	io_string_t matching,
	io_service_t *service )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_get_matching_service_ool */
kern_return_t is_io_service_get_matching_service_ool(
	mach_port_t master_port,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	kern_return_t *result,
	io_object_t *service )
{
    kern_return_t	kr;
    vm_offset_t		data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
	// must return success after vm_map_copyout() succeeds
	// and mig will copy out objects on success
	*result = internal_io_service_get_matching_service(master_port,
			(const char *) data, matchingCnt, service );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
/* Routine io_service_get_matching_service_bin */
kern_return_t is_io_service_get_matching_service_bin(
	mach_port_t master_port,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	io_object_t *service)
{
    return (internal_io_service_get_matching_service(master_port, matching, matchingCnt, service));
}
static kern_return_t internal_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	const char * matching,
	size_t matching_size,
	mach_port_t port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	io_object_t * notification )
{
    IOServiceUserNotification *	userNotify = 0;
    IONotifier *		notify = 0;
    const OSSymbol *		sym;
    OSDictionary *		dict;
    IOReturn			err;
    unsigned long int		userMsgType;

    if( master_port != master_device_port)
	return( kIOReturnNotPrivileged);

    do {
	err = kIOReturnNoResources;

	if( !(sym = OSSymbol::withCString( notification_type )))
	    err = kIOReturnNoResources;

	assert(matching_size);
	dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
	if (!dict) {
	    err = kIOReturnBadArgument;
	    continue;
	}

	if( (sym == gIOPublishNotification)
	 || (sym == gIOFirstPublishNotification))
	    userMsgType = kIOServicePublishNotificationType;
	else if( (sym == gIOMatchedNotification)
	      || (sym == gIOFirstMatchNotification))
	    userMsgType = kIOServiceMatchedNotificationType;
	else if ((sym == gIOTerminatedNotification)
	      || (sym == gIOWillTerminateNotification))
	    userMsgType = kIOServiceTerminatedNotificationType;
	else
	    userMsgType = kLastIOKitNotificationType;

	userNotify = new IOServiceUserNotification;

	if( userNotify && !userNotify->init( port, userMsgType,
					     reference, referenceSize, client64)) {
	    userNotify->release();
	    userNotify = 0;
	}
	if( !userNotify)
	    continue;

	notify = IOService::addMatchingNotification( sym, dict,
						     &userNotify->_handler, userNotify );
	if( notify) {
	    *notification = userNotify;
	    userNotify->setNotification( notify );
	    err = kIOReturnSuccess;
	} else
	    err = kIOReturnUnsupported;

    } while( false );

    if ((kIOReturnSuccess != err) && userNotify)
    {
	userNotify->invalidatePort();
	userNotify->release();
	userNotify = 0;
    }

    if( sym)
	sym->release();
    if( dict)
	dict->release();

    return( err );
}
/* Routine io_service_add_notification */
kern_return_t is_io_service_add_notification(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t * notification )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_add_notification_64 */
kern_return_t is_io_service_add_notification_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_string_t matching,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification )
{
    return (kIOReturnUnsupported);
}
/* Routine io_service_add_notification_bin */
kern_return_t is_io_service_add_notification_bin
(
	mach_port_t master_port,
	io_name_t notification_type,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification)
{
    return (internal_io_service_add_notification(master_port, notification_type,
		matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
		false, notification));
}
/* Routine io_service_add_notification_bin_64 */
kern_return_t is_io_service_add_notification_bin_64
(
	mach_port_t master_port,
	io_name_t notification_type,
	io_struct_inband_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	io_object_t *notification)
{
    return (internal_io_service_add_notification(master_port, notification_type,
		matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
		true, notification));
}
static kern_return_t internal_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	void * reference,
	vm_size_t referenceSize,
	bool client64,
	kern_return_t *result,
	io_object_t *notification )
{
    kern_return_t	kr;
    vm_offset_t		data;
    vm_map_offset_t	map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == kr) {
	// must return success after vm_map_copyout() succeeds
	// and mig will copy out objects on success
	*result = internal_io_service_add_notification( master_port, notification_type,
			(char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
	vm_deallocate( kernel_map, data, matchingCnt );
    }

    return( kr );
}
/* Routine io_service_add_notification_ool */
kern_return_t is_io_service_add_notification_ool(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
		matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t),
		false, result, notification));
}
/* Routine io_service_add_notification_ool_64 */
kern_return_t is_io_service_add_notification_ool_64(
	mach_port_t master_port,
	io_name_t notification_type,
	io_buf_ptr_t matching,
	mach_msg_type_number_t matchingCnt,
	mach_port_t wake_port,
	io_async_ref64_t reference,
	mach_msg_type_number_t referenceCnt,
	kern_return_t *result,
	io_object_t *notification )
{
    return (internal_io_service_add_notification_ool(master_port, notification_type,
		matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t),
		true, result, notification));
}
2507 /* Routine io_service_add_notification_old */
2508 kern_return_t
is_io_service_add_notification_old(
2509 mach_port_t master_port
,
2510 io_name_t notification_type
,
2511 io_string_t matching
,
2513 // for binary compatibility reasons, this must be natural_t for ILP32
2515 io_object_t
* notification
)
2517 return( is_io_service_add_notification( master_port
, notification_type
,
2518 matching
, port
, &ref
, 1, notification
));
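
/*
 * Illustrative sketch (hypothetical, user-space perspective; not part of this file):
 * these MIG routines are normally reached through IOKit.framework rather than
 * called directly, roughly along the lines of:
 *
 *     io_iterator_t iter;
 *     kern_return_t kr = IOServiceAddMatchingNotification(notifyPort,
 *                            kIOFirstMatchNotification,
 *                            IOServiceMatching("AppleExampleDriver"),  // class name is hypothetical
 *                            matchCallback, refcon, &iter);
 *
 * The exact user-space call sequence above is an assumption for illustration.
 * On the kernel side the 32-bit and 64-bit variants differ only in the async
 * reference layout handed down as referenceSize (sizeof(io_async_ref_t) vs
 * sizeof(io_async_ref64_t)) together with the client64 flag.
 */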
static kern_return_t internal_io_service_add_interest_notification(
    io_object_t _service,
    io_name_t type_of_interest,
    mach_port_t port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{
    IOServiceMessageUserNotification * userNotify = 0;
    IONotifier * notify = 0;
    const OSSymbol * sym;
    IOReturn err;

    CHECK( IOService, _service, service );

    err = kIOReturnNoResources;
    if( (sym = OSSymbol::withCString( type_of_interest ))) do {

        userNotify = new IOServiceMessageUserNotification;

        if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
                                             reference, referenceSize,
                                             kIOUserNotifyMaxMessageSize,
                                             client64 )) {
            userNotify->release();
            userNotify = 0;
        }
        if( !userNotify)
            continue;

        notify = service->registerInterest( sym,
                                            &userNotify->_handler, userNotify );
        if( notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else
            err = kIOReturnUnsupported;

    } while( false );

    if ((kIOReturnSuccess != err) && userNotify)
    {
        userNotify->invalidatePort();
        userNotify->release();
        userNotify = 0;
    }

    return( err );
}

/* Routine io_service_add_message_notification */
kern_return_t is_io_service_add_interest_notification(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t * notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                port, &reference[0], sizeof(io_async_ref_t), false, notification));
}

/* Routine io_service_add_interest_notification_64 */
kern_return_t is_io_service_add_interest_notification_64(
    io_object_t service,
    io_name_t type_of_interest,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t *notification )
{
    return (internal_io_service_add_interest_notification(service, type_of_interest,
                wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification));
}

/* Routine io_service_acknowledge_notification */
kern_return_t is_io_service_acknowledge_notification(
    io_object_t _service,
    natural_t notify_ref,
    natural_t response )
{
    CHECK( IOService, _service, service );

    return( service->acknowledgeNotification( (IONotificationRef)(uintptr_t) notify_ref,
                                              (IOOptionBits) response ));
}

/* Routine io_connect_get_semaphore */
kern_return_t is_io_connect_get_notification_semaphore(
    io_connect_t connection,
    natural_t notification_type,
    semaphore_t *semaphore )
{
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return( client->getNotificationSemaphore( (UInt32) notification_type,
                                              semaphore ));
}
/* Routine io_registry_get_root_entry */
kern_return_t is_io_registry_get_root_entry(
    mach_port_t master_port,
    io_object_t *root )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::getRegistryRoot();
    entry->retain();
    *root = entry;

    return( kIOReturnSuccess );
}

/* Routine io_registry_create_iterator */
kern_return_t is_io_registry_create_iterator(
    mach_port_t master_port,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    *iterator = IOUserIterator::withIterator(
        IORegistryIterator::iterateOver(
            IORegistryEntry::getPlane( plane ), options ));

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}

/* Routine io_registry_entry_create_iterator */
kern_return_t is_io_registry_entry_create_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    uint32_t options,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = IOUserIterator::withIterator(
        IORegistryIterator::iterateOver( entry,
            IORegistryEntry::getPlane( plane ), options ));

    return( *iterator ? kIOReturnSuccess : kIOReturnBadArgument );
}

/* Routine io_registry_iterator_enter */
kern_return_t is_io_registry_iterator_enter_entry(
    io_object_t iterator )
{
    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    iter->enterEntry();
    IOLockUnlock(oIter->lock);

    return( kIOReturnSuccess );
}

/* Routine io_registry_iterator_exit */
kern_return_t is_io_registry_iterator_exit_entry(
    io_object_t iterator )
{
    bool didIt;

    CHECKLOCKED( IORegistryIterator, iterator, iter );

    IOLockLock(oIter->lock);
    didIt = iter->exitEntry();
    IOLockUnlock(oIter->lock);

    return( didIt ? kIOReturnSuccess : kIOReturnNoDevice );
}
/* Routine io_registry_entry_from_path */
kern_return_t is_io_registry_entry_from_path(
    mach_port_t master_port,
    io_string_t path,
    io_object_t *registry_entry )
{
    IORegistryEntry * entry;

    if( master_port != master_device_port)
        return( kIOReturnNotPrivileged);

    entry = IORegistryEntry::fromPath( path );
    *registry_entry = entry;

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_from_path */
kern_return_t is_io_registry_entry_from_path_ool(
    mach_port_t master_port,
    io_string_inband_t path,
    io_buf_ptr_t path_ool,
    mach_msg_type_number_t path_oolCnt,
    kern_return_t *result,
    io_object_t *registry_entry )
{
    IORegistryEntry * entry;
    vm_map_offset_t   map_data;
    const char *      cpath;
    IOReturn          res;
    kern_return_t     err;

    if (master_port != master_device_port) return(kIOReturnNotPrivileged);

    map_data = 0;
    entry    = 0;
    res = err = KERN_SUCCESS;
    if (path[0]) cpath = path;
    else
    {
        if (!path_oolCnt)                                      return(kIOReturnBadArgument);
        if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) return(kIOReturnMessageTooLarge);

        err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
        if (KERN_SUCCESS == err)
        {
            // must return success to mig after vm_map_copyout() succeeds, so result is actual
            cpath = CAST_DOWN(const char *, map_data);
            if (cpath[path_oolCnt - 1]) res = kIOReturnBadArgument;
        }
    }

    if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res))
    {
        entry = IORegistryEntry::fromPath(cpath);
        res = entry ? kIOReturnSuccess : kIOReturnNotFound;
    }

    if (map_data) vm_deallocate(kernel_map, map_data, path_oolCnt);

    if (KERN_SUCCESS != err) res = err;
    *registry_entry = entry;
    *result = res;

    return (err);
}
/* Routine io_registry_entry_in_plane */
kern_return_t is_io_registry_entry_in_plane(
    io_object_t registry_entry,
    io_name_t plane,
    boolean_t *inPlane )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_path */
kern_return_t is_io_registry_entry_get_path(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_t path )
{
    int length;

    CHECK( IORegistryEntry, registry_entry, entry );

    length = sizeof( io_string_t);
    if( entry->getPath( path, &length, IORegistryEntry::getPlane( plane )))
        return( kIOReturnSuccess );
    else
        return( kIOReturnBadArgument );
}

/* Routine io_registry_entry_get_path */
kern_return_t is_io_registry_entry_get_path_ool(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_inband_t path,
    io_buf_ptr_t *path_ool,
    mach_msg_type_number_t *path_oolCnt )
{
    enum   { kMaxPath = 16384 };
    IOReturn err;
    int      length;
    char   * buf;

    CHECK( IORegistryEntry, registry_entry, entry );

    *path_ool    = NULL;
    *path_oolCnt = 0;
    length = sizeof(io_string_inband_t);
    if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnSuccess;
    else
    {
        length = kMaxPath;
        buf = IONew(char, length);
        if (!buf) err = kIOReturnNoMemory;
        else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) err = kIOReturnError;
        else
        {
            *path_oolCnt = length;
            err = copyoutkdata(buf, length, path_ool);
        }
        if (buf) IODelete(buf, char, kMaxPath);
    }

    return (err);
}
/* Routine io_registry_entry_get_name */
kern_return_t is_io_registry_entry_get_name(
    io_object_t registry_entry,
    io_name_t name )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    strncpy( name, entry->getName(), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_name_in_plane */
kern_return_t is_io_registry_entry_get_name_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t name )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    strncpy( name, entry->getName( plane), sizeof( io_name_t));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_location_in_plane */
kern_return_t is_io_registry_entry_get_location_in_plane(
    io_object_t registry_entry,
    io_name_t planeName,
    io_name_t location )
{
    const IORegistryPlane * plane;
    CHECK( IORegistryEntry, registry_entry, entry );

    if( planeName[0])
        plane = IORegistryEntry::getPlane( planeName );
    else
        plane = 0;

    const char * cstr = entry->getLocation( plane );

    if( cstr) {
        strncpy( location, cstr, sizeof( io_name_t));
        return( kIOReturnSuccess );
    } else
        return( kIOReturnNotFound );
}

/* Routine io_registry_entry_get_registry_entry_id */
kern_return_t is_io_registry_entry_get_registry_entry_id(
    io_object_t registry_entry,
    uint64_t *entry_id )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *entry_id = entry->getRegistryEntryID();

    return (kIOReturnSuccess);
}
/* Routine io_registry_entry_get_property */
kern_return_t is_io_registry_entry_get_property_bytes(
    io_object_t registry_entry,
    io_name_t property_name,
    io_struct_inband_t buf,
    mach_msg_type_number_t *dataCnt )
{
    OSObject  * obj;
    OSData    * data;
    OSString  * str;
    OSBoolean * boo;
    OSNumber  * off;
    UInt64      offsetBytes;
    unsigned int len = 0;
    const void * bytes = 0;
    IOReturn     ret = kIOReturnSuccess;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNoResources );

    // One day OSData will be a common container base class
    // until then...
    if( (data = OSDynamicCast( OSData, obj ))) {
        len = data->getLength();
        bytes = data->getBytesNoCopy();
        if (!data->isSerializable()) len = 0;

    } else if( (str = OSDynamicCast( OSString, obj ))) {
        len = str->getLength() + 1;
        bytes = str->getCStringNoCopy();

    } else if( (boo = OSDynamicCast( OSBoolean, obj ))) {
        len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
        bytes = boo->isTrue() ? "Yes" : "No";

    } else if( (off = OSDynamicCast( OSNumber, obj ))) {
        offsetBytes = off->unsigned64BitValue();
        len = off->numberOfBytes();
        if (len > sizeof(offsetBytes)) len = sizeof(offsetBytes);
        bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
        bytes = (const void *)
                (((UInt32) bytes) + (sizeof( UInt64) - len));
#endif

    } else
        ret = kIOReturnBadArgument;

    if( bytes) {
        if( *dataCnt < len)
            ret = kIOReturnIPCError;
        else {
            *dataCnt = len;
            bcopy( bytes, buf, len );
        }
    }
    obj->release();

    return( ret );
}
/* Routine io_registry_entry_get_property */
kern_return_t is_io_registry_entry_get_property(
    io_object_t registry_entry,
    io_name_t property_name,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty(property_name);
    if( !obj)
        return( kIOReturnNotFound );

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );

    } else
        err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}

/* Routine io_registry_entry_get_property_recursively */
kern_return_t is_io_registry_entry_get_property_recursively(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t     len;
    OSObject *    obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    obj = entry->copyProperty( property_name,
                               IORegistryEntry::getPlane( plane ), options );
    if( !obj)
        return( kIOReturnNotFound );

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );

    } else
        err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}
/* Routine io_registry_entry_get_properties */
kern_return_t is_io_registry_entry_get_properties(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    return (kIOReturnUnsupported);
}

#if CONFIG_MACF

struct GetPropertiesEditorRef
{
    kauth_cred_t      cred;
    IORegistryEntry * entry;
    OSCollection    * root;
};

static const OSMetaClassBase *
GetPropertiesEditor(void                  * reference,
                    OSSerialize           * s,
                    OSCollection          * container,
                    const OSSymbol        * name,
                    const OSMetaClassBase * value)
{
    GetPropertiesEditorRef * ref = (typeof(ref)) reference;

    if (!ref->root) ref->root = container;
    if (ref->root == container)
    {
        if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy()))
        {
            value = 0;
        }
    }
    if (value) value->retain();
    return (value);
}

#endif /* CONFIG_MACF */
/* Routine io_registry_entry_get_properties */
kern_return_t is_io_registry_entry_get_properties_bin(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt)
{
    kern_return_t        err = kIOReturnSuccess;
    vm_size_t            len;
    OSSerialize        * s;
    OSSerialize::Editor  editor = 0;
    void               * editRef = 0;

    CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
    GetPropertiesEditorRef ref;
    if (mac_iokit_check_filter_properties(kauth_cred_get(), entry))
    {
        editor    = &GetPropertiesEditor;
        editRef   = &ref;
        ref.cred  = kauth_cred_get();
        ref.entry = entry;
        ref.root  = 0;
    }
#endif

    s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
    if (!s) return (kIOReturnNoMemory);

    if (!entry->serializeProperties(s)) err = kIOReturnUnsupported;

    if (kIOReturnSuccess == err)
    {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata(s->text(), len, properties);
    }
    s->release();

    return (err);
}
/* Routine io_registry_entry_get_property_bin */
kern_return_t is_io_registry_entry_get_property_bin(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t    err;
    vm_size_t        len;
    OSObject *       obj;
    const OSSymbol * sym;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name))
        return kIOReturnNotPermitted;
#endif

    sym = OSSymbol::withCString(property_name);
    if (!sym) return (kIOReturnNoMemory);

    if (gIORegistryEntryPropertyKeysKey == sym)
    {
        obj = entry->copyPropertyKeys();
    }
    else
    {
        if ((kIORegistryIterateRecursively & options) && plane[0])
        {
            obj = entry->copyProperty(property_name,
                                      IORegistryEntry::getPlane(plane), options);
        }
        else
        {
            obj = entry->copyProperty(property_name);
        }
        if (obj && gIORemoveOnReadProperties->containsObject(sym)) entry->removeProperty(sym);
    }

    sym->release();
    if (!obj) return (kIOReturnNotFound);

    OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
    if( !s) {
        obj->release();
        return( kIOReturnNoMemory );
    }

    if( obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        err = copyoutkdata( s->text(), len, properties );

    } else err = kIOReturnUnsupported;

    s->release();
    obj->release();

    return( err );
}
/* Routine io_registry_entry_set_properties */
kern_return_t is_io_registry_entry_set_properties
(
    io_object_t registry_entry,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result)
{
    OSObject *      obj;
    kern_return_t   err;
    IOReturn        res;
    vm_offset_t     data;
    vm_map_offset_t map_data;

    CHECK( IORegistryEntry, registry_entry, entry );

    if( propertiesCnt > sizeof(io_struct_inband_t) * 1024)
        return( kIOReturnMessageTooLarge);

    err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
    data = CAST_DOWN(vm_offset_t, map_data);

    if( KERN_SUCCESS == err) {

        FAKE_STACK_FRAME(entry->getMetaClass());

        // must return success after vm_map_copyout() succeeds
        obj = OSUnserializeXML( (const char *) data, propertiesCnt );
        vm_deallocate( kernel_map, data, propertiesCnt );

        if (!obj)
            res = kIOReturnBadArgument;
#if CONFIG_MACF
        else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
                                                     registry_entry, obj))
        {
            res = kIOReturnNotPermitted;
        }
#endif
        else
        {
            res = entry->setProperties( obj );
        }

        if (obj)
            obj->release();

        FAKE_STACK_FRAME_END();

    } else
        res = err;

    *result = res;
    return( err );
}
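
/*
 * Note on the pattern above (shared by the other vm_map_copyout() users in this
 * file): once vm_map_copyout() has consumed the copy object, the MIG routine must
 * report KERN_SUCCESS so the message memory is not disposed of twice; the real
 * status of the operation travels back in *result instead. Pattern sketch
 * (illustrative only):
 *
 *     kr = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) in);
 *     if (kr != KERN_SUCCESS) return kr;   // copy object still owned by MIG
 *     *result = do_work(map_data);         // real status goes here
 *     vm_deallocate(kernel_map, data, cnt);
 *     return KERN_SUCCESS;                 // tell MIG the message was consumed
 */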
/* Routine io_registry_entry_get_child_iterator */
kern_return_t is_io_registry_entry_get_child_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = entry->getChildIterator(
        IORegistryEntry::getPlane( plane ));

    return( kIOReturnSuccess );
}

/* Routine io_registry_entry_get_parent_iterator */
kern_return_t is_io_registry_entry_get_parent_iterator(
    io_object_t registry_entry,
    io_name_t plane,
    io_object_t *iterator )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *iterator = entry->getParentIterator(
        IORegistryEntry::getPlane( plane ));

    return( kIOReturnSuccess );
}

/* Routine io_service_get_busy_state */
kern_return_t is_io_service_get_busy_state(
    io_object_t _service,
    uint32_t *busyState )
{
    CHECK( IOService, _service, service );

    *busyState = service->getBusyState();

    return( kIOReturnSuccess );
}

/* Routine io_service_get_state */
kern_return_t is_io_service_get_state(
    io_object_t _service,
    uint64_t *state,
    uint32_t *busy_state,
    uint64_t *accumulated_busy_time )
{
    CHECK( IOService, _service, service );

    *state                 = service->getState();
    *busy_state            = service->getBusyState();
    *accumulated_busy_time = service->getAccumulatedBusyTime();

    return( kIOReturnSuccess );
}

/* Routine io_service_wait_quiet */
kern_return_t is_io_service_wait_quiet(
    io_object_t _service,
    mach_timespec_t wait_time )
{
    uint64_t timeoutNS;

    CHECK( IOService, _service, service );

    timeoutNS = wait_time.tv_sec;
    timeoutNS *= kSecondScale;
    timeoutNS += wait_time.tv_nsec;

    return( service->waitQuiet(timeoutNS) );
}
/* Routine io_service_request_probe */
kern_return_t is_io_service_request_probe(
    io_object_t _service,
    uint32_t options )
{
    CHECK( IOService, _service, service );

    return( service->requestProbe( options ));
}

/* Routine io_service_get_authorization_id */
kern_return_t is_io_service_get_authorization_id(
    io_object_t _service,
    uint64_t *authorization_id )
{
    kern_return_t kr;

    CHECK( IOService, _service, service );

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                           kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    *authorization_id = service->getAuthorizationID();

    return( kr );
}

/* Routine io_service_set_authorization_id */
kern_return_t is_io_service_set_authorization_id(
    io_object_t _service,
    uint64_t authorization_id )
{
    CHECK( IOService, _service, service );

    return( service->setAuthorizationID( authorization_id ) );
}
/* Routine io_service_open_ndr */
kern_return_t is_io_service_open_extended(
    io_object_t _service,
    task_t owningTask,
    uint32_t connect_type,
    NDR_record_t ndr,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result,
    io_object_t *connection )
{
    IOUserClient * client = 0;
    kern_return_t  err = KERN_SUCCESS;
    IOReturn       res = kIOReturnSuccess;
    OSDictionary * propertiesDict = 0;
    bool           crossEndian;
    bool           disallowAccess;

    CHECK( IOService, _service, service );

    if (!owningTask) return (kIOReturnBadArgument);
    assert(owningTask == current_task());
    if (owningTask != current_task()) return (kIOReturnBadArgument);

    do
    {
        if (properties) return (kIOReturnUnsupported);
#if 0
        {
            OSObject *      obj;
            vm_offset_t     data;
            vm_map_offset_t map_data;

            if( propertiesCnt > sizeof(io_struct_inband_t))
                return( kIOReturnMessageTooLarge);

            err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
            res = err;
            data = CAST_DOWN(vm_offset_t, map_data);
            if (KERN_SUCCESS == err)
            {
                // must return success after vm_map_copyout() succeeds
                obj = OSUnserializeXML( (const char *) data, propertiesCnt );
                vm_deallocate( kernel_map, data, propertiesCnt );
                propertiesDict = OSDynamicCast(OSDictionary, obj);
                if (!propertiesDict)
                {
                    res = kIOReturnBadArgument;
                    if (obj)
                        obj->release();
                }
            }
            if (kIOReturnSuccess != res)
                break;
        }
#endif
        crossEndian = (ndr.int_rep != NDR_record.int_rep);
        if (crossEndian)
        {
            if (!propertiesDict)
                propertiesDict = OSDictionary::withCapacity(4);
            OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
            if (data)
            {
                if (propertiesDict)
                    propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
                data->release();
            }
        }

        res = service->newUserClient( owningTask, (void *) owningTask,
                                      connect_type, propertiesDict, &client );

        if (propertiesDict)
            propertiesDict->release();

        if (res == kIOReturnSuccess)
        {
            assert( OSDynamicCast(IOUserClient, client) );

            client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
            client->closed = false;

            disallowAccess = (crossEndian
                && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
                && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
            if (disallowAccess) res = kIOReturnUnsupported;
#if CONFIG_MACF
            else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type))
                res = kIOReturnNotPermitted;
#endif

            if (kIOReturnSuccess == res) res = client->registerOwner(owningTask);

            if (kIOReturnSuccess != res)
            {
                IOStatisticsClientCall();
                client->clientClose();
                client->release();
                client = 0;
                break;
            }
            OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
            if (creatorName)
            {
                client->setProperty(kIOUserClientCreatorKey, creatorName);
                creatorName->release();
            }
            client->setTerminateDefer(service, false);
        }
    }
    while (false);

    *connection = client;
    *result = res;

    return (err);
}
/* Routine io_service_close */
kern_return_t is_io_service_close(
    io_object_t connection )
{
    OSSet * mappings;
    if ((mappings = OSDynamicCast(OSSet, connection)))
        return( kIOReturnSuccess );

    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();

    if (client->sharedInstance || OSCompareAndSwap8(0, 1, &client->closed))
    {
        client->clientClose();
    }
    else
    {
        IOLog("ignored is_io_service_close(0x%qx,%s)\n",
              client->getRegistryEntryID(), client->getName());
    }

    return( kIOReturnSuccess );
}

/* Routine io_connect_get_service */
kern_return_t is_io_connect_get_service(
    io_object_t connection,
    io_object_t *service )
{
    IOService * theService;

    CHECK( IOUserClient, connection, client );

    theService = client->getService();
    if( theService)
        theService->retain();

    *service = theService;

    return( theService ? kIOReturnSuccess : kIOReturnUnsupported );
}

/* Routine io_connect_set_notification_port */
kern_return_t is_io_connect_set_notification_port(
    io_object_t connection,
    uint32_t notification_type,
    mach_port_t port,
    uint32_t reference )
{
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return( client->registerNotificationPort( port, notification_type,
                                              (io_user_reference_t) reference ));
}

/* Routine io_connect_set_notification_port */
kern_return_t is_io_connect_set_notification_port_64(
    io_object_t connection,
    uint32_t notification_type,
    mach_port_t port,
    io_user_reference_t reference )
{
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return( client->registerNotificationPort( port, notification_type,
                                              reference ));
}
/* Routine io_connect_map_memory_into_task */
kern_return_t is_io_connect_map_memory_into_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t into_task,
    mach_vm_address_t *address,
    mach_vm_size_t *size,
    uint32_t flags )
{
    IOReturn      err;
    IOMemoryMap * map;

    CHECK( IOUserClient, connection, client );

    if (!into_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    map = client->mapClientMemory64( memory_type, into_task, flags, *address );

    if( map) {
        *address = map->getAddress();
        if( size)
            *size = map->getSize();

        if( client->sharedInstance
            || (into_task != current_task())) {
            // push a name out to the task owning the map,
            // so we can clean up maps
            mach_port_name_t name __unused =
                IOMachPort::makeSendRightForTask(
                                    into_task, map, IKOT_IOKIT_OBJECT );

        } else {
            // keep it with the user client
            IOLockLock( gIOObjectPortLock);
            if( 0 == client->mappings)
                client->mappings = OSSet::withCapacity(2);
            if( client->mappings)
                client->mappings->setObject( map);
            IOLockUnlock( gIOObjectPortLock);
            map->release();
        }
        err = kIOReturnSuccess;

    } else
        err = kIOReturnBadArgument;

    return( err );
}

/* Routine is_io_connect_map_memory */
kern_return_t is_io_connect_map_memory(
    io_object_t connect,
    uint32_t type,
    task_t task,
    uint32_t * mapAddr,
    uint32_t * mapSize,
    uint32_t flags )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    size;

    address = SCALAR64(*mapAddr);
    size    = SCALAR64(*mapSize);

    err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags);

    *mapAddr = SCALAR32(address);
    *mapSize = SCALAR32(size);

    return (err);
}
IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
{
    OSIterator *  iter;
    IOMemoryMap * map = 0;

    IOLockLock(gIOObjectPortLock);

    iter = OSCollectionIterator::withCollection(mappings);
    if(iter)
    {
        while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject())))
        {
            if(mem == map->getMemoryDescriptor())
            {
                map->retain();
                mappings->removeObject(map);
                break;
            }
        }
        iter->release();
    }

    IOLockUnlock(gIOObjectPortLock);

    return (map);
}
/* Routine io_connect_unmap_memory_from_task */
kern_return_t is_io_connect_unmap_memory_from_task
(
    io_connect_t connection,
    uint32_t memory_type,
    task_t from_task,
    mach_vm_address_t address )
{
    IOReturn             err;
    IOOptionBits         options = 0;
    IOMemoryDescriptor * memory = 0;
    IOMemoryMap *        map;

    CHECK( IOUserClient, connection, client );

    if (!from_task) return (kIOReturnBadArgument);

    IOStatisticsClientCall();
    err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory );

    if( memory && (kIOReturnSuccess == err)) {

        options = (options & ~kIOMapUserOptionsMask)
                | kIOMapAnywhere | kIOMapReference;

        map = memory->createMappingInTask( from_task, address, options );
        memory->release();
        if( map)
        {
            IOLockLock( gIOObjectPortLock);
            if( client->mappings)
                client->mappings->removeObject( map);
            IOLockUnlock( gIOObjectPortLock);

            mach_port_name_t name = 0;
            if (from_task != current_task())
            {
                name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
                map->release();
            }

            if (name)
            {
                map->userClientUnmap();
                err = iokit_mod_send_right( from_task, name, -2 );
                err = kIOReturnSuccess;
            }
            else
                IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
            if (from_task == current_task())
                map->release();
        }
        else
            err = kIOReturnBadArgument;
    }

    return( err );
}

kern_return_t is_io_connect_unmap_memory(
    io_object_t connect,
    uint32_t type,
    task_t task,
    uint32_t mapAddr )
{
    IOReturn          err;
    mach_vm_address_t address;

    address = SCALAR64(mapAddr);

    err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr);

    return (err);
}
/* Routine io_connect_add_client */
kern_return_t is_io_connect_add_client(
    io_object_t connection,
    io_object_t connect_to)
{
    CHECK( IOUserClient, connection, client );
    CHECK( IOUserClient, connect_to, to );

    IOStatisticsClientCall();
    return( client->connectClient( to ) );
}

/* Routine io_connect_set_properties */
kern_return_t is_io_connect_set_properties(
    io_object_t connection,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result)
{
    return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result ));
}
/* Routine io_user_client_method */
kern_return_t is_io_connect_method_var_output
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    io_buf_ptr_t *var_output,
    mach_msg_type_number_t *var_outputCnt
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD = 0;
    OSObject *           structureVariableOutputData = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    args.asyncWakePort               = MACH_PORT_NULL;
    args.asyncReference              = 0;
    args.asyncReferenceCount         = 0;
    args.structureVariableOutputData = &structureVariableOutputData;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;
    args.structureOutputDescriptor = NULL;
    args.structureOutputDescriptorSize = 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;

    if (var_outputCnt && var_output && (kIOReturnSuccess == ret))
    {
        OSSerialize * serialize;
        OSData      * data;
        vm_size_t     len;

        if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData)))
        {
            len = serialize->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(serialize->text(), len, var_output);
        }
        else if ((data = OSDynamicCast(OSData, structureVariableOutputData)))
        {
            len = data->getLength();
            *var_outputCnt = len;
            ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
        }
        else
        {
            ret = kIOReturnUnderrun;
        }
    }

    if (inputMD)
        inputMD->release();
    if (structureVariableOutputData)
        structureVariableOutputData->release();

    return (ret);
}
/* Routine io_user_client_method */
kern_return_t is_io_connect_method
(
    io_connect_t connection,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    mach_vm_address_t ool_output,
    mach_vm_size_t *ool_output_size
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD  = 0;
    IOMemoryDescriptor * outputMD = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    args.selector = selector;

    args.asyncWakePort               = MACH_PORT_NULL;
    args.asyncReference              = 0;
    args.asyncReferenceCount         = 0;
    args.structureVariableOutputData = 0;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;

    if (ool_output && ool_output_size)
    {
        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
                                                        kIODirectionIn, current_task());
    }

    args.structureOutputDescriptor = outputMD;
    args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    *scalar_outputCnt = args.scalarOutputCount;
    *inband_outputCnt = args.structureOutputSize;
    *ool_output_size  = args.structureOutputDescriptorSize;

    if (inputMD)
        inputMD->release();
    if (outputMD)
        outputMD->release();

    return (ret);
}
/* Routine io_async_user_client_method */
kern_return_t is_io_connect_async_method
(
    io_connect_t connection,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t selector,
    io_scalar_inband64_t scalar_input,
    mach_msg_type_number_t scalar_inputCnt,
    io_struct_inband_t inband_input,
    mach_msg_type_number_t inband_inputCnt,
    mach_vm_address_t ool_input,
    mach_vm_size_t ool_input_size,
    io_struct_inband_t inband_output,
    mach_msg_type_number_t *inband_outputCnt,
    io_scalar_inband64_t scalar_output,
    mach_msg_type_number_t *scalar_outputCnt,
    mach_vm_address_t ool_output,
    mach_vm_size_t * ool_output_size
)
{
    CHECK( IOUserClient, connection, client );

    IOExternalMethodArguments args;
    IOReturn ret;
    IOMemoryDescriptor * inputMD  = 0;
    IOMemoryDescriptor * outputMD = 0;

    bzero(&args.__reserved[0], sizeof(args.__reserved));
    args.__reservedA = 0;
    args.version = kIOExternalMethodArgumentsCurrentVersion;

    reference[0] = (io_user_reference_t) wake_port;
    if (vm_map_is_64bit(get_task_map(current_task())))
        reference[0] |= kIOUCAsync64Flag;

    args.selector = selector;

    args.asyncWakePort       = wake_port;
    args.asyncReference      = reference;
    args.asyncReferenceCount = referenceCnt;

    args.structureVariableOutputData = 0;

    args.scalarInput = scalar_input;
    args.scalarInputCount = scalar_inputCnt;
    args.structureInput = inband_input;
    args.structureInputSize = inband_inputCnt;

    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);
    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) return (kIOReturnIPCError);

    if (ool_input)
        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
                                                       kIODirectionOut | kIOMemoryMapCopyOnWrite,
                                                       current_task());

    args.structureInputDescriptor = inputMD;

    args.scalarOutput = scalar_output;
    args.scalarOutputCount = *scalar_outputCnt;
    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
    args.structureOutput = inband_output;
    args.structureOutputSize = *inband_outputCnt;

    if (ool_output)
    {
        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
                                                        kIODirectionIn, current_task());
    }

    args.structureOutputDescriptor = outputMD;
    args.structureOutputDescriptorSize = *ool_output_size;

    IOStatisticsClientCall();
    ret = client->externalMethod( selector, &args );

    *inband_outputCnt = args.structureOutputSize;
    *ool_output_size  = args.structureOutputDescriptorSize;

    if (inputMD)
        inputMD->release();
    if (outputMD)
        outputMD->release();

    return (ret);
}
/* Routine io_connect_method_scalarI_scalarO */
kern_return_t is_io_connect_method_scalarI_scalarO(
    io_object_t connect,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_scalar_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    IOReturn err;
    uint32_t i;
    io_scalar_inband64_t _input;
    io_scalar_inband64_t _output;

    mach_msg_type_number_t struct_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    bzero(&_output[0], sizeof(_output));
    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    err = is_io_connect_method(connect, index,
                _input, inputCount,
                NULL, 0,
                0, 0,
                NULL, &struct_outputCnt,
                _output, outputCount,
                0, &ool_output_size);

    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return (err);
}
kern_return_t shim_io_connect_method_scalarI_scalarO(
    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOMethod           func;
    io_scalar_inband_t _output;
    IOReturn           err;
    uint32_t           i;

    err = kIOReturnBadArgument;

    bzero(&_output[0], sizeof(_output));
    do {

        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 6:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       &_output[0] );
                break;
            case 4:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3] );
                break;
            case 1:
                err = (object->*func)( ARG32(input[0]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)( &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
/* Routine io_async_method_scalarI_scalarO */
kern_return_t is_io_async_method_scalarI_scalarO(
    io_object_t connect,
    mach_port_t wake_port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_scalar_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    IOReturn err;
    uint32_t i;
    io_scalar_inband64_t _input;
    io_scalar_inband64_t _output;
    io_async_ref64_t _reference;

    bzero(&_output[0], sizeof(_output));
    for (i = 0; i < referenceCnt; i++)
        _reference[i] = REF64(reference[i]);

    mach_msg_type_number_t struct_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    err = is_io_connect_async_method(connect,
                wake_port, _reference, referenceCnt,
                index,
                _input, inputCount,
                NULL, 0,
                0, 0,
                NULL, &struct_outputCnt,
                _output, outputCount,
                0, &ool_output_size);

    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return (err);
}
/* Routine io_async_method_scalarI_structureO */
kern_return_t is_io_async_method_scalarI_structureO(
    io_object_t connect,
    mach_port_t wake_port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    uint32_t i;
    io_scalar_inband64_t _input;
    io_async_ref64_t _reference;

    for (i = 0; i < referenceCnt; i++)
        _reference[i] = REF64(reference[i]);

    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    return (is_io_connect_async_method(connect,
                wake_port, _reference, referenceCnt,
                index,
                _input, inputCount,
                NULL, 0,
                0, 0,
                output, outputCount,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}

/* Routine io_async_method_scalarI_structureI */
kern_return_t is_io_async_method_scalarI_structureI(
    io_connect_t connect,
    mach_port_t wake_port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t inputStruct,
    mach_msg_type_number_t inputStructCount )
{
    uint32_t i;
    io_scalar_inband64_t _input;
    io_async_ref64_t _reference;

    for (i = 0; i < referenceCnt; i++)
        _reference[i] = REF64(reference[i]);

    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_msg_type_number_t inband_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    return (is_io_connect_async_method(connect,
                wake_port, _reference, referenceCnt,
                index,
                _input, inputCount,
                inputStruct, inputStructCount,
                0, 0,
                NULL, &inband_outputCnt,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}
/* Routine io_async_method_structureI_structureO */
kern_return_t is_io_async_method_structureI_structureO(
    io_object_t connect,
    mach_port_t wake_port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    uint32_t index,
    io_struct_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    uint32_t i;
    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;
    io_async_ref64_t _reference;

    for (i = 0; i < referenceCnt; i++)
        _reference[i] = REF64(reference[i]);

    return (is_io_connect_async_method(connect,
                wake_port, _reference, referenceCnt,
                index,
                NULL, 0,
                input, inputCount,
                0, 0,
                output, outputCount,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}
kern_return_t shim_io_async_method_scalarI_scalarO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_user_scalar_t * output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod      func;
    uint32_t           i;
    io_scalar_inband_t _output;
    IOReturn           err;
    io_async_ref_t     reference;

    bzero(&_output[0], sizeof(_output));
    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;

    do {

        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( *outputCount != method->count1)
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 6:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) );
                break;
            case 5:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       &_output[0] );
                break;
            case 4:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       &_output[0], &_output[1] );
                break;
            case 3:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       &_output[0], &_output[1], &_output[2] );
                break;
            case 2:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3] );
                break;
            case 1:
                err = (object->*func)( reference,
                                       ARG32(input[0]),
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4] );
                break;
            case 0:
                err = (object->*func)( reference,
                                       &_output[0], &_output[1], &_output[2],
                                       &_output[3], &_output[4], &_output[5] );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    for (i = 0; i < *outputCount; i++)
        output[i] = SCALAR32(_output[i]);

    return( err);
}
/* Routine io_connect_method_scalarI_structureO */
kern_return_t is_io_connect_method_scalarI_structureO(
    io_object_t connect,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    uint32_t i;
    io_scalar_inband64_t _input;

    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    return (is_io_connect_method(connect, index,
                _input, inputCount,
                NULL, 0,
                0, 0,
                output, outputCount,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}
kern_return_t shim_io_connect_method_scalarI_structureO(
    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err;

    err = kIOReturnBadArgument;

    do {
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
                && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       output );
                break;
            case 4:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
                                       output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)( ARG32(input[0]),
                                       output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
kern_return_t shim_io_async_method_scalarI_structureO(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    IOAsyncMethod  func;
    uint32_t       i;
    IOReturn       err;
    io_async_ref_t reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;

    do {
        if( inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
                && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       output );
                break;
            case 4:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       output, (void *)outputCount );
                break;
            case 3:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       output, (void *)outputCount, 0 );
                break;
            case 2:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]),
                                       output, (void *)outputCount, 0, 0 );
                break;
            case 1:
                err = (object->*func)( reference,
                                       ARG32(input[0]),
                                       output, (void *)outputCount, 0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( reference,
                                       output, (void *)outputCount, 0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while( false);

    return( err);
}
/* Routine io_connect_method_scalarI_structureI */
kern_return_t is_io_connect_method_scalarI_structureI(
    io_connect_t connect,
    uint32_t index,
    io_scalar_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t inputStruct,
    mach_msg_type_number_t inputStructCount )
{
    uint32_t i;
    io_scalar_inband64_t _input;

    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_msg_type_number_t inband_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    for (i = 0; i < inputCount; i++)
        _input[i] = SCALAR64(input[i]);

    return (is_io_connect_method(connect, index,
                _input, inputCount,
                inputStruct, inputStructCount,
                0, 0,
                NULL, &inband_outputCnt,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}
kern_return_t shim_io_connect_method_scalarI_structureI(
    IOExternalMethod * method,
    IOService * object,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t inputStruct,
    mach_msg_type_number_t inputStructCount )
{
    IOMethod func;
    IOReturn err = kIOReturnBadArgument;

    do
    {
        if (inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
                && (inputStructCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       inputStruct );
                break;
            case 4:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2],
                                       ARG32(input[3]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount );
                break;
            case 3:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0 );
                break;
            case 2:
                err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0 );
                break;
            case 1:
                err = (object->*func)( ARG32(input[0]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while (false);

    return( err);
}
kern_return_t shim_io_async_method_scalarI_structureI(
    IOExternalAsyncMethod * method,
    IOService * object,
    mach_port_t asyncWakePort,
    io_user_reference_t * asyncReference,
    uint32_t asyncReferenceCount,
    const io_user_scalar_t * input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t inputStruct,
    mach_msg_type_number_t inputStructCount )
{
    IOAsyncMethod  func;
    uint32_t       i;
    IOReturn       err = kIOReturnBadArgument;
    io_async_ref_t reference;

    for (i = 0; i < asyncReferenceCount; i++)
        reference[i] = REF32(asyncReference[i]);

    do
    {
        if (inputCount != method->count0)
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
                && (inputStructCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputStructCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputStructCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        switch( inputCount) {

            case 5:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]), ARG32(input[4]),
                                       inputStruct );
                break;
            case 4:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       ARG32(input[3]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount );
                break;
            case 3:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0 );
                break;
            case 2:
                err = (object->*func)( reference,
                                       ARG32(input[0]), ARG32(input[1]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0 );
                break;
            case 1:
                err = (object->*func)( reference,
                                       ARG32(input[0]),
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0, 0 );
                break;
            case 0:
                err = (object->*func)( reference,
                                       inputStruct, (void *)(uintptr_t)inputStructCount,
                                       0, 0, 0, 0 );
                break;

            default:
                IOLog("%s: Bad method table\n", object->getName());
        }
    }
    while (false);

    return( err);
}
/* Routine io_connect_method_structureI_structureO */
kern_return_t is_io_connect_method_structureI_structureO(
    io_object_t connect,
    uint32_t index,
    io_struct_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    mach_msg_type_number_t * outputCount )
{
    mach_msg_type_number_t scalar_outputCnt = 0;
    mach_vm_size_t ool_output_size = 0;

    return (is_io_connect_method(connect, index,
                NULL, 0,
                input, inputCount,
                0, 0,
                output, outputCount,
                NULL, &scalar_outputCnt,
                0, &ool_output_size));
}
kern_return_t shim_io_connect_method_structureI_structureO(
    IOExternalMethod * method,
    IOService * object,
    io_struct_inband_t input,
    mach_msg_type_number_t inputCount,
    io_struct_inband_t output,
    IOByteCount * outputCount )
{
    IOMethod func;
    IOReturn err = kIOReturnBadArgument;

    do
    {
        if( (kIOUCVariableStructureSize != method->count0)
                && (inputCount != method->count0))
        {
            IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
            continue;
        }
        if( (kIOUCVariableStructureSize != method->count1)
                && (*outputCount != method->count1))
        {
            IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
            DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
            continue;
        }

        func = method->func;

        if( method->count1) {
            if( method->count0) {
                err = (object->*func)( input, output,
                                       (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
            } else {
                err = (object->*func)( output, outputCount, 0, 0, 0, 0 );
            }
        } else {
            err = (object->*func)( input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
        }
    }
    while( false);

    return( err);
}
kern_return_t shim_io_async_method_structureI_structureO(
	IOExternalAsyncMethod *	method,
	IOService *		object,
	mach_port_t		asyncWakePort,
	io_user_reference_t *	asyncReference,
	uint32_t		asyncReferenceCount,
	io_struct_inband_t	input,
	mach_msg_type_number_t	inputCount,
	io_struct_inband_t	output,
	mach_msg_type_number_t *	outputCount )
{
    IOAsyncMethod	func;
    uint32_t		i;
    IOReturn		err;
    io_async_ref_t	reference;

    for (i = 0; i < asyncReferenceCount; i++)
	reference[i] = REF32(asyncReference[i]);

    err = kIOReturnBadArgument;

    do
    {
	if( (kIOUCVariableStructureSize != method->count0)
		&& (inputCount != method->count0))
	{
	    IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__,
		object->getName(), (uint64_t)inputCount, (uint64_t)method->count0, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
	    continue;
	}

	if( (kIOUCVariableStructureSize != method->count1)
		&& (*outputCount != method->count1))
	{
	    IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx 0x%llx\n", __FUNCTION__, __LINE__,
		object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1, (uint64_t)kIOUCVariableStructureSize);
	    DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
	    continue;
	}

	func = method->func;

	if( method->count1) {
	    if( method->count0) {
		err = (object->*func)( reference,
				       input, output,
				       (void *)(uintptr_t)inputCount, outputCount, 0, 0 );
	    } else {
		err = (object->*func)( reference,
				       output, outputCount, 0, 0, 0, 0 );
	    }
	} else {
	    err = (object->*func)( reference,
				   input, (void *)(uintptr_t)inputCount, 0, 0, 0, 0 );
	}
    }
    while( false);

    return( err);
}
bool gIOKextdClearedBusy = false;
/* Routine io_catalog_send_data */
kern_return_t is_io_catalog_send_data(
        mach_port_t             master_port,
        uint32_t                flag,
        io_buf_ptr_t            inData,
        mach_msg_type_number_t  inDataCount,
        kern_return_t *         result)
{
#if NO_KEXTD
    return kIOReturnNotPrivileged;
#else /* NO_KEXTD */
    OSObject * obj = 0;
    vm_offset_t data;
    kern_return_t kr = kIOReturnError;

    //printf("io_catalog_send_data called. flag: %d\n", flag);

    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    if( (flag != kIOCatalogRemoveKernelLinker &&
         flag != kIOCatalogKextdActive &&
         flag != kIOCatalogKextdFinishedLaunching) &&
        ( !inData || !inDataCount) )
    {
        return kIOReturnBadArgument;
    }

    if (!IOTaskHasEntitlement(current_task(), "com.apple.rootless.kext-management"))
    {
        OSString * taskName = IOCopyLogNameForPID(proc_selfpid());
        IOLog("IOCatalogueSendData(%s): Not entitled\n", taskName ? taskName->getCStringNoCopy() : "");
        OSSafeReleaseNULL(taskName);
        // For now, fake success to not break applications relying on this function succeeding.
        // See <rdar://problem/32554970> for more details.
        return kIOReturnSuccess;
    }

    if (inData) {
        vm_map_offset_t map_data;

        if( inDataCount > sizeof(io_struct_inband_t) * 1024)
            return( kIOReturnMessageTooLarge);

        kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData);
        data = CAST_DOWN(vm_offset_t, map_data);

        if( kr != KERN_SUCCESS)
            return kr;

        // must return success after vm_map_copyout() succeeds
        if( inDataCount ) {
            obj = (OSObject *)OSUnserializeXML((const char *)data, inDataCount);
            vm_deallocate( kernel_map, data, inDataCount );
            if( !obj) {
                *result = kIOReturnNoMemory;
                return( KERN_SUCCESS);
            }
        }
    }

    switch ( flag ) {
        case kIOCatalogResetDrivers:
        case kIOCatalogResetDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if ( array ) {
                    if ( !gIOCatalogue->resetAndAddDrivers(array,
                            flag == kIOCatalogResetDrivers) ) {
                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogAddDrivers:
        case kIOCatalogAddDriversNoMatch: {
                OSArray * array;

                array = OSDynamicCast(OSArray, obj);
                if ( array ) {
                    if ( !gIOCatalogue->addDrivers( array,
                            flag == kIOCatalogAddDrivers) ) {
                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveDrivers:
        case kIOCatalogRemoveDriversNoMatch: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->removeDrivers( dict,
                            flag == kIOCatalogRemoveDrivers ) ) {
                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogStartMatching: {
                OSDictionary * dict;

                dict = OSDynamicCast(OSDictionary, obj);
                if ( dict ) {
                    if ( !gIOCatalogue->startMatching( dict ) ) {
                        kr = kIOReturnError;
                    }
                } else {
                    kr = kIOReturnBadArgument;
                }
            }
            break;

        case kIOCatalogRemoveKernelLinker:
            kr = KERN_NOT_SUPPORTED;
            break;

        case kIOCatalogKextdActive:
            IOServiceTrace(IOSERVICE_KEXTD_ALIVE, 0, 0, 0, 0);
            OSKext::setKextdActive();

            /* Dump all nonloaded startup extensions; kextd will now send them
             * down on request.
             */
            OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false);
            kr = kIOReturnSuccess;
            break;

        case kIOCatalogKextdFinishedLaunching: {
                if (!gIOKextdClearedBusy) {
                    IOService * serviceRoot = IOService::getServiceRoot();
                    if (serviceRoot) {
                        IOServiceTrace(IOSERVICE_KEXTD_READY, 0, 0, 0, 0);
                        serviceRoot->adjustBusy(-1);
                        gIOKextdClearedBusy = true;
                    }
                }
                kr = kIOReturnSuccess;
            }
            break;

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    if (obj) obj->release();

    *result = kr;
    return( KERN_SUCCESS);
#endif /* NO_KEXTD */
}
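/*
 * Illustrative sketch (not part of the original file): is_io_catalog_send_data()
 * is the kernel side of IOKitLib's IOCatalogueSendData(). A caller holding the
 * kext-management entitlement might exercise the kIOCatalogStartMatching case
 * roughly as follows; the serialized dictionary below is a hypothetical,
 * minimal example, and visibility of the catalog flag constant to the caller
 * is assumed.
 *
 *   #include <IOKit/IOKitLib.h>
 *   #include <string.h>
 *
 *   kern_return_t startCatalogMatching(void)
 *   {
 *       const char * xml = "<dict><key>IOProviderClass</key>"
 *                          "<string>IOResources</string></dict>";
 *       return IOCatalogueSendData(kIOMasterPortDefault, kIOCatalogStartMatching,
 *                                  xml, (uint32_t)strlen(xml) + 1);
 *   }
 */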
/* Routine io_catalog_terminate */
kern_return_t is_io_catalog_terminate(
        mach_port_t             master_port,
        uint32_t                flag,
        io_name_t               name )
{
    kern_return_t kr;

    if( master_port != master_device_port )
        return kIOReturnNotPrivileged;

    kr = IOUserClient::clientHasPrivilege( (void *) current_task(),
                                           kIOClientPrivilegeAdministrator );
    if( kIOReturnSuccess != kr)
        return( kr );

    switch ( flag ) {
#if !defined(SECURE_KERNEL)
        case kIOCatalogServiceTerminate:
            OSIterator *    iter;
            IOService *     service;

            iter = IORegistryIterator::iterateOver(gIOServicePlane,
                                                   kIORegistryIterateRecursively);
            if ( !iter )
                return kIOReturnNoMemory;

            do {
                iter->reset();
                while( (service = (IOService *)iter->getNextObject()) ) {
                    if( service->metaCast(name)) {
                        if ( !service->terminate( kIOServiceRequired
                                                | kIOServiceSynchronous) ) {
                            kr = kIOReturnUnsupported;
                            break;
                        }
                    }
                }
            } while( !service && !iter->isValid());
            iter->release();
            break;

        case kIOCatalogModuleUnload:
        case kIOCatalogModuleTerminate:
            kr = gIOCatalogue->terminateDriversForModule(name,
                                        flag == kIOCatalogModuleUnload);
            break;
#endif

        default:
            kr = kIOReturnBadArgument;
            break;
    }

    return( kr );
}
/* Routine io_catalog_get_data */
kern_return_t is_io_catalog_get_data(
        mach_port_t             master_port,
        uint32_t                flag,
        io_buf_ptr_t            *outData,
        mach_msg_type_number_t  *outDataCount)
{
    kern_return_t kr = kIOReturnSuccess;
    OSSerialize * s;

    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_get_data called. flag: %d\n", flag);

    s = OSSerialize::withCapacity(4096);
    if ( !s )
        return kIOReturnNoMemory;

    kr = gIOCatalogue->serializeData(flag, s);

    if ( kr == kIOReturnSuccess ) {
        vm_offset_t data;
        vm_map_copy_t copy;
        vm_size_t size;

        size = s->getLength();
        kr = vm_allocate_kernel(kernel_map, &data, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IOKIT);
        if ( kr == kIOReturnSuccess ) {
            bcopy(s->text(), (void *)data, size);
            kr = vm_map_copyin(kernel_map, (vm_map_address_t)data,
                               (vm_map_size_t)size, true, &copy);
            *outData = (char *)copy;
            *outDataCount = size;
        }
    }

    s->release();

    return kr;
}
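/*
 * Illustrative sketch (not part of the original file): the catalogue snapshot
 * built above is consumed in user space via IOKitLib's IOCatalogueGetData(),
 * which receives the out-of-line buffer produced by vm_map_copyin(). The flag
 * value used below is an assumed example; the exact constants and buffer
 * ownership rules should be checked against the IOKit framework headers.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   kern_return_t dumpCatalogue(void)
 *   {
 *       char *    buffer = NULL;
 *       uint32_t  size   = 0;
 *       kern_return_t kr = IOCatalogueGetData(kIOMasterPortDefault, 0, &buffer, &size);
 *       if (kr == KERN_SUCCESS && buffer) {
 *           // ... parse the serialized catalogue, then release the buffer
 *           vm_deallocate(mach_task_self(), (vm_address_t)buffer, size);
 *       }
 *       return kr;
 *   }
 */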
/* Routine io_catalog_get_gen_count */
kern_return_t is_io_catalog_get_gen_count(
        mach_port_t             master_port,
        uint32_t                *genCount)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_get_gen_count called.\n");

    if ( !genCount )
        return kIOReturnBadArgument;

    *genCount = gIOCatalogue->getGenerationCount();

    return kIOReturnSuccess;
}
/* Routine io_catalog_module_loaded.
 * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used.
 */
kern_return_t is_io_catalog_module_loaded(
        mach_port_t             master_port,
        io_name_t               name)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    //printf("io_catalog_module_loaded called. name %s\n", name);

    if ( !name )
        return kIOReturnBadArgument;

    gIOCatalogue->moduleHasLoaded(name);

    return kIOReturnSuccess;
}
kern_return_t is_io_catalog_reset(
        mach_port_t             master_port,
        uint32_t                flag)
{
    if( master_port != master_device_port)
        return kIOReturnNotPrivileged;

    switch ( flag ) {
        case kIOCatalogResetDefault:
            gIOCatalogue->reset();
            break;

        default:
            return kIOReturnBadArgument;
    }

    return kIOReturnSuccess;
}
kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args)
{
    kern_return_t result = kIOReturnBadArgument;
    IOUserClient *userClient;

    if ((userClient = OSDynamicCast(IOUserClient,
            iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) {
        IOExternalTrap *trap;
        IOService *target = NULL;

        trap = userClient->getTargetAndTrapForIndex(&target, args->index);

        if (trap && target) {
            IOTrap func;

            func = trap->func;

            if (func) {
                result = (target->*func)(args->p1, args->p2, args->p3,
                                         args->p4, args->p5, args->p6);
            }
        }

        iokit_remove_connect_reference(userClient);
    }

    return result;
}
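/*
 * Illustrative sketch (not part of the original file): the trap entry point
 * above is what IOKitLib's IOConnectTrap0()..IOConnectTrap6() reach, bypassing
 * MIG for up to six scalar arguments. The trap index and argument values below
 * are hypothetical.
 *
 *   #include <IOKit/IOKitLib.h>
 *
 *   kern_return_t callTrap(io_connect_t connect)
 *   {
 *       // index 0 is an assumed entry in the user client's IOExternalTrap table
 *       return IOConnectTrap6(connect, 0, 1, 2, 3, 4, 5, 6);
 *   }
 */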
IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                       IOExternalMethodDispatch * dispatch, OSObject * target, void * reference )
{
    IOReturn    err;
    IOService * object;
    IOByteCount structureOutputSize;

    if (dispatch)
    {
        uint32_t count;
        count = dispatch->checkScalarInputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureInputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureInputDescriptor)
                            ? args->structureInputDescriptor->getLength() : args->structureInputSize)))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkScalarOutputCount;
        if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount))
        {
            return (kIOReturnBadArgument);
        }

        count = dispatch->checkStructureOutputSize;
        if ((kIOUCVariableStructureSize != count)
            && (count != ((args->structureOutputDescriptor)
                            ? args->structureOutputDescriptor->getLength() : args->structureOutputSize)))
        {
            return (kIOReturnBadArgument);
        }

        if (dispatch->function)
            err = (*dispatch->function)(target, reference, args);
        else
            err = kIOReturnNoCompletion;        /* implementor can dispatch */

        return (err);
    }

    // pre-Leopard API's don't do ool structs
    if (args->structureInputDescriptor || args->structureOutputDescriptor)
    {
        err = kIOReturnIPCError;
    }
    else
    {
        structureOutputSize = args->structureOutputSize;

        if (args->asyncWakePort)
        {
            IOExternalAsyncMethod * method;

            if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) || !object)
                return (kIOReturnUnsupported);

            if (kIOUCForegroundOnly & method->flags)
            {
                if (task_is_gpu_denied(current_task()))
                    return (kIOReturnNotPermitted);
            }

            switch (method->flags & kIOUCTypeMask)
            {
                case kIOUCScalarIStructI:
                    err = shim_io_async_method_scalarI_structureI( method, object,
                                args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                                args->scalarInput, args->scalarInputCount,
                                (char *)args->structureInput, args->structureInputSize );
                    break;

                case kIOUCScalarIScalarO:
                    err = shim_io_async_method_scalarI_scalarO( method, object,
                                args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                                args->scalarInput, args->scalarInputCount,
                                args->scalarOutput, &args->scalarOutputCount );
                    break;

                case kIOUCScalarIStructO:
                    err = shim_io_async_method_scalarI_structureO( method, object,
                                args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                                args->scalarInput, args->scalarInputCount,
                                (char *) args->structureOutput, &args->structureOutputSize );
                    break;

                case kIOUCStructIStructO:
                    err = shim_io_async_method_structureI_structureO( method, object,
                                args->asyncWakePort, args->asyncReference, args->asyncReferenceCount,
                                (char *)args->structureInput, args->structureInputSize,
                                (char *) args->structureOutput, &args->structureOutputSize );
                    break;

                default:
                    err = kIOReturnBadArgument;
                    break;
            }
        }
        else
        {
            IOExternalMethod * method;

            if( !(method = getTargetAndMethodForIndex(&object, selector)) || !object)
                return (kIOReturnUnsupported);

            if (kIOUCForegroundOnly & method->flags)
            {
                if (task_is_gpu_denied(current_task()))
                    return (kIOReturnNotPermitted);
            }

            switch (method->flags & kIOUCTypeMask)
            {
                case kIOUCScalarIStructI:
                    err = shim_io_connect_method_scalarI_structureI( method, object,
                                args->scalarInput, args->scalarInputCount,
                                (char *) args->structureInput, args->structureInputSize );
                    break;

                case kIOUCScalarIScalarO:
                    err = shim_io_connect_method_scalarI_scalarO( method, object,
                                args->scalarInput, args->scalarInputCount,
                                args->scalarOutput, &args->scalarOutputCount );
                    break;

                case kIOUCScalarIStructO:
                    err = shim_io_connect_method_scalarI_structureO( method, object,
                                args->scalarInput, args->scalarInputCount,
                                (char *) args->structureOutput, &structureOutputSize );
                    break;

                case kIOUCStructIStructO:
                    err = shim_io_connect_method_structureI_structureO( method, object,
                                (char *) args->structureInput, args->structureInputSize,
                                (char *) args->structureOutput, &structureOutputSize );
                    break;

                default:
                    err = kIOReturnBadArgument;
                    break;
            }
        }

        args->structureOutputSize = structureOutputSize;
    }

    return (err);
}
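/*
 * Illustrative sketch (not part of the original file): a driver-side
 * IOUserClient subclass usually overrides externalMethod() and forwards to the
 * implementation above with a dispatch table, so the scalar/structure count
 * checks run before its handler. The class name, selector and counts below are
 * hypothetical.
 *
 *   class MyUserClient : public IOUserClient
 *   {
 *       static IOReturn sDoSomething(OSObject * target, void * reference,
 *                                    IOExternalMethodArguments * args)
 *       {
 *           // args->scalarInput[0] was already validated against the table below
 *           return kIOReturnSuccess;
 *       }
 *
 *       virtual IOReturn externalMethod(uint32_t selector, IOExternalMethodArguments * args,
 *                                       IOExternalMethodDispatch * dispatch,
 *                                       OSObject * target, void * reference)
 *       {
 *           static IOExternalMethodDispatch sMethods[1] = {
 *               // function, scalar-in count, struct-in size, scalar-out count, struct-out size
 *               { &MyUserClient::sDoSomething, 1, 0, 0, 0 },
 *           };
 *           if (selector < 1) {
 *               dispatch = &sMethods[selector];
 *               if (!target) target = this;
 *           }
 *           return IOUserClient::externalMethod(selector, args, dispatch, target, reference);
 *       }
 *   };
 */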
#if __LP64__
OSMetaClassDefineReservedUnused(IOUserClient, 0);
OSMetaClassDefineReservedUnused(IOUserClient, 1);
#else
OSMetaClassDefineReservedUsed(IOUserClient, 0);
OSMetaClassDefineReservedUsed(IOUserClient, 1);
#endif
OSMetaClassDefineReservedUnused(IOUserClient, 2);
OSMetaClassDefineReservedUnused(IOUserClient, 3);
OSMetaClassDefineReservedUnused(IOUserClient, 4);
OSMetaClassDefineReservedUnused(IOUserClient, 5);
OSMetaClassDefineReservedUnused(IOUserClient, 6);
OSMetaClassDefineReservedUnused(IOUserClient, 7);
OSMetaClassDefineReservedUnused(IOUserClient, 8);
OSMetaClassDefineReservedUnused(IOUserClient, 9);
OSMetaClassDefineReservedUnused(IOUserClient, 10);
OSMetaClassDefineReservedUnused(IOUserClient, 11);
OSMetaClassDefineReservedUnused(IOUserClient, 12);
OSMetaClassDefineReservedUnused(IOUserClient, 13);
OSMetaClassDefineReservedUnused(IOUserClient, 14);
OSMetaClassDefineReservedUnused(IOUserClient, 15);