X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/55e303ae13a4cf49d70f2294092726f2fffb9ef2..b7266188b87f3620ec3f9f717e57194a7dd989fe:/iokit/Kernel/IOUserClient.cpp

diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp
index 9739846e0..7f2c78d13 100644
--- a/iokit/Kernel/IOUserClient.cpp
+++ b/iokit/Kernel/IOUserClient.cpp
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2008 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,22 +23,39 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
* - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ +#include #include +#include #include #include -#include #include #include #include +#include #include +#include +#include #include #include "IOServicePrivate.h" +#include "IOKitKernelInternal.h" + +#define SCALAR64(x) ((io_user_scalar_t)((unsigned int)x)) +#define SCALAR32(x) ((uint32_t )x) +#define ARG32(x) ((void *)SCALAR32(x)) +#define REF64(x) ((io_user_reference_t)((UInt64)(x))) +#define REF32(x) ((int)(x)) + +enum +{ + kIOUCAsync0Flags = 3ULL, + kIOUCAsync64Flag = 1ULL +}; /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ @@ -58,6 +78,8 @@ extern kern_return_t iokit_destroy_object_port( ipc_port_t port ); extern mach_port_name_t iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type ); +extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ); + extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task); extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef); @@ -66,9 +88,11 @@ extern ipc_port_t master_device_port; extern void iokit_retain_port( ipc_port_t port ); extern void iokit_release_port( ipc_port_t port ); +extern void iokit_release_port_send( ipc_port_t port ); extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type ); +#include #include } /* extern "C" */ @@ -190,7 +214,7 @@ bool IOMachPort::noMoreSendersForObject( OSObject * obj, machPort = (IOMachPort *) dict->getObject( (const OSSymbol *) obj ); if( machPort) { - destroyed = (machPort->mscount == *mscount); + destroyed = (machPort->mscount <= *mscount); if( destroyed) dict->removeObject( (const OSSymbol *) obj ); else @@ -383,12 +407,12 @@ class IOServiceUserNotification : public IOUserNotification struct PingMsg { mach_msg_header_t msgHdr; - OSNotificationHeader notifyHeader; + OSNotificationHeader64 notifyHeader; }; - enum { kMaxOutstanding = 256 }; + enum { kMaxOutstanding = 1024 }; - PingMsg * pingMsg; + PingMsg * pingMsg; vm_size_t msgSize; OSArray * newSet; OSObject * lastEntry; @@ -397,11 +421,12 @@ class IOServiceUserNotification : public IOUserNotification public: virtual bool init( mach_port_t port, natural_t type, - OSAsyncReference reference ); + void * reference, vm_size_t referenceSize, + bool clientIs64 ); virtual void free(); static bool _handler( void * target, - void * ref, IOService * newService ); + void * ref, IOService * newService, IONotifier * notifier ); virtual bool handler( void * ref, IOService * newService ); virtual OSObject * getNextObject(); @@ -415,16 +440,21 @@ class IOServiceMessageUserNotification : public IOUserNotification mach_msg_header_t msgHdr; mach_msg_body_t msgBody; mach_msg_port_descriptor_t ports[1]; - OSNotificationHeader notifyHeader; + OSNotificationHeader64 notifyHeader __attribute__ ((packed)); }; PingMsg * pingMsg; vm_size_t msgSize; + uint8_t clientIs64; + int owningPID; public: virtual bool init( mach_port_t port, natural_t type, - OSAsyncReference reference, vm_size_t extraSize ); + void * reference, vm_size_t referenceSize, + vm_size_t extraSize, + bool clientIs64 ); + virtual void free(); static IOReturn _handler( void * target, void * ref, @@ -505,13 +535,17 @@ OSDefineMetaClassAndStructors(IOServiceUserNotification, IOUserNotification) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOServiceUserNotification::init( mach_port_t port, natural_t type, - 
OSAsyncReference reference ) + void * reference, vm_size_t referenceSize, + bool clientIs64 ) { newSet = OSArray::withCapacity( 1 ); if( !newSet) return( false ); - msgSize = sizeof( PingMsg) + 0; + if (referenceSize > sizeof(OSAsyncReference64)) + return( false ); + + msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize; pingMsg = (PingMsg *) IOMalloc( msgSize); if( !pingMsg) return( false ); @@ -527,7 +561,7 @@ bool IOServiceUserNotification::init( mach_port_t port, natural_t type, pingMsg->notifyHeader.size = 0; pingMsg->notifyHeader.type = type; - bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); + bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); return( super::init() ); } @@ -557,7 +591,7 @@ void IOServiceUserNotification::free( void ) } bool IOServiceUserNotification::_handler( void * target, - void * ref, IOService * newService ) + void * ref, IOService * newService, IONotifier * notifier ) { return( ((IOServiceUserNotification *) target)->handler( ref, newService )); } @@ -591,13 +625,13 @@ bool IOServiceUserNotification::handler( void * ref, else pingMsg->msgHdr.msgh_local_port = NULL; - kr = mach_msg_send_from_kernel( &pingMsg->msgHdr, + kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr, pingMsg->msgHdr.msgh_size); if( port) iokit_release_port( port ); if( KERN_SUCCESS != kr) - IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr ); } return( true ); @@ -636,11 +670,19 @@ OSDefineMetaClassAndStructors(IOServiceMessageUserNotification, IOUserNotificati /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, - OSAsyncReference reference, vm_size_t extraSize ) + void * reference, vm_size_t referenceSize, vm_size_t extraSize, + bool client64 ) { - extraSize += sizeof(IOServiceInterestContent); - msgSize = sizeof( PingMsg) + extraSize; + if (referenceSize > sizeof(OSAsyncReference64)) + return( false ); + + clientIs64 = client64; + + owningPID = proc_selfpid(); + + extraSize += sizeof(IOServiceInterestContent64); + msgSize = sizeof(PingMsg) - sizeof(OSAsyncReference64) + referenceSize + extraSize; pingMsg = (PingMsg *) IOMalloc( msgSize); if( !pingMsg) return( false ); @@ -663,7 +705,7 @@ bool IOServiceMessageUserNotification::init( mach_port_t port, natural_t type, pingMsg->notifyHeader.size = extraSize; pingMsg->notifyHeader.type = type; - bcopy( reference, pingMsg->notifyHeader.reference, sizeof(OSAsyncReference) ); + bcopy( reference, pingMsg->notifyHeader.reference, referenceSize ); return( super::init() ); } @@ -694,22 +736,39 @@ IOReturn IOServiceMessageUserNotification::handler( void * ref, UInt32 messageType, IOService * provider, void * messageArgument, vm_size_t argSize ) { - kern_return_t kr; - ipc_port_t thisPort, providerPort; - IOServiceInterestContent * data = (IOServiceInterestContent *) - pingMsg->notifyHeader.content; + kern_return_t kr; + ipc_port_t thisPort, providerPort; + IOServiceInterestContent64 * data = (IOServiceInterestContent64 *) + ((((uint8_t *) pingMsg) + msgSize) - pingMsg->notifyHeader.size); + // == pingMsg->notifyHeader.content; + + if (kIOMessageCopyClientID == messageType) + { + *((void **) messageArgument) = IOCopyLogNameForPID(owningPID); + return (kIOReturnSuccess); + } data->messageType = messageType; - if( argSize == 0) { - argSize = sizeof( messageArgument); - data->messageArgument[0] = 
messageArgument; - } else { + + if( argSize == 0) + { + data->messageArgument[0] = (io_user_reference_t) messageArgument; + if (clientIs64) + argSize = sizeof(data->messageArgument[0]); + else + { + data->messageArgument[0] |= (data->messageArgument[0] << 32); + argSize = sizeof(uint32_t); + } + } + else + { if( argSize > kIOUserNotifyMaxMessageSize) argSize = kIOUserNotifyMaxMessageSize; bcopy( messageArgument, data->messageArgument, argSize ); } - pingMsg->msgHdr.msgh_size = sizeof( PingMsg) - + sizeof( IOServiceInterestContent ) + pingMsg->msgHdr.msgh_size = msgSize - pingMsg->notifyHeader.size + + sizeof( IOServiceInterestContent64 ) - sizeof( data->messageArgument) + argSize; @@ -717,7 +776,7 @@ IOReturn IOServiceMessageUserNotification::handler( void * ref, pingMsg->ports[0].name = providerPort; thisPort = iokit_port_for_object( this, IKOT_IOKIT_OBJECT ); pingMsg->msgHdr.msgh_local_port = thisPort; - kr = mach_msg_send_from_kernel( &pingMsg->msgHdr, + kr = mach_msg_send_from_kernel_proper( &pingMsg->msgHdr, pingMsg->msgHdr.msgh_size); if( thisPort) iokit_release_port( thisPort ); @@ -725,7 +784,7 @@ IOReturn IOServiceMessageUserNotification::handler( void * ref, iokit_release_port( providerPort ); if( KERN_SUCCESS != kr) - IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr ); return( kIOReturnSuccess ); } @@ -752,57 +811,115 @@ void IOUserClient::setAsyncReference(OSAsyncReference asyncRef, mach_port_t wakePort, void *callback, void *refcon) { - asyncRef[kIOAsyncReservedIndex] = (natural_t) wakePort; - asyncRef[kIOAsyncCalloutFuncIndex] = (natural_t) callback; - asyncRef[kIOAsyncCalloutRefconIndex] = (natural_t) refcon; + asyncRef[kIOAsyncReservedIndex] = ((uintptr_t) wakePort) + | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); + asyncRef[kIOAsyncCalloutFuncIndex] = (uintptr_t) callback; + asyncRef[kIOAsyncCalloutRefconIndex] = (uintptr_t) refcon; } -IOReturn IOUserClient::clientHasPrivilege( void * securityToken, - const char * privilegeName ) +void IOUserClient::setAsyncReference64(OSAsyncReference64 asyncRef, + mach_port_t wakePort, + mach_vm_address_t callback, io_user_reference_t refcon) { - kern_return_t kr; - security_token_t token; - mach_msg_type_number_t count; - - count = TASK_SECURITY_TOKEN_COUNT; - kr = task_info( (task_t) securityToken, TASK_SECURITY_TOKEN, - (task_info_t) &token, &count ); + asyncRef[kIOAsyncReservedIndex] = ((io_user_reference_t) wakePort) + | (kIOUCAsync0Flags & asyncRef[kIOAsyncReservedIndex]); + asyncRef[kIOAsyncCalloutFuncIndex] = (io_user_reference_t) callback; + asyncRef[kIOAsyncCalloutRefconIndex] = refcon; +} - if (KERN_SUCCESS != kr) - {} - else if (!strcmp(privilegeName, kIOClientPrivilegeAdministrator)) - { - if (0 != token.val[0]) - kr = kIOReturnNotPrivileged; - } - else if (!strcmp(privilegeName, kIOClientPrivilegeLocalUser)) - { - OSArray * array; - OSDictionary * user = 0; +static OSDictionary * CopyConsoleUser(UInt32 uid) +{ + OSArray * array; + OSDictionary * user = 0; if ((array = OSDynamicCast(OSArray, IORegistryEntry::getRegistryRoot()->copyProperty(gIOConsoleUsersKey)))) { for (unsigned int idx = 0; (user = OSDynamicCast(OSDictionary, array->getObject(idx))); - idx++) - { - OSNumber * num; - if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey))) - && (token.val[0] == num->unsigned32BitValue())) - break; + idx++) { + OSNumber * num; + + if ((num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionUIDKey))) + && (uid 
== num->unsigned32BitValue())) { + user->retain(); + break; + } } array->release(); } - if (!user) - kr = kIOReturnNotPrivileged; - } + return user; +} + +IOReturn IOUserClient::clientHasPrivilege( void * securityToken, + const char * privilegeName ) +{ + kern_return_t kr; + security_token_t token; + mach_msg_type_number_t count; + task_t task; + OSDictionary * user; + bool secureConsole; + + if ((secureConsole = !strncmp(privilegeName, kIOClientPrivilegeSecureConsoleProcess, + sizeof(kIOClientPrivilegeSecureConsoleProcess)))) + task = (task_t)((IOUCProcessToken *)securityToken)->token; else - kr = kIOReturnUnsupported; + task = (task_t)securityToken; + + count = TASK_SECURITY_TOKEN_COUNT; + kr = task_info( task, TASK_SECURITY_TOKEN, (task_info_t) &token, &count ); + + if (KERN_SUCCESS != kr) + {} + else if (!strncmp(privilegeName, kIOClientPrivilegeAdministrator, + sizeof(kIOClientPrivilegeAdministrator))) { + if (0 != token.val[0]) + kr = kIOReturnNotPrivileged; + } else if (!strncmp(privilegeName, kIOClientPrivilegeLocalUser, + sizeof(kIOClientPrivilegeLocalUser))) { + user = CopyConsoleUser(token.val[0]); + if ( user ) + user->release(); + else + kr = kIOReturnNotPrivileged; + } else if (secureConsole || !strncmp(privilegeName, kIOClientPrivilegeConsoleUser, + sizeof(kIOClientPrivilegeConsoleUser))) { + user = CopyConsoleUser(token.val[0]); + if ( user ) { + if (user->getObject(gIOConsoleSessionOnConsoleKey) != kOSBooleanTrue) + kr = kIOReturnNotPrivileged; + else if ( secureConsole ) { + OSNumber * pid = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionSecureInputPIDKey)); + if ( pid && pid->unsigned32BitValue() != ((IOUCProcessToken *)securityToken)->pid) + kr = kIOReturnNotPrivileged; + } + user->release(); + } + else + kr = kIOReturnNotPrivileged; + } else + kr = kIOReturnUnsupported; return (kr); } +bool IOUserClient::init() +{ + if( getPropertyTable()) + return true; + else + return super::init(); +} + +bool IOUserClient::init(OSDictionary * dictionary) +{ + if( getPropertyTable()) + return true; + else + return super::init(dictionary); +} + bool IOUserClient::initWithTask(task_t owningTask, void * securityID, UInt32 type ) @@ -857,6 +974,14 @@ IOReturn IOUserClient::registerNotificationPort( return( kIOReturnUnsupported); } +IOReturn IOUserClient::registerNotificationPort( + mach_port_t port, + UInt32 type, + io_user_reference_t refCon) +{ + return (registerNotificationPort(port, type, (UInt32) refCon)); +} + IOReturn IOUserClient::getNotificationSemaphore( UInt32 notification_type, semaphore_t * semaphore ) { @@ -875,11 +1000,22 @@ IOReturn IOUserClient::clientMemoryForType( UInt32 type, return( kIOReturnUnsupported); } +#if !__LP64__ IOMemoryMap * IOUserClient::mapClientMemory( IOOptionBits type, task_t task, IOOptionBits mapFlags, IOVirtualAddress atAddress ) +{ + return (NULL); +} +#endif + +IOMemoryMap * IOUserClient::mapClientMemory64( + IOOptionBits type, + task_t task, + IOOptionBits mapFlags, + mach_vm_address_t atAddress ) { IOReturn err; IOOptionBits options = 0; @@ -892,7 +1028,7 @@ IOMemoryMap * IOUserClient::mapClientMemory( options = (options & ~kIOMapUserOptionsMask) | (mapFlags & kIOMapUserOptionsMask); - map = memory->map( task, atAddress, options ); + map = memory->createMappingInTask( task, atAddress, options ); memory->release(); } @@ -961,49 +1097,126 @@ getTargetAndTrapForIndex(IOService ** targetP, UInt32 index) return trap; } +IOReturn IOUserClient::releaseAsyncReference64(OSAsyncReference64 reference) +{ + mach_port_t port; + port = 
(mach_port_t) (reference[0] & ~kIOUCAsync0Flags); + + if (MACH_PORT_NULL != port) + iokit_release_port_send(port); + + return (kIOReturnSuccess); +} + +IOReturn IOUserClient::releaseNotificationPort(mach_port_t port) +{ + if (MACH_PORT_NULL != port) + iokit_release_port_send(port); + + return (kIOReturnSuccess); +} + IOReturn IOUserClient::sendAsyncResult(OSAsyncReference reference, IOReturn result, void *args[], UInt32 numArgs) { - struct ReplyMsg { - mach_msg_header_t msgHdr; - OSNotificationHeader notifyHdr; - IOAsyncCompletionContent asyncContent; - void * args[kMaxAsyncArgs]; + OSAsyncReference64 reference64; + io_user_reference_t args64[kMaxAsyncArgs]; + unsigned int idx; + + if (numArgs > kMaxAsyncArgs) + return kIOReturnMessageTooLarge; + + for (idx = 0; idx < kOSAsyncRef64Count; idx++) + reference64[idx] = REF64(reference[idx]); + + for (idx = 0; idx < numArgs; idx++) + args64[idx] = REF64(args[idx]); + + return (sendAsyncResult64(reference64, result, args64, numArgs)); +} + +IOReturn IOUserClient::sendAsyncResult64(OSAsyncReference64 reference, + IOReturn result, io_user_reference_t args[], UInt32 numArgs) +{ + struct ReplyMsg + { + mach_msg_header_t msgHdr; + union + { + struct + { + OSNotificationHeader notifyHdr; + IOAsyncCompletionContent asyncContent; + uint32_t args[kMaxAsyncArgs]; + } msg32; + struct + { + OSNotificationHeader64 notifyHdr; + IOAsyncCompletionContent asyncContent; + io_user_reference_t args[kMaxAsyncArgs] __attribute__ ((packed)); + } msg64; + } m; }; - ReplyMsg replyMsg; - mach_port_t replyPort; + ReplyMsg replyMsg; + mach_port_t replyPort; kern_return_t kr; // If no reply port, do nothing. - replyPort = (mach_port_t) reference[0]; - if(replyPort == MACH_PORT_NULL) + replyPort = (mach_port_t) (reference[0] & ~kIOUCAsync0Flags); + if (replyPort == MACH_PORT_NULL) return kIOReturnSuccess; - if(numArgs > kMaxAsyncArgs) + if (numArgs > kMaxAsyncArgs) return kIOReturnMessageTooLarge; + replyMsg.msgHdr.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND /*remote*/, - 0 /*local*/); - replyMsg.msgHdr.msgh_size = - sizeof(replyMsg) - (kMaxAsyncArgs-numArgs)*sizeof(void *); + 0 /*local*/); replyMsg.msgHdr.msgh_remote_port = replyPort; - replyMsg.msgHdr.msgh_local_port = 0; - replyMsg.msgHdr.msgh_id = kOSNotificationMessageID; - - replyMsg.notifyHdr.size = sizeof(IOAsyncCompletionContent) - + numArgs*sizeof(void *); - replyMsg.notifyHdr.type = kIOAsyncCompletionNotificationType; - bcopy( reference, replyMsg.notifyHdr.reference, sizeof(OSAsyncReference)); - - replyMsg.asyncContent.result = result; - if(numArgs > 0) - bcopy(args, replyMsg.args, sizeof(void *)*numArgs); - kr = mach_msg_send_from_kernel( &replyMsg.msgHdr, + replyMsg.msgHdr.msgh_local_port = 0; + replyMsg.msgHdr.msgh_id = kOSNotificationMessageID; + if (kIOUCAsync64Flag & reference[0]) + { + replyMsg.msgHdr.msgh_size = + sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg64) + - (kMaxAsyncArgs - numArgs) * sizeof(io_user_reference_t); + replyMsg.m.msg64.notifyHdr.size = sizeof(IOAsyncCompletionContent) + + numArgs * sizeof(io_user_reference_t); + replyMsg.m.msg64.notifyHdr.type = kIOAsyncCompletionNotificationType; + bcopy(reference, replyMsg.m.msg64.notifyHdr.reference, sizeof(OSAsyncReference64)); + + replyMsg.m.msg64.asyncContent.result = result; + if (numArgs) + bcopy(args, replyMsg.m.msg64.args, numArgs * sizeof(io_user_reference_t)); + } + else + { + unsigned int idx; + + replyMsg.msgHdr.msgh_size = + sizeof(replyMsg.msgHdr) + sizeof(replyMsg.m.msg32) + - (kMaxAsyncArgs - numArgs) * sizeof(uint32_t); + + 
replyMsg.m.msg32.notifyHdr.size = sizeof(IOAsyncCompletionContent) + + numArgs * sizeof(uint32_t); + replyMsg.m.msg32.notifyHdr.type = kIOAsyncCompletionNotificationType; + + for (idx = 0; idx < kOSAsyncRefCount; idx++) + replyMsg.m.msg32.notifyHdr.reference[idx] = REF32(reference[idx]); + + replyMsg.m.msg32.asyncContent.result = result; + + for (idx = 0; idx < numArgs; idx++) + replyMsg.m.msg32.args[idx] = REF32(args[idx]); + } + + kr = mach_msg_send_from_kernel_proper( &replyMsg.msgHdr, replyMsg.msgHdr.msgh_size); if( KERN_SUCCESS != kr) - IOLog("%s: mach_msg_send_from_kernel {%x}\n", __FILE__, kr ); + IOLog("%s: mach_msg_send_from_kernel_proper {%x}\n", __FILE__, kr ); return kr; } + /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ extern "C" { @@ -1018,13 +1231,100 @@ kern_return_t is_io_object_get_class( io_object_t object, io_name_t className ) { - if( !object) - return( kIOReturnBadArgument ); - - strcpy( className, object->getMetaClass()->getClassName()); + const OSMetaClass* my_obj = NULL; + + if( !object) + return( kIOReturnBadArgument ); + + my_obj = object->getMetaClass(); + if (!my_obj) { + return (kIOReturnNotFound); + } + + strlcpy( className, my_obj->getClassName(), sizeof(io_name_t)); return( kIOReturnSuccess ); } +/* Routine io_object_get_superclass */ +kern_return_t is_io_object_get_superclass( + mach_port_t master_port, + io_name_t obj_name, + io_name_t class_name) +{ + const OSMetaClass* my_obj = NULL; + const OSMetaClass* superclass = NULL; + const OSSymbol *my_name = NULL; + const char *my_cstr = NULL; + + if (!obj_name || !class_name) + return (kIOReturnBadArgument); + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + my_name = OSSymbol::withCString(obj_name); + + if (my_name) { + my_obj = OSMetaClass::getMetaClassWithName(my_name); + my_name->release(); + } + if (my_obj) { + superclass = my_obj->getSuperClass(); + } + + if (!superclass) { + return( kIOReturnNotFound ); + } + + my_cstr = superclass->getClassName(); + + if (my_cstr) { + strlcpy(class_name, my_cstr, sizeof(io_name_t)); + return( kIOReturnSuccess ); + } + return (kIOReturnNotFound); +} + +/* Routine io_object_get_bundle_identifier */ +kern_return_t is_io_object_get_bundle_identifier( + mach_port_t master_port, + io_name_t obj_name, + io_name_t bundle_name) +{ + const OSMetaClass* my_obj = NULL; + const OSSymbol *my_name = NULL; + const OSSymbol *identifier = NULL; + const char *my_cstr = NULL; + + if (!obj_name || !bundle_name) + return (kIOReturnBadArgument); + + if( master_port != master_device_port) + return( kIOReturnNotPrivileged); + + my_name = OSSymbol::withCString(obj_name); + + if (my_name) { + my_obj = OSMetaClass::getMetaClassWithName(my_name); + my_name->release(); + } + + if (my_obj) { + identifier = my_obj->getKmodName(); + } + if (!identifier) { + return( kIOReturnNotFound ); + } + + my_cstr = identifier->getCStringNoCopy(); + if (my_cstr) { + strlcpy(bundle_name, identifier->getCStringNoCopy(), sizeof(io_name_t)); + return( kIOReturnSuccess ); + } + + return (kIOReturnBadArgument); +} + /* Routine io_object_conforms_to */ kern_return_t is_io_object_conforms_to( io_object_t object, @@ -1041,7 +1341,7 @@ kern_return_t is_io_object_conforms_to( /* Routine io_object_get_retain_count */ kern_return_t is_io_object_get_retain_count( io_object_t object, - int *retainCount ) + uint32_t *retainCount ) { if( !object) return( kIOReturnBadArgument ); @@ -1122,13 +1422,15 @@ kern_return_t is_io_service_match_property_table_ool( io_object_t 
service, io_buf_ptr_t matching, mach_msg_type_number_t matchingCnt, - natural_t *result, + kern_return_t *result, boolean_t *matches ) { - kern_return_t kr; - vm_offset_t data; + kern_return_t kr; + vm_offset_t data; + vm_map_offset_t map_data; - kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching ); + data = CAST_DOWN(vm_offset_t, map_data); if( KERN_SUCCESS == kr) { // must return success after vm_map_copyout() succeeds @@ -1172,13 +1474,15 @@ kern_return_t is_io_service_get_matching_services_ool( mach_port_t master_port, io_buf_ptr_t matching, mach_msg_type_number_t matchingCnt, - natural_t *result, + kern_return_t *result, io_object_t *existing ) { kern_return_t kr; vm_offset_t data; + vm_map_offset_t map_data; - kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching ); + data = CAST_DOWN(vm_offset_t, map_data); if( KERN_SUCCESS == kr) { // must return success after vm_map_copyout() succeeds @@ -1190,14 +1494,14 @@ kern_return_t is_io_service_get_matching_services_ool( return( kr ); } -/* Routine io_service_add_notification */ -kern_return_t is_io_service_add_notification( +static kern_return_t internal_io_service_add_notification( mach_port_t master_port, io_name_t notification_type, io_string_t matching, mach_port_t port, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, + void * reference, + vm_size_t referenceSize, + bool client64, io_object_t * notification ) { IOServiceUserNotification * userNotify = 0; @@ -1237,17 +1541,16 @@ kern_return_t is_io_service_add_notification( userNotify = new IOServiceUserNotification; if( userNotify && !userNotify->init( port, userMsgType, - reference)) { + reference, referenceSize, client64)) { userNotify->release(); userNotify = 0; } if( !userNotify) continue; - notify = IOService::addNotification( sym, dict, + notify = IOService::addMatchingNotification( sym, dict, &userNotify->_handler, userNotify ); if( notify) { - dict = 0; *notification = userNotify; userNotify->setNotification( notify ); err = kIOReturnSuccess; @@ -1264,33 +1567,100 @@ kern_return_t is_io_service_add_notification( return( err ); } -/* Routine io_service_add_notification_ool */ -kern_return_t is_io_service_add_notification_ool( + +/* Routine io_service_add_notification */ +kern_return_t is_io_service_add_notification( + mach_port_t master_port, + io_name_t notification_type, + io_string_t matching, + mach_port_t port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t * notification ) +{ + return (internal_io_service_add_notification(master_port, notification_type, + matching, port, &reference[0], sizeof(io_async_ref_t), + false, notification)); +} + +/* Routine io_service_add_notification_64 */ +kern_return_t is_io_service_add_notification_64( + mach_port_t master_port, + io_name_t notification_type, + io_string_t matching, + mach_port_t wake_port, + io_async_ref64_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t *notification ) +{ + return (internal_io_service_add_notification(master_port, notification_type, + matching, wake_port, &reference[0], sizeof(io_async_ref64_t), + true, notification)); +} + + +static kern_return_t internal_io_service_add_notification_ool( mach_port_t master_port, io_name_t notification_type, io_buf_ptr_t matching, mach_msg_type_number_t matchingCnt, mach_port_t wake_port, - io_async_ref_t reference, - 
mach_msg_type_number_t referenceCnt, - natural_t *result, + void * reference, + vm_size_t referenceSize, + bool client64, + kern_return_t *result, io_object_t *notification ) { kern_return_t kr; vm_offset_t data; + vm_map_offset_t map_data; - kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) matching ); + kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching ); + data = CAST_DOWN(vm_offset_t, map_data); if( KERN_SUCCESS == kr) { // must return success after vm_map_copyout() succeeds - *result = is_io_service_add_notification( master_port, notification_type, - (char *) data, wake_port, reference, referenceCnt, notification ); + *result = internal_io_service_add_notification( master_port, notification_type, + (char *) data, wake_port, reference, referenceSize, client64, notification ); vm_deallocate( kernel_map, data, matchingCnt ); } return( kr ); } +/* Routine io_service_add_notification_ool */ +kern_return_t is_io_service_add_notification_ool( + mach_port_t master_port, + io_name_t notification_type, + io_buf_ptr_t matching, + mach_msg_type_number_t matchingCnt, + mach_port_t wake_port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + kern_return_t *result, + io_object_t *notification ) +{ + return (internal_io_service_add_notification_ool(master_port, notification_type, + matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref_t), + false, result, notification)); +} + +/* Routine io_service_add_notification_ool_64 */ +kern_return_t is_io_service_add_notification_ool_64( + mach_port_t master_port, + io_name_t notification_type, + io_buf_ptr_t matching, + mach_msg_type_number_t matchingCnt, + mach_port_t wake_port, + io_async_ref64_t reference, + mach_msg_type_number_t referenceCnt, + kern_return_t *result, + io_object_t *notification ) +{ + return (internal_io_service_add_notification_ool(master_port, notification_type, + matching, matchingCnt, wake_port, &reference[0], sizeof(io_async_ref64_t), + true, result, notification)); +} /* Routine io_service_add_notification_old */ kern_return_t is_io_service_add_notification_old( @@ -1298,6 +1668,7 @@ kern_return_t is_io_service_add_notification_old( io_name_t notification_type, io_string_t matching, mach_port_t port, + // for binary compatibility reasons, this must be natural_t for ILP32 natural_t ref, io_object_t * notification ) { @@ -1305,13 +1676,14 @@ kern_return_t is_io_service_add_notification_old( matching, port, &ref, 1, notification )); } -/* Routine io_service_add_message_notification */ -kern_return_t is_io_service_add_interest_notification( + +static kern_return_t internal_io_service_add_interest_notification( io_object_t _service, io_name_t type_of_interest, mach_port_t port, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, + void * reference, + vm_size_t referenceSize, + bool client64, io_object_t * notification ) { @@ -1328,7 +1700,9 @@ kern_return_t is_io_service_add_interest_notification( userNotify = new IOServiceMessageUserNotification; if( userNotify && !userNotify->init( port, kIOServiceMessageNotificationType, - reference, kIOUserNotifyMaxMessageSize )) { + reference, referenceSize, + kIOUserNotifyMaxMessageSize, + client64 )) { userNotify->release(); userNotify = 0; } @@ -1351,6 +1725,33 @@ kern_return_t is_io_service_add_interest_notification( return( err ); } +/* Routine io_service_add_message_notification */ +kern_return_t is_io_service_add_interest_notification( + io_object_t service, + io_name_t type_of_interest, + mach_port_t port, + 
io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t * notification ) +{ + return (internal_io_service_add_interest_notification(service, type_of_interest, + port, &reference[0], sizeof(io_async_ref_t), false, notification)); +} + +/* Routine io_service_add_interest_notification_64 */ +kern_return_t is_io_service_add_interest_notification_64( + io_object_t service, + io_name_t type_of_interest, + mach_port_t wake_port, + io_async_ref64_t reference, + mach_msg_type_number_t referenceCnt, + io_object_t *notification ) +{ + return (internal_io_service_add_interest_notification(service, type_of_interest, + wake_port, &reference[0], sizeof(io_async_ref64_t), true, notification)); +} + + /* Routine io_service_acknowledge_notification */ kern_return_t is_io_service_acknowledge_notification( io_object_t _service, @@ -1398,7 +1799,7 @@ kern_return_t is_io_registry_get_root_entry( kern_return_t is_io_registry_create_iterator( mach_port_t master_port, io_name_t plane, - int options, + uint32_t options, io_object_t *iterator ) { if( master_port != master_device_port) @@ -1414,7 +1815,7 @@ kern_return_t is_io_registry_create_iterator( kern_return_t is_io_registry_entry_create_iterator( io_object_t registry_entry, io_name_t plane, - int options, + uint32_t options, io_object_t *iterator ) { CHECK( IORegistryEntry, registry_entry, entry ); @@ -1552,6 +1953,18 @@ kern_return_t is_io_registry_entry_get_location_in_plane( return( kIOReturnNotFound ); } +/* Routine io_registry_entry_get_registry_entry_id */ +kern_return_t is_io_registry_entry_get_registry_entry_id( + io_object_t registry_entry, + uint64_t *entry_id ) +{ + CHECK( IORegistryEntry, registry_entry, entry ); + + *entry_id = entry->getRegistryEntryID(); + + return (kIOReturnSuccess); +} + // Create a vm_map_copy_t or kalloc'ed data for memory // to be copied out. ipc will free after the copyout. 
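(Aside, not part of the diff: the hunk above adds is_io_registry_entry_get_registry_entry_id, the MIG backend for 64-bit registry entry IDs. A minimal user-space sketch of how that routine is reached through its IOKitLib wrapper, assuming a program linked with -framework IOKit, could look like the following.)

/* Minimal user-space sketch (assumed usage, not part of this diff):
 * IORegistryEntryGetRegistryEntryID() is the IOKitLib call served by the
 * new is_io_registry_entry_get_registry_entry_id routine. */
#include <IOKit/IOKitLib.h>
#include <stdio.h>

int main(void)
{
    /* The registry root always exists, so it makes a safe example entry. */
    io_registry_entry_t entry = IORegistryGetRootEntry(kIOMasterPortDefault);
    uint64_t entryID = 0;

    if (IORegistryEntryGetRegistryEntryID(entry, &entryID) == KERN_SUCCESS)
        printf("registry entry ID: 0x%llx\n", (unsigned long long) entryID);

    IOObjectRelease(entry);
    return 0;
}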
@@ -1561,7 +1974,7 @@ static kern_return_t copyoutkdata( void * data, vm_size_t len, kern_return_t err; vm_map_copy_t copy; - err = vm_map_copyin( kernel_map, (vm_offset_t) data, len, + err = vm_map_copyin( kernel_map, CAST_USER_ADDR_T(data), len, false /* src_destroy */, ©); assert( err == KERN_SUCCESS ); @@ -1575,7 +1988,7 @@ static kern_return_t copyoutkdata( void * data, vm_size_t len, kern_return_t is_io_registry_entry_get_property_bytes( io_object_t registry_entry, io_name_t property_name, - io_scalar_inband_t buf, + io_struct_inband_t buf, mach_msg_type_number_t *dataCnt ) { OSObject * obj; @@ -1633,6 +2046,7 @@ kern_return_t is_io_registry_entry_get_property_bytes( return( ret ); } + /* Routine io_registry_entry_get_property */ kern_return_t is_io_registry_entry_get_property( io_object_t registry_entry, @@ -1676,7 +2090,7 @@ kern_return_t is_io_registry_entry_get_property_recursively( io_object_t registry_entry, io_name_t plane, io_name_t property_name, - int options, + uint32_t options, io_buf_ptr_t *properties, mach_msg_type_number_t *propertiesCnt ) { @@ -1749,16 +2163,18 @@ kern_return_t is_io_registry_entry_set_properties io_object_t registry_entry, io_buf_ptr_t properties, mach_msg_type_number_t propertiesCnt, - natural_t * result) + kern_return_t * result) { OSObject * obj; kern_return_t err; IOReturn res; vm_offset_t data; + vm_map_offset_t map_data; CHECK( IORegistryEntry, registry_entry, entry ); - err = vm_map_copyout( kernel_map, &data, (vm_map_copy_t) properties ); + err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); + data = CAST_DOWN(vm_offset_t, map_data); if( KERN_SUCCESS == err) { @@ -1809,7 +2225,7 @@ kern_return_t is_io_registry_entry_get_parent_iterator( /* Routine io_service_get_busy_state */ kern_return_t is_io_service_get_busy_state( io_object_t _service, - int *busyState ) + uint32_t *busyState ) { CHECK( IOService, _service, service ); @@ -1821,11 +2237,15 @@ kern_return_t is_io_service_get_busy_state( /* Routine io_service_get_state */ kern_return_t is_io_service_get_state( io_object_t _service, - uint64_t *state ) + uint64_t *state, + uint32_t *busy_state, + uint64_t *accumulated_busy_time ) { CHECK( IOService, _service, service ); - *state = service->getState(); + *state = service->getState(); + *busy_state = service->getBusyState(); + *accumulated_busy_time = service->getAccumulatedBusyTime(); return( kIOReturnSuccess ); } @@ -1835,15 +2255,21 @@ kern_return_t is_io_service_wait_quiet( io_object_t _service, mach_timespec_t wait_time ) { + uint64_t timeoutNS; + CHECK( IOService, _service, service ); - return( service->waitQuiet( &wait_time )); + timeoutNS = wait_time.tv_sec; + timeoutNS *= kSecondScale; + timeoutNS += wait_time.tv_nsec; + + return( service->waitQuiet(timeoutNS) ); } /* Routine io_service_request_probe */ kern_return_t is_io_service_request_probe( io_object_t _service, - int options ) + uint32_t options ) { CHECK( IOService, _service, service ); @@ -1855,7 +2281,7 @@ kern_return_t is_io_service_request_probe( kern_return_t is_io_service_open( io_object_t _service, task_t owningTask, - int connect_type, + uint32_t connect_type, io_object_t *connection ) { IOUserClient * client; @@ -1864,7 +2290,7 @@ kern_return_t is_io_service_open( CHECK( IOService, _service, service ); err = service->newUserClient( owningTask, (void *) owningTask, - connect_type, &client ); + connect_type, 0, &client ); if( err == kIOReturnSuccess) { assert( OSDynamicCast(IOUserClient, client) ); @@ -1874,19 +2300,120 @@ kern_return_t 
is_io_service_open( return( err); } -/* Routine io_service_close */ -kern_return_t is_io_service_close( - io_object_t connection ) +/* Routine io_service_open_ndr */ +kern_return_t is_io_service_open_extended( + io_object_t _service, + task_t owningTask, + uint32_t connect_type, + NDR_record_t ndr, + io_buf_ptr_t properties, + mach_msg_type_number_t propertiesCnt, + kern_return_t * result, + io_object_t *connection ) { - OSSet * mappings; - if ((mappings = OSDynamicCast(OSSet, connection))) - return( kIOReturnSuccess ); + IOUserClient * client = 0; + kern_return_t err = KERN_SUCCESS; + IOReturn res = kIOReturnSuccess; + OSDictionary * propertiesDict = 0; + bool crossEndian; + bool disallowAccess; - CHECK( IOUserClient, connection, client ); + CHECK( IOService, _service, service ); - client->clientClose(); + do + { + if (properties) + { + OSObject * obj; + vm_offset_t data; + vm_map_offset_t map_data; + + err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties ); + res = err; + data = CAST_DOWN(vm_offset_t, map_data); + if (KERN_SUCCESS == err) + { + // must return success after vm_map_copyout() succeeds + obj = OSUnserializeXML( (const char *) data ); + vm_deallocate( kernel_map, data, propertiesCnt ); + propertiesDict = OSDynamicCast(OSDictionary, obj); + if (!propertiesDict) + { + res = kIOReturnBadArgument; + if (obj) + obj->release(); + } + } + if (kIOReturnSuccess != res) + break; + } - return( kIOReturnSuccess ); + crossEndian = (ndr.int_rep != NDR_record.int_rep); + if (crossEndian) + { + if (!propertiesDict) + propertiesDict = OSDictionary::withCapacity(4); + OSData * data = OSData::withBytes(&ndr, sizeof(ndr)); + if (data) + { + if (propertiesDict) + propertiesDict->setObject(kIOUserClientCrossEndianKey, data); + data->release(); + } + } + + res = service->newUserClient( owningTask, (void *) owningTask, + connect_type, propertiesDict, &client ); + + if (propertiesDict) + propertiesDict->release(); + + if (res == kIOReturnSuccess) + { + assert( OSDynamicCast(IOUserClient, client) ); + + disallowAccess = (crossEndian + && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey)) + && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey))); + + if (disallowAccess) + { + client->clientClose(); + client->release(); + client = 0; + res = kIOReturnUnsupported; + break; + } + client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey)); + OSString * creatorName = IOCopyLogNameForPID(proc_selfpid()); + if (creatorName) + { + client->setProperty(kIOUserClientCreatorKey, creatorName); + creatorName->release(); + } + } + } + while (false); + + *connection = client; + *result = res; + + return (err); +} + +/* Routine io_service_close */ +kern_return_t is_io_service_close( + io_object_t connection ) +{ + OSSet * mappings; + if ((mappings = OSDynamicCast(OSSet, connection))) + return( kIOReturnSuccess ); + + CHECK( IOUserClient, connection, client ); + + client->clientClose(); + + return( kIOReturnSuccess ); } /* Routine io_connect_get_service */ @@ -1910,9 +2437,22 @@ kern_return_t is_io_connect_get_service( /* Routine io_connect_set_notification_port */ kern_return_t is_io_connect_set_notification_port( io_object_t connection, - int notification_type, + uint32_t notification_type, + mach_port_t port, + uint32_t reference) +{ + CHECK( IOUserClient, connection, client ); + + return( client->registerNotificationPort( port, notification_type, + (io_user_reference_t) reference )); +} + +/* Routine 
io_connect_set_notification_port */ +kern_return_t is_io_connect_set_notification_port_64( + io_object_t connection, + uint32_t notification_type, mach_port_t port, - int reference) + io_user_reference_t reference) { CHECK( IOUserClient, connection, client ); @@ -1920,34 +2460,36 @@ kern_return_t is_io_connect_set_notification_port( reference )); } -kern_return_t is_io_connect_map_memory( - io_object_t connect, - int type, - task_t task, - vm_address_t * mapAddr, - vm_size_t * mapSize, - int flags ) +/* Routine io_connect_map_memory_into_task */ +kern_return_t is_io_connect_map_memory_into_task +( + io_connect_t connection, + uint32_t memory_type, + task_t into_task, + mach_vm_address_t *address, + mach_vm_size_t *size, + uint32_t flags +) { IOReturn err; IOMemoryMap * map; - CHECK( IOUserClient, connect, client ); + CHECK( IOUserClient, connection, client ); - map = client->mapClientMemory( type, task, flags, *mapAddr ); + map = client->mapClientMemory64( memory_type, into_task, flags, *address ); if( map) { - *mapAddr = map->getVirtualAddress(); - if( mapSize) - *mapSize = map->getLength(); + *address = map->getAddress(); + if( size) + *size = map->getSize(); - if( task != current_task()) { + if( client->sharedInstance + || (into_task != current_task())) { // push a name out to the task owning the map, // so we can clean up maps -#if IOASSERT - mach_port_name_t name = -#endif - IOMachPort::makeSendRightForTask( - task, map, IKOT_IOKIT_OBJECT ); + mach_port_name_t name __unused = + IOMachPort::makeSendRightForTask( + into_task, map, IKOT_IOKIT_OBJECT ); assert( name ); } else { @@ -1968,42 +2510,125 @@ kern_return_t is_io_connect_map_memory( return( err ); } -kern_return_t is_io_connect_unmap_memory( +/* Routine is_io_connect_map_memory */ +kern_return_t is_io_connect_map_memory( io_object_t connect, - int type, + uint32_t type, task_t task, - vm_address_t mapAddr ) + vm_address_t * mapAddr, + vm_size_t * mapSize, + uint32_t flags ) +{ + IOReturn err; + mach_vm_address_t address; + mach_vm_size_t size; + + address = SCALAR64(*mapAddr); + size = SCALAR64(*mapSize); + + err = is_io_connect_map_memory_into_task(connect, type, task, &address, &size, flags); + + *mapAddr = SCALAR32(address); + *mapSize = SCALAR32(size); + + return (err); +} + +IOMemoryMap * IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem) +{ + OSIterator * iter; + IOMemoryMap * map = 0; + + IOLockLock(gIOObjectPortLock); + + iter = OSCollectionIterator::withCollection(mappings); + if(iter) + { + while ((map = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) + { + if(mem == map->getMemoryDescriptor()) + { + map->retain(); + mappings->removeObject(map); + break; + } + } + iter->release(); + } + + IOLockUnlock(gIOObjectPortLock); + + return (map); +} + +/* Routine io_connect_unmap_memory_from_task */ +kern_return_t is_io_connect_unmap_memory_from_task +( + io_connect_t connection, + uint32_t memory_type, + task_t from_task, + mach_vm_address_t address) { IOReturn err; IOOptionBits options = 0; IOMemoryDescriptor * memory; IOMemoryMap * map; - CHECK( IOUserClient, connect, client ); + CHECK( IOUserClient, connection, client ); - err = client->clientMemoryForType( (UInt32) type, &options, &memory ); + err = client->clientMemoryForType( (UInt32) memory_type, &options, &memory ); if( memory && (kIOReturnSuccess == err)) { options = (options & ~kIOMapUserOptionsMask) | kIOMapAnywhere | kIOMapReference; - map = memory->map( task, mapAddr, options ); + map = memory->createMappingInTask( from_task, address, options 
); memory->release(); - if( map) { + if( map) + { IOLockLock( gIOObjectPortLock); if( client->mappings) client->mappings->removeObject( map); IOLockUnlock( gIOObjectPortLock); - IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); - map->release(); - } else + + mach_port_name_t name = 0; + if (from_task != current_task()) + name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT ); + if (name) + { + map->userClientUnmap(); + err = iokit_mod_send_right( from_task, name, -2 ); + err = kIOReturnSuccess; + } + else + IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT ); + if (from_task == current_task()) + map->release(); + } + else err = kIOReturnBadArgument; } return( err ); } +kern_return_t is_io_connect_unmap_memory( + io_object_t connect, + uint32_t type, + task_t task, + vm_address_t mapAddr ) +{ + IOReturn err; + mach_vm_address_t address; + + address = SCALAR64(mapAddr); + + err = is_io_connect_unmap_memory_from_task(connect, type, task, mapAddr); + + return (err); +} + /* Routine io_connect_add_client */ kern_return_t is_io_connect_add_client( @@ -2022,417 +2647,671 @@ kern_return_t is_io_connect_set_properties( io_object_t connection, io_buf_ptr_t properties, mach_msg_type_number_t propertiesCnt, - natural_t * result) + kern_return_t * result) { return( is_io_registry_entry_set_properties( connection, properties, propertiesCnt, result )); } -/* Routine io_connect_method_scalarI_scalarO */ -kern_return_t is_io_connect_method_scalarI_scalarO( - io_object_t connect, - UInt32 index, - void * input[], - IOByteCount inputCount, - void * output[], - IOByteCount * outputCount ) +/* Routine io_user_client_method */ +kern_return_t is_io_connect_method +( + io_connect_t connection, + uint32_t selector, + io_scalar_inband64_t scalar_input, + mach_msg_type_number_t scalar_inputCnt, + io_struct_inband_t inband_input, + mach_msg_type_number_t inband_inputCnt, + mach_vm_address_t ool_input, + mach_vm_size_t ool_input_size, + io_scalar_inband64_t scalar_output, + mach_msg_type_number_t *scalar_outputCnt, + io_struct_inband_t inband_output, + mach_msg_type_number_t *inband_outputCnt, + mach_vm_address_t ool_output, + mach_vm_size_t * ool_output_size +) { - IOReturn err; - IOExternalMethod * method; - IOService * object; - IOMethod func; + CHECK( IOUserClient, connection, client ); - CHECK( IOUserClient, connect, client); - if( (method = client->getTargetAndMethodForIndex(&object, index))) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIScalarO != (method->flags & kIOUCTypeMask)) - continue; - if( inputCount != method->count0) - continue; - if( *outputCount != method->count1) - continue; + IOExternalMethodArguments args; + IOReturn ret; + IOMemoryDescriptor * inputMD = 0; + IOMemoryDescriptor * outputMD = 0; - func = method->func; + bzero(&args.__reserved[0], sizeof(args.__reserved)); + args.version = kIOExternalMethodArgumentsCurrentVersion; - switch( inputCount) { + args.selector = selector; - case 6: - err = (object->*func)( input[0], input[1], input[2], - input[3], input[4], input[5] ); - break; - case 5: - err = (object->*func)( input[0], input[1], input[2], - input[3], input[4], - &output[0] ); - break; - case 4: - err = (object->*func)( input[0], input[1], input[2], - input[3], - &output[0], &output[1] ); - break; - case 3: - err = (object->*func)( input[0], input[1], input[2], - &output[0], &output[1], &output[2] ); - break; - case 2: - err = (object->*func)( input[0], input[1], - &output[0], &output[1], &output[2], - &output[3] ); - break; - case 1: - 
err = (object->*func)( input[0], - &output[0], &output[1], &output[2], - &output[3], &output[4] ); - break; - case 0: - err = (object->*func)( &output[0], &output[1], &output[2], - &output[3], &output[4], &output[5] ); - break; + args.asyncWakePort = MACH_PORT_NULL; + args.asyncReference = 0; + args.asyncReferenceCount = 0; - default: - IOLog("%s: Bad method table\n", client->getName()); - } - } while( false); + args.scalarInput = scalar_input; + args.scalarInputCount = scalar_inputCnt; + args.structureInput = inband_input; + args.structureInputSize = inband_inputCnt; - } else - err = kIOReturnUnsupported; + if (ool_input) + inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, + kIODirectionOut, current_task()); - return( err); + args.structureInputDescriptor = inputMD; + + args.scalarOutput = scalar_output; + args.scalarOutputCount = *scalar_outputCnt; + args.structureOutput = inband_output; + args.structureOutputSize = *inband_outputCnt; + + if (ool_output) + { + outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, + kIODirectionIn, current_task()); + } + + args.structureOutputDescriptor = outputMD; + args.structureOutputDescriptorSize = *ool_output_size; + + ret = client->externalMethod( selector, &args ); + + *scalar_outputCnt = args.scalarOutputCount; + *inband_outputCnt = args.structureOutputSize; + *ool_output_size = args.structureOutputDescriptorSize; + + if (inputMD) + inputMD->release(); + if (outputMD) + outputMD->release(); + + return (ret); } -/* Routine io_connect_method_scalarI_structureO */ -kern_return_t is_io_connect_method_scalarI_structureO( - io_object_t connect, - UInt32 index, - void * input[], - IOByteCount inputCount, - void * output, - IOByteCount * outputCount ) +/* Routine io_async_user_client_method */ +kern_return_t is_io_connect_async_method +( + io_connect_t connection, + mach_port_t wake_port, + io_async_ref64_t reference, + mach_msg_type_number_t referenceCnt, + uint32_t selector, + io_scalar_inband64_t scalar_input, + mach_msg_type_number_t scalar_inputCnt, + io_struct_inband_t inband_input, + mach_msg_type_number_t inband_inputCnt, + mach_vm_address_t ool_input, + mach_vm_size_t ool_input_size, + io_scalar_inband64_t scalar_output, + mach_msg_type_number_t *scalar_outputCnt, + io_struct_inband_t inband_output, + mach_msg_type_number_t *inband_outputCnt, + mach_vm_address_t ool_output, + mach_vm_size_t * ool_output_size +) { - IOReturn err; - IOExternalMethod * method; - IOService * object; - IOMethod func; + CHECK( IOUserClient, connection, client ); - CHECK( IOUserClient, connect, client); + IOExternalMethodArguments args; + IOReturn ret; + IOMemoryDescriptor * inputMD = 0; + IOMemoryDescriptor * outputMD = 0; - if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIStructO != (method->flags & kIOUCTypeMask)) - continue; - if( inputCount != method->count0) - continue; - if( (0xffffffff != method->count1) - && (*outputCount != method->count1)) - continue; + bzero(&args.__reserved[0], sizeof(args.__reserved)); + args.version = kIOExternalMethodArgumentsCurrentVersion; - func = method->func; + reference[0] = (io_user_reference_t) wake_port; + if (vm_map_is_64bit(get_task_map(current_task()))) + reference[0] |= kIOUCAsync64Flag; - switch( inputCount) { + args.selector = selector; - case 5: - err = (object->*func)( input[0], input[1], input[2], - input[3], input[4], - output ); - break; - case 4: - err = (object->*func)( input[0], input[1], 
input[2], - input[3], - output, (void *)outputCount ); - break; - case 3: - err = (object->*func)( input[0], input[1], input[2], - output, (void *)outputCount, 0 ); - break; - case 2: - err = (object->*func)( input[0], input[1], - output, (void *)outputCount, 0, 0 ); - break; - case 1: - err = (object->*func)( input[0], - output, (void *)outputCount, 0, 0, 0 ); - break; - case 0: - err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); - break; + args.asyncWakePort = wake_port; + args.asyncReference = reference; + args.asyncReferenceCount = referenceCnt; - default: - IOLog("%s: Bad method table\n", client->getName()); - } - } while( false); + args.scalarInput = scalar_input; + args.scalarInputCount = scalar_inputCnt; + args.structureInput = inband_input; + args.structureInputSize = inband_inputCnt; - } else - err = kIOReturnUnsupported; + if (ool_input) + inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, + kIODirectionOut, current_task()); - return( err); + args.structureInputDescriptor = inputMD; + + args.scalarOutput = scalar_output; + args.scalarOutputCount = *scalar_outputCnt; + args.structureOutput = inband_output; + args.structureOutputSize = *inband_outputCnt; + + if (ool_output) + { + outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size, + kIODirectionIn, current_task()); + } + + args.structureOutputDescriptor = outputMD; + args.structureOutputDescriptorSize = *ool_output_size; + + ret = client->externalMethod( selector, &args ); + + *inband_outputCnt = args.structureOutputSize; + *ool_output_size = args.structureOutputDescriptorSize; + + if (inputMD) + inputMD->release(); + if (outputMD) + outputMD->release(); + + return (ret); } -/* Routine io_connect_method_scalarI_structureI */ -kern_return_t is_io_connect_method_scalarI_structureI( - io_connect_t connect, - UInt32 index, - void * input[], - IOByteCount inputCount, - UInt8 * inputStruct, - IOByteCount inputStructCount ) +/* Routine io_connect_method_scalarI_scalarO */ +kern_return_t is_io_connect_method_scalarI_scalarO( + io_object_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_scalar_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOReturn err; + uint32_t i; + io_scalar_inband64_t _input; + io_scalar_inband64_t _output; + + mach_msg_type_number_t struct_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + err = is_io_connect_method(connect, index, + _input, inputCount, + NULL, 0, + 0, 0, + _output, outputCount, + NULL, &struct_outputCnt, + 0, &ool_output_size); + + for (i = 0; i < *outputCount; i++) + output[i] = SCALAR32(_output[i]); + + return (err); +} + +kern_return_t shim_io_connect_method_scalarI_scalarO( + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_user_scalar_t * output, + mach_msg_type_number_t * outputCount ) { - IOReturn err; - IOExternalMethod * method; - IOService * object; IOMethod func; + io_scalar_inband_t _output; + IOReturn err; + err = kIOReturnBadArgument; - CHECK( IOUserClient, connect, client); + do { - if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIStructI != (method->flags & kIOUCTypeMask)) - continue; - if( (0xffffffff != method->count0) - && (inputCount != method->count0)) + if( inputCount != method->count0) + { + IOLog("%s: IOUserClient 
inputCount count mismatch\n", object->getName()); continue; - if( (0xffffffff != method->count1) - && (inputStructCount != method->count1)) + } + if( *outputCount != method->count1) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); continue; + } func = method->func; switch( inputCount) { + case 6: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); + break; case 5: - err = (object->*func)( input[0], input[1], input[2], - input[3], input[4], - inputStruct ); + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + &_output[0] ); break; case 4: - err = (object->*func)( input[0], input[1], input[2], - input[3], - inputStruct, (void *)inputStructCount ); + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + &_output[0], &_output[1] ); break; case 3: - err = (object->*func)( input[0], input[1], input[2], - inputStruct, (void *)inputStructCount, - 0 ); + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + &_output[0], &_output[1], &_output[2] ); break; case 2: - err = (object->*func)( input[0], input[1], - inputStruct, (void *)inputStructCount, - 0, 0 ); + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + &_output[0], &_output[1], &_output[2], + &_output[3] ); break; case 1: - err = (object->*func)( input[0], - inputStruct, (void *)inputStructCount, - 0, 0, 0 ); + err = (object->*func)( ARG32(input[0]), + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4] ); break; case 0: - err = (object->*func)( inputStruct, (void *)inputStructCount, - 0, 0, 0, 0 ); + err = (object->*func)( &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4], &_output[5] ); break; default: - IOLog("%s: Bad method table\n", client->getName()); + IOLog("%s: Bad method table\n", object->getName()); } - } while( false); + } + while( false); - } else - err = kIOReturnUnsupported; + uint32_t i; + for (i = 0; i < *outputCount; i++) + output[i] = SCALAR32(_output[i]); return( err); } -/* Routine io_connect_method_structureI_structureO */ -kern_return_t is_io_connect_method_structureI_structureO( +/* Routine io_async_method_scalarI_scalarO */ +kern_return_t is_io_async_method_scalarI_scalarO( + io_object_t connect, + mach_port_t wake_port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_scalar_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + IOReturn err; + uint32_t i; + io_scalar_inband64_t _input; + io_scalar_inband64_t _output; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) + _reference[i] = REF64(reference[i]); + + mach_msg_type_number_t struct_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + err = is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + NULL, 0, + 0, 0, + _output, outputCount, + NULL, &struct_outputCnt, + 0, &ool_output_size); + + for (i = 0; i < *outputCount; i++) + output[i] = SCALAR32(_output[i]); + + return (err); +} +/* Routine io_async_method_scalarI_structureO */ +kern_return_t is_io_async_method_scalarI_structureO( io_object_t connect, - UInt32 index, - UInt8 * input, - IOByteCount inputCount, - UInt8 * output, - IOByteCount * outputCount ) + mach_port_t wake_port, + 
io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) { - IOReturn err; - IOExternalMethod * method; - IOService * object; - IOMethod func; - - CHECK( IOUserClient, connect, client); + uint32_t i; + io_scalar_inband64_t _input; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) + _reference[i] = REF64(reference[i]); + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + return (is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + NULL, 0, + 0, 0, + NULL, &scalar_outputCnt, + output, outputCount, + 0, &ool_output_size)); +} - if( (method = client->getTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCStructIStructO != (method->flags & kIOUCTypeMask)) - continue; - if( (0xffffffff != method->count0) - && (inputCount != method->count0)) - continue; - if( (0xffffffff != method->count1) - && (*outputCount != method->count1)) - continue; +/* Routine io_async_method_scalarI_structureI */ +kern_return_t is_io_async_method_scalarI_structureI( + io_connect_t connect, + mach_port_t wake_port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) + _reference[i] = REF64(reference[i]); + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_msg_type_number_t inband_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + return (is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + _input, inputCount, + inputStruct, inputStructCount, + 0, 0, + NULL, &scalar_outputCnt, + NULL, &inband_outputCnt, + 0, &ool_output_size)); +} - func = method->func; +/* Routine io_async_method_structureI_structureO */ +kern_return_t is_io_async_method_structureI_structureO( + io_object_t connect, + mach_port_t wake_port, + io_async_ref_t reference, + mach_msg_type_number_t referenceCnt, + uint32_t index, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + uint32_t i; + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + io_async_ref64_t _reference; + + for (i = 0; i < referenceCnt; i++) + _reference[i] = REF64(reference[i]); + + return (is_io_connect_async_method(connect, + wake_port, _reference, referenceCnt, + index, + NULL, 0, + input, inputCount, + 0, 0, + NULL, &scalar_outputCnt, + output, outputCount, + 0, &ool_output_size)); +} - if( method->count1) { - if( method->count0) { - err = (object->*func)( input, output, - (void *)inputCount, outputCount, 0, 0 ); - } else { - err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); - } - } else { - err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 ); - } - } while( false); +kern_return_t shim_io_async_method_scalarI_scalarO( + IOExternalAsyncMethod * method, + IOService * object, + mach_port_t asyncWakePort, + io_user_reference_t * asyncReference, + uint32_t 
asyncReferenceCount, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_user_scalar_t * output, + mach_msg_type_number_t * outputCount ) +{ + IOAsyncMethod func; + uint32_t i; + io_scalar_inband_t _output; + IOReturn err; + io_async_ref_t reference; - } else - err = kIOReturnUnsupported; + for (i = 0; i < asyncReferenceCount; i++) + reference[i] = REF32(asyncReference[i]); - return( err); -} + err = kIOReturnBadArgument; -kern_return_t is_io_async_method_scalarI_scalarO( - io_object_t connect, - mach_port_t wakePort, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, - UInt32 index, - void * input[], - IOByteCount inputCount, - void * output[], - IOByteCount * outputCount ) -{ - IOReturn err; - IOExternalAsyncMethod *method; - IOService * object; - IOAsyncMethod func; + do { - CHECK( IOUserClient, connect, client); - if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIScalarO != (method->flags & kIOUCTypeMask)) - continue; - if( inputCount != method->count0) - continue; - if( *outputCount != method->count1) - continue; + if( inputCount != method->count0) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( *outputCount != method->count1) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } - reference[0] = (natural_t) wakePort; - func = method->func; + func = method->func; switch( inputCount) { case 6: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], input[4], input[5] ); + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), ARG32(input[5]) ); break; case 5: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], input[4], - &output[0] ); + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + &_output[0] ); break; case 4: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], - &output[0], &output[1] ); + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + &_output[0], &_output[1] ); break; case 3: err = (object->*func)( reference, - input[0], input[1], input[2], - &output[0], &output[1], &output[2] ); + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + &_output[0], &_output[1], &_output[2] ); break; case 2: err = (object->*func)( reference, - input[0], input[1], - &output[0], &output[1], &output[2], - &output[3] ); + ARG32(input[0]), ARG32(input[1]), + &_output[0], &_output[1], &_output[2], + &_output[3] ); break; case 1: err = (object->*func)( reference, - input[0], - &output[0], &output[1], &output[2], - &output[3], &output[4] ); + ARG32(input[0]), + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4] ); break; case 0: err = (object->*func)( reference, - &output[0], &output[1], &output[2], - &output[3], &output[4], &output[5] ); + &_output[0], &_output[1], &_output[2], + &_output[3], &_output[4], &_output[5] ); break; - default: - IOLog("%s: Bad method table\n", client->getName()); - } - } while( false); + default: + IOLog("%s: Bad method table\n", object->getName()); + } + } + while( false); + + for (i = 0; i < *outputCount; i++) + output[i] = SCALAR32(_output[i]); + + return( err); +} + + +/* Routine io_connect_method_scalarI_structureO */ +kern_return_t is_io_connect_method_scalarI_structureO( + io_object_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, 
+ io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + return (is_io_connect_method(connect, index, + _input, inputCount, + NULL, 0, + 0, 0, + NULL, &scalar_outputCnt, + output, outputCount, + 0, &ool_output_size)); +} + +kern_return_t shim_io_connect_method_scalarI_structureO( + + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + IOByteCount * outputCount ) +{ + IOMethod func; + IOReturn err; + + err = kIOReturnBadArgument; + + do { + if( inputCount != method->count0) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } + + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + output ); + break; + case 4: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), + output, (void *)outputCount ); + break; + case 3: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + output, (void *)outputCount, 0 ); + break; + case 2: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + output, (void *)outputCount, 0, 0 ); + break; + case 1: + err = (object->*func)( ARG32(input[0]), + output, (void *)outputCount, 0, 0, 0 ); + break; + case 0: + err = (object->*func)( output, (void *)outputCount, 0, 0, 0, 0 ); + break; - } else - err = kIOReturnUnsupported; + default: + IOLog("%s: Bad method table\n", object->getName()); + } + } + while( false); return( err); } -kern_return_t is_io_async_method_scalarI_structureO( - io_object_t connect, - mach_port_t wakePort, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, - UInt32 index, - void * input[], - IOByteCount inputCount, - void * output, - IOByteCount * outputCount ) + +kern_return_t shim_io_async_method_scalarI_structureO( + IOExternalAsyncMethod * method, + IOService * object, + mach_port_t asyncWakePort, + io_user_reference_t * asyncReference, + uint32_t asyncReferenceCount, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) { - IOReturn err; - IOExternalAsyncMethod *method; - IOService * object; IOAsyncMethod func; + uint32_t i; + IOReturn err; + io_async_ref_t reference; - CHECK( IOUserClient, connect, client); + for (i = 0; i < asyncReferenceCount; i++) + reference[i] = REF32(asyncReference[i]); - if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIStructO != (method->flags & kIOUCTypeMask)) - continue; - if( inputCount != method->count0) - continue; - if( (0xffffffff != method->count1) - && (*outputCount != method->count1)) - continue; + err = kIOReturnBadArgument; + do { + if( inputCount != method->count0) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count 
mismatch\n", object->getName()); + continue; + } - reference[0] = (natural_t) wakePort; - func = method->func; + func = method->func; switch( inputCount) { case 5: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], input[4], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), output ); break; case 4: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), output, (void *)outputCount ); break; case 3: err = (object->*func)( reference, - input[0], input[1], input[2], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), output, (void *)outputCount, 0 ); break; case 2: err = (object->*func)( reference, - input[0], input[1], + ARG32(input[0]), ARG32(input[1]), output, (void *)outputCount, 0, 0 ); break; case 1: err = (object->*func)( reference, - input[0], + ARG32(input[0]), output, (void *)outputCount, 0, 0, 0 ); break; case 0: @@ -2441,78 +3320,176 @@ kern_return_t is_io_async_method_scalarI_structureO( break; default: - IOLog("%s: Bad method table\n", client->getName()); + IOLog("%s: Bad method table\n", object->getName()); } - } while( false); + } + while( false); - } else - err = kIOReturnUnsupported; + return( err); +} + +/* Routine io_connect_method_scalarI_structureI */ +kern_return_t is_io_connect_method_scalarI_structureI( + io_connect_t connect, + uint32_t index, + io_scalar_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + uint32_t i; + io_scalar_inband64_t _input; + + mach_msg_type_number_t scalar_outputCnt = 0; + mach_msg_type_number_t inband_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + for (i = 0; i < inputCount; i++) + _input[i] = SCALAR64(input[i]); + + return (is_io_connect_method(connect, index, + _input, inputCount, + inputStruct, inputStructCount, + 0, 0, + NULL, &scalar_outputCnt, + NULL, &inband_outputCnt, + 0, &ool_output_size)); +} + +kern_return_t shim_io_connect_method_scalarI_structureI( + IOExternalMethod * method, + IOService * object, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) +{ + IOMethod func; + IOReturn err = kIOReturnBadArgument; + + do + { + if( (kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (inputStructCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } + + func = method->func; + + switch( inputCount) { + + case 5: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), + inputStruct ); + break; + case 4: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), (void *) input[2], + ARG32(input[3]), + inputStruct, (void *)inputStructCount ); + break; + case 3: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + inputStruct, (void *)inputStructCount, + 0 ); + break; + case 2: + err = (object->*func)( ARG32(input[0]), ARG32(input[1]), + inputStruct, (void *)inputStructCount, + 0, 0 ); + break; + case 1: + err = (object->*func)( ARG32(input[0]), + inputStruct, (void *)inputStructCount, + 0, 0, 0 ); + break; + case 0: + err = (object->*func)( inputStruct, (void 
*)inputStructCount, + 0, 0, 0, 0 ); + break; + + default: + IOLog("%s: Bad method table\n", object->getName()); + } + } + while (false); return( err); } -kern_return_t is_io_async_method_scalarI_structureI( - io_connect_t connect, - mach_port_t wakePort, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, - UInt32 index, - void * input[], - IOByteCount inputCount, - UInt8 * inputStruct, - IOByteCount inputStructCount ) +kern_return_t shim_io_async_method_scalarI_structureI( + IOExternalAsyncMethod * method, + IOService * object, + mach_port_t asyncWakePort, + io_user_reference_t * asyncReference, + uint32_t asyncReferenceCount, + const io_user_scalar_t * input, + mach_msg_type_number_t inputCount, + io_struct_inband_t inputStruct, + mach_msg_type_number_t inputStructCount ) { - IOReturn err; - IOExternalAsyncMethod *method; - IOService * object; IOAsyncMethod func; + uint32_t i; + IOReturn err = kIOReturnBadArgument; + io_async_ref_t reference; - CHECK( IOUserClient, connect, client); + for (i = 0; i < asyncReferenceCount; i++) + reference[i] = REF32(asyncReference[i]); - if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCScalarIStructI != (method->flags & kIOUCTypeMask)) - continue; - if( (0xffffffff != method->count0) - && (inputCount != method->count0)) - continue; - if( (0xffffffff != method->count1) - && (inputStructCount != method->count1)) - continue; + do + { + if( (kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (inputStructCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } - reference[0] = (natural_t) wakePort; func = method->func; switch( inputCount) { case 5: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], input[4], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), ARG32(input[4]), inputStruct ); break; case 4: err = (object->*func)( reference, - input[0], input[1], input[2], - input[3], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), + ARG32(input[3]), inputStruct, (void *)inputStructCount ); break; case 3: err = (object->*func)( reference, - input[0], input[1], input[2], + ARG32(input[0]), ARG32(input[1]), ARG32(input[2]), inputStruct, (void *)inputStructCount, 0 ); break; case 2: err = (object->*func)( reference, - input[0], input[1], + ARG32(input[0]), ARG32(input[1]), inputStruct, (void *)inputStructCount, 0, 0 ); break; case 1: err = (object->*func)( reference, - input[0], + ARG32(input[0]), inputStruct, (void *)inputStructCount, 0, 0, 0 ); break; @@ -2523,47 +3500,115 @@ kern_return_t is_io_async_method_scalarI_structureI( break; default: - IOLog("%s: Bad method table\n", client->getName()); + IOLog("%s: Bad method table\n", object->getName()); } - } while( false); - - } else - err = kIOReturnUnsupported; + } + while (false); return( err); } -kern_return_t is_io_async_method_structureI_structureO( - io_object_t connect, - mach_port_t wakePort, - io_async_ref_t reference, - mach_msg_type_number_t referenceCnt, - UInt32 index, - UInt8 * input, - IOByteCount inputCount, - UInt8 * output, +/* Routine io_connect_method_structureI_structureO */ +kern_return_t is_io_connect_method_structureI_structureO( + io_object_t connect, + uint32_t index, + io_struct_inband_t input, + 
mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ + mach_msg_type_number_t scalar_outputCnt = 0; + mach_vm_size_t ool_output_size = 0; + + return (is_io_connect_method(connect, index, + NULL, 0, + input, inputCount, + 0, 0, + NULL, &scalar_outputCnt, + output, outputCount, + 0, &ool_output_size)); +} + +kern_return_t shim_io_connect_method_structureI_structureO( + IOExternalMethod * method, + IOService * object, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, IOByteCount * outputCount ) { - IOReturn err; - IOExternalAsyncMethod *method; - IOService * object; + IOMethod func; + IOReturn err = kIOReturnBadArgument; + + do + { + if( (kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } + + func = method->func; + + if( method->count1) { + if( method->count0) { + err = (object->*func)( input, output, + (void *)inputCount, outputCount, 0, 0 ); + } else { + err = (object->*func)( output, outputCount, 0, 0, 0, 0 ); + } + } else { + err = (object->*func)( input, (void *)inputCount, 0, 0, 0, 0 ); + } + } + while( false); + + + return( err); +} + +kern_return_t shim_io_async_method_structureI_structureO( + IOExternalAsyncMethod * method, + IOService * object, + mach_port_t asyncWakePort, + io_user_reference_t * asyncReference, + uint32_t asyncReferenceCount, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, + io_struct_inband_t output, + mach_msg_type_number_t * outputCount ) +{ IOAsyncMethod func; + uint32_t i; + IOReturn err; + io_async_ref_t reference; - CHECK( IOUserClient, connect, client); + for (i = 0; i < asyncReferenceCount; i++) + reference[i] = REF32(asyncReference[i]); - if( (method = client->getAsyncTargetAndMethodForIndex(&object, index)) ) { - do { - err = kIOReturnBadArgument; - if( kIOUCStructIStructO != (method->flags & kIOUCTypeMask)) - continue; - if( (0xffffffff != method->count0) - && (inputCount != method->count0)) - continue; - if( (0xffffffff != method->count1) - && (*outputCount != method->count1)) - continue; + err = kIOReturnBadArgument; + do + { + if( (kIOUCVariableStructureSize != method->count0) + && (inputCount != method->count0)) + { + IOLog("%s: IOUserClient inputCount count mismatch\n", object->getName()); + continue; + } + if( (kIOUCVariableStructureSize != method->count1) + && (*outputCount != method->count1)) + { + IOLog("%s: IOUserClient outputCount count mismatch\n", object->getName()); + continue; + } - reference[0] = (natural_t) wakePort; func = method->func; if( method->count1) { @@ -2579,21 +3624,19 @@ kern_return_t is_io_async_method_structureI_structureO( err = (object->*func)( reference, input, (void *)inputCount, 0, 0, 0, 0 ); } - - } while( false); - - } else - err = kIOReturnUnsupported; + } + while( false); return( err); } + /* Routine io_make_matching */ kern_return_t is_io_make_matching( - mach_port_t master_port, - UInt32 type, - IOOptionBits options, - UInt8 * input, - IOByteCount inputCount, + mach_port_t master_port, + uint32_t type, + uint32_t options, + io_struct_inband_t input, + mach_msg_type_number_t inputCount, io_string_t matching ) { OSSerialize * s; @@ -2641,9 +3684,9 @@ kern_return_t is_io_make_matching( err 
= kIOReturnNoMemory; continue; } else - strcpy( matching, s->text()); - - } while( false); + strlcpy(matching, s->text(), sizeof(io_string_t)); + } + while( false); if( s) s->release(); @@ -2656,10 +3699,10 @@ kern_return_t is_io_make_matching( /* Routine io_catalog_send_data */ kern_return_t is_io_catalog_send_data( mach_port_t master_port, - int flag, + uint32_t flag, io_buf_ptr_t inData, mach_msg_type_number_t inDataCount, - natural_t * result) + kern_return_t * result) { OSObject * obj = 0; vm_offset_t data; @@ -2670,12 +3713,20 @@ kern_return_t is_io_catalog_send_data( if( master_port != master_device_port) return kIOReturnNotPrivileged; - // FIXME: This is a hack. Should have own function for removeKernelLinker() - if(flag != kIOCatalogRemoveKernelLinker && ( !inData || !inDataCount) ) + if( (flag != kIOCatalogRemoveKernelLinker && + flag != kIOCatalogKextdActive && + flag != kIOCatalogKextdFinishedLaunching) && + ( !inData || !inDataCount) ) + { return kIOReturnBadArgument; + } + + if (inData) { + vm_map_offset_t map_data; + + kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t)inData); + data = CAST_DOWN(vm_offset_t, map_data); - if (data) { - kr = vm_map_copyout( kernel_map, &data, (vm_map_copy_t)inData); if( kr != KERN_SUCCESS) return kr; @@ -2741,12 +3792,35 @@ kern_return_t is_io_catalog_send_data( } break; - case kIOCatalogRemoveKernelLinker: { - if (gIOCatalogue->removeKernelLinker() != KERN_SUCCESS) { - kr = kIOReturnError; - } else { - kr = kIOReturnSuccess; + case kIOCatalogRemoveKernelLinker: + kr = KERN_NOT_SUPPORTED; + break; + + case kIOCatalogKextdActive: +#if !NO_KEXTD + OSKext::setKextdActive(); + + /* Dump all nonloaded startup extensions; kextd will now send them + * down on request. + */ + OSKext::flushNonloadedKexts( /* flushPrelinkedKexts */ false); +#endif + kr = kIOReturnSuccess; + break; + + case kIOCatalogKextdFinishedLaunching: { +#if !NO_KEXTD + static bool clearedBusy = false; + + if (!clearedBusy) { + IOService * serviceRoot = IOService::getServiceRoot(); + if (serviceRoot) { + serviceRoot->adjustBusy(-1); + clearedBusy = true; + } } +#endif + kr = kIOReturnSuccess; } break; @@ -2764,7 +3838,7 @@ kern_return_t is_io_catalog_send_data( /* Routine io_catalog_terminate */ kern_return_t is_io_catalog_terminate( mach_port_t master_port, - int flag, + uint32_t flag, io_name_t name ) { kern_return_t kr; @@ -2819,7 +3893,7 @@ kern_return_t is_io_catalog_terminate( /* Routine io_catalog_get_data */ kern_return_t is_io_catalog_get_data( mach_port_t master_port, - int flag, + uint32_t flag, io_buf_ptr_t *outData, mach_msg_type_number_t *outDataCount) { @@ -2845,10 +3919,11 @@ kern_return_t is_io_catalog_get_data( vm_size_t size; size = s->getLength(); - kr = vm_allocate(kernel_map, &data, size, true); + kr = vm_allocate(kernel_map, &data, size, VM_FLAGS_ANYWHERE); if ( kr == kIOReturnSuccess ) { bcopy(s->text(), (void *)data, size); - kr = vm_map_copyin(kernel_map, data, size, true, ©); + kr = vm_map_copyin(kernel_map, (vm_map_address_t)data, + (vm_map_size_t)size, true, ©); *outData = (char *)copy; *outDataCount = size; } @@ -2862,7 +3937,7 @@ kern_return_t is_io_catalog_get_data( /* Routine io_catalog_get_gen_count */ kern_return_t is_io_catalog_get_gen_count( mach_port_t master_port, - int *genCount) + uint32_t *genCount) { if( master_port != master_device_port) return kIOReturnNotPrivileged; @@ -2877,7 +3952,9 @@ kern_return_t is_io_catalog_get_gen_count( return kIOReturnSuccess; } -/* Routine io_catalog_module_loaded */ +/* Routine 
io_catalog_module_loaded. + * Is invoked from IOKitLib's IOCatalogueModuleLoaded(). Doesn't seem to be used. + */ kern_return_t is_io_catalog_module_loaded( mach_port_t master_port, io_name_t name) @@ -2897,7 +3974,7 @@ kern_return_t is_io_catalog_module_loaded( kern_return_t is_io_catalog_reset( mach_port_t master_port, - int flag) + uint32_t flag) { if( master_port != master_device_port) return kIOReturnNotPrivileged; @@ -2914,19 +3991,17 @@ kern_return_t is_io_catalog_reset( return kIOReturnSuccess; } -kern_return_t iokit_user_client_trap(io_object_t userClientRef, UInt32 index, - void *p1, void *p2, void *p3, - void *p4, void *p5, void *p6) +kern_return_t iokit_user_client_trap(struct iokit_user_client_trap_args *args) { kern_return_t result = kIOReturnBadArgument; IOUserClient *userClient; if ((userClient = OSDynamicCast(IOUserClient, - iokit_lookup_connect_ref_current_task(userClientRef)))) { + iokit_lookup_connect_ref_current_task((OSObject *)(args->userClientRef))))) { IOExternalTrap *trap; IOService *target = NULL; - trap = userClient->getTargetAndTrapForIndex(&target, index); + trap = userClient->getTargetAndTrapForIndex(&target, args->index); if (trap && target) { IOTrap func; @@ -2934,7 +4009,7 @@ kern_return_t iokit_user_client_trap(io_object_t userClientRef, UInt32 index, func = trap->func; if (func) { - result = (target->*func)(p1, p2, p3, p4, p5, p6); + result = (target->*func)(args->p1, args->p2, args->p3, args->p4, args->p5, args->p6); } } @@ -2944,10 +4019,160 @@ kern_return_t iokit_user_client_trap(io_object_t userClientRef, UInt32 index, return result; } +IOReturn IOUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args, + IOExternalMethodDispatch * dispatch, OSObject * target, void * reference ) +{ + IOReturn err; + IOService * object; + IOByteCount structureOutputSize; + + if (dispatch) + { + uint32_t count; + count = dispatch->checkScalarInputCount; + if ((kIOUCVariableStructureSize != count) && (count != args->scalarInputCount)) + { + return (kIOReturnBadArgument); + } + + count = dispatch->checkStructureInputSize; + if ((kIOUCVariableStructureSize != count) + && (count != ((args->structureInputDescriptor) + ? args->structureInputDescriptor->getLength() : args->structureInputSize))) + { + return (kIOReturnBadArgument); + } + + count = dispatch->checkScalarOutputCount; + if ((kIOUCVariableStructureSize != count) && (count != args->scalarOutputCount)) + { + return (kIOReturnBadArgument); + } + + count = dispatch->checkStructureOutputSize; + if ((kIOUCVariableStructureSize != count) + && (count != ((args->structureOutputDescriptor) + ? 
args->structureOutputDescriptor->getLength() : args->structureOutputSize))) + { + return (kIOReturnBadArgument); + } + + if (dispatch->function) + err = (*dispatch->function)(target, reference, args); + else + err = kIOReturnNoCompletion; /* implementator can dispatch */ + + return (err); + } + + + // pre-Leopard API's don't do ool structs + if (args->structureInputDescriptor || args->structureOutputDescriptor) + { + err = kIOReturnIPCError; + return (err); + } + + structureOutputSize = args->structureOutputSize; + + if (args->asyncWakePort) + { + IOExternalAsyncMethod * method; + + if( !(method = getAsyncTargetAndMethodForIndex(&object, selector)) ) + return (kIOReturnUnsupported); + + switch (method->flags & kIOUCTypeMask) + { + case kIOUCScalarIStructI: + err = shim_io_async_method_scalarI_structureI( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + (char *)args->structureInput, args->structureInputSize ); + break; + + case kIOUCScalarIScalarO: + err = shim_io_async_method_scalarI_scalarO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + args->scalarOutput, &args->scalarOutputCount ); + break; + + case kIOUCScalarIStructO: + err = shim_io_async_method_scalarI_structureO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + args->scalarInput, args->scalarInputCount, + (char *) args->structureOutput, &args->structureOutputSize ); + break; + + + case kIOUCStructIStructO: + err = shim_io_async_method_structureI_structureO( method, object, + args->asyncWakePort, args->asyncReference, args->asyncReferenceCount, + (char *)args->structureInput, args->structureInputSize, + (char *) args->structureOutput, &args->structureOutputSize ); + break; + + default: + err = kIOReturnBadArgument; + break; + } + } + else + { + IOExternalMethod * method; + + if( !(method = getTargetAndMethodForIndex(&object, selector)) ) + return (kIOReturnUnsupported); + + switch (method->flags & kIOUCTypeMask) + { + case kIOUCScalarIStructI: + err = shim_io_connect_method_scalarI_structureI( method, object, + args->scalarInput, args->scalarInputCount, + (char *) args->structureInput, args->structureInputSize ); + break; + + case kIOUCScalarIScalarO: + err = shim_io_connect_method_scalarI_scalarO( method, object, + args->scalarInput, args->scalarInputCount, + args->scalarOutput, &args->scalarOutputCount ); + break; + + case kIOUCScalarIStructO: + err = shim_io_connect_method_scalarI_structureO( method, object, + args->scalarInput, args->scalarInputCount, + (char *) args->structureOutput, &structureOutputSize ); + break; + + + case kIOUCStructIStructO: + err = shim_io_connect_method_structureI_structureO( method, object, + (char *) args->structureInput, args->structureInputSize, + (char *) args->structureOutput, &structureOutputSize ); + break; + + default: + err = kIOReturnBadArgument; + break; + } + } + + args->structureOutputSize = structureOutputSize; + + return (err); +} + + }; /* extern "C" */ +#if __LP64__ OSMetaClassDefineReservedUnused(IOUserClient, 0); OSMetaClassDefineReservedUnused(IOUserClient, 1); +#else +OSMetaClassDefineReservedUsed(IOUserClient, 0); +OSMetaClassDefineReservedUsed(IOUserClient, 1); +#endif OSMetaClassDefineReservedUnused(IOUserClient, 2); OSMetaClassDefineReservedUnused(IOUserClient, 3); OSMetaClassDefineReservedUnused(IOUserClient, 4);
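
The change above replaces the per-type MIG handlers (scalarI_scalarO, scalarI_structureI, and so on) with a single checked entry point, IOUserClient::externalMethod(): when a subclass supplies an IOExternalMethodDispatch, the superclass validates the scalar counts and structure sizes against the dispatch entry before calling the handler, and otherwise it falls back to the legacy IOExternalMethod / IOExternalAsyncMethod tables through the shim_io_* routines. The following C++ sketch shows how a driver subclass might adopt the new dispatch-table path; the names MyUserClient, sMethods, and sAddNumbers are hypothetical and are not part of this change, and the usual OSDefineMetaClassAndStructors / kext plumbing is omitted.

    // Sketch only: hypothetical IOUserClient subclass using the
    // externalMethod()/IOExternalMethodDispatch path introduced here.
    #include <IOKit/IOUserClient.h>

    class MyUserClient : public IOUserClient
    {
        OSDeclareDefaultStructors(MyUserClient)

    public:
        virtual IOReturn externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                         IOExternalMethodDispatch * dispatch,
                                         OSObject * target, void * reference );

        static IOReturn sAddNumbers( OSObject * target, void * reference,
                                     IOExternalMethodArguments * args );
    };

    // One entry per selector. The four counts are what the superclass checks
    // against scalarInputCount, structureInputSize, scalarOutputCount and
    // structureOutputSize before invoking the handler.
    static const IOExternalMethodDispatch sMethods[] =
    {
        { &MyUserClient::sAddNumbers, 2, 0, 1, 0 },   // selector 0: 2 scalars in, 1 scalar out
    };

    IOReturn MyUserClient::externalMethod( uint32_t selector, IOExternalMethodArguments * args,
                                           IOExternalMethodDispatch * dispatch,
                                           OSObject * target, void * reference )
    {
        if (selector < (uint32_t) (sizeof(sMethods) / sizeof(sMethods[0])))
        {
            dispatch = (IOExternalMethodDispatch *) &sMethods[selector];
            if (!target)
                target = this;
        }
        // With a dispatch entry the superclass performs the count/size checks and
        // the call; without one it routes through the legacy shim_io_* handlers.
        return IOUserClient::externalMethod( selector, args, dispatch, target, reference );
    }

    IOReturn MyUserClient::sAddNumbers( OSObject * target, void * reference,
                                        IOExternalMethodArguments * args )
    {
        // Argument counts were already validated against the dispatch entry.
        args->scalarOutput[0] = args->scalarInput[0] + args->scalarInput[1];
        return kIOReturnSuccess;
    }

From user space such a method is reached through IOConnectCallScalarMethod() or IOConnectCallMethod(), which marshal into the is_io_connect_method() MIG routine shown earlier in this diff; the older io_connect_method_scalarI_* calls keep working because they are now thin wrappers that widen their arguments and forward to the same path.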