/*
 * Common backend for the io_service_add_notification* MIG routines.
 *
 * 'matching' holds an XML-serialized OSDictionary of 'matching_size' bytes;
 * 'notification_type' names the IOService lifecycle event to watch
 * (publish / matched / terminated); Mach messages are delivered to 'port'
 * carrying the caller-supplied 'reference' bytes. 'client64' selects the
 * 32- vs 64-bit async reference layout. On success *notification holds a
 * retained IOServiceUserNotification for the caller.
 */
static kern_return_t
internal_io_service_add_notification(
    mach_port_t master_port,
    io_name_t notification_type,
    const char * matching,
    size_t matching_size,
    mach_port_t port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    io_object_t * notification )
{
    IOServiceUserNotification * userNotify = 0;
    IONotifier * notify = 0;
    const OSSymbol * sym;
    OSDictionary * dict;
    IOReturn err;
    unsigned long int userMsgType;

    // Only the holder of the master device port may install notifications.
    if (master_port != master_device_port) {
        return kIOReturnNotPrivileged;
    }

    do {
        err = kIOReturnNoResources;

        // Bound the serialized dictionary before attempting to unserialize.
        if (matching_size > (sizeof(io_struct_inband_t) * 1024)) {
            return kIOReturnMessageTooLarge;
        }

        // NOTE(review): on symbol-allocation failure there is no 'continue';
        // execution falls through with sym == NULL. The type comparisons
        // below then all miss, and err may still be overwritten to success
        // if the notification installs — confirm this is intended.
        if (!(sym = OSSymbol::withCString( notification_type ))) {
            err = kIOReturnNoResources;
        }

        assert(matching_size);
        dict = OSDynamicCast(OSDictionary, OSUnserializeXML(matching, matching_size));
        if (!dict) {
            err = kIOReturnBadArgument;
            continue;
        }

        // Map the notification-type symbol to the user message type id.
        if ((sym == gIOPublishNotification)
            || (sym == gIOFirstPublishNotification)) {
            userMsgType = kIOServicePublishNotificationType;
        } else if ((sym == gIOMatchedNotification)
            || (sym == gIOFirstMatchNotification)) {
            userMsgType = kIOServiceMatchedNotificationType;
        } else if ((sym == gIOTerminatedNotification)
            || (sym == gIOWillTerminateNotification)) {
            userMsgType = kIOServiceTerminatedNotificationType;
        } else {
            // Unrecognized type: pass the sentinel value through.
            userMsgType = kLastIOKitNotificationType;
        }

        userNotify = new IOServiceUserNotification;

        if (userNotify && !userNotify->init( port, userMsgType,
            reference, referenceSize, client64)) {
            userNotify->release();
            userNotify = 0;
        }
        if (!userNotify) {
            continue;
        }

        notify = IOService::addMatchingNotification( sym, dict,
            &userNotify->_handler, userNotify );
        if (notify) {
            *notification = userNotify;
            userNotify->setNotification( notify );
            err = kIOReturnSuccess;
        } else {
            err = kIOReturnUnsupported;
        }
    } while (false);

    // Tear down the half-built user notification on any failure path.
    if ((kIOReturnSuccess != err) && userNotify) {
        userNotify->invalidatePort();
        userNotify->release();
        userNotify = 0;
    }

    if (sym) {
        sym->release();
    }
    if (dict) {
        dict->release();
    }

    return err;
}
+
+
/* Routine io_service_add_notification */
/*
 * Legacy XML-string entry point; superseded by the _bin variant.
 * Always rejected — callers must use the newer interfaces.
 */
kern_return_t
is_io_service_add_notification(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    io_async_ref_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t * notification )
{
    return kIOReturnUnsupported;
}
+
/* Routine io_service_add_notification_64 */
/*
 * Legacy 64-bit XML-string entry point; superseded by the _bin_64 variant.
 * Always rejected — callers must use the newer interfaces.
 */
kern_return_t
is_io_service_add_notification_64(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t wake_port,
    io_async_ref64_t reference,
    mach_msg_type_number_t referenceCnt,
    io_object_t *notification )
{
    return kIOReturnUnsupported;
}
+
+/* Routine io_service_add_notification_bin */
+kern_return_t
+is_io_service_add_notification_bin
+(
+ mach_port_t master_port,
+ io_name_t notification_type,
+ io_struct_inband_t matching,
+ mach_msg_type_number_t matchingCnt,
+ mach_port_t wake_port,
+ io_async_ref_t reference,
+ mach_msg_type_number_t referenceCnt,
+ io_object_t *notification)
+{
+ io_async_ref_t zreference;
+
+ if (referenceCnt > ASYNC_REF_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_notification(master_port, notification_type,
+ matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
+ false, notification);
+}
+
+/* Routine io_service_add_notification_bin_64 */
+kern_return_t
+is_io_service_add_notification_bin_64
+(
+ mach_port_t master_port,
+ io_name_t notification_type,
+ io_struct_inband_t matching,
+ mach_msg_type_number_t matchingCnt,
+ mach_port_t wake_port,
+ io_async_ref64_t reference,
+ mach_msg_type_number_t referenceCnt,
+ io_object_t *notification)
+{
+ io_async_ref64_t zreference;
+
+ if (referenceCnt > ASYNC_REF64_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_notification(master_port, notification_type,
+ matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
+ true, notification);
+}
+
/*
 * Out-of-line variant: 'matching' arrives as a vm_map_copy_t rather than
 * inband bytes. Copies it into the kernel map, forwards to the common
 * implementation, and reports that result via *result.
 *
 * Returns the vm_map_copyout() status; once copyout succeeds this routine
 * must return KERN_SUCCESS so MIG does not re-dispose of the copy object
 * (see comment below) — the real outcome travels in *result.
 */
static kern_return_t
internal_io_service_add_notification_ool(
    mach_port_t master_port,
    io_name_t notification_type,
    io_buf_ptr_t matching,
    mach_msg_type_number_t matchingCnt,
    mach_port_t wake_port,
    void * reference,
    vm_size_t referenceSize,
    bool client64,
    kern_return_t *result,
    io_object_t *notification )
{
    kern_return_t kr;
    vm_offset_t data;
    vm_map_offset_t map_data;

    kr = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) matching );
    data = CAST_DOWN(vm_offset_t, map_data);

    if (KERN_SUCCESS == kr) {
        // must return success after vm_map_copyout() succeeds
        // and mig will copy out objects on success
        *notification = 0;
        *result = internal_io_service_add_notification( master_port, notification_type,
            (char *) data, matchingCnt, wake_port, reference, referenceSize, client64, notification );
        // Release the kernel-map copy of the matching data.
        vm_deallocate( kernel_map, data, matchingCnt );
    }

    return kr;
}
+
+/* Routine io_service_add_notification_ool */
+kern_return_t
+is_io_service_add_notification_ool(
+ mach_port_t master_port,
+ io_name_t notification_type,
+ io_buf_ptr_t matching,
+ mach_msg_type_number_t matchingCnt,
+ mach_port_t wake_port,
+ io_async_ref_t reference,
+ mach_msg_type_number_t referenceCnt,
+ kern_return_t *result,
+ io_object_t *notification )
+{
+ io_async_ref_t zreference;
+
+ if (referenceCnt > ASYNC_REF_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_notification_ool(master_port, notification_type,
+ matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref_t),
+ false, result, notification);
+}
+
+/* Routine io_service_add_notification_ool_64 */
+kern_return_t
+is_io_service_add_notification_ool_64(
+ mach_port_t master_port,
+ io_name_t notification_type,
+ io_buf_ptr_t matching,
+ mach_msg_type_number_t matchingCnt,
+ mach_port_t wake_port,
+ io_async_ref64_t reference,
+ mach_msg_type_number_t referenceCnt,
+ kern_return_t *result,
+ io_object_t *notification )
+{
+ io_async_ref64_t zreference;
+
+ if (referenceCnt > ASYNC_REF64_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_notification_ool(master_port, notification_type,
+ matching, matchingCnt, wake_port, &zreference[0], sizeof(io_async_ref64_t),
+ true, result, notification);
+}
+
/* Routine io_service_add_notification_old */
/*
 * Oldest compatibility entry point: a single natural_t reference word,
 * widened into the modern variable-length reference interface.
 */
kern_return_t
is_io_service_add_notification_old(
    mach_port_t master_port,
    io_name_t notification_type,
    io_string_t matching,
    mach_port_t port,
    // for binary compatibility reasons, this must be natural_t for ILP32
    natural_t ref,
    io_object_t * notification )
{
    // Forward the single word as a one-element reference array.
    return is_io_service_add_notification( master_port, notification_type,
        matching, port, &ref, 1, notification );
}
+
+
+static kern_return_t
+internal_io_service_add_interest_notification(
+ io_object_t _service,
+ io_name_t type_of_interest,
+ mach_port_t port,
+ void * reference,
+ vm_size_t referenceSize,
+ bool client64,
+ io_object_t * notification )
+{
+ IOServiceMessageUserNotification * userNotify = 0;
+ IONotifier * notify = 0;
+ const OSSymbol * sym;
+ IOReturn err;
+
+ CHECK( IOService, _service, service );
+
+ err = kIOReturnNoResources;
+ if ((sym = OSSymbol::withCString( type_of_interest ))) {
+ do {
+ userNotify = new IOServiceMessageUserNotification;
+
+ if (userNotify && !userNotify->init( port, kIOServiceMessageNotificationType,
+ reference, referenceSize,
+ kIOUserNotifyMaxMessageSize,
+ client64 )) {
+ userNotify->release();
+ userNotify = 0;
+ }
+ if (!userNotify) {
+ continue;
+ }
+
+ notify = service->registerInterest( sym,
+ &userNotify->_handler, userNotify );
+ if (notify) {
+ *notification = userNotify;
+ userNotify->setNotification( notify );
+ err = kIOReturnSuccess;
+ } else {
+ err = kIOReturnUnsupported;
+ }
+
+ sym->release();
+ } while (false);
+ }
+
+ if ((kIOReturnSuccess != err) && userNotify) {
+ userNotify->invalidatePort();
+ userNotify->release();
+ userNotify = 0;
+ }
+
+ return err;
+}
+
+/* Routine io_service_add_message_notification */
+kern_return_t
+is_io_service_add_interest_notification(
+ io_object_t service,
+ io_name_t type_of_interest,
+ mach_port_t port,
+ io_async_ref_t reference,
+ mach_msg_type_number_t referenceCnt,
+ io_object_t * notification )
+{
+ io_async_ref_t zreference;
+
+ if (referenceCnt > ASYNC_REF_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_interest_notification(service, type_of_interest,
+ port, &zreference[0], sizeof(io_async_ref_t), false, notification);
+}
+
+/* Routine io_service_add_interest_notification_64 */
+kern_return_t
+is_io_service_add_interest_notification_64(
+ io_object_t service,
+ io_name_t type_of_interest,
+ mach_port_t wake_port,
+ io_async_ref64_t reference,
+ mach_msg_type_number_t referenceCnt,
+ io_object_t *notification )
+{
+ io_async_ref64_t zreference;
+
+ if (referenceCnt > ASYNC_REF64_COUNT) {
+ return kIOReturnBadArgument;
+ }
+ bcopy(&reference[0], &zreference[0], referenceCnt * sizeof(zreference[0]));
+ bzero(&zreference[referenceCnt], (ASYNC_REF64_COUNT - referenceCnt) * sizeof(zreference[0]));
+
+ return internal_io_service_add_interest_notification(service, type_of_interest,
+ wake_port, &zreference[0], sizeof(io_async_ref64_t), true, notification);
+}
+
+
/* Routine io_service_acknowledge_notification */
/*
 * Acknowledges a previously delivered service notification. 'notify_ref'
 * is the opaque reference the kernel handed to the client; 'response'
 * carries the client's option bits back to the service.
 */
kern_return_t
is_io_service_acknowledge_notification(
    io_object_t _service,
    natural_t notify_ref,
    natural_t response )
{
    // CHECK validates the cast and declares 'service'; returns on mismatch.
    CHECK( IOService, _service, service );

    return service->acknowledgeNotification((IONotificationRef)(uintptr_t) notify_ref,
        (IOOptionBits) response );
}
+
/* Routine io_connect_get_semaphore */
/*
 * Returns the user client's notification semaphore for the given
 * notification type, for callers that wait on semaphores instead of ports.
 */
kern_return_t
is_io_connect_get_notification_semaphore(
    io_connect_t connection,
    natural_t notification_type,
    semaphore_t *semaphore )
{
    // CHECK validates the cast and declares 'client'; returns on mismatch.
    CHECK( IOUserClient, connection, client );

    IOStatisticsClientCall();
    return client->getNotificationSemaphore((UInt32) notification_type,
        semaphore );
}
+
+/* Routine io_registry_get_root_entry */
+kern_return_t
+is_io_registry_get_root_entry(
+ mach_port_t master_port,
+ io_object_t *root )
+{
+ IORegistryEntry * entry;
+
+ if (master_port != master_device_port) {
+ return kIOReturnNotPrivileged;
+ }
+
+ entry = IORegistryEntry::getRegistryRoot();
+ if (entry) {
+ entry->retain();
+ }
+ *root = entry;
+
+ return kIOReturnSuccess;
+}
+
+/* Routine io_registry_create_iterator */
+kern_return_t
+is_io_registry_create_iterator(
+ mach_port_t master_port,
+ io_name_t plane,
+ uint32_t options,
+ io_object_t *iterator )
+{
+ if (master_port != master_device_port) {
+ return kIOReturnNotPrivileged;
+ }
+
+ *iterator = IOUserIterator::withIterator(
+ IORegistryIterator::iterateOver(
+ IORegistryEntry::getPlane( plane ), options ));
+
+ return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
+}
+
+/* Routine io_registry_entry_create_iterator */
+kern_return_t
+is_io_registry_entry_create_iterator(
+ io_object_t registry_entry,
+ io_name_t plane,
+ uint32_t options,
+ io_object_t *iterator )
+{
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ *iterator = IOUserIterator::withIterator(
+ IORegistryIterator::iterateOver( entry,
+ IORegistryEntry::getPlane( plane ), options ));
+
+ return *iterator ? kIOReturnSuccess : kIOReturnBadArgument;
+}
+
/* Routine io_registry_iterator_enter */
/*
 * Recurses the registry iterator into the current entry's children.
 * CHECKLOCKED declares both 'oIter' (the IOUserIterator wrapper, whose
 * lock serializes user calls) and 'iter' (the wrapped IORegistryIterator).
 */
kern_return_t
is_io_registry_iterator_enter_entry(
    io_object_t iterator )
{
    CHECKLOCKED( IORegistryIterator, iterator, iter );

    // Serialize against concurrent user calls on the same iterator object.
    IOLockLock(oIter->lock);
    iter->enterEntry();
    IOLockUnlock(oIter->lock);

    return kIOReturnSuccess;
}
+
/* Routine io_registry_iterator_exit */
/*
 * Pops the registry iterator back out of the current recursion level.
 * Returns kIOReturnNoDevice when there is no level to exit.
 */
kern_return_t
is_io_registry_iterator_exit_entry(
    io_object_t iterator )
{
    bool didIt;

    CHECKLOCKED( IORegistryIterator, iterator, iter );

    // Serialize against concurrent user calls on the same iterator object.
    IOLockLock(oIter->lock);
    didIt = iter->exitEntry();
    IOLockUnlock(oIter->lock);

    return didIt ? kIOReturnSuccess : kIOReturnNoDevice;
}
+
+/* Routine io_registry_entry_from_path */
+kern_return_t
+is_io_registry_entry_from_path(
+ mach_port_t master_port,
+ io_string_t path,
+ io_object_t *registry_entry )
+{
+ IORegistryEntry * entry;
+
+ if (master_port != master_device_port) {
+ return kIOReturnNotPrivileged;
+ }
+
+ entry = IORegistryEntry::fromPath( path );
+
+ *registry_entry = entry;
+
+ return kIOReturnSuccess;
+}
+
+
/* Routine io_registry_entry_from_path */
/*
 * Path lookup with an optional out-of-line path. If the inband 'path' is
 * non-empty it is used directly; otherwise 'path_ool' (a vm_map_copy_t of
 * path_oolCnt bytes, which must be NUL-terminated) is copied into the
 * kernel map and used. The MIG return value reflects only the copyout;
 * the lookup outcome travels in *result.
 */
kern_return_t
is_io_registry_entry_from_path_ool(
    mach_port_t master_port,
    io_string_inband_t path,
    io_buf_ptr_t path_ool,
    mach_msg_type_number_t path_oolCnt,
    kern_return_t *result,
    io_object_t *registry_entry)
{
    IORegistryEntry * entry;
    vm_map_offset_t map_data;
    const char * cpath;
    IOReturn res;
    kern_return_t err;

    if (master_port != master_device_port) {
        return kIOReturnNotPrivileged;
    }

    map_data = 0;
    entry = 0;
    res = err = KERN_SUCCESS;
    if (path[0]) {
        // Inband path present: prefer it, ignore the OOL buffer.
        cpath = path;
    } else {
        if (!path_oolCnt) {
            return kIOReturnBadArgument;
        }
        // Bound the OOL path size before mapping it in.
        if (path_oolCnt > (sizeof(io_struct_inband_t) * 1024)) {
            return kIOReturnMessageTooLarge;
        }

        err = vm_map_copyout(kernel_map, &map_data, (vm_map_copy_t) path_ool);
        if (KERN_SUCCESS == err) {
            // must return success to mig after vm_map_copyout() succeeds, so result is actual
            cpath = CAST_DOWN(const char *, map_data);
            // The buffer must end in a NUL to be usable as a C string.
            if (cpath[path_oolCnt - 1]) {
                res = kIOReturnBadArgument;
            }
        }
    }

    if ((KERN_SUCCESS == err) && (KERN_SUCCESS == res)) {
        entry = IORegistryEntry::fromPath(cpath);
        res = entry ? kIOReturnSuccess : kIOReturnNotFound;
    }

    // Release the kernel-map copy of the OOL path, if one was made.
    if (map_data) {
        vm_deallocate(kernel_map, map_data, path_oolCnt);
    }

    if (KERN_SUCCESS != err) {
        res = err;
    }
    *registry_entry = entry;
    *result = res;

    return err;
}
+
+
/* Routine io_registry_entry_in_plane */
/*
 * Reports whether 'registry_entry' is attached in the named plane.
 */
kern_return_t
is_io_registry_entry_in_plane(
    io_object_t registry_entry,
    io_name_t plane,
    boolean_t *inPlane )
{
    CHECK( IORegistryEntry, registry_entry, entry );

    *inPlane = entry->inPlane( IORegistryEntry::getPlane( plane ));

    return kIOReturnSuccess;
}
+
+
+/* Routine io_registry_entry_get_path */
+kern_return_t
+is_io_registry_entry_get_path(
+ io_object_t registry_entry,
+ io_name_t plane,
+ io_string_t path )
+{
+ int length;
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ length = sizeof(io_string_t);
+ if (entry->getPath( path, &length, IORegistryEntry::getPlane( plane ))) {
+ return kIOReturnSuccess;
+ } else {
+ return kIOReturnBadArgument;
+ }
+}
+
/* Routine io_registry_entry_get_path */
/*
 * Path retrieval with out-of-line fallback: first tries the inband buffer;
 * when the path does not fit, retries into a kMaxPath-byte temporary and
 * copies that out-of-line to the caller (*path_ool / *path_oolCnt).
 */
kern_return_t
is_io_registry_entry_get_path_ool(
    io_object_t registry_entry,
    io_name_t plane,
    io_string_inband_t path,
    io_buf_ptr_t *path_ool,
    mach_msg_type_number_t *path_oolCnt)
{
    enum { kMaxPath = 16384 };  // upper bound for the OOL retry buffer
    IOReturn err;
    int length;
    char * buf;

    CHECK( IORegistryEntry, registry_entry, entry );

    *path_ool = NULL;
    *path_oolCnt = 0;
    length = sizeof(io_string_inband_t);
    if (entry->getPath(path, &length, IORegistryEntry::getPlane(plane))) {
        // Path fit inband; nothing to copy out-of-line.
        err = kIOReturnSuccess;
    } else {
        length = kMaxPath;
        buf = IONew(char, length);
        if (!buf) {
            err = kIOReturnNoMemory;
        } else if (!entry->getPath(buf, &length, IORegistryEntry::getPlane(plane))) {
            err = kIOReturnError;
        } else {
            // getPath updated 'length' to the actual size used.
            *path_oolCnt = length;
            err = copyoutkdata(buf, length, path_ool);
        }
        if (buf) {
            // Allocation size was kMaxPath regardless of how much was used.
            IODelete(buf, char, kMaxPath);
        }
    }

    return err;
}
+
+
+/* Routine io_registry_entry_get_name */
+kern_return_t
+is_io_registry_entry_get_name(
+ io_object_t registry_entry,
+ io_name_t name )
+{
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ strncpy( name, entry->getName(), sizeof(io_name_t));
+
+ return kIOReturnSuccess;
+}
+
+/* Routine io_registry_entry_get_name_in_plane */
+kern_return_t
+is_io_registry_entry_get_name_in_plane(
+ io_object_t registry_entry,
+ io_name_t planeName,
+ io_name_t name )
+{
+ const IORegistryPlane * plane;
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ if (planeName[0]) {
+ plane = IORegistryEntry::getPlane( planeName );
+ } else {
+ plane = 0;
+ }
+
+ strncpy( name, entry->getName( plane), sizeof(io_name_t));
+
+ return kIOReturnSuccess;
+}
+
+/* Routine io_registry_entry_get_location_in_plane */
+kern_return_t
+is_io_registry_entry_get_location_in_plane(
+ io_object_t registry_entry,
+ io_name_t planeName,
+ io_name_t location )
+{
+ const IORegistryPlane * plane;
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ if (planeName[0]) {
+ plane = IORegistryEntry::getPlane( planeName );
+ } else {
+ plane = 0;
+ }
+
+ const char * cstr = entry->getLocation( plane );
+
+ if (cstr) {
+ strncpy( location, cstr, sizeof(io_name_t));
+ return kIOReturnSuccess;
+ } else {
+ return kIOReturnNotFound;
+ }
+}
+
+/* Routine io_registry_entry_get_registry_entry_id */
+kern_return_t
+is_io_registry_entry_get_registry_entry_id(
+ io_object_t registry_entry,
+ uint64_t *entry_id )
+{
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ *entry_id = entry->getRegistryEntryID();
+
+ return kIOReturnSuccess;
+}
+
/* Routine io_registry_entry_get_property */
/*
 * Copies a property's raw bytes inband. Supports OSData, OSString,
 * OSBoolean ("Yes"/"No" text) and OSNumber (little/big-endian adjusted
 * integer bytes); any other property type yields kIOReturnBadArgument.
 * *dataCnt is in/out: the caller's capacity in, the actual length out.
 */
kern_return_t
is_io_registry_entry_get_property_bytes(
    io_object_t registry_entry,
    io_name_t property_name,
    io_struct_inband_t buf,
    mach_msg_type_number_t *dataCnt )
{
    OSObject * obj;
    OSData * data;
    OSString * str;
    OSBoolean * boo;
    OSNumber * off;
    UInt64 offsetBytes;
    unsigned int len = 0;
    const void * bytes = 0;
    IOReturn ret = kIOReturnSuccess;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy hook: per-property read access control.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
        return kIOReturnNotPermitted;
    }
#endif

    obj = entry->copyProperty(property_name);
    if (!obj) {
        return kIOReturnNoResources;
    }

    // One day OSData will be a common container base class
    // until then...
    if ((data = OSDynamicCast( OSData, obj ))) {
        len = data->getLength();
        bytes = data->getBytesNoCopy();
        // Non-serializable data is withheld: report zero length.
        if (!data->isSerializable()) {
            len = 0;
        }
    } else if ((str = OSDynamicCast( OSString, obj ))) {
        // +1 so the trailing NUL travels with the string bytes.
        len = str->getLength() + 1;
        bytes = str->getCStringNoCopy();
    } else if ((boo = OSDynamicCast( OSBoolean, obj ))) {
        // Booleans are rendered as the literal text "Yes"/"No" incl. NUL.
        len = boo->isTrue() ? sizeof("Yes") : sizeof("No");
        bytes = boo->isTrue() ? "Yes" : "No";
    } else if ((off = OSDynamicCast( OSNumber, obj ))) {
        offsetBytes = off->unsigned64BitValue();
        len = off->numberOfBytes();
        if (len > sizeof(offsetBytes)) {
            len = sizeof(offsetBytes);
        }
        bytes = &offsetBytes;
#ifdef __BIG_ENDIAN__
        // On big-endian, the significant bytes sit at the high end of
        // the UInt64; skip the leading padding.
        bytes = (const void *)
            (((UInt32) bytes) + (sizeof(UInt64) - len));
#endif
    } else {
        ret = kIOReturnBadArgument;
    }

    if (bytes) {
        // Caller's buffer must be large enough for the whole value.
        if (*dataCnt < len) {
            ret = kIOReturnIPCError;
        } else {
            *dataCnt = len;
            bcopy( bytes, buf, len );
        }
    }
    obj->release();

    return ret;
}
+
+
/* Routine io_registry_entry_get_property */
/*
 * Serializes a single property to XML and copies it out-of-line to the
 * caller (*properties / *propertiesCnt, freed by MIG conventions).
 */
kern_return_t
is_io_registry_entry_get_property(
    io_object_t registry_entry,
    io_name_t property_name,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t len;
    OSObject * obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy hook: per-property read access control.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
        return kIOReturnNotPermitted;
    }
#endif

    obj = entry->copyProperty(property_name);
    if (!obj) {
        return kIOReturnNotFound;
    }

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (!s) {
        obj->release();
        return kIOReturnNoMemory;
    }

    if (obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        // copyoutkdata hands the serialized bytes to user space.
        err = copyoutkdata( s->text(), len, properties );
    } else {
        err = kIOReturnUnsupported;
    }

    s->release();
    obj->release();

    return err;
}
+
/* Routine io_registry_entry_get_property_recursively */
/*
 * Like is_io_registry_entry_get_property, but the lookup may walk the
 * registry in 'plane' according to 'options' (e.g. recursive search).
 */
kern_return_t
is_io_registry_entry_get_property_recursively(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t len;
    OSObject * obj;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy hook: per-property read access control.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
        return kIOReturnNotPermitted;
    }
#endif

    obj = entry->copyProperty( property_name,
        IORegistryEntry::getPlane( plane ), options );
    if (!obj) {
        return kIOReturnNotFound;
    }

    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (!s) {
        obj->release();
        return kIOReturnNoMemory;
    }

    if (obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        // copyoutkdata hands the serialized bytes to user space.
        err = copyoutkdata( s->text(), len, properties );
    } else {
        err = kIOReturnUnsupported;
    }

    s->release();
    obj->release();

    return err;
}
+
/* Routine io_registry_entry_get_properties */
/*
 * Legacy XML whole-dictionary getter; superseded by the _bin variant.
 * Always rejected.
 */
kern_return_t
is_io_registry_entry_get_properties(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    return kIOReturnUnsupported;
}
+
+#if CONFIG_MACF
+
/*
 * Context passed to GetPropertiesEditor while serializing a registry
 * entry's properties under MACF filtering.
 */
struct GetPropertiesEditorRef {
    kauth_cred_t cred;       // credential the MAC check is made against
    IORegistryEntry * entry; // entry whose properties are being serialized
    OSCollection * root;     // top-level container, captured on first call
};
+
/*
 * OSSerialize editor callback: filters individual top-level properties
 * out of the serialization when the MAC policy denies them. Values in
 * nested containers (ref->root != container) pass through unfiltered.
 * Returns the (retained) value to serialize, or NULL to omit it.
 */
static const OSMetaClassBase *
GetPropertiesEditor(void * reference,
    OSSerialize * s,
    OSCollection * container,
    const OSSymbol * name,
    const OSMetaClassBase * value)
{
    GetPropertiesEditorRef * ref = (typeof(ref))reference;

    // The first container seen is the root property table.
    if (!ref->root) {
        ref->root = container;
    }
    if (ref->root == container) {
        if (0 != mac_iokit_check_get_property(ref->cred, ref->entry, name->getCStringNoCopy())) {
            // Denied: omit this property from the serialized output.
            value = 0;
        }
    }
    if (value) {
        // Serializer expects a retained reference for accepted values.
        value->retain();
    }
    return value;
}
+
+#endif /* CONFIG_MACF */
+
/* Routine io_registry_entry_get_properties */
/*
 * Serializes the entry's whole property table in the binary format and
 * copies it out-of-line. Under MACF, a per-property editor callback can
 * strip entries the caller's credential may not read.
 */
kern_return_t
is_io_registry_entry_get_properties_bin(
    io_object_t registry_entry,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt)
{
    kern_return_t err = kIOReturnSuccess;
    vm_size_t len;
    OSSerialize * s;
    OSSerialize::Editor editor = 0;
    void * editRef = 0;

    CHECK(IORegistryEntry, registry_entry, entry);

#if CONFIG_MACF
    // Only install the filter when policy asks for property filtering.
    GetPropertiesEditorRef ref;
    if (mac_iokit_check_filter_properties(kauth_cred_get(), entry)) {
        editor = &GetPropertiesEditor;
        editRef = &ref;
        ref.cred = kauth_cred_get();
        ref.entry = entry;
        ref.root = 0;
    }
#endif

    s = OSSerialize::binaryWithCapacity(4096, editor, editRef);
    if (!s) {
        return kIOReturnNoMemory;
    }

    if (!entry->serializeProperties(s)) {
        err = kIOReturnUnsupported;
    }

    if (kIOReturnSuccess == err) {
        len = s->getLength();
        *propertiesCnt = len;
        // copyoutkdata hands the serialized bytes to user space.
        err = copyoutkdata(s->text(), len, properties);
    }
    s->release();

    return err;
}
+
/* Routine io_registry_entry_get_property_bin */
/*
 * Binary-format single-property getter. Special-cases the property-keys
 * pseudo-property, supports recursive plane lookups via 'options', and
 * honors the remove-on-read property set.
 */
kern_return_t
is_io_registry_entry_get_property_bin(
    io_object_t registry_entry,
    io_name_t plane,
    io_name_t property_name,
    uint32_t options,
    io_buf_ptr_t *properties,
    mach_msg_type_number_t *propertiesCnt )
{
    kern_return_t err;
    vm_size_t len;
    OSObject * obj;
    const OSSymbol * sym;

    CHECK( IORegistryEntry, registry_entry, entry );

#if CONFIG_MACF
    // MAC policy hook: per-property read access control.
    if (0 != mac_iokit_check_get_property(kauth_cred_get(), entry, property_name)) {
        return kIOReturnNotPermitted;
    }
#endif

    sym = OSSymbol::withCString(property_name);
    if (!sym) {
        return kIOReturnNoMemory;
    }

    if (gIORegistryEntryPropertyKeysKey == sym) {
        // Pseudo-property: return the list of property keys instead.
        obj = entry->copyPropertyKeys();
    } else {
        // Recursive lookup only when requested and a plane is named.
        if ((kIORegistryIterateRecursively & options) && plane[0]) {
            obj = entry->copyProperty(property_name,
                IORegistryEntry::getPlane(plane), options );
        } else {
            obj = entry->copyProperty(property_name);
        }
        // Some properties are consumed by reading them.
        if (obj && gIORemoveOnReadProperties->containsObject(sym)) {
            entry->removeProperty(sym);
        }
    }

    sym->release();
    if (!obj) {
        return kIOReturnNotFound;
    }

    OSSerialize * s = OSSerialize::binaryWithCapacity(4096);
    if (!s) {
        obj->release();
        return kIOReturnNoMemory;
    }

    if (obj->serialize( s )) {
        len = s->getLength();
        *propertiesCnt = len;
        // copyoutkdata hands the serialized bytes to user space.
        err = copyoutkdata( s->text(), len, properties );
    } else {
        err = kIOReturnUnsupported;
    }

    s->release();
    obj->release();

    return err;
}
+
+
/* Routine io_registry_entry_set_properties */
/*
 * Replaces/updates the entry's properties from an XML-serialized object
 * delivered out-of-line. The MIG return value reflects only the copyout;
 * the setProperties()/MAC outcome travels in *result.
 */
kern_return_t
is_io_registry_entry_set_properties
(
    io_object_t registry_entry,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result)
{
    OSObject * obj;
    kern_return_t err;
    IOReturn res;
    vm_offset_t data;
    vm_map_offset_t map_data;

    CHECK( IORegistryEntry, registry_entry, entry );

    // Bound the serialized payload before mapping it in.
    if (propertiesCnt > sizeof(io_struct_inband_t) * 1024) {
        return kIOReturnMessageTooLarge;
    }

    err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
    data = CAST_DOWN(vm_offset_t, map_data);

    if (KERN_SUCCESS == err) {
        FAKE_STACK_FRAME(entry->getMetaClass());

        // must return success after vm_map_copyout() succeeds
        obj = OSUnserializeXML((const char *) data, propertiesCnt );
        vm_deallocate( kernel_map, data, propertiesCnt );

        if (!obj) {
            res = kIOReturnBadArgument;
        }
#if CONFIG_MACF
        // MAC policy hook: may veto the property update entirely.
        else if (0 != mac_iokit_check_set_properties(kauth_cred_get(),
            registry_entry, obj)) {
            res = kIOReturnNotPermitted;
        }
#endif
        else {
            res = entry->setProperties( obj );
        }

        if (obj) {
            obj->release();
        }

        FAKE_STACK_FRAME_END();
    } else {
        res = err;
    }

    *result = res;
    return err;
}
+
+/* Routine io_registry_entry_get_child_iterator */
+kern_return_t
+is_io_registry_entry_get_child_iterator(
+ io_object_t registry_entry,
+ io_name_t plane,
+ io_object_t *iterator )
+{
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ *iterator = IOUserIterator::withIterator(entry->getChildIterator(
+ IORegistryEntry::getPlane( plane )));
+
+ return kIOReturnSuccess;
+}
+
+/* Routine io_registry_entry_get_parent_iterator */
+kern_return_t
+is_io_registry_entry_get_parent_iterator(
+ io_object_t registry_entry,
+ io_name_t plane,
+ io_object_t *iterator)
+{
+ CHECK( IORegistryEntry, registry_entry, entry );
+
+ *iterator = IOUserIterator::withIterator(entry->getParentIterator(
+ IORegistryEntry::getPlane( plane )));
+
+ return kIOReturnSuccess;
+}
+
/* Routine io_service_get_busy_state */
/*
 * Returns the service's current busy count.
 */
kern_return_t
is_io_service_get_busy_state(
    io_object_t _service,
    uint32_t *busyState )
{
    CHECK( IOService, _service, service );

    *busyState = service->getBusyState();

    return kIOReturnSuccess;
}
+
/* Routine io_service_get_state */
/*
 * Returns the service's state bits, busy count, and accumulated busy
 * time in one call.
 */
kern_return_t
is_io_service_get_state(
    io_object_t _service,
    uint64_t *state,
    uint32_t *busy_state,
    uint64_t *accumulated_busy_time )
{
    CHECK( IOService, _service, service );

    *state = service->getState();
    *busy_state = service->getBusyState();
    *accumulated_busy_time = service->getAccumulatedBusyTime();

    return kIOReturnSuccess;
}
+
/* Routine io_service_wait_quiet */
/*
 * Blocks until the service becomes non-busy or the timeout elapses.
 * The mach_timespec_t is converted to a nanosecond count for waitQuiet().
 */
kern_return_t
is_io_service_wait_quiet(
    io_object_t _service,
    mach_timespec_t wait_time )
{
    uint64_t timeoutNS;

    CHECK( IOService, _service, service );

    // seconds * kSecondScale + nanoseconds, accumulated in 64 bits.
    timeoutNS = wait_time.tv_sec;
    timeoutNS *= kSecondScale;
    timeoutNS += wait_time.tv_nsec;

    return service->waitQuiet(timeoutNS);
}
+
/* Routine io_service_request_probe */
/*
 * Asks the service to re-probe with the given option bits.
 */
kern_return_t
is_io_service_request_probe(
    io_object_t _service,
    uint32_t options )
{
    CHECK( IOService, _service, service );

    return service->requestProbe( options );
}
+
/* Routine io_service_get_authorization_id */
/*
 * Returns the service's authorization id. Restricted to callers holding
 * administrator privilege.
 */
kern_return_t
is_io_service_get_authorization_id(
    io_object_t _service,
    uint64_t *authorization_id )
{
    kern_return_t kr;

    CHECK( IOService, _service, service );

    // Gate on administrator privilege for the calling task.
    kr = IOUserClient::clientHasPrivilege((void *) current_task(),
        kIOClientPrivilegeAdministrator );
    if (kIOReturnSuccess != kr) {
        return kr;
    }

    *authorization_id = service->getAuthorizationID();

    return kr;
}
+
/* Routine io_service_set_authorization_id */
/*
 * Sets the service's authorization id; access control is performed by
 * setAuthorizationID() itself.
 */
kern_return_t
is_io_service_set_authorization_id(
    io_object_t _service,
    uint64_t authorization_id )
{
    CHECK( IOService, _service, service );

    return service->setAuthorizationID( authorization_id );
}
+
/* Routine io_service_open_ndr */
/*
 * Opens a user-client connection to '_service' for 'owningTask'.
 * Creates the IOUserClient via newUserClient(), applies cross-endian
 * compatibility checks based on the caller's NDR record, runs the MAC
 * open check, and registers the owner. The MIG return value reflects
 * transport-level success; the open outcome travels in *result.
 */
kern_return_t
is_io_service_open_extended(
    io_object_t _service,
    task_t owningTask,
    uint32_t connect_type,
    NDR_record_t ndr,
    io_buf_ptr_t properties,
    mach_msg_type_number_t propertiesCnt,
    kern_return_t * result,
    io_object_t *connection )
{
    IOUserClient * client = 0;
    kern_return_t err = KERN_SUCCESS;
    IOReturn res = kIOReturnSuccess;
    OSDictionary * propertiesDict = 0;
    bool crossEndian;
    bool disallowAccess;

    CHECK( IOService, _service, service );

    if (!owningTask) {
        return kIOReturnBadArgument;
    }
    // Opens on behalf of another task are not supported.
    assert(owningTask == current_task());
    if (owningTask != current_task()) {
        return kIOReturnBadArgument;
    }

    do{
        // Caller-supplied properties are currently rejected; the disabled
        // block below shows the once-planned unserialization path.
        if (properties) {
            return kIOReturnUnsupported;
        }
#if 0
        {
            OSObject * obj;
            vm_offset_t data;
            vm_map_offset_t map_data;

            if (propertiesCnt > sizeof(io_struct_inband_t)) {
                return kIOReturnMessageTooLarge;
            }

            err = vm_map_copyout( kernel_map, &map_data, (vm_map_copy_t) properties );
            res = err;
            data = CAST_DOWN(vm_offset_t, map_data);
            if (KERN_SUCCESS == err) {
                // must return success after vm_map_copyout() succeeds
                obj = OSUnserializeXML((const char *) data, propertiesCnt );
                vm_deallocate( kernel_map, data, propertiesCnt );
                propertiesDict = OSDynamicCast(OSDictionary, obj);
                if (!propertiesDict) {
                    res = kIOReturnBadArgument;
                    if (obj) {
                        obj->release();
                    }
                }
            }
            if (kIOReturnSuccess != res) {
                break;
            }
        }
#endif
        // Record the caller's NDR when its integer representation differs,
        // so the user client can handle byte-swapping.
        crossEndian = (ndr.int_rep != NDR_record.int_rep);
        if (crossEndian) {
            if (!propertiesDict) {
                propertiesDict = OSDictionary::withCapacity(4);
            }
            OSData * data = OSData::withBytes(&ndr, sizeof(ndr));
            if (data) {
                if (propertiesDict) {
                    propertiesDict->setObject(kIOUserClientCrossEndianKey, data);
                }
                data->release();
            }
        }

        res = service->newUserClient( owningTask, (void *) owningTask,
            connect_type, propertiesDict, &client );

        if (propertiesDict) {
            propertiesDict->release();
        }

        if (res == kIOReturnSuccess) {
            assert( OSDynamicCast(IOUserClient, client));

            client->sharedInstance = (0 != client->getProperty(kIOUserClientSharedInstanceKey));
            client->closed = false;
            client->lock = IOLockAlloc();

            // Cross-endian callers are only allowed when the service or
            // the client explicitly opts in.
            disallowAccess = (crossEndian
                && (kOSBooleanTrue != service->getProperty(kIOUserClientCrossEndianCompatibleKey))
                && (kOSBooleanTrue != client->getProperty(kIOUserClientCrossEndianCompatibleKey)));
            if (disallowAccess) {
                res = kIOReturnUnsupported;
            }
#if CONFIG_MACF
            // MAC policy hook: may veto the open.
            else if (0 != mac_iokit_check_open(kauth_cred_get(), client, connect_type)) {
                res = kIOReturnNotPermitted;
            }
#endif

            if (kIOReturnSuccess == res) {
                res = client->registerOwner(owningTask);
            }

            // Any failure after creation closes and drops the client.
            if (kIOReturnSuccess != res) {
                IOStatisticsClientCall();
                client->clientClose();
                client->release();
                client = 0;
                break;
            }
            // Tag the connection with the creating process for diagnostics.
            OSString * creatorName = IOCopyLogNameForPID(proc_selfpid());
            if (creatorName) {
                client->setProperty(kIOUserClientCreatorKey, creatorName);
                creatorName->release();
            }
            client->setTerminateDefer(service, false);
        }
    }while (false);

    *connection = client;
    *result = res;

    return err;
}
+
+/* Routine io_service_close */
+kern_return_t
+is_io_service_close(
+    io_object_t connection )
+{
+    // A port that actually names a user client's mapping set is accepted
+    // silently — there is nothing to close.
+    OSSet * mappings = OSDynamicCast(OSSet, connection);
+    if (mappings) {
+        return kIOReturnSuccess;
+    }
+
+    CHECK( IOUserClient, connection, client );
+
+    IOStatisticsClientCall();
+
+    // Shared instances may be closed repeatedly; otherwise only the first
+    // caller to flip the 'closed' flag performs the close.
+    bool doClose = client->sharedInstance;
+    if (!doClose) {
+        doClose = OSCompareAndSwap8(0, 1, &client->closed);
+    }
+
+    if (doClose) {
+        IOLockLock(client->lock);
+        client->clientClose();
+        IOLockUnlock(client->lock);
+    } else {
+        IOLog("ignored is_io_service_close(0x%qx,%s)\n",
+            client->getRegistryEntryID(), client->getName());
+    }
+
+    return kIOReturnSuccess;
+}
+
+/* Routine io_connect_get_service */
+kern_return_t
+is_io_connect_get_service(
+    io_object_t connection,
+    io_object_t *service )
+{
+    CHECK( IOUserClient, connection, client );
+
+    // Hand back a retained reference to the service this client is open on.
+    IOService * provider = client->getService();
+    if (provider) {
+        provider->retain();
+    }
+
+    *service = provider;
+
+    return provider ? kIOReturnSuccess : kIOReturnUnsupported;
+}
+
+/* Routine io_connect_set_notification_port */
+kern_return_t
+is_io_connect_set_notification_port(
+    io_object_t connection,
+    uint32_t notification_type,
+    mach_port_t port,
+    uint32_t reference)
+{
+    CHECK( IOUserClient, connection, client );
+
+    IOStatisticsClientCall();
+    // Widen the 32-bit reference and register under the client's lock.
+    io_user_reference_t ref64 = (io_user_reference_t) reference;
+    IOLockLock(client->lock);
+    kern_return_t ret = client->registerNotificationPort(port,
+        notification_type, ref64);
+    IOLockUnlock(client->lock);
+    return ret;
+}
+
+/* Routine io_connect_set_notification_port */
+kern_return_t
+is_io_connect_set_notification_port_64(
+    io_object_t connection,
+    uint32_t notification_type,
+    mach_port_t port,
+    io_user_reference_t reference)
+{
+    CHECK( IOUserClient, connection, client );
+
+    IOStatisticsClientCall();
+    // Serialize port registration against other operations on this client.
+    IOLockLock(client->lock);
+    kern_return_t ret = client->registerNotificationPort(port,
+        notification_type, reference);
+    IOLockUnlock(client->lock);
+    return ret;
+}
+
+/* Routine io_connect_map_memory_into_task */
+// Map a user-client memory type into 'into_task'. On success, *address and
+// (optionally) *size describe the new mapping. Ownership of the IOMemoryMap
+// is either pushed to the target task as a port right (shared instance or
+// foreign task) or parked in client->mappings for later cleanup.
+kern_return_t
+is_io_connect_map_memory_into_task
+(
+ io_connect_t connection,
+ uint32_t memory_type,
+ task_t into_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ uint32_t flags
+)
+{
+ IOReturn err;
+ IOMemoryMap * map;
+
+ CHECK( IOUserClient, connection, client );
+
+ if (!into_task) {
+ return kIOReturnBadArgument;
+ }
+
+ IOStatisticsClientCall();
+ // 'flags' and the caller-supplied *address hint are passed straight
+ // through to the client's mapping implementation.
+ map = client->mapClientMemory64( memory_type, into_task, flags, *address );
+
+ if (map) {
+ *address = map->getAddress();
+ if (size) {
+ *size = map->getSize();
+ }
+
+ if (client->sharedInstance
+ || (into_task != current_task())) {
+ // push a name out to the task owning the map,
+ // so we can clean up maps
+ mach_port_name_t name __unused =
+ IOMachPort::makeSendRightForTask(
+ into_task, map, IKOT_IOKIT_OBJECT );
+ // The port now holds the reference; drop ours.
+ map->release();
+ } else {
+ // keep it with the user client
+ IOLockLock( gIOObjectPortLock);
+ if (0 == client->mappings) {
+ client->mappings = OSSet::withCapacity(2);
+ }
+ if (client->mappings) {
+ // The set retains the map; our extra reference is dropped below.
+ client->mappings->setObject( map);
+ }
+ IOLockUnlock( gIOObjectPortLock);
+ map->release();
+ }
+ err = kIOReturnSuccess;
+ } else {
+ err = kIOReturnBadArgument;
+ }
+
+ return err;
+}
+
+/* Routine is_io_connect_map_memory */
+kern_return_t
+is_io_connect_map_memory(
+    io_object_t connect,
+    uint32_t type,
+    task_t task,
+    uint32_t * mapAddr,
+    uint32_t * mapSize,
+    uint32_t flags )
+{
+    // 32-bit compatibility shim: widen the in/out address and size, call
+    // the 64-bit entry point, then narrow the results back.
+    mach_vm_address_t addr64 = SCALAR64(*mapAddr);
+    mach_vm_size_t size64 = SCALAR64(*mapSize);
+
+    IOReturn err = is_io_connect_map_memory_into_task(connect, type, task,
+        &addr64, &size64, flags);
+
+    *mapAddr = SCALAR32(addr64);
+    *mapSize = SCALAR32(size64);
+
+    return err;
+}
+} /* extern "C" */
+
+IOMemoryMap *
+IOUserClient::removeMappingForDescriptor(IOMemoryDescriptor * mem)
+{
+    // Remove from this client's mapping set the first map backed by 'mem'.
+    // Returns the map with a retained reference, or NULL if none matched.
+    IOMemoryMap * found = 0;
+
+    IOLockLock(gIOObjectPortLock);
+
+    OSIterator * iter = OSCollectionIterator::withCollection(mappings);
+    if (iter) {
+        // Note: iteration stops at the first object that is not an
+        // IOMemoryMap (matching the original cast-in-condition loop).
+        while ((found = OSDynamicCast(IOMemoryMap, iter->getNextObject()))) {
+            if (found->getMemoryDescriptor() == mem) {
+                // Retain before removal so the set's reference isn't the
+                // last one.
+                found->retain();
+                mappings->removeObject(found);
+                break;
+            }
+        }
+        iter->release();
+    }
+
+    IOLockUnlock(gIOObjectPortLock);
+
+    return found;
+}
+
+extern "C" {
+/* Routine io_connect_unmap_memory_from_task */
+// Unmap a previously mapped user-client memory type from 'from_task'.
+// Rebuilds a reference mapping (kIOMapReference) at 'address' to locate the
+// original map, detaches it from the client's mapping set, then tears down
+// the task's port right (foreign task) or the port object (same task).
+kern_return_t
+is_io_connect_unmap_memory_from_task
+(
+ io_connect_t connection,
+ uint32_t memory_type,
+ task_t from_task,
+ mach_vm_address_t address)
+{
+ IOReturn err;
+ IOOptionBits options = 0;
+ IOMemoryDescriptor * memory = 0;
+ IOMemoryMap * map;
+
+ CHECK( IOUserClient, connection, client );
+
+ if (!from_task) {
+ return kIOReturnBadArgument;
+ }
+
+ IOStatisticsClientCall();
+ err = client->clientMemoryForType((UInt32) memory_type, &options, &memory );
+
+ if (memory && (kIOReturnSuccess == err)) {
+ // kIOMapReference makes createMappingInTask look up the existing
+ // mapping at 'address' rather than creating a new one.
+ options = (options & ~kIOMapUserOptionsMask)
+ | kIOMapAnywhere | kIOMapReference;
+
+ map = memory->createMappingInTask( from_task, address, options );
+ memory->release();
+ if (map) {
+ IOLockLock( gIOObjectPortLock);
+ if (client->mappings) {
+ client->mappings->removeObject( map);
+ }
+ IOLockUnlock( gIOObjectPortLock);
+
+ mach_port_name_t name = 0;
+ if (from_task != current_task()) {
+ // Foreign task: find/make the task's send right for this map
+ // so it can be explicitly dropped below.
+ name = IOMachPort::makeSendRightForTask( from_task, map, IKOT_IOKIT_OBJECT );
+ map->release();
+ }
+
+ if (name) {
+ map->userClientUnmap();
+ // Drop the task's user references on the right; the mod-ref
+ // result is intentionally overridden with success.
+ err = iokit_mod_send_right( from_task, name, -2 );
+ err = kIOReturnSuccess;
+ } else {
+ IOMachPort::releasePortForObject( map, IKOT_IOKIT_OBJECT );
+ }
+ if (from_task == current_task()) {
+ map->release();
+ }
+ } else {
+ err = kIOReturnBadArgument;
+ }
+ }
+
+ return err;
+}
+
+kern_return_t
+is_io_connect_unmap_memory(
+    io_object_t connect,
+    uint32_t type,
+    task_t task,
+    uint32_t mapAddr )
+{
+    // 32-bit compatibility shim: widen the map address and forward to the
+    // task-based 64-bit entry point.
+    IOReturn err;
+    mach_vm_address_t address;
+
+    address = SCALAR64(mapAddr);
+
+    // Pass the widened 'address' — previously it was computed but the raw
+    // 32-bit 'mapAddr' was forwarded, leaving the variable unused.
+    err = is_io_connect_unmap_memory_from_task(connect, type, task, address);
+
+    return err;
+}
+
+
+/* Routine io_connect_add_client */
+kern_return_t
+is_io_connect_add_client(
+    io_object_t connection,
+    io_object_t connect_to)
+{
+    // Both ports must resolve to user clients before linking them.
+    CHECK( IOUserClient, connection, client );
+    CHECK( IOUserClient, connect_to, to );
+
+    IOStatisticsClientCall();
+    IOReturn ret = client->connectClient( to );
+    return ret;
+}
+
+
+/* Routine io_connect_set_properties */
+kern_return_t
+is_io_connect_set_properties(
+    io_object_t connection,
+    io_buf_ptr_t properties,
+    mach_msg_type_number_t propertiesCnt,
+    kern_return_t * result)
+{
+    // A connection is a registry entry; delegate to the common setter.
+    return is_io_registry_entry_set_properties(connection, properties,
+        propertiesCnt, result);
+}
+
+/* Routine io_user_client_method */
+// Synchronous external-method entry point with variable-sized structure
+// output. Marshals MIG arguments into IOExternalMethodArguments, dispatches
+// to the client's externalMethod(), then copies any OSSerialize/OSData
+// variable output out to the caller's address space.
+kern_return_t
+is_io_connect_method_var_output
+(
+ io_connect_t connection,
+ uint32_t selector,
+ io_scalar_inband64_t scalar_input,
+ mach_msg_type_number_t scalar_inputCnt,
+ io_struct_inband_t inband_input,
+ mach_msg_type_number_t inband_inputCnt,
+ mach_vm_address_t ool_input,
+ mach_vm_size_t ool_input_size,
+ io_struct_inband_t inband_output,
+ mach_msg_type_number_t *inband_outputCnt,
+ io_scalar_inband64_t scalar_output,
+ mach_msg_type_number_t *scalar_outputCnt,
+ io_buf_ptr_t *var_output,
+ mach_msg_type_number_t *var_outputCnt
+)
+{
+ CHECK( IOUserClient, connection, client );
+
+ IOExternalMethodArguments args;
+ IOReturn ret;
+ IOMemoryDescriptor * inputMD = 0;
+ OSObject * structureVariableOutputData = 0;
+
+ bzero(&args.__reserved[0], sizeof(args.__reserved));
+ args.__reservedA = 0;
+ args.version = kIOExternalMethodArgumentsCurrentVersion;
+
+ args.selector = selector;
+
+ // No async completion for this entry point.
+ args.asyncWakePort = MACH_PORT_NULL;
+ args.asyncReference = 0;
+ args.asyncReferenceCount = 0;
+ args.structureVariableOutputData = &structureVariableOutputData;
+
+ args.scalarInput = scalar_input;
+ args.scalarInputCount = scalar_inputCnt;
+ args.structureInput = inband_input;
+ args.structureInputSize = inband_inputCnt;
+
+ // Out-of-line input no larger than the inband limit should have been
+ // sent inband; treat it as a malformed message.
+ if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
+ return kIOReturnIPCError;
+ }
+
+ if (ool_input) {
+ inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
+ kIODirectionOut | kIOMemoryMapCopyOnWrite,
+ current_task());
+ }
+
+ args.structureInputDescriptor = inputMD;
+
+ args.scalarOutput = scalar_output;
+ args.scalarOutputCount = *scalar_outputCnt;
+ // Zero the scalar output buffer so unused slots never leak stale data.
+ bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
+ args.structureOutput = inband_output;
+ args.structureOutputSize = *inband_outputCnt;
+ args.structureOutputDescriptor = NULL;
+ args.structureOutputDescriptorSize = 0;
+
+ IOStatisticsClientCall();
+ ret = client->externalMethod( selector, &args );
+
+ *scalar_outputCnt = args.scalarOutputCount;
+ *inband_outputCnt = args.structureOutputSize;
+
+ // Copy variable-length output (OSSerialize text or raw OSData bytes)
+ // out to the caller; any other object type is reported as underrun.
+ if (var_outputCnt && var_output && (kIOReturnSuccess == ret)) {
+ OSSerialize * serialize;
+ OSData * data;
+ vm_size_t len;
+
+ if ((serialize = OSDynamicCast(OSSerialize, structureVariableOutputData))) {
+ len = serialize->getLength();
+ *var_outputCnt = len;
+ ret = copyoutkdata(serialize->text(), len, var_output);
+ } else if ((data = OSDynamicCast(OSData, structureVariableOutputData))) {
+ len = data->getLength();
+ *var_outputCnt = len;
+ ret = copyoutkdata(data->getBytesNoCopy(), len, var_output);
+ } else {
+ ret = kIOReturnUnderrun;
+ }
+ }
+
+ if (inputMD) {
+ inputMD->release();
+ }
+ if (structureVariableOutputData) {
+ structureVariableOutputData->release();
+ }
+
+ return ret;
+}
+
+/* Routine io_user_client_method */
+// Synchronous external-method entry point (fixed-size outputs).
+// Marshals MIG arguments into IOExternalMethodArguments, wraps out-of-line
+// buffers in memory descriptors, and dispatches to the client's
+// externalMethod(). Output counts are written back for the reply.
+kern_return_t
+is_io_connect_method
+(
+ io_connect_t connection,
+ uint32_t selector,
+ io_scalar_inband64_t scalar_input,
+ mach_msg_type_number_t scalar_inputCnt,
+ io_struct_inband_t inband_input,
+ mach_msg_type_number_t inband_inputCnt,
+ mach_vm_address_t ool_input,
+ mach_vm_size_t ool_input_size,
+ io_struct_inband_t inband_output,
+ mach_msg_type_number_t *inband_outputCnt,
+ io_scalar_inband64_t scalar_output,
+ mach_msg_type_number_t *scalar_outputCnt,
+ mach_vm_address_t ool_output,
+ mach_vm_size_t *ool_output_size
+)
+{
+ CHECK( IOUserClient, connection, client );
+
+ IOExternalMethodArguments args;
+ IOReturn ret;
+ IOMemoryDescriptor * inputMD = 0;
+ IOMemoryDescriptor * outputMD = 0;
+
+ bzero(&args.__reserved[0], sizeof(args.__reserved));
+ args.__reservedA = 0;
+ args.version = kIOExternalMethodArgumentsCurrentVersion;
+
+ args.selector = selector;
+
+ // No async completion for this entry point.
+ args.asyncWakePort = MACH_PORT_NULL;
+ args.asyncReference = 0;
+ args.asyncReferenceCount = 0;
+ args.structureVariableOutputData = 0;
+
+ args.scalarInput = scalar_input;
+ args.scalarInputCount = scalar_inputCnt;
+ args.structureInput = inband_input;
+ args.structureInputSize = inband_inputCnt;
+
+ // Out-of-line buffers no larger than the inband limit should have been
+ // sent inband; reject them as malformed IPC.
+ if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
+ return kIOReturnIPCError;
+ }
+ if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
+ return kIOReturnIPCError;
+ }
+
+ if (ool_input) {
+ inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
+ kIODirectionOut | kIOMemoryMapCopyOnWrite,
+ current_task());
+ }
+
+ args.structureInputDescriptor = inputMD;
+
+ args.scalarOutput = scalar_output;
+ args.scalarOutputCount = *scalar_outputCnt;
+ // Zero the scalar output buffer so unused slots never leak stale data.
+ bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
+ args.structureOutput = inband_output;
+ args.structureOutputSize = *inband_outputCnt;
+
+ if (ool_output && ool_output_size) {
+ outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
+ kIODirectionIn, current_task());
+ }
+
+ args.structureOutputDescriptor = outputMD;
+ args.structureOutputDescriptorSize = ool_output_size ? *ool_output_size : 0;
+
+ IOStatisticsClientCall();
+ ret = client->externalMethod( selector, &args );
+
+ // Report the actual output sizes back to the caller.
+ *scalar_outputCnt = args.scalarOutputCount;
+ *inband_outputCnt = args.structureOutputSize;
+ *ool_output_size = args.structureOutputDescriptorSize;
+
+ if (inputMD) {
+ inputMD->release();
+ }
+ if (outputMD) {
+ outputMD->release();
+ }
+
+ return ret;
+}
+
+/* Routine io_async_user_client_method */
+// Asynchronous external-method entry point. Marshals MIG arguments into
+// IOExternalMethodArguments, embeds the wake port in reference[0] (tagged
+// with kIOUCAsync64Flag for 64-bit tasks), and dispatches to the client's
+// externalMethod(). All output counts are written back for the reply.
+kern_return_t
+is_io_connect_async_method
+(
+    io_connect_t connection,
+    mach_port_t wake_port,
+    io_async_ref64_t reference,
+    mach_msg_type_number_t referenceCnt,
+    uint32_t selector,
+    io_scalar_inband64_t scalar_input,
+    mach_msg_type_number_t scalar_inputCnt,
+    io_struct_inband_t inband_input,
+    mach_msg_type_number_t inband_inputCnt,
+    mach_vm_address_t ool_input,
+    mach_vm_size_t ool_input_size,
+    io_struct_inband_t inband_output,
+    mach_msg_type_number_t *inband_outputCnt,
+    io_scalar_inband64_t scalar_output,
+    mach_msg_type_number_t *scalar_outputCnt,
+    mach_vm_address_t ool_output,
+    mach_vm_size_t * ool_output_size
+)
+{
+    CHECK( IOUserClient, connection, client );
+
+    IOExternalMethodArguments args;
+    IOReturn ret;
+    IOMemoryDescriptor * inputMD = 0;
+    IOMemoryDescriptor * outputMD = 0;
+
+    bzero(&args.__reserved[0], sizeof(args.__reserved));
+    args.__reservedA = 0;
+    args.version = kIOExternalMethodArgumentsCurrentVersion;
+
+    // Stash the wake port in the first async reference slot; tag 64-bit
+    // tasks so completion delivery uses 64-bit references.
+    reference[0] = (io_user_reference_t) wake_port;
+    if (vm_map_is_64bit(get_task_map(current_task()))) {
+        reference[0] |= kIOUCAsync64Flag;
+    }
+
+    args.selector = selector;
+
+    args.asyncWakePort = wake_port;
+    args.asyncReference = reference;
+    args.asyncReferenceCount = referenceCnt;
+
+    args.structureVariableOutputData = 0;
+
+    args.scalarInput = scalar_input;
+    args.scalarInputCount = scalar_inputCnt;
+    args.structureInput = inband_input;
+    args.structureInputSize = inband_inputCnt;
+
+    // Out-of-line buffers no larger than the inband limit should have been
+    // sent inband; reject them as malformed IPC.
+    if (ool_input && (ool_input_size <= sizeof(io_struct_inband_t))) {
+        return kIOReturnIPCError;
+    }
+    if (ool_output && (*ool_output_size <= sizeof(io_struct_inband_t))) {
+        return kIOReturnIPCError;
+    }
+
+    if (ool_input) {
+        inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
+            kIODirectionOut | kIOMemoryMapCopyOnWrite,
+            current_task());
+    }
+
+    args.structureInputDescriptor = inputMD;
+
+    args.scalarOutput = scalar_output;
+    args.scalarOutputCount = *scalar_outputCnt;
+    // Zero the scalar output buffer so unused slots never leak stale data.
+    bzero(&scalar_output[0], *scalar_outputCnt * sizeof(scalar_output[0]));
+    args.structureOutput = inband_output;
+    args.structureOutputSize = *inband_outputCnt;
+
+    if (ool_output) {
+        outputMD = IOMemoryDescriptor::withAddressRange(ool_output, *ool_output_size,
+            kIODirectionIn, current_task());
+    }
+
+    args.structureOutputDescriptor = outputMD;
+    args.structureOutputDescriptorSize = *ool_output_size;
+
+    IOStatisticsClientCall();
+    ret = client->externalMethod( selector, &args );
+
+    // Report every output count back to the caller. The scalar count was
+    // previously not written back here (unlike the synchronous path in
+    // is_io_connect_method), leaving callers with the stale input capacity.
+    *scalar_outputCnt = args.scalarOutputCount;
+    *inband_outputCnt = args.structureOutputSize;
+    *ool_output_size = args.structureOutputDescriptorSize;
+
+    if (inputMD) {
+        inputMD->release();
+    }
+    if (outputMD) {
+        outputMD->release();
+    }
+
+    return ret;
+}
+
+/* Routine io_connect_method_scalarI_scalarO */
+kern_return_t
+is_io_connect_method_scalarI_scalarO(
+    io_object_t connect,
+    uint32_t index,
+    io_scalar_inband_t input,
+    mach_msg_type_number_t inputCount,
+    io_scalar_inband_t output,
+    mach_msg_type_number_t * outputCount )
+{
+    // Legacy 32-bit scalar shim: widen inputs, invoke the unified method
+    // entry point, then narrow the scalar outputs back.
+    io_scalar_inband64_t wideIn;
+    io_scalar_inband64_t wideOut;
+    mach_msg_type_number_t structOutCnt = 0;
+    mach_vm_size_t oolOutSize = 0;
+    uint32_t i;
+
+    bzero(&wideOut[0], sizeof(wideOut));
+    for (i = 0; i < inputCount; i++) {
+        wideIn[i] = SCALAR64(input[i]);
+    }
+
+    IOReturn err = is_io_connect_method(connect, index,
+        wideIn, inputCount,
+        NULL, 0,
+        0, 0,
+        NULL, &structOutCnt,
+        wideOut, outputCount,
+        0, &oolOutSize);
+
+    for (i = 0; i < *outputCount; i++) {
+        output[i] = SCALAR32(wideOut[i]);
+    }
+
+    return err;
+}
+
+// Legacy IOExternalMethod dispatch shim for scalar-in/scalar-out calls.
+// Validates the caller's counts against the method table entry, then
+// invokes the member function pointer with the layout the old ABI expects:
+// inputs first (32-bit), remaining slots filled with output pointers.
+kern_return_t
+shim_io_connect_method_scalarI_scalarO(
+ IOExternalMethod * method,
+ IOService * object,
+ const io_user_scalar_t * input,
+ mach_msg_type_number_t inputCount,
+ io_user_scalar_t * output,
+ mach_msg_type_number_t * outputCount )
+{
+ IOMethod func;
+ io_scalar_inband_t _output;
+ IOReturn err;
+ err = kIOReturnBadArgument;
+
+ bzero(&_output[0], sizeof(_output));
+ do {
+ // Counts must match the method table exactly; mismatches are logged
+ // and rejected with kIOReturnBadArgument.
+ if (inputCount != method->count0) {
+ IOLog("%s:%d %s: IOUserClient inputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)inputCount, (uint64_t)method->count0);
+ DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)inputCount, uint64_t, (uint64_t)method->count0);
+ continue;
+ }
+ if (*outputCount != method->count1) {
+ IOLog("%s:%d %s: IOUserClient outputCount count mismatch 0x%llx 0x%llx\n", __FUNCTION__, __LINE__, object->getName(), (uint64_t)*outputCount, (uint64_t)method->count1);
+ DTRACE_IO2(iokit_count_mismatch, uint64_t, (uint64_t)*outputCount, uint64_t, (uint64_t)method->count1);
+ continue;
+ }
+
+ func = method->func;
+
+ // The old ABI always passes six arguments: inputCount inputs followed
+ // by pointers into _output for the remainder.
+ switch (inputCount) {
+ case 6:
+ err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
+ ARG32(input[3]), ARG32(input[4]), ARG32(input[5]));
+ break;
+ case 5:
+ err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
+ ARG32(input[3]), ARG32(input[4]),
+ &_output[0] );
+ break;
+ case 4:
+ err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
+ ARG32(input[3]),
+ &_output[0], &_output[1] );
+ break;
+ case 3:
+ err = (object->*func)( ARG32(input[0]), ARG32(input[1]), ARG32(input[2]),
+ &_output[0], &_output[1], &_output[2] );
+ break;
+ case 2:
+ err = (object->*func)( ARG32(input[0]), ARG32(input[1]),
+ &_output[0], &_output[1], &_output[2],
+ &_output[3] );
+ break;
+ case 1:
+ err = (object->*func)( ARG32(input[0]),
+ &_output[0], &_output[1], &_output[2],
+ &_output[3], &_output[4] );
+ break;
+ case 0:
+ err = (object->*func)( &_output[0], &_output[1], &_output[2],
+ &_output[3], &_output[4], &_output[5] );
+ break;
+
+ default:
+ IOLog("%s: Bad method table\n", object->getName());
+ }
+ }while (false);
+
+ // Narrow results back to the caller's scalar array.
+ uint32_t i;
+ for (i = 0; i < *outputCount; i++) {
+ output[i] = SCALAR32(_output[i]);
+ }
+
+ return err;
+}
+
+/* Routine io_async_method_scalarI_scalarO */
+kern_return_t
+is_io_async_method_scalarI_scalarO(
+    io_object_t connect,
+    mach_port_t wake_port,
+    io_async_ref_t reference,
+    mach_msg_type_number_t referenceCnt,
+    uint32_t index,
+    io_scalar_inband_t input,
+    mach_msg_type_number_t inputCount,
+    io_scalar_inband_t output,
+    mach_msg_type_number_t * outputCount )
+{
+    // Legacy 32-bit async scalar shim: widen references and inputs, call
+    // the 64-bit async entry point, then narrow the scalar outputs.
+    io_scalar_inband64_t wideIn;
+    io_scalar_inband64_t wideOut;
+    io_async_ref64_t wideRef;
+    mach_msg_type_number_t structOutCnt = 0;
+    mach_vm_size_t oolOutSize = 0;
+    uint32_t i;
+
+    bzero(&wideOut[0], sizeof(wideOut));
+    for (i = 0; i < referenceCnt; i++) {
+        wideRef[i] = REF64(reference[i]);
+    }
+    for (i = 0; i < inputCount; i++) {
+        wideIn[i] = SCALAR64(input[i]);
+    }
+
+    IOReturn err = is_io_connect_async_method(connect,
+        wake_port, wideRef, referenceCnt,
+        index,
+        wideIn, inputCount,
+        NULL, 0,
+        0, 0,
+        NULL, &structOutCnt,
+        wideOut, outputCount,
+        0, &oolOutSize);
+
+    for (i = 0; i < *outputCount; i++) {
+        output[i] = SCALAR32(wideOut[i]);
+    }
+
+    return err;
+}
+/* Routine io_async_method_scalarI_structureO */
+kern_return_t
+is_io_async_method_scalarI_structureO(
+    io_object_t connect,
+    mach_port_t wake_port,
+    io_async_ref_t reference,
+    mach_msg_type_number_t referenceCnt,
+    uint32_t index,
+    io_scalar_inband_t input,
+    mach_msg_type_number_t inputCount,
+    io_struct_inband_t output,
+    mach_msg_type_number_t * outputCount )
+{
+    // Legacy async shim: scalar inputs in, inband structure out. Widen the
+    // references and inputs and forward to the 64-bit async entry point.
+    io_scalar_inband64_t wideIn;
+    io_async_ref64_t wideRef;
+    mach_msg_type_number_t scalarOutCnt = 0;
+    mach_vm_size_t oolOutSize = 0;
+
+    for (uint32_t i = 0; i < referenceCnt; i++) {
+        wideRef[i] = REF64(reference[i]);
+    }
+    for (uint32_t i = 0; i < inputCount; i++) {
+        wideIn[i] = SCALAR64(input[i]);
+    }
+
+    return is_io_connect_async_method(connect,
+        wake_port, wideRef, referenceCnt,
+        index,
+        wideIn, inputCount,
+        NULL, 0,
+        0, 0,
+        output, outputCount,
+        NULL, &scalarOutCnt,
+        0, &oolOutSize);
+}
+
+/* Routine io_async_method_scalarI_structureI */
+kern_return_t
+is_io_async_method_scalarI_structureI(
+    io_connect_t connect,
+    mach_port_t wake_port,
+    io_async_ref_t reference,
+    mach_msg_type_number_t referenceCnt,
+    uint32_t index,
+    io_scalar_inband_t input,
+    mach_msg_type_number_t inputCount,
+    io_struct_inband_t inputStruct,
+    mach_msg_type_number_t inputStructCount )
+{
+    // Legacy async shim: scalar plus inband-structure inputs, no outputs.
+    // Widen references and inputs and forward to the 64-bit entry point.
+    io_scalar_inband64_t wideIn;
+    io_async_ref64_t wideRef;
+    mach_msg_type_number_t scalarOutCnt = 0;
+    mach_msg_type_number_t inbandOutCnt = 0;
+    mach_vm_size_t oolOutSize = 0;
+
+    for (uint32_t i = 0; i < referenceCnt; i++) {
+        wideRef[i] = REF64(reference[i]);
+    }
+    for (uint32_t i = 0; i < inputCount; i++) {
+        wideIn[i] = SCALAR64(input[i]);
+    }
+
+    return is_io_connect_async_method(connect,
+        wake_port, wideRef, referenceCnt,
+        index,
+        wideIn, inputCount,
+        inputStruct, inputStructCount,
+        0, 0,
+        NULL, &inbandOutCnt,
+        NULL, &scalarOutCnt,
+        0, &oolOutSize);
+}
+
+/* Routine io_async_method_structureI_structureO */
+kern_return_t
+is_io_async_method_structureI_structureO(
+ io_object_t connect,
+ mach_port_t wake_port,
+ io_async_ref_t reference,
+ mach_msg_type_number_t referenceCnt,
+ uint32_t index,
+ io_struct_inband_t input,
+ mach_msg_type_number_t inputCount,
+ io_struct_inband_t output,
+ mach_msg_type_number_t * outputCount )