X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/36401178fd6817c043cc00b0c00c7f723e58efae..4bd07ac2140668789aa3ee8ec4dde4a3e0a3bba5:/osfmk/device/iokit_rpc.c

diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c
index 3e750473e..1412f4115 100644
--- a/osfmk/device/iokit_rpc.c
+++ b/osfmk/device/iokit_rpc.c
@@ -25,10 +25,7 @@
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-#include
 #include
-#include
-
 #include
 #include
 #include
@@ -63,10 +60,7 @@
 #include
 
-#ifdef __ppc__
-#include
-#endif
-#ifdef __i386
+#if defined(__i386__) || defined(__x86_64__)
 #include
 #endif
 
 #include
@@ -117,6 +111,10 @@ extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
 
 extern void iokit_retain_port( ipc_port_t port );
 extern void iokit_release_port( ipc_port_t port );
+extern void iokit_release_port_send( ipc_port_t port );
+
+extern void iokit_lock_port(ipc_port_t port);
+extern void iokit_unlock_port(ipc_port_t port);
 
 extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj,
                                                ipc_kobject_type_t type );
@@ -147,7 +145,7 @@ iokit_lookup_object_port(
     if (!IP_VALID(port))
         return (NULL);
 
-    ip_lock(port);
+    iokit_lock_port(port);
     if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
         obj = (io_object_t) port->ip_kobject;
         iokit_add_reference( obj );
@@ -155,7 +153,7 @@ iokit_lookup_object_port(
     else
         obj = NULL;
 
-    ip_unlock(port);
+    iokit_unlock_port(port);
 
     return( obj );
 }
@@ -169,7 +167,7 @@ iokit_lookup_connect_port(
     if (!IP_VALID(port))
         return (NULL);
 
-    ip_lock(port);
+    iokit_lock_port(port);
     if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
         obj = (io_object_t) port->ip_kobject;
         iokit_add_reference( obj );
@@ -177,7 +175,7 @@ iokit_lookup_connect_port(
     else
         obj = NULL;
 
-    ip_unlock(port);
+    iokit_unlock_port(port);
 
     return( obj );
 }
@@ -187,21 +185,26 @@ iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
 {
     io_object_t obj = NULL;
 
-    if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) {
+    if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
         ipc_port_t port;
         kern_return_t kr;
 
-        kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
+        kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
 
         if (kr == KERN_SUCCESS) {
-            assert(IP_VALID(port));
-
-            if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
-                obj = (io_object_t) port->ip_kobject;
-                iokit_add_reference(obj);
-            }
-
-            ip_unlock(port);
+            assert(IP_VALID(port));
+
+            ip_reference(port);
+            ip_unlock(port);
+
+            iokit_lock_port(port);
+            if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
+                obj = (io_object_t) port->ip_kobject;
+                iokit_add_reference(obj);
+            }
+            iokit_unlock_port(port);
+
+            ip_release(port);
         }
     }
@@ -226,6 +229,26 @@ iokit_release_port( ipc_port_t port )
     ipc_port_release( port );
 }
 
+EXTERN void
+iokit_release_port_send( ipc_port_t port )
+{
+    ipc_port_release_send( port );
+}
+
+extern lck_mtx_t iokit_obj_to_port_binding_lock;
+
+EXTERN void
+iokit_lock_port( __unused ipc_port_t port )
+{
+    lck_mtx_lock(&iokit_obj_to_port_binding_lock);
+}
+
+EXTERN void
+iokit_unlock_port( __unused ipc_port_t port )
+{
+    lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
+}
+
 /*
  * Get the port for a device.
  * Consumes a device reference; produces a naked send right.
@@ -294,9 +317,10 @@ iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
         ipc_kobject_set( port, (ipc_kobject_t) obj, type);
 
         /* Request no-senders notifications on the port. */
-        notify = ipc_port_make_sonce( port);
         ip_lock( port);
+        notify = ipc_port_make_sonce_locked( port);
         ipc_port_nsrequest( port, 1, notify, &notify);
+        /* port unlocked */
         assert( notify == IP_NULL);
 
         gIOKitPortCount++;
@@ -309,10 +333,12 @@ iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
 
 EXTERN kern_return_t
 iokit_destroy_object_port( ipc_port_t port )
 {
+
+    iokit_lock_port(port);
     ipc_kobject_set( port, IKO_NULL, IKOT_NONE);
 
 //    iokit_remove_reference( obj );
-
+    iokit_unlock_port(port);
     ipc_port_dealloc_kernel( port);
 
     gIOKitPortCount--;
@@ -322,7 +348,9 @@ iokit_destroy_object_port( ipc_port_t port )
 EXTERN kern_return_t
 iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
 {
+    iokit_lock_port(port);
     ipc_kobject_set( port, (ipc_kobject_t) obj, type);
+    iokit_unlock_port(port);
 
     return( KERN_SUCCESS);
 }
@@ -332,7 +360,7 @@ iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
 {
     ipc_port_t       port;
     ipc_port_t       sendPort;
-    mach_port_name_t name;
+    mach_port_name_t name = 0;
 
     if( obj == NULL)
         return MACH_PORT_NULL;
@@ -348,8 +376,10 @@ iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
         kern_return_t kr;
         kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
                                  MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
-        if ( kr != KERN_SUCCESS)
-            name = MACH_PORT_NULL;
+        if ( kr != KERN_SUCCESS) {
+            ipc_port_release_send( sendPort );
+            name = MACH_PORT_NULL;
+        }
     } else if ( sendPort == IP_NULL)
         name = MACH_PORT_NULL;
     else if ( sendPort == IP_DEAD)
@@ -384,7 +414,7 @@ iokit_no_senders( mach_no_senders_notification_t * notification )
 
     // convert a port to io_object_t.
     if( IP_VALID(port)) {
-        ip_lock(port);
+        iokit_lock_port(port);
         if( ip_active(port)) {
             obj = (io_object_t) port->ip_kobject;
             type = ip_kotype( port );
@@ -394,7 +424,7 @@ iokit_no_senders( mach_no_senders_notification_t * notification )
             else
                 obj = NULL;
         }
-        ip_unlock(port);
+        iokit_unlock_port(port);
 
         if( obj ) {
 
@@ -402,11 +432,17 @@ iokit_no_senders( mach_no_senders_notification_t * notification )
 
             if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
             {
-                /* Re-request no-senders notifications on the port. */
-                notify = ipc_port_make_sonce( port);
-                ip_lock( port);
-                ipc_port_nsrequest( port, mscount + 1, notify, &notify);
-                assert( notify == IP_NULL);
+                /* Re-request no-senders notifications on the port (if still active) */
+                ip_lock(port);
+                if (ip_active(port)) {
+                    notify = ipc_port_make_sonce_locked(port);
+                    ipc_port_nsrequest( port, mscount + 1, notify, &notify);
+                    /* port unlocked */
+                    if ( notify != IP_NULL)
+                        ipc_port_release_sonce(notify);
+                } else {
+                    ip_unlock(port);
+                }
             }
             iokit_remove_reference( obj );
         }
@@ -436,19 +472,22 @@ iokit_notify( mach_msg_header_t * msg )
 
 /* need to create a pmap function to generalize */
 unsigned int IODefaultCacheBits(addr64_t pa)
 {
-    return(pmap_cache_attributes(pa >> PAGE_SHIFT));
+    return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
 }
 
 kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
                          mach_vm_size_t length, unsigned int options)
 {
-    vm_prot_t    prot;
+    vm_prot_t	 prot;
     unsigned int flags;
+    ppnum_t      pagenum;
     pmap_t       pmap = map->pmap;
 
     prot = (options & kIOMapReadOnly) ?
            VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
 
+    pagenum = (ppnum_t)atop_64(pa);
+
     switch(options & kIOMapCacheMask ) {                    /* What cache mode do we need? */
 
         case kIOMapDefaultCache:
@@ -471,10 +510,18 @@ kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t p
         case kIOMapCopybackCache:
             flags = VM_WIMG_COPYBACK;
             break;
+
+        case kIOMapCopybackInnerCache:
+            flags = VM_WIMG_INNERWBACK;
+            break;
     }
 
+    pmap_set_cache_attributes(pagenum, flags);
+
+    vm_map_set_cache_attr(map, (vm_map_offset_t)va);
+
+
     // Set up a block mapped area
-    pmap_map_block(pmap, va, (ppnum_t)atop_64(pa), (uint32_t) atop_64(round_page_64(length)), prot, flags, 0);
+    pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
 
     return( KERN_SUCCESS );
 }
@@ -491,14 +538,12 @@ kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t le
 kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
                                  mach_vm_size_t __unused length, unsigned int __unused options)
 {
-#if __ppc__
-    // can't remap block mappings, but ppc doesn't speculatively read from WC
-#else
-
     mach_vm_size_t off;
     vm_prot_t      prot;
     unsigned int   flags;
     pmap_t         pmap = map->pmap;
+    pmap_flush_context pmap_flush_context_storage;
+    boolean_t      delayed_pmap_flush = FALSE;
 
     prot = (options & kIOMapReadOnly) ?
            VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
@@ -527,45 +572,51 @@ kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unus
             break;
     }
 
+    pmap_flush_context_init(&pmap_flush_context_storage);
+    delayed_pmap_flush = FALSE;
+
     // enter each page's physical address in the target map
     for (off = 0; off < length; off += page_size)
     {
         ppnum_t ppnum = pmap_find_phys(pmap, va + off);
-        if (ppnum)
-            pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
+        if (ppnum) {
+            pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
+                               PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+            delayed_pmap_flush = TRUE;
+        }
     }
-
-#endif
+    if (delayed_pmap_flush == TRUE)
+        pmap_flush(&pmap_flush_context_storage);
 
     return (KERN_SUCCESS);
 }
 
 ppnum_t IOGetLastPageNumber(void)
 {
-    ppnum_t      lastPage, highest = 0;
-    unsigned int idx;
-
-#if __ppc__
-    for (idx = 0; idx < pmap_mem_regions_count; idx++)
-    {
-        lastPage = pmap_mem_regions[idx].mrEnd;
-#elif __i386__
-    for (idx = 0; idx < pmap_memory_region_count; idx++)
-    {
-        lastPage = pmap_memory_regions[idx].end - 1;
+#if __i386__ || __x86_64__
+    ppnum_t      lastPage, highest = 0;
+    unsigned int idx;
+
+    for (idx = 0; idx < pmap_memory_region_count; idx++)
+    {
+        lastPage = pmap_memory_regions[idx].end - 1;
+        if (lastPage > highest)
+            highest = lastPage;
+    }
+    return (highest);
 #else
-#error arch
+#error unknown arch
 #endif
-    if (lastPage > highest)
-        highest = lastPage;
-    }
-    return (highest);
 }
 
 
 void IOGetTime( mach_timespec_t * clock_time);
 void IOGetTime( mach_timespec_t * clock_time)
 {
-    clock_get_system_nanotime(&clock_time->tv_sec, (uint32_t *) &clock_time->tv_nsec);
+    clock_sec_t  sec;
+    clock_nsec_t nsec;
+    clock_get_system_nanotime(&sec, &nsec);
+    clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
+    clock_time->tv_nsec = nsec;
 }