X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..3a60a9f5b85abb8c2cf24e1926c5c7b3f608a5e2:/osfmk/device/iokit_rpc.c

diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c
index a61ad9276..c1e4c2d51 100644
--- a/osfmk/device/iokit_rpc.c
+++ b/osfmk/device/iokit_rpc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -39,12 +39,10 @@
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -61,7 +59,9 @@
 
 #ifdef __ppc__
 #include 
-#include 
+#endif
+#ifdef __i386
+#include 
 #endif
 
 #include 
@@ -74,13 +74,11 @@
 extern void iokit_add_reference( io_object_t obj );
 
-extern void iokit_remove_reference( io_object_t obj );
-
 extern ipc_port_t iokit_port_for_object( io_object_t obj,
 			ipc_kobject_type_t type );
 
 extern kern_return_t iokit_client_died( io_object_t obj,
-			ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t mscount );
+			ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );
 
 extern kern_return_t iokit_client_memory_for_type(
@@ -279,6 +277,14 @@ iokit_destroy_object_port( ipc_port_t port )
     return( KERN_SUCCESS);
 }
 
+EXTERN kern_return_t
+iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
+{
+    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
+
+    return( KERN_SUCCESS);
+}
+
 EXTERN mach_port_name_t
 iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
 {
@@ -312,6 +318,12 @@ iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
     return( name );
 }
 
+EXTERN kern_return_t
+iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
+{
+    return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
+}
+
 /*
  * Handle the No-More_Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
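The two additions above give the kernel-side IOKit glue a pair of small helpers: iokit_switch_object_port() retargets an existing kobject port at a different io_object_t via ipc_kobject_set(), and iokit_mod_send_right() adjusts the user-reference count on a send right in a task's IPC space by forwarding to mach_port_mod_refs(). The fragment below is a usage sketch, not part of the diff: the helper names, the error-free flow, and the choice of IKOT_IOKIT_OBJECT as the kobject type are illustrative assumptions, and the same osfmk headers that iokit_rpc.c already pulls in (mach types, ipc_kobject, device types) are assumed to be in scope.

/* Hypothetical usage sketch -- not from this diff. */
extern mach_port_name_t iokit_make_send_right( task_t task,
			io_object_t obj, ipc_kobject_type_t type );
extern kern_return_t iokit_mod_send_right( task_t task,
			mach_port_name_t name, mach_port_delta_t delta );

/* Give "task" a send right naming "obj"; returns the name in that task's space. */
static mach_port_name_t
export_object( task_t task, io_object_t obj )
{
    return( iokit_make_send_right( task, obj, IKOT_IOKIT_OBJECT ) );
}

/* Later, take back the user reference that export_object() handed out. */
static kern_return_t
unexport_object( task_t task, mach_port_name_t name )
{
    return( iokit_mod_send_right( task, name, -1 ) );
}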
@@ -379,67 +391,80 @@ iokit_notify( mach_msg_header_t * msg )
     }
 }
 
-#ifndef i386
-unsigned int IOTranslateCacheBits(struct phys_entry *pp)
+/* need to create a pmap function to generalize */
+unsigned int IODefaultCacheBits(addr64_t pa)
 {
-    unsigned int	flags;
-    unsigned int	memattr;
+    unsigned int	flags;
+#ifndef i386
+    struct phys_entry * pp;
+
+    // Find physical address
+    if ((pp = pmap_find_physentry(pa >> 12))) {
+	// Use physical attributes as default
+	// NOTE: DEVICE_PAGER_FLAGS are made to line up
+	flags = VM_MEM_COHERENT;			/* We only support coherent memory */
+	if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED;	/* Add in guarded if it is */
+	if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE;	/* Add in cache inhibited if so */
+    } else
+	// If no physical, just hard code attributes
+	flags = VM_WIMG_IO;
+#else
+    extern pmap_paddr_t	avail_end;
 
-    /* need to create a pmap function to generalize */
-    memattr = ((pp->pte1 & 0x00000078) >> 3);
+    if (pa < avail_end)
+	flags = VM_WIMG_COPYBACK;
+    else
+	flags = VM_WIMG_IO;
+#endif
 
-    /* NOTE: DEVICE_PAGER_FLAGS are made to line up */
-    flags = memattr & VM_WIMG_MASK;
-    return flags;
+    return flags;
 }
-#endif
 
 kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
 			vm_size_t length, unsigned int options)
 {
     vm_size_t	off;
     vm_prot_t	prot;
-    int		memattr;
-    struct phys_entry	*pp;
-    pmap_t 	pmap = map->pmap;
+    unsigned int flags;
+    pmap_t 	 pmap = map->pmap;
 
     prot = (options & kIOMapReadOnly)
 		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
 
+    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */
+
+	case kIOMapDefaultCache:
+	default:
+	    flags = IODefaultCacheBits(pa);
+	    break;
+
+	case kIOMapInhibitCache:
+	    flags = VM_WIMG_IO;
+	    break;
+
+	case kIOMapWriteThruCache:
+	    flags = VM_WIMG_WTHRU;
+	    break;
+
+	case kIOWriteCombineCache:
+	    flags = VM_WIMG_WCOMB;
+	    break;
+
+	case kIOMapCopybackCache:
+	    flags = VM_WIMG_COPYBACK;
+	    break;
+    }
 #if __ppc__
-    switch(options & kIOMapCacheMask ) {			/* What cache mode do we need? */
-
-	case kIOMapDefaultCache:
-	default:
-	    if(pp = pmap_find_physentry(pa)) {		/* Find physical address */
-		memattr = ((pp->pte1 & 0x00000078) >> 3);	/* Use physical attributes as default */
-	    }
-	    else {					/* If no physical, just hard code attributes */
-		memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
-	    }
-	    break;
-
-	case kIOMapInhibitCache:
-	    memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
-	    break;
-
-	case kIOMapWriteThruCache:
-	    memattr = PTE_WIMG_WT_CACHED_COHERENT_GUARDED;
-	    break;
-
-	case kIOMapCopybackCache:
-	    memattr = PTE_WIMG_CB_CACHED_COHERENT;
-	    break;
-    }
+    // Set up a block mapped area
+    pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), (uint32_t)(length >> 12), prot, flags, 0);
 
-    pmap_map_block(pmap, va, pa, length, prot, memattr, 0);	/* Set up a block mapped area */
-
 #else
-// enter each page's physical address in the target map
-    for (off = 0; off < length; off += page_size) {	/* Loop for the whole length */
-	pmap_enter(pmap, va + off, pa + off, prot, VM_WIMG_USE_DEFAULT, TRUE);	/* Map it in */
-    }
+// enter each page's physical address in the target map
+
+    for (off = 0; off < length; off += page_size)
+	pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE);
+
 #endif
 
     return( KERN_SUCCESS );
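With the hunk above, IOMapPages() first folds the caller's kIOMap* cache option into a VM_WIMG_* attribute: the explicit options select VM_WIMG_IO, VM_WIMG_WTHRU, VM_WIMG_WCOMB or VM_WIMG_COPYBACK, while kIOMapDefaultCache defers to the new IODefaultCacheBits(pa), which reads the physical entry's guarded/inhibited bits on PowerPC and, on i386, returns copyback below avail_end (real memory) and uncached I/O above it. The fragment below is a caller sketch, not part of the diff; the wrapper name and the register range are made up, and it relies only on identifiers that already appear in this file (IOMapPages, kIOMapInhibitCache, kIOWriteCombineCache, kIOMapDefaultCache, kIOMapReadOnly).

/* Hypothetical caller sketch -- not from this diff. */
extern kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
			vm_size_t length, unsigned int options);

static kern_return_t
map_device_registers( vm_map_t map, vm_offset_t va, vm_offset_t regs_pa, vm_size_t regs_len )
{
    /* kIOMapInhibitCache selects VM_WIMG_IO in the switch above; a frame
     * buffer would more likely pass kIOWriteCombineCache, and
     * kIOMapDefaultCache would defer to IODefaultCacheBits(regs_pa).
     * OR in kIOMapReadOnly to get a read-only protection instead. */
    return( IOMapPages( map, va, regs_pa, regs_len, kIOMapInhibitCache ) );
}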
@@ -449,7 +474,7 @@ kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length)
 {
     pmap_t	pmap = map->pmap;
 
-    pmap_remove(pmap, trunc_page(va), round_page(va + length));
+    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
 
     return( KERN_SUCCESS );
 }
@@ -457,6 +482,5 @@ kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length)
 void IOGetTime( mach_timespec_t * clock_time);
 void IOGetTime( mach_timespec_t * clock_time)
 {
-    *clock_time = clock_get_system_value();
+    clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec);
 }
-
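The final hunks widen IOUnmapPages() to 64-bit-safe page rounding (trunc_page_64/round_page_64) and reimplement IOGetTime() on top of clock_get_system_nanotime(), which fills in the seconds and nanoseconds of the caller's mach_timespec_t directly rather than returning a structure. Below is a usage sketch, not part of the diff: the helper name and the 64-bit arithmetic are illustrative, it assumes start was captured earlier by IOGetTime() and is not later than now, and NSEC_PER_SEC and uint64_t are assumed to come from the usual Mach clock and integer-type headers.

/* Hypothetical usage sketch -- not from this diff. */
extern void IOGetTime( mach_timespec_t * clock_time );

/* Nanoseconds elapsed since "start", which was filled in earlier by IOGetTime(). */
static uint64_t
elapsed_since( const mach_timespec_t * start )
{
    mach_timespec_t now;

    IOGetTime( &now );
    return( (uint64_t)(now.tv_sec - start->tv_sec) * NSEC_PER_SEC +
		(uint64_t)now.tv_nsec - (uint64_t)start->tv_nsec );
}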