/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <kern/clock.h>
#include <kern/spl.h>
-#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/queue.h>
#include <kern/zalloc.h>
#include <kern/thread.h>
-#include <kern/thread_swap.h>
#include <kern/task.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#ifdef __ppc__
#include <ppc/mappings.h>
-#include <ppc/pmap_internals.h>
+#endif
+#ifdef __i386
+#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>
extern void iokit_add_reference( io_object_t obj );
-extern void iokit_remove_reference( io_object_t obj );
-
extern ipc_port_t iokit_port_for_object( io_object_t obj,
ipc_kobject_type_t type );
extern kern_return_t iokit_client_died( io_object_t obj,
- ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t mscount );
+ ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );
extern kern_return_t
iokit_client_memory_for_type(
return( KERN_SUCCESS);
}
+/*
+ * Rebind an existing kobject port to a (possibly different) IOKit
+ * object/type pair.  The port itself is reused; only its kobject
+ * association is switched via ipc_kobject_set().  Always reports
+ * KERN_SUCCESS.
+ * NOTE(review): assumes the caller manages references on the old and
+ * new objects — confirm against callers.
+ */
+EXTERN kern_return_t
+iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
+{
+ ipc_kobject_set( port, (ipc_kobject_t) obj, type);
+
+ return( KERN_SUCCESS);
+}
+
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
{
return( name );
}
+/*
+ * Adjust (by 'delta', which may be negative) the user reference count
+ * on the send right held by 'task' under 'name'.  Thin wrapper around
+ * mach_port_mod_refs() on the task's IPC space; returns its result.
+ */
+EXTERN kern_return_t
+iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta )
+{
+ return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ));
+}
+
/*
 * Handle the No-More-Senders notification generated from a device port destroy.
* Since there are no longer any tasks which hold a send right to this device
}
}
-#ifndef i386
-unsigned int IOTranslateCacheBits(struct phys_entry *pp)
+/* need to create a pmap function to generalize */
+/*
+ * Return the default cache attribute flags (VM_WIMG_* / VM_MEM_*) for
+ * the physical byte address 'pa'.  PPC: consult the physical entry for
+ * the page (pa >> 12) and mirror its guarded/inhibited bits; i386:
+ * memory below avail_end is treated as cacheable RAM, anything above
+ * it as uncached I/O space.
+ */
+unsigned int IODefaultCacheBits(addr64_t pa)
{
- unsigned int flags;
- unsigned int memattr;
+ unsigned int flags;
+#ifndef i386
+ struct phys_entry * pp;
+
+ // Find physical address
+ if ((pp = pmap_find_physentry(pa >> 12))) {
+ // Use physical attributes as default
+ // NOTE: DEVICE_PAGER_FLAGS are made to line up
+ flags = VM_MEM_COHERENT; /* We only support coherent memory */
+ if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */
+ if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */
+ } else
+ // If no physical, just hard code attributes
+ flags = VM_WIMG_IO;
+#else
+ extern pmap_paddr_t avail_end;
- /* need to create a pmap function to generalize */
- memattr = ((pp->pte1 & 0x00000078) >> 3);
+ if (pa < avail_end)
+ flags = VM_WIMG_COPYBACK;
+ else
+ flags = VM_WIMG_IO;
+#endif
- /* NOTE: DEVICE_PAGER_FLAGS are made to line up */
- flags = memattr & VM_WIMG_MASK;
- return flags;
+ return flags;
}
-#endif
+/*
+ * Map 'length' bytes of physical memory starting at 'pa' into 'map' at
+ * virtual address 'va'.  Cache mode is selected from the kIOMapCacheMask
+ * bits of 'options' (default: IODefaultCacheBits for the address);
+ * kIOMapReadOnly restricts the protection to read.
+ * NOTE(review): va/pa/length are presumably page-aligned — confirm
+ * against callers.
+ */
kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
vm_size_t length, unsigned int options)
{
vm_size_t off;
vm_prot_t prot;
- int memattr;
- struct phys_entry *pp;
- pmap_t pmap = map->pmap;
+ unsigned int flags;
+ pmap_t pmap = map->pmap;
prot = (options & kIOMapReadOnly)
? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
+ switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
+
+ case kIOMapDefaultCache:
+ default:
+ flags = IODefaultCacheBits(pa);
+ break;
+
+ case kIOMapInhibitCache:
+ flags = VM_WIMG_IO;
+ break;
+
+ case kIOMapWriteThruCache:
+ flags = VM_WIMG_WTHRU;
+ break;
+
+ case kIOWriteCombineCache:
+ flags = VM_WIMG_WCOMB;
+ break;
+
+ case kIOMapCopybackCache:
+ flags = VM_WIMG_COPYBACK;
+ break;
+ }
#if __ppc__
- switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
-
- case kIOMapDefaultCache:
- default:
- if(pp = pmap_find_physentry(pa)) { /* Find physical address */
- memattr = ((pp->pte1 & 0x00000078) >> 3); /* Use physical attributes as default */
- }
- else { /* If no physical, just hard code attributes */
- memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
- }
- break;
-
- case kIOMapInhibitCache:
- memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
- break;
-
- case kIOMapWriteThruCache:
- memattr = PTE_WIMG_WT_CACHED_COHERENT_GUARDED;
- break;
-
- case kIOMapCopybackCache:
- memattr = PTE_WIMG_CB_CACHED_COHERENT;
- break;
- }
+ // Set up a block mapped area
+ pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), (uint32_t)(length >> 12), prot, flags, 0);
- pmap_map_block(pmap, va, pa, length, prot, memattr, 0); /* Set up a block mapped area */
-
#else
-// enter each page's physical address in the target map
- for (off = 0; off < length; off += page_size) { /* Loop for the whole length */
- pmap_enter(pmap, va + off, pa + off, prot, VM_WIMG_USE_DEFAULT, TRUE); /* Map it in */
- }
+// enter each page's physical address in the target map
+
+ // pmap_enter now takes a physical page number (pa >> 12), not a byte address
+ for (off = 0; off < length; off += page_size)
+ pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE);
+
#endif
return( KERN_SUCCESS );
{
pmap_t pmap = map->pmap;
+ /* 64-bit-safe page rounding of the range being removed */
- pmap_remove(pmap, trunc_page(va), round_page(va + length));
+ pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
return( KERN_SUCCESS );
}
+/*
+ * Fill *clock_time with the current system uptime as a
+ * seconds/nanoseconds pair.
+ */
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
- *clock_time = clock_get_system_value();
+ clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec);
}
-