/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>
-#include <mach/mach_host_server.h>
+//#include <mach/mach_host_server.h>
#include <mach/mach_types.h>
#include <machine/machparam.h> /* spl definitions */
#ifdef __ppc__
#include <ppc/mappings.h>
#endif
-#ifdef __i386
+#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
#include <IOKit/IOTypes.h>
vm_address_t * address,
vm_size_t * size );
+
+extern ppnum_t IOGetLastPageNumber(void);
+
+/*
+ * Functions imported by iokit:IOUserClient.cpp
+ */
+
+extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
+ ipc_kobject_type_t type );
+
+extern kern_return_t iokit_destroy_object_port( ipc_port_t port );
+
+extern mach_port_name_t iokit_make_send_right( task_t task,
+ io_object_t obj, ipc_kobject_type_t type );
+
+extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );
+
+extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task);
+
+extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef);
+
+extern void iokit_retain_port( ipc_port_t port );
+extern void iokit_release_port( ipc_port_t port );
+extern void iokit_release_port_send( ipc_port_t port );
+
+extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
+
+/*
+ * Functions imported by iokit:IOMemoryDescriptor.cpp
+ */
+
+extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
+ mach_vm_size_t length, unsigned int mapFlags);
+
+extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length);
+
+extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
+ mach_vm_size_t length, unsigned int options);
+
+extern unsigned int IODefaultCacheBits(addr64_t pa);
+
/*
* Lookup a device by its port.
* Doesn't consume the naked send right; produces a device reference.
{
io_object_t obj = NULL;
- if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) {
+ if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
ipc_port_t port;
kern_return_t kr;
- kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
+ kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
if (kr == KERN_SUCCESS) {
assert(IP_VALID(port));
ipc_port_release( port );
}
+/* Release a naked send right on "port" (drops the send-right reference). */
+EXTERN void
+iokit_release_port_send( ipc_port_t port )
+{
+    ipc_port_release_send( port );
+}
+
/*
* Get the port for a device.
* Consumes a device reference; produces a naked send right.
return( sendPort);
}
-
-EXTERN ipc_port_t
-iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type );
-
int gIOKitPortCount;
EXTERN ipc_port_t
{
ipc_port_t port;
io_object_t obj = NULL;
- ipc_kobject_type_t type;
+ ipc_kobject_type_t type = IKOT_NONE;
ipc_port_t notify;
port = (ipc_port_t) notification->not_header.msgh_remote_port;
case MACH_NOTIFY_SEND_ONCE:
case MACH_NOTIFY_DEAD_NAME:
default:
- printf("iokit_notify: strange notification %ld\n", msg->msgh_id);
+ printf("iokit_notify: strange notification %d\n", msg->msgh_id);
return FALSE;
}
}
/* need to create a pmap function to generalize */
unsigned int IODefaultCacheBits(addr64_t pa)
{
-    unsigned int	flags;
-#ifndef i386
-    struct phys_entry * pp;
-
-    // Find physical address
-    if ((pp = pmap_find_physentry(pa >> 12))) {
-	// Use physical attributes as default
-	// NOTE: DEVICE_PAGER_FLAGS are made to line up
-	flags = VM_MEM_COHERENT;		/* We only support coherent memory */
-	if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED;	/* Add in guarded if it is */
-	if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE;	/* Add in cache inhibited if so */
-    } else
-	// If no physical, just hard code attributes
-        flags = VM_WIMG_IO;
-#else
-    extern pmap_paddr_t avail_end;
-
-    if (pa < avail_end)
-	flags = VM_WIMG_COPYBACK;
-    else
-	flags = VM_WIMG_IO;
-#endif
-
-    return flags;
+    /* Delegate to the pmap layer for the page's cache attributes,
+     * replacing the per-architecture logic removed above. */
+    return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}
-kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa,
-			vm_size_t length, unsigned int options)
+/* Map physical range [pa, pa+length) at va in "map"; signature widened to
+ * 64-bit mach_vm types.  NOTE(review): hunk context is elided between the
+ * local declarations and the cache-mode switch — prot/flags setup for the
+ * earlier cases is not visible here. */
+kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
+			mach_vm_size_t length, unsigned int options)
{
-    vm_size_t	 off;
    vm_prot_t	 prot;
    unsigned int flags;
    pmap_t 	 pmap = map->pmap;
	    flags = VM_WIMG_WTHRU;
	    break;
-	case kIOWriteCombineCache:
+	case kIOMapWriteCombineCache:
	    flags = VM_WIMG_WCOMB;
	    break;
	    flags = VM_WIMG_COPYBACK;
	    break;
    }
-#if __ppc__
    // Set up a block mapped area
-    pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), (uint32_t)(length >> 12), prot, flags, 0);
+    /* atop_64/round_page_64 replace the hard-coded ">> 12" page math. */
+    pmap_map_block(pmap, va, (ppnum_t)atop_64(pa), (uint32_t) atop_64(round_page_64(length)), prot, flags, 0);
+
+    return( KERN_SUCCESS );
+}
+
+/* Remove all mappings in [va, va+length) from "map" (page-rounded). */
+kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
+{
+    pmap_t	pmap = map->pmap;
+
+    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
+
+    return( KERN_SUCCESS );
+}
+/* Change the cache mode (WIMG attributes) of already-mapped pages in
+ * [va, va+length).  kIOMapDefaultCache is rejected; each resident page is
+ * re-entered with the new flags.  Parameters are marked __unused for the
+ * ppc build, where the whole body is compiled out. */
+kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
+					mach_vm_size_t __unused length, unsigned int __unused options)
+{
+#if __ppc__
+    // can't remap block mappings, but ppc doesn't speculatively read from WC
#else
-// enter each page's physical address in the target map
+    mach_vm_size_t off;
+    vm_prot_t	   prot;
+    unsigned int   flags;
+    pmap_t 	   pmap = map->pmap;
+
+    prot = (options & kIOMapReadOnly)
+		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
+
+    switch (options & kIOMapCacheMask)
+    {
+	// what cache mode do we need?
+	case kIOMapDefaultCache:
+	default:
+	    return (KERN_INVALID_ARGUMENT);
+
+	case kIOMapInhibitCache:
+	    flags = VM_WIMG_IO;
+	    break;
+
+	case kIOMapWriteThruCache:
+	    flags = VM_WIMG_WTHRU;
+	    break;
+
+	case kIOMapWriteCombineCache:
+	    flags = VM_WIMG_WCOMB;
+	    break;
+
+	case kIOMapCopybackCache:
+	    flags = VM_WIMG_COPYBACK;
+	    break;
+    }
+
+    // enter each page's physical address in the target map
    for (off = 0; off < length; off += page_size)
-	pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE);
+    {
+	/* Only re-enter pages that are currently resident in the pmap. */
+	ppnum_t ppnum = pmap_find_phys(pmap, va + off);
+	if (ppnum)
+	    pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
+    }
#endif
-    return( KERN_SUCCESS );
+    return (KERN_SUCCESS);
}
-kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length)
+/* Return the highest physical page number covered by any pmap memory
+ * region.  The #if selects the per-architecture region table; the #elif
+ * and #if branches each open the for-loop that the shared tail closes. */
+ppnum_t IOGetLastPageNumber(void)
{
-    pmap_t	pmap = map->pmap;
-
-    pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length));
+    ppnum_t	 lastPage, highest = 0;
+    unsigned int idx;
-    return( KERN_SUCCESS );
+#if __ppc__
+    for (idx = 0; idx < pmap_mem_regions_count; idx++)
+    {
+	lastPage = pmap_mem_regions[idx].mrEnd;
+#elif __i386__ || __x86_64__
+    for (idx = 0; idx < pmap_memory_region_count; idx++)
+    {
+	lastPage = pmap_memory_regions[idx].end - 1;
+#else
+#error arch
+#endif
+	if (lastPage > highest)
+	    highest = lastPage;
+    }
+    return (highest);
}
+
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
-	clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec);
+	/* clock_sec_t/clock_nsec_t may be wider than the mach_timespec_t
+	 * fields, so fetch into locals and narrow tv_sec explicitly. */
+	clock_sec_t sec;
+	clock_nsec_t nsec;
+	clock_get_system_nanotime(&sec, &nsec);
+	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
+	clock_time->tv_nsec = nsec;
}
+