*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-#include <mach_kdb.h>
#include <zone_debug.h>
-#include <mach_kdb.h>
-
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <machine/machparam.h>
-#ifdef __ppc__
-#include <ppc/mappings.h>
-#endif
-#ifdef __i386
+#if defined(__i386__) || defined(__x86_64__)
#include <i386/pmap.h>
#endif
+#if defined(__arm__) || defined(__arm64__)
+#include <arm/pmap.h>
+#endif
#include <IOKit/IOTypes.h>
#define EXTERN
*/
extern void iokit_add_reference( io_object_t obj );
+extern void iokit_add_connect_reference( io_object_t obj );
extern ipc_port_t iokit_port_for_object( io_object_t obj,
ipc_kobject_type_t type );
extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
+extern void iokit_release_port_send( ipc_port_t port );
+
+extern void iokit_lock_port(ipc_port_t port);
+extern void iokit_unlock_port(ipc_port_t port);
extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
iokit_lookup_object_port(
ipc_port_t port)
{
- register io_object_t obj;
+ io_object_t obj;
if (!IP_VALID(port))
return (NULL);
- ip_lock(port);
+ iokit_lock_port(port);
if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
obj = (io_object_t) port->ip_kobject;
iokit_add_reference( obj );
else
obj = NULL;
- ip_unlock(port);
+ iokit_unlock_port(port);
return( obj );
}
iokit_lookup_connect_port(
ipc_port_t port)
{
- register io_object_t obj;
+ io_object_t obj;
if (!IP_VALID(port))
return (NULL);
- ip_lock(port);
+ iokit_lock_port(port);
if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
obj = (io_object_t) port->ip_kobject;
- iokit_add_reference( obj );
+ iokit_add_connect_reference( obj );
}
else
obj = NULL;
- ip_unlock(port);
+ iokit_unlock_port(port);
return( obj );
}
{
io_object_t obj = NULL;
- if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) {
+ if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
ipc_port_t port;
kern_return_t kr;
- kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
+ kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
if (kr == KERN_SUCCESS) {
- assert(IP_VALID(port));
-
- if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
- obj = (io_object_t) port->ip_kobject;
- iokit_add_reference(obj);
- }
-
- ip_unlock(port);
+ assert(IP_VALID(port));
+
+ ip_reference(port);
+ ip_unlock(port);
+
+ iokit_lock_port(port);
+ if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
+ obj = (io_object_t) port->ip_kobject;
+ iokit_add_connect_reference(obj);
+ }
+ iokit_unlock_port(port);
+
+ ip_release(port);
}
}
ipc_port_release( port );
}
+EXTERN void
+iokit_release_port_send( ipc_port_t port )
+{
+ ipc_port_release_send( port );
+}
+
+extern lck_mtx_t iokit_obj_to_port_binding_lock;
+
+EXTERN void
+iokit_lock_port( __unused ipc_port_t port )
+{
+ lck_mtx_lock(&iokit_obj_to_port_binding_lock);
+}
+
+EXTERN void
+iokit_unlock_port( __unused ipc_port_t port )
+{
+ lck_mtx_unlock(&iokit_obj_to_port_binding_lock);
+}
+
/*
* Get the port for a device.
* Consumes a device reference; produces a naked send right.
iokit_make_object_port(
io_object_t obj )
{
- register ipc_port_t port;
- register ipc_port_t sendPort;
+ ipc_port_t port;
+ ipc_port_t sendPort;
if( obj == NULL)
return IP_NULL;
iokit_make_connect_port(
io_object_t obj )
{
- register ipc_port_t port;
- register ipc_port_t sendPort;
+ ipc_port_t port;
+ ipc_port_t sendPort;
if( obj == NULL)
return IP_NULL;
ipc_kobject_set( port, (ipc_kobject_t) obj, type);
/* Request no-senders notifications on the port. */
- notify = ipc_port_make_sonce( port);
ip_lock( port);
+ notify = ipc_port_make_sonce_locked( port);
ipc_port_nsrequest( port, 1, notify, ¬ify);
+ /* port unlocked */
assert( notify == IP_NULL);
gIOKitPortCount++;
EXTERN kern_return_t
iokit_destroy_object_port( ipc_port_t port )
{
+
+ iokit_lock_port(port);
ipc_kobject_set( port, IKO_NULL, IKOT_NONE);
// iokit_remove_reference( obj );
-
+ iokit_unlock_port(port);
ipc_port_dealloc_kernel( port);
gIOKitPortCount--;
/*
 * Rebind an existing kernel port to a (possibly different) IOKit object
 * and kobject type.  The binding lock keeps concurrent lookups from
 * observing a half-updated object/type pair.
 * Always returns KERN_SUCCESS.
 */
EXTERN kern_return_t
iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type )
{
    iokit_lock_port(port);
    ipc_kobject_set( port, (ipc_kobject_t) obj, type);
    iokit_unlock_port(port);

    return( KERN_SUCCESS);
}
{
ipc_port_t port;
ipc_port_t sendPort;
- mach_port_name_t name;
+ mach_port_name_t name = 0;
if( obj == NULL)
return MACH_PORT_NULL;
kern_return_t kr;
kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
- if ( kr != KERN_SUCCESS)
- name = MACH_PORT_NULL;
+ if ( kr != KERN_SUCCESS) {
+ ipc_port_release_send( sendPort );
+ name = MACH_PORT_NULL;
+ }
} else if ( sendPort == IP_NULL)
name = MACH_PORT_NULL;
else if ( sendPort == IP_DEAD)
// convert a port to io_object_t.
if( IP_VALID(port)) {
- ip_lock(port);
+ iokit_lock_port(port);
if( ip_active(port)) {
obj = (io_object_t) port->ip_kobject;
type = ip_kotype( port );
else
obj = NULL;
}
- ip_unlock(port);
+ iokit_unlock_port(port);
if( obj ) {
if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
{
- /* Re-request no-senders notifications on the port. */
- notify = ipc_port_make_sonce( port);
- ip_lock( port);
- ipc_port_nsrequest( port, mscount + 1, notify, ¬ify);
- assert( notify == IP_NULL);
+ /* Re-request no-senders notifications on the port (if still active) */
+ ip_lock(port);
+ if (ip_active(port)) {
+ notify = ipc_port_make_sonce_locked(port);
+ ipc_port_nsrequest( port, mscount + 1, notify, ¬ify);
+ /* port unlocked */
+ if ( notify != IP_NULL)
+ ipc_port_release_sonce(notify);
+ } else {
+ ip_unlock(port);
+ }
}
iokit_remove_reference( obj );
}
/* need to create a pmap function to generalize */
/*
 * Return the default cache attributes (VM_WIMG_* bits) the pmap layer
 * associates with the page containing physical address 'pa'.
 * The explicit ppnum_t cast narrows the 64-bit shifted address to the
 * page-number type pmap_cache_attributes() expects.
 */
unsigned int IODefaultCacheBits(addr64_t pa)
{
    return(pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)));
}
kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
mach_vm_size_t length, unsigned int options)
{
- vm_prot_t prot;
+ vm_prot_t prot;
unsigned int flags;
+ ppnum_t pagenum;
pmap_t pmap = map->pmap;
prot = (options & kIOMapReadOnly)
? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
+ pagenum = (ppnum_t)atop_64(pa);
+
switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
case kIOMapDefaultCache:
case kIOMapCopybackCache:
flags = VM_WIMG_COPYBACK;
break;
+
+ case kIOMapCopybackInnerCache:
+ flags = VM_WIMG_INNERWBACK;
+ break;
+
+ case kIOMapPostedWrite:
+ flags = VM_WIMG_POSTED;
+ break;
}
- // Set up a block mapped area
- pmap_map_block(pmap, va, (ppnum_t)atop_64(pa), (uint32_t) atop_64(round_page_64(length)), prot, flags, 0);
+ pmap_set_cache_attributes(pagenum, flags);
- return( KERN_SUCCESS );
+ vm_map_set_cache_attr(map, (vm_map_offset_t)va);
+
+
+ // Set up a block mapped area
+ return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);
}
kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length)
kern_return_t IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
mach_vm_size_t __unused length, unsigned int __unused options)
{
-#if __ppc__
- // can't remap block mappings, but ppc doesn't speculatively read from WC
-#else
-
mach_vm_size_t off;
vm_prot_t prot;
unsigned int flags;
pmap_t pmap = map->pmap;
+ pmap_flush_context pmap_flush_context_storage;
+ boolean_t delayed_pmap_flush = FALSE;
prot = (options & kIOMapReadOnly)
? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);
case kIOMapCopybackCache:
flags = VM_WIMG_COPYBACK;
break;
+
+ case kIOMapCopybackInnerCache:
+ flags = VM_WIMG_INNERWBACK;
+ break;
+
+ case kIOMapPostedWrite:
+ flags = VM_WIMG_POSTED;
+ break;
}
+ pmap_flush_context_init(&pmap_flush_context_storage);
+ delayed_pmap_flush = FALSE;
+
// enter each page's physical address in the target map
for (off = 0; off < length; off += page_size)
{
ppnum_t ppnum = pmap_find_phys(pmap, va + off);
- if (ppnum)
- pmap_enter(pmap, va + off, ppnum, prot, flags, TRUE);
+ if (ppnum) {
+ pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
+ PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
+ delayed_pmap_flush = TRUE;
+ }
}
-
-#endif
+ if (delayed_pmap_flush == TRUE)
+ pmap_flush(&pmap_flush_context_storage);
return (KERN_SUCCESS);
}
ppnum_t IOGetLastPageNumber(void)
{
- ppnum_t lastPage, highest = 0;
- unsigned int idx;
-
-#if __ppc__
- for (idx = 0; idx < pmap_mem_regions_count; idx++)
- {
- lastPage = pmap_mem_regions[idx].mrEnd;
-#elif __i386__
- for (idx = 0; idx < pmap_memory_region_count; idx++)
- {
- lastPage = pmap_memory_regions[idx].end - 1;
-#elif __arm__
- if (0) /* XXX */
- {
+#if __i386__ || __x86_64__
+ ppnum_t lastPage, highest = 0;
+ unsigned int idx;
+
+ for (idx = 0; idx < pmap_memory_region_count; idx++)
+ {
+ lastPage = pmap_memory_regions[idx].end - 1;
+ if (lastPage > highest)
+ highest = lastPage;
+ }
+ return (highest);
+#elif __arm__ || __arm64__
+ return 0;
#else
-#error arch
+#error unknown arch
#endif
- if (lastPage > highest)
- highest = lastPage;
- }
- return (highest);
}
/*
 * Fill *clock_time with the current system (uptime-based) time.
 * Goes through local clock_sec_t/clock_nsec_t temporaries because
 * mach_timespec_t fields are narrower types; the cast makes the
 * seconds truncation explicit rather than implicit.
 */
void IOGetTime( mach_timespec_t * clock_time);
void IOGetTime( mach_timespec_t * clock_time)
{
    clock_sec_t sec;
    clock_nsec_t nsec;
    clock_get_system_nanotime(&sec, &nsec);
    clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
    clock_time->tv_nsec = nsec;
}