X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/8f6c56a50524aa785f7e596d52dddfb331e18961..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/osfmk/device/iokit_rpc.c diff --git a/osfmk/device/iokit_rpc.c b/osfmk/device/iokit_rpc.c index 29ef1f998..6c1c7fac3 100644 --- a/osfmk/device/iokit_rpc.c +++ b/osfmk/device/iokit_rpc.c @@ -1,8 +1,8 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * + * * This file contains Original Code and/or Modifications of Original Code * as defined in and that are subject to the Apple Public Source License * Version 2.0 (the 'License'). You may not use this file except in @@ -11,10 +11,10 @@ * unlawful or unlicensed copies of an Apple operating system, or to * circumvent, violate, or enable the circumvention or violation of, any * terms of an Apple operating system software license agreement. - * + * * Please obtain a copy of the License at * http://www.opensource.apple.com/apsl/ and read it before using this file. - * + * * The Original Code and all software distributed under the License are * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, @@ -22,30 +22,25 @@ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. * Please see the License for the specific language governing rights and * limitations under the License. - * + * * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ -#include -#include -#include - #include #include #include #include #include #include -#include +//#include #include -#include /* spl definitions */ +#include /* spl definitions */ #include #include #include #include -#include #include #include #include @@ -63,105 +58,92 @@ #include -#ifdef __ppc__ -#include -#endif -#ifdef __i386 +#if defined(__i386__) || defined(__x86_64__) #include #endif -#include +#if defined(__arm__) || defined(__arm64__) +#include +#endif +#include #define EXTERN #define MIGEXTERN -/* - * Functions in iokit:IOUserClient.cpp - */ - -extern void iokit_add_reference( io_object_t obj ); - -extern ipc_port_t iokit_port_for_object( io_object_t obj, - ipc_kobject_type_t type ); - -extern kern_return_t iokit_client_died( io_object_t obj, - ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount ); - -extern kern_return_t -iokit_client_memory_for_type( - io_object_t connect, - unsigned int type, - unsigned int * flags, - vm_address_t * address, - vm_size_t * size ); +LCK_GRP_DECLARE(dev_lck_grp, "device"); +LCK_MTX_DECLARE(iokit_obj_to_port_binding_lock, &dev_lck_grp); /* * Lookup a device by its port. * Doesn't consume the naked send right; produces a device reference. 
*/ -MIGEXTERN io_object_t -iokit_lookup_object_port( - ipc_port_t port) +io_object_t +iokit_lookup_io_object(ipc_port_t port, ipc_kobject_type_t type) { - register io_object_t obj; + io_object_t obj; - if (!IP_VALID(port)) - return (NULL); + if (!IP_VALID(port)) { + return NULL; + } - ip_lock(port); - if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) { - obj = (io_object_t) port->ip_kobject; - iokit_add_reference( obj ); + iokit_lock_port(port); + if (ip_active(port) && (ip_kotype(port) == type)) { + obj = (io_object_t) ip_get_kobject(port); + iokit_add_reference( obj, type ); + } else { + obj = NULL; } - else - obj = NULL; - ip_unlock(port); + iokit_unlock_port(port); - return( obj ); + return obj; } MIGEXTERN io_object_t -iokit_lookup_connect_port( - ipc_port_t port) +iokit_lookup_object_port( + ipc_port_t port) { - register io_object_t obj; - - if (!IP_VALID(port)) - return (NULL); - - ip_lock(port); - if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) { - obj = (io_object_t) port->ip_kobject; - iokit_add_reference( obj ); - } - else - obj = NULL; + return iokit_lookup_io_object(port, IKOT_IOKIT_OBJECT); +} - ip_unlock(port); +MIGEXTERN io_object_t +iokit_lookup_connect_port( + ipc_port_t port) +{ + return iokit_lookup_io_object(port, IKOT_IOKIT_CONNECT); +} - return( obj ); +MIGEXTERN io_object_t +iokit_lookup_uext_object_port( + ipc_port_t port) +{ + return iokit_lookup_io_object(port, IKOT_UEXT_OBJECT); } -EXTERN io_object_t -iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space) +static io_object_t +iokit_lookup_object_in_space_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, ipc_space_t space) { io_object_t obj = NULL; - if (connectRef && MACH_PORT_VALID((mach_port_name_t)connectRef)) { + if (name && MACH_PORT_VALID(name)) { ipc_port_t port; kern_return_t kr; - kr = ipc_object_translate(space, (mach_port_name_t)connectRef, MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port); + kr = ipc_port_translate_send(space, name, &port); if (kr == KERN_SUCCESS) { - assert(IP_VALID(port)); - - if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) { - obj = (io_object_t) port->ip_kobject; - iokit_add_reference(obj); - } - - ip_unlock(port); + assert(IP_VALID(port)); + require_ip_active(port); + ip_reference(port); + ip_unlock(port); + + iokit_lock_port(port); + if (ip_kotype(port) == type) { + obj = (io_object_t) ip_get_kobject(port); + iokit_add_reference(obj, type); + } + iokit_unlock_port(port); + + ip_release(port); } } @@ -169,212 +151,242 @@ iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space) } EXTERN io_object_t -iokit_lookup_connect_ref_current_task(io_object_t connectRef) +iokit_lookup_object_with_port_name(mach_port_name_t name, ipc_kobject_type_t type, task_t task) +{ + return iokit_lookup_object_in_space_with_port_name(name, type, task->itk_space); +} + +EXTERN io_object_t +iokit_lookup_connect_ref_current_task(mach_port_name_t name) { - return iokit_lookup_connect_ref(connectRef, current_space()); + return iokit_lookup_object_in_space_with_port_name(name, IKOT_IOKIT_CONNECT, current_space()); +} + +EXTERN io_object_t +iokit_lookup_uext_ref_current_task(mach_port_name_t name) +{ + return iokit_lookup_object_in_space_with_port_name(name, IKOT_UEXT_OBJECT, current_space()); } EXTERN void iokit_retain_port( ipc_port_t port ) { - ipc_port_reference( port ); + ipc_port_reference( port ); } EXTERN void iokit_release_port( ipc_port_t port ) { - ipc_port_release( port ); + ipc_port_release( port ); +} + +EXTERN void 
+iokit_make_port_send( ipc_port_t port ) +{ + ipc_port_make_send( port ); +} + +EXTERN void +iokit_release_port_send( ipc_port_t port ) +{ + ipc_port_release_send( port ); +} + +EXTERN void +iokit_lock_port( __unused ipc_port_t port ) +{ + lck_mtx_lock(&iokit_obj_to_port_binding_lock); +} + +EXTERN void +iokit_unlock_port( __unused ipc_port_t port ) +{ + lck_mtx_unlock(&iokit_obj_to_port_binding_lock); } /* * Get the port for a device. * Consumes a device reference; produces a naked send right. */ -MIGEXTERN ipc_port_t -iokit_make_object_port( - io_object_t obj ) + +static ipc_port_t +iokit_make_port_of_type(io_object_t obj, ipc_kobject_type_t type) { - register ipc_port_t port; - register ipc_port_t sendPort; + ipc_port_t port; + ipc_port_t sendPort; - if( obj == NULL) - return IP_NULL; + if (obj == NULL) { + return IP_NULL; + } - port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT ); - if( port) { - sendPort = ipc_port_make_send( port); - iokit_release_port( port ); - } else - sendPort = IP_NULL; + port = iokit_port_for_object( obj, type ); + if (port) { + sendPort = ipc_port_make_send( port); + iokit_release_port( port ); + } else { + sendPort = IP_NULL; + } - iokit_remove_reference( obj ); + iokit_remove_reference( obj ); - return( sendPort); + return sendPort; } MIGEXTERN ipc_port_t -iokit_make_connect_port( - io_object_t obj ) +iokit_make_object_port( + io_object_t obj ) { - register ipc_port_t port; - register ipc_port_t sendPort; - - if( obj == NULL) - return IP_NULL; - - port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT ); - if( port) { - sendPort = ipc_port_make_send( port); - iokit_release_port( port ); - } else - sendPort = IP_NULL; - - iokit_remove_reference( obj ); - - return( sendPort); + return iokit_make_port_of_type(obj, IKOT_IOKIT_OBJECT); } - -EXTERN ipc_port_t -iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type ); +MIGEXTERN ipc_port_t +iokit_make_connect_port( + io_object_t obj ) +{ + return iokit_make_port_of_type(obj, IKOT_IOKIT_CONNECT); +} int gIOKitPortCount; EXTERN ipc_port_t iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type ) { - ipc_port_t notify; - ipc_port_t port; - - do { - /* Allocate port, keeping a reference for it. */ - port = ipc_port_alloc_kernel(); - if( port == IP_NULL) - continue; - - /* set kobject & type */ -// iokit_add_reference( obj ); - ipc_kobject_set( port, (ipc_kobject_t) obj, type); - - /* Request no-senders notifications on the port. 
*/ - notify = ipc_port_make_sonce( port); - ip_lock( port); - ipc_port_nsrequest( port, 1, notify, ¬ify); - assert( notify == IP_NULL); gIOKitPortCount++; - - } while( FALSE); - - return( port ); + ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_NSREQUEST; + if (type == IKOT_IOKIT_CONNECT) { + options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND; + } + if (type == IKOT_UEXT_OBJECT) { + ipc_label_t label = IPC_LABEL_DEXT; + return ipc_kobject_alloc_labeled_port((ipc_kobject_t) obj, type, label, options); + } else { + return ipc_kobject_alloc_port((ipc_kobject_t) obj, type, options); + } } - EXTERN kern_return_t iokit_destroy_object_port( ipc_port_t port ) { - ipc_kobject_set( port, IKO_NULL, IKOT_NONE); + iokit_lock_port(port); + ipc_kobject_set( port, IKO_NULL, IKOT_NONE); // iokit_remove_reference( obj ); + iokit_unlock_port(port); + ipc_port_dealloc_kernel( port); + gIOKitPortCount--; - ipc_port_dealloc_kernel( port); - gIOKitPortCount--; - - return( KERN_SUCCESS); + return KERN_SUCCESS; } EXTERN kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type ) { - ipc_kobject_set( port, (ipc_kobject_t) obj, type); + iokit_lock_port(port); + ipc_kobject_set( port, (ipc_kobject_t) obj, type); + iokit_unlock_port(port); - return( KERN_SUCCESS); + return KERN_SUCCESS; } EXTERN mach_port_name_t iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type ) { - ipc_port_t port; - ipc_port_t sendPort; - mach_port_name_t name; - - if( obj == NULL) - return MACH_PORT_NULL; - - port = iokit_port_for_object( obj, type ); - if( port) { - sendPort = ipc_port_make_send( port); - iokit_release_port( port ); - } else - sendPort = IP_NULL; - - if (IP_VALID( sendPort )) { - kern_return_t kr; - kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort, - MACH_MSG_TYPE_PORT_SEND, TRUE, &name); - if ( kr != KERN_SUCCESS) - name = MACH_PORT_NULL; - } else if ( sendPort == IP_NULL) - name = MACH_PORT_NULL; - else if ( sendPort == IP_DEAD) - name = MACH_PORT_DEAD; + ipc_port_t port; + ipc_port_t sendPort; + mach_port_name_t name = 0; + + if (obj == NULL) { + return MACH_PORT_NULL; + } - iokit_remove_reference( obj ); + port = iokit_port_for_object( obj, type ); + if (port) { + sendPort = ipc_port_make_send( port); + iokit_release_port( port ); + } else { + sendPort = IP_NULL; + } + + if (IP_VALID( sendPort )) { + kern_return_t kr; + // Remove once is fixed. + // We need to make ith_knote NULL as ipc_object_copyout() uses + // thread-argument-passing and its value should not be garbage + current_thread()->ith_knote = ITH_KNOTE_NULL; + kr = ipc_object_copyout( task->itk_space, ip_to_object(sendPort), + MACH_MSG_TYPE_PORT_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, &name); + if (kr != KERN_SUCCESS) { + name = MACH_PORT_NULL; + } + } else if (sendPort == IP_NULL) { + name = MACH_PORT_NULL; + } else if (sendPort == IP_DEAD) { + name = MACH_PORT_DEAD; + } - return( name ); + return name; } EXTERN kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta ) { - return (mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta )); + return mach_port_mod_refs( task->itk_space, name, MACH_PORT_RIGHT_SEND, delta ); } /* * Handle the No-More_Senders notification generated from a device port destroy. * Since there are no longer any tasks which hold a send right to this device - * port a NMS notification has been generated. + * port a NMS notification has been generated. 
*/ static void iokit_no_senders( mach_no_senders_notification_t * notification ) { - ipc_port_t port; - io_object_t obj = NULL; - ipc_kobject_type_t type; - ipc_port_t notify; - - port = (ipc_port_t) notification->not_header.msgh_remote_port; - - // convert a port to io_object_t. - if( IP_VALID(port)) { - ip_lock(port); - if( ip_active(port)) { - obj = (io_object_t) port->ip_kobject; - type = ip_kotype( port ); - if( (IKOT_IOKIT_OBJECT == type) - || (IKOT_IOKIT_CONNECT == type)) - iokit_add_reference( obj ); - else - obj = NULL; + ipc_port_t port; + io_object_t obj = NULL; + ipc_kobject_type_t type = IKOT_NONE; + ipc_port_t notify; + + port = notification->not_header.msgh_remote_port; + + // convert a port to io_object_t. + if (IP_VALID(port)) { + iokit_lock_port(port); + if (ip_active(port)) { + obj = (io_object_t) ip_get_kobject(port); + type = ip_kotype( port ); + if ((IKOT_IOKIT_OBJECT == type) + || (IKOT_IOKIT_CONNECT == type) + || (IKOT_IOKIT_IDENT == type) + || (IKOT_UEXT_OBJECT == type)) { + iokit_add_reference( obj, IKOT_IOKIT_OBJECT ); + } else { + obj = NULL; + } + } + iokit_unlock_port(port); + + if (obj) { + mach_port_mscount_t mscount = notification->not_count; + + if (KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) { + /* Re-request no-senders notifications on the port (if still active) */ + ip_lock(port); + if (ip_active(port)) { + notify = ipc_port_make_sonce_locked(port); + ipc_port_nsrequest( port, mscount + 1, notify, ¬ify); + /* port unlocked */ + if (notify != IP_NULL) { + ipc_port_release_sonce(notify); + } + } else { + ip_unlock(port); + } + } + iokit_remove_reference( obj ); + } } - ip_unlock(port); - - if( obj ) { - - mach_port_mscount_t mscount = notification->not_count; - - if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount )) - { - /* Re-request no-senders notifications on the port. 
*/ - notify = ipc_port_make_sonce( port); - ip_lock( port); - ipc_port_nsrequest( port, mscount + 1, notify, ¬ify); - assert( notify == IP_NULL); - } - iokit_remove_reference( obj ); - } - } } @@ -382,111 +394,199 @@ EXTERN boolean_t iokit_notify( mach_msg_header_t * msg ) { - switch (msg->msgh_id) { - case MACH_NOTIFY_NO_SENDERS: - iokit_no_senders((mach_no_senders_notification_t *) msg); - return TRUE; - - case MACH_NOTIFY_PORT_DELETED: - case MACH_NOTIFY_PORT_DESTROYED: - case MACH_NOTIFY_SEND_ONCE: - case MACH_NOTIFY_DEAD_NAME: - default: - printf("iokit_notify: strange notification %ld\n", msg->msgh_id); - return FALSE; - } + switch (msg->msgh_id) { + case MACH_NOTIFY_NO_SENDERS: + iokit_no_senders((mach_no_senders_notification_t *) msg); + return TRUE; + + case MACH_NOTIFY_PORT_DELETED: + case MACH_NOTIFY_PORT_DESTROYED: + case MACH_NOTIFY_SEND_ONCE: + case MACH_NOTIFY_DEAD_NAME: + default: + printf("iokit_notify: strange notification %d\n", msg->msgh_id); + return FALSE; + } } -/* need to create a pmap function to generalize */ -unsigned int IODefaultCacheBits(addr64_t pa) +kern_return_t +iokit_label_dext_task(task_t task) { - unsigned int flags; -#ifndef i386 - struct phys_entry * pp; - - // Find physical address - if ((pp = pmap_find_physentry(pa >> 12))) { - // Use physical attributes as default - // NOTE: DEVICE_PAGER_FLAGS are made to line up - flags = VM_MEM_COHERENT; /* We only support coherent memory */ - if(pp->ppLink & ppG) flags |= VM_MEM_GUARDED; /* Add in guarded if it is */ - if(pp->ppLink & ppI) flags |= VM_MEM_NOT_CACHEABLE; /* Add in cache inhibited if so */ - } else - // If no physical, just hard code attributes - flags = VM_WIMG_IO; -#else - extern pmap_paddr_t avail_end; - - if (pa < avail_end) - flags = VM_WIMG_COPYBACK; - else - flags = VM_WIMG_IO; -#endif + return ipc_space_add_label(task->itk_space, IPC_LABEL_DEXT); +} - return flags; +/* need to create a pmap function to generalize */ +unsigned int +IODefaultCacheBits(addr64_t pa) +{ + return pmap_cache_attributes((ppnum_t)(pa >> PAGE_SHIFT)); } -kern_return_t IOMapPages(vm_map_t map, vm_offset_t va, vm_offset_t pa, - vm_size_t length, unsigned int options) +kern_return_t +IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa, + mach_vm_size_t length, unsigned int options) { - vm_size_t off; - vm_prot_t prot; - unsigned int flags; - pmap_t pmap = map->pmap; + vm_prot_t prot; + unsigned int flags; + ppnum_t pagenum; + pmap_t pmap = map->pmap; - prot = (options & kIOMapReadOnly) - ? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE); + prot = (options & kIOMapReadOnly) + ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE); - switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */ + pagenum = (ppnum_t)atop_64(pa); + switch (options & kIOMapCacheMask) { /* What cache mode do we need? 
*/ case kIOMapDefaultCache: default: - flags = IODefaultCacheBits(pa); - break; + flags = IODefaultCacheBits(pa); + break; case kIOMapInhibitCache: - flags = VM_WIMG_IO; - break; + flags = VM_WIMG_IO; + break; case kIOMapWriteThruCache: - flags = VM_WIMG_WTHRU; - break; + flags = VM_WIMG_WTHRU; + break; - case kIOWriteCombineCache: - flags = VM_WIMG_WCOMB; - break; + case kIOMapWriteCombineCache: + flags = VM_WIMG_WCOMB; + break; case kIOMapCopybackCache: - flags = VM_WIMG_COPYBACK; - break; - } -#if __ppc__ + flags = VM_WIMG_COPYBACK; + break; - // Set up a block mapped area - pmap_map_block(pmap, (addr64_t)va, (ppnum_t)(pa >> 12), (uint32_t)(length >> 12), prot, flags, 0); + case kIOMapCopybackInnerCache: + flags = VM_WIMG_INNERWBACK; + break; -#else -// enter each page's physical address in the target map + case kIOMapPostedWrite: + flags = VM_WIMG_POSTED; + break; - for (off = 0; off < length; off += page_size) - pmap_enter(pmap, va + off, (pa + off) >> 12, prot, flags, TRUE); + case kIOMapRealTimeCache: + flags = VM_WIMG_RT; + break; + } -#endif + pmap_set_cache_attributes(pagenum, flags); + + vm_map_set_cache_attr(map, (vm_map_offset_t)va); + + + // Set up a block mapped area + return pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0); +} + +kern_return_t +IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length) +{ + pmap_t pmap = map->pmap; + + pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length)); - return( KERN_SUCCESS ); + return KERN_SUCCESS; } -kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length) +kern_return_t +IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va, + mach_vm_size_t __unused length, unsigned int __unused options) { - pmap_t pmap = map->pmap; + mach_vm_size_t off; + vm_prot_t prot; + unsigned int flags; + pmap_t pmap = map->pmap; + pmap_flush_context pmap_flush_context_storage; + boolean_t delayed_pmap_flush = FALSE; + + prot = (options & kIOMapReadOnly) + ? VM_PROT_READ : (VM_PROT_READ | VM_PROT_WRITE); + + switch (options & kIOMapCacheMask) { + // what cache mode do we need? 
+ case kIOMapDefaultCache: + default: + return KERN_INVALID_ARGUMENT; + + case kIOMapInhibitCache: + flags = VM_WIMG_IO; + break; + + case kIOMapWriteThruCache: + flags = VM_WIMG_WTHRU; + break; + + case kIOMapWriteCombineCache: + flags = VM_WIMG_WCOMB; + break; + + case kIOMapCopybackCache: + flags = VM_WIMG_COPYBACK; + break; + + case kIOMapCopybackInnerCache: + flags = VM_WIMG_INNERWBACK; + break; + + case kIOMapPostedWrite: + flags = VM_WIMG_POSTED; + break; + + case kIOMapRealTimeCache: + flags = VM_WIMG_RT; + break; + } - pmap_remove(pmap, trunc_page_64(va), round_page_64(va + length)); + pmap_flush_context_init(&pmap_flush_context_storage); + delayed_pmap_flush = FALSE; + + // enter each page's physical address in the target map + for (off = 0; off < length; off += page_size) { + ppnum_t ppnum = pmap_find_phys(pmap, va + off); + if (ppnum) { + pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE, + PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage); + delayed_pmap_flush = TRUE; + } + } + if (delayed_pmap_flush == TRUE) { + pmap_flush(&pmap_flush_context_storage); + } - return( KERN_SUCCESS ); + return KERN_SUCCESS; } +ppnum_t +IOGetLastPageNumber(void) +{ +#if __i386__ || __x86_64__ + ppnum_t lastPage, highest = 0; + unsigned int idx; + + for (idx = 0; idx < pmap_memory_region_count; idx++) { + lastPage = pmap_memory_regions[idx].end - 1; + if (lastPage > highest) { + highest = lastPage; + } + } + return highest; +#elif __arm__ || __arm64__ + return 0; +#else +#error unknown arch +#endif +} + + void IOGetTime( mach_timespec_t * clock_time); -void IOGetTime( mach_timespec_t * clock_time) +void +IOGetTime( mach_timespec_t * clock_time) { - clock_get_system_nanotime(&clock_time->tv_sec, &clock_time->tv_nsec); + clock_sec_t sec; + clock_nsec_t nsec; + clock_get_system_nanotime(&sec, &nsec); + clock_time->tv_sec = (typeof(clock_time->tv_sec))sec; + clock_time->tv_nsec = nsec; }
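
Illustrative note (not part of the change): the comment above iokit_lookup_io_object() says a lookup does not consume the caller's naked send right and produces an object reference. A minimal sketch of that contract from a caller's side, assuming the io_object_t type and the iokit_lookup_connect_port()/iokit_remove_reference() declarations already visible in this diff; do_something_with() is a hypothetical placeholder, not an XNU function.

/* Hypothetical consumer used only for this sketch. */
extern void do_something_with(io_object_t obj);

static void
with_connect_object(ipc_port_t port)
{
	/* Produces a new object reference; the send right in `port` is untouched. */
	io_object_t obj = iokit_lookup_connect_port(port);

	if (obj != NULL) {
		do_something_with(obj);
		/* Balance the reference the lookup produced. */
		iokit_remove_reference(obj);
	}
	/* Caller still owns (and must eventually dispose of) the send right. */
}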
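
Illustrative note (not part of the change): IOMapPages() and IOProtectCacheMode() above carry the same kIOMapCacheMask switch. A minimal sketch, using only the kIOMap*/VM_WIMG_* names visible in this diff, of how that mapping could be expressed once; the helper name io_cache_mode_to_wimg() is hypothetical. kIOMapDefaultCache is deliberately left to the caller, since the two functions treat it differently (IODefaultCacheBits() in IOMapPages() versus KERN_INVALID_ARGUMENT in IOProtectCacheMode()).

static boolean_t
io_cache_mode_to_wimg(unsigned int options, unsigned int *flags)
{
	switch (options & kIOMapCacheMask) {
	case kIOMapInhibitCache:
		*flags = VM_WIMG_IO;            /* cache-inhibited I/O */
		return TRUE;
	case kIOMapWriteThruCache:
		*flags = VM_WIMG_WTHRU;
		return TRUE;
	case kIOMapWriteCombineCache:
		*flags = VM_WIMG_WCOMB;
		return TRUE;
	case kIOMapCopybackCache:
		*flags = VM_WIMG_COPYBACK;
		return TRUE;
	case kIOMapCopybackInnerCache:
		*flags = VM_WIMG_INNERWBACK;
		return TRUE;
	case kIOMapPostedWrite:
		*flags = VM_WIMG_POSTED;
		return TRUE;
	case kIOMapRealTimeCache:
		*flags = VM_WIMG_RT;
		return TRUE;
	case kIOMapDefaultCache:
	default:
		/* Caller decides: IODefaultCacheBits(pa) or reject the request. */
		return FALSE;
	}
}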