X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/150bd0745008d57c273e65929515dd6bbe568d83..43866e378188c25dd1e2208016ab3cbeb086ae6c:/osfmk/vm/vm_user.c

diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index ec4dcfbf2..83ec5a11b 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
@@ -1,21 +1,24 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -55,10 +58,6 @@
  *
  *	User-exported virtual memory functions.
  */
-#ifdef MACH_BSD
-/* remove after component interface available */
-extern int vnode_pager_workaround;
-#endif
 #include
 #include

@@ -71,7 +70,9 @@ extern int vnode_pager_workaround;
 #include
 #include
 #include
+
 #include
+#include
 #include
 #include

@@ -512,6 +513,7 @@ vm_map_64(
 			named_entry_unlock(named_entry);
 			return(KERN_INVALID_OBJECT);
 		}
+		object->true_share = TRUE;
 		named_entry->object = object;
 		named_entry_unlock(named_entry);
 		/* create an extra reference for the named entry */
@@ -528,13 +530,28 @@ vm_map_64(
 				vm_object_unlock(object);
 			}
 		}
-	} else {
-		if ((object = vm_object_enter(port, size, FALSE, FALSE, FALSE))
+	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
+		/*
+		 * JMM - This is temporary until we unify named entries
+		 * and raw memory objects.
+		 *
+		 * Detected fake ip_kotype for a memory object.  In
+		 * this case, the port isn't really a port at all, but
+		 * instead is just a raw memory object.
+		 */
+
+		if ((object = vm_object_enter((memory_object_t)port,
+			size, FALSE, FALSE, FALSE))
 			== VM_OBJECT_NULL)
 			return(KERN_INVALID_OBJECT);

 		/* wait for object (if any) to be ready */
 		if (object != VM_OBJECT_NULL) {
+			if(object == kernel_object) {
+				printf("Warning: Attempt to map kernel object"
+					" by a non-private kernel entity\n");
+				return(KERN_INVALID_OBJECT);
+			}
 			vm_object_lock(object);
 			while (!object->pager_ready) {
 				vm_object_wait(object,
@@ -544,6 +561,8 @@ vm_map_64(
 			}
 			vm_object_unlock(object);
 		}
+	} else {
+		return (KERN_INVALID_OBJECT);
 	}

 	*address = trunc_page(*address);
@@ -837,7 +856,7 @@ vm_msync(
 			kill_pages = -1;
 		}
 		if (kill_pages != -1)
-			memory_object_deactivate_pages(object, offset,
+			vm_object_deactivate_pages(object, offset,
 				(vm_object_size_t)flush_size, kill_pages);
 		vm_object_unlock(object);
 		vm_map_unlock(map);
@@ -848,8 +867,8 @@ vm_msync(
 		 * Don't bother to sync internal objects, since there can't
 		 * be any "permanent" storage for these objects anyway.
 		 */
-		if ((object->pager == IP_NULL) || (object->internal) ||
-			(object->private)) {
+		if ((object->pager == MEMORY_OBJECT_NULL) ||
+			(object->internal) || (object->private)) {
 			vm_object_unlock(object);
 			vm_map_unlock(map);
 			continue;
@@ -864,7 +883,7 @@ vm_msync(

 		vm_map_unlock(map);

-		do_sync_req = memory_object_sync(object,
+		do_sync_req = vm_object_sync(object,
 					offset,
 					flush_size,
 					sync_flags & VM_SYNC_INVALIDATE,
@@ -917,31 +936,11 @@ re_iterate:
 		queue_enter(&req_q, new_msr, msync_req_t, req_q);

-#ifdef MACH_BSD
-		if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
-			((rpc_subsystem_t) &vnode_pager_workaround)) {
-			(void) vnode_pager_synchronize(
-				object->pager,
-				object->pager_request,
-				offset,
-				flush_size,
-				sync_flags);
-		} else {
-			(void) memory_object_synchronize(
-				object->pager,
-				object->pager_request,
-				offset,
-				flush_size,
-				sync_flags);
-		}
-#else
 		(void) memory_object_synchronize(
 			object->pager,
-			object->pager_request,
 			offset,
 			flush_size,
 			sync_flags);
-#endif

 	}/* while */

 	/*
@@ -1178,7 +1177,8 @@ vm_allocate_cpm(
 		m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
 		vm_object_unlock(cpm_obj);
 		assert(m != VM_PAGE_NULL);
-		PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
+		PMAP_ENTER(pmap, va, m, VM_PROT_ALL,
+			VM_WIMG_USE_DEFAULT, TRUE);
 	}

 #if	MACH_ASSERT
@@ -1249,7 +1249,7 @@ mach_memory_object_memory_entry_64(
 	boolean_t	internal,
 	vm_object_offset_t	size,
 	vm_prot_t	permission,
-	ipc_port_t	pager,
+	memory_object_t	pager,
 	ipc_port_t	*entry_handle)
 {
 	vm_named_entry_t	user_object;
@@ -1305,7 +1305,7 @@ mach_memory_object_memory_entry(
 	boolean_t	internal,
 	vm_size_t	size,
 	vm_prot_t	permission,
-	ipc_port_t	pager,
+	memory_object_t	pager,
 	ipc_port_t	*entry_handle)
 {
 	return mach_memory_object_memory_entry_64( host, internal,
@@ -1334,18 +1334,26 @@ mach_make_memory_entry_64(
 	vm_map_t		pmap_map;

 	/* needed for call to vm_map_lookup_locked */
-	boolean_t		wired;
+	boolean_t		wired;
 	vm_object_offset_t	obj_off;
-	vm_prot_t		prot;
+	vm_prot_t		prot;
 	vm_object_offset_t	lo_offset, hi_offset;
 	vm_behavior_t		behavior;
-	vm_object_t		object;
+	vm_object_t		object;
+	vm_object_t		shadow_object;

 	/* needed for direct map entry manipulation */
 	vm_map_entry_t		map_entry;
-	vm_map_t		local_map;
+	vm_map_entry_t		next_entry;
+	vm_map_t		local_map;
+	vm_map_t		original_map = target_map;
+	vm_offset_t		local_offset;
 	vm_object_size_t	mappable_size;
+	vm_object_size_t	total_size;
+
+	offset = trunc_page_64(offset);
+	*size = round_page_64(*size);

 	user_object = (vm_named_entry_t)
 			kalloc(sizeof (struct vm_named_entry));
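The two lines added at the end of the hunk above normalize the caller's offset and size to page boundaries before any map lookup happens: the offset is rounded down, the size rounded up. A minimal userspace sketch of that rounding, assuming 4 KB pages (trunc_page_model/round_page_model are invented stand-ins for the kernel's trunc_page_64()/round_page_64() macros):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE_MODEL 4096ULL

    static uint64_t trunc_page_model(uint64_t x)
    {
        return x & ~(PAGE_SIZE_MODEL - 1);                  /* round down */
    }

    static uint64_t round_page_model(uint64_t x)
    {
        return (x + PAGE_SIZE_MODEL - 1) & ~(PAGE_SIZE_MODEL - 1); /* up */
    }

    int main(void)
    {
        uint64_t offset = 0x1234;       /* arbitrary unaligned inputs */
        uint64_t size   = 0x2100;
        printf("offset 0x%llx -> 0x%llx\n",
            (unsigned long long)offset,
            (unsigned long long)trunc_page_model(offset));
        printf("size   0x%llx -> 0x%llx\n",
            (unsigned long long)size,
            (unsigned long long)round_page_model(size));
        return 0;
    }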
@@ -1393,22 +1401,37 @@ mach_make_memory_entry_64(
 			vm_map_unlock_read(target_map);
 			goto make_mem_done;
 		}
-		if ((prot & permission) != permission) {
+		if (((prot & permission) != permission)
+			|| (object == kernel_object)) {
 			kr = KERN_INVALID_RIGHT;
 			vm_object_unlock(object);
 			vm_map_unlock_read(target_map);
 			if(pmap_map != target_map)
 				vm_map_unlock_read(pmap_map);
+			if(object == kernel_object) {
+				printf("Warning: Attempt to create a named"
+					" entry from the kernel_object\n");
+			}
 			goto make_mem_done;
 		}

 		/* We have an object, now check to see if this object */
 		/* is suitable.  If not, create a shadow and share that */

-		local_map = target_map;
 redo_lookup:
+		local_map = original_map;
+		local_offset = offset;
+		if(target_map != local_map) {
+			vm_map_unlock_read(target_map);
+			if(pmap_map != target_map)
+				vm_map_unlock_read(pmap_map);
+			vm_map_lock_read(local_map);
+			target_map = local_map;
+			pmap_map = local_map;
+		}
 		while(TRUE) {
-		   if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
+		   if(!vm_map_lookup_entry(local_map,
+				local_offset, &map_entry)) {
 			kr = KERN_INVALID_ARGUMENT;
 			vm_object_unlock(object);
 			vm_map_unlock_read(target_map);
@@ -1425,14 +1448,21 @@ redo_lookup:
 				vm_map_unlock_read(pmap_map);
 			goto make_mem_done;
 		   }
+		   if(map_entry->wired_count) {
+			object->true_share = TRUE;
+		   }
 		   break;
 		} else {
+		   vm_map_t	tmap;
+		   tmap = local_map;
 		   local_map = map_entry->object.sub_map;
+
 		   vm_map_lock_read(local_map);
-		   vm_map_unlock_read(target_map);
-		   if(pmap_map != target_map)
-			vm_map_unlock_read(pmap_map);
+		   vm_map_unlock_read(tmap);
 		   target_map = local_map;
+		   pmap_map = local_map;
+		   local_offset = local_offset - map_entry->vme_start;
+		   local_offset += map_entry->offset;
 		}
 	   }
 	   if(((map_entry->max_protection) & permission) != permission) {
@@ -1443,6 +1473,40 @@ redo_lookup:
 			vm_map_unlock_read(pmap_map);
 		goto make_mem_done;
 	   }
+
+	   mappable_size = hi_offset - obj_off;
+	   total_size = map_entry->vme_end - map_entry->vme_start;
+	   if(*size > mappable_size) {
+		/* try to extend mappable size if the entries */
+		/* following are from the same object and are */
+		/* compatible */
+		next_entry = map_entry->vme_next;
+		/* lets see if the next map entry is still   */
+		/* pointing at this object and is contiguous */
+		while(*size > mappable_size) {
+			if((next_entry->object.vm_object == object) &&
+				(next_entry->vme_start ==
					next_entry->vme_prev->vme_end) &&
+				(next_entry->offset ==
					next_entry->vme_prev->offset +
					(next_entry->vme_prev->vme_end -
					next_entry->vme_prev->vme_start))) {
+				if(((next_entry->max_protection)
+					& permission) != permission) {
+					break;
+				}
+				mappable_size += next_entry->vme_end
+					- next_entry->vme_start;
+				total_size += next_entry->vme_end
+					- next_entry->vme_start;
+				next_entry = next_entry->vme_next;
+			} else {
+				break;
+			}
+
+		}
+	   }
+
 	   if(object->internal) {
 	   	/* vm_map_lookup_locked will create a shadow if   */
 		/* needs_copy is set but does not check for the   */
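The new block in the hunk above grows mappable_size past the first map entry by walking vme_next for as long as each following entry maps the same object, starts exactly where its predecessor ends, and continues at the matching object offset. A small self-contained model of that contiguity test (the entry layout here is invented for illustration, not the kernel's vm_map_entry):

    #include <stdint.h>
    #include <stdio.h>

    struct entry_model {
        uint64_t vme_start, vme_end;  /* mapped address range */
        uint64_t offset;              /* offset into the backing object */
        int      object_id;           /* stands in for the vm_object pointer */
    };

    /* Is "next" a seamless continuation of "prev"? Same three checks as
     * the while loop above: same object, abutting range, matching offset. */
    static int contiguous(const struct entry_model *prev,
                          const struct entry_model *next)
    {
        return next->object_id == prev->object_id &&
               next->vme_start == prev->vme_end &&
               next->offset == prev->offset + (prev->vme_end - prev->vme_start);
    }

    int main(void)
    {
        struct entry_model e[3] = {
            { 0x1000, 0x3000, 0x0000, 7 },
            { 0x3000, 0x5000, 0x2000, 7 },   /* contiguous with e[0]     */
            { 0x5000, 0x6000, 0x9000, 7 },   /* same object, offset gap  */
        };
        uint64_t mappable = e[0].vme_end - e[0].vme_start;
        for (int i = 0; i + 1 < 3 && contiguous(&e[i], &e[i + 1]); i++)
            mappable += e[i + 1].vme_end - e[i + 1].vme_start;
        printf("mappable size: 0x%llx\n", (unsigned long long)mappable);
        return 0;
    }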
@@ -1450,10 +1514,9 @@ redo_lookup:
 		/* set up an object which will not be pulled from */
 		/* under us.  */

-	   	if (map_entry->needs_copy || object->shadowed ||
-		     (object->size >
-			((vm_object_size_t)map_entry->vme_end -
-				map_entry->vme_start))) {
+	   	if ((map_entry->needs_copy || object->shadowed ||
+		     (object->size > total_size))
+			&& !object->true_share) {
 		   	if (vm_map_lock_read_to_write(target_map)) {
 		   		vm_map_lock_read(target_map);
 				goto redo_lookup;
@@ -1461,25 +1524,48 @@ redo_lookup:
 			}

 		   	/* create a shadow object */
+			vm_object_shadow(&map_entry->object.vm_object,
+					&map_entry->offset, total_size);
+			shadow_object = map_entry->object.vm_object;
+			vm_object_unlock(object);
+			vm_object_pmap_protect(
+				object, map_entry->offset,
+				total_size,
+				((map_entry->is_shared
+					|| target_map->mapped)
+				? PMAP_NULL :
+				target_map->pmap),
+				map_entry->vme_start,
+				map_entry->protection & ~VM_PROT_WRITE);
+			total_size -= (map_entry->vme_end
+					- map_entry->vme_start);
+			next_entry = map_entry->vme_next;
+			map_entry->needs_copy = FALSE;
+			while (total_size) {
+				if(next_entry->object.vm_object == object) {
+					next_entry->object.vm_object
+						= shadow_object;
+					next_entry->offset
+						= next_entry->vme_prev->offset +
+						(next_entry->vme_prev->vme_end
+						- next_entry->vme_prev->vme_start);
+					next_entry->needs_copy = FALSE;
+				} else {
+					panic("mach_make_memory_entry_64:"
+						" map entries out of sync\n");
+				}
+				total_size -=
+					next_entry->vme_end
+					- next_entry->vme_start;
+				next_entry = next_entry->vme_next;
+			}
+
+			object = shadow_object;
+			vm_object_lock(object);
+			obj_off = (local_offset - map_entry->vme_start) +
+						map_entry->offset;
+
+			vm_map_lock_write_to_read(target_map);

-		   	vm_object_shadow(&map_entry->object.vm_object,
-				&map_entry->offset,
-				(map_entry->vme_end
-				- map_entry->vme_start));
-			map_entry->needs_copy = FALSE;
-			vm_object_unlock(object);
-			object = map_entry->object.vm_object;
-			vm_object_lock(object);
-			object->size = map_entry->vme_end
-					- map_entry->vme_start;
-			obj_off = (offset - map_entry->vme_start) +
-						map_entry->offset;
-			lo_offset = map_entry->offset;
-			hi_offset = (map_entry->vme_end -
-				map_entry->vme_start) +
-				map_entry->offset;
-
-			vm_map_lock_write_to_read(target_map);
 		}
 	   }
@@ -1509,7 +1595,6 @@ redo_lookup:
 	   	/* offset of our beg addr within entry */
 	   	/* it corresponds to this:		*/

-	   mappable_size = hi_offset - obj_off;
 	   if(*size > mappable_size)
 		*size = mappable_size;
@@ -1523,7 +1608,6 @@ redo_lookup:
 		vm_object_unlock(object);
 		ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
 			IKOT_NAMED_ENTRY);
-		*size = user_object->size;
 		*object_handle = user_handle;
 		vm_map_unlock_read(target_map);
 		if(pmap_map != target_map)
@@ -1725,16 +1809,17 @@ kern_return_t vm_map_region_replace(
 	   (entry->vme_start < end)) {
 		if((entry->is_sub_map) &&
 			(entry->object.sub_map == old_submap)) {
-			entry->object.sub_map = new_submap;
 			if(entry->use_pmap) {
 				if((start & 0xfffffff) ||
 					((end - start) != 0x10000000)) {
 					vm_map_unlock(old_submap);
+					vm_map_deallocate(old_submap);
 					vm_map_unlock(target_map);
 					return KERN_INVALID_ARGUMENT;
 				}
 				nested_pmap = 1;
 			}
+			entry->object.sub_map = new_submap;
 			vm_map_reference(new_submap);
 			vm_map_deallocate(old_submap);
 		}
@@ -1744,13 +1829,19 @@ kern_return_t vm_map_region_replace(
 	if(nested_pmap) {
#ifndef i386
 		pmap_unnest(target_map->pmap, start, end - start);
+		if(target_map->mapped) {
+			vm_map_submap_pmap_clean(target_map,
+				start, end, old_submap, 0);
+		}
 		pmap_nest(target_map->pmap, new_submap->pmap,
 				start, end - start);
#endif i386
 	} else {
-		pmap_remove(target_map->pmap, start, end);
+		vm_map_submap_pmap_clean(target_map,
+			start, end, old_submap, 0);
 	}
 	vm_map_unlock(old_submap);
+	vm_map_deallocate(old_submap);
 	vm_map_unlock(target_map);
 	return KERN_SUCCESS;
 }
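When the backing object is internal and still subject to copy-on-write, the rewritten code above interposes a shadow object in front of the original across every map entry that referenced it, and write-protects the original pages so the first write faults into the shadow. A toy model of how a Mach-style shadow chain resolves a page, with invented names rather than the kernel's actual vm_object layout:

    #include <stdio.h>
    #include <stddef.h>

    #define NPAGES 4

    struct object_model {
        const char          *page[NPAGES];  /* NULL = not present here */
        struct object_model *shadow;        /* next object to consult  */
    };

    /* A lookup falls through to the shadow when the front object has no
     * private copy of the page yet; a write would fill the front object. */
    static const char *resolve(struct object_model *o, int pg)
    {
        for (; o != NULL; o = o->shadow)
            if (o->page[pg] != NULL)
                return o->page[pg];
        return "<unmapped>";
    }

    int main(void)
    {
        struct object_model original = {{ "A", "B", "C", "D" }, NULL };
        struct object_model shadow   = {{ NULL, "B'", NULL, NULL }, &original };
        /* Page 1 was written after the shadow was interposed; the other
         * pages still come from the original object underneath. */
        for (int pg = 0; pg < NPAGES; pg++)
            printf("page %d -> %s\n", pg, resolve(&shadow, pg));
        return 0;
    }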
@@ -1875,6 +1966,10 @@ set_dp_control_port(
 {
 	if (host_priv == HOST_PRIV_NULL)
 		return (KERN_INVALID_HOST);
+
+	if (IP_VALID(dynamic_pager_control_port))
+		ipc_port_release_send(dynamic_pager_control_port);
+
 	dynamic_pager_control_port = control_port;
 	return KERN_SUCCESS;
 }
@@ -1886,41 +1981,25 @@ get_dp_control_port(
 {
 	if (host_priv == HOST_PRIV_NULL)
 		return (KERN_INVALID_HOST);
-	*control_port = dynamic_pager_control_port;
+
+	*control_port = ipc_port_copy_send(dynamic_pager_control_port);
 	return KERN_SUCCESS;
 }

-void
-mach_destroy_upl(
-	ipc_port_t	port)
-{
-	upl_t	upl;
-#if	MACH_ASSERT
-	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
-#endif	/* MACH_ASSERT */
-	upl = (upl_t)port->ip_kobject;
-	mutex_lock(&(upl)->Lock);
-	upl->ref_count-=1;
-	if(upl->ref_count == 0) {
-		mutex_unlock(&(upl)->Lock);
-		uc_upl_abort(upl, UPL_ABORT_ERROR);
-	} else
-		mutex_unlock(&(upl)->Lock);
-}

 /* Retrieve a upl for an object underlying an address range in a map */

 kern_return_t
 vm_map_get_upl(
-	vm_map_t	map,
-	vm_offset_t	offset,
-	vm_size_t	*upl_size,
-	upl_t		*upl,
-	upl_page_info_t	**page_list,
-	int		*count,
-	int		*flags,
-	int		force_data_sync)
+	vm_map_t		map,
+	vm_address_t		offset,
+	vm_size_t		*upl_size,
+	upl_t			*upl,
+	upl_page_info_array_t	page_list,
+	unsigned int		*count,
+	int			*flags,
+	int			force_data_sync)
 {
 	vm_map_entry_t	entry;
 	int		caller_flags;
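The two hunks above fix a send-right leak around the dynamic pager control port: the setter now releases the right it was holding before overwriting it, and the getter hands the caller its own copy of the right instead of the raw value. A userspace toy of that discipline, using a plain refcount in place of the real Mach port machinery (all names invented):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct port_model { atomic_int srights; } port_model_t;

    static port_model_t *registered;    /* models dynamic_pager_control_port */

    static void release_send(port_model_t *p)
    {
        if (p && atomic_fetch_sub(&p->srights, 1) == 1)
            free(p);                    /* last send right is gone */
    }

    static port_model_t *copy_send(port_model_t *p)
    {
        if (p) atomic_fetch_add(&p->srights, 1);
        return p;                       /* caller owns a new right */
    }

    static void set_port(port_model_t *p)
    {
        release_send(registered);       /* don't leak the old right */
        registered = p;                 /* consumes the caller's right */
    }

    static port_model_t *get_port(void)
    {
        return copy_send(registered);   /* caller must release it */
    }

    int main(void)
    {
        port_model_t *a = calloc(1, sizeof *a);
        atomic_store(&a->srights, 1);
        set_port(a);
        port_model_t *c = get_port();   /* srights is now 2 */
        release_send(c);
        set_port(NULL);                 /* drops the final right on a */
        printf("no rights leaked\n");
        return 0;
    }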
@@ -1936,12 +2015,34 @@ vm_map_get_upl(
 	}
 	if(upl == NULL)
 		return KERN_INVALID_ARGUMENT;
+
+
 REDISCOVER_ENTRY:
 	vm_map_lock(map);
 	if (vm_map_lookup_entry(map, offset, &entry)) {
+		if (entry->object.vm_object == VM_OBJECT_NULL ||
+			!entry->object.vm_object->phys_contiguous) {
+			if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
+				*upl_size = MAX_UPL_TRANSFER * page_size;
+			}
+		}
 		if((entry->vme_end - offset) < *upl_size) {
 			*upl_size = entry->vme_end - offset;
 		}
+		if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+			if (entry->object.vm_object == VM_OBJECT_NULL) {
+				*flags = 0;
+			} else if (entry->object.vm_object->private) {
+				*flags = UPL_DEV_MEMORY;
+				if (entry->object.vm_object->phys_contiguous) {
+					*flags |= UPL_PHYS_CONTIG;
+				}
+			} else {
+				*flags = 0;
+			}
+			vm_map_unlock(map);
+			return KERN_SUCCESS;
+		}
 		/*
 		 *      Create an object if necessary.
 		 */
@@ -1951,8 +2052,7 @@ REDISCOVER_ENTRY:
 			entry->offset = 0;
 		}
 		if (!(caller_flags & UPL_COPYOUT_FROM)) {
-			if (entry->needs_copy
-				|| entry->object.vm_object->copy) {
+			if (entry->needs_copy) {
 				vm_map_t		local_map;
 				vm_object_t		object;
 				vm_object_offset_t	offset_hi;
@@ -2003,7 +2103,8 @@ REDISCOVER_ENTRY:
 		}
 		if (sync_cow_data) {
-			if (entry->object.vm_object->shadow) {
+			if (entry->object.vm_object->shadow
+				|| entry->object.vm_object->copy) {
 				int	flags;

 				local_object = entry->object.vm_object;
@@ -2036,14 +2137,21 @@ REDISCOVER_ENTRY:
 					vm_object_unlock(local_object);
 				}

-				memory_object_lock_request(
-					local_object, ((offset - local_start)
-						+ local_offset) +
-					local_object->paging_offset,
-					(vm_object_size_t)*upl_size, FALSE,
-					flags,
-					VM_PROT_NO_CHANGE, NULL, 0);
+				if (entry->object.vm_object->shadow &&
+					entry->object.vm_object->copy) {
+					vm_object_lock_request(
+						local_object->shadow,
+						(vm_object_offset_t)
+						((offset - local_start)
+							+ local_offset) +
+						local_object->shadow_offset +
+						local_object->paging_offset,
+						*upl_size, FALSE,
+						MEMORY_OBJECT_DATA_SYNC,
+						VM_PROT_NO_CHANGE);
+				}
 				sync_cow_data = FALSE;
+				vm_object_deallocate(local_object);
 				goto REDISCOVER_ENTRY;
 			}
 		}
@@ -2074,15 +2182,16 @@ REDISCOVER_ENTRY:
 				vm_object_unlock(local_object);
 			}

-			memory_object_lock_request(
-				local_object, ((offset - local_start)
-					+ local_offset) +
-				local_object->paging_offset,
+			vm_object_lock_request(
+				local_object,
+				(vm_object_offset_t)
+				((offset - local_start) + local_offset) +
+				local_object->paging_offset,
 				(vm_object_size_t)*upl_size, FALSE,
 				MEMORY_OBJECT_DATA_SYNC,
-				VM_PROT_NO_CHANGE,
-				NULL, 0);
+				VM_PROT_NO_CHANGE);
 			force_data_sync = FALSE;
+			vm_object_deallocate(local_object);
 			goto REDISCOVER_ENTRY;
 		}
@@ -2102,12 +2211,13 @@ REDISCOVER_ENTRY:
 		local_start = entry->vme_start;
 		vm_object_reference(local_object);
 		vm_map_unlock(map);
-		ret = (vm_fault_list_request(local_object,
-			((offset - local_start) + local_offset),
+		ret = (vm_object_upl_request(local_object,
+			(vm_object_offset_t)
+			((offset - local_start) + local_offset),
 			*upl_size,
 			upl,
 			page_list,
-			*count,
+			count,
 			caller_flags));
 		vm_object_deallocate(local_object);
 		return(ret);
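Several paths above drop the map lock to issue a blocking flush and then jump back to REDISCOVER_ENTRY, because the map may have mutated while it was unlocked. The kernel revalidates by redoing the entry lookup; this simplified analogue uses a version counter instead, so the names and mechanism here are invented and only the retry shape matches:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long map_version;  /* bumped by any map mutation
                                          (not exercised in this sketch) */

    static void blocking_flush(void)
    {
        /* stands in for the copy/sync work done without the map lock */
    }

    static void get_upl_model(void)
    {
    rediscover:
        pthread_mutex_lock(&map_lock);
        unsigned long v = map_version;     /* state observed under the lock */
        pthread_mutex_unlock(&map_lock);   /* can't hold it across blocking work */

        blocking_flush();

        pthread_mutex_lock(&map_lock);
        if (v != map_version) {            /* map changed meanwhile: retry */
            pthread_mutex_unlock(&map_lock);
            goto rediscover;
        }
        /* ... safe to act on the re-validated entry here ... */
        pthread_mutex_unlock(&map_lock);
    }

    int main(void)
    {
        get_upl_model();
        printf("done\n");
        return 0;
    }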
@@ -2118,227 +2228,6 @@ REDISCOVER_ENTRY:
 	}


-
-kern_return_t
-vm_object_upl_request(
-	vm_object_t		object,
-	vm_object_offset_t	offset,
-	vm_size_t		size,
-	ipc_port_t		*upl,
-	upl_page_info_t		*page_list,
-	mach_msg_type_number_t	*count,
-	int			cntrl_flags)
-{
-	upl_t			upl_object;
-	ipc_port_t		upl_port;
-	ipc_port_t		previous;
-	upl_page_info_t		*pl;
-	kern_return_t		kr;
-
-	pl = page_list;
-	kr = vm_fault_list_request(object, offset, size, &upl_object,
-			&pl, *count, cntrl_flags);
-
-
-	if(kr != KERN_SUCCESS) {
-		*upl = MACH_PORT_NULL;
-		return KERN_FAILURE;
-	}
-
-	upl_port = ipc_port_alloc_kernel();
-
-
-	ip_lock(upl_port);
-
-	/* make a sonce right */
-	upl_port->ip_sorights++;
-	ip_reference(upl_port);
-
-	upl_port->ip_destination = IP_NULL;
-	upl_port->ip_receiver_name = MACH_PORT_NULL;
-	upl_port->ip_receiver = ipc_space_kernel;
-
-	/* make a send right */
-	upl_port->ip_mscount++;
-	upl_port->ip_srights++;
-	ip_reference(upl_port);
-
-	ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
-	/* nsrequest unlocks user_handle */
-
-	/* Create a named object based on a submap of specified size */
-
-
-	ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
-	*upl = upl_port;
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_pager_upl_request(
-	vm_object_t		object,
-	vm_object_offset_t	offset,
-	vm_size_t		size,
-	vm_size_t		super_size,
-	ipc_port_t		*upl,
-	upl_page_info_t		*page_list,
-	mach_msg_type_number_t	*count,
-	int			cntrl_flags)
-{
-	upl_t			upl_object;
-	ipc_port_t		upl_port;
-	ipc_port_t		previous;
-	upl_page_info_t		*pl;
-	kern_return_t		kr;
-
-	pl = page_list;
-	kr = upl_system_list_request(object, offset, size, super_size,
-			&upl_object, &pl, *count, cntrl_flags);
-
-	if(kr != KERN_SUCCESS) {
-		*upl = MACH_PORT_NULL;
-		return KERN_FAILURE;
-	}
-
-
-	upl_port = ipc_port_alloc_kernel();
-
-
-	ip_lock(upl_port);
-
-	/* make a sonce right */
-	upl_port->ip_sorights++;
-	ip_reference(upl_port);
-
-	upl_port->ip_destination = IP_NULL;
-	upl_port->ip_receiver_name = MACH_PORT_NULL;
-	upl_port->ip_receiver = ipc_space_kernel;
-
-	/* make a send right */
-	upl_port->ip_mscount++;
-	upl_port->ip_srights++;
-	ip_reference(upl_port);
-
-	ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
-	/* nsrequest unlocks user_handle */
-
-	/* Create a named object based on a submap of specified size */
-
-
-	ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
-	*upl = upl_port;
-	return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_upl_map(
-	vm_map_t	map,
-	ipc_port_t	upl_port,
-	vm_offset_t	*dst_addr)
-{
-	upl_t		upl;
-	kern_return_t	kr;
-
-	if (!IP_VALID(upl_port)) {
-		return KERN_INVALID_ARGUMENT;
-	} else if (ip_kotype(upl_port) == IKOT_UPL) {
-		upl_lock(upl);
-		upl = (upl_t)upl_port->ip_kobject;
-		kr = uc_upl_map(map, upl, dst_addr);
-		upl_unlock(upl);
-		return kr;
-	} else {
-		return KERN_FAILURE;
-	}
-}
-
-
-kern_return_t
-vm_upl_unmap(
-	vm_map_t	map,
-	ipc_port_t	upl_port)
-{
-	upl_t		upl;
-	kern_return_t	kr;
-
-	if (!IP_VALID(upl_port)) {
-		return KERN_INVALID_ARGUMENT;
-	} else if (ip_kotype(upl_port) == IKOT_UPL) {
-		upl_lock(upl);
-		upl = (upl_t)upl_port->ip_kobject;
-		kr = uc_upl_un_map(map, upl);
-		upl_unlock(upl);
-		return kr;
-	} else {
-		return KERN_FAILURE;
-	}
-}
-
-kern_return_t
-vm_upl_commit(
-	upl_t			upl,
-	upl_page_list_ptr_t	page_list,
-	mach_msg_type_number_t	count)
-{
-	kern_return_t	kr;
-	upl_lock(upl);
-	if(count) {
-		kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
-	} else {
-		kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
-	}
-	upl_unlock(upl);
-	return kr;
-}
-
-kern_return_t
-vm_upl_commit_range(
-	upl_t			upl,
-	vm_offset_t		offset,
-	vm_size_t		size,
-	upl_page_list_ptr_t	page_list,
-	int			flags,
-	mach_msg_type_number_t	count)
-{
-	kern_return_t	kr;
-	upl_lock(upl);
-	if(count) {
-		kr = uc_upl_commit_range(upl, offset, size, flags,
-			(upl_page_info_t *)page_list);
-	} else {
-		kr = uc_upl_commit_range(upl, offset, size, flags,
-			(upl_page_info_t *) NULL);
-	}
-	upl_unlock(upl);
-	return kr;
-}
-
-kern_return_t
-vm_upl_abort_range(
-	upl_t		upl,
-	vm_offset_t	offset,
-	vm_size_t	size,
-	int		abort_flags)
-{
-	kern_return_t	kr;
-	upl_lock(upl);
-	kr = uc_upl_abort_range(upl, offset, size, abort_flags);
-	upl_unlock(upl);
-	return kr;
-}
-
-kern_return_t
-vm_upl_abort(
-	upl_t	upl,
-	int	abort_type)
-{
-	kern_return_t	kr;
-	upl_lock(upl);
-	kr = uc_upl_abort(upl, abort_type);
-	upl_unlock(upl);
-	return kr;
-}
-
 /* ******* Temporary Internal calls to UPL for BSD ***** */
 kern_return_t
 kernel_upl_map(
@@ -2346,209 +2235,88 @@ kernel_upl_map(
 	upl_t		upl,
 	vm_offset_t	*dst_addr)
 {
-	kern_return_t	kr;
-
-	upl_lock(upl);
-	kr = uc_upl_map(map, upl, dst_addr);
-	if(kr == KERN_SUCCESS) {
-		upl->ref_count += 1;
-	}
-	upl_unlock(upl);
-	return kr;
+	return (vm_upl_map(map, upl, dst_addr));
 }
 kern_return_t
 kernel_upl_unmap(
 	vm_map_t	map,
-	upl_t	upl)
+	upl_t		upl)
 {
-	kern_return_t	kr;
-
-	upl_lock(upl);
-	kr = uc_upl_un_map(map, upl);
-	if(kr == KERN_SUCCESS) {
-		if(upl->ref_count == 1) {
-			upl_dealloc(upl);
-		} else {
-			upl->ref_count -= 1;
-			upl_unlock(upl);
-		}
-	} else {
-		upl_unlock(upl);
-	}
-	return kr;
+	return(vm_upl_unmap(map, upl));
 }

 kern_return_t
 kernel_upl_commit(
 	upl_t			upl,
-	upl_page_list_ptr_t	page_list,
-	mach_msg_type_number_t	count)
+	upl_page_info_t		*pl,
+	mach_msg_type_number_t	count)
 {
-	kern_return_t	kr;
-	upl_lock(upl);
-	if(count) {
-		kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
-	} else {
-		kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
-	}
-	if(upl->ref_count == 1) {
-		upl_dealloc(upl);
-	} else {
-		upl->ref_count -= 1;
-		upl_unlock(upl);
-	}
+	kern_return_t 	kr;
+
+	kr = upl_commit(upl, pl, count);
+	upl_deallocate(upl);
 	return kr;
 }

+
 kern_return_t
 kernel_upl_commit_range(
 	upl_t			upl,
 	vm_offset_t		offset,
 	vm_size_t		size,
 	int			flags,
-	upl_page_list_ptr_t	page_list,
-	mach_msg_type_number_t	count)
+	upl_page_info_array_t	pl,
+	mach_msg_type_number_t	count)
 {
-	kern_return_t	kr;
-	upl_lock(upl);
-	upl->ref_count += 1;
-	if(count) {
-		kr = uc_upl_commit_range(upl, offset, size, flags,
-			(upl_page_info_t *)page_list);
-	} else {
-		kr = uc_upl_commit_range(upl, offset, size, flags,
-			(upl_page_info_t *) NULL);
-	}
-	if(upl->ref_count == 1) {
-		upl_dealloc(upl);
-	} else {
-		upl->ref_count -= 1;
-		upl_unlock(upl);
-	}
+	boolean_t		finished = FALSE;
+	kern_return_t 	kr;
+
+	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
+		flags |= UPL_COMMIT_NOTIFY_EMPTY;
+
+	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
+
+	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
+		upl_deallocate(upl);
+
 	return kr;
 }

 kern_return_t
 kernel_upl_abort_range(
-	upl_t		upl,
-	vm_offset_t	offset,
-	vm_size_t	size,
-	int		abort_flags)
+	upl_t			upl,
+	vm_offset_t		offset,
+	vm_size_t		size,
+	int			abort_flags)
 {
-	kern_return_t	kr;
-	upl_lock(upl);
-	upl->ref_count += 1;
-	kr = uc_upl_abort_range(upl, offset, size, abort_flags);
-	if(upl->ref_count == 1) {
-		upl_dealloc(upl);
-	} else {
-		upl->ref_count -= 1;
-		upl_unlock(upl);
-	}
-	return kr;
-}
+	kern_return_t 	kr;
+	boolean_t		finished = FALSE;

-kern_return_t
-kernel_upl_abort(
-	upl_t			upl,
-	int			abort_type)
-{
-	kern_return_t	kr;
-	upl_lock(upl);
-	upl->ref_count += 1;
-	kr = uc_upl_abort(upl, abort_type);
-	if(upl->ref_count == 1) {
-		upl_dealloc(upl);
-	} else {
-		upl->ref_count -= 1;
-		upl_unlock(upl);
-	}
-	return kr;
-}
+	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
+		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

+	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

+	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
+		upl_deallocate(upl);

-/* code snippet from vm_map */
-kern_return_t
-vm_object_create_nomap(ipc_port_t port, vm_object_size_t size)
-{
-	vm_object_t	object_ptr;
-	return memory_object_create_named(port, size, &object_ptr);
+	return kr;
 }

-
-/*
- * Temporary interface to overcome old style ipc artifacts, and allow
- * ubc to call this routine directly. Will disappear with new RPC
- * component architecture.
- * NOTE: call to memory_object_destroy removes the vm_object's association
- * with its abstract memory object and hence the named flag is set to false.
- */
 kern_return_t
-memory_object_destroy_named(
-	vm_object_t	object,
-	kern_return_t	reason)
+kernel_upl_abort(
+	upl_t			upl,
+	int			abort_type)
 {
-	vm_object_lock(object);
-	if(object->named == FALSE) {
-		panic("memory_object_destroy_named called by party which doesn't hold right");
-	}
-	object->ref_count++;
-	vm_object_res_reference(object);
-	vm_object_unlock(object);
-	return (memory_object_destroy(object, reason));
-}
+	kern_return_t	kr;

-/*
- * Temporary interface to overcome old style ipc artifacts, and allow
- * ubc to call this routine directly. Will disappear with new RPC
- * component architecture.
- * Note: No change is made in the named flag.
- */
-kern_return_t
-memory_object_lock_request_named(
-	vm_object_t			object,
-	vm_object_offset_t		offset,
-	vm_object_size_t		size,
-	memory_object_return_t		should_return,
-	boolean_t			should_flush,
-	vm_prot_t			prot,
-	ipc_port_t			reply_to)
-{
-	vm_object_lock(object);
-	if(object->named == FALSE) {
-		panic("memory_object_lock_request_named called by party which doesn't hold right");
-	}
-	object->ref_count++;
-	vm_object_res_reference(object);
-	vm_object_unlock(object);
-	return (memory_object_lock_request(object,
-		offset, size, should_return, should_flush, prot,
-		reply_to, 0));
+	kr = upl_abort(upl, abort_type);
+	upl_deallocate(upl);
+	return kr;
 }

-kern_return_t
-memory_object_change_attributes_named(
-	vm_object_t			object,
-	memory_object_flavor_t		flavor,
-	memory_object_info_t		attributes,
-	mach_msg_type_number_t		count,
-	ipc_port_t			reply_to,
-	mach_msg_type_name_t		reply_to_type)
-{
-	vm_object_lock(object);
-	if(object->named == FALSE) {
-		panic("memory_object_lock_request_named called by party which doesn't hold right");
-	}
-	object->ref_count++;
-	vm_object_res_reference(object);
-	vm_object_unlock(object);
-	return (memory_object_change_attributes(object,
-		flavor, attributes, count, reply_to, reply_to_type));
-}

 kern_return_t
 vm_get_shared_region(
@@ -2669,9 +2437,7 @@ shared_region_mapping_ref(
 {
 	if(shared_region == NULL)
 		return KERN_SUCCESS;
-	shared_region_mapping_lock(shared_region);
-	shared_region->ref_count++;
-	shared_region_mapping_unlock(shared_region);
+	hw_atomic_add(&shared_region->ref_count, 1);
 	return KERN_SUCCESS;
 }
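The hunk above replaces a lock/increment/unlock sequence with a single hw_atomic_add(). A C11 analogue of the same refcount pattern, including the drop-to-zero test that the dealloc path below relies on (note the convention difference: XNU's hw_atomic_sub() returns the new value, while C11's atomic_fetch_sub() returns the old one, hence the == 1 comparison):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct region_model { atomic_uint ref_count; };

    static void region_ref(struct region_model *r)
    {
        atomic_fetch_add(&r->ref_count, 1);      /* no lock taken */
    }

    /* Returns true when this call dropped the count to zero and the
     * caller therefore owns the teardown. */
    static bool region_unref(struct region_model *r)
    {
        return atomic_fetch_sub(&r->ref_count, 1) == 1;
    }

    int main(void)
    {
        struct region_model r;
        atomic_init(&r.ref_count, 1);
        region_ref(&r);                              /* count: 2 */
        printf("teardown? %d\n", region_unref(&r));  /* 0: still referenced */
        printf("teardown? %d\n", region_unref(&r));  /* 1: last reference   */
        return 0;
    }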
@@ -2680,44 +2446,46 @@ shared_region_mapping_dealloc(
 	shared_region_mapping_t	shared_region)
 {
 	struct shared_region_task_mappings sm_info;
-	shared_region_mapping_t next;
-
-	if(shared_region == NULL)
-		return KERN_SUCCESS;
-	shared_region_mapping_lock(shared_region);
-
-	if((--shared_region->ref_count) == 0) {
-
-		sm_info.text_region = shared_region->text_region;
-		sm_info.text_size = shared_region->text_size;
-		sm_info.data_region = shared_region->data_region;
-		sm_info.data_size = shared_region->data_size;
-		sm_info.region_mappings = shared_region->region_mappings;
-		sm_info.client_base = shared_region->client_base;
-		sm_info.alternate_base = shared_region->alternate_base;
-		sm_info.alternate_next = shared_region->alternate_next;
-		sm_info.flags = shared_region->flags;
-		sm_info.self = (vm_offset_t)shared_region;
-
-		lsf_remove_regions_mappings(shared_region, &sm_info);
-		pmap_remove(((vm_named_entry_t)
-			(shared_region->text_region->ip_kobject))
-			->backing.map->pmap,
-			sm_info.client_base,
-			sm_info.client_base + sm_info.text_size);
-		ipc_port_release_send(shared_region->text_region);
-		ipc_port_release_send(shared_region->data_region);
-		if(shared_region->object_chain) {
-			shared_region_mapping_dealloc(
-				shared_region->object_chain->object_chain_region);
-			kfree((vm_offset_t)shared_region->object_chain,
-				sizeof (struct shared_region_object_chain));
-		}
-		kfree((vm_offset_t)shared_region,
+	shared_region_mapping_t next = NULL;
+
+	while (shared_region) {
+		if (hw_atomic_sub(&shared_region->ref_count, 1) == 0) {
+			shared_region_mapping_lock(shared_region);
+
+			sm_info.text_region = shared_region->text_region;
+			sm_info.text_size = shared_region->text_size;
+			sm_info.data_region = shared_region->data_region;
+			sm_info.data_size = shared_region->data_size;
+			sm_info.region_mappings = shared_region->region_mappings;
+			sm_info.client_base = shared_region->client_base;
+			sm_info.alternate_base = shared_region->alternate_base;
+			sm_info.alternate_next = shared_region->alternate_next;
+			sm_info.flags = shared_region->flags;
+			sm_info.self = (vm_offset_t)shared_region;
+
+			lsf_remove_regions_mappings(shared_region, &sm_info);
+			pmap_remove(((vm_named_entry_t)
+				(shared_region->text_region->ip_kobject))
+				->backing.map->pmap,
+				sm_info.client_base,
+				sm_info.client_base + sm_info.text_size);
+			ipc_port_release_send(shared_region->text_region);
+			ipc_port_release_send(shared_region->data_region);
+			if (shared_region->object_chain) {
+				next = shared_region->object_chain->object_chain_region;
+				kfree((vm_offset_t)shared_region->object_chain,
+					sizeof (struct shared_region_object_chain));
+			} else {
+				next = NULL;
+			}
+			shared_region_mapping_unlock(shared_region);
+			kfree((vm_offset_t)shared_region,
 			sizeof (struct shared_region_mapping));
-		return KERN_SUCCESS;
+			shared_region = next;
+		} else {
+			break;
+		}
 	}
-	shared_region_mapping_unlock(shared_region);
 	return KERN_SUCCESS;
 }
@@ -2748,6 +2516,24 @@ vm_map_get_phys_page(
 			vm_map_unlock(old_map);
 			continue;
 		}
+		if (entry->object.vm_object->phys_contiguous) {
+			/* These are  not standard pageable memory mappings */
+			/* If they are not present in the object they will  */
+			/* have to be picked up from the pager through the  */
+			/* fault mechanism.  */
+			if(entry->object.vm_object->shadow_offset == 0) {
+				/* need to call vm_fault */
+				vm_map_unlock(map);
+				vm_fault(map, offset, VM_PROT_NONE,
+					FALSE, THREAD_UNINT, NULL, 0);
+				vm_map_lock(map);
+				continue;
+			}
+			offset = entry->offset + (offset - entry->vme_start);
+			phys_addr = entry->object.vm_object->shadow_offset + offset;
+			break;
+
+		}
 		offset = entry->offset + (offset - entry->vme_start);
 		object = entry->object.vm_object;
 		vm_object_lock(object);
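The shared_region_mapping_dealloc() hunk above also switches from recursing down the object_chain to iterating over it, so releasing a long chain no longer grows the kernel stack. The shape of that rewrite, reduced to a toy linked chain (names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct link_model {
        unsigned           ref;    /* refcount, dropped by one per dealloc */
        struct link_model *chain;  /* next region down the object chain    */
    };

    static void dealloc_chain(struct link_model *n)
    {
        while (n != NULL) {
            if (--n->ref != 0)
                break;                           /* still referenced: stop */
            struct link_model *next = n->chain;  /* save before freeing    */
            free(n);
            n = next;                 /* iterate instead of recursing */
        }
    }

    int main(void)
    {
        /* Build a three-link chain, each link holding a single reference. */
        struct link_model *c = calloc(1, sizeof *c); c->ref = 1;
        struct link_model *b = calloc(1, sizeof *b); b->ref = 1; b->chain = c;
        struct link_model *a = calloc(1, sizeof *a); a->ref = 1; a->chain = b;
        dealloc_chain(a);             /* frees a, b and c without recursion */
        printf("chain released\n");
        return 0;
    }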