X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..d41d1dae2cd00cc08c7982087d1c445180cad9f5:/osfmk/vm/vm_user.c?ds=sidebyside

diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index 5db1859b8..59c26ff70 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -62,6 +62,29 @@
  * User-exported virtual memory functions.
  */
 
+/*
+ * There are three implementations of the "XXX_allocate" functionality in
+ * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
+ * (for a task with the same address space size, especially the current task),
+ * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
+ * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
+ * makes sense on platforms where a user task can either be 32 or 64, or the kernel
+ * task can be 32 or 64. mach_vm_allocate makes sense everywhere, and is preferred
+ * for new code.
+ *
+ * The entrypoints into the kernel are more complex. All platforms support a
+ * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
+ * size types for the platform. On platforms that only support U32/K32,
+ * subsystem 4800 is all you need. On platforms that support both U32 and U64,
+ * subsystem 3800 is used to disambiguate the size of parameters, and they will
+ * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32 platforms,
+ * the MIG glue should never call into vm_allocate directly, because the calling
+ * task and kernel_task are unlikely to use the same size parameters.
+ *
+ * New VM call implementations should be added here and to mach_vm.defs
+ * (subsystem 4800), and use mach_vm_* "wide" types.
+ */
+
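A minimal user-space sketch of the wide (subsystem 4800) interface described in the comment above — illustrative only, not part of this patch; it assumes the standard <mach/mach.h> and <mach/mach_vm.h> user headers and is built as an ordinary user program:

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

int
main(void)
{
	mach_vm_address_t	addr = 0;	/* 0 + VM_FLAGS_ANYWHERE: let the kernel choose */
	mach_vm_size_t		size = vm_page_size;
	kern_return_t		kr;

	/*
	 * Subsystem 4800 entry point: address and size are 64-bit wide on
	 * every platform.  After this change, kernel-only VM_FLAGS_* bits
	 * are rejected with KERN_INVALID_ARGUMENT.
	 */
	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "mach_vm_allocate: %s\n", mach_error_string(kr));
		return 1;
	}

	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	return (kr == KERN_SUCCESS) ? 0 : 1;
}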
 #include
 
 #include
 
@@ -78,9 +101,7 @@
 #include
 #include
-#include
 #include
-#include
 #include
 #include
@@ -116,7 +137,11 @@ mach_vm_allocate(
 	vm_map_offset_t		map_addr;
 	vm_map_size_t		map_size;
 	kern_return_t		result;
-	boolean_t		anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
+	boolean_t		anywhere;
+
+	/* filter out any kernel-only flags */
+	if (flags & ~VM_FLAGS_USER_ALLOCATE)
+		return KERN_INVALID_ARGUMENT;
 
 	if (map == VM_MAP_NULL)
 		return(KERN_INVALID_ARGUMENT);
@@ -125,6 +150,7 @@ mach_vm_allocate(
 		return(KERN_SUCCESS);
 	}
 
+	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
 	if (anywhere) {
 		/*
 		 * No specific address requested, so start candidate address
@@ -178,7 +204,11 @@ vm_allocate(
 	vm_map_offset_t		map_addr;
 	vm_map_size_t		map_size;
 	kern_return_t		result;
-	boolean_t		anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
+	boolean_t		anywhere;
+
+	/* filter out any kernel-only flags */
+	if (flags & ~VM_FLAGS_USER_ALLOCATE)
+		return KERN_INVALID_ARGUMENT;
 
 	if (map == VM_MAP_NULL)
 		return(KERN_INVALID_ARGUMENT);
@@ -187,6 +217,7 @@ vm_allocate(
 		return(KERN_SUCCESS);
 	}
 
+	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
 	if (anywhere) {
 		/*
 		 * No specific address requested, so start candidate address
@@ -455,6 +486,8 @@ mach_vm_read(
 
 	if (map == VM_MAP_NULL)
 		return(KERN_INVALID_ARGUMENT);
+	if ((mach_msg_type_number_t) size != size)
+		return KERN_INVALID_ARGUMENT;
 
 	error = vm_map_copyin(map,
 			(vm_map_address_t)addr,
@@ -464,7 +497,8 @@ mach_vm_read(
 
 	if (KERN_SUCCESS == error) {
 		*data = (pointer_t) ipc_address;
-		*data_size = size;
+		*data_size = (mach_msg_type_number_t) size;
+		assert(*data_size == size);
 	}
 	return(error);
 }
@@ -493,6 +527,16 @@ vm_read(
 	if (map == VM_MAP_NULL)
 		return(KERN_INVALID_ARGUMENT);
 
+	if (size > (unsigned)(mach_msg_type_number_t) -1) {
+		/*
+		 * The kernel could handle a 64-bit "size" value, but
+		 * it could not return the size of the data in "*data_size"
+		 * without overflowing.
+		 * Let's reject this "size" as invalid.
+		 */
+		return KERN_INVALID_ARGUMENT;
+	}
+
 	error = vm_map_copyin(map,
 			(vm_map_address_t)addr,
 			(vm_map_size_t)size,
@@ -501,7 +545,8 @@ vm_read(
 
 	if (KERN_SUCCESS == error) {
 		*data = (pointer_t) ipc_address;
-		*data_size = size;
+		*data_size = (mach_msg_type_number_t) size;
+		assert(*data_size == size);
 	}
 	return(error);
 }
@@ -836,276 +881,21 @@ mach_vm_map(
 	vm_prot_t		max_protection,
 	vm_inherit_t		inheritance)
 {
-	vm_map_address_t	map_addr;
-	vm_map_size_t		map_size;
-	vm_object_t		object;
-	vm_object_size_t	size;
-	kern_return_t		result;
-
-	/*
-	 * Check arguments for validity
-	 */
-	if ((target_map == VM_MAP_NULL) ||
-		(cur_protection & ~VM_PROT_ALL) ||
-		(max_protection & ~VM_PROT_ALL) ||
-		(inheritance > VM_INHERIT_LAST_VALID) ||
-		initial_size == 0)
-		return(KERN_INVALID_ARGUMENT);
-
-	map_addr = vm_map_trunc_page(*address);
-	map_size = vm_map_round_page(initial_size);
-	size = vm_object_round_page(initial_size);
-
-	/*
-	 * Find the vm object (if any) corresponding to this port.
-	 */
-	if (!IP_VALID(port)) {
-		object = VM_OBJECT_NULL;
-		offset = 0;
-		copy = FALSE;
-	} else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
-		vm_named_entry_t	named_entry;
-
-		named_entry = (vm_named_entry_t)port->ip_kobject;
-		/* a few checks to make sure user is obeying rules */
-		if(size == 0) {
-			if(offset >= named_entry->size)
-				return(KERN_INVALID_RIGHT);
-			size = named_entry->size - offset;
-		}
-		if((named_entry->protection & max_protection) != max_protection)
-			return(KERN_INVALID_RIGHT);
-		if((named_entry->protection & cur_protection) != cur_protection)
-			return(KERN_INVALID_RIGHT);
-		if(named_entry->size < (offset + size))
-			return(KERN_INVALID_ARGUMENT);
-
-		/* the callers parameter offset is defined to be the */
-		/* offset from beginning of named entry offset in object */
-		offset = offset + named_entry->offset;
-
-		named_entry_lock(named_entry);
-		if(named_entry->is_sub_map) {
-			vm_map_entry_t	map_entry;
-
-			named_entry_unlock(named_entry);
-			vm_object_reference(vm_submap_object);
-			if ((result = vm_map_enter(target_map,
-				&map_addr, map_size,
-				(vm_map_offset_t)mask, flags,
-				vm_submap_object, 0,
-				FALSE,
-				cur_protection, max_protection, inheritance
-				)) != KERN_SUCCESS) {
-				vm_object_deallocate(vm_submap_object);
-			} else {
-				char	alias;
-
-				VM_GET_FLAGS_ALIAS(flags, alias);
-				if ((alias == VM_MEMORY_SHARED_PMAP) &&
-				    !copy) {
-					vm_map_submap(target_map, map_addr,
-						map_addr + map_size,
-						named_entry->backing.map,
-						(vm_map_offset_t)offset, TRUE);
-				} else {
-					vm_map_submap(target_map, map_addr,
-						map_addr + map_size,
-						named_entry->backing.map,
-						(vm_map_offset_t)offset, FALSE);
-				}
-				if(copy) {
-					if(vm_map_lookup_entry(
-						target_map, map_addr, &map_entry)) {
-						map_entry->needs_copy = TRUE;
-					}
-				}
-				*address = map_addr;
-			}
-			return(result);
-
-		} else if (named_entry->is_pager) {
-			unsigned int	access;
-			vm_prot_t	protections;
-			unsigned int	wimg_mode;
-			boolean_t	cache_attr;
-
-			protections = named_entry->protection
-					& VM_PROT_ALL;
-			access = GET_MAP_MEM(named_entry->protection);
-
-			object = vm_object_enter(
-				named_entry->backing.pager,
-				named_entry->size,
-				named_entry->internal,
-				FALSE,
-				FALSE);
-			if (object == VM_OBJECT_NULL) {
-				named_entry_unlock(named_entry);
-				return(KERN_INVALID_OBJECT);
-			}
-
-			/* JMM - drop reference on pager here */
-
-			/* create an extra ref for the named entry */
-			vm_object_lock(object);
-			vm_object_reference_locked(object);
-			named_entry->backing.object = object;
-			named_entry->is_pager = FALSE;
-			named_entry_unlock(named_entry);
-
-			wimg_mode = object->wimg_bits;
-			if(access == MAP_MEM_IO) {
-				wimg_mode = VM_WIMG_IO;
-			} else if (access == MAP_MEM_COPYBACK) {
-				wimg_mode = VM_WIMG_USE_DEFAULT;
-			} else if (access == MAP_MEM_WTHRU) {
-				wimg_mode = VM_WIMG_WTHRU;
-			} else if (access == MAP_MEM_WCOMB) {
-				wimg_mode = VM_WIMG_WCOMB;
-			}
-			if ((wimg_mode == VM_WIMG_IO)
-				|| (wimg_mode == VM_WIMG_WCOMB))
-				cache_attr = TRUE;
-			else
-				cache_attr = FALSE;
-
-			/* wait for object (if any) to be ready */
-			if (!named_entry->internal) {
-				while (!object->pager_ready) {
-					vm_object_wait(object,
-						VM_OBJECT_EVENT_PAGER_READY,
-						THREAD_UNINT);
-					vm_object_lock(object);
-				}
-			}
-
-			if(object->wimg_bits != wimg_mode) {
-				vm_page_t p;
-
-				vm_object_paging_wait(object, THREAD_UNINT);
-
-				object->wimg_bits = wimg_mode;
-				queue_iterate(&object->memq, p, vm_page_t, listq) {
-					if (!p->fictitious) {
-						pmap_disconnect(p->phys_page);
-						if (cache_attr)
-							pmap_sync_page_attributes_phys(p->phys_page);
-					}
-				}
-			}
-			object->true_share = TRUE;
-			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
-				object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
-			vm_object_unlock(object);
-		} else {
-			/* This is the case where we are going to map */
-			/* an already mapped object.  If the object is */
-			/* not ready it is internal.  An external     */
-			/* object cannot be mapped until it is ready  */
-			/* we can therefore avoid the ready check     */
-			/* in this case.  */
-			object = named_entry->backing.object;
-			assert(object != VM_OBJECT_NULL);
-			named_entry_unlock(named_entry);
-			vm_object_reference(object);
-		}
-	} else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
-		/*
-		 * JMM - This is temporary until we unify named entries
-		 * and raw memory objects.
-		 *
-		 * Detected fake ip_kotype for a memory object.  In
-		 * this case, the port isn't really a port at all, but
-		 * instead is just a raw memory object.
-		 */
-
-		if ((object = vm_object_enter((memory_object_t)port,
-					size, FALSE, FALSE, FALSE))
-			== VM_OBJECT_NULL)
-			return(KERN_INVALID_OBJECT);
-
-		/* wait for object (if any) to be ready */
-		if (object != VM_OBJECT_NULL) {
-			if(object == kernel_object) {
-				printf("Warning: Attempt to map kernel object"
-					" by a non-private kernel entity\n");
-				return(KERN_INVALID_OBJECT);
-			}
-			vm_object_lock(object);
-			while (!object->pager_ready) {
-				vm_object_wait(object,
-					VM_OBJECT_EVENT_PAGER_READY,
-					THREAD_UNINT);
-				vm_object_lock(object);
-			}
-			vm_object_unlock(object);
-		}
-	} else {
-		return (KERN_INVALID_OBJECT);
-	}
-
-	/*
-	 *	Perform the copy if requested
-	 */
-
-	if (copy) {
-		vm_object_t		new_object;
-		vm_object_offset_t	new_offset;
-
-		result = vm_object_copy_strategically(object, offset, size,
-				&new_object, &new_offset,
-				&copy);
-
-
-		if (result == KERN_MEMORY_RESTART_COPY) {
-			boolean_t success;
-			boolean_t src_needs_copy;
-
-			/*
-			 * XXX
-			 * We currently ignore src_needs_copy.
-			 * This really is the issue of how to make
-			 * MEMORY_OBJECT_COPY_SYMMETRIC safe for
-			 * non-kernel users to use. Solution forthcoming.
-			 * In the meantime, since we don't allow non-kernel
-			 * memory managers to specify symmetric copy,
-			 * we won't run into problems here.
-			 */
-			new_object = object;
-			new_offset = offset;
-			success = vm_object_copy_quickly(&new_object,
-				new_offset, size,
-				&src_needs_copy,
-				&copy);
-			assert(success);
-			result = KERN_SUCCESS;
-		}
-		/*
-		 *	Throw away the reference to the
-		 *	original object, as it won't be mapped.
-		 */
-
-		vm_object_deallocate(object);
-
-		if (result != KERN_SUCCESS)
-			return (result);
-
-		object = new_object;
-		offset = new_offset;
-	}
+	/* filter out any kernel-only flags */
+	if (flags & ~VM_FLAGS_USER_MAP)
+		return KERN_INVALID_ARGUMENT;
 
-	if ((result = vm_map_enter(target_map,
-				&map_addr, map_size,
-				(vm_map_offset_t)mask,
-				flags,
-				object, offset,
-				copy,
-				cur_protection, max_protection, inheritance
-				)) != KERN_SUCCESS)
-		vm_object_deallocate(object);
-	*address = map_addr;
-	return(result);
+	return vm_map_enter_mem_object(target_map,
+				       address,
+				       initial_size,
+				       mask,
+				       flags,
+				       port,
+				       offset,
+				       copy,
+				       cur_protection,
+				       max_protection,
+				       inheritance);
 }
 
 
@@ -1136,7 +926,7 @@ vm_map_64(
 	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
 			 port, offset, copy,
 			 cur_protection, max_protection, inheritance);
-	*address = CAST_DOWN(vm_address_t, map_addr);
+	*address = CAST_DOWN(vm_offset_t, map_addr);
 	return kr;
 }
 
@@ -1169,7 +959,7 @@ vm_map(
 	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
 			 port, obj_offset, copy,
 			 cur_protection, max_protection, inheritance);
-	*address = CAST_DOWN(vm_address_t, map_addr);
+	*address = CAST_DOWN(vm_offset_t, map_addr);
 	return kr;
 }
 
@@ -1297,7 +1087,7 @@ mach_vm_wire(
 	if (map == VM_MAP_NULL)
 		return KERN_INVALID_TASK;
 
-	if (access & ~VM_PROT_ALL)
+	if (access & ~VM_PROT_ALL || (start + size < start))
 		return KERN_INVALID_ARGUMENT;
 
 	if (access != VM_PROT_NONE) {
@@ -1766,6 +1556,22 @@ vm_region_recurse(
 	return kr;
 }
 
+kern_return_t
+mach_vm_purgable_control(
+	vm_map_t		map,
+	mach_vm_offset_t	address,
+	vm_purgable_t		control,
+	int			*state)
+{
+	if (VM_MAP_NULL == map)
+		return KERN_INVALID_ARGUMENT;
+
+	return vm_map_purgable_control(map,
+				       vm_map_trunc_page(address),
+				       control,
+				       state);
+}
+
 kern_return_t
 vm_purgable_control(
 	vm_map_t		map,
@@ -1842,9 +1648,9 @@ mach_vm_page_query(
 	if (VM_MAP_NULL == map)
 		return KERN_INVALID_ARGUMENT;
 
-	return vm_map_page_info(map,
-				vm_map_trunc_page(offset),
-				disposition, ref_count);
+	return vm_map_page_query_internal(map,
+					  vm_map_trunc_page(offset),
+					  disposition, ref_count);
 }
 
 kern_return_t
@@ -1857,9 +1663,27 @@ vm_map_page_query(
 	if (VM_MAP_NULL == map)
 		return KERN_INVALID_ARGUMENT;
 
-	return vm_map_page_info(map,
-				vm_map_trunc_page(offset),
-				disposition, ref_count);
+	return vm_map_page_query_internal(map,
+					  vm_map_trunc_page(offset),
+					  disposition, ref_count);
+}
+
+kern_return_t
+mach_vm_page_info(
+	vm_map_t		map,
+	mach_vm_address_t	address,
+	vm_page_info_flavor_t	flavor,
+	vm_page_info_t		info,
+	mach_msg_type_number_t	*count)
+{
+	kern_return_t	kr;
+
+	if (map == VM_MAP_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	kr = vm_map_page_info(map, address, flavor, info, count);
+	return kr;
 }
 
 /* map a (whole) upl into an address space */
@@ -1867,7 +1691,7 @@ kern_return_t
 vm_upl_map(
 	vm_map_t		map,
 	upl_t			upl,
-	vm_offset_t		*dst_addr)
+	vm_address_t		*dst_addr)
 {
 	vm_map_offset_t		map_addr;
 	kern_return_t		kr;
@@ -1876,7 +1700,7 @@ vm_upl_map(
 		return KERN_INVALID_ARGUMENT;
 
 	kr = vm_map_enter_upl(map, upl, &map_addr);
-	*dst_addr = CAST_DOWN(vm_offset_t, map_addr);
+	*dst_addr = CAST_DOWN(vm_address_t, map_addr);
 	return kr;
 }
 
@@ -1926,12 +1750,6 @@ vm_map_get_upl(
 
 	return kr;
 }
-
-__private_extern__ kern_return_t
-mach_memory_entry_allocate(
-	vm_named_entry_t	*user_entry_p,
-	ipc_port_t		*user_handle_p); /* forward */
-
 /*
  * mach_make_memory_entry_64
 *
@@ -1961,8 +1779,7 @@ mach_make_memory_entry_64(
 	boolean_t		wired;
 	vm_object_offset_t	obj_off;
 	vm_prot_t		prot;
-	vm_map_offset_t		lo_offset, hi_offset;
-	vm_behavior_t		behavior;
+	struct vm_object_fault_info	fault_info;
 	vm_object_t		object;
 	vm_object_t		shadow_object;
@@ -2055,7 +1872,8 @@ mach_make_memory_entry_64(
 			queue_iterate(&object->memq, p, vm_page_t, listq) {
 				if (!p->fictitious) {
-					pmap_disconnect(p->phys_page);
+					if (p->pmapped)
+						pmap_disconnect(p->phys_page);
 					if (cache_attr)
 						pmap_sync_page_attributes_phys(p->phys_page);
 				}
@@ -2077,9 +1895,9 @@ mach_make_memory_entry_64(
 		/*
 		 * Force the creation of the VM object now.
 		 */
-		if (map_size > (vm_map_size_t) VM_MAX_ADDRESS) {
+		if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
 			/*
-			 * LP64todo - for now, we can only allocate 4GB
+			 * LP64todo - for now, we can only allocate 4GB-4096
 			 * internal objects because the default pager can't
 			 * page bigger ones.  Remove this when it can.
 			 */
@@ -2097,7 +1915,7 @@ mach_make_memory_entry_64(
 			kr = KERN_INVALID_ARGUMENT;
 			goto make_mem_done;
 		}
-		object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE;
+		object->purgable = VM_PURGABLE_NONVOLATILE;
 	}
 
 	/*
@@ -2154,6 +1972,10 @@ mach_make_memory_entry_64(
 	/* Create a named object based on address range within the task map */
 	/* Go find the object at given address */
 
+	if (target_map == VM_MAP_NULL) {
+		return KERN_INVALID_TASK;
+	}
+
 redo_lookup:
 	vm_map_lock_read(target_map);
 
@@ -2162,9 +1984,10 @@ redo_lookup:
 	/* that requested by the caller */
 
 	kr = vm_map_lookup_locked(&target_map, map_offset,
-			protections, &version,
-			&object, &obj_off, &prot, &wired, &behavior,
-			&lo_offset, &hi_offset, &real_map);
+				  protections, OBJECT_LOCK_EXCLUSIVE, &version,
+				  &object, &obj_off, &prot, &wired,
+				  &fault_info,
+				  &real_map);
 	if (kr != KERN_SUCCESS) {
 		vm_map_unlock_read(target_map);
 		goto make_mem_done;
@@ -2265,7 +2088,7 @@ redo_lookup:
 			goto make_mem_done;
 		}
 
-		mappable_size = hi_offset - obj_off;
+		mappable_size = fault_info.hi_offset - obj_off;
 		total_size = map_entry->vme_end - map_entry->vme_start;
 		if(map_size > mappable_size) {
 			/* try to extend mappable size if the entries */
@@ -2350,6 +2173,11 @@ redo_lookup:
 			shadow_object = map_entry->object.vm_object;
 			vm_object_unlock(object);
 
+			prot = map_entry->protection & ~VM_PROT_WRITE;
+
+			if (override_nx(target_map, map_entry->alias) && prot)
+				prot |= VM_PROT_EXECUTE;
+
 			vm_object_pmap_protect(
 				object, map_entry->offset,
 				total_size,
 				((map_entry->is_shared
 				  || target_map->mapped)
 					? PMAP_NULL :
 					target_map->pmap),
 				map_entry->vme_start,
-				map_entry->protection & ~VM_PROT_WRITE);
+				prot);
 			total_size -= (map_entry->vme_end
 					- map_entry->vme_start);
 			next_entry = map_entry->vme_next;
 			map_entry->needs_copy = FALSE;
+
+			vm_object_lock(shadow_object);
 			while (total_size) {
 			   if(next_entry->object.vm_object == object) {
-				shadow_object->ref_count++;
-				vm_object_res_reference(shadow_object);
+				vm_object_reference_locked(shadow_object);
 				next_entry->object.vm_object
 							= shadow_object;
 				vm_object_deallocate(object);
@@ -2397,8 +2226,6 @@ redo_lookup:
 						+ map_entry->offset;
 			vm_map_lock_write_to_read(target_map);
-			vm_object_lock(object);
-
 		}
 	}
 
@@ -2454,7 +2281,8 @@ redo_lookup:
 		queue_iterate(&object->memq, p, vm_page_t, listq) {
 			if (!p->fictitious) {
-				pmap_disconnect(p->phys_page);
+				if (p->pmapped)
+					pmap_disconnect(p->phys_page);
 				if (cache_attr)
 					pmap_sync_page_attributes_phys(p->phys_page);
 			}
@@ -2589,10 +2417,12 @@ redo_lookup:
 
 make_mem_done:
 	if (user_handle != IP_NULL) {
-		ipc_port_dealloc_kernel(user_handle);
-	}
-	if (user_entry != NULL) {
-		kfree(user_entry, sizeof *user_entry);
+		/*
+		 * Releasing "user_handle" causes the kernel object
+		 * associated with it ("user_entry" here) to also be
+		 * released and freed.
+		 */
+		mach_memory_entry_port_release(user_handle);
 	}
 	return kr;
 }
@@ -2606,10 +2436,10 @@ _mach_make_memory_entry(
 	ipc_port_t		*object_handle,
 	ipc_port_t		parent_entry)
 {
-	memory_object_offset_t	mo_size;
+	memory_object_size_t	mo_size;
 	kern_return_t		kr;
 
-	mo_size = (memory_object_offset_t)*size;
+	mo_size = (memory_object_size_t)*size;
 	kr = mach_make_memory_entry_64(target_map, &mo_size,
 			(memory_object_offset_t)offset, permission, object_handle,
 			parent_entry);
@@ -2626,10 +2456,10 @@ mach_make_memory_entry(
 	ipc_port_t		*object_handle,
 	ipc_port_t		parent_entry)
 {
-	memory_object_offset_t	mo_size;
+	memory_object_size_t	mo_size;
 	kern_return_t		kr;
 
-	mo_size = (memory_object_offset_t)*size;
+	mo_size = (memory_object_size_t)*size;
 	kr = mach_make_memory_entry_64(target_map, &mo_size,
 			(memory_object_offset_t)offset, permission, object_handle,
 			parent_entry);
@@ -2702,8 +2532,10 @@ mach_memory_entry_allocate(
 	user_entry->backing.pager = NULL;
 	user_entry->is_sub_map = FALSE;
 	user_entry->is_pager = FALSE;
-	user_entry->size = 0;
 	user_entry->internal = FALSE;
+	user_entry->size = 0;
+	user_entry->offset = 0;
+	user_entry->protection = VM_PROT_NONE;
 	user_entry->ref_count = 1;
 
 	ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
@@ -2787,6 +2619,14 @@ mach_memory_entry_purgable_control(
 	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
 		return KERN_INVALID_ARGUMENT;
 	}
+	if (control != VM_PURGABLE_SET_STATE &&
+	    control != VM_PURGABLE_GET_STATE)
+		return(KERN_INVALID_ARGUMENT);
+
+	if (control == VM_PURGABLE_SET_STATE &&
+	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
+	     ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
+		return(KERN_INVALID_ARGUMENT);
 
 	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
 
@@ -2857,7 +2697,7 @@ mach_destroy_memory_entry(
 	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
 #endif /* MACH_ASSERT */
 	named_entry = (vm_named_entry_t)port->ip_kobject;
-	mutex_lock(&(named_entry)->Lock);
+	lck_mtx_lock(&(named_entry)->Lock);
 	named_entry->ref_count -= 1;
 	if(named_entry->ref_count == 0) {
 		if (named_entry->is_sub_map) {
@@ -2867,14 +2707,114 @@ mach_destroy_memory_entry(
 			vm_object_deallocate(named_entry->backing.object);
 		} /* else JMM - need to drop reference on pager in that case */
-		mutex_unlock(&(named_entry)->Lock);
+		lck_mtx_unlock(&(named_entry)->Lock);
 		kfree((void *) port->ip_kobject,
 		      sizeof (struct vm_named_entry));
 	} else
-		mutex_unlock(&(named_entry)->Lock);
+		lck_mtx_unlock(&(named_entry)->Lock);
 }
 
+/* Allow manipulation of individual page state.  This is actually part of */
+/* the UPL regimen but takes place on the memory entry rather than on a UPL */
+
+kern_return_t
+mach_memory_entry_page_op(
+	ipc_port_t		entry_port,
+	vm_object_offset_t	offset,
+	int			ops,
+	ppnum_t			*phys_entry,
+	int			*flags)
+{
+	vm_named_entry_t	mem_entry;
+	vm_object_t		object;
+	kern_return_t		kr;
+
+	if (entry_port == IP_NULL ||
+	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+
+	named_entry_lock(mem_entry);
+
+	if (mem_entry->is_sub_map || mem_entry->is_pager) {
+		named_entry_unlock(mem_entry);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	object = mem_entry->backing.object;
+	if (object == VM_OBJECT_NULL) {
+		named_entry_unlock(mem_entry);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	vm_object_reference(object);
+	named_entry_unlock(mem_entry);
+
+	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);
+
+	vm_object_deallocate(object);
+
+	return kr;
+}
+
+/*
+ * mach_memory_entry_range_op offers performance enhancement over
+ * mach_memory_entry_page_op for page_op functions which do not require page
+ * level state to be returned from the call.  Page_op was created to provide
+ * a low-cost alternative to page manipulation via UPLs when only a single
+ * page was involved.  The range_op call establishes the ability in the _op
+ * family of functions to work on multiple pages where the lack of page level
+ * state handling allows the caller to avoid the overhead of the upl structures.
+ */
+
+kern_return_t
+mach_memory_entry_range_op(
+	ipc_port_t		entry_port,
+	vm_object_offset_t	offset_beg,
+	vm_object_offset_t	offset_end,
+	int                     ops,
+	int                     *range)
+{
+	vm_named_entry_t	mem_entry;
+	vm_object_t		object;
+	kern_return_t		kr;
+
+	if (entry_port == IP_NULL ||
+	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	mem_entry = (vm_named_entry_t) entry_port->ip_kobject;
+
+	named_entry_lock(mem_entry);
+
+	if (mem_entry->is_sub_map || mem_entry->is_pager) {
+		named_entry_unlock(mem_entry);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	object = mem_entry->backing.object;
+	if (object == VM_OBJECT_NULL) {
+		named_entry_unlock(mem_entry);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	vm_object_reference(object);
+	named_entry_unlock(mem_entry);
+
+	kr = vm_object_range_op(object,
+				offset_beg,
+				offset_end,
+				ops,
+				(uint32_t *) range);
+
+	vm_object_deallocate(object);
+
+	return kr;
+}
 
 
 kern_return_t
@@ -2987,6 +2927,10 @@ kernel_upl_commit_range(
 	if (flags & UPL_COMMIT_FREE_ON_EMPTY)
 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
 
+	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
 	kr = upl_commit_range(upl, offset, size, flags,
 			      pl, count, &finished);
 
 	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
@@ -3157,7 +3101,7 @@ vm_map_get_phys_page(
 kern_return_t kernel_object_iopl_request(	/* forward */
 	vm_named_entry_t	named_entry,
 	memory_object_offset_t	offset,
-	vm_size_t		*upl_size,
+	upl_size_t		*upl_size,
 	upl_t			*upl_ptr,
 	upl_page_info_array_t	user_page_list,
 	unsigned int		*page_list_count,
@@ -3167,7 +3111,7 @@ kern_return_t
 kernel_object_iopl_request(
 	vm_named_entry_t	named_entry,
 	memory_object_offset_t	offset,
-	vm_size_t		*upl_size,
+	upl_size_t		*upl_size,
 	upl_t			*upl_ptr,
 	upl_page_info_array_t	user_page_list,
 	unsigned int		*page_list_count,
@@ -3192,7 +3136,9 @@ kernel_object_iopl_request(
 	if(*upl_size == 0) {
 		if(offset >= named_entry->size)
 			return(KERN_INVALID_RIGHT);
-		*upl_size = named_entry->size - offset;
+		*upl_size = (upl_size_t) (named_entry->size - offset);
+		if (*upl_size != named_entry->size - offset)
+			return KERN_INVALID_ARGUMENT;
 	}
 	if(caller_flags & UPL_COPYOUT_FROM) {
 		if((named_entry->protection & VM_PROT_READ)
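The named-entry handling that the new mach_vm_map() body hands off to vm_map_enter_mem_object() is normally driven from user space by pairing mach_make_memory_entry_64() with mach_vm_map(). A hedged sketch using only the public calls (the helper name make_shared_entry and the error handling are illustrative, not taken from this patch):

#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/memory_object_types.h>

/* Wrap one freshly allocated page in a named entry and map it a second time. */
static kern_return_t
make_shared_entry(mach_port_t *entry_out, mach_vm_address_t *second_mapping)
{
	mach_vm_address_t	addr = 0;
	memory_object_size_t	size = vm_page_size;
	kern_return_t		kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* The returned send right names an IKOT_NAMED_ENTRY object in the kernel. */
	kr = mach_make_memory_entry_64(mach_task_self(), &size,
	    (memory_object_offset_t)addr, VM_PROT_READ | VM_PROT_WRITE,
	    entry_out, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* mach_vm_map() resolves the port to its backing VM object and maps it. */
	*second_mapping = 0;
	return mach_vm_map(mach_task_self(), second_mapping, size, 0,
	    VM_FLAGS_ANYWHERE, *entry_out, 0, FALSE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_NONE);
}

Passing copy == FALSE maps the same VM object that backs the named entry; that is the case the removed in-line code in mach_vm_map() used to handle and that vm_map_enter_mem_object() now centralizes.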
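The new mach_vm_purgable_control() entry point and the added VM_PURGABLE_* argument checks back the user-visible purgeable-memory control API. A sketch of typical use by a task on its own map (the helper name purgeable_demo is illustrative; the region must have been created with VM_FLAGS_PURGABLE):

#include <mach/mach.h>
#include <mach/vm_purgable.h>

/* Mark a purgeable region volatile, then read back the state the kernel reports. */
static kern_return_t
purgeable_demo(void)
{
	vm_address_t	addr = 0;
	int		state;
	kern_return_t	kr;

	/* Purgeability must be requested at allocation time. */
	kr = vm_allocate(mach_task_self(), &addr, vm_page_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS)
		return kr;

	state = VM_PURGABLE_VOLATILE;
	kr = vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * An unrecognized control or an out-of-range *state is rejected
	 * with KERN_INVALID_ARGUMENT before any map state is touched.
	 */
	return vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_GET_STATE, &state);
}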