+kern_return_t
+vm_map_enter_mem_object(
+ vm_map_t target_map,
+ vm_map_offset_t *address,
+ vm_map_size_t initial_size,
+ vm_map_offset_t mask,
+ int flags,
+ ipc_port_t port,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ vm_object_t object;
+ vm_object_size_t size;
+ kern_return_t result;
+
+ /*
+ * Check arguments for validity
+ */
+ if ((target_map == VM_MAP_NULL) ||
+ (cur_protection & ~VM_PROT_ALL) ||
+ (max_protection & ~VM_PROT_ALL) ||
+ (inheritance > VM_INHERIT_LAST_VALID) ||
+ initial_size == 0)
+ return KERN_INVALID_ARGUMENT;
+
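+ /*
+ * Truncate the caller's address down to a page boundary and round
+ * the size up to whole pages.
+ */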
+ map_addr = vm_map_trunc_page(*address);
+ map_size = vm_map_round_page(initial_size);
+ size = vm_object_round_page(initial_size);
+
+ /*
+ * Find the vm object (if any) corresponding to this port.
+ */
+ if (!IP_VALID(port)) {
+ object = VM_OBJECT_NULL;
+ offset = 0;
+ copy = FALSE;
+ } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
+ vm_named_entry_t named_entry;
+
+ named_entry = (vm_named_entry_t) port->ip_kobject;
+ /* a few checks to make sure the caller is obeying the rules */
+ if (size == 0) {
+ if (offset >= named_entry->size)
+ return KERN_INVALID_RIGHT;
+ size = named_entry->size - offset;
+ }
+ if ((named_entry->protection & max_protection) !=
+ max_protection)
+ return KERN_INVALID_RIGHT;
+ if ((named_entry->protection & cur_protection) !=
+ cur_protection)
+ return KERN_INVALID_RIGHT;
+ if (named_entry->size < (offset + size))
+ return KERN_INVALID_ARGUMENT;
+
+ /* The caller's "offset" parameter is relative to the start of the */
+ /* named entry; convert it into an offset within the backing object */
+ /* by adding the named entry's own offset. */
+ offset = offset + named_entry->offset;
+
+ named_entry_lock(named_entry);
+ if (named_entry->is_sub_map) {
+ vm_map_t submap;
+
+ submap = named_entry->backing.map;
+ vm_map_lock(submap);
+ vm_map_reference(submap);
+ vm_map_unlock(submap);
+ named_entry_unlock(named_entry);
+
+ result = vm_map_enter(target_map,
+ &map_addr,
+ map_size,
+ mask,
+ flags | VM_FLAGS_SUBMAP,
+ (vm_object_t) submap,
+ offset,
+ copy,
+ cur_protection,
+ max_protection,
+ inheritance);
+ if (result != KERN_SUCCESS) {
+ vm_map_deallocate(submap);
+ } else {
+ /*
+ * No need to lock "submap" just to check its
+ * "mapped" flag: that flag is never reset
+ * once it's been set and if we race, we'll
+ * just end up setting it twice, which is OK.
+ */
+ if (submap->mapped == FALSE) {
+ /*
+ * This submap has never been mapped.
+ * Set its "mapped" flag now that it
+ * has been mapped.
+ * This happens only for the first ever
+ * mapping of a "submap".
+ */
+ vm_map_lock(submap);
+ submap->mapped = TRUE;
+ vm_map_unlock(submap);
+ }
+ *address = map_addr;
+ }
+ return result;
+
+ } else if (named_entry->is_pager) {
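+ /*
+ * The named entry still refers to a raw pager: look up or
+ * create the VM object backing that pager and cache it in
+ * the named entry, so that later mappings take the
+ * "already mapped object" path below.
+ */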
+ unsigned int access;
+ vm_prot_t protections;
+ unsigned int wimg_mode;
+ boolean_t cache_attr;
+
+ protections = named_entry->protection & VM_PROT_ALL;
+ access = GET_MAP_MEM(named_entry->protection);
+
+ object = vm_object_enter(named_entry->backing.pager,
+ named_entry->size,
+ named_entry->internal,
+ FALSE,
+ FALSE);
+ if (object == VM_OBJECT_NULL) {
+ named_entry_unlock(named_entry);
+ return KERN_INVALID_OBJECT;
+ }
+
+ /* JMM - drop reference on pager here */
+
+ /* create an extra ref for the named entry */
+ vm_object_lock(object);
+ vm_object_reference_locked(object);
+ named_entry->backing.object = object;
+ named_entry->is_pager = FALSE;
+ named_entry_unlock(named_entry);
+
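+ /*
+ * Translate the access mode encoded in the named entry's
+ * protection bits into the matching cache attributes.
+ */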
+ wimg_mode = object->wimg_bits;
+ if (access == MAP_MEM_IO) {
+ wimg_mode = VM_WIMG_IO;
+ } else if (access == MAP_MEM_COPYBACK) {
+ wimg_mode = VM_WIMG_USE_DEFAULT;
+ } else if (access == MAP_MEM_WTHRU) {
+ wimg_mode = VM_WIMG_WTHRU;
+ } else if (access == MAP_MEM_WCOMB) {
+ wimg_mode = VM_WIMG_WCOMB;
+ }
+ if (wimg_mode == VM_WIMG_IO ||
+ wimg_mode == VM_WIMG_WCOMB)
+ cache_attr = TRUE;
+ else
+ cache_attr = FALSE;
+
+ /* wait for object (if any) to be ready */
+ if (!named_entry->internal) {
+ while (!object->pager_ready) {
+ vm_object_wait(
+ object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ vm_object_lock(object);
+ }
+ }
+
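+ /*
+ * If the cache attributes are changing, wait for paging
+ * activity to settle, then disconnect any existing pmap
+ * mappings so the new attributes take effect on the next
+ * fault.
+ */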
+ if (object->wimg_bits != wimg_mode) {
+ vm_page_t p;
+
+ vm_object_paging_wait(object, THREAD_UNINT);
+
+ object->wimg_bits = wimg_mode;
+ queue_iterate(&object->memq, p, vm_page_t, listq) {
+ if (!p->fictitious) {
+ if (p->pmapped)
+ pmap_disconnect(p->phys_page);
+ if (cache_attr)
+ pmap_sync_page_attributes_phys(p->phys_page);
+ }
+ }
+ }
+ object->true_share = TRUE;
+ if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ vm_object_unlock(object);
+ } else {
+ /* This is the case where we are going to map an already */
+ /* mapped object.  If the object is not ready, it is internal; */
+ /* an external object cannot be mapped until it is ready, so we */
+ /* can skip the ready check in this case. */
+ object = named_entry->backing.object;
+ assert(object != VM_OBJECT_NULL);
+ named_entry_unlock(named_entry);
+ vm_object_reference(object);
+ }
+ } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
+ /*
+ * JMM - This is temporary until we unify named entries
+ * and raw memory objects.
+ *
+ * Detected fake ip_kotype for a memory object. In
+ * this case, the port isn't really a port at all, but
+ * instead is just a raw memory object.
+ */
+
+ object = vm_object_enter((memory_object_t)port,
+ size, FALSE, FALSE, FALSE);
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_OBJECT;
+
+ /* wait for object (if any) to be ready */
+ if (object != VM_OBJECT_NULL) {
+ if (object == kernel_object) {
+ printf("Warning: Attempt to map kernel object"
+ " by a non-private kernel entity\n");
+ return KERN_INVALID_OBJECT;
+ }
+ if (!object->pager_ready) {
+ vm_object_lock(object);
+
+ while (!object->pager_ready) {
+ vm_object_wait(object,
+ VM_OBJECT_EVENT_PAGER_READY,
+ THREAD_UNINT);
+ vm_object_lock(object);
+ }
+ vm_object_unlock(object);
+ }
+ }
+ } else {
+ return KERN_INVALID_OBJECT;
+ }
+
+ if (object != VM_OBJECT_NULL &&
+ object->named &&
+ object->pager != MEMORY_OBJECT_NULL &&
+ object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
+ memory_object_t pager;
+ vm_prot_t pager_prot;
+ kern_return_t kr;
+
+ /*
+ * For "named" VM objects, let the pager know that the
+ * memory object is being mapped. Some pagers need to keep
+ * track of this, to know when they can reclaim the memory
+ * object, for example.
+ * VM calls memory_object_map() for each mapping (specifying
+ * the protection of each mapping) and calls
+ * memory_object_last_unmap() when all the mappings are gone.
+ */
+ pager_prot = max_protection;
+ if (copy) {
+ /*
+ * Copy-On-Write mapping: won't modify the
+ * memory object.
+ */
+ pager_prot &= ~VM_PROT_WRITE;
+ }
+ vm_object_lock(object);
+ pager = object->pager;
+ if (object->named &&
+ pager != MEMORY_OBJECT_NULL &&
+ object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
+ assert(object->pager_ready);
+ vm_object_mapping_wait(object, THREAD_UNINT);
+ vm_object_mapping_begin(object);
+ vm_object_unlock(object);
+
+ kr = memory_object_map(pager, pager_prot);
+ assert(kr == KERN_SUCCESS);
+
+ vm_object_lock(object);
+ vm_object_mapping_end(object);
+ }
+ vm_object_unlock(object);
+ }
+
+ /*
+ * Perform the copy if requested
+ */
+
+ if (copy) {
+ vm_object_t new_object;
+ vm_object_offset_t new_offset;
+
+ result = vm_object_copy_strategically(object, offset, size,
+ &new_object, &new_offset,
+ &copy);
+
+
+ if (result == KERN_MEMORY_RESTART_COPY) {
+ boolean_t success;
+ boolean_t src_needs_copy;
+
+ /*
+ * XXX
+ * We currently ignore src_needs_copy.
+ * This really is the issue of how to make
+ * MEMORY_OBJECT_COPY_SYMMETRIC safe for
+ * non-kernel users to use. Solution forthcoming.
+ * In the meantime, since we don't allow non-kernel
+ * memory managers to specify symmetric copy,
+ * we won't run into problems here.
+ */
+ new_object = object;
+ new_offset = offset;
+ success = vm_object_copy_quickly(&new_object,
+ new_offset, size,
+ &src_needs_copy,
+ &copy);
+ assert(success);
+ result = KERN_SUCCESS;
+ }
+ /*
+ * Throw away the reference to the
+ * original object, as it won't be mapped.
+ */
+
+ vm_object_deallocate(object);
+
+ if (result != KERN_SUCCESS)
+ return result;
+
+ object = new_object;
+ offset = new_offset;
+ }
+
+ result = vm_map_enter(target_map,
+ &map_addr, map_size,
+ (vm_map_offset_t)mask,
+ flags,
+ object, offset,
+ copy,
+ cur_protection, max_protection, inheritance);
+ if (result != KERN_SUCCESS)
+ vm_object_deallocate(object);
+ *address = map_addr;
+ return result;
+}
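+
+/*
+ * Example (illustrative sketch only): a hypothetical in-kernel caller
+ * that maps a named-entry or memory-object port anywhere in a task's
+ * map, in the style of the mach_vm_map() path that sits above this
+ * routine.  The wrapper name example_map_mem_port, its arguments, and
+ * the read/write protections are assumptions made for the example; the
+ * named entry is assumed to permit at least VM_PROT_READ | VM_PROT_WRITE.
+ *
+ * kern_return_t
+ * example_map_mem_port(vm_map_t map, ipc_port_t mem_port,
+ *     vm_object_offset_t offset, vm_map_size_t size,
+ *     vm_map_offset_t *addr)
+ * {
+ *     *addr = 0;
+ *     return vm_map_enter_mem_object(map,
+ *         addr,
+ *         size,
+ *         (vm_map_offset_t)0,
+ *         VM_FLAGS_ANYWHERE,
+ *         mem_port,
+ *         offset,
+ *         FALSE,
+ *         VM_PROT_READ | VM_PROT_WRITE,
+ *         VM_PROT_READ | VM_PROT_WRITE,
+ *         VM_INHERIT_DEFAULT);
+ * }
+ *
+ * On success, *addr receives the page-aligned address chosen by
+ * vm_map_enter().
+ */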
+
+
+
+
+kern_return_t
+vm_map_enter_mem_object_control(
+ vm_map_t target_map,
+ vm_map_offset_t *address,
+ vm_map_size_t initial_size,
+ vm_map_offset_t mask,
+ int flags,
+ memory_object_control_t control,
+ vm_object_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
+{
+ vm_map_address_t map_addr;
+ vm_map_size_t map_size;
+ vm_object_t object;
+ vm_object_size_t size;
+ kern_return_t result;
+ memory_object_t pager;
+ vm_prot_t pager_prot;
+ kern_return_t kr;
+
+ /*
+ * Check arguments for validity
+ */
+ if ((target_map == VM_MAP_NULL) ||
+ (cur_protection & ~VM_PROT_ALL) ||
+ (max_protection & ~VM_PROT_ALL) ||
+ (inheritance > VM_INHERIT_LAST_VALID) ||
+ initial_size == 0)
+ return KERN_INVALID_ARGUMENT;
+
+ map_addr = vm_map_trunc_page(*address);
+ map_size = vm_map_round_page(initial_size);
+ size = vm_object_round_page(initial_size);
+
+ object = memory_object_control_to_vm_object(control);
+
+ if (object == VM_OBJECT_NULL)
+ return KERN_INVALID_OBJECT;
+
+ if (object == kernel_object) {
+ printf("Warning: Attempt to map kernel object"
+ " by a non-private kernel entity\n");
+ return KERN_INVALID_OBJECT;
+ }
+
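+ /* Take an extra reference on the object on behalf of this mapping. */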
+ vm_object_lock(object);
+ object->ref_count++;
+ vm_object_res_reference(object);
+
+ /*
+ * For "named" VM objects, let the pager know that the
+ * memory object is being mapped. Some pagers need to keep
+ * track of this, to know when they can reclaim the memory
+ * object, for example.
+ * VM calls memory_object_map() for each mapping (specifying
+ * the protection of each mapping) and calls
+ * memory_object_last_unmap() when all the mappings are gone.
+ */
+ pager_prot = max_protection;
+ if (copy) {
+ pager_prot &= ~VM_PROT_WRITE;
+ }
+ pager = object->pager;
+ if (object->named &&
+ pager != MEMORY_OBJECT_NULL &&
+ object->copy_strategy != MEMORY_OBJECT_COPY_NONE) {
+ assert(object->pager_ready);
+ vm_object_mapping_wait(object, THREAD_UNINT);
+ vm_object_mapping_begin(object);
+ vm_object_unlock(object);
+
+ kr = memory_object_map(pager, pager_prot);
+ assert(kr == KERN_SUCCESS);
+
+ vm_object_lock(object);
+ vm_object_mapping_end(object);
+ }
+ vm_object_unlock(object);
+
+ /*
+ * Perform the copy if requested
+ */
+
+ if (copy) {
+ vm_object_t new_object;
+ vm_object_offset_t new_offset;
+
+ result = vm_object_copy_strategically(object, offset, size,
+ &new_object, &new_offset,
+ &copy);
+
+
+ if (result == KERN_MEMORY_RESTART_COPY) {
+ boolean_t success;
+ boolean_t src_needs_copy;
+
+ /*
+ * XXX
+ * We currently ignore src_needs_copy.
+ * This really is the issue of how to make
+ * MEMORY_OBJECT_COPY_SYMMETRIC safe for
+ * non-kernel users to use. Solution forthcoming.
+ * In the meantime, since we don't allow non-kernel
+ * memory managers to specify symmetric copy,
+ * we won't run into problems here.
+ */
+ new_object = object;
+ new_offset = offset;
+ success = vm_object_copy_quickly(&new_object,
+ new_offset, size,
+ &src_needs_copy,
+ &copy);
+ assert(success);
+ result = KERN_SUCCESS;
+ }
+ /*
+ * Throw away the reference to the
+ * original object, as it won't be mapped.
+ */
+
+ vm_object_deallocate(object);
+
+ if (result != KERN_SUCCESS)
+ return result;
+
+ object = new_object;
+ offset = new_offset;
+ }
+
+ result = vm_map_enter(target_map,
+ &map_addr, map_size,
+ (vm_map_offset_t)mask,
+ flags,
+ object, offset,
+ copy,
+ cur_protection, max_protection, inheritance);
+ if (result != KERN_SUCCESS)
+ vm_object_deallocate(object);
+ *address = map_addr;
+
+ return result;
+}
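+
+/*
+ * Example (illustrative sketch only): a hypothetical caller that
+ * already holds a memory_object_control_t and wants a read-only
+ * mapping of the controlled object.  The wrapper name
+ * example_map_control and its parameter choices are assumptions made
+ * for the example.
+ *
+ * kern_return_t
+ * example_map_control(vm_map_t map, memory_object_control_t control,
+ *     vm_map_size_t size, vm_map_offset_t *addr)
+ * {
+ *     *addr = 0;
+ *     return vm_map_enter_mem_object_control(map,
+ *         addr,
+ *         size,
+ *         (vm_map_offset_t)0,
+ *         VM_FLAGS_ANYWHERE,
+ *         control,
+ *         0,
+ *         FALSE,
+ *         VM_PROT_READ,
+ *         VM_PROT_READ,
+ *         VM_INHERIT_NONE);
+ * }
+ */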
+