+ DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr);
+ return kr;
+}
+
+kern_return_t
+_mach_make_memory_entry(
+ vm_map_t target_map,
+ memory_object_size_t *size,
+ memory_object_offset_t offset,
+ vm_prot_t permission,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_entry)
+{
+ memory_object_size_t mo_size;
+ kern_return_t kr;
+
+ mo_size = (memory_object_size_t)*size;
+ kr = mach_make_memory_entry_64(target_map, &mo_size,
+ (memory_object_offset_t)offset, permission, object_handle,
+ parent_entry);
+ *size = mo_size;
+ return kr;
+}
+
+kern_return_t
+mach_make_memory_entry(
+ vm_map_t target_map,
+ vm_size_t *size,
+ vm_offset_t offset,
+ vm_prot_t permission,
+ ipc_port_t *object_handle,
+ ipc_port_t parent_entry)
+{
+ memory_object_size_t mo_size;
+ kern_return_t kr;
+
+ mo_size = (memory_object_size_t)*size;
+ kr = mach_make_memory_entry_64(target_map, &mo_size,
+ (memory_object_offset_t)offset, permission, object_handle,
+ parent_entry);
+ *size = CAST_DOWN(vm_size_t, mo_size);
+ return kr;
+}
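+
+/*
+ * Illustrative sketch only (not part of this change): a hypothetical
+ * in-kernel caller of the 32-bit wrapper above.  `my_map', `my_addr',
+ * `my_size' and `my_handle' are made-up names; the wrapper simply widens
+ * the size to memory_object_size_t, calls mach_make_memory_entry_64(),
+ * and narrows the result back with CAST_DOWN().
+ *
+ *	vm_size_t	my_size = PAGE_SIZE;
+ *	ipc_port_t	my_handle = IPC_PORT_NULL;
+ *	kern_return_t	kr;
+ *
+ *	kr = mach_make_memory_entry(my_map, &my_size, my_addr,
+ *	    VM_PROT_READ | VM_PROT_WRITE, &my_handle, IPC_PORT_NULL);
+ *	// on success, my_handle names the range and my_size is page-rounded
+ */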
+
+/*
+ * task_wire
+ *
+ * Set or clear the map's wiring_required flag. This flag, if set,
+ * will cause all future virtual memory allocation to allocate
+ * user wired memory. Unwiring pages wired down as a result of
+ * this routine is done with the vm_wire interface.
+ */
+kern_return_t
+task_wire(
+ vm_map_t map,
+ boolean_t must_wire)
+{
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_map_lock(map);
+ map->wiring_required = (must_wire == TRUE);
+ vm_map_unlock(map);
+
+ return KERN_SUCCESS;
+}
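+
+/*
+ * Illustrative sketch only (not part of this change): once wiring_required
+ * is set, later allocations in the map come back wired; `map', `addr' and
+ * `size' are made-up names.
+ *
+ *	kr = task_wire(map, TRUE);	// future allocations are wired
+ *	kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
+ *	...
+ *	kr = task_wire(map, FALSE);	// back to pageable allocations
+ *	// pages already wired are released through the vm_wire interface
+ */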
+
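+/*
+ * vm_map_exec_lockdown
+ *
+ * Set the map's map_disallow_new_exec flag. Once set, no new
+ * executable mappings may be established in the map.
+ */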
+kern_return_t
+vm_map_exec_lockdown(
+ vm_map_t map)
+{
+ if (map == VM_MAP_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ vm_map_lock(map);
+ map->map_disallow_new_exec = TRUE;
+ vm_map_unlock(map);
+
+ return KERN_SUCCESS;
+}
+
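+/*
+ * Debugging support: when VM_NAMED_ENTRY_LIST is configured, every named
+ * entry is also kept on a global list, protected by
+ * vm_named_entry_list_lock_data, so live entries can be inspected.
+ */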
+#if VM_NAMED_ENTRY_LIST
+queue_head_t vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list);
+int vm_named_entry_count = 0;
+LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data,
+ &vm_object_lck_grp, &vm_object_lck_attr);
+#endif /* VM_NAMED_ENTRY_LIST */
+
+__private_extern__ kern_return_t
+mach_memory_entry_allocate(
+ vm_named_entry_t *user_entry_p,
+ ipc_port_t *user_handle_p)
+{
+ vm_named_entry_t user_entry;
+ ipc_port_t user_handle;
+
+ user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
+ if (user_entry == NULL) {
+ return KERN_FAILURE;
+ }
+ bzero(user_entry, sizeof(*user_entry));
+
+ named_entry_lock_init(user_entry);
+
+ user_entry->backing.copy = NULL;
+ user_entry->is_object = FALSE;
+ user_entry->is_sub_map = FALSE;
+ user_entry->is_copy = FALSE;
+ user_entry->internal = FALSE;
+ user_entry->size = 0;
+ user_entry->offset = 0;
+ user_entry->data_offset = 0;
+ user_entry->protection = VM_PROT_NONE;
+ user_entry->ref_count = 1;
+
+ user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry,
+ IKOT_NAMED_ENTRY,
+ IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
+
+ *user_entry_p = user_entry;
+ *user_handle_p = user_handle;
+
+#if VM_NAMED_ENTRY_LIST
+ /* keep a loose (no reference) pointer to the Mach port, for debugging only */
+ user_entry->named_entry_port = user_handle;
+ /* backtrace at allocation time, for debugging only */
+ OSBacktrace(&user_entry->named_entry_bt[0],
+ NAMED_ENTRY_BT_DEPTH);
+
+ /* add this new named entry to the global list */
+ lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
+ queue_enter(&vm_named_entry_list, user_entry,
+ vm_named_entry_t, named_entry_list);
+ vm_named_entry_count++;
+ lck_mtx_unlock(&vm_named_entry_list_lock_data);
+#endif /* VM_NAMED_ENTRY_LIST */
+
+ return KERN_SUCCESS;
+}
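+
+/*
+ * Illustrative sketch only (not part of this change): the pattern callers
+ * of mach_memory_entry_allocate() follow.  The entry comes back zeroed with
+ * a single reference and a send right on the port; the caller then fills in
+ * the fields that describe what the entry names.  `map_size', `obj_offset',
+ * `permission' and `object_handle' are made-up names.
+ *
+ *	vm_named_entry_t	user_entry;
+ *	ipc_port_t		user_handle;
+ *
+ *	kr = mach_memory_entry_allocate(&user_entry, &user_handle);
+ *	if (kr == KERN_SUCCESS) {
+ *		user_entry->size = map_size;
+ *		user_entry->offset = obj_offset;
+ *		user_entry->protection = permission;
+ *		*object_handle = user_handle;
+ *	}
+ */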
+
+/*
+ * mach_memory_object_memory_entry_64
+ *
+ * Create a named entry backed by the provided pager.
+ *
+ */
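+/*
+ * Illustrative sketch only (not part of this change): `host', `dev_size'
+ * and `device_pager' are made-up stand-ins for a hypothetical caller that
+ * wraps an existing pager in a named entry.
+ *
+ *	ipc_port_t	entry = IPC_PORT_NULL;
+ *
+ *	kr = mach_memory_object_memory_entry_64(host, FALSE, dev_size,
+ *	    VM_PROT_READ | VM_PROT_WRITE, device_pager, &entry);
+ */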
+kern_return_t
+mach_memory_object_memory_entry_64(
+ host_t host,
+ boolean_t internal,
+ vm_object_offset_t size,
+ vm_prot_t permission,
+ memory_object_t pager,
+ ipc_port_t *entry_handle)
+{
+ unsigned int access;
+ vm_named_entry_t user_entry;
+ ipc_port_t user_handle;
+ vm_object_t object;
+ kern_return_t kr;
+
+ if (host == HOST_NULL) {
+ return KERN_INVALID_HOST;
+ }
+
+ if (pager == MEMORY_OBJECT_NULL && internal) {
+ object = vm_object_allocate(size);
+ if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+ object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+ }
+ } else {
+ object = memory_object_to_vm_object(pager);
+ if (object != VM_OBJECT_NULL) {
+ vm_object_reference(object);
+ }
+ }
+ if (object == VM_OBJECT_NULL) {
+ return KERN_INVALID_ARGUMENT;