     vm_prot_t permission,
     ipc_port_t *object_handle,
     ipc_port_t parent_handle)
+{
+    if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
+        /*
+         * Unknown flag: reject for forward compatibility.
+         */
+        return KERN_INVALID_VALUE;
+    }
+
+    return mach_make_memory_entry_internal(target_map,
+                                           size,
+                                           offset,
+                                           permission,
+                                           object_handle,
+                                           parent_handle);
+}
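
The new wrapper screens user-space callers against MAP_MEM_FLAGS_USER before anything reaches mach_make_memory_entry_internal(), which (per the next hunk) accepts the wider MAP_MEM_FLAGS_ALL set. A minimal user-space sketch of the resulting behavior, assuming the flag layout from osfmk/mach/memory_object_types.h, where MAP_MEM_LEDGER_TAG_NETWORK (0x002000) falls inside MAP_MEM_FLAGS_MASK but outside MAP_MEM_FLAGS_USER:

    /*
     * Illustration only: a user-space caller passing a kernel-only flag
     * bit should now get KERN_INVALID_VALUE back from the wrapper.
     */
    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <mach/mach_error.h>
    #include <stdio.h>

    #define MAP_MEM_LEDGER_TAG_NETWORK 0x002000 /* assumed value, kernel-only bit */

    int
    main(void)
    {
        mach_vm_address_t addr = 0;
        memory_object_size_t size;
        mach_port_t handle = MACH_PORT_NULL;
        kern_return_t kr;

        mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
            VM_FLAGS_ANYWHERE);
        size = vm_page_size;

        kr = mach_make_memory_entry_64(mach_task_self(), &size, addr,
            VM_PROT_READ | MAP_MEM_LEDGER_TAG_NETWORK,
            &handle, MACH_PORT_NULL);

        /* Expected: KERN_INVALID_VALUE, rejected by the wrapper's
         * ~MAP_MEM_FLAGS_USER check before the internal call is reached. */
        printf("kr = %d (%s)\n", kr, mach_error_string(kr));
        return 0;
    }
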
+
+extern int pacified_purgeable_iokit;
+
+kern_return_t
+mach_make_memory_entry_internal(
+    vm_map_t target_map,
+    memory_object_size_t *size,
+    memory_object_offset_t offset,
+    vm_prot_t permission,
+    ipc_port_t *object_handle,
+    ipc_port_t parent_handle)
 {
     vm_map_version_t version;
     vm_named_entry_t parent_entry;
     boolean_t use_data_addr;
     boolean_t use_4K_compat;
-    if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) {
+    if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) {
         /*
          * Unknown flag: reject for forward compatibility.
          */
     assert(object->resident_page_count == 0);
     assert(object->wired_page_count == 0);
     vm_object_lock(object);
-    if (object->purgeable_only_by_kernel) {
-        vm_purgeable_nonvolatile_enqueue(object,
-                                         kernel_task);
+    if (pacified_purgeable_iokit) {
+        if (permission & MAP_MEM_LEDGER_TAG_NETWORK) {
+            vm_purgeable_nonvolatile_enqueue(object,
+                                             kernel_task);
+        } else {
+            vm_purgeable_nonvolatile_enqueue(object,
+                                             current_task());
+        }
     } else {
-        vm_purgeable_nonvolatile_enqueue(object,
-                                         current_task());
+        if (object->purgeable_only_by_kernel) {
+            vm_purgeable_nonvolatile_enqueue(object,
+                                             kernel_task);
+        } else {
+            vm_purgeable_nonvolatile_enqueue(object,
+                                             current_task());
+        }
     }
     vm_object_unlock(object);
 }
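
With pacified_purgeable_iokit set, ledger attribution for a new purgeable object is driven by the caller-supplied permission bits instead of the object's purgeable_only_by_kernel marking: only MAP_MEM_LEDGER_TAG_NETWORK entries stay on the kernel_task ledger, and everything else is charged to current_task(). A hedged sketch of a user-space request that reaches this code, with the MAP_MEM_* values assumed from osfmk/mach/memory_object_types.h:

    /*
     * Illustration only: create a purgeable named entry; with
     * pacified_purgeable_iokit != 0 and no network ledger tag, the
     * backing object is enqueued against the calling task's ledger.
     */
    #include <mach/mach.h>
    #include <stdio.h>

    #define MAP_MEM_NAMED_CREATE 0x020000 /* assumed value */
    #define MAP_MEM_PURGABLE     0x040000 /* assumed value */

    int
    main(void)
    {
        memory_object_size_t size = vm_page_size;
        mach_port_t entry = MACH_PORT_NULL;
        kern_return_t kr;

        kr = mach_make_memory_entry_64(mach_task_self(), &size, 0,
            VM_PROT_DEFAULT | MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE,
            &entry, MACH_PORT_NULL);

        /* On success the new VM object went through
         * vm_purgeable_nonvolatile_enqueue(object, current_task()). */
        printf("kr = %d, entry = 0x%x\n", kr, entry);
        return 0;
    }
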
(VME_OFFSET(next_entry->vme_prev) +
(next_entry->vme_prev->vme_end
- next_entry->vme_prev->vme_start)));
+ next_entry->use_pmap = TRUE;
next_entry->needs_copy = FALSE;
} else {
panic("mach_make_memory_entry_64:"
return(KERN_SUCCESS);
}
+kern_return_t
+vm_map_exec_lockdown(
+    vm_map_t map)
+{
+    if (map == VM_MAP_NULL)
+        return(KERN_INVALID_ARGUMENT);
+
+    vm_map_lock(map);
+    map->map_disallow_new_exec = TRUE;
+    vm_map_unlock(map);
+
+    return(KERN_SUCCESS);
+}
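
vm_map_exec_lockdown() latches map_disallow_new_exec under the map lock, with no interface to clear it again. The enforcement sites are outside this excerpt; presumably any path that would grant VM_PROT_EXECUTE on the map consults the flag, roughly as in this hypothetical helper (the name vm_map_check_new_exec and the call sites are assumptions, not part of the diff):

    /*
     * Hypothetical sketch of the enforcement side: callers such as
     * vm_map_protect()/vm_map_enter() would reject requests that add
     * execute permission once the map has been locked down.
     */
    static kern_return_t
    vm_map_check_new_exec(vm_map_t map, vm_prot_t new_prot)
    {
        if (map->map_disallow_new_exec && (new_prot & VM_PROT_EXECUTE)) {
            return KERN_PROTECTION_FAILURE;
        }
        return KERN_SUCCESS;
    }
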
+
__private_extern__ kern_return_t
mach_memory_entry_allocate(
     vm_named_entry_t *user_entry_p,
     if (pager == MEMORY_OBJECT_NULL && internal) {
         object = vm_object_allocate(size);
+        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
+            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+        }
     } else {
         object = memory_object_to_vm_object(pager);
         if (object != VM_OBJECT_NULL) {
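
The copy-strategy hunk covers the MAP_MEM_NAMED_CREATE path: vm_object_allocate() hands back an object with MEMORY_OBJECT_COPY_SYMMETRIC, a strategy that assumes a single mapping, and it is demoted here to MEMORY_OBJECT_COPY_DELAY, presumably because a named entry's backing object can be mapped into several address spaces at once. A user-space sketch of that multiple-mapping case (MAP_MEM_NAMED_CREATE value assumed from osfmk/mach/memory_object_types.h):

    /*
     * Illustration only: one named entry, two shared mappings of the
     * same backing object in the same task.
     */
    #include <mach/mach.h>
    #include <mach/mach_vm.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAP_MEM_NAMED_CREATE 0x020000 /* assumed value */

    int
    main(void)
    {
        memory_object_size_t size = vm_page_size;
        mach_port_t entry = MACH_PORT_NULL;
        mach_vm_address_t a = 0, b = 0;

        mach_make_memory_entry_64(mach_task_self(), &size, 0,
            VM_PROT_DEFAULT | MAP_MEM_NAMED_CREATE,
            &entry, MACH_PORT_NULL);

        /* copy == FALSE: both mappings share the entry's VM object. */
        mach_vm_map(mach_task_self(), &a, size, 0, VM_FLAGS_ANYWHERE,
            entry, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
            VM_INHERIT_NONE);
        mach_vm_map(mach_task_self(), &b, size, 0, VM_FLAGS_ANYWHERE,
            entry, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
            VM_INHERIT_NONE);

        strcpy((char *)(uintptr_t)a, "shared");
        printf("%s\n", (char *)(uintptr_t)b); /* prints "shared" */
        return 0;
    }
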