+
+#ifdef KERNEL_PRIVATE
+/*
+ * vm_map_kernel_flags_t: kernel-private per-mapping option flags for the
+ * VM map entry points, packed into a single 32-bit bitfield
+ * (14 one-bit flags + 18 reserved bits).  Initialize a value with
+ * VM_MAP_KERNEL_FLAGS_NONE, then set the individual flags needed.
+ */
+typedef struct {
+ unsigned int
+ vmkf_atomic_entry:1,        /* keep entry atomic (no coalescing) */
+ vmkf_permanent:1,           /* mapping can NEVER be unmapped */
+ vmkf_guard_after:1,         /* guard page after the mapping */
+ vmkf_guard_before:1,        /* guard page before the mapping */
+ vmkf_submap:1,              /* mapping a VM submap */
+ vmkf_already:1,             /* OK if same mapping already exists */
+ vmkf_beyond_max:1,          /* map beyond the map's max offset */
+ vmkf_no_pmap_check:1,       /* do not check that pmap is empty */
+ vmkf_map_jit:1,             /* mark entry as JIT region */
+ vmkf_iokit_acct:1,          /* IOKit accounting */
+ vmkf_keep_map_locked:1,     /* keep map locked when returning from vm_map_enter() */
+ vmkf_fourk:1,               /* use fourk pager */
+ vmkf_overwrite_immutable:1, /* can overwrite immutable mappings */
+ vmkf_remap_prot_copy:1,     /* vm_remap for VM_PROT_COPY */
+ __vmkf_unused:18;           /* reserved: 14 flags + 18 = 32 bits */
+} vm_map_kernel_flags_t;
+#define VM_MAP_KERNEL_FLAGS_NONE (vm_map_kernel_flags_t) { \
+ .vmkf_atomic_entry = 0, /* keep entry atomic (no coalescing) */ \
+ .vmkf_permanent = 0, /* mapping can NEVER be unmapped */ \
+ .vmkf_guard_after = 0, /* guard page after the mapping */ \
+ .vmkf_guard_before = 0, /* guard page before the mapping */ \
+ .vmkf_submap = 0, /* mapping a VM submap */ \
+ .vmkf_already = 0, /* OK if same mapping already exists */ \
+ .vmkf_beyond_max = 0, /* map beyond the map's max offset */ \
+ .vmkf_no_pmap_check = 0, /* do not check that pmap is empty */ \
+ .vmkf_map_jit = 0, /* mark entry as JIT region */ \
+ .vmkf_iokit_acct = 0, /* IOKit accounting */ \
+ .vmkf_keep_map_locked = 0, /* keep map locked when returning from vm_map_enter() */ \
+ .vmkf_fourk = 0, /* use fourk pager */ \
+ .vmkf_overwrite_immutable = 0, /* can overwrite immutable mappings */ \
+ .vmkf_remap_prot_copy = 0, /* vm_remap for VM_PROT_COPY */ \
+ .__vmkf_unused = 0 \
+}
+#endif /* KERNEL_PRIVATE */
+
+
+