vm_map_entry_t *entry);
static vm_map_entry_t _vm_map_entry_create(
- struct vm_map_header *map_header);
+ struct vm_map_header *map_header, boolean_t map_locked);
static void _vm_map_entry_dispose(
struct vm_map_header *map_header,
* wire count; it's used for map splitting and zone changing in
* vm_map_copyout.
*/
-#define vm_map_entry_copy(NEW,OLD) \
-MACRO_BEGIN \
+#define vm_map_entry_copy(NEW,OLD) \
+MACRO_BEGIN \
+boolean_t _vmec_reserved = (NEW)->from_reserved_zone; \
*(NEW) = *(OLD); \
(NEW)->is_shared = FALSE; \
(NEW)->needs_wakeup = FALSE; \
(NEW)->wired_count = 0; \
(NEW)->user_wired_count = 0; \
(NEW)->permanent = FALSE; \
+ (NEW)->from_reserved_zone = _vmec_reserved; \
MACRO_END
-#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
+#define vm_map_entry_copy_full(NEW,OLD) \
+MACRO_BEGIN \
+boolean_t _vmecf_reserved = (NEW)->from_reserved_zone; \
+(*(NEW) = *(OLD)); \
+(NEW)->from_reserved_zone = _vmecf_reserved; \
+MACRO_END
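/*
 * Illustrative sketch: the saved _vmec_reserved/_vmecf_reserved bit matters
 * because the destination entry has already been allocated from either the
 * general or the reserved zone, and a plain structure copy would overwrite
 * that bookkeeping with the source entry's bit.  Assuming a clip-style
 * caller:
 *
 *	new_entry = _vm_map_entry_create(map_header, map_locked);
 *	vm_map_entry_copy_full(new_entry, entry);
 *	...
 *	_vm_map_entry_dispose(map_header, new_entry);
 *
 * from_reserved_zone still describes new_entry's own allocation, so the
 * dispose path frees it back to the zone it actually came from.
 */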
/*
* Decide if we want to allow processes to execute from their data or stack areas.
static zone_t vm_map_zone; /* zone for vm_map structures */
static zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */
-static zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
+static zone_t vm_map_entry_reserved_zone; /* zone with reserve for non-blocking
+ * allocations */
static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */
static vm_size_t map_data_size;
static void *kentry_data;
static vm_size_t kentry_data_size;
-static int kentry_count = 2048; /* to init kentry_data_size */
#if CONFIG_EMBEDDED
#define NO_COALESCE_LIMIT 0
*
* vm_map_zone: used to allocate maps.
* vm_map_entry_zone: used to allocate map entries.
- * vm_map_kentry_zone: used to allocate map entries for the kernel.
+ * vm_map_entry_reserved_zone: fallback zone for kernel map entries.
*
* The kernel allocates map entries from a special zone that is initially
* "crammed" with memory. It would be difficult (perhaps impossible) for
vm_map_init(
void)
{
+ vm_size_t entry_zone_alloc_size;
vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024,
PAGE_SIZE, "maps");
zone_change(vm_map_zone, Z_NOENCRYPT, TRUE);
-
+#if defined(__LP64__)
+ entry_zone_alloc_size = PAGE_SIZE * 5;
+#else
+ entry_zone_alloc_size = PAGE_SIZE * 6;
+#endif
+
vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
- 1024*1024, PAGE_SIZE*5,
- "non-kernel map entries");
+ 1024*1024, entry_zone_alloc_size,
+ "VM map entries");
zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE);
+ zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE);
- vm_map_kentry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
- kentry_data_size, kentry_data_size,
- "kernel map entries");
- zone_change(vm_map_kentry_zone, Z_NOENCRYPT, TRUE);
+ vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
+ kentry_data_size * 64, kentry_data_size,
+ "Reserved VM map entries");
+ zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE);
vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy),
- 16*1024, PAGE_SIZE, "map copies");
+ 16*1024, PAGE_SIZE, "VM map copies");
zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE);
/*
* Cram the map and kentry zones with initial data.
- * Set kentry_zone non-collectible to aid zone_gc().
+ * Set reserved_zone non-collectible to aid zone_gc().
*/
zone_change(vm_map_zone, Z_COLLECT, FALSE);
- zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE);
- zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE);
- zone_change(vm_map_kentry_zone, Z_FOREIGN, TRUE);
- zone_change(vm_map_kentry_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
+
+ zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE);
+ zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE);
+ zone_change(vm_map_entry_reserved_zone, Z_FOREIGN, TRUE);
+ zone_change(vm_map_entry_reserved_zone, Z_NOCALLOUT, TRUE);
+ zone_change(vm_map_entry_reserved_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
- zcram(vm_map_zone, map_data, map_data_size);
- zcram(vm_map_kentry_zone, kentry_data, kentry_data_size);
+ zcram(vm_map_zone, (vm_offset_t)map_data, map_data_size);
+ zcram(vm_map_entry_reserved_zone, (vm_offset_t)kentry_data, kentry_data_size);
lck_grp_attr_setdefault(&vm_map_lck_grp_attr);
lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr);
vm_map_steal_memory(
void)
{
+ uint32_t kentry_initial_pages;
+
map_data_size = round_page(10 * sizeof(struct _vm_map));
map_data = pmap_steal_memory(map_data_size);
-#if 0
/*
- * Limiting worst case: vm_map_kentry_zone needs to map each "available"
- * physical page (i.e. that beyond the kernel image and page tables)
- * individually; we guess at most one entry per eight pages in the
- * real world. This works out to roughly .1 of 1% of physical memory,
- * or roughly 1900 entries (64K) for a 64M machine with 4K pages.
+ * kentry_initial_pages is the number of pages of kernel map entries
+ * stolen here for bootstrap, until the asynchronous replenishment
+ * scheme is activated and/or entries become available from the general
+ * map entry pool.
*/
+#if defined(__LP64__)
+ kentry_initial_pages = 10;
+#else
+ kentry_initial_pages = 6;
#endif
- kentry_count = pmap_free_pages() / 8;
-
-
- kentry_data_size =
- round_page(kentry_count * sizeof(struct vm_map_entry));
+ kentry_data_size = kentry_initial_pages * PAGE_SIZE;
kentry_data = pmap_steal_memory(kentry_data_size);
}
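/*
 * Sizing sketch (assuming 4 KB pages; the exact size of struct vm_map_entry
 * is architecture-dependent): on LP64 kernels the bootstrap reservation
 * works out to
 *
 *	kentry_data_size = 10 * PAGE_SIZE = 40960 bytes
 *
 * versus 6 * PAGE_SIZE = 24576 bytes on 32-bit kernels, i.e. on the order
 * of a few hundred map entries stolen from pmap before zalloc() is usable.
 */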
+void
+vm_kernel_reserved_entry_init(void)
+{
+ zone_prio_refill_configure(vm_map_entry_reserved_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_entry));
+}
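/*
 * For illustration, the refill target handed to zone_prio_refill_configure()
 * is expressed in entries, not bytes:
 *
 *	(6 * PAGE_SIZE) / sizeof(struct vm_map_entry)
 *
 * i.e. roughly six pages' worth of reserved entries (a couple of hundred
 * with 4 KB pages, assuming an entry on the order of 100 bytes) are kept
 * replenished asynchronously, so that a failed non-blocking allocation in
 * _vm_map_entry_create() can always fall back to the reserved zone.
 */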
+
/*
* vm_map_create:
*
* Allocates a VM map entry for insertion in the
* given map (or map copy). No fields are filled.
*/
-#define vm_map_entry_create(map) \
- _vm_map_entry_create(&(map)->hdr)
+#define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked)
-#define vm_map_copy_entry_create(copy) \
- _vm_map_entry_create(&(copy)->cpy_hdr)
+#define vm_map_copy_entry_create(copy, map_locked) \
+ _vm_map_entry_create(&(copy)->cpy_hdr, map_locked)
+/* counters: kernel map entries served from the reserved zone vs. the
+ * general VM map entry zone */
+unsigned reserved_zalloc_count, nonreserved_zalloc_count;
static vm_map_entry_t
_vm_map_entry_create(
- register struct vm_map_header *map_header)
+ struct vm_map_header *map_header, boolean_t __unused map_locked)
{
- register zone_t zone;
- register vm_map_entry_t entry;
+ zone_t zone;
+ vm_map_entry_t entry;
- if (map_header->entries_pageable)
- zone = vm_map_entry_zone;
- else
- zone = vm_map_kentry_zone;
+ zone = vm_map_entry_zone;
+
+ assert(map_header->entries_pageable ? !map_locked : TRUE);
+
+ if (map_header->entries_pageable) {
+ entry = (vm_map_entry_t) zalloc(zone);
+ }
+ else {
+ entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE);
+
+ if (entry == VM_MAP_ENTRY_NULL) {
+ zone = vm_map_entry_reserved_zone;
+ entry = (vm_map_entry_t) zalloc(zone);
+ OSAddAtomic(1, &reserved_zalloc_count);
+ } else
+ OSAddAtomic(1, &nonreserved_zalloc_count);
+ }
- entry = (vm_map_entry_t) zalloc(zone);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
+ entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone);
+
vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE);
return(entry);
{
register zone_t zone;
- if (map_header->entries_pageable)
+ if (map_header->entries_pageable || !(entry->from_reserved_zone))
zone = vm_map_entry_zone;
else
- zone = vm_map_kentry_zone;
+ zone = vm_map_entry_reserved_zone;
+
+ if (!map_header->entries_pageable) {
+ if (zone == vm_map_entry_zone)
+ OSAddAtomic(-1, &nonreserved_zalloc_count);
+ else
+ OSAddAtomic(-1, &reserved_zalloc_count);
+ }
zfree(zone, entry);
}
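/*
 * Illustrative sketch of the allocate/free pairing under the new scheme
 * (identifiers as above; map_locked says whether the caller already holds
 * the map lock and therefore must not block):
 *
 *	entry = vm_map_entry_create(map, map_locked);
 *	... fill in and link the entry ...
 *	_vm_map_entry_dispose(&map->hdr, entry);
 *
 * For a non-pageable (kernel) header the create path first tries
 * vm_map_entry_zone with zalloc_canblock(..., FALSE) and falls back to
 * vm_map_entry_reserved_zone, recording the choice in
 * entry->from_reserved_zone; the dispose path consults that bit so the
 * entry is returned to the zone it came from and the
 * reserved_zalloc_count/nonreserved_zalloc_count counters stay balanced.
 */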
size += PAGE_SIZE_64;
}
- new_entry = vm_map_entry_create(map);
+ new_entry = vm_map_entry_create(map, FALSE);
/*
* Look for the first possible address; if there's already
* address.
*/
- new_entry = _vm_map_entry_create(map_header);
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy_full(new_entry, entry);
new_entry->vme_end = start;
* AFTER the specified entry
*/
- new_entry = _vm_map_entry_create(map_header);
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy_full(new_entry, entry);
assert(entry->vme_start < end);
/*
* Find the zone that the copies were allocated from
*/
- old_zone = (copy->cpy_hdr.entries_pageable)
- ? vm_map_entry_zone
- : vm_map_kentry_zone;
+	/* the source zone is now determined per entry, as each one is freed below */
entry = vm_map_copy_first_entry(copy);
/*
* Copy each entry.
*/
while (entry != vm_map_copy_to_entry(copy)) {
- new = vm_map_copy_entry_create(copy);
+ new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
vm_map_entry_copy_full(new, entry);
new->use_pmap = FALSE; /* clr address space specifics */
vm_map_copy_entry_link(copy,
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
+ old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone;
zfree(old_zone, entry);
entry = next;
}
copy->offset = src_addr;
copy->size = len;
- new_entry = vm_map_copy_entry_create(copy);
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
#define RETURN(x) \
MACRO_BEGIN \
version.main_timestamp = src_map->timestamp;
vm_map_unlock(src_map);
- new_entry = vm_map_copy_entry_create(copy);
+ new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);
vm_map_lock(src_map);
if ((version.main_timestamp + 1) != src_map->timestamp) {
* Mark both entries as shared.
*/
- new_entry = vm_map_entry_create(new_map);
+ new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel
+ * map or descendants */
vm_map_entry_copy(new_entry, old_entry);
old_entry->is_shared = TRUE;
new_entry->is_shared = TRUE;
goto slow_vm_map_fork_copy;
}
- new_entry = vm_map_entry_create(new_map);
+ new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */
vm_map_entry_copy(new_entry, old_entry);
/* clear address space specifics */
new_entry->use_pmap = FALSE;
assert(insp_entry != (vm_map_entry_t)0);
- new_entry = vm_map_entry_create(map);
+ new_entry = vm_map_entry_create(map, !map->hdr.entries_pageable);
new_entry->vme_start = start;
new_entry->vme_end = end;
offset = src_entry->offset + (src_start - src_entry->vme_start);
- new_entry = _vm_map_entry_create(map_header);
+ new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable);
vm_map_entry_copy(new_entry, src_entry);
new_entry->use_pmap = FALSE; /* clr address space specifics */