+ /*
+ * limit the size of a single extent of wired memory
+ * to try to limit the damage to the system if
+ * too many pages get wired down
+ */
+ if (map_size > (1 << 30)) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ /*
+ * Guard pages:
+ *
+ * Guard pages are implemented as fictitious pages. By placing guard pages
+ * on either end of a stack, they can help detect cases where a thread walks
+ * off either end of its stack. They are allocated and set up here, and attempts
+ * to access those pages are trapped in vm_fault_page().
+ *
+ * The map_size we were passed may include extra space for
+ * guard pages. If those were requested, then back it out of fill_size
+ * since vm_map_find_space() takes just the actual size not including
+ * guard pages. Similarly, fill_start indicates where the actual pages
+ * will begin in the range.
+ */
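+ /*
+ * Illustrative sketch (not part of this change): a caller that wants one
+ * usable page bracketed by guard pages on both ends would pass
+ * KMA_GUARD_FIRST | KMA_GUARD_LAST with a map_size of 3 * PAGE_SIZE;
+ * the adjustments below then leave fill_start == PAGE_SIZE,
+ * fill_size == PAGE_SIZE and guard_page_count == 2.
+ */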
+
+ fill_start = 0;
+ fill_size = map_size;
+
+ if (flags & KMA_GUARD_FIRST) {
+ vm_alloc_flags |= VM_FLAGS_GUARD_BEFORE;
+ fill_start += PAGE_SIZE_64;
+ fill_size -= PAGE_SIZE_64;
+ if (map_size < fill_start + fill_size) {
+ /* no space for a guard page */
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+ guard_page_count++;
+ }
+ if (flags & KMA_GUARD_LAST) {
+ vm_alloc_flags |= VM_FLAGS_GUARD_AFTER;
+ fill_size -= PAGE_SIZE_64;
+ if (map_size <= fill_start + fill_size) {
+ /* no space for a guard page */
+ *addrp = 0;
+ return KERN_INVALID_ARGUMENT;
+ }
+ guard_page_count++;
+ }
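+ /*
+ * fill_size now covers only the pages that will actually be wired,
+ * so it must be an exact multiple of the page size.
+ */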
+ wired_page_count = (int) (fill_size / PAGE_SIZE_64);
+ assert(wired_page_count * PAGE_SIZE_64 == fill_size);
+
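+ /*
+ * Grab the guard pages up front and chain them on guard_page_list
+ * through pageq.next. Guard pages come from the fictitious page pool,
+ * so if none is available we replenish the pool with
+ * vm_page_more_fictitious() and retry, unless the caller asked not
+ * to wait (KMA_NOPAGEWAIT).
+ */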
+ for (i = 0; i < guard_page_count; i++) {
+ for (;;) {
+ mem = vm_page_grab_guard();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ vm_page_more_fictitious();
+ }
+ mem->pageq.next = (queue_entry_t)guard_page_list;
+ guard_page_list = mem;
+ }
+
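+ /*
+ * Now grab the pages that will actually back the allocation and chain
+ * them on wired_page_list. KMA_LOMEM asks for pages from the low
+ * physical range via vm_page_grablo(); otherwise vm_page_grab() is used.
+ */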
+ for (i = 0; i < wired_page_count; i++) {
+ uint64_t unavailable;
+
+ for (;;) {
+ if (flags & KMA_LOMEM)
+ mem = vm_page_grablo();
+ else
+ mem = vm_page_grab();
+
+ if (mem != VM_PAGE_NULL)
+ break;
+
+ if (flags & KMA_NOPAGEWAIT) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
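+ /*
+ * Estimate how much memory is effectively unavailable for wiring:
+ * pages already wired plus the free-page target that the pageout
+ * code tries to maintain. If the request can no longer fit in what
+ * is left of max_mem, fail rather than wait for pages that will
+ * never come; otherwise block in VM_PAGE_WAIT() and retry.
+ */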
+ unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
+
+ if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
+ kr = KERN_RESOURCE_SHORTAGE;
+ goto out;
+ }
+ VM_PAGE_WAIT();
+ }
+ mem->pageq.next = (queue_entry_t)wired_page_list;
+ wired_page_list = mem;
+ }