/*
 * vm_page_more_fictitious:
 *
 * Add more fictitious pages to the zone.
 * Allowed to block. This routine is closely tied to the
 * zones code, for several reasons:
 * 1. we need to carve some page structures out of physical
 *    memory before zones work, so they _cannot_ come from
 *    the zone restricted submap.
 * 2. the zone needs to be collectable in order to prevent
 *    growth without bound. These structures are used by
 *    the device pager (by the hundreds and thousands), as
 *    private pages for pageout, and as blocking pages for
 *    pagein. Temporary bursts in demand should not result in
 *    permanent allocation of a resource.
 * 3. To smooth allocation humps, we allocate single pages
 *    with kernel_memory_allocate(), and cram them into the
 *    zone.
 */
-
-void
-vm_page_more_fictitious(void)
-{
- vm_offset_t addr;
- kern_return_t retval;
-
- c_vm_page_more_fictitious++;
-
- /*
- * Allocate a single page from the zone restricted submap. Do not wait
- * if no physical pages are immediately available, and do not zero the
- * space. We need our own blocking lock here to prevent having multiple,
- * simultaneous requests from piling up on the zone restricted submap
- * lock.
- * Exactly one (of our) threads should be potentially waiting on the map
- * lock. If winner is not vm-privileged, then the page allocation will
- * fail, and it will temporarily block here in the vm_page_wait().
- */
- lck_mtx_lock(&vm_page_alloc_lock);
- /*
- * If another thread allocated space, just bail out now.
- */
- if (os_atomic_load(&vm_page_zone->countfree, relaxed) > 5) {
- /*
- * The number "5" is a small number that is larger than the
- * number of fictitious pages that any single caller will
- * attempt to allocate. Otherwise, a thread will attempt to
- * acquire a fictitious page (vm_page_grab_fictitious), fail,
- * release all of the resources and locks already acquired,
- * and then call this routine. This routine finds the pages
- * that the caller released, so fails to allocate new space.
- * The process repeats infinitely. The largest known number
- * of fictitious pages required in this manner is 2. 5 is
- * simply a somewhat larger number.
- */
- lck_mtx_unlock(&vm_page_alloc_lock);
- return;
- }
-
- retval = kernel_memory_allocate(zone_submap(vm_page_zone),
- &addr, PAGE_SIZE, 0, KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
- VM_KERN_MEMORY_ZONE);
-
- if (retval != KERN_SUCCESS) {
- /*
- * No page was available. Drop the
- * lock to give another thread a chance at it, and
- * wait for the pageout daemon to make progress.
- */
- lck_mtx_unlock(&vm_page_alloc_lock);
- vm_page_wait(THREAD_UNINT);
- return;
- }
-
- zcram(vm_page_zone, addr, PAGE_SIZE);
-
- lck_mtx_unlock(&vm_page_alloc_lock);
-}
-
-