/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the
- * License may not be used to create, or enable the creation or
- * redistribution of, unlawful or unlicensed copies of an Apple operating
- * system, or to circumvent, violate, or enable the circumvention or
- * violation of, any terms of an Apple operating system software license
- * agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
+#include <mach/sdt.h>
#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
+#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/xpr.h>
#include <kern/kalloc.h>
#include <machine/vm_tuning.h>
+#include <machine/commpage.h>
+
+#if CONFIG_EMBEDDED
+#include <sys/kern_memorystatus.h>
+#endif
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
+#include <vm/memory_object.h>
+#include <vm/vm_purgeable_internal.h>
/*
* ENCRYPTED SWAP:
*/
-#ifdef __ppc__
-#include <ppc/mappings.h>
-#endif /* __ppc__ */
#include <../bsd/crypto/aes/aes.h>
+extern u_int32_t random(void); /* from <libkern/libkern.h> */
-extern ipc_port_t memory_manager_default;
-
+#if UPL_DEBUG
+#include <libkern/OSDebug.h>
+#endif
-#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE
-#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 10000 /* maximum iterations of the active queue to move pages to inactive */
+#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE /* maximum iterations of the active queue to move pages to inactive */
+#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE 100
#endif
-#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE
-#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096 /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
+#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
+#ifdef CONFIG_EMBEDDED
+#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
+#else
+#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
+#endif
#endif
#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_IDLE_WAIT 10 /* milliseconds */
#endif /* VM_PAGEOUT_IDLE_WAIT */
+#ifndef VM_PAGE_SPECULATIVE_TARGET
+#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / 20)
+#endif /* VM_PAGE_SPECULATIVE_TARGET */
+
+#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
+#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
+#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
+
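/*
 * Illustrative arithmetic (a sketch; the 1,000,000-page figure is a
 * hypothetical system size): VM_PAGE_SPECULATIVE_TARGET(1000000) is
 * 1000000 / 20 = 50000 pages (5%), while
 * VM_PAGE_INACTIVE_HEALTHY_LIMIT(1000000) is 1000000 / 200 = 5000 pages
 * (0.5%), the level below which vm_pageout_scan() treats the
 * inactive+speculative queues as unhealthy and starts a catch-up run.
 */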
/*
* To obtain a reasonable LRU approximation, the inactive queue
*/
#ifndef VM_PAGE_FREE_TARGET
+#ifdef CONFIG_EMBEDDED
+#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
+#else
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
+#endif
#endif /* VM_PAGE_FREE_TARGET */
/*
*/
#ifndef VM_PAGE_FREE_MIN
-#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#ifdef CONFIG_EMBEDDED
+#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
+#else
+#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#endif
#endif /* VM_PAGE_FREE_MIN */
+#define VM_PAGE_FREE_MIN_LIMIT 1500
+#define VM_PAGE_FREE_TARGET_LIMIT 2000
+
+
/*
* When vm_page_free_count falls below vm_page_free_reserved,
* only vm-privileged threads can allocate pages. vm-privilege
#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n) \
- ((6 * VM_PAGE_LAUNDRY_MAX) + (n))
+ ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif /* VM_PAGE_FREE_RESERVED */
-
/*
- * must hold the page queues lock to
- * manipulate this structure
+ * When we dequeue pages from the inactive list, they are
+ * reactivated (ie, put back on the active queue) if referenced.
+ * However, it is possible to starve the free list if other
+ * processors are referencing pages faster than we can turn off
+ * the referenced bit. So we limit the number of reactivations
+ * we will make per call of vm_pageout_scan().
*/
-struct vm_pageout_queue {
- queue_head_t pgo_pending; /* laundry pages to be processed by pager's iothread */
- unsigned int pgo_laundry; /* current count of laundry pages on queue or in flight */
- unsigned int pgo_maxlaundry;
-
- unsigned int pgo_idle:1, /* iothread is blocked waiting for work to do */
- pgo_busy:1, /* iothread is currently processing request from pgo_pending */
- pgo_throttled:1,/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
- :0;
-};
-
-#define VM_PAGE_Q_THROTTLED(q) \
- ((q)->pgo_laundry >= (q)->pgo_maxlaundry)
+#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
+#ifndef VM_PAGE_REACTIVATE_LIMIT
+#ifdef CONFIG_EMBEDDED
+#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
+#else
+#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
+#endif
+#endif /* VM_PAGE_REACTIVATE_LIMIT */
+#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 100
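/*
 * Illustrative arithmetic (a sketch; the page counts are hypothetical):
 * on the non-embedded path, VM_PAGE_REACTIVATE_LIMIT(1000000) is
 * MAX(1000000 / 20, 20000) = 50000 reactivations per vm_pageout_scan()
 * call, while VM_PAGE_REACTIVATE_LIMIT(200000) is MAX(10000, 20000) =
 * 20000, so as written VM_PAGE_REACTIVATE_LIMIT_MAX acts as a floor on
 * small configurations.  VM_PAGEOUT_INACTIVE_FORCE_RECLAIM (100) bounds
 * how long a run of referenced inactive pages can keep being
 * reactivated before one is reclaimed anyway.
 */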
/*
static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(void);
-static void vm_pageout_queue_steal(vm_page_t);
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
+static thread_t vm_pageout_external_iothread = THREAD_NULL;
+static thread_t vm_pageout_internal_iothread = THREAD_NULL;
+
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
* from existing backing store and files
*/
unsigned int vm_accellerate_zf_pageout_trigger = 400;
-unsigned int vm_zf_iterator;
-unsigned int vm_zf_iterator_count = 40;
-unsigned int last_page_zf;
+unsigned int zf_queue_min_count = 100;
+unsigned int vm_zf_queue_count = 0;
+
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
unsigned int vm_zf_count = 0;
+#else
+uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
+#endif
/*
* These variables record the pageout daemon's actions:
unsigned int vm_pageout_inactive_used = 0; /* debugging */
unsigned int vm_pageout_inactive_clean = 0; /* debugging */
unsigned int vm_pageout_inactive_dirty = 0; /* debugging */
+unsigned int vm_pageout_inactive_deactivated = 0; /* debugging */
+unsigned int vm_pageout_inactive_zf = 0; /* debugging */
unsigned int vm_pageout_dirty_no_pager = 0; /* debugging */
unsigned int vm_pageout_purged_objects = 0; /* debugging */
unsigned int vm_stat_discard = 0; /* debugging */
unsigned int vm_stat_discard_sent = 0; /* debugging */
unsigned int vm_stat_discard_failure = 0; /* debugging */
unsigned int vm_stat_discard_throttle = 0; /* debugging */
+unsigned int vm_pageout_reactivation_limit_exceeded = 0; /* debugging */
+unsigned int vm_pageout_catch_ups = 0; /* debugging */
+unsigned int vm_pageout_inactive_force_reclaim = 0; /* debugging */
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled = 0;
unsigned int vm_pageout_scan_throttle = 0; /* debugging */
+unsigned int vm_pageout_scan_throttle_aborted = 0; /* debugging */
unsigned int vm_pageout_scan_burst_throttle = 0; /* debugging */
unsigned int vm_pageout_scan_empty_throttle = 0; /* debugging */
unsigned int vm_pageout_scan_deadlock_detected = 0; /* debugging */
unsigned int vm_pageout_scan_active_throttle_success = 0; /* debugging */
unsigned int vm_pageout_scan_inactive_throttle_success = 0; /* debugging */
+
+unsigned int vm_page_speculative_count_drifts = 0;
+unsigned int vm_page_speculative_count_drift_max = 0;
+
/*
* Backing store throttle when BS is exhausted
*/
unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;
+unsigned int vm_page_steal_pageout_page = 0;
+
/*
* ENCRYPTED SWAP:
* counters and statistics...
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */
-
struct vm_pageout_queue vm_pageout_queue_internal;
struct vm_pageout_queue vm_pageout_queue_external;
+unsigned int vm_page_speculative_target = 0;
+
+vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
+boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;
+
+#if DEVELOPMENT || DEBUG
+unsigned long vm_cs_validated_resets = 0;
+#endif
/*
* Routine: vm_backing_store_disable
}
-/*
- * Routine: vm_pageout_object_allocate
- * Purpose:
- * Allocate an object for use as out-of-line memory in a
- * data_return/data_initialize message.
- * The page must be in an unlocked object.
- *
- * If the page belongs to a trusted pager, cleaning in place
- * will be used, which utilizes a special "pageout object"
- * containing private alias pages for the real page frames.
- * Untrusted pagers use normal out-of-line memory.
- */
-vm_object_t
-vm_pageout_object_allocate(
- vm_page_t m,
- vm_size_t size,
- vm_object_offset_t offset)
-{
- vm_object_t object = m->object;
- vm_object_t new_object;
-
- assert(object->pager_ready);
-
- new_object = vm_object_allocate(size);
-
- if (object->pager_trusted) {
- assert (offset < object->size);
-
- vm_object_lock(new_object);
- new_object->pageout = TRUE;
- new_object->shadow = object;
- new_object->can_persist = FALSE;
- new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
- new_object->shadow_offset = offset;
- vm_object_unlock(new_object);
-
- /*
- * Take a paging reference on the object. This will be dropped
- * in vm_pageout_object_terminate()
- */
- vm_object_lock(object);
- vm_object_paging_begin(object);
- vm_page_lock_queues();
- vm_page_unlock_queues();
- vm_object_unlock(object);
-
- vm_pageout_in_place++;
- } else
- vm_pageout_out_of_line++;
- return(new_object);
-}
-
#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
/*
* Routine: vm_pageout_object_terminate
* Purpose:
- * Destroy the pageout_object allocated by
- * vm_pageout_object_allocate(), and perform all of the
+ * Destroy the pageout_object, and perform all of the
* required cleanup actions.
*
* In/Out conditions:
vm_object_t object)
{
vm_object_t shadow_object;
- boolean_t shadow_internal;
/*
* Deal with the deallocation (last reference) of a pageout object
assert(object->pageout);
shadow_object = object->shadow;
vm_object_lock(shadow_object);
- shadow_internal = shadow_object->internal;
while (!queue_empty(&object->memq)) {
vm_page_t p, m;
/* caller's page list indication */
m->dump_cleaning = FALSE;
- /*
- * Account for the paging reference taken when
- * m->cleaning was set on this page.
- */
- vm_object_paging_end(shadow_object);
assert((m->dirty) || (m->precious) ||
(m->busy && m->cleaning));
assert(m->busy);
assert(m->wire_count == 1);
m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
m->pageout = FALSE;
#if MACH_CLUSTER_STATS
if (m->wanted) vm_pageout_target_collisions++;
if (m->dirty) {
CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
- vm_page_unwire(m);/* reactivates */
- VM_STAT(reactivations++);
+ vm_page_unwire(m, TRUE); /* reactivates */
+ VM_STAT_INCR(reactivations);
PAGE_WAKEUP_DONE(m);
} else {
CLUSTER_STAT(vm_pageout_target_page_freed++;)
* If prep_pin_count is nonzero, then someone is using the
* page, so make it active.
*/
- if (!m->active && !m->inactive && !m->private) {
+ if (!m->active && !m->inactive && !m->throttled && !m->private) {
if (m->reference)
vm_page_activate(m);
else
/* will take care of resetting dirty. We clear the */
/* modify however for the Programmed I/O case. */
pmap_clear_modify(m->phys_page);
- if(m->absent) {
- m->absent = FALSE;
- if(shadow_object->absent_count == 1)
- vm_object_absent_release(shadow_object);
- else
- shadow_object->absent_count--;
- }
+
+ m->absent = FALSE;
m->overwriting = FALSE;
} else if (m->overwriting) {
/* alternate request page list, write to page_list */
/* case. Occurs when the original page was wired */
/* at the time of the list request */
- assert(m->wire_count != 0);
- vm_page_unwire(m);/* reactivates */
+ assert(VM_PAGE_WIRED(m));
+ vm_page_unwire(m, TRUE); /* reactivates */
m->overwriting = FALSE;
} else {
/*
#endif
}
m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
/*
* Wakeup any thread waiting for the page to be un-cleaning.
/*
* Account for the paging reference taken in vm_paging_object_allocate.
*/
- vm_object_paging_end(shadow_object);
+ vm_object_activity_end(shadow_object);
vm_object_unlock(shadow_object);
assert(object->ref_count == 0);
assert(object->paging_in_progress == 0);
+ assert(object->activity_in_progress == 0);
assert(object->resident_page_count == 0);
return;
}
-/*
- * Routine: vm_pageout_setup
- * Purpose:
- * Set up a page for pageout (clean & flush).
- *
- * Move the page to a new object, as part of which it will be
- * sent to its memory manager in a memory_object_data_write or
- * memory_object_initialize message.
- *
- * The "new_object" and "new_offset" arguments
- * indicate where the page should be moved.
- *
- * In/Out conditions:
- * The page in question must not be on any pageout queues,
- * and must be busy. The object to which it belongs
- * must be unlocked, and the caller must hold a paging
- * reference to it. The new_object must not be locked.
- *
- * This routine returns a pointer to a place-holder page,
- * inserted at the same offset, to block out-of-order
- * requests for the page. The place-holder page must
- * be freed after the data_write or initialize message
- * has been sent.
- *
- * The original page is put on a paging queue and marked
- * not busy on exit.
- */
-vm_page_t
-vm_pageout_setup(
- register vm_page_t m,
- register vm_object_t new_object,
- vm_object_offset_t new_offset)
-{
- register vm_object_t old_object = m->object;
- vm_object_offset_t paging_offset;
- vm_object_offset_t offset;
- register vm_page_t holding_page;
- register vm_page_t new_m;
- boolean_t need_to_wire = FALSE;
-
-
- XPR(XPR_VM_PAGEOUT,
- "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
- (integer_t)m->object, (integer_t)m->offset,
- (integer_t)m, (integer_t)new_object,
- (integer_t)new_offset);
- assert(m && m->busy && !m->absent && !m->fictitious && !m->error &&
- !m->restart);
-
- assert(m->dirty || m->precious);
-
- /*
- * Create a place-holder page where the old one was, to prevent
- * attempted pageins of this page while we're unlocked.
- */
- VM_PAGE_GRAB_FICTITIOUS(holding_page);
-
- vm_object_lock(old_object);
-
- offset = m->offset;
- paging_offset = offset + old_object->paging_offset;
-
- if (old_object->pager_trusted) {
- /*
- * This pager is trusted, so we can clean this page
- * in place. Leave it in the old object, and mark it
- * cleaning & pageout.
- */
- new_m = holding_page;
- holding_page = VM_PAGE_NULL;
-
- /*
- * Set up new page to be private shadow of real page.
- */
- new_m->phys_page = m->phys_page;
- new_m->fictitious = FALSE;
- new_m->pageout = TRUE;
-
- /*
- * Mark real page as cleaning (indicating that we hold a
- * paging reference to be released via m_o_d_r_c) and
- * pageout (indicating that the page should be freed
- * when the pageout completes).
- */
- pmap_clear_modify(m->phys_page);
- vm_page_lock_queues();
- new_m->private = TRUE;
- vm_page_wire(new_m);
- m->cleaning = TRUE;
- m->pageout = TRUE;
-
- vm_page_wire(m);
- assert(m->wire_count == 1);
- vm_page_unlock_queues();
-
- m->dirty = TRUE;
- m->precious = FALSE;
- m->page_lock = VM_PROT_NONE;
- m->unusual = FALSE;
- m->unlock_request = VM_PROT_NONE;
- } else {
- /*
- * Cannot clean in place, so rip the old page out of the
- * object, and stick the holding page in. Set new_m to the
- * page in the new object.
- */
- vm_page_lock_queues();
- VM_PAGE_QUEUES_REMOVE(m);
- vm_page_remove(m);
-
- vm_page_insert(holding_page, old_object, offset);
- vm_page_unlock_queues();
-
- m->dirty = TRUE;
- m->precious = FALSE;
- new_m = m;
- new_m->page_lock = VM_PROT_NONE;
- new_m->unlock_request = VM_PROT_NONE;
-
- if (old_object->internal)
- need_to_wire = TRUE;
- }
- /*
- * Record that this page has been written out
- */
-#if MACH_PAGEMAP
- vm_external_state_set(old_object->existence_map, offset);
-#endif /* MACH_PAGEMAP */
-
- vm_object_unlock(old_object);
-
- vm_object_lock(new_object);
-
- /*
- * Put the page into the new object. If it is a not wired
- * (if it's the real page) it will be activated.
- */
-
- vm_page_lock_queues();
- vm_page_insert(new_m, new_object, new_offset);
- if (need_to_wire)
- vm_page_wire(new_m);
- else
- vm_page_activate(new_m);
- PAGE_WAKEUP_DONE(new_m);
- vm_page_unlock_queues();
-
- vm_object_unlock(new_object);
-
- /*
- * Return the placeholder page to simplify cleanup.
- */
- return (holding_page);
-}
-
/*
* Routine: vm_pageclean_setup
*
* necessarily flushed from the VM page cache.
* This is accomplished by cleaning in place.
*
- * The page must not be busy, and the object and page
- * queues must be locked.
- *
+ * The page must not be busy, and new_object
+ * must be locked.
+ *
*/
void
vm_pageclean_setup(
vm_object_t new_object,
vm_object_offset_t new_offset)
{
- vm_object_t old_object = m->object;
assert(!m->busy);
+#if 0
assert(!m->cleaning);
+#endif
XPR(XPR_VM_PAGEOUT,
"vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
- (integer_t)old_object, m->offset, (integer_t)m,
- (integer_t)new_m, new_offset);
+ m->object, m->offset, m,
+ new_m, new_offset);
pmap_clear_modify(m->phys_page);
- vm_object_paging_begin(old_object);
-
- /*
- * Record that this page has been written out
- */
-#if MACH_PAGEMAP
- vm_external_state_set(old_object->existence_map, m->offset);
-#endif /*MACH_PAGEMAP*/
/*
* Mark original page as cleaning in place.
* the real page.
*/
assert(new_m->fictitious);
+ assert(new_m->phys_page == vm_page_fictitious_addr);
new_m->fictitious = FALSE;
new_m->private = TRUE;
new_m->pageout = TRUE;
new_m->phys_page = m->phys_page;
+
+ vm_page_lockspin_queues();
vm_page_wire(new_m);
+ vm_page_unlock_queues();
vm_page_insert(new_m, new_object, new_offset);
assert(!new_m->wanted);
new_m->busy = FALSE;
}
-void
-vm_pageclean_copy(
- vm_page_t m,
- vm_page_t new_m,
- vm_object_t new_object,
- vm_object_offset_t new_offset)
-{
- XPR(XPR_VM_PAGEOUT,
- "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
- m, new_m, new_object, new_offset, 0);
-
- assert((!m->busy) && (!m->cleaning));
-
- assert(!new_m->private && !new_m->fictitious);
-
- pmap_clear_modify(m->phys_page);
-
- m->busy = TRUE;
- vm_object_paging_begin(m->object);
- vm_page_unlock_queues();
- vm_object_unlock(m->object);
-
- /*
- * Copy the original page to the new page.
- */
- vm_page_copy(m, new_m);
-
- /*
- * Mark the old page as clean. A request to pmap_is_modified
- * will get the right answer.
- */
- vm_object_lock(m->object);
- m->dirty = FALSE;
-
- vm_object_paging_end(m->object);
-
- vm_page_lock_queues();
- if (!m->active && !m->inactive)
- vm_page_activate(m);
- PAGE_WAKEUP_DONE(m);
-
- vm_page_insert(new_m, new_object, new_offset);
- vm_page_activate(new_m);
- new_m->busy = FALSE; /* No other thread can be waiting */
-}
-
-
/*
* Routine: vm_pageout_initialize_page
* Purpose:
vm_object_t object;
vm_object_offset_t paging_offset;
vm_page_t holding_page;
-
+ memory_object_t pager;
XPR(XPR_VM_PAGEOUT,
"vm_pageout_initialize_page, page 0x%X\n",
- (integer_t)m, 0, 0, 0, 0);
+ m, 0, 0, 0, 0);
assert(m->busy);
/*
*/
object = m->object;
paging_offset = m->offset + object->paging_offset;
- vm_object_paging_begin(object);
- if (m->absent || m->error || m->restart ||
- (!m->dirty && !m->precious)) {
+
+ if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
VM_PAGE_FREE(m);
panic("reservation without pageout?"); /* alan */
- vm_object_unlock(object);
+ vm_object_unlock(object);
+
+ return;
+ }
+
+ /*
+ * If there's no pager, then we can't clean the page. This should
+ * never happen since this should be a copy object and therefore not
+ * an external object, so the pager should always be there.
+ */
+
+ pager = object->pager;
+
+ if (pager == MEMORY_OBJECT_NULL) {
+ VM_PAGE_FREE(m);
+ panic("missing pager for copy object");
return;
}
/* set the page for future call to vm_fault_list_request */
+ vm_object_paging_begin(object);
holding_page = NULL;
- vm_page_lock_queues();
+
pmap_clear_modify(m->phys_page);
m->dirty = TRUE;
m->busy = TRUE;
m->list_req_pending = TRUE;
m->cleaning = TRUE;
m->pageout = TRUE;
+
+ vm_page_lockspin_queues();
vm_page_wire(m);
vm_page_unlock_queues();
+
vm_object_unlock(object);
/*
* [The object reference from its allocation is donated
* to the eventual recipient.]
*/
- memory_object_data_initialize(object->pager,
- paging_offset,
- PAGE_SIZE);
+ memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);
vm_object_lock(object);
+ vm_object_paging_end(object);
}
#if MACH_CLUSTER_STATS
} cluster_stats[MAXCLUSTERPAGES];
#endif /* MACH_CLUSTER_STATS */
-boolean_t allow_clustered_pageouts = FALSE;
/*
* vm_pageout_cluster:
XPR(XPR_VM_PAGEOUT,
"vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
- (integer_t)object, m->offset, (integer_t)m, 0, 0);
+ object, m->offset, m, 0, 0);
+
+ VM_PAGE_CHECK(m);
/*
* Only a certain kind of page is appreciated here.
*/
- assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
+ assert(m->busy && (m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);
+ assert(!m->throttled);
/*
* protect the object from collapse -
m->list_req_pending = TRUE;
m->cleaning = TRUE;
m->pageout = TRUE;
- m->laundry = TRUE;
if (object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
+
+ /*
+ * pgo_laundry count is tied to the laundry bit
+ */
+ m->laundry = TRUE;
q->pgo_laundry++;
m->pageout_queue = TRUE;
q->pgo_idle = FALSE;
thread_wakeup((event_t) &q->pgo_pending);
}
+
+ VM_PAGE_CHECK(m);
}
unsigned long vm_pageout_throttle_up_count = 0;
/*
- * A page is back from laundry. See if there are some pages waiting to
+ * A page is back from laundry or we are stealing it back from
+ * the laundering state. See if there are some pages waiting to
* go to laundry and if we can let some of them go now.
*
* Object and page queues must be locked.
{
struct vm_pageout_queue *q;
- vm_pageout_throttle_up_count++;
-
- assert(m->laundry);
assert(m->object != VM_OBJECT_NULL);
assert(m->object != kernel_object);
+ vm_pageout_throttle_up_count++;
+
if (m->object->internal == TRUE)
q = &vm_pageout_queue_internal;
else
q = &vm_pageout_queue_external;
- m->laundry = FALSE;
- q->pgo_laundry--;
+ if (m->pageout_queue == TRUE) {
- if (q->pgo_throttled == TRUE) {
- q->pgo_throttled = FALSE;
- thread_wakeup((event_t) &q->pgo_laundry);
+ queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
+ m->pageout_queue = FALSE;
+
+ m->pageq.next = NULL;
+ m->pageq.prev = NULL;
+
+ vm_object_paging_end(m->object);
+ }
+ if (m->laundry == TRUE) {
+ m->laundry = FALSE;
+ q->pgo_laundry--;
+
+ if (q->pgo_throttled == TRUE) {
+ q->pgo_throttled = FALSE;
+ thread_wakeup((event_t) &q->pgo_laundry);
+ }
+ if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+ q->pgo_draining = FALSE;
+ thread_wakeup((event_t) (&q->pgo_laundry+1));
+ }
}
}
* vm_page_free_wanted == 0.
*/
-#define DELAYED_UNLOCK_LIMIT (3 * MAX_UPL_TRANSFER)
+#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT (3 * MAX_UPL_TRANSFER)
#define FCS_IDLE 0
#define FCS_DELAYED 1
mach_timespec_t ts;
};
+
+/*
+ * VM memory pressure monitoring.
+ *
+ * vm_pageout_scan() keeps track of the number of pages it considers and
+ * reclaims, in the currently active vm_pageout_stats[vm_pageout_stat_now].
+ *
+ * compute_memory_pressure() is called every second from compute_averages()
+ * and moves "vm_pageout_stat_now" forward, to start accumulating the number
+ * of reclaimed pages in a new vm_pageout_stats[] bucket.
+ *
+ * mach_vm_pressure_monitor() collects past statistics about memory pressure.
+ * The caller provides the number of seconds ("nsecs") worth of statistics
+ * it wants, up to 30 seconds.
+ * It computes the number of pages reclaimed in the past "nsecs" seconds and
+ * also returns the number of pages the system still needs to reclaim at this
+ * moment in time.
+ */
+#define VM_PAGEOUT_STAT_SIZE 31
+struct vm_pageout_stat {
+ unsigned int considered;
+ unsigned int reclaimed;
+} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
+unsigned int vm_pageout_stat_now = 0;
+unsigned int vm_memory_pressure = 0;
+
+#define VM_PAGEOUT_STAT_BEFORE(i) \
+ (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
+#define VM_PAGEOUT_STAT_AFTER(i) \
+ (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
+
+/*
+ * Called from compute_averages().
+ */
+void
+compute_memory_pressure(
+ __unused void *arg)
+{
+ unsigned int vm_pageout_next;
+
+ vm_memory_pressure =
+ vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;
+
+ commpage_set_memory_pressure( vm_memory_pressure );
+
+ /* move "now" forward */
+ vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
+ vm_pageout_stats[vm_pageout_next].considered = 0;
+ vm_pageout_stats[vm_pageout_next].reclaimed = 0;
+ vm_pageout_stat_now = vm_pageout_next;
+}
+
+unsigned int
+mach_vm_ctl_page_free_wanted(void)
+{
+ unsigned int page_free_target, page_free_count, page_free_wanted;
+
+ page_free_target = vm_page_free_target;
+ page_free_count = vm_page_free_count;
+ if (page_free_target > page_free_count) {
+ page_free_wanted = page_free_target - page_free_count;
+ } else {
+ page_free_wanted = 0;
+ }
+
+ return page_free_wanted;
+}
+
+kern_return_t
+mach_vm_pressure_monitor(
+ boolean_t wait_for_pressure,
+ unsigned int nsecs_monitored,
+ unsigned int *pages_reclaimed_p,
+ unsigned int *pages_wanted_p)
+{
+ wait_result_t wr;
+ unsigned int vm_pageout_then, vm_pageout_now;
+ unsigned int pages_reclaimed;
+
+ /*
+ * We don't take the vm_page_queue_lock here because we don't want
+ * mach_vm_pressure_monitor() to get in the way of the vm_pageout_scan()
+ * thread when it's trying to reclaim memory. We don't need fully
+ * accurate monitoring anyway...
+ */
+
+ if (wait_for_pressure) {
+ /* wait until there's memory pressure */
+ while (vm_page_free_count >= vm_page_free_target) {
+ wr = assert_wait((event_t) &vm_page_free_wanted,
+ THREAD_INTERRUPTIBLE);
+ if (wr == THREAD_WAITING) {
+ wr = thread_block(THREAD_CONTINUE_NULL);
+ }
+ if (wr == THREAD_INTERRUPTED) {
+ return KERN_ABORTED;
+ }
+ if (wr == THREAD_AWAKENED) {
+ /*
+ * The memory pressure might have already
+ * been relieved but let's not block again
+ * and let's report that there was memory
+ * pressure at some point.
+ */
+ break;
+ }
+ }
+ }
+
+ /* provide the number of pages the system wants to reclaim */
+ if (pages_wanted_p != NULL) {
+ *pages_wanted_p = mach_vm_ctl_page_free_wanted();
+ }
+
+ if (pages_reclaimed_p == NULL) {
+ return KERN_SUCCESS;
+ }
+
+ /* provide number of pages reclaimed in the last "nsecs_monitored" */
+ do {
+ vm_pageout_now = vm_pageout_stat_now;
+ pages_reclaimed = 0;
+ for (vm_pageout_then =
+ VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
+ vm_pageout_then != vm_pageout_now &&
+ nsecs_monitored-- != 0;
+ vm_pageout_then =
+ VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
+ pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
+ }
+ } while (vm_pageout_now != vm_pageout_stat_now);
+ *pages_reclaimed_p = pages_reclaimed;
+
+ return KERN_SUCCESS;
+}
+
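/*
 * A minimal usage sketch, assuming only mach_vm_pressure_monitor(),
 * printf() and the Mach types already used in this file; the function
 * name below is hypothetical and the block is illustrative only.  It
 * samples the reclaim history for the last 10 seconds without blocking
 * for pressure.
 */
#if 0	/* illustrative sketch */
static void
vm_pressure_sample_sketch(void)
{
	unsigned int	pages_reclaimed = 0;
	unsigned int	pages_wanted = 0;
	kern_return_t	kr;

	/* FALSE: don't block waiting for pressure; 10: seconds of history */
	kr = mach_vm_pressure_monitor(FALSE, 10,
				      &pages_reclaimed, &pages_wanted);
	if (kr == KERN_SUCCESS)
		printf("pressure: %u pages reclaimed in last 10s, %u still wanted\n",
		       pages_reclaimed, pages_wanted);
}
#endif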
+/* Page States: Used below to maintain the page state
+ before it's removed from its Q. This saved state
+ helps us do the right accounting in certain cases
+*/
+
+#define PAGE_STATE_SPECULATIVE 1
+#define PAGE_STATE_THROTTLED 2
+#define PAGE_STATE_ZEROFILL 3
+#define PAGE_STATE_INACTIVE 4
+
+#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m) \
+ MACRO_BEGIN \
+ /* \
+ * If a "reusable" page somehow made it back into \
+ * the active queue, it's been re-used and is not \
+ * quite re-usable. \
+ * If the VM object was "all_reusable", consider it \
+ * as "all re-used" instead of converting it to \
+ * "partially re-used", which could be expensive. \
+ */ \
+ if ((m)->reusable || \
+ (m)->object->all_reusable) { \
+ vm_object_reuse_pages((m)->object, \
+ (m)->offset, \
+ (m)->offset + PAGE_SIZE_64, \
+ FALSE); \
+ } \
+ MACRO_END
+
void
vm_pageout_scan(void)
{
unsigned int loop_count = 0;
unsigned int inactive_burst_count = 0;
unsigned int active_burst_count = 0;
- vm_page_t local_freeq = 0;
+ unsigned int reactivated_this_call;
+ unsigned int reactivate_limit;
+ vm_page_t local_freeq = NULL;
int local_freed = 0;
- int delayed_unlock = 0;
- int need_internal_inactive = 0;
+ int delayed_unlock;
int refmod_state = 0;
int vm_pageout_deadlock_target = 0;
struct vm_pageout_queue *iq;
struct vm_pageout_queue *eq;
- struct flow_control flow_control;
- boolean_t active_throttled = FALSE;
+ struct vm_speculative_age_q *sq;
+ struct flow_control flow_control = { 0, { 0, 0 } };
boolean_t inactive_throttled = FALSE;
+ boolean_t try_failed;
mach_timespec_t ts;
unsigned int msecs = 0;
vm_object_t object;
-
-
+ vm_object_t last_object_tried;
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
+ unsigned int zf_ratio;
+ unsigned int zf_run_count;
+#else
+ uint64_t zf_ratio;
+ uint64_t zf_run_count;
+#endif
+ uint32_t catch_up_count = 0;
+ uint32_t inactive_reclaim_run;
+ boolean_t forced_reclaim;
+ int page_prev_state = 0;
+
flow_control.state = FCS_IDLE;
iq = &vm_pageout_queue_internal;
eq = &vm_pageout_queue_external;
+ sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
+
XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
+
+ vm_page_lock_queues();
+ delayed_unlock = 1; /* must be nonzero if Qs are locked, 0 if unlocked */
+
+ /*
+ * Calculate the max number of referenced pages on the inactive
+ * queue that we will reactivate.
+ */
+ reactivated_this_call = 0;
+ reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
+ vm_page_inactive_count);
+ inactive_reclaim_run = 0;
+
+
/*???*/ /*
* We want to gradually dribble pages from the active queue
* to the inactive queue. If we let the inactive queue get
* aren't vm-privileged. If we kept sending dirty pages to them,
* we could exhaust the free list.
*/
- vm_page_lock_queues();
- delayed_unlock = 1;
Restart:
+ assert(delayed_unlock!=0);
+
+ /*
+ * A page is "zero-filled" if it was not paged in from somewhere,
+ * and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
+ * Recalculate the zero-filled page ratio. We use this to apportion
+ * victimized pages between the normal and zero-filled inactive
+ * queues according to their relative abundance in memory. Thus if a task
+ * is flooding memory with zf pages, we begin to hunt them down.
+ * It would be better to throttle greedy tasks at a higher level,
+ * but at the moment mach vm cannot do this.
+ */
+ {
+#if defined(__ppc__) /* On ppc, vm statistics are still 32-bit */
+ uint32_t total = vm_page_active_count + vm_page_inactive_count;
+ uint32_t normal = total - vm_zf_count;
+#else
+ uint64_t total = vm_page_active_count + vm_page_inactive_count;
+ uint64_t normal = total - vm_zf_count;
+#endif
+
+ /* zf_ratio is the number of zf pages we victimize per normal page */
+
+ if (vm_zf_count < vm_accellerate_zf_pageout_trigger)
+ zf_ratio = 0;
+ else if ((vm_zf_count <= normal) || (normal == 0))
+ zf_ratio = 1;
+ else
+ zf_ratio = vm_zf_count / normal;
+
+ zf_run_count = 0;
+ }
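/*
 * Worked example (a sketch; the counts are hypothetical): if 300000 of
 * 400000 active+inactive pages are zero-fill, then normal = 100000 and
 * zf_ratio = 3, so the victim-selection code below will take up to three
 * zero-fill pages in a row (while vm_zf_queue_count stays at or above
 * zf_queue_min_count) before a normal inactive page resets zf_run_count.
 */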
+
/*
* Recalculate vm_page_inactivate_target.
*/
vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
- vm_page_inactive_count);
- object = NULL;
+ vm_page_inactive_count +
+ vm_page_speculative_count);
+ /*
+ * don't want to wake the pageout_scan thread up every time we fall below
+ * the targets... set a low water mark at 0.25% below the target
+ */
+ vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);
+ vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count);
+ object = NULL;
+ last_object_tried = NULL;
+ try_failed = FALSE;
+
+ if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
+ catch_up_count = vm_page_inactive_count + vm_page_speculative_count;
+ else
+ catch_up_count = 0;
+
for (;;) {
vm_page_t m;
- if (delayed_unlock == 0)
- vm_page_lock_queues();
+ DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);
- active_burst_count = vm_page_active_count;
+ if (delayed_unlock == 0) {
+ vm_page_lock_queues();
+ delayed_unlock = 1;
+ }
- if (active_burst_count > vm_pageout_burst_active_throttle)
- active_burst_count = vm_pageout_burst_active_throttle;
+ /*
+ * Don't sweep through active queue more than the throttle
+ * which should be kept relatively low
+ */
+ active_burst_count = MIN(vm_pageout_burst_active_throttle,
+ vm_page_active_count);
/*
* Move pages from active to inactive.
*/
- while ((need_internal_inactive ||
- vm_page_inactive_count < vm_page_inactive_target) &&
- !queue_empty(&vm_page_queue_active) &&
- ((active_burst_count--) > 0)) {
+ if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
+ goto done_moving_active_pages;
+
+ while (!queue_empty(&vm_page_queue_active) && active_burst_count) {
+
+ if (active_burst_count)
+ active_burst_count--;
vm_pageout_active++;
assert(m->active && !m->inactive);
assert(!m->laundry);
assert(m->object != kernel_object);
+ assert(m->phys_page != vm_page_guard_addr);
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
/*
* Try to lock object; since we've already got the
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
- if (!vm_object_lock_try(m->object)) {
+ if (!vm_object_lock_try_scan(m->object)) {
/*
* move page to end of active queue and continue
*/
vm_page_t, pageq);
queue_enter(&vm_page_queue_active, m,
vm_page_t, pageq);
+
+ try_failed = TRUE;
+ m = (vm_page_t) queue_first(&vm_page_queue_active);
+ /*
+ * this is the next object we're going to be interested in
+ * try to make sure it's available after the mutex_yield
+ * returns control
+ */
+ vm_pageout_scan_wants_object = m->object;
+
goto done_with_activepage;
}
object = m->object;
+
+ try_failed = FALSE;
}
+
/*
* if the page is BUSY, then we pull it
* off the active queue and leave it alone.
goto done_with_activepage;
}
- if (need_internal_inactive) {
- /*
- * If we're unable to make forward progress
- * with the current set of pages on the
- * inactive queue due to busy objects or
- * throttled pageout queues, then
- * move a page that is already clean
- * or belongs to a pageout queue that
- * isn't currently throttled
- */
- active_throttled = FALSE;
-
- if (object->internal) {
- if ((VM_PAGE_Q_THROTTLED(iq) || !IP_VALID(memory_manager_default)))
- active_throttled = TRUE;
- } else if (VM_PAGE_Q_THROTTLED(eq)) {
- active_throttled = TRUE;
- }
- if (active_throttled == TRUE) {
- if (!m->dirty) {
- refmod_state = pmap_get_refmod(m->phys_page);
-
- if (refmod_state & VM_MEM_REFERENCED)
- m->reference = TRUE;
- if (refmod_state & VM_MEM_MODIFIED)
- m->dirty = TRUE;
- }
- if (m->dirty || m->precious) {
- /*
- * page is dirty and targets a THROTTLED queue
- * so all we can do is move it back to the
- * end of the active queue to get it out
- * of the way
- */
- queue_remove(&vm_page_queue_active, m,
- vm_page_t, pageq);
- queue_enter(&vm_page_queue_active, m,
- vm_page_t, pageq);
- vm_pageout_scan_active_throttled++;
+ /* deal with a rogue "reusable" page */
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
- goto done_with_activepage;
- }
- }
- vm_pageout_scan_active_throttle_success++;
- need_internal_inactive--;
- }
/*
* Deactivate the page while holding the object
* locked, so we know the page is still not busy.
* can handle that.
*/
vm_page_deactivate(m);
+
done_with_activepage:
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
+ if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
if (object != NULL) {
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_object_unlock(object);
object = NULL;
}
if (local_freeq) {
- vm_page_free_list(local_freeq);
+ vm_page_unlock_queues();
+ vm_page_free_list(local_freeq, TRUE);
- local_freeq = 0;
+ local_freeq = NULL;
local_freed = 0;
- }
- delayed_unlock = 0;
- vm_page_unlock_queues();
+ vm_page_lock_queues();
+ } else
+ lck_mtx_yield(&vm_page_queue_lock);
+
+ delayed_unlock = 1;
- mutex_pause();
- vm_page_lock_queues();
/*
* continue the while loop processing
* the active queue... need to hold
* the page queues lock
*/
- continue;
}
}
* and the inactive queue
**********************************************************************/
-
+done_moving_active_pages:
/*
* We are done if we have met our target *and*
vm_object_unlock(object);
object = NULL;
}
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
if (local_freeq) {
- vm_page_free_list(local_freeq);
+ vm_page_unlock_queues();
+ vm_page_free_list(local_freeq, TRUE);
- local_freeq = 0;
+ local_freeq = NULL;
local_freed = 0;
+ vm_page_lock_queues();
}
- mutex_lock(&vm_page_queue_free_lock);
+ /*
+ * inactive target still not met... keep going
+ * until we get the queues balanced
+ */
+
+ /*
+ * Recalculate vm_page_inactivate_target.
+ */
+ vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
+ vm_page_inactive_count +
+ vm_page_speculative_count);
+
+#ifndef CONFIG_EMBEDDED
+ /*
+ * XXX: if no active pages can be reclaimed, pageout scan can be stuck trying
+ * to balance the queues
+ */
+ if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
+ !queue_empty(&vm_page_queue_active))
+ continue;
+#endif
+
+ lck_mtx_lock(&vm_page_queue_free_lock);
if ((vm_page_free_count >= vm_page_free_target) &&
- (vm_page_free_wanted == 0)) {
+ (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
vm_page_unlock_queues();
thread_wakeup((event_t) &vm_pageout_garbage_collect);
+
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
return;
}
- mutex_unlock(&vm_page_queue_free_lock);
+ lck_mtx_unlock(&vm_page_queue_free_lock);
+ }
+
+ /*
+ * Before anything, we check if we have any ripe volatile
+ * objects around. If so, try to purge the first object.
+ * If the purge fails, fall through to reclaim a page instead.
+ * If the purge succeeds, go back to the top and reevaluate
+ * the new memory situation.
+ */
+ assert (available_for_purge>=0);
+ if (available_for_purge)
+ {
+ if (object != NULL) {
+ vm_object_unlock(object);
+ object = NULL;
+ }
+ if(TRUE == vm_purgeable_object_purge_one()) {
+ continue;
+ }
}
+
+ if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
+ /*
+ * try to pull pages from the aging bins
+ * see vm_page.h for an explanation of how
+ * this mechanism works
+ */
+ struct vm_speculative_age_q *aq;
+ mach_timespec_t ts_fully_aged;
+ boolean_t can_steal = FALSE;
+ int num_scanned_queues;
+
+ aq = &vm_page_queue_speculative[speculative_steal_index];
+
+ num_scanned_queues = 0;
+ while (queue_empty(&aq->age_q) &&
+ num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
+
+ speculative_steal_index++;
+
+ if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
+ speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
+
+ aq = &vm_page_queue_speculative[speculative_steal_index];
+ }
+
+ if (num_scanned_queues ==
+ VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
+ /*
+ * XXX We've scanned all the speculative
+ * queues but still haven't found one
+ * that is not empty, even though
+ * vm_page_speculative_count is not 0.
+ */
+ /* report the anomaly... */
+ printf("vm_pageout_scan: "
+ "all speculative queues empty "
+ "but count=%d. Re-adjusting.\n",
+ vm_page_speculative_count);
+ if (vm_page_speculative_count >
+ vm_page_speculative_count_drift_max)
+ vm_page_speculative_count_drift_max = vm_page_speculative_count;
+ vm_page_speculative_count_drifts++;
+#if 6553678
+ Debugger("vm_pageout_scan: no speculative pages");
+#endif
+ /* readjust... */
+ vm_page_speculative_count = 0;
+ /* ... and continue */
+ continue;
+ }
+
+ if (vm_page_speculative_count > vm_page_speculative_target)
+ can_steal = TRUE;
+ else {
+ ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) / 1000;
+ ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * VM_PAGE_SPECULATIVE_Q_AGE_MS) % 1000)
+ * 1000 * NSEC_PER_USEC;
+ ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);
+
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
+
+ if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
+ can_steal = TRUE;
+ }
+ if (can_steal == TRUE)
+ vm_page_speculate_ageit(aq);
+ }
/*
* Sometimes we have to pause:
* 3) Loop control - no acceptable pages found on the inactive queue
* within the last vm_pageout_burst_inactive_throttle iterations
*/
- if ((queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf))) {
+ if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_zf) && queue_empty(&sq->age_q) &&
+ (VM_PAGE_Q_THROTTLED(iq) || queue_empty(&vm_page_queue_throttled))) {
vm_pageout_scan_empty_throttle++;
msecs = vm_pageout_empty_wait;
goto vm_pageout_scan_delay;
- } else if (inactive_burst_count >= vm_pageout_burst_inactive_throttle) {
+ } else if (inactive_burst_count >=
+ MIN(vm_pageout_burst_inactive_throttle,
+ (vm_page_inactive_count +
+ vm_page_speculative_count))) {
vm_pageout_scan_burst_throttle++;
msecs = vm_pageout_burst_wait;
goto vm_pageout_scan_delay;
- } else if (VM_PAGE_Q_THROTTLED(iq)) {
+ } else if (VM_PAGE_Q_THROTTLED(iq) && IP_VALID(memory_manager_default)) {
+ clock_sec_t sec;
+ clock_nsec_t nsec;
switch (flow_control.state) {
reset_deadlock_timer:
ts.tv_sec = vm_pageout_deadlock_wait / 1000;
ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
- clock_get_system_nanotime(
- &flow_control.ts.tv_sec,
- (uint32_t *) &flow_control.ts.tv_nsec);
+ clock_get_system_nanotime(&sec, &nsec);
+ flow_control.ts.tv_sec = (unsigned int) sec;
+ flow_control.ts.tv_nsec = nsec;
ADD_MACH_TIMESPEC(&flow_control.ts, &ts);
flow_control.state = FCS_DELAYED;
break;
case FCS_DELAYED:
- clock_get_system_nanotime(
- &ts.tv_sec,
- (uint32_t *) &ts.tv_nsec);
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = (unsigned int) sec;
+ ts.tv_nsec = nsec;
if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
/*
* with a new timeout target since we have no way of knowing
* whether we've broken the deadlock except through observation
* of the queue associated with the default pager... we need to
- * stop moving pagings and allow the system to run to see what
+ * stop moving pages and allow the system to run to see what
* state it settles into.
*/
- vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted;
+ vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
vm_pageout_scan_deadlock_detected++;
flow_control.state = FCS_DEADLOCK_DETECTED;
vm_object_unlock(object);
object = NULL;
}
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
+
if (local_freeq) {
- vm_page_free_list(local_freeq);
+ vm_page_unlock_queues();
+ vm_page_free_list(local_freeq, TRUE);
- local_freeq = 0;
+ local_freeq = NULL;
local_freed = 0;
+ vm_page_lock_queues();
+
+ if (flow_control.state == FCS_DELAYED &&
+ !VM_PAGE_Q_THROTTLED(iq)) {
+ flow_control.state = FCS_IDLE;
+ vm_pageout_scan_throttle_aborted++;
+ goto consider_inactive;
+ }
}
- assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
+#if CONFIG_EMBEDDED
+ {
+ int percent_avail;
+ /*
+ * Decide if we need to send a memory status notification.
+ */
+ percent_avail =
+ (vm_page_active_count + vm_page_inactive_count +
+ vm_page_speculative_count + vm_page_free_count +
+ (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
+ atop_64(max_mem);
+ if (percent_avail >= (kern_memorystatus_level + 5) ||
+ percent_avail <= (kern_memorystatus_level - 5)) {
+ kern_memorystatus_level = percent_avail;
+ thread_wakeup((event_t)&kern_memorystatus_wakeup);
+ }
+ }
+#endif
+ assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
counter(c_vm_pageout_scan_block++);
vm_page_unlock_queues();
-
+
+ assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);
+
thread_block(THREAD_CONTINUE_NULL);
vm_page_lock_queues();
iq->pgo_throttled = FALSE;
- if (loop_count >= vm_page_inactive_count) {
- if (VM_PAGE_Q_THROTTLED(eq) || VM_PAGE_Q_THROTTLED(iq)) {
- /*
- * Make sure we move enough "appropriate"
- * pages to the inactive queue before trying
- * again.
- */
- need_internal_inactive = vm_pageout_inactive_relief;
- }
+ if (loop_count >= vm_page_inactive_count)
loop_count = 0;
- }
inactive_burst_count = 0;
goto Restart;
inactive_burst_count++;
vm_pageout_inactive++;
- if (!queue_empty(&vm_page_queue_inactive)) {
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ /* Choose a victim. */
+
+ while (1) {
+ m = NULL;
- if (m->clustered && (m->no_isync == TRUE)) {
- goto use_this_page;
+ if (IP_VALID(memory_manager_default)) {
+ assert(vm_page_throttled_count == 0);
+ assert(queue_empty(&vm_page_queue_throttled));
}
- }
- if (vm_zf_count < vm_accellerate_zf_pageout_trigger) {
- vm_zf_iterator = 0;
- } else {
- last_page_zf = 0;
- if((vm_zf_iterator+=1) >= vm_zf_iterator_count) {
- vm_zf_iterator = 0;
+
+ /*
+ * The most eligible pages are ones we paged in speculatively,
+ * but which have not yet been touched.
+ */
+ if ( !queue_empty(&sq->age_q) ) {
+ m = (vm_page_t) queue_first(&sq->age_q);
+ break;
}
+ /*
+ * Time for a zero-filled inactive page?
+ */
+ if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
+ queue_empty(&vm_page_queue_inactive)) {
+ if ( !queue_empty(&vm_page_queue_zf) ) {
+ m = (vm_page_t) queue_first(&vm_page_queue_zf);
+ zf_run_count++;
+ break;
+ }
+ }
+ /*
+ * It's either a normal inactive page or nothing.
+ */
+ if ( !queue_empty(&vm_page_queue_inactive) ) {
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ zf_run_count = 0;
+ break;
+ }
+
+ panic("vm_pageout: no victim");
}
- if (queue_empty(&vm_page_queue_zf) ||
- (((last_page_zf) || (vm_zf_iterator == 0)) &&
- !queue_empty(&vm_page_queue_inactive))) {
- m = (vm_page_t) queue_first(&vm_page_queue_inactive);
- last_page_zf = 0;
- } else {
- m = (vm_page_t) queue_first(&vm_page_queue_zf);
- last_page_zf = 1;
- }
-use_this_page:
- assert(!m->active && m->inactive);
+
+ assert(!m->active && (m->inactive || m->speculative || m->throttled));
assert(!m->laundry);
assert(m->object != kernel_object);
+ assert(m->phys_page != vm_page_guard_addr);
+
+ if (!m->speculative) {
+ vm_pageout_stats[vm_pageout_stat_now].considered++;
+ }
+
+ DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
/*
- * Try to lock object; since we've alread got the
- * page queues lock, we can only 'try' for this one.
- * if the 'try' fails, we need to do a mutex_pause
- * to allow the owner of the object lock a chance to
- * run... otherwise, we're likely to trip over this
- * object in the same state as we work our way through
- * the queue... clumps of pages associated with the same
- * object are fairly typical on the inactive and active queues
+ * check to see if we currently are working
+ * with the same object... if so, we've
+ * already got the lock
*/
if (m->object != object) {
+ /*
+ * the object associated with candidate page is
+ * different from the one we were just working
+ * with... dump the lock if we still own it
+ */
if (object != NULL) {
vm_object_unlock(object);
object = NULL;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
}
- if (!vm_object_lock_try(m->object)) {
+ /*
+ * Try to lock object; since we've already got the
+ * page queues lock, we can only 'try' for this one.
+ * if the 'try' fails, we need to do a mutex_pause
+ * to allow the owner of the object lock a chance to
+ * run... otherwise, we're likely to trip over this
+ * object in the same state as we work our way through
+ * the queue... clumps of pages associated with the same
+ * object are fairly typical on the inactive and active queues
+ */
+ if (!vm_object_lock_try_scan(m->object)) {
+ vm_pageout_inactive_nolock++;
+
+ requeue_page:
/*
* Move page to end and continue.
* Don't re-issue ticket
*/
if (m->zero_fill) {
+ if (m->speculative) {
+ panic("vm_pageout_scan(): page %p speculative and zero-fill !?\n", m);
+ }
+ assert(!m->speculative);
queue_remove(&vm_page_queue_zf, m,
vm_page_t, pageq);
queue_enter(&vm_page_queue_zf, m,
vm_page_t, pageq);
+ } else if (m->speculative) {
+ remque(&m->pageq);
+ m->speculative = FALSE;
+ vm_page_speculative_count--;
+
+ /*
+ * move to the head of the inactive queue
+ * to get it out of the way... the speculative
+ * queue is generally too small to depend
+ * on there being enough pages from other
+ * objects to make cycling it back on the
+ * same queue a winning proposition
+ */
+ queue_enter_first(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ m->inactive = TRUE;
+ vm_page_inactive_count++;
+ token_new_pagecount++;
+ } else if (m->throttled) {
+ queue_remove(&vm_page_queue_throttled, m,
+ vm_page_t, pageq);
+ m->throttled = FALSE;
+ vm_page_throttled_count--;
+
+ /*
+ * not throttled any more, so can stick
+ * it on the inactive queue.
+ */
+ queue_enter(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ m->inactive = TRUE;
+ vm_page_inactive_count++;
+ token_new_pagecount++;
} else {
queue_remove(&vm_page_queue_inactive, m,
vm_page_t, pageq);
+#if MACH_ASSERT
+ vm_page_inactive_count--; /* balance for purgeable queue asserts */
+#endif
+ vm_purgeable_q_advance_all();
+
queue_enter(&vm_page_queue_inactive, m,
vm_page_t, pageq);
+#if MACH_ASSERT
+ vm_page_inactive_count++; /* balance for purgeable queue asserts */
+#endif
+ token_new_pagecount++;
}
- vm_pageout_inactive_nolock++;
+ pmap_clear_reference(m->phys_page);
+ m->reference = FALSE;
+
+ if ( !queue_empty(&sq->age_q) )
+ m = (vm_page_t) queue_first(&sq->age_q);
+ else if ( ((zf_run_count < zf_ratio) && vm_zf_queue_count >= zf_queue_min_count) ||
+ queue_empty(&vm_page_queue_inactive)) {
+ if ( !queue_empty(&vm_page_queue_zf) )
+ m = (vm_page_t) queue_first(&vm_page_queue_zf);
+ } else if ( !queue_empty(&vm_page_queue_inactive) ) {
+ m = (vm_page_t) queue_first(&vm_page_queue_inactive);
+ }
+ /*
+ * this is the next object we're going to be interested in
+ * try to make sure it's available after the mutex_yield
+ * returns control
+ */
+ vm_pageout_scan_wants_object = m->object;
/*
* force us to dump any collected free pages
* and to pause before moving on
*/
- delayed_unlock = DELAYED_UNLOCK_LIMIT + 1;
+ try_failed = TRUE;
goto done_with_inactivepage;
}
object = m->object;
- }
- /*
- * If the page belongs to a purgable object with no pending copies
- * against it, then we reap all of the pages in the object
- * and note that the object has been "emptied". It'll be up to the
- * application the discover this and recreate its contents if desired.
- */
- if ((object->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
- object->purgable == VM_OBJECT_PURGABLE_EMPTY) &&
- object->copy == VM_OBJECT_NULL) {
-
- (void) vm_object_purge(object);
- vm_pageout_purged_objects++;
- /*
- * we've just taken all of the pages from this object,
- * so drop the lock now since we're not going to find
- * any more pages belonging to it anytime soon
- */
- vm_object_unlock(object);
- object = NULL;
-
- inactive_burst_count = 0;
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
- goto done_with_inactivepage;
+ try_failed = FALSE;
}
/*
* one of its logically adjacent fellows is
* targeted.
*/
- if (m->zero_fill) {
- queue_remove(&vm_page_queue_zf, m,
- vm_page_t, pageq);
- queue_enter(&vm_page_queue_zf, m,
- vm_page_t, pageq);
- last_page_zf = 1;
- vm_zf_iterator = vm_zf_iterator_count - 1;
- } else {
- queue_remove(&vm_page_queue_inactive, m,
- vm_page_t, pageq);
- queue_enter(&vm_page_queue_inactive, m,
- vm_page_t, pageq);
- last_page_zf = 0;
- vm_zf_iterator = 1;
- }
vm_pageout_inactive_avoid++;
-
- goto done_with_inactivepage;
+ goto requeue_page;
}
/*
- * Remove the page from the inactive list.
+ * Remove the page from its list.
*/
- if (m->zero_fill) {
- queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
+ if (m->speculative) {
+ remque(&m->pageq);
+ page_prev_state = PAGE_STATE_SPECULATIVE;
+ m->speculative = FALSE;
+ vm_page_speculative_count--;
+ } else if (m->throttled) {
+ queue_remove(&vm_page_queue_throttled, m, vm_page_t, pageq);
+ page_prev_state = PAGE_STATE_THROTTLED;
+ m->throttled = FALSE;
+ vm_page_throttled_count--;
} else {
- queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ if (m->zero_fill) {
+ queue_remove(&vm_page_queue_zf, m, vm_page_t, pageq);
+ page_prev_state = PAGE_STATE_ZEROFILL;
+ vm_zf_queue_count--;
+ } else {
+ page_prev_state = PAGE_STATE_INACTIVE;
+ queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
+ }
+ m->inactive = FALSE;
+ if (!m->fictitious)
+ vm_page_inactive_count--;
+ vm_purgeable_q_advance_all();
}
+
m->pageq.next = NULL;
m->pageq.prev = NULL;
- m->inactive = FALSE;
- if (!m->fictitious)
- vm_page_inactive_count--;
- if (m->busy || !object->alive) {
+ if ( !m->fictitious && catch_up_count)
+ catch_up_count--;
+
+ /*
+ * ENCRYPTED SWAP:
+ * if this page has already been picked up as part of a
+ * page-out cluster, it will be busy because it is being
+ * encrypted (see vm_object_upl_request()). But we still
+ * want to demote it from "clean-in-place" (aka "adjacent")
+ * to "clean-and-free" (aka "target"), so let's ignore its
+ * "busy" bit here and proceed to check for "cleaning" a
+ * little bit below...
+ */
+ if ( !m->encrypted_cleaning && (m->busy || !object->alive)) {
/*
* Somebody is already playing with this page.
* Leave it off the pageout queues.
+ *
*/
vm_pageout_inactive_busy++;
vm_pageout_scan_inactive_throttle_success++;
vm_pageout_deadlock_target--;
}
+
+ DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);
+
+ if (object->internal) {
+ DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
+ } else {
+ DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
+ }
+ vm_page_free_prepare_queues(m);
+
+ /*
+ * remove page from object here since we're already
+ * behind the object lock... defer the rest of the work
+ * we'd normally do in vm_page_free_prepare_object
+ * until 'vm_page_free_list' is called
+ */
if (m->tabled)
- vm_page_remove(m); /* clears tabled, object, offset */
- if (m->absent)
- vm_object_absent_release(object);
+ vm_page_remove(m, TRUE);
assert(m->pageq.next == NULL &&
m->pageq.prev == NULL);
inactive_burst_count = 0;
+ if(page_prev_state != PAGE_STATE_SPECULATIVE) {
+ vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
+ page_prev_state = 0;
+ }
+
goto done_with_inactivepage;
}
goto done_with_inactivepage;
}
+ /*
+ * If the object is empty, the page must be reclaimed even
+ * if dirty or used.
+ * If the page belongs to a volatile object, we stick it back
+ * on.
+ */
+ if (object->copy == VM_OBJECT_NULL) {
+ if (object->purgable == VM_PURGABLE_EMPTY) {
+ m->busy = TRUE;
+ if (m->pmapped == TRUE) {
+ /* unmap the page */
+ refmod_state = pmap_disconnect(m->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED) {
+ m->dirty = TRUE;
+ }
+ }
+ if (m->dirty || m->precious) {
+ /* we saved the cost of cleaning this page ! */
+ vm_page_purged_count++;
+ }
+ goto reclaim_page;
+ }
+ if (object->purgable == VM_PURGABLE_VOLATILE) {
+ /* if it's wired, we can't put it on our queue */
+ assert(!VM_PAGE_WIRED(m));
+ /* just stick it back on! */
+ goto reactivate_page;
+ }
+ }
+
/*
* If it's being used, reactivate.
* (Fictitious pages are either busy or absent.)
+ * First, update the reference and dirty bits
+ * to make sure the page is unreferenced.
*/
- if ( (!m->reference) ) {
+ refmod_state = -1;
+
+ if (m->reference == FALSE && m->pmapped == TRUE) {
refmod_state = pmap_get_refmod(m->phys_page);
if (refmod_state & VM_MEM_REFERENCED)
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
}
- if (m->reference) {
-was_referenced:
- vm_page_activate(m);
- VM_STAT(reactivations++);
- vm_pageout_inactive_used++;
- last_page_zf = 0;
- inactive_burst_count = 0;
+ if (m->reference || m->dirty) {
+ /* deal with a rogue "reusable" page */
+ VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
+ }
- goto done_with_inactivepage;
+ if (m->reference && !m->no_cache) {
+ /*
+ * The page we pulled off the inactive list has
+ * been referenced. It is possible for other
+ * processors to be touching pages faster than we
+ * can clear the referenced bit and traverse the
+ * inactive queue, so we limit the number of
+ * reactivations.
+ */
+ if (++reactivated_this_call >= reactivate_limit) {
+ vm_pageout_reactivation_limit_exceeded++;
+ } else if (catch_up_count) {
+ vm_pageout_catch_ups++;
+ } else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
+ vm_pageout_inactive_force_reclaim++;
+ } else {
+ uint32_t isinuse;
+reactivate_page:
+ if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
+ vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
+ /*
+ * no explicit mappings of this object exist
+ * and it's not open via the filesystem
+ */
+ vm_page_deactivate(m);
+ vm_pageout_inactive_deactivated++;
+ } else {
+ /*
+ * The page was/is being used, so put back on active list.
+ */
+ vm_page_activate(m);
+ VM_STAT_INCR(reactivations);
+ }
+ vm_pageout_inactive_used++;
+ inactive_burst_count = 0;
+
+ goto done_with_inactivepage;
+ }
+ /*
+ * Make sure we call pmap_get_refmod() if it
+ * wasn't already called just above, to update
+ * the dirty bit.
+ */
+ if ((refmod_state == -1) && !m->dirty && m->pmapped) {
+ refmod_state = pmap_get_refmod(m->phys_page);
+ if (refmod_state & VM_MEM_MODIFIED)
+ m->dirty = TRUE;
+ }
+ forced_reclaim = TRUE;
+ } else {
+ forced_reclaim = FALSE;
}
XPR(XPR_VM_PAGEOUT,
"vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
- (integer_t)object, (integer_t)m->offset, (integer_t)m, 0,0);
+ object, m->offset, m, 0,0);
/*
* we've got a candidate page to steal...
* m->dirty is up to date courtesy of the
* preceding check for m->reference... if
* we get here, then m->reference had to be
- * FALSE which means we did a pmap_get_refmod
- * and updated both m->reference and m->dirty
+ * FALSE (or possibly "reactivate_limit" was
+ * exceeded), but in either case we called
+ * pmap_get_refmod() and updated both
+ * m->reference and m->dirty
*
* if it's dirty or precious we need to
 * see if the target queue is throttled
 * if it is, we need to skip over it by moving it back
* to the end of the inactive queue
*/
+
inactive_throttled = FALSE;
if (m->dirty || m->precious) {
if (object->internal) {
- if ((VM_PAGE_Q_THROTTLED(iq) || !IP_VALID(memory_manager_default)))
+ if (VM_PAGE_Q_THROTTLED(iq))
inactive_throttled = TRUE;
} else if (VM_PAGE_Q_THROTTLED(eq)) {
- inactive_throttled = TRUE;
+ inactive_throttled = TRUE;
}
}
if (inactive_throttled == TRUE) {
- if (m->zero_fill) {
- queue_enter(&vm_page_queue_zf, m,
+throttle_inactive:
+ if (!IP_VALID(memory_manager_default) &&
+ object->internal && m->dirty &&
+ (object->purgable == VM_PURGABLE_DENY ||
+ object->purgable == VM_PURGABLE_NONVOLATILE ||
+ object->purgable == VM_PURGABLE_VOLATILE)) {
+ queue_enter(&vm_page_queue_throttled, m,
vm_page_t, pageq);
+ m->throttled = TRUE;
+ vm_page_throttled_count++;
} else {
- queue_enter(&vm_page_queue_inactive, m,
- vm_page_t, pageq);
+ if (m->zero_fill) {
+ queue_enter(&vm_page_queue_zf, m,
+ vm_page_t, pageq);
+ vm_zf_queue_count++;
+ } else
+ queue_enter(&vm_page_queue_inactive, m,
+ vm_page_t, pageq);
+ m->inactive = TRUE;
+ if (!m->fictitious) {
+ vm_page_inactive_count++;
+ token_new_pagecount++;
+ }
}
- if (!m->fictitious)
- vm_page_inactive_count++;
- m->inactive = TRUE;
-
vm_pageout_scan_inactive_throttled++;
-
goto done_with_inactivepage;
}
+
/*
* we've got a page that we can steal...
* eliminate all mappings and make sure
* since we already set m->busy = TRUE, before
* going off to reactivate it
*
- * if we don't need the pmap_disconnect, then
- * m->dirty is up to date courtesy of the
- * earlier check for m->reference... if
- * we get here, then m->reference had to be
- * FALSE which means we did a pmap_get_refmod
- * and updated both m->reference and m->dirty...
+ * Note that if 'pmapped' is FALSE then the page is not
+ * and has not been in any map, so there is no point calling
+ * pmap_disconnect(). m->dirty and/or m->reference could
+ * have been set in anticipation of likely usage of the page.
*/
- if (m->no_isync == FALSE) {
+ if (m->pmapped == TRUE) {
refmod_state = pmap_disconnect(m->phys_page);
if (refmod_state & VM_MEM_MODIFIED)
m->dirty = TRUE;
if (refmod_state & VM_MEM_REFERENCED) {
- m->reference = TRUE;
-
- PAGE_WAKEUP_DONE(m);
- goto was_referenced;
+
+ /* If m->reference is already set, this page must have
+ * already failed the reactivate_limit test, so don't
+ * bump the counts twice.
+ */
+ if ( ! m->reference ) {
+ m->reference = TRUE;
+ if (forced_reclaim ||
+ ++reactivated_this_call >= reactivate_limit)
+ vm_pageout_reactivation_limit_exceeded++;
+ else {
+ PAGE_WAKEUP_DONE(m);
+ goto reactivate_page;
+ }
+ }
}
}
+ /*
+ * reset our count of pages that have been reclaimed
+ * since the last page was 'stolen'
+ */
+ inactive_reclaim_run = 0;
+
/*
* If it's clean and not precious, we can free the page.
*/
if (!m->dirty && !m->precious) {
+ if (m->zero_fill)
+ vm_pageout_inactive_zf++;
vm_pageout_inactive_clean++;
+
goto reclaim_page;
}
+
+ /*
+ * The page may have been dirtied since the last check
+ * for a throttled target queue (which may have been skipped
+ * if the page was clean then). With the dirty page
+ * disconnected here, we can make one final check.
+ */
+ {
+ boolean_t disconnect_throttled = FALSE;
+ if (object->internal) {
+ if (VM_PAGE_Q_THROTTLED(iq))
+ disconnect_throttled = TRUE;
+ } else if (VM_PAGE_Q_THROTTLED(eq)) {
+ disconnect_throttled = TRUE;
+ }
+
+ if (disconnect_throttled == TRUE) {
+ PAGE_WAKEUP_DONE(m);
+ goto throttle_inactive;
+ }
+ }
+
+ vm_pageout_stats[vm_pageout_stat_now].reclaimed++;
+
vm_pageout_cluster(m);
+ if (m->zero_fill)
+ vm_pageout_inactive_zf++;
vm_pageout_inactive_dirty++;
inactive_burst_count = 0;
done_with_inactivepage:
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
+ if (delayed_unlock++ > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT || try_failed == TRUE) {
if (object != NULL) {
+ vm_pageout_scan_wants_object = VM_OBJECT_NULL;
vm_object_unlock(object);
object = NULL;
}
if (local_freeq) {
- vm_page_free_list(local_freeq);
+ vm_page_unlock_queues();
+ vm_page_free_list(local_freeq, TRUE);
- local_freeq = 0;
+ local_freeq = NULL;
local_freed = 0;
- }
- delayed_unlock = 0;
- vm_page_unlock_queues();
- mutex_pause();
+ vm_page_lock_queues();
+ } else
+ lck_mtx_yield(&vm_page_queue_lock);
+
+ delayed_unlock = 1;
}
/*
* back to top of pageout scan loop
vm_page_free_min = vm_page_free_reserved +
VM_PAGE_FREE_MIN(free_after_reserve);
+ if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
+ vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
+
vm_page_free_target = vm_page_free_reserved +
VM_PAGE_FREE_TARGET(free_after_reserve);
+ if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
+ vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
+
if (vm_page_free_target < vm_page_free_min + 5)
vm_page_free_target = vm_page_free_min + 5;
+
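+	/*
+	 * Illustrative numbers only: with a free target of, say, 3000 pages,
+	 * the limits computed below come to 3000 - 1000 = 2000 pages for the
+	 * throttle limit (2/3 of target) and 1500 pages for the creation
+	 * throttle (1/2 of target).
+	 */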
+ vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
+ vm_page_creation_throttle = vm_page_free_target / 2;
}
/*
void
vm_pageout_continue(void)
{
+ DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
vm_pageout_scan_event_counter++;
vm_pageout_scan();
/* we hold vm_page_queue_free_lock now */
assert(vm_page_free_wanted == 0);
+ assert(vm_page_free_wanted_privileged == 0);
assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
- mutex_unlock(&vm_page_queue_free_lock);
+ lck_mtx_unlock(&vm_page_queue_free_lock);
counter(c_vm_pageout_block++);
thread_block((thread_continue_t)vm_pageout_continue);
}
-/*
- * must be called with the
- * queues and object locks held
- */
-static void
-vm_pageout_queue_steal(vm_page_t m)
-{
- struct vm_pageout_queue *q;
-
- if (m->object->internal == TRUE)
- q = &vm_pageout_queue_internal;
- else
- q = &vm_pageout_queue_external;
-
- m->laundry = FALSE;
- m->pageout_queue = FALSE;
- queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
-
- m->pageq.next = NULL;
- m->pageq.prev = NULL;
-
- vm_object_paging_end(m->object);
-
- q->pgo_laundry--;
-}
-
-
#ifdef FAKE_DEADLOCK
#define FAKE_COUNT 5000
{
vm_page_t m = NULL;
vm_object_t object;
- boolean_t need_wakeup;
+ memory_object_t pager;
+ thread_t self = current_thread();
- vm_page_lock_queues();
+ if ((vm_pageout_internal_iothread != THREAD_NULL)
+ && (self == vm_pageout_external_iothread )
+ && (self->options & TH_OPT_VMPRIV))
+ self->options &= ~TH_OPT_VMPRIV;
+
+ vm_page_lockspin_queues();
while ( !queue_empty(&q->pgo_pending) ) {
q->pgo_busy = TRUE;
queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
+ VM_PAGE_CHECK(m);
m->pageout_queue = FALSE;
- vm_page_unlock_queues();
-
m->pageq.next = NULL;
m->pageq.prev = NULL;
+ vm_page_unlock_queues();
+
#ifdef FAKE_DEADLOCK
if (q == &vm_pageout_queue_internal) {
vm_offset_t addr;
#endif
object = m->object;
+ vm_object_lock(object);
+
if (!object->pager_initialized) {
- vm_object_lock(object);
/*
* If there is no memory object for the page, create
* Should only happen if there is no
* default pager.
*/
- m->list_req_pending = FALSE;
- m->cleaning = FALSE;
- m->pageout = FALSE;
- vm_page_unwire(m);
-
- vm_pageout_throttle_up(m);
+ vm_page_lockspin_queues();
- vm_page_lock_queues();
+ vm_pageout_queue_steal(m, TRUE);
vm_pageout_dirty_no_pager++;
vm_page_activate(m);
+
vm_page_unlock_queues();
/*
vm_object_paging_end(object);
vm_object_unlock(object);
- vm_page_lock_queues();
+ vm_page_lockspin_queues();
continue;
- } else if (object->pager == MEMORY_OBJECT_NULL) {
- /*
- * This pager has been destroyed by either
- * memory_object_destroy or vm_object_destroy, and
- * so there is nowhere for the page to go.
+ }
+ }
+ pager = object->pager;
+ if (pager == MEMORY_OBJECT_NULL) {
+ /*
+ * This pager has been destroyed by either
+ * memory_object_destroy or vm_object_destroy, and
+ * so there is nowhere for the page to go.
+ */
+ if (m->pageout) {
+ /*
* Just free the page... VM_PAGE_FREE takes
* care of cleaning up all the state...
* including doing the vm_pageout_throttle_up
*/
- VM_PAGE_FREE(m);
+ VM_PAGE_FREE(m);
+ } else {
+ vm_page_lockspin_queues();
- vm_object_paging_end(object);
- vm_object_unlock(object);
+ vm_pageout_queue_steal(m, TRUE);
+ vm_page_activate(m);
+
+ vm_page_unlock_queues();
- vm_page_lock_queues();
- continue;
+ /*
+ * And we are done with it.
+ */
+ PAGE_WAKEUP_DONE(m);
}
+ vm_object_paging_end(object);
vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ continue;
}
+ VM_PAGE_CHECK(m);
+ vm_object_unlock(object);
/*
* we expect the paging_in_progress reference to have
* already been taken on the object before it was added
* Send the data to the pager.
* any pageout clustering happens there
*/
- memory_object_data_return(object->pager,
+ memory_object_data_return(pager,
m->offset + object->paging_offset,
PAGE_SIZE,
NULL,
vm_object_paging_end(object);
vm_object_unlock(object);
- vm_page_lock_queues();
+ vm_page_lockspin_queues();
}
assert_wait((event_t) q, THREAD_UNINT);
-
if (q->pgo_throttled == TRUE && !VM_PAGE_Q_THROTTLED(q)) {
q->pgo_throttled = FALSE;
- need_wakeup = TRUE;
- } else
- need_wakeup = FALSE;
-
+ thread_wakeup((event_t) &q->pgo_laundry);
+ }
+ if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
+ q->pgo_draining = FALSE;
+ thread_wakeup((event_t) (&q->pgo_laundry+1));
+ }
q->pgo_busy = FALSE;
q->pgo_idle = TRUE;
vm_page_unlock_queues();
- if (need_wakeup == TRUE)
- thread_wakeup((event_t) &q->pgo_laundry);
-
thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) &q->pgo_pending);
/*NOTREACHED*/
}
static void
vm_pageout_iothread_external(void)
{
+ thread_t self = current_thread();
+
+ self->options |= TH_OPT_VMPRIV;
vm_pageout_iothread_continue(&vm_pageout_queue_external);
/*NOTREACHED*/
/*NOTREACHED*/
}
+kern_return_t
+vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
+{
+ if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
+ return KERN_SUCCESS;
+ } else {
+ return KERN_FAILURE; /* Already set */
+ }
+}
+
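+/*
+ * Hypothetical usage sketch (the actual caller lives outside this file):
+ * the buffer cache would register its reclaim hook exactly once at
+ * initialization, e.g.
+ *
+ *	extern boolean_t buffer_cache_gc(int);	(hypothetical hook)
+ *	(void) vm_set_buffer_cleanup_callout(buffer_cache_gc);
+ *
+ * A second registration returns KERN_FAILURE, since the compare-and-swap
+ * above only succeeds while the callout pointer is still NULL.
+ */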
static void
vm_pageout_garbage_collect(int collect)
{
if (collect) {
+ boolean_t buf_large_zfree = FALSE;
stack_collect();
/*
* might return memory to zones.
*/
consider_machine_collect();
- consider_zone_gc();
+ if (consider_buffer_cache_collect != NULL) {
+ buf_large_zfree = (*consider_buffer_cache_collect)(0);
+ }
+ consider_zone_gc(buf_large_zfree);
consider_machine_adjust();
}
self->priority = BASEPRI_PREEMPT - 1;
set_sched_pri(self, self->priority);
thread_unlock(self);
+
+ if (!self->reserved_stack)
+ self->reserved_stack = self->kernel_stack;
+
splx(s);
/*
task_unlock(kernel_task);
vm_page_free_count_init = vm_page_free_count;
- vm_zf_iterator = 0;
+
/*
* even if we've already called vm_page_free_reserve
 * call it again here to ensure that the targets are
vm_pageout_queue_external.pgo_idle = FALSE;
vm_pageout_queue_external.pgo_busy = FALSE;
vm_pageout_queue_external.pgo_throttled = FALSE;
+ vm_pageout_queue_external.pgo_draining = FALSE;
queue_init(&vm_pageout_queue_internal.pgo_pending);
- vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+ vm_pageout_queue_internal.pgo_maxlaundry = 0;
vm_pageout_queue_internal.pgo_laundry = 0;
vm_pageout_queue_internal.pgo_idle = FALSE;
vm_pageout_queue_internal.pgo_busy = FALSE;
vm_pageout_queue_internal.pgo_throttled = FALSE;
+ vm_pageout_queue_internal.pgo_draining = FALSE;
- result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &thread);
- if (result != KERN_SUCCESS)
- panic("vm_pageout_iothread_internal: create failed");
-
- thread_deallocate(thread);
-
+ /* internal pageout thread started when default pager registered first time */
+ /* external pageout and garbage collection threads started here */
- result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL, BASEPRI_PREEMPT - 1, &thread);
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
+ BASEPRI_PREEMPT - 1,
+ &vm_pageout_external_iothread);
if (result != KERN_SUCCESS)
panic("vm_pageout_iothread_external: create failed");
- thread_deallocate(thread);
-
+ thread_deallocate(vm_pageout_external_iothread);
- result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL, BASEPRI_PREEMPT - 2, &thread);
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
+ MINPRI_KERNEL,
+ &thread);
if (result != KERN_SUCCESS)
panic("vm_pageout_garbage_collect: create failed");
thread_deallocate(thread);
+ vm_object_reaper_init();
+
vm_pageout_continue();
+
+ /*
+ * Unreached code!
+ *
+ * The vm_pageout_continue() call above never returns, so the code below is never
+ * executed. We take advantage of this to declare several DTrace VM related probe
+ * points that our kernel doesn't have an analog for. These are probe points that
+ * exist in Solaris and are in the DTrace documentation, so people may have written
+ * scripts that use them. Declaring the probe points here means their scripts will
+ * compile and execute which we want for portability of the scripts, but since this
+ * section of code is never reached, the probe points will simply never fire. Yes,
+ * this is basically a hack. The problem is the DTrace probe points were chosen with
+ * Solaris specific VM events in mind, not portability to different VM implementations.
+ */
+
+ DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
+ DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
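+	/*
+	 * For example (hypothetical usage, assuming the standard DTrace
+	 * vminfo provider naming): a script such as
+	 *
+	 *	dtrace -n 'vminfo:::swapin { @[execname] = count(); }'
+	 *
+	 * compiles and enables against the declarations above, but because
+	 * this code is unreachable the probe never actually fires.
+	 */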
/*NOTREACHED*/
}
+kern_return_t
+vm_pageout_internal_start(void)
+{
+ kern_return_t result;
+
+ vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
+ result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
+ if (result == KERN_SUCCESS)
+ thread_deallocate(vm_pageout_internal_iothread);
+ return result;
+}
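+/*
+ * Illustrative sketch (hypothetical call site): per the comment in
+ * vm_pageout(), the default-pager registration path is expected to start
+ * the internal pageout thread exactly once, along the lines of
+ *
+ *	if (vm_pageout_internal_iothread == THREAD_NULL)
+ *		(void) vm_pageout_internal_start();
+ */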
+
+
+/*
+ * when marshalling pages into a UPL and subsequently committing
+ * or aborting them, it is necessary to hold
+ * the vm_page_queue_lock (a hot global lock) for certain operations
+ * on the page... however, the majority of the work can be done
+ * while merely holding the object lock... in fact there are certain
+ * collections of pages that don't require any work brokered by the
+ * vm_page_queue_lock... to mitigate the time spent behind the global
+ * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
+ * while doing all of the work that doesn't require the vm_page_queue_lock...
+ * then call dw_do_work to acquire the vm_page_queue_lock and do the
+ * necessary work for each page... we will grab the busy bit on the page
+ * if it's not already held so that dw_do_work can drop the object lock
+ * if it can't immediately take the vm_page_queue_lock in order to compete
+ * for the locks in the same order that vm_pageout_scan takes them.
+ * the operation names are modeled after the names of the routines that
+ * need to be called in order to make the changes very obvious in the
+ * original loop
+ */
+
+#define DELAYED_WORK_LIMIT 32
+
+#define DW_vm_page_unwire 0x01
+#define DW_vm_page_wire 0x02
+#define DW_vm_page_free 0x04
+#define DW_vm_page_activate 0x08
+#define DW_vm_page_deactivate_internal 0x10
+#define DW_vm_page_speculate 0x20
+#define DW_vm_page_lru 0x40
+#define DW_vm_pageout_throttle_up 0x80
+#define DW_PAGE_WAKEUP 0x100
+#define DW_clear_busy 0x200
+#define DW_clear_reference 0x400
+#define DW_set_reference 0x800
+
+struct dw {
+ vm_page_t dw_m;
+ int dw_mask;
+};
+
+
+static void dw_do_work(vm_object_t object, struct dw *dwp, int dw_count);
+
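+/*
+ * Illustrative sketch only (not part of this change): the collect/flush
+ * pattern described above, as it is used further down in
+ * vm_object_upl_request().  The page-specific work is elided.
+ *
+ *	struct dw	dw_array[DELAYED_WORK_LIMIT];
+ *	struct dw	*dwp = &dw_array[0];
+ *	int		dw_count = 0;
+ *
+ *	while (more pages to process) {
+ *		dwp->dw_mask = 0;
+ *		... do all the work that only needs the object lock,
+ *		... setting bits in dwp->dw_mask for deferred queue ops
+ *		if (dwp->dw_mask) {
+ *			dwp->dw_m = dst_page;
+ *			dwp++;
+ *			if (++dw_count >= DELAYED_WORK_LIMIT) {
+ *				dw_do_work(object, &dw_array[0], dw_count);
+ *				dwp = &dw_array[0];
+ *				dw_count = 0;
+ *			}
+ *		}
+ *	}
+ *	if (dw_count)
+ *		dw_do_work(object, &dw_array[0], dw_count);
+ */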
+
static upl_t
-upl_create(
- int flags,
- upl_size_t size)
+upl_create(int type, int flags, upl_size_t size)
{
upl_t upl;
- int page_field_size; /* bit field in word size buf */
+ int page_field_size = 0;
+ int upl_flags = 0;
+ int upl_size = sizeof(struct upl);
- page_field_size = 0;
- if (flags & UPL_CREATE_LITE) {
- page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
+ size = round_page_32(size);
+
+ if (type & UPL_CREATE_LITE) {
+ page_field_size = (atop(size) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
+
+ upl_flags |= UPL_LITE;
}
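+	/*
+	 * Worked example (illustrative only): for a 1 MB request with 4 KB
+	 * pages, atop(size) == 256, so the lite bitmap needs
+	 * (256 + 7) >> 3 == 32 bytes; 32 is already a multiple of 4, so the
+	 * rounding above leaves it unchanged.
+	 */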
- if(flags & UPL_CREATE_INTERNAL) {
- upl = (upl_t)kalloc(sizeof(struct upl)
- + (sizeof(struct upl_page_info)*(size/PAGE_SIZE))
- + page_field_size);
- } else {
- upl = (upl_t)kalloc(sizeof(struct upl) + page_field_size);
+ if (type & UPL_CREATE_INTERNAL) {
+ upl_size += (int) sizeof(struct upl_page_info) * atop(size);
+
+ upl_flags |= UPL_INTERNAL;
}
- upl->flags = 0;
+ upl = (upl_t)kalloc(upl_size + page_field_size);
+
+ if (page_field_size)
+ bzero((char *)upl + upl_size, page_field_size);
+
+ upl->flags = upl_flags | flags;
upl->src_object = NULL;
upl->kaddr = (vm_offset_t)0;
upl->size = 0;
upl->ref_count = 1;
upl->highest_page = 0;
upl_lock_init(upl);
-#ifdef UPL_DEBUG
+ upl->vector_upl = NULL;
+#if UPL_DEBUG
upl->ubc_alias1 = 0;
upl->ubc_alias2 = 0;
+
+ upl->upl_creator = current_thread();
+ upl->upl_state = 0;
+ upl->upl_commit_index = 0;
+ bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));
+
+ (void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */
+
return(upl);
}
static void
-upl_destroy(
- upl_t upl)
+upl_destroy(upl_t upl)
{
int page_field_size; /* bit field in word size buf */
+ int size;
-#ifdef UPL_DEBUG
+#if UPL_DEBUG
{
- upl_t upl_ele;
vm_object_t object;
- if (upl->map_object->pageout) {
+
+ if (upl->flags & UPL_SHADOWED) {
object = upl->map_object->shadow;
} else {
object = upl->map_object;
}
vm_object_lock(object);
- queue_iterate(&object->uplq, upl_ele, upl_t, uplq) {
- if(upl_ele == upl) {
- queue_remove(&object->uplq,
- upl_ele, upl_t, uplq);
- break;
- }
- }
+ queue_remove(&object->uplq, upl, upl_t, uplq);
vm_object_unlock(object);
}
#endif /* UPL_DEBUG */
- /* drop a reference on the map_object whether or */
- /* not a pageout object is inserted */
- if(upl->map_object->pageout)
+ /*
+ * drop a reference on the map_object whether or
+ * not a pageout object is inserted
+ */
+ if (upl->flags & UPL_SHADOWED)
vm_object_deallocate(upl->map_object);
+ if (upl->flags & UPL_DEVICE_MEMORY)
+ size = PAGE_SIZE;
+ else
+ size = upl->size;
page_field_size = 0;
+
if (upl->flags & UPL_LITE) {
- page_field_size = ((upl->size/PAGE_SIZE) + 7) >> 3;
+ page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
}
- if(upl->flags & UPL_INTERNAL) {
+ upl_lock_destroy(upl);
+ upl->vector_upl = (vector_upl_t) 0xfeedbeef;
+ if (upl->flags & UPL_INTERNAL) {
kfree(upl,
sizeof(struct upl) +
- (sizeof(struct upl_page_info) * (upl->size/PAGE_SIZE))
+ (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
+ page_field_size);
} else {
kfree(upl, sizeof(struct upl) + page_field_size);
void uc_upl_dealloc(upl_t upl);
__private_extern__ void
-uc_upl_dealloc(
- upl_t upl)
+uc_upl_dealloc(upl_t upl)
{
- upl->ref_count -= 1;
- if(upl->ref_count == 0) {
+ if (--upl->ref_count == 0)
upl_destroy(upl);
- }
}
void
-upl_deallocate(
- upl_t upl)
+upl_deallocate(upl_t upl)
{
-
- upl->ref_count -= 1;
- if(upl->ref_count == 0) {
+ if (--upl->ref_count == 0) {
+ if(vector_upl_is_valid(upl))
+ vector_upl_deallocate(upl);
upl_destroy(upl);
}
}
-/*
+#if DEVELOPMENT || DEBUG
+/*
* Statistics about UPL enforcement of copy-on-write obligations.
*/
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
-unsigned long upl_cow_contiguous = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;
-unsigned long upl_cow_contiguous_pages = 0;
+
+unsigned long iopl_cow = 0;
+unsigned long iopl_cow_pages = 0;
+#endif
/*
* Routine: vm_object_upl_request
int cntrl_flags)
{
vm_page_t dst_page = VM_PAGE_NULL;
- vm_object_offset_t dst_offset = offset;
- upl_size_t xfer_size = size;
- boolean_t do_m_lock = FALSE;
+ vm_object_offset_t dst_offset;
+ upl_size_t xfer_size;
boolean_t dirty;
boolean_t hw_dirty;
upl_t upl = NULL;
boolean_t encountered_lrp = FALSE;
#endif
vm_page_t alias_page = NULL;
- int page_ticket;
- int refmod_state;
+ int refmod_state = 0;
wpl_array_t lite_list = NULL;
vm_object_t last_copy_object;
-
+ struct dw dw_array[DELAYED_WORK_LIMIT];
+ struct dw *dwp;
+ int dw_count;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
/*
*/
return KERN_INVALID_VALUE;
}
+ if ( (!object->internal) && (object->paging_offset != 0) )
+ panic("vm_object_upl_request: external object with non-zero paging offset\n");
+ if (object->phys_contiguous)
+ panic("vm_object_upl_request: contiguous object specified\n");
- page_ticket = (cntrl_flags & UPL_PAGE_TICKET_MASK)
- >> UPL_PAGE_TICKET_SHIFT;
-
- if(((size/PAGE_SIZE) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
- size = MAX_UPL_TRANSFER * PAGE_SIZE;
- }
- if(cntrl_flags & UPL_SET_INTERNAL)
- if(page_list_count != NULL)
- *page_list_count = MAX_UPL_TRANSFER;
+ if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
+ size = MAX_UPL_SIZE * PAGE_SIZE;
- if((!object->internal) && (object->paging_offset != 0))
- panic("vm_object_upl_request: external object with non-zero paging offset\n");
+ if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
+ *page_list_count = MAX_UPL_SIZE;
- if((cntrl_flags & UPL_COPYOUT_FROM) && (upl_ptr == NULL)) {
- return KERN_SUCCESS;
- }
+ if (cntrl_flags & UPL_SET_INTERNAL) {
+ if (cntrl_flags & UPL_SET_LITE) {
- vm_object_lock(object);
- vm_object_paging_begin(object);
- vm_object_unlock(object);
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);
- if(upl_ptr) {
- if(cntrl_flags & UPL_SET_INTERNAL) {
- if(cntrl_flags & UPL_SET_LITE) {
- uintptr_t page_field_size;
- upl = upl_create(
- UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
- size);
- user_page_list = (upl_page_info_t *)
- (((uintptr_t)upl) + sizeof(struct upl));
- lite_list = (wpl_array_t)
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t)
(((uintptr_t)user_page_list) +
- ((size/PAGE_SIZE) *
- sizeof(upl_page_info_t)));
- page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
- page_field_size =
- (page_field_size + 3) & 0xFFFFFFFC;
- bzero((char *)lite_list, page_field_size);
- upl->flags =
- UPL_LITE | UPL_INTERNAL;
- } else {
- upl = upl_create(UPL_CREATE_INTERNAL, size);
- user_page_list = (upl_page_info_t *)
- (((uintptr_t)upl) + sizeof(struct upl));
- upl->flags = UPL_INTERNAL;
+ ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+ if (size == 0) {
+ user_page_list = NULL;
+ lite_list = NULL;
}
} else {
- if(cntrl_flags & UPL_SET_LITE) {
- uintptr_t page_field_size;
- upl = upl_create(UPL_CREATE_LITE, size);
- lite_list = (wpl_array_t)
- (((uintptr_t)upl) + sizeof(struct upl));
- page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
- page_field_size =
- (page_field_size + 3) & 0xFFFFFFFC;
- bzero((char *)lite_list, page_field_size);
- upl->flags = UPL_LITE;
- } else {
- upl = upl_create(UPL_CREATE_EXTERNAL, size);
- upl->flags = 0;
- }
- }
-
- if (object->phys_contiguous) {
- if ((cntrl_flags & UPL_WILL_MODIFY) &&
- object->copy != VM_OBJECT_NULL) {
- /* Honor copy-on-write obligations */
-
- /*
- * XXX FBDP
- * We could still have a race...
- * A is here building the UPL for a write().
- * A pushes the pages to the current copy
- * object.
- * A returns the UPL to the caller.
- * B comes along and establishes another
- * private mapping on this object, inserting
- * a new copy object between the original
- * object and the old copy object.
- * B reads a page and gets the original contents
- * from the original object.
- * A modifies the page in the original object.
- * B reads the page again and sees A's changes,
- * which is wrong...
- *
- * The problem is that the pages are not
- * marked "busy" in the original object, so
- * nothing prevents B from reading it before
- * before A's changes are completed.
- *
- * The "paging_in_progress" might protect us
- * from the insertion of a new copy object
- * though... To be verified.
- */
- vm_object_lock_request(object,
- offset,
- size,
- FALSE,
- MEMORY_OBJECT_COPY_SYNC,
- VM_PROT_NO_CHANGE);
- upl_cow_contiguous++;
- upl_cow_contiguous_pages += size >> PAGE_SHIFT;
- }
-
- upl->map_object = object;
- /* don't need any shadow mappings for this one */
- /* since it is already I/O memory */
- upl->flags |= UPL_DEVICE_MEMORY;
-
-
- /* paging_in_progress protects paging_offset */
- upl->offset = offset + object->paging_offset;
- upl->size = size;
- *upl_ptr = upl;
- if(user_page_list) {
- user_page_list[0].phys_addr =
- (offset + object->shadow_offset)>>PAGE_SHIFT;
- user_page_list[0].device = TRUE;
- }
- upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
+ upl = upl_create(UPL_CREATE_INTERNAL, 0, size);
- if(page_list_count != NULL) {
- if (upl->flags & UPL_INTERNAL) {
- *page_list_count = 0;
- } else {
- *page_list_count = 1;
- }
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ user_page_list = NULL;
}
-
- return KERN_SUCCESS;
}
+ } else {
+ if (cntrl_flags & UPL_SET_LITE) {
- if(user_page_list)
- user_page_list[0].device = FALSE;
+ upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);
- if(cntrl_flags & UPL_SET_LITE) {
- upl->map_object = object;
+ lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ lite_list = NULL;
+ }
} else {
- upl->map_object = vm_object_allocate(size);
- /*
- * No neeed to lock the new object: nobody else knows
- * about it yet, so it's all ours so far.
- */
- upl->map_object->shadow = object;
- upl->map_object->pageout = TRUE;
- upl->map_object->can_persist = FALSE;
- upl->map_object->copy_strategy =
- MEMORY_OBJECT_COPY_NONE;
- upl->map_object->shadow_offset = offset;
- upl->map_object->wimg_bits = object->wimg_bits;
+ upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
}
-
}
- if (!(cntrl_flags & UPL_SET_LITE)) {
+ *upl_ptr = upl;
+
+ if (user_page_list)
+ user_page_list[0].device = FALSE;
+
+ if (cntrl_flags & UPL_SET_LITE) {
+ upl->map_object = object;
+ } else {
+ upl->map_object = vm_object_allocate(size);
+ /*
+	 * No need to lock the new object: nobody else knows
+ * about it yet, so it's all ours so far.
+ */
+ upl->map_object->shadow = object;
+ upl->map_object->pageout = TRUE;
+ upl->map_object->can_persist = FALSE;
+ upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ upl->map_object->shadow_offset = offset;
+ upl->map_object->wimg_bits = object->wimg_bits;
+
VM_PAGE_GRAB_FICTITIOUS(alias_page);
- }
+ upl->flags |= UPL_SHADOWED;
+ }
/*
* ENCRYPTED SWAP:
* Just mark the UPL as "encrypted" here.
* in upl_encrypt(), when the caller has
* selected which pages need to go to swap.
*/
- if (cntrl_flags & UPL_ENCRYPT) {
+ if (cntrl_flags & UPL_ENCRYPT)
upl->flags |= UPL_ENCRYPTED;
- }
- if (cntrl_flags & UPL_FOR_PAGEOUT) {
+
+ if (cntrl_flags & UPL_FOR_PAGEOUT)
upl->flags |= UPL_PAGEOUT;
- }
+
vm_object_lock(object);
+ vm_object_activity_begin(object);
- /* we can lock in the paging_offset once paging_in_progress is set */
- if(upl_ptr) {
- upl->size = size;
- upl->offset = offset + object->paging_offset;
- *upl_ptr = upl;
-#ifdef UPL_DEBUG
- queue_enter(&object->uplq, upl, upl_t, uplq);
-#endif /* UPL_DEBUG */
- }
+ /*
+ * we can lock in the paging_offset once paging_in_progress is set
+ */
+ upl->size = size;
+ upl->offset = offset + object->paging_offset;
- if ((cntrl_flags & UPL_WILL_MODIFY) &&
- object->copy != VM_OBJECT_NULL) {
- /* Honor copy-on-write obligations */
+#if UPL_DEBUG
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UPL_DEBUG */
+ if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
/*
+ * Honor copy-on-write obligations
+ *
* The caller is gathering these pages and
* might modify their contents. We need to
* make sure that the copy object has its own
FALSE, /* should_return */
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
upl_cow++;
upl_cow_pages += size >> PAGE_SHIFT;
-
+#endif
}
- /* remember which copy object we synchronized with */
+ /*
+ * remember which copy object we synchronized with
+ */
last_copy_object = object->copy;
-
entry = 0;
- if(cntrl_flags & UPL_COPYOUT_FROM) {
- upl->flags |= UPL_PAGE_SYNC_DONE;
- while (xfer_size) {
- if((alias_page == NULL) &&
- !(cntrl_flags & UPL_SET_LITE)) {
- vm_object_unlock(object);
- VM_PAGE_GRAB_FICTITIOUS(alias_page);
- vm_object_lock(object);
- }
+ xfer_size = size;
+ dst_offset = offset;
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+
+ while (xfer_size) {
+
+ dwp->dw_mask = 0;
+
+ if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
+ vm_object_unlock(object);
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
+ vm_object_lock(object);
+ }
+ if (cntrl_flags & UPL_COPYOUT_FROM) {
+ upl->flags |= UPL_PAGE_SYNC_DONE;
+
if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
dst_page->fictitious ||
dst_page->absent ||
dst_page->error ||
- (dst_page->wire_count && !dst_page->pageout) ||
-
- ((!dst_page->inactive) && (cntrl_flags & UPL_FOR_PAGEOUT) &&
- (dst_page->page_ticket != page_ticket) &&
- ((dst_page->page_ticket+1) != page_ticket)) ) {
+ (VM_PAGE_WIRED(dst_page) && !dst_page->pageout && !dst_page->list_req_pending)) {
if (user_page_list)
user_page_list[entry].phys_addr = 0;
- } else {
+
+ goto try_next_page;
+ }
+ /*
+ * grab this up front...
+			 * a high percentage of the time we're going to
+ * need the hardware modification state a bit later
+ * anyway... so we can eliminate an extra call into
+ * the pmap layer by grabbing it here and recording it
+ */
+ if (dst_page->pmapped)
+ refmod_state = pmap_get_refmod(dst_page->phys_page);
+ else
+ refmod_state = 0;
+
+ if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
/*
- * grab this up front...
- * a high percentange of the time we're going to
- * need the hardware modification state a bit later
- * anyway... so we can eliminate an extra call into
- * the pmap layer by grabbing it here and recording it
+ * page is on inactive list and referenced...
+ * reactivate it now... this gets it out of the
+ * way of vm_pageout_scan which would have to
+ * reactivate it upon tripping over it
*/
- refmod_state = pmap_get_refmod(dst_page->phys_page);
-
- if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
+ dwp->dw_mask |= DW_vm_page_activate;
+ }
+ if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
+ /*
+ * we're only asking for DIRTY pages to be returned
+ */
+ if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
/*
- * we're only asking for DIRTY pages to be returned
- */
-
- if (dst_page->list_req_pending || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
- /*
- * if we were the page stolen by vm_pageout_scan to be
- * cleaned (as opposed to a buddy being clustered in
- * or this request is not being driven by a PAGEOUT cluster
- * then we only need to check for the page being diry or
- * precious to decide whether to return it
- */
- if (dst_page->dirty || dst_page->precious ||
- (refmod_state & VM_MEM_MODIFIED)) {
- goto check_busy;
- }
- }
- /*
- * this is a request for a PAGEOUT cluster and this page
- * is merely along for the ride as a 'buddy'... not only
- * does it have to be dirty to be returned, but it also
- * can't have been referenced recently... note that we've
- * already filtered above based on whether this page is
- * currently on the inactive queue or it meets the page
- * ticket (generation count) check
+ * if we were the page stolen by vm_pageout_scan to be
+					 * cleaned (as opposed to a buddy being clustered in),
+					 * or if this request is not being driven by a PAGEOUT cluster,
+					 * then we only need to check for the page being dirty or
+ * precious to decide whether to return it
*/
- if ( !(refmod_state & VM_MEM_REFERENCED) &&
- ((refmod_state & VM_MEM_MODIFIED) ||
- dst_page->dirty || dst_page->precious) ) {
+ if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
goto check_busy;
- }
- /*
- * if we reach here, we're not to return
- * the page... go on to the next one
- */
- if (user_page_list)
- user_page_list[entry].phys_addr = 0;
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- continue;
+ goto dont_return;
}
-check_busy:
- if(dst_page->busy &&
- (!(dst_page->list_req_pending &&
- dst_page->pageout))) {
- if(cntrl_flags & UPL_NOBLOCK) {
- if(user_page_list) {
- user_page_list[entry].phys_addr = 0;
- }
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- continue;
- }
- /*
- * someone else is playing with the
- * page. We will have to wait.
- */
- PAGE_SLEEP(object, dst_page, THREAD_UNINT);
- continue;
+ /*
+ * this is a request for a PAGEOUT cluster and this page
+ * is merely along for the ride as a 'buddy'... not only
+ * does it have to be dirty to be returned, but it also
+ * can't have been referenced recently... note that we've
+ * already filtered above based on whether this page is
+ * currently on the inactive queue or it meets the page
+ * ticket (generation count) check
+ */
+ if ( (cntrl_flags & UPL_CLEAN_IN_PLACE || !(refmod_state & VM_MEM_REFERENCED)) &&
+ ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
+ goto check_busy;
}
- /* Someone else already cleaning the page? */
- if((dst_page->cleaning || dst_page->absent ||
- dst_page->wire_count != 0) &&
- !dst_page->list_req_pending) {
- if(user_page_list) {
- user_page_list[entry].phys_addr = 0;
- }
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- continue;
+dont_return:
+ /*
+ * if we reach here, we're not to return
+ * the page... go on to the next one
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+check_busy:
+ if (dst_page->busy && (!(dst_page->list_req_pending && (dst_page->pageout || dst_page->cleaning)))) {
+ if (cntrl_flags & UPL_NOBLOCK) {
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
}
- /* eliminate all mappings from the */
- /* original object and its prodigy */
-
- vm_page_lock_queues();
+ /*
+ * someone else is playing with the
+ * page. We will have to wait.
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
+ /*
+ * Someone else already cleaning the page?
+ */
+ if ((dst_page->cleaning || dst_page->absent || VM_PAGE_WIRED(dst_page)) && !dst_page->list_req_pending) {
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ /*
+ * ENCRYPTED SWAP:
+ * The caller is gathering this page and might
+ * access its contents later on. Decrypt the
+ * page before adding it to the UPL, so that
+ * the caller never sees encrypted data.
+ */
+ if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
+ int was_busy;
+
+ /*
+ * save the current state of busy
+ * mark page as busy while decrypt
+ * is in progress since it will drop
+ * the object lock...
+ */
+ was_busy = dst_page->busy;
+ dst_page->busy = TRUE;
+
+ vm_page_decrypt(dst_page, 0);
+ vm_page_decrypt_for_upl_counter++;
+ /*
+ * restore to original busy state
+ */
+ dst_page->busy = was_busy;
+ }
+ if (dst_page->pageout_queue == TRUE) {
+
+ vm_page_lockspin_queues();
+
+#if CONFIG_EMBEDDED
+ if (dst_page->laundry)
+#else
if (dst_page->pageout_queue == TRUE)
- /*
+#endif
+ {
+ /*
* we've buddied up a page for a clustered pageout
* that has already been moved to the pageout
* queue by pageout_scan... we need to remove
* it from the queue and drop the laundry count
* on that queue
*/
- vm_pageout_queue_steal(dst_page);
-#if MACH_CLUSTER_STATS
- /* pageout statistics gathering. count */
- /* all the pages we will page out that */
- /* were not counted in the initial */
- /* vm_pageout_scan work */
- if(dst_page->list_req_pending)
- encountered_lrp = TRUE;
- if((dst_page->dirty ||
- (dst_page->object->internal &&
- dst_page->precious)) &&
- (dst_page->list_req_pending
- == FALSE)) {
- if(encountered_lrp) {
- CLUSTER_STAT
- (pages_at_higher_offsets++;)
- } else {
- CLUSTER_STAT
- (pages_at_lower_offsets++;)
- }
+ vm_pageout_throttle_up(dst_page);
}
+ vm_page_unlock_queues();
+ }
+#if MACH_CLUSTER_STATS
+ /*
+ * pageout statistics gathering. count
+ * all the pages we will page out that
+ * were not counted in the initial
+ * vm_pageout_scan work
+ */
+ if (dst_page->list_req_pending)
+ encountered_lrp = TRUE;
+ if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious)) && !dst_page->list_req_pending) {
+ if (encountered_lrp)
+ CLUSTER_STAT(pages_at_higher_offsets++;)
+ else
+ CLUSTER_STAT(pages_at_lower_offsets++;)
+ }
#endif
- /* Turn off busy indication on pending */
- /* pageout. Note: we can only get here */
- /* in the request pending case. */
- dst_page->list_req_pending = FALSE;
- dst_page->busy = FALSE;
- dst_page->cleaning = FALSE;
-
- hw_dirty = refmod_state & VM_MEM_MODIFIED;
- dirty = hw_dirty ? TRUE : dst_page->dirty;
-
- if(cntrl_flags & UPL_SET_LITE) {
- int pg_num;
- pg_num = (dst_offset-offset)/PAGE_SIZE;
- lite_list[pg_num>>5] |=
- 1 << (pg_num & 31);
- if (hw_dirty)
- pmap_clear_modify(dst_page->phys_page);
- /*
- * Record that this page has been
- * written out
- */
-#if MACH_PAGEMAP
- vm_external_state_set(
- object->existence_map,
- dst_page->offset);
-#endif /*MACH_PAGEMAP*/
+ /*
+ * Turn off busy indication on pending
+ * pageout. Note: we can only get here
+ * in the request pending case.
+ */
+ dst_page->list_req_pending = FALSE;
+ dst_page->busy = FALSE;
- /*
- * Mark original page as cleaning
- * in place.
- */
- dst_page->cleaning = TRUE;
- dst_page->dirty = TRUE;
- dst_page->precious = FALSE;
- } else {
- /* use pageclean setup, it is more */
- /* convenient even for the pageout */
- /* cases here */
-
- vm_object_lock(upl->map_object);
- vm_pageclean_setup(dst_page,
- alias_page, upl->map_object,
- size - xfer_size);
- vm_object_unlock(upl->map_object);
-
- alias_page->absent = FALSE;
- alias_page = NULL;
- }
-
- if(!dirty) {
- dst_page->dirty = FALSE;
- dst_page->precious = TRUE;
- }
+ hw_dirty = refmod_state & VM_MEM_MODIFIED;
+ dirty = hw_dirty ? TRUE : dst_page->dirty;
- if(dst_page->pageout)
- dst_page->busy = TRUE;
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
- if ( (cntrl_flags & UPL_ENCRYPT) ) {
- /*
- * ENCRYPTED SWAP:
- * We want to deny access to the target page
- * because its contents are about to be
- * encrypted and the user would be very
- * confused to see encrypted data instead
- * of their data.
- */
- dst_page->busy = TRUE;
- }
- if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
- /*
- * deny access to the target page
- * while it is being worked on
- */
- if ((!dst_page->pageout) &&
- (dst_page->wire_count == 0)) {
- dst_page->busy = TRUE;
- dst_page->pageout = TRUE;
- vm_page_wire(dst_page);
- }
- }
+ if (cntrl_flags & UPL_SET_LITE) {
+ unsigned int pg_num;
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
-
- if(user_page_list) {
- user_page_list[entry].phys_addr
- = dst_page->phys_page;
- user_page_list[entry].dirty =
- dst_page->dirty;
- user_page_list[entry].pageout =
- dst_page->pageout;
- user_page_list[entry].absent =
- dst_page->absent;
- user_page_list[entry].precious =
- dst_page->precious;
- }
- vm_page_unlock_queues();
+ pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+ assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+ if (hw_dirty)
+ pmap_clear_modify(dst_page->phys_page);
/*
- * ENCRYPTED SWAP:
- * The caller is gathering this page and might
- * access its contents later on. Decrypt the
- * page before adding it to the UPL, so that
- * the caller never sees encrypted data.
+ * Mark original page as cleaning
+ * in place.
*/
- if (! (cntrl_flags & UPL_ENCRYPT) &&
- dst_page->encrypted) {
- assert(dst_page->busy);
-
- vm_page_decrypt(dst_page, 0);
- vm_page_decrypt_for_upl_counter++;
+ dst_page->cleaning = TRUE;
+ dst_page->precious = FALSE;
+ } else {
+ /*
+ * use pageclean setup, it is more
+ * convenient even for the pageout
+ * cases here
+ */
+ vm_object_lock(upl->map_object);
+ vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+ vm_object_unlock(upl->map_object);
- /*
- * Retry this page, since anything
- * could have changed while we were
- * decrypting.
- */
- continue;
- }
- }
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- }
- } else {
- while (xfer_size) {
- if((alias_page == NULL) &&
- !(cntrl_flags & UPL_SET_LITE)) {
- vm_object_unlock(object);
- VM_PAGE_GRAB_FICTITIOUS(alias_page);
- vm_object_lock(object);
+ alias_page->absent = FALSE;
+ alias_page = NULL;
}
+#if MACH_PAGEMAP
+ /*
+ * Record that this page has been
+ * written out
+ */
+ vm_external_state_set(object->existence_map, dst_page->offset);
+#endif /*MACH_PAGEMAP*/
+ dst_page->dirty = dirty;
- if ((cntrl_flags & UPL_WILL_MODIFY) &&
- object->copy != last_copy_object) {
- /* Honor copy-on-write obligations */
+ if (!dirty)
+ dst_page->precious = TRUE;
+ if (dst_page->pageout)
+ dst_page->busy = TRUE;
+
+ if ( (cntrl_flags & UPL_ENCRYPT) ) {
+ /*
+ * ENCRYPTED SWAP:
+ * We want to deny access to the target page
+ * because its contents are about to be
+ * encrypted and the user would be very
+ * confused to see encrypted data instead
+ * of their data.
+ * We also set "encrypted_cleaning" to allow
+ * vm_pageout_scan() to demote that page
+ * from "adjacent/clean-in-place" to
+ * "target/clean-and-free" if it bumps into
+ * this page during its scanning while we're
+ * still processing this cluster.
+ */
+ dst_page->busy = TRUE;
+ dst_page->encrypted_cleaning = TRUE;
+ }
+ if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
+ /*
+ * deny access to the target page
+ * while it is being worked on
+ */
+ if ((!dst_page->pageout) && ( !VM_PAGE_WIRED(dst_page))) {
+ dst_page->busy = TRUE;
+ dst_page->pageout = TRUE;
+
+ dwp->dw_mask |= DW_vm_page_wire;
+ }
+ }
+ } else {
+ if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
/*
+ * Honor copy-on-write obligations
+ *
* The copy object has changed since we
* last synchronized for copy-on-write.
* Another copy object might have been
FALSE, /* should_return */
MEMORY_OBJECT_COPY_SYNC,
VM_PROT_NO_CHANGE);
+
+#if DEVELOPMENT || DEBUG
upl_cow_again++;
- upl_cow_again_pages +=
- xfer_size >> PAGE_SHIFT;
+ upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
+#endif
}
- /* remember the copy object we synced with */
+ /*
+ * remember the copy object we synced with
+ */
last_copy_object = object->copy;
}
-
dst_page = vm_page_lookup(object, dst_offset);
- if(dst_page != VM_PAGE_NULL) {
- if((cntrl_flags & UPL_RET_ONLY_ABSENT) &&
- !((dst_page->list_req_pending)
- && (dst_page->absent))) {
- /* we are doing extended range */
- /* requests. we want to grab */
- /* pages around some which are */
- /* already present. */
- if(user_page_list) {
- user_page_list[entry].phys_addr = 0;
+ if (dst_page != VM_PAGE_NULL) {
+
+ if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+
+ if ( !(dst_page->absent && dst_page->list_req_pending) ) {
+ /*
+ * skip over pages already present in the cache
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
}
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- continue;
- }
- if((dst_page->cleaning) &&
- !(dst_page->list_req_pending)) {
- /*someone else is writing to the */
- /* page. We will have to wait. */
- PAGE_SLEEP(object,dst_page,THREAD_UNINT);
- continue;
}
- if ((dst_page->fictitious &&
- dst_page->list_req_pending)) {
- /* dump the fictitious page */
- dst_page->list_req_pending = FALSE;
- dst_page->clustered = FALSE;
+ if ( !(dst_page->list_req_pending) ) {
- vm_page_lock_queues();
- vm_page_free(dst_page);
- vm_page_unlock_queues();
+ if (dst_page->cleaning) {
+ /*
+ * someone else is writing to the page... wait...
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+
+ continue;
+ }
+ } else {
+ if (dst_page->fictitious &&
+ dst_page->phys_page == vm_page_fictitious_addr) {
+ assert( !dst_page->speculative);
+ /*
+ * dump the fictitious page
+ */
+ dst_page->list_req_pending = FALSE;
+
+ VM_PAGE_FREE(dst_page);
+
+ dst_page = NULL;
+
+ } else if (dst_page->absent) {
+ /*
+ * the default_pager case
+ */
+ dst_page->list_req_pending = FALSE;
+ dst_page->busy = FALSE;
+
+ } else if (dst_page->pageout || dst_page->cleaning) {
+ /*
+ * page was earmarked by vm_pageout_scan
+ * to be cleaned and stolen... we're going
+ * to take it back since we are not attempting
+ * to read that page and we don't want to stall
+ * waiting for it to be cleaned for 2 reasons...
+ * 1 - no use paging it out and back in
+					 * 2 - if we stall, we may cause a deadlock in
+					 * the FS trying to acquire its locks
+ * on the VNOP_PAGEOUT path presuming that
+ * those locks are already held on the read
+ * path before trying to create this UPL
+ *
+ * so undo all of the state that vm_pageout_scan
+ * hung on this page
+ */
+ dst_page->busy = FALSE;
- dst_page = NULL;
- } else if ((dst_page->absent &&
- dst_page->list_req_pending)) {
- /* the default_pager case */
- dst_page->list_req_pending = FALSE;
- dst_page->busy = FALSE;
+ vm_pageout_queue_steal(dst_page, FALSE);
+ }
}
}
- if(dst_page == VM_PAGE_NULL) {
- if(object->private) {
+ if (dst_page == VM_PAGE_NULL) {
+ if (object->private) {
/*
* This is a nasty wrinkle for users
* of upl who encounter device or
* private memory however, it is
* unavoidable, only a fault can
- * reslove the actual backing
+ * resolve the actual backing
* physical page by asking the
* backing device.
*/
- if(user_page_list) {
+ if (user_page_list)
user_page_list[entry].phys_addr = 0;
- }
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- continue;
+
+ goto try_next_page;
}
- /* need to allocate a page */
- dst_page = vm_page_alloc(object, dst_offset);
+ /*
+ * need to allocate a page
+ */
+ dst_page = vm_page_grab();
+
if (dst_page == VM_PAGE_NULL) {
+ if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
+ /*
+ * we don't want to stall waiting for pages to come onto the free list
+					 * while we're already holding absent pages in this UPL...
+					 * the caller will deal with the empty slots
+ */
+ if (user_page_list)
+ user_page_list[entry].phys_addr = 0;
+
+ goto try_next_page;
+ }
+ /*
+ * no pages available... wait
+ * then try again for the same
+ * offset...
+ */
vm_object_unlock(object);
VM_PAGE_WAIT();
vm_object_lock(object);
+
continue;
}
+ vm_page_insert(dst_page, object, dst_offset);
+
+ dst_page->absent = TRUE;
dst_page->busy = FALSE;
-#if 0
- if(cntrl_flags & UPL_NO_SYNC) {
- dst_page->page_lock = 0;
- dst_page->unlock_request = 0;
- }
-#endif
- if(cntrl_flags & UPL_RET_ONLY_ABSENT) {
+
+ if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
/*
* if UPL_RET_ONLY_ABSENT was specified,
 * then we're definitely setting up a
* upl for a clustered read/pagein
* operation... mark the pages as clustered
- * so vm_fault can correctly attribute them
- * to the 'pagein' bucket the first time
- * a fault happens on them
+ * so upl_commit_range can put them on the
+ * speculative list
*/
dst_page->clustered = TRUE;
}
- dst_page->absent = TRUE;
- object->absent_count++;
}
-#if 1
- if(cntrl_flags & UPL_NO_SYNC) {
- dst_page->page_lock = 0;
- dst_page->unlock_request = 0;
+ if (dst_page->fictitious) {
+ panic("need corner case for fictitious page");
}
-#endif /* 1 */
+ if (dst_page->busy) {
+ /*
+ * someone else is playing with the
+ * page. We will have to wait.
+ */
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
/*
* ENCRYPTED SWAP:
*/
*/
dst_page->encrypted = FALSE;
}
-
dst_page->overwriting = TRUE;
- if(dst_page->fictitious) {
- panic("need corner case for fictitious page");
- }
- if(dst_page->page_lock) {
- do_m_lock = TRUE;
- }
- if(upl_ptr) {
- /* eliminate all mappings from the */
- /* original object and its prodigy */
-
- if(dst_page->busy) {
- /*someone else is playing with the */
- /* page. We will have to wait. */
- PAGE_SLEEP(object, dst_page, THREAD_UNINT);
- continue;
- }
- vm_page_lock_queues();
-
- if( !(cntrl_flags & UPL_FILE_IO))
- hw_dirty = pmap_disconnect(dst_page->phys_page) & VM_MEM_MODIFIED;
- else
- hw_dirty = pmap_get_refmod(dst_page->phys_page) & VM_MEM_MODIFIED;
- dirty = hw_dirty ? TRUE : dst_page->dirty;
-
- if(cntrl_flags & UPL_SET_LITE) {
- int pg_num;
- pg_num = (dst_offset-offset)/PAGE_SIZE;
- lite_list[pg_num>>5] |=
- 1 << (pg_num & 31);
- if (hw_dirty)
- pmap_clear_modify(dst_page->phys_page);
- /*
- * Record that this page has been
- * written out
+ if (dst_page->pmapped) {
+ if ( !(cntrl_flags & UPL_FILE_IO))
+ /*
+ * eliminate all mappings from the
+				 * original object and its progeny
*/
-#if MACH_PAGEMAP
- vm_external_state_set(
- object->existence_map,
- dst_page->offset);
-#endif /*MACH_PAGEMAP*/
+ refmod_state = pmap_disconnect(dst_page->phys_page);
+ else
+ refmod_state = pmap_get_refmod(dst_page->phys_page);
+ } else
+ refmod_state = 0;
- /*
- * Mark original page as cleaning
- * in place.
- */
- dst_page->cleaning = TRUE;
- dst_page->dirty = TRUE;
- dst_page->precious = FALSE;
- } else {
- /* use pageclean setup, it is more */
- /* convenient even for the pageout */
- /* cases here */
- vm_object_lock(upl->map_object);
- vm_pageclean_setup(dst_page,
- alias_page, upl->map_object,
- size - xfer_size);
- vm_object_unlock(upl->map_object);
-
- alias_page->absent = FALSE;
- alias_page = NULL;
- }
+ hw_dirty = refmod_state & VM_MEM_MODIFIED;
+ dirty = hw_dirty ? TRUE : dst_page->dirty;
- if(cntrl_flags & UPL_CLEAN_IN_PLACE) {
- /* clean in place for read implies */
- /* that a write will be done on all */
- /* the pages that are dirty before */
- /* a upl commit is done. The caller */
- /* is obligated to preserve the */
- /* contents of all pages marked */
- /* dirty. */
- upl->flags |= UPL_CLEAR_DIRTY;
- }
+ if (cntrl_flags & UPL_SET_LITE) {
+ unsigned int pg_num;
- if(!dirty) {
- dst_page->dirty = FALSE;
- dst_page->precious = TRUE;
- }
-
- if (dst_page->wire_count == 0) {
- /* deny access to the target page while */
- /* it is being worked on */
- dst_page->busy = TRUE;
- } else {
- vm_page_wire(dst_page);
- }
- if(cntrl_flags & UPL_RET_ONLY_ABSENT) {
- /*
- * expect the page not to be used
- * since it's coming in as part
- * of a cluster and could be
- * speculative... pages that
- * are 'consumed' will get a
- * hardware reference
- */
- dst_page->reference = FALSE;
- } else {
- /*
- * expect the page to be used
- */
- dst_page->reference = TRUE;
- }
- dst_page->precious =
- (cntrl_flags & UPL_PRECIOUS)
- ? TRUE : FALSE;
-
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
-
- if(user_page_list) {
- user_page_list[entry].phys_addr
- = dst_page->phys_page;
- user_page_list[entry].dirty =
- dst_page->dirty;
- user_page_list[entry].pageout =
- dst_page->pageout;
- user_page_list[entry].absent =
- dst_page->absent;
- user_page_list[entry].precious =
- dst_page->precious;
- }
- vm_page_unlock_queues();
+ pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+ assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+ if (hw_dirty)
+ pmap_clear_modify(dst_page->phys_page);
+
+ /*
+ * Mark original page as cleaning
+ * in place.
+ */
+ dst_page->cleaning = TRUE;
+ dst_page->precious = FALSE;
+ } else {
+ /*
+ * use pageclean setup, it is more
+ * convenient even for the pageout
+ * cases here
+ */
+ vm_object_lock(upl->map_object);
+ vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
+ vm_object_unlock(upl->map_object);
+
+ alias_page->absent = FALSE;
+ alias_page = NULL;
}
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- }
- }
- if (upl->flags & UPL_INTERNAL) {
- if(page_list_count != NULL)
- *page_list_count = 0;
- } else if (*page_list_count > entry) {
- if(page_list_count != NULL)
- *page_list_count = entry;
- }
+ if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
+ /*
+ * clean in place for read implies
+ * that a write will be done on all
+ * the pages that are dirty before
+ * a upl commit is done. The caller
+ * is obligated to preserve the
+ * contents of all pages marked dirty
+ */
+ upl->flags |= UPL_CLEAR_DIRTY;
+ }
+ dst_page->dirty = dirty;
- if(alias_page != NULL) {
- vm_page_lock_queues();
- vm_page_free(alias_page);
- vm_page_unlock_queues();
- }
+ if (!dirty)
+ dst_page->precious = TRUE;
- if(do_m_lock) {
- vm_prot_t access_required;
- /* call back all associated pages from other users of the pager */
- /* all future updates will be on data which is based on the */
- /* changes we are going to make here. Note: it is assumed that */
- /* we already hold copies of the data so we will not be seeing */
- /* an avalanche of incoming data from the pager */
- access_required = (cntrl_flags & UPL_COPYOUT_FROM)
- ? VM_PROT_READ : VM_PROT_WRITE;
- while (TRUE) {
- kern_return_t rc;
-
- if(!object->pager_ready) {
- wait_result_t wait_result;
-
- wait_result = vm_object_sleep(object,
- VM_OBJECT_EVENT_PAGER_READY,
- THREAD_UNINT);
- if (wait_result != THREAD_AWAKENED) {
- vm_object_unlock(object);
- return KERN_FAILURE;
- }
- continue;
+ if ( !VM_PAGE_WIRED(dst_page)) {
+ /*
+ * deny access to the target page while
+ * it is being worked on
+ */
+ dst_page->busy = TRUE;
+ } else
+ dwp->dw_mask |= DW_vm_page_wire;
+
+ /*
+ * We might be about to satisfy a fault which has been
+ * requested. So no need for the "restart" bit.
+ */
+ dst_page->restart = FALSE;
+ if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
+ /*
+ * expect the page to be used
+ */
+ dwp->dw_mask |= DW_set_reference;
+ }
+ dst_page->precious = (cntrl_flags & UPL_PRECIOUS) ? TRUE : FALSE;
+ }
+ if (dst_page->busy)
+ upl->flags |= UPL_HAS_BUSY;
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = dst_page->phys_page;
+ user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].precious = dst_page->precious;
+ user_page_list[entry].device = FALSE;
+ if (dst_page->clustered == TRUE)
+ user_page_list[entry].speculative = dst_page->speculative;
+ else
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = dst_page->cs_validated;
+ user_page_list[entry].cs_tainted = dst_page->cs_tainted;
}
+ /*
+ * if UPL_RET_ONLY_ABSENT is set, then
+ * we are working with a fresh page and we've
+ * just set the clustered flag on it to
+		 * indicate that it was dragged in as part of a
+ * speculative cluster... so leave it alone
+ */
+ if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
+ /*
+ * someone is explicitly grabbing this page...
+ * update clustered and speculative state
+ *
+ */
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ }
+try_next_page:
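+		/*
+		 * queue up any deferred page-queue work for this page;
+		 * it is batched in dw_array and flushed through
+		 * dw_do_work() so the page queues lock isn't taken
+		 * once per page
+		 */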
+ if (dwp->dw_mask) {
+ if (dwp->dw_mask & DW_vm_page_activate)
+ VM_STAT_INCR(reactivations);
- vm_object_unlock(object);
- rc = memory_object_data_unlock(
- object->pager,
- dst_offset + object->paging_offset,
- size,
- access_required);
- if (rc != KERN_SUCCESS && rc != MACH_SEND_INTERRUPTED)
- return KERN_FAILURE;
- vm_object_lock(object);
+ if (dst_page->busy == FALSE) {
+ /*
+ * dw_do_work may need to drop the object lock
+ * if it does, we need the pages it's looking at to
+ * be held stable via the busy bit.
+ */
+ dst_page->busy = TRUE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ }
+ dwp->dw_m = dst_page;
+ dwp++;
+ dw_count++;
- if (rc == KERN_SUCCESS)
- break;
- }
-
- /* lets wait on the last page requested */
- /* NOTE: we will have to update lock completed routine to signal */
- if(dst_page != VM_PAGE_NULL &&
- (access_required & dst_page->page_lock) != access_required) {
- PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
- vm_object_unlock(object);
- thread_block(THREAD_CONTINUE_NULL);
- return KERN_SUCCESS;
- }
+ if (dw_count >= DELAYED_WORK_LIMIT) {
+ dw_do_work(object, &dw_array[0], dw_count);
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }
+ }
+ entry++;
+ dst_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
}
+ if (dw_count)
+ dw_do_work(object, &dw_array[0], dw_count);
+ if (alias_page != NULL) {
+ VM_PAGE_FREE(alias_page);
+ }
+
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
+ *page_list_count = 0;
+ else if (*page_list_count > entry)
+ *page_list_count = entry;
+ }
+#if UPL_DEBUG
+ upl->upl_state = 1;
+#endif
vm_object_unlock(object);
+
return KERN_SUCCESS;
}
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_t **user_page_list_ptr,
- int page_list_count,
+ unsigned int page_list_count,
int cntrl_flags);
kern_return_t
vm_fault_list_request(
upl_size_t size,
upl_t *upl_ptr,
upl_page_info_t **user_page_list_ptr,
- int page_list_count,
+ unsigned int page_list_count,
int cntrl_flags)
{
unsigned int local_list_count;
upl_page_info_t *user_page_list;
kern_return_t kr;
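+	/*
+	 * vectored UPLs are not handled by this entry point
+	 */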
+ if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
+ return KERN_INVALID_ARGUMENT;
+
if (user_page_list_ptr != NULL) {
local_list_count = page_list_count;
user_page_list = *user_page_list_ptr;
unsigned int *page_list_count,
int cntrl_flags)
{
- vm_page_t target_page;
- int ticket;
-
-
- if(object->paging_offset > offset)
+ if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
return KERN_FAILURE;
assert(object->paging_in_progress);
offset = offset - object->paging_offset;
- if(cntrl_flags & UPL_FOR_PAGEOUT) {
-
- vm_object_lock(object);
-
- if((target_page = vm_page_lookup(object, offset))
- != VM_PAGE_NULL) {
- ticket = target_page->page_ticket;
- cntrl_flags = cntrl_flags & ~(int)UPL_PAGE_TICKET_MASK;
- cntrl_flags = cntrl_flags |
- ((ticket << UPL_PAGE_TICKET_SHIFT)
- & UPL_PAGE_TICKET_MASK);
- }
- vm_object_unlock(object);
- }
-
if (super_cluster > size) {
vm_object_offset_t base_offset;
upl_size_t super_size;
-
- base_offset = (offset &
- ~((vm_object_offset_t) super_cluster - 1));
- super_size = (offset+size) > (base_offset + super_cluster) ?
- super_cluster<<1 : super_cluster;
- super_size = ((base_offset + super_size) > object->size) ?
- (object->size - base_offset) : super_size;
- if(offset > (base_offset + super_size))
- panic("vm_object_super_upl_request: Missed target pageout"
- " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
- offset, base_offset, super_size, super_cluster,
- size, object->paging_offset);
+ vm_object_size_t super_size_64;
+
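+		/*
+		 * widen the request to a super_cluster-aligned range,
+		 * clipped so it never extends past the end of the object
+		 */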
+ base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
+ super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
+ super_size_64 = ((base_offset + super_size) > object->size) ? (object->size - base_offset) : super_size;
+ super_size = (upl_size_t) super_size_64;
+ assert(super_size == super_size_64);
+
+ if (offset > (base_offset + super_size)) {
+ panic("vm_object_super_upl_request: Missed target pageout"
+ " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
+ offset, base_offset, super_size, super_cluster,
+ size, object->paging_offset);
+ }
/*
* apparently there is a case where the vm requests a
 	 * page to be written out whose offset is beyond the
* object size
*/
- if((offset + size) > (base_offset + super_size))
- super_size = (offset + size) - base_offset;
+ if ((offset + size) > (base_offset + super_size)) {
+ super_size_64 = (offset + size) - base_offset;
+ super_size = (upl_size_t) super_size_64;
+ assert(super_size == super_size_64);
+ }
offset = base_offset;
size = super_size;
}
- return vm_object_upl_request(object, offset, size,
- upl, user_page_list, page_list_count,
- cntrl_flags);
+ return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
}
-
+
kern_return_t
vm_map_create_upl(
vm_map_t map,
*/
return KERN_INVALID_VALUE;
}
-
force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);
- if(upl == NULL)
+ if (upl == NULL)
return KERN_INVALID_ARGUMENT;
-
REDISCOVER_ENTRY:
- vm_map_lock(map);
+ vm_map_lock_read(map);
+
if (vm_map_lookup_entry(map, offset, &entry)) {
- if (entry->object.vm_object == VM_OBJECT_NULL ||
- !entry->object.vm_object->phys_contiguous) {
- if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
- *upl_size = MAX_UPL_TRANSFER * page_size;
- }
- }
- if((entry->vme_end - offset) < *upl_size) {
- *upl_size = entry->vme_end - offset;
+
+ if ((entry->vme_end - offset) < *upl_size) {
+ *upl_size = (upl_size_t) (entry->vme_end - offset);
+ assert(*upl_size == entry->vme_end - offset);
}
+
if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
- if (entry->object.vm_object == VM_OBJECT_NULL) {
- *flags = 0;
- } else if (entry->object.vm_object->private) {
- *flags = UPL_DEV_MEMORY;
- if (entry->object.vm_object->phys_contiguous) {
+ *flags = 0;
+
+ if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
+ if (entry->object.vm_object->private)
+ *flags = UPL_DEV_MEMORY;
+
+ if (entry->object.vm_object->phys_contiguous)
*flags |= UPL_PHYS_CONTIG;
- }
- } else {
- *flags = 0;
}
- vm_map_unlock(map);
+ vm_map_unlock_read(map);
+
return KERN_SUCCESS;
}
+ if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
+ if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
+ *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
+ }
/*
* Create an object if necessary.
*/
if (entry->object.vm_object == VM_OBJECT_NULL) {
- entry->object.vm_object = vm_object_allocate(
- (vm_size_t)(entry->vme_end - entry->vme_start));
+
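+			/*
+			 * we need the map write-locked to install a new
+			 * object; if the lock upgrade fails the map was
+			 * dropped, so go back and look the entry up again
+			 */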
+ if (vm_map_lock_read_to_write(map))
+ goto REDISCOVER_ENTRY;
+
+ entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
entry->offset = 0;
+
+ vm_map_lock_write_to_read(map);
}
if (!(caller_flags & UPL_COPYOUT_FROM)) {
if (!(entry->protection & VM_PROT_WRITE)) {
- vm_map_unlock(map);
+ vm_map_unlock_read(map);
return KERN_PROTECTION_FAILURE;
}
if (entry->needs_copy) {
+ /*
+ * Honor copy-on-write for COPY_SYMMETRIC
+ * strategy.
+ */
vm_map_t local_map;
vm_object_t object;
- vm_map_offset_t offset_hi;
- vm_map_offset_t offset_lo;
vm_object_offset_t new_offset;
vm_prot_t prot;
boolean_t wired;
- vm_behavior_t behavior;
vm_map_version_t version;
vm_map_t real_map;
local_map = map;
- vm_map_lock_write_to_read(map);
- if(vm_map_lookup_locked(&local_map,
- offset, VM_PROT_WRITE,
- &version, &object,
- &new_offset, &prot, &wired,
- &behavior, &offset_lo,
- &offset_hi, &real_map)) {
- vm_map_unlock(local_map);
+
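+			/*
+			 * doing the lookup with VM_PROT_WRITE forces the
+			 * copy-on-write to be resolved before the UPL is
+			 * built against the resulting object
+			 */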
+ if (vm_map_lookup_locked(&local_map,
+ offset, VM_PROT_WRITE,
+ OBJECT_LOCK_EXCLUSIVE,
+ &version, &object,
+ &new_offset, &prot, &wired,
+ NULL,
+ &real_map) != KERN_SUCCESS) {
+ vm_map_unlock_read(local_map);
return KERN_FAILURE;
}
- if (real_map != map) {
+ if (real_map != map)
vm_map_unlock(real_map);
- }
+ vm_map_unlock_read(local_map);
+
vm_object_unlock(object);
- vm_map_unlock(local_map);
goto REDISCOVER_ENTRY;
}
submap = entry->object.sub_map;
local_start = entry->vme_start;
local_offset = entry->offset;
- vm_map_reference(submap);
- vm_map_unlock(map);
- ret = (vm_map_create_upl(submap,
- local_offset + (offset - local_start),
- upl_size, upl, page_list, count,
- flags));
+ vm_map_reference(submap);
+ vm_map_unlock_read(map);
+ ret = vm_map_create_upl(submap,
+ local_offset + (offset - local_start),
+ upl_size, upl, page_list, count, flags);
vm_map_deallocate(submap);
+
return ret;
}
-
if (sync_cow_data) {
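+		/*
+		 * the object is involved in a copy chain... push any
+		 * dirty pages in its shadow out to the pager so the UPL
+		 * will see consistent data
+		 */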
- if (entry->object.vm_object->shadow
- || entry->object.vm_object->copy) {
-
+ if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
local_object = entry->object.vm_object;
local_start = entry->vme_start;
local_offset = entry->offset;
+
vm_object_reference(local_object);
- vm_map_unlock(map);
-
- if (entry->object.vm_object->shadow &&
- entry->object.vm_object->copy) {
- vm_object_lock_request(
- local_object->shadow,
- (vm_object_offset_t)
- ((offset - local_start) +
- local_offset) +
- local_object->shadow_offset,
- *upl_size, FALSE,
- MEMORY_OBJECT_DATA_SYNC,
- VM_PROT_NO_CHANGE);
+ vm_map_unlock_read(map);
+
+ if (local_object->shadow && local_object->copy) {
+ vm_object_lock_request(
+ local_object->shadow,
+ (vm_object_offset_t)
+ ((offset - local_start) +
+ local_offset) +
+ local_object->shadow_offset,
+ *upl_size, FALSE,
+ MEMORY_OBJECT_DATA_SYNC,
+ VM_PROT_NO_CHANGE);
}
sync_cow_data = FALSE;
vm_object_deallocate(local_object);
+
goto REDISCOVER_ENTRY;
}
}
-
if (force_data_sync) {
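+		/*
+		 * caller asked for UPL_FORCE_DATA_SYNC: flush this
+		 * object's dirty pages to the pager before building
+		 * the UPL
+		 */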
-
local_object = entry->object.vm_object;
local_start = entry->vme_start;
local_offset = entry->offset;
+
vm_object_reference(local_object);
- vm_map_unlock(map);
+ vm_map_unlock_read(map);
vm_object_lock_request(
- local_object,
- (vm_object_offset_t)
- ((offset - local_start) + local_offset),
- (vm_object_size_t)*upl_size, FALSE,
- MEMORY_OBJECT_DATA_SYNC,
- VM_PROT_NO_CHANGE);
+ local_object,
+ (vm_object_offset_t)
+ ((offset - local_start) + local_offset),
+ (vm_object_size_t)*upl_size, FALSE,
+ MEMORY_OBJECT_DATA_SYNC,
+ VM_PROT_NO_CHANGE);
+
force_data_sync = FALSE;
vm_object_deallocate(local_object);
+
goto REDISCOVER_ENTRY;
}
+ if (entry->object.vm_object->private)
+ *flags = UPL_DEV_MEMORY;
+ else
+ *flags = 0;
+
+ if (entry->object.vm_object->phys_contiguous)
+ *flags |= UPL_PHYS_CONTIG;
- if(!(entry->object.vm_object->private)) {
- if(*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
- *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
- if(entry->object.vm_object->phys_contiguous) {
- *flags = UPL_PHYS_CONTIG;
- } else {
- *flags = 0;
- }
- } else {
- *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
- }
local_object = entry->object.vm_object;
local_offset = entry->offset;
local_start = entry->vme_start;
+
vm_object_reference(local_object);
- vm_map_unlock(map);
- if(caller_flags & UPL_SET_IO_WIRE) {
- ret = (vm_object_iopl_request(local_object,
- (vm_object_offset_t)
- ((offset - local_start)
- + local_offset),
- *upl_size,
- upl,
- page_list,
- count,
- caller_flags));
- } else {
- ret = (vm_object_upl_request(local_object,
- (vm_object_offset_t)
- ((offset - local_start)
- + local_offset),
- *upl_size,
- upl,
- page_list,
- count,
- caller_flags));
- }
+ vm_map_unlock_read(map);
+
+ ret = vm_object_iopl_request(local_object,
+ (vm_object_offset_t) ((offset - local_start) + local_offset),
+ *upl_size,
+ upl,
+ page_list,
+ count,
+ caller_flags);
vm_object_deallocate(local_object);
+
return(ret);
}
+ vm_map_unlock_read(map);
- vm_map_unlock(map);
return(KERN_FAILURE);
-
}
/*
vm_map_enter_upl(
vm_map_t map,
upl_t upl,
- vm_map_offset_t *dst_addr)
+ vm_map_offset_t *dst_addr)
{
vm_map_size_t size;
vm_object_offset_t offset;
vm_map_offset_t addr;
vm_page_t m;
kern_return_t kr;
+ int isVectorUPL = 0, curr_upl=0;
+ upl_t vector_upl = NULL;
+ vm_offset_t vector_upl_dst_addr = 0;
+ vm_map_t vector_upl_submap = NULL;
+ upl_offset_t subupl_offset = 0;
+ upl_size_t subupl_size = 0;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
- upl_lock(upl);
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ int mapped=0,valid_upls=0;
+ vector_upl = upl;
- /* check to see if already mapped */
- if(UPL_PAGE_LIST_MAPPED & upl->flags) {
- upl_unlock(upl);
- return KERN_FAILURE;
+ upl_lock(vector_upl);
+ for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
+ if(upl == NULL)
+ continue;
+ valid_upls++;
+ if (UPL_PAGE_LIST_MAPPED & upl->flags)
+ mapped++;
+ }
+
+ if(mapped) {
+ if(mapped != valid_upls)
+				panic("Only %d of the %d sub-upls within the Vector UPL are already mapped\n", mapped, valid_upls);
+ else {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ }
+
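+		/*
+		 * reserve a single contiguous kernel VA range (as a
+		 * submap) big enough for the whole vector UPL; each
+		 * sub-UPL is then entered at a fixed offset within it
+		 */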
+ kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
+ if( kr != KERN_SUCCESS )
+ panic("Vector UPL submap allocation failed\n");
+ map = vector_upl_submap;
+ vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
+ curr_upl=0;
}
+ else
+ upl_lock(upl);
+
+process_upl_to_enter:
+ if(isVectorUPL){
+ if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+ *dst_addr = vector_upl_dst_addr;
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+ if(upl == NULL)
+ goto process_upl_to_enter;
+ vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
+ *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
+ } else {
+ /*
+ * check to see if already mapped
+ */
+ if (UPL_PAGE_LIST_MAPPED & upl->flags) {
+ upl_unlock(upl);
+ return KERN_FAILURE;
+ }
+ }
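+	/*
+	 * if this UPL hasn't already been shadowed, and its pages are
+	 * either busy or not device/I/O-wired/physically-contiguous
+	 * memory, insert private fictitious "alias" pages into the
+	 * UPL's map_object so the kernel mapping doesn't reference
+	 * the original pages directly
+	 */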
+ if ((!(upl->flags & UPL_SHADOWED)) &&
+ ((upl->flags & UPL_HAS_BUSY) ||
+ !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {
- if((!(upl->map_object->pageout)) &&
- !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) ||
- (upl->map_object->phys_contiguous))) {
vm_object_t object;
vm_page_t alias_page;
vm_object_offset_t new_offset;
- int pg_num;
+ unsigned int pg_num;
wpl_array_t lite_list;
- if(upl->flags & UPL_INTERNAL) {
+ if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
- + ((upl->size/PAGE_SIZE)
- * sizeof(upl_page_info_t)));
+ + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
} else {
- lite_list = (wpl_array_t)
- (((uintptr_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
}
object = upl->map_object;
upl->map_object = vm_object_allocate(upl->size);
+
vm_object_lock(upl->map_object);
+
upl->map_object->shadow = object;
upl->map_object->pageout = TRUE;
upl->map_object->can_persist = FALSE;
- upl->map_object->copy_strategy =
- MEMORY_OBJECT_COPY_NONE;
- upl->map_object->shadow_offset =
- upl->offset - object->paging_offset;
+ upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
+ upl->map_object->shadow_offset = upl->offset - object->paging_offset;
upl->map_object->wimg_bits = object->wimg_bits;
offset = upl->map_object->shadow_offset;
new_offset = 0;
size = upl->size;
- vm_object_lock(object);
+ upl->flags |= UPL_SHADOWED;
- while(size) {
- pg_num = (new_offset)/PAGE_SIZE;
- if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
- vm_object_unlock(object);
- VM_PAGE_GRAB_FICTITIOUS(alias_page);
- vm_object_lock(object);
- m = vm_page_lookup(object, offset);
- if (m == VM_PAGE_NULL) {
- panic("vm_upl_map: page missing\n");
- }
+ while (size) {
+ pg_num = (unsigned int) (new_offset / PAGE_SIZE);
+ assert(pg_num == new_offset / PAGE_SIZE);
- vm_object_paging_begin(object);
+ if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
- /*
- * Convert the fictitious page to a private
- * shadow of the real page.
- */
- assert(alias_page->fictitious);
- alias_page->fictitious = FALSE;
- alias_page->private = TRUE;
- alias_page->pageout = TRUE;
- alias_page->phys_page = m->phys_page;
+ VM_PAGE_GRAB_FICTITIOUS(alias_page);
- vm_page_lock_queues();
- vm_page_wire(alias_page);
- vm_page_unlock_queues();
+ vm_object_lock(object);
- /*
- * ENCRYPTED SWAP:
- * The virtual page ("m") has to be wired in some way
- * here or its physical page ("m->phys_page") could
- * be recycled at any time.
- * Assuming this is enforced by the caller, we can't
- * get an encrypted page here. Since the encryption
- * key depends on the VM page's "pager" object and
- * the "paging_offset", we couldn't handle 2 pageable
- * VM pages (with different pagers and paging_offsets)
- * sharing the same physical page: we could end up
- * encrypting with one key (via one VM page) and
- * decrypting with another key (via the alias VM page).
- */
- ASSERT_PAGE_DECRYPTED(m);
+ m = vm_page_lookup(object, offset);
+ if (m == VM_PAGE_NULL) {
+ panic("vm_upl_map: page missing\n");
+ }
- vm_page_insert(alias_page,
- upl->map_object, new_offset);
- assert(!alias_page->wanted);
- alias_page->busy = FALSE;
- alias_page->absent = FALSE;
- }
+ /*
+ * Convert the fictitious page to a private
+ * shadow of the real page.
+ */
+ assert(alias_page->fictitious);
+ alias_page->fictitious = FALSE;
+ alias_page->private = TRUE;
+ alias_page->pageout = TRUE;
+ /*
+ * since m is a page in the upl it must
+ * already be wired or BUSY, so it's
+ * safe to assign the underlying physical
+ * page to the alias
+ */
+ alias_page->phys_page = m->phys_page;
+
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+ vm_page_wire(alias_page);
+ vm_page_unlock_queues();
+
+ /*
+ * ENCRYPTED SWAP:
+ * The virtual page ("m") has to be wired in some way
+ * here or its physical page ("m->phys_page") could
+ * be recycled at any time.
+ * Assuming this is enforced by the caller, we can't
+ * get an encrypted page here. Since the encryption
+ * key depends on the VM page's "pager" object and
+ * the "paging_offset", we couldn't handle 2 pageable
+ * VM pages (with different pagers and paging_offsets)
+ * sharing the same physical page: we could end up
+ * encrypting with one key (via one VM page) and
+ * decrypting with another key (via the alias VM page).
+ */
+ ASSERT_PAGE_DECRYPTED(m);
+
+ vm_page_insert(alias_page, upl->map_object, new_offset);
- size -= PAGE_SIZE;
- offset += PAGE_SIZE_64;
- new_offset += PAGE_SIZE_64;
+ assert(!alias_page->wanted);
+ alias_page->busy = FALSE;
+ alias_page->absent = FALSE;
+ }
+ size -= PAGE_SIZE;
+ offset += PAGE_SIZE_64;
+ new_offset += PAGE_SIZE_64;
}
- vm_object_unlock(object);
vm_object_unlock(upl->map_object);
}
- if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || upl->map_object->phys_contiguous)
- offset = upl->offset - upl->map_object->paging_offset;
- else
+ if (upl->flags & UPL_SHADOWED)
offset = 0;
-
+ else
+ offset = upl->offset - upl->map_object->paging_offset;
size = upl->size;
- vm_object_lock(upl->map_object);
- upl->map_object->ref_count++;
- vm_object_res_reference(upl->map_object);
- vm_object_unlock(upl->map_object);
-
- *dst_addr = 0;
+ vm_object_reference(upl->map_object);
+ if(!isVectorUPL) {
+ *dst_addr = 0;
+ /*
+ * NEED A UPL_MAP ALIAS
+ */
+ kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+ VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
- /* NEED A UPL_MAP ALIAS */
- kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
- VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
- VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
-
- if (kr != KERN_SUCCESS) {
- upl_unlock(upl);
- return(kr);
+ if (kr != KERN_SUCCESS) {
+ upl_unlock(upl);
+ return(kr);
+ }
+ }
+ else {
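+		/*
+		 * sub-UPL of a vector UPL: enter it at its fixed offset
+		 * within the submap reserved earlier
+		 */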
+ kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
+ VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
+ VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
+ if(kr)
+ panic("vm_map_enter failed for a Vector UPL\n");
}
-
vm_object_lock(upl->map_object);
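+	/*
+	 * pre-enter each resident page of the map_object into the
+	 * pmap at its address in the new mapping so the pages are
+	 * immediately accessible without faulting
+	 */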
- for(addr=*dst_addr; size > 0; size-=PAGE_SIZE,addr+=PAGE_SIZE) {
+ for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
m = vm_page_lookup(upl->map_object, offset);
- if(m) {
- unsigned int cache_attr;
- cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+ if (m) {
+ unsigned int cache_attr;
+ cache_attr = ((unsigned int)m->object->wimg_bits) & VM_WIMG_MASK;
+
+ m->pmapped = TRUE;
+
+ /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
+ * but only in kernel space. If this was on a user map,
+ * we'd have to set the wpmapped bit. */
+ /* m->wpmapped = TRUE; */
+ assert(map==kernel_map);
- PMAP_ENTER(map->pmap, addr,
- m, VM_PROT_ALL,
- cache_attr, TRUE);
+ PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, cache_attr, TRUE);
}
- offset+=PAGE_SIZE_64;
+ offset += PAGE_SIZE_64;
}
vm_object_unlock(upl->map_object);
- upl->ref_count++; /* hold a reference for the mapping */
+ /*
+ * hold a reference for the mapping
+ */
+ upl->ref_count++;
upl->flags |= UPL_PAGE_LIST_MAPPED;
- upl->kaddr = *dst_addr;
+ upl->kaddr = (vm_offset_t) *dst_addr;
+ assert(upl->kaddr == *dst_addr);
+
+ if(isVectorUPL)
+ goto process_upl_to_enter;
+
upl_unlock(upl);
+
return KERN_SUCCESS;
}
{
vm_address_t addr;
upl_size_t size;
+ int isVectorUPL = 0, curr_upl = 0;
+ upl_t vector_upl = NULL;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
- upl_lock(upl);
- if(upl->flags & UPL_PAGE_LIST_MAPPED) {
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ int unmapped=0, valid_upls=0;
+ vector_upl = upl;
+ upl_lock(vector_upl);
+ for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
+ if(upl == NULL)
+ continue;
+ valid_upls++;
+ if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
+ unmapped++;
+ }
+
+ if(unmapped) {
+ if(unmapped != valid_upls)
+				panic("%d of the %d sub-upls within the Vector UPL are not mapped\n", unmapped, valid_upls);
+ else {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ }
+ curr_upl=0;
+ }
+ else
+ upl_lock(upl);
+
+process_upl_to_remove:
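+	/*
+	 * for a vector UPL, clear each sub-UPL's mapping state first;
+	 * the single vm_map_remove() of the submap below then tears
+	 * down all of the kernel mappings at once
+	 */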
+ if(isVectorUPL) {
+ if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
+ vm_map_t v_upl_submap;
+ vm_offset_t v_upl_submap_dst_addr;
+ vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);
+
+ vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
+ vm_map_deallocate(v_upl_submap);
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+
+ upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
+ if(upl == NULL)
+ goto process_upl_to_remove;
+ }
+
+ if (upl->flags & UPL_PAGE_LIST_MAPPED) {
addr = upl->kaddr;
size = upl->size;
+
assert(upl->ref_count > 1);
upl->ref_count--; /* removing mapping ref */
+
upl->flags &= ~UPL_PAGE_LIST_MAPPED;
upl->kaddr = (vm_offset_t) 0;
- upl_unlock(upl);
-
- vm_map_remove( map,
+
+ if(!isVectorUPL) {
+ upl_unlock(upl);
+
+ vm_map_remove(map,
vm_map_trunc_page(addr),
vm_map_round_page(addr + size),
VM_MAP_NO_FLAGS);
- return KERN_SUCCESS;
+
+ return KERN_SUCCESS;
+ }
+ else {
+ /*
+ * If it's a Vectored UPL, we'll be removing the entire
+			 * submap anyway, so no need to remove individual UPL
+ * element mappings from within the submap
+ */
+ goto process_upl_to_remove;
+ }
}
upl_unlock(upl);
+
return KERN_FAILURE;
}
+static void
+dw_do_work(
+ vm_object_t object,
+ struct dw *dwp,
+ int dw_count)
+{
+ int j;
+ boolean_t held_as_spin = TRUE;
+
+ /*
+ * pageout_scan takes the vm_page_lock_queues first
+ * then tries for the object lock... to avoid what
+ * is effectively a lock inversion, we'll go to the
+ * trouble of taking them in that same order... otherwise
+ * if this object contains the majority of the pages resident
+ * in the UBC (or a small set of large objects actively being
+ * worked on contain the majority of the pages), we could
+ * cause the pageout_scan thread to 'starve' in its attempt
+ * to find pages to move to the free queue, since it has to
+ * successfully acquire the object lock of any candidate page
+ * before it can steal/clean it.
+ */
+ if (!vm_page_trylockspin_queues()) {
+ vm_object_unlock(object);
+
+ vm_page_lockspin_queues();
+
+ for (j = 0; ; j++) {
+ if (!vm_object_lock_avoid(object) &&
+ _vm_object_lock_try(object))
+ break;
+ vm_page_unlock_queues();
+ mutex_pause(j);
+ vm_page_lockspin_queues();
+ }
+ }
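+	/*
+	 * with both the object lock and the page queues lock held,
+	 * apply each deferred operation encoded in dw_mask
+	 */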
+ for (j = 0; j < dw_count; j++, dwp++) {
+
+ if (dwp->dw_mask & DW_vm_pageout_throttle_up)
+ vm_pageout_throttle_up(dwp->dw_m);
+
+ if (dwp->dw_mask & DW_vm_page_wire)
+ vm_page_wire(dwp->dw_m);
+ else if (dwp->dw_mask & DW_vm_page_unwire) {
+ boolean_t queueit;
+
+ queueit = (dwp->dw_mask & DW_vm_page_free) ? FALSE : TRUE;
+
+ vm_page_unwire(dwp->dw_m, queueit);
+ }
+ if (dwp->dw_mask & DW_vm_page_free) {
+ if (held_as_spin == TRUE) {
+ vm_page_lockconvert_queues();
+ held_as_spin = FALSE;
+ }
+ vm_page_free(dwp->dw_m);
+ } else {
+ if (dwp->dw_mask & DW_vm_page_deactivate_internal)
+ vm_page_deactivate_internal(dwp->dw_m, FALSE);
+ else if (dwp->dw_mask & DW_vm_page_activate)
+ vm_page_activate(dwp->dw_m);
+ else if (dwp->dw_mask & DW_vm_page_speculate)
+ vm_page_speculate(dwp->dw_m, TRUE);
+ else if (dwp->dw_mask & DW_vm_page_lru)
+ vm_page_lru(dwp->dw_m);
+
+ if (dwp->dw_mask & DW_set_reference)
+ dwp->dw_m->reference = TRUE;
+ else if (dwp->dw_mask & DW_clear_reference)
+ dwp->dw_m->reference = FALSE;
+
+ if (dwp->dw_mask & DW_clear_busy)
+ dwp->dw_m->busy = FALSE;
+
+ if (dwp->dw_mask & DW_PAGE_WAKEUP)
+ PAGE_WAKEUP(dwp->dw_m);
+ }
+ }
+ vm_page_unlock_queues();
+}
+
+
+
kern_return_t
upl_commit_range(
upl_t upl,
mach_msg_type_number_t count,
boolean_t *empty)
{
- upl_size_t xfer_size = size;
+ upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
- vm_object_t object = upl->map_object;
+ vm_object_t object;
vm_object_offset_t target_offset;
+ upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
- int delayed_unlock = 0;
int clear_refmod = 0;
- boolean_t shadow_internal;
+ int pgpgout_count = 0;
+ struct dw dw_array[DELAYED_WORK_LIMIT];
+ struct dw *dwp;
+ int dw_count, isVectorUPL = 0;
+ upl_t vector_upl = NULL;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
-
if (count == 0)
page_list = NULL;
- if (object->pageout) {
- shadow_object = object->shadow;
- } else {
- shadow_object = object;
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ vector_upl = upl;
+ upl_lock(vector_upl);
}
+ else
+ upl_lock(upl);
- upl_lock(upl);
+process_upl_to_commit:
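+	/*
+	 * for a vector UPL, peel off and commit one sub-UPL at a
+	 * time; the loop ends once the caller's offset/size range
+	 * has been fully consumed
+	 */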
- if (upl->flags & UPL_ACCESS_BLOCKED) {
- /*
- * We used this UPL to block access to the pages by marking
- * them "busy". Now we need to clear the "busy" bit to allow
- * access to these pages again.
- */
- flags |= UPL_COMMIT_ALLOW_ACCESS;
+ if(isVectorUPL) {
+ size = subupl_size;
+ offset = subupl_offset;
+ if(size == 0) {
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+ upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+ if(upl == NULL) {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
+ subupl_size -= size;
+ subupl_offset += size;
}
- if (upl->flags & UPL_CLEAR_DIRTY)
- flags |= UPL_COMMIT_CLEAR_DIRTY;
+#if UPL_DEBUG
+ if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+ (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+ upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+ upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
- if (upl->flags & UPL_DEVICE_MEMORY) {
+ upl->upl_commit_index++;
+ }
+#endif
+ if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
- } else if ((offset + size) > upl->size) {
- upl_unlock(upl);
+ else if ((offset + size) <= upl->size)
+ xfer_size = size;
+ else {
+ if(!isVectorUPL)
+ upl_unlock(upl);
+ else {
+ upl_unlock(vector_upl);
+ }
return KERN_FAILURE;
}
+ if (upl->flags & UPL_CLEAR_DIRTY)
+ flags |= UPL_COMMIT_CLEAR_DIRTY;
- if (upl->flags & UPL_INTERNAL) {
- lite_list = (wpl_array_t)
- ((((uintptr_t)upl) + sizeof(struct upl))
- + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
- } else {
- lite_list = (wpl_array_t)
- (((uintptr_t)upl) + sizeof(struct upl));
- }
- if (object != shadow_object)
- vm_object_lock(object);
- vm_object_lock(shadow_object);
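+	/*
+	 * an internal UPL keeps its lite bitmap just past the
+	 * upl_page_info array that follows the upl structure; an
+	 * external UPL keeps it immediately after the structure
+	 */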
+ if (upl->flags & UPL_INTERNAL)
+ lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
+ + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
+ else
+ lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
- shadow_internal = shadow_object->internal;
+ object = upl->map_object;
+ if (upl->flags & UPL_SHADOWED) {
+ vm_object_lock(object);
+ shadow_object = object->shadow;
+ } else {
+ shadow_object = object;
+ }
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
+ if (upl->flags & UPL_KERNEL_OBJECT)
+ vm_object_lock_shared(shadow_object);
+ else
+ vm_object_lock(shadow_object);
+
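+	/*
+	 * if the UPL was created with access blocked, re-enable
+	 * access to the object and wake anyone waiting on it
+	 */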
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
+ assert(shadow_object->blocked_access);
+ shadow_object->blocked_access = FALSE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
+ }
+
+ if (shadow_object->code_signed) {
+ /*
+ * CODE SIGNING:
+ * If the object is code-signed, do not let this UPL tell
+ * us if the pages are valid or not. Let the pages be
+ * validated by VM the normal way (when they get mapped or
+ * copied).
+ */
+ flags &= ~UPL_COMMIT_CS_VALIDATED;
+ }
+ if (! page_list) {
+ /*
+ * No page list to get the code-signing info from !?
+ */
+ flags &= ~UPL_COMMIT_CS_VALIDATED;
+ }
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+
while (xfer_size) {
- vm_page_t t,m;
- upl_page_info_t *p;
+ vm_page_t t, m;
+
+ dwp->dw_mask = 0;
+ clear_refmod = 0;
m = VM_PAGE_NULL;
if (upl->flags & UPL_LITE) {
- int pg_num;
+ unsigned int pg_num;
- pg_num = target_offset/PAGE_SIZE;
+ pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+ assert(pg_num == target_offset/PAGE_SIZE);
if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
- m = vm_page_lookup(shadow_object,
- target_offset + (upl->offset -
- shadow_object->paging_offset));
+
+ if (!(upl->flags & UPL_KERNEL_OBJECT))
+ m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
}
}
- if (object->pageout) {
- if ((t = vm_page_lookup(object, target_offset)) != NULL) {
+ if (upl->flags & UPL_SHADOWED) {
+ if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+
t->pageout = FALSE;
- if (delayed_unlock) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
VM_PAGE_FREE(t);
- if (m == NULL) {
- m = vm_page_lookup(
- shadow_object,
- target_offset +
- object->shadow_offset);
- }
- if (m != VM_PAGE_NULL)
- vm_object_paging_end(m->object);
+ if (m == VM_PAGE_NULL)
+ m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
}
}
- if (m != VM_PAGE_NULL) {
+ if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
+ goto commit_next_page;
- clear_refmod = 0;
-
- if (upl->flags & UPL_IO_WIRE) {
-
- if (delayed_unlock == 0)
- vm_page_lock_queues();
-
- vm_page_unwire(m);
+ if (flags & UPL_COMMIT_CS_VALIDATED) {
+ /*
+ * CODE SIGNING:
+ * Set the code signing bits according to
+ * what the UPL says they should be.
+ */
+ m->cs_validated = page_list[entry].cs_validated;
+ m->cs_tainted = page_list[entry].cs_tainted;
+ }
+ if (upl->flags & UPL_IO_WIRE) {
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
- if (page_list) {
+ if (page_list)
page_list[entry].phys_addr = 0;
- }
- if (flags & UPL_COMMIT_SET_DIRTY) {
+
+ if (flags & UPL_COMMIT_SET_DIRTY)
m->dirty = TRUE;
- } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+ else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
+
+ if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+ m->cs_validated && !m->cs_tainted) {
+ /*
+ * CODE SIGNING:
+ * This page is no longer dirty
+ * but could have been modified,
+ * so it will need to be
+ * re-validated.
+ */
+ m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(m->phys_page);
+ }
clear_refmod |= VM_MEM_MODIFIED;
- }
- if (flags & UPL_COMMIT_INACTIVATE) {
- m->reference = FALSE;
+ }
+ if (flags & UPL_COMMIT_INACTIVATE) {
+ dwp->dw_mask |= DW_vm_page_deactivate_internal;
clear_refmod |= VM_MEM_REFERENCED;
- vm_page_deactivate(m);
}
- if (clear_refmod)
- pmap_clear_refmod(m->phys_page, clear_refmod);
-
- if (flags & UPL_COMMIT_ALLOW_ACCESS) {
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
/*
* We blocked access to the pages in this UPL.
* Clear the "busy" bit and wake up any waiter
* for this page.
*/
- PAGE_WAKEUP_DONE(m);
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
+ if (m->absent) {
+ if (flags & UPL_COMMIT_FREE_ABSENT)
+ dwp->dw_mask |= DW_vm_page_free;
+ else {
+ m->absent = FALSE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ }
+ } else
+ dwp->dw_mask |= DW_vm_page_unwire;
- target_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- entry++;
- continue;
- }
- if (delayed_unlock == 0)
- vm_page_lock_queues();
- /*
- * make sure to clear the hardware
- * modify or reference bits before
- * releasing the BUSY bit on this page
- * otherwise we risk losing a legitimate
- * change of state
- */
- if (flags & UPL_COMMIT_CLEAR_DIRTY) {
+ goto commit_next_page;
+ }
+ /*
+ * make sure to clear the hardware
+ * modify or reference bits before
+ * releasing the BUSY bit on this page
+ * otherwise we risk losing a legitimate
+ * change of state
+ */
+ if (flags & UPL_COMMIT_CLEAR_DIRTY) {
m->dirty = FALSE;
- clear_refmod |= VM_MEM_MODIFIED;
- }
- if (flags & UPL_COMMIT_INACTIVATE)
- clear_refmod |= VM_MEM_REFERENCED;
- if (clear_refmod)
- pmap_clear_refmod(m->phys_page, clear_refmod);
+ if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+ m->cs_validated && !m->cs_tainted) {
+ /*
+ * CODE SIGNING:
+ * This page is no longer dirty
+ * but could have been modified,
+ * so it will need to be
+ * re-validated.
+ */
+ m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(m->phys_page);
+ }
+ clear_refmod |= VM_MEM_MODIFIED;
+ }
+ if (page_list) {
+ upl_page_info_t *p;
- if (page_list) {
p = &(page_list[entry]);
- if(p->phys_addr && p->pageout && !m->pageout) {
+
+ if (p->phys_addr && p->pageout && !m->pageout) {
m->busy = TRUE;
m->pageout = TRUE;
- vm_page_wire(m);
- } else if (page_list[entry].phys_addr &&
- !p->pageout && m->pageout &&
- !m->dump_cleaning) {
+
+ dwp->dw_mask |= DW_vm_page_wire;
+
+ } else if (p->phys_addr &&
+ !p->pageout && m->pageout &&
+ !m->dump_cleaning) {
m->pageout = FALSE;
m->absent = FALSE;
m->overwriting = FALSE;
- vm_page_unwire(m);
- PAGE_WAKEUP_DONE(m);
+
+ dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
}
page_list[entry].phys_addr = 0;
- }
- m->dump_cleaning = FALSE;
- if(m->laundry) {
- vm_pageout_throttle_up(m);
- }
- if(m->pageout) {
- m->cleaning = FALSE;
- m->pageout = FALSE;
+ }
+ m->dump_cleaning = FALSE;
+
+ if (m->laundry)
+ dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+ if (m->pageout) {
+ m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
+ m->pageout = FALSE;
#if MACH_CLUSTER_STATS
- if (m->wanted) vm_pageout_target_collisions++;
+ if (m->wanted) vm_pageout_target_collisions++;
#endif
- if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)
- m->dirty = TRUE;
- else
- m->dirty = FALSE;
+ m->dirty = FALSE;
+
+ if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+ m->cs_validated && !m->cs_tainted) {
+ /*
+ * CODE SIGNING:
+ * This page is no longer dirty
+ * but could have been modified,
+ * so it will need to be
+ * re-validated.
+ */
+ m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(m->phys_page);
+ }
+
+ if ((flags & UPL_COMMIT_SET_DIRTY) ||
+ (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))
+ m->dirty = TRUE;
+
+ if (m->dirty) {
+ /*
+ * page was re-dirtied after we started
+ * the pageout... reactivate it since
+ * we don't know whether the on-disk
+ * copy matches what is now in memory
+ */
+ dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
- if(m->dirty) {
- vm_page_unwire(m);/* reactivates */
+ if (upl->flags & UPL_PAGEOUT) {
+ CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
+ VM_STAT_INCR(reactivations);
+ DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
+ }
+ } else {
+ /*
+ * page has been successfully cleaned
+ * go ahead and free it for other use
+ */
- if (upl->flags & UPL_PAGEOUT) {
- CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
- VM_STAT(reactivations++);
- }
- PAGE_WAKEUP_DONE(m);
- } else {
- vm_page_free(m);/* clears busy, etc. */
+ if (m->object->internal) {
+ DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
+ } else {
+ DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
+ }
+ dwp->dw_mask |= DW_vm_page_free;
- if (upl->flags & UPL_PAGEOUT) {
- CLUSTER_STAT(vm_pageout_target_page_freed++;)
-
- if (page_list[entry].dirty)
- VM_STAT(pageouts++);
- }
- }
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
- target_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- entry++;
- continue;
- }
+ if (upl->flags & UPL_PAGEOUT) {
+ CLUSTER_STAT(vm_pageout_target_page_freed++;)
+
+ if (page_list[entry].dirty) {
+ VM_STAT_INCR(pageouts);
+ DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
+ pgpgout_count++;
+ }
+ }
+ }
+ goto commit_next_page;
+ }
#if MACH_CLUSTER_STATS
- m->dirty = pmap_is_modified(m->phys_page);
+ if (m->wpmapped)
+ m->dirty = pmap_is_modified(m->phys_page);
- if (m->dirty) vm_pageout_cluster_dirtied++;
- else vm_pageout_cluster_cleaned++;
- if (m->wanted) vm_pageout_cluster_collisions++;
-#else
- m->dirty = 0;
+ if (m->dirty) vm_pageout_cluster_dirtied++;
+ else vm_pageout_cluster_cleaned++;
+ if (m->wanted) vm_pageout_cluster_collisions++;
#endif
+ m->dirty = FALSE;
- if((m->busy) && (m->cleaning)) {
- /* the request_page_list case */
- if(m->absent) {
- m->absent = FALSE;
- if(shadow_object->absent_count == 1)
- vm_object_absent_release(shadow_object);
- else
- shadow_object->absent_count--;
- }
+ if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
+ m->cs_validated && !m->cs_tainted) {
+ /*
+ * CODE SIGNING:
+ * This page is no longer dirty
+ * but could have been modified,
+ * so it will need to be
+ * re-validated.
+ */
+ m->cs_validated = FALSE;
+#if DEVELOPMENT || DEBUG
+ vm_cs_validated_resets++;
+#endif
+ pmap_disconnect(m->phys_page);
+ }
+
+ if ((m->busy) && (m->cleaning)) {
+ /*
+ * the request_page_list case
+ */
+ m->absent = FALSE;
+ m->overwriting = FALSE;
+
+ dwp->dw_mask |= DW_clear_busy;
+
+ } else if (m->overwriting) {
+ /*
+ * alternate request page list, write to
+ * page_list case. Occurs when the original
+ * page was wired at the time of the list
+ * request
+ */
+ assert(VM_PAGE_WIRED(m));
m->overwriting = FALSE;
- m->busy = FALSE;
- m->dirty = FALSE;
- } else if (m->overwriting) {
- /* alternate request page list, write to
- * page_list case. Occurs when the original
- * page was wired at the time of the list
- * request */
- assert(m->wire_count != 0);
- vm_page_unwire(m);/* reactivates */
- m->overwriting = FALSE;
- }
- m->cleaning = FALSE;
- /* It is a part of the semantic of COPYOUT_FROM */
- /* UPLs that a commit implies cache sync */
- /* between the vm page and the backing store */
- /* this can be used to strip the precious bit */
- /* as well as clean */
- if (upl->flags & UPL_PAGE_SYNC_DONE)
- m->precious = FALSE;
+ dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
+ }
+ m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
+
+ /*
+		 * It is part of the semantics of COPYOUT_FROM
+		 * UPLs that a commit implies a cache sync
+		 * between the vm page and the backing store;
+		 * this can be used to strip the precious bit
+		 * as well as clean the page
+ */
+ if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
+ m->precious = FALSE;
- if (flags & UPL_COMMIT_SET_DIRTY)
+ if (flags & UPL_COMMIT_SET_DIRTY)
m->dirty = TRUE;
- if (flags & UPL_COMMIT_INACTIVATE) {
- m->reference = FALSE;
- vm_page_deactivate(m);
- } else if (!m->active && !m->inactive) {
- if (m->reference)
- vm_page_activate(m);
- else
- vm_page_deactivate(m);
- }
+ if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
+ dwp->dw_mask |= DW_vm_page_deactivate_internal;
+ clear_refmod |= VM_MEM_REFERENCED;
- if (flags & UPL_COMMIT_ALLOW_ACCESS) {
- /*
- * We blocked access to the pages in this URL.
- * Clear the "busy" bit on this page before we
- * wake up any waiter.
- */
- m->busy = FALSE;
- }
+ } else if (!m->active && !m->inactive && !m->speculative) {
+
+ if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
+ dwp->dw_mask |= DW_vm_page_speculate;
+ else if (m->reference)
+ dwp->dw_mask |= DW_vm_page_activate;
+ else {
+ dwp->dw_mask |= DW_vm_page_deactivate_internal;
+ clear_refmod |= VM_MEM_REFERENCED;
+ }
+ }
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
+ /*
+			 * We blocked access to the pages in this UPL.
+ * Clear the "busy" bit on this page before we
+ * wake up any waiter.
+ */
+ dwp->dw_mask |= DW_clear_busy;
+ }
+ /*
+ * Wakeup any thread waiting for the page to be un-cleaning.
+ */
+ dwp->dw_mask |= DW_PAGE_WAKEUP;
+
+commit_next_page:
+ if (clear_refmod)
+ pmap_clear_refmod(m->phys_page, clear_refmod);
+
+ target_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
+ entry++;
- /*
- * Wakeup any thread waiting for the page to be un-cleaning.
- */
- PAGE_WAKEUP(m);
+ if (dwp->dw_mask) {
+ if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+ if (m->busy == FALSE) {
+ /*
+ * dw_do_work may need to drop the object lock
+ * if it does, we need the pages it's looking at to
+ * be held stable via the busy bit.
+ */
+ m->busy = TRUE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ }
+ dwp->dw_m = m;
+ dwp++;
+ dw_count++;
+
+ if (dw_count >= DELAYED_WORK_LIMIT) {
+ dw_do_work(shadow_object, &dw_array[0], dw_count);
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }
+ } else {
+ if (dwp->dw_mask & DW_clear_busy)
+ m->busy = FALSE;
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
+ if (dwp->dw_mask & DW_PAGE_WAKEUP)
+ PAGE_WAKEUP(m);
+ }
}
- target_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- entry++;
}
- if (delayed_unlock)
- vm_page_unlock_queues();
+ if (dw_count)
+ dw_do_work(shadow_object, &dw_array[0], dw_count);
occupied = 1;
} else if (upl->flags & UPL_LITE) {
int pg_num;
int i;
+
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
occupied = 0;
- for(i= 0; i<pg_num; i++) {
- if(lite_list[i] != 0) {
+
+ for (i = 0; i < pg_num; i++) {
+ if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
} else {
- if(queue_empty(&upl->map_object->memq)) {
+ if (queue_empty(&upl->map_object->memq))
occupied = 0;
- }
}
-
- if(occupied == 0) {
- if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) {
+ if (occupied == 0) {
+ /*
+ * If this UPL element belongs to a Vector UPL and is
+ * empty, then this is the right function to deallocate
+		 * it. So go ahead and set the *empty variable. The flag
+		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view,
+		 * should be considered relevant for the Vector UPL and not
+ * the internal UPLs.
+ */
+ if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
*empty = TRUE;
+
+ if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+ /*
+ * this is not a paging object
+ * so we need to drop the paging reference
+ * that was taken when we created the UPL
+ * against this object
+ */
+ vm_object_activity_end(shadow_object);
+ } else {
+ /*
+			 * we donated the paging reference to
+ * the map object... vm_pageout_object_terminate
+ * will drop this reference
+ */
}
- if(object == shadow_object)
- vm_object_paging_end(shadow_object);
}
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
- upl_unlock(upl);
+
+ if(!isVectorUPL)
+ upl_unlock(upl);
+ else {
+ /*
+ * If we completed our operations on an UPL that is
+ * part of a Vectored UPL and if empty is TRUE, then
+ * we should go ahead and deallocate this UPL element.
+ * Then we check if this was the last of the UPL elements
+ * within that Vectored UPL. If so, set empty to TRUE
+ * so that in ubc_upl_commit_range or ubc_upl_commit, we
+ * can go ahead and deallocate the Vector UPL too.
+ */
+ if(*empty==TRUE) {
+ *empty = vector_upl_set_subupl(vector_upl, upl, 0);
+ upl_deallocate(upl);
+ }
+ goto process_upl_to_commit;
+ }
+
+ if (pgpgout_count) {
+ DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
+ }
return KERN_SUCCESS;
}
int error,
boolean_t *empty)
{
- upl_size_t xfer_size = size;
+ upl_size_t xfer_size, subupl_size = size;
vm_object_t shadow_object;
- vm_object_t object = upl->map_object;
+ vm_object_t object;
vm_object_offset_t target_offset;
+ upl_offset_t subupl_offset = offset;
int entry;
wpl_array_t lite_list;
int occupied;
- boolean_t shadow_internal;
+ struct dw dw_array[DELAYED_WORK_LIMIT];
+ struct dw *dwp;
+ int dw_count, isVectorUPL = 0;
+ upl_t vector_upl = NULL;
*empty = FALSE;
if (upl == UPL_NULL)
return KERN_INVALID_ARGUMENT;
- if (upl->flags & UPL_IO_WIRE) {
- return upl_commit_range(upl,
- offset, size, 0,
- NULL, 0, empty);
- }
+ if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
+ return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);
- if(object->pageout) {
- shadow_object = object->shadow;
- } else {
- shadow_object = object;
+ if((isVectorUPL = vector_upl_is_valid(upl))) {
+ vector_upl = upl;
+ upl_lock(vector_upl);
+ }
+ else
+ upl_lock(upl);
+
+process_upl_to_abort:
+ if(isVectorUPL) {
+ size = subupl_size;
+ offset = subupl_offset;
+ if(size == 0) {
+ upl_unlock(vector_upl);
+ return KERN_SUCCESS;
+ }
+ upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
+ if(upl == NULL) {
+ upl_unlock(vector_upl);
+ return KERN_FAILURE;
+ }
+ subupl_size -= size;
+ subupl_offset += size;
}
- upl_lock(upl);
- if(upl->flags & UPL_DEVICE_MEMORY) {
+ *empty = FALSE;
+
+#if UPL_DEBUG
+ if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
+ (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);
+
+ upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
+ upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
+ upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;
+
+ upl->upl_commit_index++;
+ }
+#endif
+ if (upl->flags & UPL_DEVICE_MEMORY)
xfer_size = 0;
- } else if ((offset + size) > upl->size) {
- upl_unlock(upl);
+ else if ((offset + size) <= upl->size)
+ xfer_size = size;
+ else {
+ if(!isVectorUPL)
+ upl_unlock(upl);
+ else {
+ upl_unlock(vector_upl);
+ }
+
return KERN_FAILURE;
}
- if (object != shadow_object)
- vm_object_lock(object);
- vm_object_lock(shadow_object);
-
- shadow_internal = shadow_object->internal;
-
- if(upl->flags & UPL_INTERNAL) {
+ if (upl->flags & UPL_INTERNAL) {
lite_list = (wpl_array_t)
((((uintptr_t)upl) + sizeof(struct upl))
+ ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
lite_list = (wpl_array_t)
(((uintptr_t)upl) + sizeof(struct upl));
}
+ object = upl->map_object;
+
+ if (upl->flags & UPL_SHADOWED) {
+ vm_object_lock(object);
+ shadow_object = object->shadow;
+ } else
+ shadow_object = object;
entry = offset/PAGE_SIZE;
target_offset = (vm_object_offset_t)offset;
- while(xfer_size) {
- vm_page_t t,m;
-
- m = VM_PAGE_NULL;
- if(upl->flags & UPL_LITE) {
- int pg_num;
- pg_num = target_offset/PAGE_SIZE;
- if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
- lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
- m = vm_page_lookup(shadow_object,
- target_offset + (upl->offset -
- shadow_object->paging_offset));
- }
- }
- if(object->pageout) {
- if ((t = vm_page_lookup(object, target_offset))
- != NULL) {
- t->pageout = FALSE;
- VM_PAGE_FREE(t);
- if(m == NULL) {
- m = vm_page_lookup(
- shadow_object,
- target_offset +
- object->shadow_offset);
- }
- if(m != VM_PAGE_NULL)
- vm_object_paging_end(m->object);
- }
- }
- if(m != VM_PAGE_NULL) {
- vm_page_lock_queues();
- if(m->absent) {
- boolean_t must_free = TRUE;
-
- /* COPYOUT = FALSE case */
- /* check for error conditions which must */
- /* be passed back to the pages customer */
- if(error & UPL_ABORT_RESTART) {
- m->restart = TRUE;
- m->absent = FALSE;
- vm_object_absent_release(m->object);
- m->page_error = KERN_MEMORY_ERROR;
- m->error = TRUE;
- must_free = FALSE;
- } else if(error & UPL_ABORT_UNAVAILABLE) {
- m->restart = FALSE;
- m->unusual = TRUE;
- must_free = FALSE;
- } else if(error & UPL_ABORT_ERROR) {
- m->restart = FALSE;
- m->absent = FALSE;
- vm_object_absent_release(m->object);
- m->page_error = KERN_MEMORY_ERROR;
- m->error = TRUE;
- must_free = FALSE;
- }
-
- /*
- * ENCRYPTED SWAP:
- * If the page was already encrypted,
- * we don't really need to decrypt it
- * now. It will get decrypted later,
- * on demand, as soon as someone needs
- * to access its contents.
- */
-
- m->cleaning = FALSE;
- m->overwriting = FALSE;
- PAGE_WAKEUP_DONE(m);
-
- if (must_free == TRUE) {
- vm_page_free(m);
- } else {
- vm_page_activate(m);
- }
- vm_page_unlock_queues();
-
- target_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- entry++;
- continue;
- }
- /*
- * Handle the trusted pager throttle.
- */
- if (m->laundry) {
- vm_pageout_throttle_up(m);
- }
- if(m->pageout) {
- assert(m->busy);
- assert(m->wire_count == 1);
- m->pageout = FALSE;
- vm_page_unwire(m);
- }
- m->dump_cleaning = FALSE;
- m->cleaning = FALSE;
- m->overwriting = FALSE;
-#if MACH_PAGEMAP
- vm_external_state_clr(
- m->object->existence_map, m->offset);
-#endif /* MACH_PAGEMAP */
- if(error & UPL_ABORT_DUMP_PAGES) {
- vm_page_free(m);
- pmap_disconnect(m->phys_page);
- } else {
- PAGE_WAKEUP_DONE(m);
- }
- vm_page_unlock_queues();
- }
- target_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
- entry++;
- }
- occupied = 1;
- if (upl->flags & UPL_DEVICE_MEMORY) {
- occupied = 0;
- } else if (upl->flags & UPL_LITE) {
- int pg_num;
- int i;
- pg_num = upl->size/PAGE_SIZE;
- pg_num = (pg_num + 31) >> 5;
- occupied = 0;
- for(i= 0; i<pg_num; i++) {
- if(lite_list[i] != 0) {
- occupied = 1;
- break;
- }
- }
- } else {
- if(queue_empty(&upl->map_object->memq)) {
- occupied = 0;
- }
- }
-
- if(occupied == 0) {
- if(upl->flags & UPL_COMMIT_NOTIFY_EMPTY) {
- *empty = TRUE;
- }
- if(object == shadow_object)
- vm_object_paging_end(shadow_object);
- }
- vm_object_unlock(shadow_object);
- if (object != shadow_object)
- vm_object_unlock(object);
-
- upl_unlock(upl);
-
- return KERN_SUCCESS;
-}
-
-kern_return_t
-upl_abort(
- upl_t upl,
- int error)
-{
- vm_object_t object = NULL;
- vm_object_t shadow_object = NULL;
- vm_object_offset_t offset;
- vm_object_offset_t shadow_offset;
- vm_object_offset_t target_offset;
- upl_size_t i;
- wpl_array_t lite_list;
- vm_page_t t,m;
- int occupied;
- boolean_t shadow_internal;
- if (upl == UPL_NULL)
- return KERN_INVALID_ARGUMENT;
+ if (upl->flags & UPL_KERNEL_OBJECT)
+ vm_object_lock_shared(shadow_object);
+ else
+ vm_object_lock(shadow_object);
- if (upl->flags & UPL_IO_WIRE) {
- boolean_t empty;
- return upl_commit_range(upl,
- 0, upl->size, 0,
- NULL, 0, &empty);
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
+ assert(shadow_object->blocked_access);
+ shadow_object->blocked_access = FALSE;
+ vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
}
- upl_lock(upl);
- if(upl->flags & UPL_DEVICE_MEMORY) {
- upl_unlock(upl);
- return KERN_SUCCESS;
- }
+ dwp = &dw_array[0];
+ dw_count = 0;
- object = upl->map_object;
+ if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
+ panic("upl_abort_range: kernel_object being DUMPED");
- if (object == NULL) {
- panic("upl_abort: upl object is not backed by an object");
- upl_unlock(upl);
- return KERN_INVALID_ARGUMENT;
- }
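+	/*
+	 * walk the aborted range a page at a time, undoing the
+	 * cleaning/pageout state set up at UPL creation and queueing
+	 * any page-queue work for dw_do_work()
+	 */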
+ while (xfer_size) {
+ vm_page_t t, m;
- if(object->pageout) {
- shadow_object = object->shadow;
- shadow_offset = object->shadow_offset;
- } else {
- shadow_object = object;
- shadow_offset = upl->offset - object->paging_offset;
- }
+ dwp->dw_mask = 0;
- if(upl->flags & UPL_INTERNAL) {
- lite_list = (wpl_array_t)
- ((((uintptr_t)upl) + sizeof(struct upl))
- + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
- } else {
- lite_list = (wpl_array_t)
- (((uintptr_t)upl) + sizeof(struct upl));
- }
- offset = 0;
+ m = VM_PAGE_NULL;
- if (object != shadow_object)
- vm_object_lock(object);
- vm_object_lock(shadow_object);
+ if (upl->flags & UPL_LITE) {
+ unsigned int pg_num;
- shadow_internal = shadow_object->internal;
+ pg_num = (unsigned int) (target_offset/PAGE_SIZE);
+ assert(pg_num == target_offset/PAGE_SIZE);
+
- for(i = 0; i<(upl->size); i+=PAGE_SIZE, offset += PAGE_SIZE_64) {
- m = VM_PAGE_NULL;
- target_offset = offset + shadow_offset;
- if(upl->flags & UPL_LITE) {
- int pg_num;
- pg_num = offset/PAGE_SIZE;
- if(lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
+ if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));
- m = vm_page_lookup(
- shadow_object, target_offset);
+
+ if ( !(upl->flags & UPL_KERNEL_OBJECT))
+ m = vm_page_lookup(shadow_object, target_offset +
+ (upl->offset - shadow_object->paging_offset));
}
}
- if(object->pageout) {
- if ((t = vm_page_lookup(object, offset)) != NULL) {
- t->pageout = FALSE;
+ if (upl->flags & UPL_SHADOWED) {
+ if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
+ t->pageout = FALSE;
+
VM_PAGE_FREE(t);
- if(m == NULL) {
- m = vm_page_lookup(
- shadow_object, target_offset);
- }
- if(m != VM_PAGE_NULL)
- vm_object_paging_end(m->object);
+
+ if (m == VM_PAGE_NULL)
+ m = vm_page_lookup(shadow_object, target_offset + object->shadow_offset);
}
}
- if(m != VM_PAGE_NULL) {
- vm_page_lock_queues();
- if(m->absent) {
+ if ((upl->flags & UPL_KERNEL_OBJECT))
+ goto abort_next_page;
+
+ if (m != VM_PAGE_NULL) {
+
+ if (m->absent) {
boolean_t must_free = TRUE;
- /* COPYOUT = FALSE case */
- /* check for error conditions which must */
- /* be passed back to the pages customer */
- if(error & UPL_ABORT_RESTART) {
+ m->clustered = FALSE;
+ /*
+ * COPYOUT = FALSE case
+ * check for error conditions which must
+				 * be passed back to the page's customer
+ */
+ if (error & UPL_ABORT_RESTART) {
m->restart = TRUE;
m->absent = FALSE;
- vm_object_absent_release(m->object);
- m->page_error = KERN_MEMORY_ERROR;
- m->error = TRUE;
+ m->unusual = TRUE;
must_free = FALSE;
- } else if(error & UPL_ABORT_UNAVAILABLE) {
+ } else if (error & UPL_ABORT_UNAVAILABLE) {
m->restart = FALSE;
m->unusual = TRUE;
must_free = FALSE;
- } else if(error & UPL_ABORT_ERROR) {
+ } else if (error & UPL_ABORT_ERROR) {
m->restart = FALSE;
m->absent = FALSE;
- vm_object_absent_release(m->object);
- m->page_error = KERN_MEMORY_ERROR;
m->error = TRUE;
+ m->unusual = TRUE;
must_free = FALSE;
}
*/
m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
m->overwriting = FALSE;
- PAGE_WAKEUP_DONE(m);
- if (must_free == TRUE) {
- vm_page_free(m);
- } else {
- vm_page_activate(m);
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+
+ if (must_free == TRUE)
+ dwp->dw_mask |= DW_vm_page_free;
+ else
+ dwp->dw_mask |= DW_vm_page_activate;
+ } else {
+ /*
+ * Handle the trusted pager throttle.
+ */
+ if (m->laundry)
+ dwp->dw_mask |= DW_vm_pageout_throttle_up;
+
+ if (m->pageout) {
+ assert(m->busy);
+ assert(m->wire_count == 1);
+ m->pageout = FALSE;
+
+ dwp->dw_mask |= DW_vm_page_unwire;
}
- vm_page_unlock_queues();
- continue;
- }
- /*
- * Handle the trusted pager throttle.
- */
- if (m->laundry) {
- vm_pageout_throttle_up(m);
- }
- if(m->pageout) {
- assert(m->busy);
- assert(m->wire_count == 1);
- m->pageout = FALSE;
- vm_page_unwire(m);
- }
- m->dump_cleaning = FALSE;
- m->cleaning = FALSE;
- m->overwriting = FALSE;
+ m->dump_cleaning = FALSE;
+ m->cleaning = FALSE;
+ m->encrypted_cleaning = FALSE;
+ m->overwriting = FALSE;
#if MACH_PAGEMAP
- vm_external_state_clr(
- m->object->existence_map, m->offset);
+ vm_external_state_clr(m->object->existence_map, m->offset);
#endif /* MACH_PAGEMAP */
- if(error & UPL_ABORT_DUMP_PAGES) {
- vm_page_free(m);
- pmap_disconnect(m->phys_page);
+ if (error & UPL_ABORT_DUMP_PAGES) {
+ pmap_disconnect(m->phys_page);
+
+ dwp->dw_mask |= DW_vm_page_free;
+ } else {
+ if (error & UPL_ABORT_REFERENCE) {
+ /*
+					 * we've been told to explicitly
+					 * reference this page... for
+					 * file I/O, this is done by
+					 * implementing an LRU on the inactive queue
+ */
+ dwp->dw_mask |= DW_vm_page_lru;
+ }
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ }
+ }
+ }
+abort_next_page:
+ target_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
+ entry++;
+
+ if (dwp->dw_mask) {
+ if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
+ if (m->busy == FALSE) {
+ /*
+ * dw_do_work may need to drop the object lock
+ * if it does, we need the pages it's looking at to
+ * be held stable via the busy bit.
+ */
+ m->busy = TRUE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
+ }
+ dwp->dw_m = m;
+ dwp++;
+ dw_count++;
+
+ if (dw_count >= DELAYED_WORK_LIMIT) {
+ dw_do_work(shadow_object, &dw_array[0], dw_count);
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+ }
} else {
- PAGE_WAKEUP_DONE(m);
+ if (dwp->dw_mask & DW_clear_busy)
+ m->busy = FALSE;
+
+ if (dwp->dw_mask & DW_PAGE_WAKEUP)
+ PAGE_WAKEUP(m);
}
- vm_page_unlock_queues();
}
}
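+	/*
+	 * Flush any operations still batched in dw_array: page ops are
+	 * queued up to DELAYED_WORK_LIMIT at a time and then handed to
+	 * dw_do_work() in one shot, so the page queues can be locked
+	 * once per batch rather than once per page.
+	 */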
+ if (dw_count)
+ dw_do_work(shadow_object, &dw_array[0], dw_count);
+
occupied = 1;
+
if (upl->flags & UPL_DEVICE_MEMORY) {
occupied = 0;
} else if (upl->flags & UPL_LITE) {
int pg_num;
- int j;
+ int i;
+
pg_num = upl->size/PAGE_SIZE;
pg_num = (pg_num + 31) >> 5;
occupied = 0;
- for(j= 0; j<pg_num; j++) {
- if(lite_list[j] != 0) {
+
+ for (i = 0; i < pg_num; i++) {
+ if (lite_list[i] != 0) {
occupied = 1;
break;
}
}
} else {
- if(queue_empty(&upl->map_object->memq)) {
+ if (queue_empty(&upl->map_object->memq))
occupied = 0;
- }
}
+ if (occupied == 0) {
+ /*
+ * If this UPL element belongs to a Vector UPL and is
+ * empty, then this is the right function to deallocate
+		 * it. So go ahead and set the *empty variable. The flag
+ * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
+ * should be considered relevant for the Vector UPL and
+ * not the internal UPLs.
+ */
+ if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
+ *empty = TRUE;
- if(occupied == 0) {
- if(object == shadow_object)
- vm_object_paging_end(shadow_object);
+ if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
+ /*
+ * this is not a paging object
+ * so we need to drop the paging reference
+ * that was taken when we created the UPL
+ * against this object
+ */
+ vm_object_activity_end(shadow_object);
+ } else {
+ /*
+		 * we donated the paging reference to
+ * the map object... vm_pageout_object_terminate
+ * will drop this reference
+ */
+ }
}
vm_object_unlock(shadow_object);
if (object != shadow_object)
vm_object_unlock(object);
+
+ if(!isVectorUPL)
+ upl_unlock(upl);
+ else {
+ /*
+ * If we completed our operations on an UPL that is
+ * part of a Vectored UPL and if empty is TRUE, then
+ * we should go ahead and deallocate this UPL element.
+ * Then we check if this was the last of the UPL elements
+ * within that Vectored UPL. If so, set empty to TRUE
+ * so that in ubc_upl_abort_range or ubc_upl_abort, we
+ * can go ahead and deallocate the Vector UPL too.
+ */
+ if(*empty == TRUE) {
+ *empty = vector_upl_set_subupl(vector_upl, upl,0);
+ upl_deallocate(upl);
+ }
+ goto process_upl_to_abort;
+ }
- upl_unlock(upl);
return KERN_SUCCESS;
}
+
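+/*
+ * upl_abort: abort an entire UPL in one call.  This is a thin
+ * convenience wrapper that aborts the full range [0, upl->size)
+ * through upl_abort_range() and discards the resulting "empty"
+ * indication.  An illustrative call site might look like:
+ *
+ *	upl_abort(upl, UPL_ABORT_ERROR);
+ */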
+kern_return_t
+upl_abort(
+ upl_t upl,
+ int error)
+{
+ boolean_t empty;
+
+ return upl_abort_range(upl, 0, upl->size, error, &empty);
+}
+
+
/* an option on commit should be wire */
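+/*
+ * upl_commit: commit an entire UPL in one call.  This now simply
+ * commits the full range [0, upl->size) through upl_commit_range(),
+ * passing along any page list supplied by the caller.
+ */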
kern_return_t
upl_commit(
upl_page_info_t *page_list,
mach_msg_type_number_t count)
{
- if (upl == UPL_NULL)
- return KERN_INVALID_ARGUMENT;
-
- if(upl->flags & (UPL_LITE | UPL_IO_WIRE)) {
- boolean_t empty;
- return upl_commit_range(upl, 0, upl->size, 0,
- page_list, count, &empty);
- }
-
- if (count == 0)
- page_list = NULL;
-
- upl_lock(upl);
- if (upl->flags & UPL_DEVICE_MEMORY)
- page_list = NULL;
-
- if (upl->flags & UPL_ENCRYPTED) {
- /*
- * ENCRYPTED SWAP:
- * This UPL was encrypted, but we don't need
- * to decrypt here. We'll decrypt each page
- * later, on demand, as soon as someone needs
- * to access the page's contents.
- */
- }
-
- if ((upl->flags & UPL_CLEAR_DIRTY) ||
- (upl->flags & UPL_PAGE_SYNC_DONE) || page_list) {
- vm_object_t shadow_object = upl->map_object->shadow;
- vm_object_t object = upl->map_object;
- vm_object_offset_t target_offset;
- upl_size_t xfer_end;
- int entry;
-
- vm_page_t t, m;
- upl_page_info_t *p;
-
- if (object != shadow_object)
- vm_object_lock(object);
- vm_object_lock(shadow_object);
-
- entry = 0;
- target_offset = object->shadow_offset;
- xfer_end = upl->size + object->shadow_offset;
-
- while(target_offset < xfer_end) {
+ boolean_t empty;
- if ((t = vm_page_lookup(object,
- target_offset - object->shadow_offset))
- == NULL) {
- target_offset += PAGE_SIZE_64;
- entry++;
- continue;
- }
-
- m = vm_page_lookup(shadow_object, target_offset);
- if(m != VM_PAGE_NULL) {
- /*
- * ENCRYPTED SWAP:
- * If this page was encrypted, we
- * don't need to decrypt it here.
- * We'll decrypt it later, on demand,
- * as soon as someone needs to access
- * its contents.
- */
-
- if (upl->flags & UPL_CLEAR_DIRTY) {
- pmap_clear_modify(m->phys_page);
- m->dirty = FALSE;
- }
- /* It is a part of the semantic of */
- /* COPYOUT_FROM UPLs that a commit */
- /* implies cache sync between the */
- /* vm page and the backing store */
- /* this can be used to strip the */
- /* precious bit as well as clean */
- if (upl->flags & UPL_PAGE_SYNC_DONE)
- m->precious = FALSE;
-
- if(page_list) {
- p = &(page_list[entry]);
- if(page_list[entry].phys_addr &&
- p->pageout && !m->pageout) {
- vm_page_lock_queues();
- m->busy = TRUE;
- m->pageout = TRUE;
- vm_page_wire(m);
- vm_page_unlock_queues();
- } else if (page_list[entry].phys_addr &&
- !p->pageout && m->pageout &&
- !m->dump_cleaning) {
- vm_page_lock_queues();
- m->pageout = FALSE;
- m->absent = FALSE;
- m->overwriting = FALSE;
- vm_page_unwire(m);
- PAGE_WAKEUP_DONE(m);
- vm_page_unlock_queues();
- }
- page_list[entry].phys_addr = 0;
- }
- }
- target_offset += PAGE_SIZE_64;
- entry++;
- }
- vm_object_unlock(shadow_object);
- if (object != shadow_object)
- vm_object_unlock(object);
-
- }
- if (upl->flags & UPL_DEVICE_MEMORY) {
- vm_object_lock(upl->map_object->shadow);
- if(upl->map_object == upl->map_object->shadow)
- vm_object_paging_end(upl->map_object->shadow);
- vm_object_unlock(upl->map_object->shadow);
- }
- upl_unlock(upl);
- return KERN_SUCCESS;
+ return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
+unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
kern_return_t
vm_object_iopl_request(
int cntrl_flags)
{
vm_page_t dst_page;
- vm_object_offset_t dst_offset = offset;
- upl_size_t xfer_size = size;
+ vm_object_offset_t dst_offset;
+ upl_size_t xfer_size;
upl_t upl = NULL;
unsigned int entry;
wpl_array_t lite_list = NULL;
- int page_field_size;
- int delayed_unlock = 0;
int no_zero_fill = FALSE;
- vm_page_t alias_page = NULL;
+ u_int32_t psize;
kern_return_t ret;
vm_prot_t prot;
-
+ struct vm_object_fault_info fault_info;
+ struct dw dw_array[DELAYED_WORK_LIMIT];
+ struct dw *dwp;
+ int dw_count;
+ int dw_index;
if (cntrl_flags & ~UPL_VALID_FLAGS) {
/*
*/
return KERN_INVALID_VALUE;
}
- if (vm_lopage_poolsize == 0)
+ if (vm_lopage_needed == FALSE)
cntrl_flags &= ~UPL_NEED_32BIT_ADDR;
if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
if (object->phys_contiguous) {
if ((offset + object->shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
-
- if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
+
+ if (((offset + object->shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
return KERN_INVALID_ADDRESS;
}
}
*/
assert(! (cntrl_flags & UPL_ENCRYPT));
}
-
if (cntrl_flags & UPL_NOZEROFILL)
no_zero_fill = TRUE;
else
prot = VM_PROT_READ | VM_PROT_WRITE;
- if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
- size = MAX_UPL_TRANSFER * page_size;
+ if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
+ size = MAX_UPL_SIZE * PAGE_SIZE;
+
+ if (cntrl_flags & UPL_SET_INTERNAL) {
+ if (page_list_count != NULL)
+ *page_list_count = MAX_UPL_SIZE;
}
+ if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
+ ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
+ return KERN_INVALID_ARGUMENT;
- if(cntrl_flags & UPL_SET_INTERNAL)
- if(page_list_count != NULL)
- *page_list_count = MAX_UPL_TRANSFER;
- if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
- ((page_list_count != NULL) && (*page_list_count != 0)
- && *page_list_count < (size/page_size)))
- return KERN_INVALID_ARGUMENT;
+ if ((!object->internal) && (object->paging_offset != 0))
+ panic("vm_object_iopl_request: external object with non-zero paging offset\n");
- if((!object->internal) && (object->paging_offset != 0))
- panic("vm_object_upl_request: external object with non-zero paging offset\n");
- if(object->phys_contiguous) {
- /* No paging operations are possible against this memory */
- /* and so no need for map object, ever */
- cntrl_flags |= UPL_SET_LITE;
+ if (object->phys_contiguous)
+ psize = PAGE_SIZE;
+ else
+ psize = size;
+
+ if (cntrl_flags & UPL_SET_INTERNAL) {
+ upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+
+ user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
+ lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
+ ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
+ if (size == 0) {
+ user_page_list = NULL;
+ lite_list = NULL;
+ }
+ } else {
+ upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);
+
+ lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
+ if (size == 0) {
+ lite_list = NULL;
+ }
}
+ if (user_page_list)
+ user_page_list[0].device = FALSE;
+ *upl_ptr = upl;
- if(upl_ptr) {
- if(cntrl_flags & UPL_SET_INTERNAL) {
- if(cntrl_flags & UPL_SET_LITE) {
- upl = upl_create(
- UPL_CREATE_INTERNAL | UPL_CREATE_LITE,
- size);
- user_page_list = (upl_page_info_t *)
- (((uintptr_t)upl) + sizeof(struct upl));
- lite_list = (wpl_array_t)
- (((uintptr_t)user_page_list) +
- ((size/PAGE_SIZE) *
- sizeof(upl_page_info_t)));
- page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
- page_field_size =
- (page_field_size + 3) & 0xFFFFFFFC;
- bzero((char *)lite_list, page_field_size);
- upl->flags =
- UPL_LITE | UPL_INTERNAL | UPL_IO_WIRE;
- } else {
- upl = upl_create(UPL_CREATE_INTERNAL, size);
- user_page_list = (upl_page_info_t *)
- (((uintptr_t)upl)
- + sizeof(struct upl));
- upl->flags = UPL_INTERNAL | UPL_IO_WIRE;
- }
- } else {
- if(cntrl_flags & UPL_SET_LITE) {
- upl = upl_create(UPL_CREATE_LITE, size);
- lite_list = (wpl_array_t)
- (((uintptr_t)upl) + sizeof(struct upl));
- page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
- page_field_size =
- (page_field_size + 3) & 0xFFFFFFFC;
- bzero((char *)lite_list, page_field_size);
- upl->flags = UPL_LITE | UPL_IO_WIRE;
- } else {
- upl = upl_create(UPL_CREATE_EXTERNAL, size);
- upl->flags = UPL_IO_WIRE;
- }
+ upl->map_object = object;
+ upl->size = size;
+
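+	/*
+	 * Wirings against the kernel object (with no 32-bit-address or
+	 * access-blocking constraints) are tagged UPL_KERNEL_OBJECT and
+	 * take the object lock shared (exclusive only under UPL_DEBUG),
+	 * since the loop below does no more than record physical
+	 * addresses for those pages.  Every other object is locked
+	 * exclusively and holds an activity reference for the life of
+	 * the UPL.
+	 */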
+ if (object == kernel_object &&
+ !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
+ upl->flags |= UPL_KERNEL_OBJECT;
+#if UPL_DEBUG
+ vm_object_lock(object);
+#else
+ vm_object_lock_shared(object);
+#endif
+ } else {
+ vm_object_lock(object);
+ vm_object_activity_begin(object);
+ }
+ /*
+ * paging in progress also protects the paging_offset
+ */
+ upl->offset = offset + object->paging_offset;
+
+ if (cntrl_flags & UPL_BLOCK_ACCESS) {
+ /*
+		 * The user requested that access to the pages in this UPL
+		 * be blocked until the UPL is committed or aborted.
+ */
+ upl->flags |= UPL_ACCESS_BLOCKED;
+ }
+
+ if (object->phys_contiguous) {
+#if UPL_DEBUG
+ queue_enter(&object->uplq, upl, upl_t, uplq);
+#endif /* UPL_DEBUG */
+
+ if (upl->flags & UPL_ACCESS_BLOCKED) {
+ assert(!object->blocked_access);
+ object->blocked_access = TRUE;
}
- if(object->phys_contiguous) {
- upl->map_object = object;
- /* don't need any shadow mappings for this one */
- /* since it is already I/O memory */
- upl->flags |= UPL_DEVICE_MEMORY;
+ vm_object_unlock(object);
- vm_object_lock(object);
- vm_object_paging_begin(object);
- vm_object_unlock(object);
+ /*
+ * don't need any shadow mappings for this one
+ * since it is already I/O memory
+ */
+ upl->flags |= UPL_DEVICE_MEMORY;
- /* paging in progress also protects the paging_offset */
- upl->offset = offset + object->paging_offset;
- upl->size = size;
- *upl_ptr = upl;
- if(user_page_list) {
- user_page_list[0].phys_addr =
- (offset + object->shadow_offset)>>PAGE_SHIFT;
- user_page_list[0].device = TRUE;
- }
- upl->highest_page = (offset + object->shadow_offset + size - 1)>>PAGE_SHIFT;
+ upl->highest_page = (ppnum_t) ((offset + object->shadow_offset + size - 1)>>PAGE_SHIFT);
- if(page_list_count != NULL) {
- if (upl->flags & UPL_INTERNAL) {
- *page_list_count = 0;
- } else {
- *page_list_count = 1;
- }
- }
- return KERN_SUCCESS;
+ if (user_page_list) {
+ user_page_list[0].phys_addr = (ppnum_t) ((offset + object->shadow_offset)>>PAGE_SHIFT);
+ user_page_list[0].device = TRUE;
}
- if(user_page_list)
- user_page_list[0].device = FALSE;
-
- if(cntrl_flags & UPL_SET_LITE) {
- upl->map_object = object;
- } else {
- upl->map_object = vm_object_allocate(size);
- vm_object_lock(upl->map_object);
- upl->map_object->shadow = object;
- upl->map_object->pageout = TRUE;
- upl->map_object->can_persist = FALSE;
- upl->map_object->copy_strategy =
- MEMORY_OBJECT_COPY_NONE;
- upl->map_object->shadow_offset = offset;
- upl->map_object->wimg_bits = object->wimg_bits;
- vm_object_unlock(upl->map_object);
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
+ *page_list_count = 0;
+ else
+ *page_list_count = 1;
}
+ return KERN_SUCCESS;
}
- vm_object_lock(object);
- vm_object_paging_begin(object);
-
- if (!object->phys_contiguous) {
- /* Protect user space from future COW operations */
+ if (object != kernel_object) {
+ /*
+ * Protect user space from future COW operations
+ */
object->true_share = TRUE;
+
if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
}
- /* we can lock the upl offset now that paging_in_progress is set */
- if(upl_ptr) {
- upl->size = size;
- upl->offset = offset + object->paging_offset;
- *upl_ptr = upl;
-#ifdef UPL_DEBUG
- queue_enter(&object->uplq, upl, upl_t, uplq);
+#if UPL_DEBUG
+ queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */
- }
- if (cntrl_flags & UPL_BLOCK_ACCESS) {
+ if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
+ object->copy != VM_OBJECT_NULL) {
/*
- * The user requested that access to the pages in this URL
- * be blocked until the UPL is commited or aborted.
+ * Honor copy-on-write obligations
+ *
+ * The caller is gathering these pages and
+ * might modify their contents. We need to
+ * make sure that the copy object has its own
+ * private copies of these pages before we let
+ * the caller modify them.
+ *
+ * NOTE: someone else could map the original object
+ * after we've done this copy-on-write here, and they
+ * could then see an inconsistent picture of the memory
+ * while it's being modified via the UPL. To prevent this,
+ * we would have to block access to these pages until the
+ * UPL is released. We could use the UPL_BLOCK_ACCESS
+ * code path for that...
*/
- upl->flags |= UPL_ACCESS_BLOCKED;
+ vm_object_update(object,
+ offset,
+ size,
+ NULL,
+ NULL,
+ FALSE, /* should_return */
+ MEMORY_OBJECT_COPY_SYNC,
+ VM_PROT_NO_CHANGE);
+#if DEVELOPMENT || DEBUG
+ iopl_cow++;
+ iopl_cow_pages += size >> PAGE_SHIFT;
+#endif
}
+
entry = 0;
+
+ xfer_size = size;
+ dst_offset = offset;
+
+ fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
+ fault_info.user_tag = 0;
+ fault_info.lo_offset = offset;
+ fault_info.hi_offset = offset + xfer_size;
+ fault_info.no_cache = FALSE;
+ fault_info.stealth = FALSE;
+ fault_info.mark_zf_absent = TRUE;
+
+ dwp = &dw_array[0];
+ dw_count = 0;
+
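+	/*
+	 * Walk the range one page at a time: look the page up (faulting
+	 * it in if necessary), wire it, record its physical address in
+	 * lite_list/user_page_list, and batch the page-queue work
+	 * through dw_array/dw_do_work().
+	 */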
while (xfer_size) {
- if((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
- if (delayed_unlock) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
- vm_object_unlock(object);
- VM_PAGE_GRAB_FICTITIOUS(alias_page);
- vm_object_lock(object);
- }
+ vm_fault_return_t result;
+ unsigned int pg_num;
+
+ dwp->dw_mask = 0;
+
dst_page = vm_page_lookup(object, dst_offset);
/*
* If the page is encrypted, we need to decrypt it,
* so force a soft page fault.
*/
- if ((dst_page == VM_PAGE_NULL) || (dst_page->busy) ||
- (dst_page->encrypted) ||
- (dst_page->unusual && (dst_page->error ||
- dst_page->restart ||
- dst_page->absent ||
- dst_page->fictitious ||
- (prot & dst_page->page_lock)))) {
- vm_fault_return_t result;
+ if (dst_page == VM_PAGE_NULL ||
+ dst_page->busy ||
+ dst_page->encrypted ||
+ dst_page->error ||
+ dst_page->restart ||
+ dst_page->absent ||
+ dst_page->fictitious) {
+
+ if (object == kernel_object)
+ panic("vm_object_iopl_request: missing/bad page in kernel object\n");
+
do {
vm_page_t top_page;
kern_return_t error_code;
int interruptible;
- vm_object_offset_t lo_offset = offset;
- vm_object_offset_t hi_offset = offset + size;
-
-
- if (delayed_unlock) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
-
- if(cntrl_flags & UPL_SET_INTERRUPTIBLE) {
+ if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
interruptible = THREAD_ABORTSAFE;
- } else {
+ else
interruptible = THREAD_UNINT;
- }
+
+ fault_info.interruptible = interruptible;
+ fault_info.cluster_size = xfer_size;
+
+ vm_object_paging_begin(object);
result = vm_fault_page(object, dst_offset,
- prot | VM_PROT_WRITE, FALSE,
- interruptible,
- lo_offset, hi_offset,
- VM_BEHAVIOR_SEQUENTIAL,
- &prot, &dst_page, &top_page,
- (int *)0,
- &error_code, no_zero_fill, FALSE, NULL, 0);
-
- switch(result) {
- case VM_FAULT_SUCCESS:
+ prot | VM_PROT_WRITE, FALSE,
+ &prot, &dst_page, &top_page,
+ (int *)0,
+ &error_code, no_zero_fill,
+ FALSE, &fault_info);
+
+ switch (result) {
- PAGE_WAKEUP_DONE(dst_page);
+ case VM_FAULT_SUCCESS:
+ if ( !dst_page->absent) {
+ PAGE_WAKEUP_DONE(dst_page);
+ } else {
+ /*
+ * we only get back an absent page if we
+ * requested that it not be zero-filled
+ * because we are about to fill it via I/O
+ *
+ * absent pages should be left BUSY
+ * to prevent them from being faulted
+ * into an address space before we've
+ * had a chance to complete the I/O on
+ * them since they may contain info that
+ * shouldn't be seen by the faulting task
+ */
+ }
/*
* Release paging references and
* top-level placeholder page, if any.
*/
-
- if(top_page != VM_PAGE_NULL) {
+ if (top_page != VM_PAGE_NULL) {
vm_object_t local_object;
- local_object =
- top_page->object;
- if(top_page->object
- != dst_page->object) {
- vm_object_lock(
- local_object);
+
+ local_object = top_page->object;
+
+ if (top_page->object != dst_page->object) {
+ vm_object_lock(local_object);
VM_PAGE_FREE(top_page);
- vm_object_paging_end(
- local_object);
- vm_object_unlock(
- local_object);
+ vm_object_paging_end(local_object);
+ vm_object_unlock(local_object);
} else {
VM_PAGE_FREE(top_page);
- vm_object_paging_end(
- local_object);
+ vm_object_paging_end(local_object);
}
}
-
+ vm_object_paging_end(object);
break;
-
case VM_FAULT_RETRY:
vm_object_lock(object);
- vm_object_paging_begin(object);
break;
case VM_FAULT_FICTITIOUS_SHORTAGE:
vm_page_more_fictitious();
+
vm_object_lock(object);
- vm_object_paging_begin(object);
break;
case VM_FAULT_MEMORY_SHORTAGE:
if (vm_page_wait(interruptible)) {
vm_object_lock(object);
- vm_object_paging_begin(object);
break;
}
/* fall thru */
case VM_FAULT_INTERRUPTED:
error_code = MACH_SEND_INTERRUPTED;
case VM_FAULT_MEMORY_ERROR:
- ret = (error_code ? error_code:
- KERN_MEMORY_ERROR);
- vm_object_lock(object);
+ memory_error:
+ ret = (error_code ? error_code: KERN_MEMORY_ERROR);
+ vm_object_lock(object);
goto return_err;
+
+ case VM_FAULT_SUCCESS_NO_VM_PAGE:
+ /* success but no page: fail */
+ vm_object_paging_end(object);
+ vm_object_unlock(object);
+ goto memory_error;
+
+ default:
+ panic("vm_object_iopl_request: unexpected error"
+ " 0x%x from vm_fault_page()\n", result);
}
- } while ((result != VM_FAULT_SUCCESS)
- || (result == VM_FAULT_INTERRUPTED));
+ } while (result != VM_FAULT_SUCCESS);
+
}
+ if (upl->flags & UPL_KERNEL_OBJECT)
+ goto record_phys_addr;
+
+ if (dst_page->cleaning) {
+ /*
+			 * Someone else is cleaning this page in place.
+			 * In theory, we should be able to proceed and use this
+			 * page, but they'll probably end up clearing the "busy"
+			 * bit on it in upl_commit_range() even though they didn't
+			 * set it, so they would clear our "busy" bit and open
+			 * us to race conditions.
+ * We'd better wait for the cleaning to complete and
+ * then try again.
+ */
+ vm_object_iopl_request_sleep_for_cleaning++;
+ PAGE_SLEEP(object, dst_page, THREAD_UNINT);
+ continue;
+ }
if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
vm_page_t low_page;
* we don't know whether that physical address has been
* handed out to some other 64 bit capable DMA device to use
*/
- if (dst_page->wire_count) {
+ if (VM_PAGE_WIRED(dst_page)) {
ret = KERN_PROTECTION_FAILURE;
goto return_err;
}
- if (delayed_unlock) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
low_page = vm_page_grablo();
if (low_page == VM_PAGE_NULL) {
* it after we disconnect it... we want the fault
* to find the new page being substituted.
*/
- refmod = pmap_disconnect(dst_page->phys_page);
+ if (dst_page->pmapped)
+ refmod = pmap_disconnect(dst_page->phys_page);
+ else
+ refmod = 0;
- vm_page_copy(dst_page, low_page);
-
+ if ( !dst_page->absent)
+ vm_page_copy(dst_page, low_page);
+
low_page->reference = dst_page->reference;
low_page->dirty = dst_page->dirty;
+ low_page->absent = dst_page->absent;
if (refmod & VM_MEM_REFERENCED)
low_page->reference = TRUE;
if (refmod & VM_MEM_MODIFIED)
low_page->dirty = TRUE;
- vm_page_lock_queues();
vm_page_replace(low_page, object, dst_offset);
- /*
- * keep the queue lock since we're going to
- * need it immediately
- */
- delayed_unlock = 1;
dst_page = low_page;
/*
* BUSY... we don't need a PAGE_WAKEUP_DONE
* here, because we've never dropped the object lock
*/
- dst_page->busy = FALSE;
+ if ( !dst_page->absent)
+ dst_page->busy = FALSE;
}
- if (delayed_unlock == 0)
- vm_page_lock_queues();
- vm_page_wire(dst_page);
+ if ( !dst_page->busy)
+ dwp->dw_mask |= DW_vm_page_wire;
if (cntrl_flags & UPL_BLOCK_ACCESS) {
/*
assert(!dst_page->fictitious);
dst_page->busy = TRUE;
}
+ /*
+ * expect the page to be used
+ * page queues lock must be held to set 'reference'
+ */
+ dwp->dw_mask |= DW_set_reference;
+
+ if (!(cntrl_flags & UPL_COPYOUT_FROM))
+ dst_page->dirty = TRUE;
+record_phys_addr:
+ if (dst_page->busy)
+ upl->flags |= UPL_HAS_BUSY;
+
+ pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
+ assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
+ lite_list[pg_num>>5] |= 1 << (pg_num & 31);
+
+ if (dst_page->phys_page > upl->highest_page)
+ upl->highest_page = dst_page->phys_page;
+
+ if (user_page_list) {
+ user_page_list[entry].phys_addr = dst_page->phys_page;
+ user_page_list[entry].pageout = dst_page->pageout;
+ user_page_list[entry].absent = dst_page->absent;
+ user_page_list[entry].dirty = dst_page->dirty;
+ user_page_list[entry].precious = dst_page->precious;
+ user_page_list[entry].device = FALSE;
+ if (dst_page->clustered == TRUE)
+ user_page_list[entry].speculative = dst_page->speculative;
+ else
+ user_page_list[entry].speculative = FALSE;
+ user_page_list[entry].cs_validated = dst_page->cs_validated;
+ user_page_list[entry].cs_tainted = dst_page->cs_tainted;
+ }
+ if (object != kernel_object) {
+ /*
+ * someone is explicitly grabbing this page...
+ * update clustered and speculative state
+ *
+ */
+ VM_PAGE_CONSUME_CLUSTERED(dst_page);
+ }
+ entry++;
+ dst_offset += PAGE_SIZE_64;
+ xfer_size -= PAGE_SIZE;
- if (upl_ptr) {
- if (cntrl_flags & UPL_SET_LITE) {
- int pg_num;
- pg_num = (dst_offset-offset)/PAGE_SIZE;
- lite_list[pg_num>>5] |= 1 << (pg_num & 31);
- } else {
+ if (dwp->dw_mask) {
+ if (dst_page->busy == FALSE) {
/*
- * Convert the fictitious page to a
- * private shadow of the real page.
- */
- assert(alias_page->fictitious);
- alias_page->fictitious = FALSE;
- alias_page->private = TRUE;
- alias_page->pageout = TRUE;
- alias_page->phys_page = dst_page->phys_page;
- vm_page_wire(alias_page);
-
- vm_page_insert(alias_page,
- upl->map_object, size - xfer_size);
- assert(!alias_page->wanted);
- alias_page->busy = FALSE;
- alias_page->absent = FALSE;
+ * dw_do_work may need to drop the object lock
+ * if it does, we need the pages it's looking at to
+ * be held stable via the busy bit.
+ */
+ dst_page->busy = TRUE;
+ dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
}
+ dwp->dw_m = dst_page;
+ dwp++;
+ dw_count++;
- /* expect the page to be used */
- dst_page->reference = TRUE;
-
- if (!(cntrl_flags & UPL_COPYOUT_FROM))
- dst_page->dirty = TRUE;
- alias_page = NULL;
-
- if (dst_page->phys_page > upl->highest_page)
- upl->highest_page = dst_page->phys_page;
-
- if (user_page_list) {
- user_page_list[entry].phys_addr
- = dst_page->phys_page;
- user_page_list[entry].dirty =
- dst_page->dirty;
- user_page_list[entry].pageout =
- dst_page->pageout;
- user_page_list[entry].absent =
- dst_page->absent;
- user_page_list[entry].precious =
- dst_page->precious;
+ if (dw_count >= DELAYED_WORK_LIMIT) {
+ dw_do_work(object, &dw_array[0], dw_count);
+
+ dwp = &dw_array[0];
+ dw_count = 0;
}
}
- if (delayed_unlock++ > DELAYED_UNLOCK_LIMIT) {
- delayed_unlock = 0;
- vm_page_unlock_queues();
- }
- entry++;
- dst_offset += PAGE_SIZE_64;
- xfer_size -= PAGE_SIZE;
}
- if (delayed_unlock)
- vm_page_unlock_queues();
+ if (dw_count)
+ dw_do_work(object, &dw_array[0], dw_count);
- if (upl->flags & UPL_INTERNAL) {
- if(page_list_count != NULL)
+ if (page_list_count != NULL) {
+ if (upl->flags & UPL_INTERNAL)
*page_list_count = 0;
- } else if (*page_list_count > entry) {
- if(page_list_count != NULL)
+ else if (*page_list_count > entry)
*page_list_count = entry;
}
-
- if (alias_page != NULL) {
- vm_page_lock_queues();
- vm_page_free(alias_page);
- vm_page_unlock_queues();
- }
-
vm_object_unlock(object);
if (cntrl_flags & UPL_BLOCK_ACCESS) {
*/
vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
PMAP_NULL, 0, VM_PROT_NONE);
+ assert(!object->blocked_access);
+ object->blocked_access = TRUE;
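+		/*
+		 * With every pmap mapping removed and blocked_access set,
+		 * new accesses to this range fault and are held off until
+		 * the UPL is committed or aborted.
+		 */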
}
-
return KERN_SUCCESS;
-
return_err:
- if (delayed_unlock)
- vm_page_unlock_queues();
+ dw_index = 0;
for (; offset < dst_offset; offset += PAGE_SIZE) {
+ boolean_t need_unwire;
+
dst_page = vm_page_lookup(object, offset);
if (dst_page == VM_PAGE_NULL)
- panic("vm_object_iopl_request: Wired pages missing. \n");
+ panic("vm_object_iopl_request: Wired page missing. \n");
+
+ /*
+ * if we've already processed this page in an earlier
+ * dw_do_work, we need to undo the wiring... we will
+ * leave the dirty and reference bits on if they
+ * were set, since we don't have a good way of knowing
+ * what the previous state was and we won't get here
+ * under any normal circumstances... we will always
+ * clear BUSY and wakeup any waiters via vm_page_free
+ * or PAGE_WAKEUP_DONE
+ */
+ need_unwire = TRUE;
+
+ if (dw_count) {
+ if (dw_array[dw_index].dw_m == dst_page) {
+ /*
+ * still in the deferred work list
+ * which means we haven't yet called
+ * vm_page_wire on this page
+ */
+ need_unwire = FALSE;
+
+ dw_index++;
+ dw_count--;
+ }
+ }
vm_page_lock_queues();
- vm_page_unwire(dst_page);
+
+ if (dst_page->absent) {
+ vm_page_free(dst_page);
+
+ need_unwire = FALSE;
+ } else {
+ if (need_unwire == TRUE)
+ vm_page_unwire(dst_page, TRUE);
+
+ PAGE_WAKEUP_DONE(dst_page);
+ }
vm_page_unlock_queues();
- VM_STAT(reactivations++);
+
+ if (need_unwire == TRUE)
+ VM_STAT_INCR(reactivations);
+ }
+#if UPL_DEBUG
+ upl->upl_state = 2;
+#endif
+ if (! (upl->flags & UPL_KERNEL_OBJECT)) {
+ vm_object_activity_end(object);
}
- vm_object_paging_end(object);
vm_object_unlock(object);
upl_destroy(upl);
return ret;
}
-
kern_return_t
upl_transpose(
upl_t upl1,
boolean_t upls_locked;
vm_object_t object1, object2;
- if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2) {
+ if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
return KERN_INVALID_ARGUMENT;
}
* Make each UPL point to the correct VM object, i.e. the
* object holding the pages that the UPL refers to...
*/
+#if UPL_DEBUG
+ queue_remove(&object1->uplq, upl1, upl_t, uplq);
+ queue_remove(&object2->uplq, upl2, upl_t, uplq);
+#endif
upl1->map_object = object2;
upl2->map_object = object1;
+#if UPL_DEBUG
+ queue_enter(&object1->uplq, upl2, upl_t, uplq);
+ queue_enter(&object2->uplq, upl1, upl_t, uplq);
+#endif
}
done:
vm_map_offset_t vm_paging_base_address = 0;
boolean_t vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int vm_paging_max_index = 0;
+int vm_paging_page_waiter = 0;
+int vm_paging_page_waiter_total = 0;
unsigned long vm_paging_no_kernel_page = 0;
unsigned long vm_paging_objects_mapped = 0;
unsigned long vm_paging_pages_mapped = 0;
unsigned long vm_paging_objects_mapped_slow = 0;
unsigned long vm_paging_pages_mapped_slow = 0;
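+/*
+ * vm_paging_map_init: carve VM_PAGING_NUM_PAGES worth of kernel
+ * virtual address space out of kernel_map up front, to serve as the
+ * pool of single-page mapping slots used by vm_paging_map_object()
+ * below.
+ */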
+void
+vm_paging_map_init(void)
+{
+ kern_return_t kr;
+ vm_map_offset_t page_map_offset;
+ vm_map_entry_t map_entry;
+
+ assert(vm_paging_base_address == 0);
+
+ /*
+ * Initialize our pool of pre-allocated kernel
+ * virtual addresses.
+ */
+ page_map_offset = 0;
+ kr = vm_map_find_space(kernel_map,
+ &page_map_offset,
+ VM_PAGING_NUM_PAGES * PAGE_SIZE,
+ 0,
+ 0,
+ &map_entry);
+ if (kr != KERN_SUCCESS) {
+ panic("vm_paging_map_init: kernel_map full\n");
+ }
+ map_entry->object.vm_object = kernel_object;
+ map_entry->offset = page_map_offset;
+ vm_object_reference(kernel_object);
+ vm_map_unlock(kernel_map);
+
+ assert(vm_paging_base_address == 0);
+ vm_paging_base_address = page_map_offset;
+}
+
/*
* ENCRYPTED SWAP:
* vm_paging_map_object:
* kernel virtual addresses, if possible.
* Context:
* The VM object is locked. This lock will get
- * dropped and re-acquired though.
+ * dropped and re-acquired though, so the caller
+ * must make sure the VM object is kept alive
+ * (by holding a VM map that has a reference
+ * on it, for example, or taking an extra reference).
+ * The page should also be kept busy to prevent
+ * it from being reclaimed.
*/
kern_return_t
vm_paging_map_object(
vm_page_t page,
vm_object_t object,
vm_object_offset_t offset,
- vm_map_size_t *size)
+ vm_map_size_t *size,
+ vm_prot_t protection,
+ boolean_t can_unlock_object)
{
kern_return_t kr;
vm_map_offset_t page_map_offset;
vm_map_size_t map_size;
vm_object_offset_t object_offset;
-#ifdef __ppc__
int i;
- vm_map_entry_t map_entry;
-#endif /* __ppc__ */
-
-#ifdef __ppc__
+
if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
+ assert(page->busy);
/*
- * Optimization for the PowerPC.
* Use one of the pre-allocated kernel virtual addresses
* and just enter the VM page in the kernel address space
* at that virtual address.
*/
- vm_object_unlock(object);
simple_lock(&vm_paging_lock);
- if (vm_paging_base_address == 0) {
- /*
- * Initialize our pool of pre-allocated kernel
- * virtual addresses.
- */
- simple_unlock(&vm_paging_lock);
- page_map_offset = 0;
- kr = vm_map_find_space(kernel_map,
- &page_map_offset,
- VM_PAGING_NUM_PAGES * PAGE_SIZE,
- 0,
- 0,
- &map_entry);
- if (kr != KERN_SUCCESS) {
- panic("vm_paging_map_object: "
- "kernel_map full\n");
- }
- map_entry->object.vm_object = kernel_object;
- map_entry->offset =
- page_map_offset - VM_MIN_KERNEL_ADDRESS;
- vm_object_reference(kernel_object);
- vm_map_unlock(kernel_map);
-
- simple_lock(&vm_paging_lock);
- if (vm_paging_base_address != 0) {
- /* someone raced us and won: undo */
- simple_unlock(&vm_paging_lock);
- kr = vm_map_remove(kernel_map,
- page_map_offset,
- page_map_offset +
- (VM_PAGING_NUM_PAGES
- * PAGE_SIZE),
- VM_MAP_NO_FLAGS);
- assert(kr == KERN_SUCCESS);
- simple_lock(&vm_paging_lock);
- } else {
- vm_paging_base_address = page_map_offset;
- }
- }
-
/*
* Try and find an available kernel virtual address
* from our pre-allocated pool.
*/
page_map_offset = 0;
- for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
- if (vm_paging_page_inuse[i] == FALSE) {
- page_map_offset = vm_paging_base_address +
- (i * PAGE_SIZE);
+ for (;;) {
+ for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
+ if (vm_paging_page_inuse[i] == FALSE) {
+ page_map_offset =
+ vm_paging_base_address +
+ (i * PAGE_SIZE);
+ break;
+ }
+ }
+ if (page_map_offset != 0) {
+ /* found a space to map our page ! */
+ break;
+ }
+
+ if (can_unlock_object) {
+ /*
+ * If we can afford to unlock the VM object,
+ * let's take the slow path now...
+ */
break;
}
+ /*
+ * We can't afford to unlock the VM object, so
+ * let's wait for a space to become available...
+ */
+ vm_paging_page_waiter_total++;
+ vm_paging_page_waiter++;
+ thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
+ &vm_paging_lock,
+ THREAD_UNINT);
+ vm_paging_page_waiter--;
+ /* ... and try again */
}
if (page_map_offset != 0) {
}
vm_paging_page_inuse[i] = TRUE;
simple_unlock(&vm_paging_lock);
- pmap_map_block(kernel_pmap,
- page_map_offset,
- page->phys_page,
- 1, /* Size is number of 4k pages */
- VM_PROT_DEFAULT,
- ((int) page->object->wimg_bits &
- VM_WIMG_MASK),
- 0);
+
+ if (page->pmapped == FALSE) {
+ pmap_sync_page_data_phys(page->phys_page);
+ }
+ page->pmapped = TRUE;
+
+ /*
+ * Keep the VM object locked over the PMAP_ENTER
+ * and the actual use of the page by the kernel,
+ * or this pmap mapping might get undone by a
+ * vm_object_pmap_protect() call...
+ */
+ PMAP_ENTER(kernel_pmap,
+ page_map_offset,
+ page,
+ protection,
+ ((int) page->object->wimg_bits &
+ VM_WIMG_MASK),
+ TRUE);
vm_paging_objects_mapped++;
vm_paging_pages_mapped++;
*address = page_map_offset;
- vm_object_lock(object);
/* all done and mapped, ready to use ! */
return KERN_SUCCESS;
*/
vm_paging_no_kernel_page++;
simple_unlock(&vm_paging_lock);
- vm_object_lock(object);
}
-#endif /* __ppc__ */
+
+ if (! can_unlock_object) {
+ return KERN_NOT_SUPPORTED;
+ }
object_offset = vm_object_trunc_page(offset);
map_size = vm_map_round_page(*size);
* in the kernel_map
*/
- /* don't go beyond the object's end... */
- if (object_offset >= object->size) {
- map_size = 0;
- } else if (map_size > object->size - offset) {
- map_size = object->size - offset;
- }
-
vm_object_reference_locked(object); /* for the map entry */
vm_object_unlock(object);
object,
object_offset,
FALSE,
- VM_PROT_DEFAULT,
+ protection,
VM_PROT_ALL,
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
*address = 0;
*size = 0;
vm_object_deallocate(object); /* for the map entry */
+ vm_object_lock(object);
return kr;
}
* Enter the mapped pages in the page table now.
*/
vm_object_lock(object);
+ /*
+ * VM object must be kept locked from before PMAP_ENTER()
+ * until after the kernel is done accessing the page(s).
+ * Otherwise, the pmap mappings in the kernel could be
+ * undone by a call to vm_object_pmap_protect().
+ */
+
for (page_map_offset = 0;
map_size != 0;
map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {
page = vm_page_lookup(object, offset + page_map_offset);
if (page == VM_PAGE_NULL) {
- panic("vm_paging_map_object: no page !?");
+ printf("vm_paging_map_object: no page !?");
+ vm_object_unlock(object);
+ kr = vm_map_remove(kernel_map, *address, *size,
+ VM_MAP_NO_FLAGS);
+ assert(kr == KERN_SUCCESS);
+ *address = 0;
+ *size = 0;
+ vm_object_lock(object);
+ return KERN_MEMORY_ERROR;
}
- if (page->no_isync == TRUE) {
+ if (page->pmapped == FALSE) {
pmap_sync_page_data_phys(page->phys_page);
}
+ page->pmapped = TRUE;
cache_attr = ((unsigned int) object->wimg_bits) & VM_WIMG_MASK;
+ //assert(pmap_verify_free(page->phys_page));
PMAP_ENTER(kernel_pmap,
*address + page_map_offset,
page,
- VM_PROT_DEFAULT,
+ protection,
cache_attr,
- FALSE);
+ TRUE);
}
vm_paging_objects_mapped_slow++;
- vm_paging_pages_mapped_slow += map_size / PAGE_SIZE_64;
+ vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);
return KERN_SUCCESS;
}
vm_map_offset_t end)
{
kern_return_t kr;
-#ifdef __ppc__
int i;
-#endif /* __ppc__ */
- if ((vm_paging_base_address == 0) &&
- ((start < vm_paging_base_address) ||
- (end > (vm_paging_base_address
- + (VM_PAGING_NUM_PAGES * PAGE_SIZE))))) {
+ if ((vm_paging_base_address == 0) ||
+ (start < vm_paging_base_address) ||
+ (end > (vm_paging_base_address
+ + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
/*
* We didn't use our pre-allocated pool of
* kernel virtual address. Deallocate the
* pre-allocated pool. Put it back in the pool
* for next time.
*/
-#ifdef __ppc__
assert(end - start == PAGE_SIZE);
- i = (start - vm_paging_base_address) >> PAGE_SHIFT;
+ i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
+ assert(i >= 0 && i < VM_PAGING_NUM_PAGES);
/* undo the pmap mapping */
- mapping_remove(kernel_pmap, start);
+ pmap_remove(kernel_pmap, start, end);
simple_lock(&vm_paging_lock);
vm_paging_page_inuse[i] = FALSE;
+ if (vm_paging_page_waiter) {
+ thread_wakeup(&vm_paging_page_waiter);
+ }
simple_unlock(&vm_paging_lock);
-#endif /* __ppc__ */
}
}
+#if CRYPTO
/*
* Encryption data.
* "iv" is the "initial vector". Ideally, we want to
unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));
#endif /* DEBUG */
-extern u_long random(void);
-
/*
* Initialize the encryption context: key and key size.
*/
vm_page_t page,
vm_map_offset_t kernel_mapping_offset)
{
- int clear_refmod = 0;
kern_return_t kr;
- boolean_t page_was_referenced;
- boolean_t page_was_modified;
vm_map_size_t kernel_mapping_size;
vm_offset_t kernel_vaddr;
union {
ASSERT_PAGE_DECRYPTED(page);
/*
- * Gather the "reference" and "modified" status of the page.
- * We'll restore these values after the encryption, so that
- * the encryption is transparent to the rest of the system
- * and doesn't impact the VM's LRU logic.
+ * Take a paging-in-progress reference to keep the object
+ * alive even if we have to unlock it (in vm_paging_map_object()
+ * for example)...
*/
- page_was_referenced =
- (page->reference || pmap_is_referenced(page->phys_page));
- page_was_modified =
- (page->dirty || pmap_is_modified(page->phys_page));
+ vm_object_paging_begin(page->object);
if (kernel_mapping_offset == 0) {
/*
page,
page->object,
page->offset,
- &kernel_mapping_size);
+ &kernel_mapping_size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE);
if (kr != KERN_SUCCESS) {
panic("vm_page_encrypt: "
"could not map page in kernel: 0x%x\n",
encrypt_iv.vm.paging_offset =
page->object->paging_offset + page->offset;
- vm_object_unlock(page->object);
-
/* encrypt the "initial vector" */
aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
swap_crypt_null_iv,
vm_page_encrypt_counter++;
- vm_object_lock(page->object);
-
/*
* Unmap the page from the kernel's address space,
* if we had to map it ourselves. Otherwise, let
}
/*
- * Restore the "reference" and "modified" bits.
+ * Clear the "reference" and "modified" bits.
* This should clean up any impact the encryption had
* on them.
+ * The page was kept busy and disconnected from all pmaps,
+ * so it can't have been referenced or modified from user
+ * space.
+ * The software bits will be reset later after the I/O
+ * has completed (in upl_commit_range()).
*/
- if (! page_was_referenced) {
- clear_refmod |= VM_MEM_REFERENCED;
- page->reference = FALSE;
- }
- if (! page_was_modified) {
- clear_refmod |= VM_MEM_MODIFIED;
- page->dirty = FALSE;
- }
- if (clear_refmod)
- pmap_clear_refmod(page->phys_page, clear_refmod);
+ pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
page->encrypted = TRUE;
+
+ vm_object_paging_end(page->object);
}
/*
vm_page_t page,
vm_map_offset_t kernel_mapping_offset)
{
- int clear_refmod = 0;
kern_return_t kr;
vm_map_size_t kernel_mapping_size;
vm_offset_t kernel_vaddr;
- boolean_t page_was_referenced;
union {
unsigned char aes_iv[AES_BLOCK_SIZE];
struct {
assert(page->encrypted);
/*
- * Gather the "reference" status of the page.
- * We'll restore its value after the decryption, so that
- * the decryption is transparent to the rest of the system
- * and doesn't impact the VM's LRU logic.
+ * Take a paging-in-progress reference to keep the object
+ * alive even if we have to unlock it (in vm_paging_map_object()
+ * for example)...
*/
- page_was_referenced =
- (page->reference || pmap_is_referenced(page->phys_page));
+ vm_object_paging_begin(page->object);
if (kernel_mapping_offset == 0) {
/*
page,
page->object,
page->offset,
- &kernel_mapping_size);
+ &kernel_mapping_size,
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE);
if (kr != KERN_SUCCESS) {
panic("vm_page_decrypt: "
- "could not map page in kernel: 0x%x\n");
+ "could not map page in kernel: 0x%x\n",
+ kr);
}
} else {
kernel_mapping_size = 0;
decrypt_iv.vm.paging_offset =
page->object->paging_offset + page->offset;
- vm_object_unlock(page->object);
-
/* encrypt the "initial vector" */
aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
swap_crypt_null_iv,
&swap_crypt_ctx.decrypt);
vm_page_decrypt_counter++;
- vm_object_lock(page->object);
-
/*
* Unmap the page from the kernel's address space,
* if we had to map it ourselves. Otherwise, let
* and the decryption doesn't count.
*/
page->dirty = FALSE;
- clear_refmod = VM_MEM_MODIFIED;
-
- /* restore the "reference" bit */
- if (! page_was_referenced) {
- page->reference = FALSE;
- clear_refmod |= VM_MEM_REFERENCED;
- }
- pmap_clear_refmod(page->phys_page, clear_refmod);
-
+ assert (page->cs_validated == FALSE);
+ pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
page->encrypted = FALSE;
/*
/*
* Since the page is not mapped yet, some code might assume that it
* doesn't need to invalidate the instruction cache when writing to
- * that page. That code relies on "no_isync" being set, so that the
- * caches get syncrhonized when the page is first mapped. So we need
- * to set "no_isync" here too, despite the fact that we just
- * synchronized the caches above...
+ * that page. That code relies on "pmapped" being FALSE, so that the
+ * caches get synchronized when the page is first mapped.
*/
- page->no_isync = TRUE;
+ assert(pmap_verify_free(page->phys_page));
+ page->pmapped = FALSE;
+ page->wpmapped = FALSE;
+
+ vm_object_paging_end(page->object);
}
+#if DEVELOPMENT || DEBUG
unsigned long upl_encrypt_upls = 0;
unsigned long upl_encrypt_pages = 0;
+#endif
/*
* ENCRYPTED SWAP:
upl_offset_t crypt_offset,
upl_size_t crypt_size)
{
- upl_size_t upl_size;
- upl_offset_t upl_offset;
+ upl_size_t upl_size, subupl_size=crypt_size;
+ upl_offset_t offset_in_upl, subupl_offset=crypt_offset;
vm_object_t upl_object;
+ vm_object_offset_t upl_offset;
vm_page_t page;
vm_object_t shadow_object;
vm_object_offset_t shadow_offset;
vm_object_offset_t paging_offset;
vm_object_offset_t base_offset;
+ int isVectorUPL = 0;
+ upl_t vector_upl = NULL;
+
+ if((isVectorUPL = vector_upl_is_valid(upl)))
+ vector_upl = upl;
+
+process_upl_to_encrypt:
+ if(isVectorUPL) {
+ crypt_size = subupl_size;
+ crypt_offset = subupl_offset;
+ upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
+ if(upl == NULL)
+ panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
+ subupl_size -= crypt_size;
+ subupl_offset += crypt_size;
+ }
+#if DEVELOPMENT || DEBUG
upl_encrypt_upls++;
upl_encrypt_pages += crypt_size / PAGE_SIZE;
-
- upl_lock(upl);
-
+#endif
upl_object = upl->map_object;
upl_offset = upl->offset;
upl_size = upl->size;
- upl_unlock(upl);
-
vm_object_lock(upl_object);
/*
paging_offset = shadow_object->paging_offset;
vm_object_paging_begin(shadow_object);
- if (shadow_object != upl_object) {
- vm_object_unlock(shadow_object);
- }
- vm_object_unlock(upl_object);
+ if (shadow_object != upl_object)
+ vm_object_unlock(upl_object);
+
base_offset = shadow_offset;
base_offset += upl_offset;
base_offset += crypt_offset;
base_offset -= paging_offset;
- /*
- * Unmap the pages, so that nobody can continue accessing them while
- * they're encrypted. After that point, all accesses to these pages
- * will cause a page fault and block while the page is being encrypted
- * (busy). After the encryption completes, any access will cause a
- * page fault and the page gets decrypted at that time.
- */
- assert(crypt_offset + crypt_size <= upl_size);
- vm_object_pmap_protect(shadow_object,
- base_offset,
- (vm_object_size_t)crypt_size,
- PMAP_NULL,
- 0,
- VM_PROT_NONE);
- /* XXX FBDP could the object have changed significantly here ? */
- vm_object_lock(shadow_object);
+ assert(crypt_offset + crypt_size <= upl_size);
- for (upl_offset = 0;
- upl_offset < crypt_size;
- upl_offset += PAGE_SIZE) {
+ for (offset_in_upl = 0;
+ offset_in_upl < crypt_size;
+ offset_in_upl += PAGE_SIZE) {
page = vm_page_lookup(shadow_object,
- base_offset + upl_offset);
+ base_offset + offset_in_upl);
if (page == VM_PAGE_NULL) {
panic("upl_encrypt: "
"no page for (obj=%p,off=%lld+%d)!\n",
shadow_object,
base_offset,
- upl_offset);
+ offset_in_upl);
}
+ /*
+ * Disconnect the page from all pmaps, so that nobody can
+ * access it while it's encrypted. After that point, all
+ * accesses to this page will cause a page fault and block
+ * while the page is busy being encrypted. After the
+ * encryption completes, any access will cause a
+ * page fault and the page gets decrypted at that time.
+ */
+ pmap_disconnect(page->phys_page);
vm_page_encrypt(page, 0);
+
+ if (vm_object_lock_avoid(shadow_object)) {
+ /*
+ * Give vm_pageout_scan() a chance to convert more
+ * pages from "clean-in-place" to "clean-and-free",
+ * if it's interested in the same pages we selected
+ * in this cluster.
+ */
+ vm_object_unlock(shadow_object);
+ mutex_pause(2);
+ vm_object_lock(shadow_object);
+ }
}
vm_object_paging_end(shadow_object);
vm_object_unlock(shadow_object);
+
+ if(isVectorUPL && subupl_size)
+ goto process_upl_to_encrypt;
+}
+
+#else /* CRYPTO */
+void
+upl_encrypt(
+ __unused upl_t upl,
+ __unused upl_offset_t crypt_offset,
+ __unused upl_size_t crypt_size)
+{
+}
+
+void
+vm_page_encrypt(
+ __unused vm_page_t page,
+ __unused vm_map_offset_t kernel_mapping_offset)
+{
+}
+
+void
+vm_page_decrypt(
+ __unused vm_page_t page,
+ __unused vm_map_offset_t kernel_mapping_offset)
+{
+}
+
+#endif /* CRYPTO */
+
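+/*
+ * vm_pageout_queue_steal: reclaim a page that had been queued for
+ * pageout.  Clears its cleaning/pageout state, drops it from the
+ * laundry (and the I/O paging queue if needed) via
+ * vm_pageout_throttle_up(), and gives back the wire count taken
+ * when the page was originally set up to be cleaned.
+ */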
+void
+vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
+{
+ boolean_t pageout;
+
+ pageout = page->pageout;
+
+ page->list_req_pending = FALSE;
+ page->cleaning = FALSE;
+ page->pageout = FALSE;
+
+ if (!queues_locked) {
+ vm_page_lockspin_queues();
+ }
+
+ /*
+ * need to drop the laundry count...
+ * we may also need to remove it
+ * from the I/O paging queue...
+ * vm_pageout_throttle_up handles both cases
+ *
+ * the laundry and pageout_queue flags are cleared...
+ */
+ vm_pageout_throttle_up(page);
+
+ if (pageout == TRUE) {
+ /*
+ * toss the wire count we picked up
+		 * when we initially set this page up
+ * to be cleaned...
+ */
+ vm_page_unwire(page, TRUE);
+ }
+ vm_page_steal_pageout_page++;
+
+ if (!queues_locked) {
+ vm_page_unlock_queues();
+ }
+}
+
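+/*
+ * Vector UPLs: a vector UPL is a container UPL (UPL_VECTOR) that
+ * aggregates up to MAX_VECTOR_UPL_ELEMENTS sub-UPLs so they can be
+ * mapped, committed, or aborted as a unit.  The helpers below create
+ * the container, attach and detach sub-UPLs, build a combined page
+ * list, and track per-sub-UPL I/O state (offset/size).
+ */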
+upl_t
+vector_upl_create(vm_offset_t upl_offset)
+{
+ int vector_upl_size = sizeof(struct _vector_upl);
+ int i=0;
+ upl_t upl;
+ vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);
+
+ upl = upl_create(0,UPL_VECTOR,0);
+ upl->vector_upl = vector_upl;
+ upl->offset = upl_offset;
+ vector_upl->size = 0;
+ vector_upl->offset = upl_offset;
+ vector_upl->invalid_upls=0;
+ vector_upl->num_upls=0;
+ vector_upl->pagelist = NULL;
+
+ for(i=0; i < MAX_VECTOR_UPL_ELEMENTS ; i++) {
+ vector_upl->upl_iostates[i].size = 0;
+ vector_upl->upl_iostates[i].offset = 0;
+
+ }
+ return upl;
+}
+
+void
+vector_upl_deallocate(upl_t upl)
+{
+ if(upl) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl) {
+ if(vector_upl->invalid_upls != vector_upl->num_upls)
+ panic("Deallocating non-empty Vectored UPL\n");
+ kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
+ vector_upl->invalid_upls=0;
+ vector_upl->num_upls = 0;
+ vector_upl->pagelist = NULL;
+ vector_upl->size = 0;
+ vector_upl->offset = 0;
+ kfree(vector_upl, sizeof(struct _vector_upl));
+ vector_upl = (vector_upl_t)0xdeadbeef;
+ }
+ else
+ panic("vector_upl_deallocate was passed a non-vectored upl\n");
+ }
+ else
+ panic("vector_upl_deallocate was passed a NULL upl\n");
+}
+
+boolean_t
+vector_upl_is_valid(upl_t upl)
+{
+ if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
+ return FALSE;
+ else
+ return TRUE;
+ }
+ return FALSE;
+}
+
+boolean_t
+vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
+{
+ if(vector_upl_is_valid(upl)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+
+ if(vector_upl) {
+ if(subupl) {
+ if(io_size) {
+ if(io_size < PAGE_SIZE)
+ io_size = PAGE_SIZE;
+ subupl->vector_upl = (void*)vector_upl;
+ vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
+ vector_upl->size += io_size;
+ upl->size += io_size;
+ }
+ else {
+ uint32_t i=0,invalid_upls=0;
+ for(i = 0; i < vector_upl->num_upls; i++) {
+ if(vector_upl->upl_elems[i] == subupl)
+ break;
+ }
+ if(i == vector_upl->num_upls)
+ panic("Trying to remove sub-upl when none exists");
+
+ vector_upl->upl_elems[i] = NULL;
+ invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
+ if(invalid_upls == vector_upl->num_upls)
+ return TRUE;
+ else
+ return FALSE;
+ }
+ }
+ else
+ panic("vector_upl_set_subupl was passed a NULL upl element\n");
+ }
+ else
+ panic("vector_upl_set_subupl was passed a non-vectored upl\n");
+ }
+ else
+ panic("vector_upl_set_subupl was passed a NULL upl\n");
+
+ return FALSE;
+}
+
+void
+vector_upl_set_pagelist(upl_t upl)
+{
+ if(vector_upl_is_valid(upl)) {
+ uint32_t i=0;
+ vector_upl_t vector_upl = upl->vector_upl;
+
+ if(vector_upl) {
+ vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;
+
+ vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));
+
+ for(i=0; i < vector_upl->num_upls; i++) {
+ cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
+ bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
+ pagelist_size += cur_upl_pagelist_size;
+ if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
+ upl->highest_page = vector_upl->upl_elems[i]->highest_page;
+ }
+ assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
+ }
+ else
+ panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
+ }
+ else
+ panic("vector_upl_set_pagelist was passed a NULL upl\n");
+
+}
+
+upl_t
+vector_upl_subupl_byindex(upl_t upl, uint32_t index)
+{
+ if(vector_upl_is_valid(upl)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl) {
+ if(index < vector_upl->num_upls)
+ return vector_upl->upl_elems[index];
+ }
+ else
+ panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
+ }
+ return NULL;
+}
+
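+/*
+ * vector_upl_subupl_byoffset: translate an (offset, size) pair that
+ * is relative to the vector UPL into the sub-UPL covering it.  The
+ * offset is rebased to be relative to that sub-UPL and the size is
+ * clipped to the sub-UPL's recorded iostate; NULL is returned if the
+ * matching element has already been committed or aborted.
+ */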
+upl_t
+vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
+{
+ if(vector_upl_is_valid(upl)) {
+ uint32_t i=0;
+ vector_upl_t vector_upl = upl->vector_upl;
+
+ if(vector_upl) {
+ upl_t subupl = NULL;
+ vector_upl_iostates_t subupl_state;
+
+ for(i=0; i < vector_upl->num_upls; i++) {
+ subupl = vector_upl->upl_elems[i];
+ subupl_state = vector_upl->upl_iostates[i];
+ if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
+ /* We could have been passed an offset/size pair that belongs
+ * to an UPL element that has already been committed/aborted.
+ * If so, return NULL.
+ */
+ if(subupl == NULL)
+ return NULL;
+ if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
+ *upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
+ if(*upl_size > subupl_state.size)
+ *upl_size = subupl_state.size;
+ }
+ if(*upl_offset >= subupl_state.offset)
+ *upl_offset -= subupl_state.offset;
+ else if(i)
+ panic("Vector UPL offset miscalculation\n");
+ return subupl;
+ }
+ }
+ }
+ else
+ panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
+ }
+ return NULL;
+}
+
+void
+vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
+{
+ *v_upl_submap = NULL;
+
+ if(vector_upl_is_valid(upl)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl) {
+ *v_upl_submap = vector_upl->submap;
+ *submap_dst_addr = vector_upl->submap_dst_addr;
+ }
+ else
+ panic("vector_upl_get_submap was passed a non-vectored UPL\n");
+ }
+ else
+ panic("vector_upl_get_submap was passed a null UPL\n");
+}
+
+void
+vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
+{
+ if(vector_upl_is_valid(upl)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl) {
+ vector_upl->submap = submap;
+ vector_upl->submap_dst_addr = submap_dst_addr;
+ }
+ else
+ panic("vector_upl_get_submap was passed a non-vectored UPL\n");
+ }
+ else
+ panic("vector_upl_get_submap was passed a NULL UPL\n");
+}
+
+void
+vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
+{
+ if(vector_upl_is_valid(upl)) {
+ uint32_t i = 0;
+ vector_upl_t vector_upl = upl->vector_upl;
+
+ if(vector_upl) {
+ for(i = 0; i < vector_upl->num_upls; i++) {
+ if(vector_upl->upl_elems[i] == subupl)
+ break;
+ }
+
+ if(i == vector_upl->num_upls)
+ panic("setting sub-upl iostate when none exists");
+
+ vector_upl->upl_iostates[i].offset = offset;
+ if(size < PAGE_SIZE)
+ size = PAGE_SIZE;
+ vector_upl->upl_iostates[i].size = size;
+ }
+ else
+ panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
+ }
+ else
+ panic("vector_upl_set_iostate was passed a NULL UPL\n");
+}
+
+void
+vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
+{
+ if(vector_upl_is_valid(upl)) {
+ uint32_t i = 0;
+ vector_upl_t vector_upl = upl->vector_upl;
+
+ if(vector_upl) {
+ for(i = 0; i < vector_upl->num_upls; i++) {
+ if(vector_upl->upl_elems[i] == subupl)
+ break;
+ }
+
+ if(i == vector_upl->num_upls)
+ panic("getting sub-upl iostate when none exists");
+
+ *offset = vector_upl->upl_iostates[i].offset;
+ *size = vector_upl->upl_iostates[i].size;
+ }
+ else
+ panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
+ }
+ else
+ panic("vector_upl_get_iostate was passed a NULL UPL\n");
+}
+
+void
+vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
+{
+ if(vector_upl_is_valid(upl)) {
+ vector_upl_t vector_upl = upl->vector_upl;
+ if(vector_upl) {
+ if(index < vector_upl->num_upls) {
+ *offset = vector_upl->upl_iostates[index].offset;
+ *size = vector_upl->upl_iostates[index].size;
+ }
+ else
+ *offset = *size = 0;
+ }
+ else
+ panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
+ }
+ else
+ panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
+}
+
+upl_page_info_t *
+upl_get_internal_vectorupl_pagelist(upl_t upl)
+{
+ return ((vector_upl_t)(upl->vector_upl))->pagelist;
+}
+
+void *
+upl_get_internal_vectorupl(upl_t upl)
+{
+ return upl->vector_upl;
}
vm_size_t
#ifdef MACH_BSD
+boolean_t upl_device_page(upl_page_info_t *upl)
+{
+ return(UPL_DEVICE_PAGE(upl));
+}
boolean_t upl_page_present(upl_page_info_t *upl, int index)
{
return(UPL_PAGE_PRESENT(upl, index));
}
+boolean_t upl_speculative_page(upl_page_info_t *upl, int index)
+{
+ return(UPL_SPECULATIVE_PAGE(upl, index));
+}
boolean_t upl_dirty_page(upl_page_info_t *upl, int index)
{
return(UPL_DIRTY_PAGE(upl, index));
return(UPL_PHYS_PAGE(upl, index));
}
+
void
vm_countdirtypages(void)
{
} while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
vm_page_unlock_queues();
+ vm_page_lock_queues();
+ m = (vm_page_t) queue_first(&vm_page_queue_throttled);
+ do {
+ if (m ==(vm_page_t )0) break;
+
+ dpages++;
+ assert(m->dirty);
+ assert(!m->pageout);
+ assert(m->object != kernel_object);
+ m = (vm_page_t) queue_next(&m->pageq);
+ if (m ==(vm_page_t )0) break;
+
+ } while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
+ vm_page_unlock_queues();
+
vm_page_lock_queues();
m = (vm_page_t) queue_first(&vm_page_queue_zf);
do {
#endif /* MACH_BSD */
ppnum_t upl_get_highest_page(
- upl_t upl)
+ upl_t upl)
+{
+ return upl->highest_page;
+}
+
+upl_size_t upl_get_size(
+ upl_t upl)
{
- return upl->highest_page;
+ return upl->size;
}
-#ifdef UPL_DEBUG
-kern_return_t upl_ubc_alias_set(upl_t upl, unsigned int alias1, unsigned int alias2)
+#if UPL_DEBUG
+kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
upl->ubc_alias1 = alias1;
upl->ubc_alias2 = alias2;
return KERN_SUCCESS;
}
-int upl_ubc_alias_get(upl_t upl, unsigned int * al, unsigned int * al2)
+int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
if(al)
*al = upl->ubc_alias1;