#include <kern/sched_prim.h>
#include <kern/ledger.h>
+#include <kern/policy_internal.h>
#include <libkern/OSDebug.h>
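/* global mutex guarding the purgeable object queues */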
decl_lck_mtx_data(,vm_purgeable_queue_lock)
-#define TOKEN_ADD 0x40 /* 0x100 */
-#define TOKEN_DELETE 0x41 /* 0x104 */
-#define TOKEN_RIPEN 0x42 /* 0x108 */
-#define OBJECT_ADD 0x48 /* 0x120 */
-#define OBJECT_REMOVE 0x49 /* 0x124 */
-#define OBJECT_PURGE 0x4a /* 0x128 */
-#define OBJECT_PURGE_ALL 0x4b /* 0x12c */
-
static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
kern_return_t
vm_purgeable_token_add(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
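+ /* LCK_MTX_ASSERT compiles away when assertions are disabled, so the
+  * explicit MACH_ASSERT guard around it is no longer needed */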
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
/* new token */
token_idx_t token;
(vm_offset_t) tokens,
token_q_cur_size,
(vm_offset_t *) &new_loc,
- alloc_size);
+ alloc_size, VM_KERN_MEMORY_OSFMK);
} else {
result = kmem_alloc(kernel_map,
(vm_offset_t *) &new_loc,
- alloc_size);
+ alloc_size, VM_KERN_MEMORY_OSFMK);
}
}
static token_idx_t
vm_purgeable_token_remove_first(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
token_idx_t token;
token = queue->token_q_head;
static token_idx_t
vm_purgeable_token_remove_last(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
token_idx_t token;
token = queue->token_q_tail;
void
vm_purgeable_token_delete_first(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
token_idx_t token = vm_purgeable_token_remove_first(queue);
if (token) {
void
vm_purgeable_token_delete_last(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
token_idx_t token = vm_purgeable_token_remove_last(queue);
if (token) {
void
vm_purgeable_q_advance_all(void)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
/* Check the queue counters; if they get really large, scale them back.
 * They tend to grow that large when there is no purgeable queue activity. */
static void
vm_purgeable_token_remove_ripe(purgeable_q_t queue)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
/* return token to free list. advance token list. */
token_idx_t new_head = tokens[queue->token_q_head].next;
static void
vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
{
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
assert(queue->token_q_head);
if (tokens[queue->token_q_head].count == 0) {
int best_object_task_importance;
int best_object_skipped;
int num_objects_skipped;
+ int try_lock_failed = 0;
+ int try_lock_succeeded = 0;
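+ /* counters for the candidate-selection tracepoints emitted below */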
task_t owner;
best_object = VM_OBJECT_NULL;
best_object_task_importance = INT_MAX;
- lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
/*
* Usually we would pick the first element from a queue. However, we
* might not be able to get a lock on it, in which case we try the
* remaining elements in order.
*/
- num_objects_skipped = -1;
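+ /* trace the start of the candidate-selection loop */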
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_START),
+ pick_ripe,
+ group,
+ VM_KERNEL_UNSLIDE_OR_PERM(queue),
+ 0,
+ 0);
+
+ num_objects_skipped = 0;
for (object = (vm_object_t) queue_first(&queue->objq[group]);
!queue_end(&queue->objq[group], (queue_entry_t) object);
object = (vm_object_t) queue_next(&object->objq),
num_objects_skipped++) {
+ /*
+  * To prevent us from looping for an excessively long time, settle for
+  * the best object seen so far once PURGEABLE_LOOP_MAX elements have been
+  * examined. If no eligible object has been seen by then, keep going
+  * until the first eligible object is found.
+  */
+ if ((num_objects_skipped >= PURGEABLE_LOOP_MAX) && (best_object != NULL)) {
+ break;
+ }
+
if (pick_ripe &&
!object->purgeable_when_ripe) {
/* we want an object that has a ripe token */
owner = object->vo_purgeable_owner;
if (owner) {
+#if CONFIG_EMBEDDED
+#if CONFIG_JETSAM
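+ /* on embedded configs, the jetsam (memorystatus) priority stands in
+  * for the task-importance estimate used on other platforms */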
+ object_task_importance = proc_get_memstat_priority((struct proc *)get_bsdtask_info(owner), TRUE);
+#endif /* CONFIG_JETSAM */
+#else /* CONFIG_EMBEDDED */
object_task_importance = task_importance_estimate(owner);
+#endif /* CONFIG_EMBEDDED */
}
if (object_task_importance < best_object_task_importance) {
if (vm_object_lock_try(object)) {
+ try_lock_succeeded++;
if (best_object != VM_OBJECT_NULL) {
/* forget about previous best object */
vm_object_unlock(best_object);
/* can't get any better: stop looking */
break;
}
+ } else {
+ try_lock_failed++;
}
}
}
+
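+ /* trace the end of the loop: objects examined, lock-try outcomes,
+  * and the chosen object (if any) with its resident page count */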
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_LOOP) | DBG_FUNC_END),
+ num_objects_skipped, /* considered objects */
+ try_lock_failed,
+ try_lock_succeeded,
+ VM_KERNEL_UNSLIDE_OR_PERM(best_object),
+ ((best_object == NULL) ? 0 : best_object->resident_page_count));
+
object = best_object;
if (object == VM_OBJECT_NULL) {
boolean_t forced_purge;
/* Need the page queue lock since we'll be changing the token queue. */
-#if MACH_ASSERT
- lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
-#endif
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
lck_mtx_lock(&vm_purgeable_queue_lock);
/* Cycle through all queues */
vm_page_lock_queues();
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
- object, /* purged object */
+ VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
0,
available_for_purge,
0,
static void
vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
{
- lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
stat->count = stat->size = 0;
vm_object_t object;
lck_mtx_unlock(&vm_purgeable_queue_lock);
return;
}
+
+#if DEVELOPMENT || DEBUG
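+/*
+ * Accumulate into acnt_info the volatile page counts of every object in
+ * the given queue/group that is owned by the given task.
+ */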
+static void
+vm_purgeable_account_volatile_queue(
+ purgeable_q_t queue,
+ int group,
+ task_t task,
+ pvm_account_info_t acnt_info)
+{
+ vm_object_t object;
+ uint64_t compressed_count;
+
+ for (object = (vm_object_t) queue_first(&queue->objq[group]);
+ !queue_end(&queue->objq[group], (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq)) {
+ if (object->vo_purgeable_owner == task) {
+ compressed_count = vm_compressor_pager_get_count(object->pager);
+ acnt_info->pvm_volatile_compressed_count += compressed_count;
+ acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
+ acnt_info->pvm_nonvolatile_count += object->wired_page_count;
+ }
+ }
+}
+
+/*
+ * Walks the purgeable object queues and calculates the usage
+ * associated with the objects for the given task.
+ */
+kern_return_t
+vm_purgeable_account(
+ task_t task,
+ pvm_account_info_t acnt_info)
+{
+ queue_head_t *nonvolatile_q;
+ vm_object_t object;
+ int group;
+ int state;
+ uint64_t compressed_count;
+ purgeable_q_t volatile_q;
+
+ if ((task == NULL) || (acnt_info == NULL)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ acnt_info->pvm_volatile_count = 0;
+ acnt_info->pvm_volatile_compressed_count = 0;
+ acnt_info->pvm_nonvolatile_count = 0;
+ acnt_info->pvm_nonvolatile_compressed_count = 0;
+
+ lck_mtx_lock(&vm_purgeable_queue_lock);
+
+ nonvolatile_q = &purgeable_nonvolatile_queue;
+ for (object = (vm_object_t) queue_first(nonvolatile_q);
+ !queue_end(nonvolatile_q, (queue_entry_t) object);
+ object = (vm_object_t) queue_next(&object->objq)) {
+ if (object->vo_purgeable_owner == task) {
+ state = object->purgable;
+ compressed_count = vm_compressor_pager_get_count(object->pager);
+ if (state == VM_PURGABLE_EMPTY) {
+ acnt_info->pvm_volatile_count += (object->resident_page_count - object->wired_page_count);
+ acnt_info->pvm_volatile_compressed_count += compressed_count;
+ } else {
+ acnt_info->pvm_nonvolatile_count += (object->resident_page_count - object->wired_page_count);
+ acnt_info->pvm_nonvolatile_compressed_count += compressed_count;
+ }
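+ /* wired pages are always charged as nonvolatile, regardless of the
+  * object's purgeable state */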
+ acnt_info->pvm_nonvolatile_count += object->wired_page_count;
+ }
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+ vm_purgeable_account_volatile_queue(volatile_q, 0, task, acnt_info);
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
+ vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
+ }
+
+ volatile_q = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+ for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
+ vm_purgeable_account_volatile_queue(volatile_q, group, task, acnt_info);
+ }
+ lck_mtx_unlock(&vm_purgeable_queue_lock);
+
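+ /* convert the accumulated page counts into byte counts */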
+ acnt_info->pvm_volatile_count = (acnt_info->pvm_volatile_count * PAGE_SIZE);
+ acnt_info->pvm_volatile_compressed_count = (acnt_info->pvm_volatile_compressed_count * PAGE_SIZE);
+ acnt_info->pvm_nonvolatile_count = (acnt_info->pvm_nonvolatile_count * PAGE_SIZE);
+ acnt_info->pvm_nonvolatile_compressed_count = (acnt_info->pvm_nonvolatile_compressed_count * PAGE_SIZE);
+
+ return KERN_SUCCESS;
+}
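+/*
+ * Usage sketch (hypothetical caller, assuming pvm_account_info_t points
+ * to a struct pvm_account_info carrying the four pvm_* fields used above):
+ *
+ *	struct pvm_account_info info;
+ *
+ *	if (vm_purgeable_account(task, &info) == KERN_SUCCESS) {
+ *		// all four counters are byte counts after the PAGE_SIZE scaling
+ *	}
+ */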
+#endif /* DEVELOPMENT || DEBUG */
static void
vm_purgeable_volatile_queue_disown(
collisions = 0;
again:
- lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
+ LCK_MTX_ASSERT(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
for (object = (vm_object_t) queue_first(&queue->objq[group]);
!queue_end(&queue->objq[group], (queue_entry_t) object);
assert(object->purgable == VM_PURGABLE_NONVOLATILE);
assert(object->vo_purgeable_owner == NULL);
- assert(owner != NULL);
lck_mtx_lock(&vm_purgeable_queue_lock);
- if (owner->task_purgeable_disowning) {
+ if (owner != NULL &&
+ owner->task_purgeable_disowning) {
/* task is exiting and no longer tracking purgeable objects */
owner = NULL;
}
#endif /* DEBUG */
page_count = object->resident_page_count;
- assert(page_count == 0); /* should be a freshly-created object */
if (owner != NULL && page_count != 0) {
ledger_credit(owner->ledger,
task_ledgers.purgeable_nonvolatile,
resident_page_count = object->resident_page_count;
wired_page_count = object->wired_page_count;
- if ((COMPRESSED_PAGER_IS_ACTIVE ||
- DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) &&
- object->pager != NULL) {
+ if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
+ object->pager != NULL) {
compressed_page_count =
vm_compressor_pager_get_count(object->pager);
} else {