diff --git a/osfmk/vm/vm_purgeable.c b/osfmk/vm/vm_purgeable.c
index df1f3f810b7bcf307937fa66a97875672e5c5713..5930bc7d40ac2faf1a3889d63a482af72a53c427 100644
--- a/osfmk/vm/vm_purgeable.c
+++ b/osfmk/vm/vm_purgeable.c
 
 #include <mach/mach_types.h>
 #include <vm/vm_page.h>
+#include <vm/vm_kern.h>                                /* kmem_alloc */
+#include <vm/vm_protos.h>
 #include <vm/vm_purgeable_internal.h>
 #include <sys/kdebug.h>
+#include <kern/sched_prim.h>
+#include <machine/limits.h>
+
+extern vm_pressure_level_t memorystatus_vm_pressure_level;
 
 struct token {
        token_cnt_t     count;
+       token_idx_t     prev;
        token_idx_t     next;
 };
 
-struct token    tokens[MAX_VOLATILE];
+struct token   *tokens;
+token_idx_t    token_q_max_cnt = 0;
+vm_size_t      token_q_cur_size = 0;
 
-token_idx_t     token_free_idx = 0;    /* head of free queue */
-token_cnt_t     token_init_count = 1;  /* token 0 is reserved!! */
-token_cnt_t     token_new_pagecount = 0;       /* count of pages that will
+token_idx_t     token_free_idx = 0;            /* head of free queue */
+token_idx_t     token_init_idx = 1;            /* token 0 is reserved!! */
+int32_t                token_new_pagecount = 0;        /* count of pages that will
                                                 * be added onto token queue */
 
 int             available_for_purge = 0;       /* increase when ripe token
                                                 * added, decrease when ripe
-                                                * token removed protect with
-                                                * page_queue_lock */
+                                                * token removed.
+                                                * protected by page_queue_lock 
+                                                */
+
+static int token_q_allocating = 0;             /* flag for singlethreading 
+                                                * allocator */
 
 struct purgeable_q purgeable_queues[PURGEABLE_Q_TYPE_MAX];
 
-#define TOKEN_ADD           0x40/* 0x100 */
-#define TOKEN_DELETE        0x41/* 0x104 */
-#define TOKEN_QUEUE_ADVANCE 0x42/* 0x108 actually means "token ripened" */
-#define TOKEN_OBJECT_PURGED 0x43/* 0x10c */
-#define OBJECT_ADDED        0x50/* 0x140 */
-#define OBJECT_REMOVED      0x51/* 0x144 */
+decl_lck_mtx_data(,vm_purgeable_queue_lock)
+
+#define TOKEN_ADD              0x40    /* 0x100 */
+#define TOKEN_DELETE           0x41    /* 0x104 */
+#define TOKEN_RIPEN            0x42    /* 0x108 */
+#define OBJECT_ADD             0x48    /* 0x120 */
+#define OBJECT_REMOVE          0x49    /* 0x124 */
+#define OBJECT_PURGE           0x4a    /* 0x128 */
+#define OBJECT_PURGE_ALL       0x4b    /* 0x12c */
 
-static void     vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue);
 static token_idx_t vm_purgeable_token_remove_first(purgeable_q_t queue);
 
+static void vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task);
+
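
The hunks above replace the fixed tokens[MAX_VOLATILE] array with a growable array of index-linked tokens: index 0 is the null link, freed slots are threaded onto a free list through .next (token_free_idx), and never-used slots are handed out lazily up to token_q_max_cnt. A minimal user-space sketch of that slot allocator, with illustrative names and a tiny fixed capacity (not the kernel code):

#include <assert.h>
#include <stdint.h>

typedef uint32_t tok_idx_t;
struct tok { uint32_t count; tok_idx_t prev, next; };

#define TOK_MAX 8                       /* tiny fixed capacity, for illustration */
static struct tok toks[TOK_MAX];        /* slot 0 is reserved as "no token" */
static tok_idx_t  free_head = 0;        /* analog of token_free_idx */
static tok_idx_t  init_idx  = 1;        /* analog of token_init_idx */

static tok_idx_t
tok_alloc(void)
{
        tok_idx_t t;
        if (free_head) {                 /* reuse a previously freed slot */
                t = free_head;
                free_head = toks[t].next;
        } else if (init_idx < TOK_MAX) { /* lazily hand out a fresh slot */
                t = init_idx++;
        } else {
                return 0;                /* exhausted (the kernel grows the array here) */
        }
        toks[t].count = 0;
        toks[t].prev = toks[t].next = 0;
        return t;
}

static void
tok_free(tok_idx_t t)
{
        assert(t != 0);
        toks[t].next = free_head;        /* push onto the free list */
        toks[t].prev = 0;
        free_head = t;
}
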
 #if MACH_ASSERT
 static void
 vm_purgeable_token_check_queue(purgeable_q_t queue)
@@ -83,37 +100,125 @@ vm_purgeable_token_check_queue(purgeable_q_t queue)
        if (unripe)
                assert(queue->token_q_unripe == unripe);
        assert(token_cnt == queue->debug_count_tokens);
-       our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
-       assert(our_inactive_count >= 0);
-       assert((uint32_t) our_inactive_count == vm_page_inactive_count);
+       
+       /* obsolete queue doesn't maintain token counts */
+       if(queue->type != PURGEABLE_Q_TYPE_OBSOLETE)
+       {
+               our_inactive_count = page_cnt + queue->new_pages + token_new_pagecount;
+               assert(our_inactive_count >= 0);
+               assert((uint32_t) our_inactive_count == vm_page_inactive_count - vm_page_cleaned_count);
+       }
 }
 #endif
 
+/*
+ * Add a token. Allocate token queue memory if necessary.
+ * Call with page queue locked.
+ */
 kern_return_t
 vm_purgeable_token_add(purgeable_q_t queue)
 {
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       
        /* new token */
        token_idx_t     token;
        enum purgeable_q_type i;
 
-       if (token_init_count < MAX_VOLATILE) {  /* lazy token array init */
-               token = token_init_count;
-               token_init_count++;
-       } else if (token_free_idx) {
+find_available_token:
+
+       if (token_free_idx) {                           /* unused tokens available */
                token = token_free_idx;
                token_free_idx = tokens[token_free_idx].next;
-       } else {
-               return KERN_FAILURE;
+       } else if (token_init_idx < token_q_max_cnt) {  /* lazy token array init */
+               token = token_init_idx;
+               token_init_idx++;
+       } else {                                        /* allocate more memory */
+               /* Wait if another thread is inside the memory alloc section */
+               while(token_q_allocating) {
+                       wait_result_t res = lck_mtx_sleep(&vm_page_queue_lock,
+                                                         LCK_SLEEP_DEFAULT,
+                                                         (event_t)&token_q_allocating,
+                                                         THREAD_UNINT);
+                       if(res != THREAD_AWAKENED) return KERN_ABORTED;
+               };
+               
+               /* Check whether memory is still maxed out */
+               if(token_init_idx < token_q_max_cnt)
+                       goto find_available_token;
+               
+               /* Still no memory. Allocate some. */
+               token_q_allocating = 1;
+               
+               /* Drop page queue lock so we can allocate */
+               vm_page_unlock_queues();
+               
+               struct token *new_loc;
+               vm_size_t alloc_size = token_q_cur_size + PAGE_SIZE;
+               kern_return_t result;
+               
+               if (alloc_size / sizeof (struct token) > TOKEN_COUNT_MAX) {
+                       result = KERN_RESOURCE_SHORTAGE;
+               } else {
+                       if (token_q_cur_size) {
+                               result = kmem_realloc(kernel_map,
+                                                     (vm_offset_t) tokens,
+                                                     token_q_cur_size,
+                                                     (vm_offset_t *) &new_loc,
+                                                     alloc_size);
+                       } else {
+                               result = kmem_alloc(kernel_map,
+                                                   (vm_offset_t *) &new_loc,
+                                                   alloc_size);
+                       }
+               }
+               
+               vm_page_lock_queues();
+               
+               if (result) {
+                       /* Unblock waiting threads */
+                       token_q_allocating = 0;
+                       thread_wakeup((event_t)&token_q_allocating);
+                       return result;
+               }
+               
+               /* If we get here, we allocated new memory. Update pointers and
+                * dealloc old range */
+               struct token *old_tokens=tokens;
+               tokens=new_loc;
+               vm_size_t old_token_q_cur_size=token_q_cur_size;
+               token_q_cur_size=alloc_size;
+               token_q_max_cnt = (token_idx_t) (token_q_cur_size /
+                                                sizeof(struct token));
+               assert (token_init_idx < token_q_max_cnt);      /* We must have a free token now */
+               
+               if (old_token_q_cur_size) {     /* clean up old mapping */
+                       vm_page_unlock_queues();
+                       /* kmem_realloc leaves the old region mapped. Get rid of it. */
+                       kmem_free(kernel_map, (vm_offset_t)old_tokens, old_token_q_cur_size);
+                       vm_page_lock_queues();
+               }
+               
+               /* Unblock waiting threads */
+               token_q_allocating = 0;
+               thread_wakeup((event_t)&token_q_allocating);
+               
+               goto find_available_token;
        }
-
+       
+       assert (token);
+       
        /*
         * the new pagecount we got needs to be applied to all queues except
         * obsolete
         */
        for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
-               purgeable_queues[i].new_pages += token_new_pagecount;
-               assert(purgeable_queues[i].new_pages >= 0);
-               assert((uint64_t) (purgeable_queues[i].new_pages) <= TOKEN_COUNT_MAX);
+               int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
+               assert(pages >= 0);
+               assert(pages <= TOKEN_COUNT_MAX);
+               purgeable_queues[i].new_pages = (int32_t) pages;
+               assert(purgeable_queues[i].new_pages == pages);
        }
        token_new_pagecount = 0;
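
When both the free list and the lazily initialized slots are exhausted, the hunk above grows the token array: the allocating thread sets token_q_allocating, drops the page-queue lock for the (re)allocation, retakes it, publishes the new array, and wakes any waiters, which then re-check from find_available_token. A hedged sketch of that drop-lock/grow/re-check pattern, using a POSIX mutex/condvar and realloc as stand-ins for vm_page_queue_lock, lck_mtx_sleep/thread_wakeup and kmem_realloc:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cv   = PTHREAD_COND_INITIALIZER;
static int    allocating = 0;           /* analog of token_q_allocating */
static void  *table      = NULL;        /* analog of tokens */
static size_t table_size = 0;           /* analog of token_q_cur_size */

/* Called and returns with q_lock held. */
static int
grow_table(size_t wanted_size)
{
        while (allocating)                      /* single-thread the allocator */
                pthread_cond_wait(&q_cv, &q_lock);

        if (table_size >= wanted_size)          /* someone else already grew it */
                return 0;

        allocating = 1;
        pthread_mutex_unlock(&q_lock);          /* drop the lock to allocate */

        /* realloc frees the old block itself; kmem_realloc leaves the old
         * range mapped, which is why the kernel code kmem_free()s it. */
        void *new_loc = realloc(table, wanted_size);

        pthread_mutex_lock(&q_lock);            /* retake before publishing */
        if (new_loc != NULL) {
                table = new_loc;
                table_size = wanted_size;
        }
        allocating = 0;
        pthread_cond_broadcast(&q_cv);          /* wake waiters; they re-check */
        return (new_loc != NULL) ? 0 : -1;
}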
 
@@ -130,8 +235,10 @@ vm_purgeable_token_add(purgeable_q_t queue)
        if (queue->token_q_tail == 0) {
                assert(queue->token_q_head == 0 && queue->token_q_unripe == 0);
                queue->token_q_head = token;
+               tokens[token].prev = 0;
        } else {
                tokens[queue->token_q_tail].next = token;
+               tokens[token].prev = queue->token_q_tail;
        }
        if (queue->token_q_unripe == 0) {       /* only ripe tokens (token
                                                 * count == 0) in queue */
@@ -164,10 +271,15 @@ vm_purgeable_token_add(purgeable_q_t queue)
 /*
  * Remove first token from queue and return its index. Add its count to the
  * count of the next token.
+ * Call with page queue locked. 
  */
 static token_idx_t 
 vm_purgeable_token_remove_first(purgeable_q_t queue)
 {
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       
        token_idx_t     token;
        token = queue->token_q_head;
 
@@ -190,6 +302,7 @@ vm_purgeable_token_remove_first(purgeable_q_t queue)
                queue->token_q_head = tokens[token].next;
                if (queue->token_q_head) {
                        tokens[queue->token_q_head].count += tokens[token].count;
+                       tokens[queue->token_q_head].prev = 0;
                } else {
                        /* currently no other tokens in the queue */
                        /*
@@ -218,80 +331,183 @@ vm_purgeable_token_remove_first(purgeable_q_t queue)
        return token;
 }
 
-/* Delete first token from queue. Return token to token queue. */
+static token_idx_t 
+vm_purgeable_token_remove_last(purgeable_q_t queue)
+{
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       
+       token_idx_t     token;
+       token = queue->token_q_tail;
+
+       assert(token);
+
+       if (token) {
+               assert(queue->token_q_head);
+
+               if (queue->token_q_tail == queue->token_q_head)
+                       assert(tokens[token].next == 0);
+
+               if (queue->token_q_unripe == 0) {
+                       /* we're removing a ripe token. decrease count */
+                       available_for_purge--;
+                       assert(available_for_purge >= 0);
+               } else if (queue->token_q_unripe == token) {
+                       /* we're removing the only unripe token */
+                       queue->token_q_unripe = 0;
+               }
+                       
+               if (token == queue->token_q_head) {
+                       /* token is the last one in the queue */
+                       queue->token_q_head = 0;
+                       queue->token_q_tail = 0;
+               } else {
+                       token_idx_t new_tail;
+
+                       new_tail = tokens[token].prev;
+
+                       assert(new_tail);
+                       assert(tokens[new_tail].next == token);
+
+                       queue->token_q_tail = new_tail;
+                       tokens[new_tail].next = 0;
+               }
+
+               queue->new_pages += tokens[token].count;
+
+#if MACH_ASSERT
+               queue->debug_count_tokens--;
+               vm_purgeable_token_check_queue(queue);
+
+               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_DELETE)),
+                                     queue->type,
+                                     tokens[queue->token_q_head].count,        /* num pages on new
+                                                                                * first token */
+                                     token_new_pagecount,      /* num pages waiting for
+                                                                * next token */
+                                     available_for_purge,
+                                     0);
+#endif
+       }
+       return token;
+}
+
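
vm_purgeable_token_remove_last is why the prev link was added to struct token: without it, unlinking the tail would need a full walk from the head. A small index-based sketch of the tail unlink, keeping the same "0 means none" convention (illustrative types, not the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uint32_t idx_t;
struct node { idx_t prev, next; };

static struct node nodes[16];           /* slot 0 reserved as "none" */
static idx_t head = 0, tail = 0;

static idx_t
remove_tail(void)
{
        idx_t t = tail;
        if (t == 0)
                return 0;               /* queue is empty */
        if (t == head) {                /* removing the only element */
                head = tail = 0;
        } else {
                idx_t new_tail = nodes[t].prev;
                assert(new_tail != 0 && nodes[new_tail].next == t);
                nodes[new_tail].next = 0;
                tail = new_tail;
        }
        nodes[t].prev = nodes[t].next = 0;
        return t;
}
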
+/* 
+ * Delete first token from queue. Return token to token queue.
+ * Call with page queue locked. 
+ */
 void
 vm_purgeable_token_delete_first(purgeable_q_t queue)
 {
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
        token_idx_t     token = vm_purgeable_token_remove_first(queue);
 
        if (token) {
                /* stick removed token on free queue */
                tokens[token].next = token_free_idx;
+               tokens[token].prev = 0;
                token_free_idx = token;
        }
 }
 
-
 void
-vm_purgeable_q_advance_all(uint32_t num_pages)
+vm_purgeable_token_delete_last(purgeable_q_t queue)
 {
-       /*
-        * don't need to advance obsolete queue - all items are ripe there,
-        * always
-        */
-       vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_FIFO]);
-       vm_purgeable_q_advance(num_pages, &purgeable_queues[PURGEABLE_Q_TYPE_LIFO]);
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       token_idx_t     token = vm_purgeable_token_remove_last(queue);
+
+       if (token) {
+               /* stick removed token on free queue */
+               tokens[token].next = token_free_idx;
+               tokens[token].prev = 0;
+               token_free_idx = token;
+       }
 }
 
-/*
- * Decrements token counters. A token counter can be zero, this means the
- * object is ripe to be purged. It is not purged immediately, because that
- * could cause several objects to be purged even if purging one would satisfy
- * the memory needs. Instead, the pageout thread purges one after the other
- * by calling vm_purgeable_object_purge_one and then rechecking the memory
- * balance.
- */
-static void
-vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
+
+/* Call with page queue locked. */
+void
+vm_purgeable_q_advance_all()
 {
-       /* Iterate over tokens as long as there are unripe tokens. */
-       while (queue->token_q_unripe) {
-               int             min = (tokens[queue->token_q_unripe].count < num_pages) ?
-               tokens[queue->token_q_unripe].count : num_pages;
-               tokens[queue->token_q_unripe].count -= min;
-               num_pages -= min;
-
-               if (tokens[queue->token_q_unripe].count == 0) {
-                       queue->token_q_unripe = tokens[queue->token_q_unripe].next;
-                       available_for_purge++;
-                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_QUEUE_ADVANCE)),
-                                             queue->type,
-                                         tokens[queue->token_q_head].count,    /* num pages on new
-                                                                                * first token */
-                                             0,
-                                             available_for_purge,
-                                             0);
-                       continue;       /* One token ripened. Make sure to
-                                        * check the next. */
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       
+       /* check queue counters - if they get really large, scale them back.
+        * They tend to get that large when there is no purgeable queue action */
+       int i;
+       if(token_new_pagecount > (TOKEN_NEW_PAGECOUNT_MAX >> 1))        /* a system idling for years might get there */
+       {
+               for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
+                       int64_t pages = purgeable_queues[i].new_pages += token_new_pagecount;
+                       assert(pages >= 0);
+                       assert(pages <= TOKEN_COUNT_MAX);
+                       purgeable_queues[i].new_pages = (int32_t) pages;
+                       assert(purgeable_queues[i].new_pages == pages);
                }
-               if (num_pages == 0)
-                       break;  /* Current token not ripe and no more pages.
-                                * Work done. */
+               token_new_pagecount = 0;
        }
-
+       
        /*
-        * if there are no unripe tokens in the queue, decrement the
-        * new_pages counter instead new_pages can be negative, but must be
-        * canceled out by token_new_pagecount -- since inactive queue as a
-        * whole always contains a nonnegative number of pages
+        * Decrement token counters. A token counter can be zero, this means the
+        * object is ripe to be purged. It is not purged immediately, because that
+        * could cause several objects to be purged even if purging one would satisfy
+        * the memory needs. Instead, the pageout thread purges one after the other
+        * by calling vm_purgeable_object_purge_one and then rechecking the memory
+        * balance.
+        *
+        * No need to advance obsolete queue - all items are ripe there,
+        * always
         */
-       if (!queue->token_q_unripe) {
-               queue->new_pages -= num_pages;
-               assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
-       }
+       for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
+               purgeable_q_t queue = &purgeable_queues[i];
+               uint32_t num_pages = 1;
+               
+               /* Iterate over tokens as long as there are unripe tokens. */
+               while (queue->token_q_unripe) {
+                       if (tokens[queue->token_q_unripe].count && num_pages)
+                       {
+                               tokens[queue->token_q_unripe].count -= 1;
+                               num_pages -= 1;
+                       }
+
+                       if (tokens[queue->token_q_unripe].count == 0) {
+                               queue->token_q_unripe = tokens[queue->token_q_unripe].next;
+                               available_for_purge++;
+                               KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_RIPEN)),
+                                                     queue->type,
+                                                     tokens[queue->token_q_head].count,        /* num pages on new
+                                                                                        * first token */
+                                                     0,
+                                                     available_for_purge,
+                                                     0);
+                               continue;       /* One token ripened. Make sure to
+                                                * check the next. */
+                       }
+                       if (num_pages == 0)
+                               break;  /* Current token not ripe and no more pages.
+                                        * Work done. */
+               }
+
+               /*
+                * if there are no unripe tokens in the queue, decrement the
+                * new_pages counter instead new_pages can be negative, but must be
+                * canceled out by token_new_pagecount -- since inactive queue as a
+                * whole always contains a nonnegative number of pages
+                */
+               if (!queue->token_q_unripe) {
+                       queue->new_pages -= num_pages;
+                       assert((int32_t) token_new_pagecount + queue->new_pages >= 0);
+               }
 #if MACH_ASSERT
-       vm_purgeable_token_check_queue(queue);
+               vm_purgeable_token_check_queue(queue);
 #endif
+       }
 }
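
The rewritten vm_purgeable_q_advance_all folds the old per-queue vm_purgeable_q_advance into a single pass that advances each queue by exactly one page: the page decrements the first unripe token, a token reaching zero ripens (making one more object available for purge), and if the queue has no unripe token the page is charged against new_pages. A compact sketch of that step for one queue (simplified state, hypothetical names):

#include <stdint.h>

struct tk { uint32_t count; uint32_t next; };
static struct tk tks[16];               /* slot 0 reserved as "none" */
static uint32_t unripe = 0;             /* first unripe token, 0 = none */
static int      ready_to_purge = 0;     /* analog of available_for_purge */
static int32_t  new_pages = 0;

static void
advance_one_page(void)
{
        int pages = 1;                  /* the queue advances by one page per call */

        while (unripe != 0) {
                if (tks[unripe].count > 0 && pages > 0) {
                        tks[unripe].count -= 1;         /* page ages the token */
                        pages -= 1;
                }
                if (tks[unripe].count == 0) {           /* token ripened */
                        unripe = tks[unripe].next;
                        ready_to_purge += 1;
                        continue;       /* the next token may already be ripe */
                }
                if (pages == 0)
                        break;          /* token still unripe, page consumed */
        }
        if (unripe == 0)
                new_pages -= pages;     /* no unripe token: charge new_pages */
}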
 
 /*
@@ -305,16 +521,22 @@ vm_purgeable_q_advance(uint32_t num_pages, purgeable_q_t queue)
  *     Yes - purge it. Remove token. If there is no ripe token, remove ripe
  *      token from other queue and migrate unripe token from this
  *      queue to other queue.
+ * Call with page queue locked.
  */
 static void
 vm_purgeable_token_remove_ripe(purgeable_q_t queue)
 {
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
        assert(queue->token_q_head && tokens[queue->token_q_head].count == 0);
        /* return token to free list. advance token list. */
        token_idx_t     new_head = tokens[queue->token_q_head].next;
        tokens[queue->token_q_head].next = token_free_idx;
+       tokens[queue->token_q_head].prev = 0;
        token_free_idx = queue->token_q_head;
        queue->token_q_head = new_head;
+       tokens[new_head].prev = 0;
        if (new_head == 0)
                queue->token_q_tail = 0;
 
@@ -331,10 +553,14 @@ vm_purgeable_token_remove_ripe(purgeable_q_t queue)
  * Delete a ripe token from the given queue. If there are no ripe tokens on
  * that queue, delete a ripe token from queue2, and migrate an unripe token
  * from queue to queue2
+ * Call with page queue locked.
  */
 static void
 vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t queue2)
 {
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
        assert(queue->token_q_head);
 
        if (tokens[queue->token_q_head].count == 0) {
@@ -361,36 +587,55 @@ vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t que
 
                /* migrate to queue2 */
                /* go to migration target loc */
-               token_idx_t    *token_in_queue2 = &queue2->token_q_head;
-               while (*token_in_queue2 && count > tokens[*token_in_queue2].count) {
-                       count -= tokens[*token_in_queue2].count;
-                       token_in_queue2 = &tokens[*token_in_queue2].next;
-               }
 
-               if ((*token_in_queue2 == queue2->token_q_unripe) ||     /* becomes the first
-                                                                        * unripe token */
-                   (queue2->token_q_unripe == 0))
-                       queue2->token_q_unripe = token; /* must update unripe
-                                                        * pointer */
+               token_idx_t token_to_insert_before = queue2->token_q_head, token_to_insert_after;
 
-               /* insert token */
-               tokens[token].count = count;
-               tokens[token].next = *token_in_queue2;
+               while (token_to_insert_before != 0 && count > tokens[token_to_insert_before].count) {
+                       count -= tokens[token_to_insert_before].count;
+                       token_to_insert_before = tokens[token_to_insert_before].next;
+               }
+
+               /* token_to_insert_before is now set correctly */
+       
+               /* should the inserted token become the first unripe token? */
+               if ((token_to_insert_before == queue2->token_q_unripe) || (queue2->token_q_unripe == 0))
+                       queue2->token_q_unripe = token; /* if so, must update unripe pointer */
 
                /*
-                * if inserting at end, reduce new_pages by that value if
-                * inserting before token, reduce counter of that token
+                * insert token.
+                * if inserting at end, reduce new_pages by that value;
+                * otherwise, reduce counter of next token
                 */
-               if (*token_in_queue2 == 0) {    /* insertion at end of queue2 */
-                       queue2->token_q_tail = token;   /* must update tail
-                                                        * pointer */
+
+               tokens[token].count = count;
+
+               if (token_to_insert_before != 0) {
+                       token_to_insert_after = tokens[token_to_insert_before].prev;
+
+                       tokens[token].next = token_to_insert_before;
+                       tokens[token_to_insert_before].prev = token;
+
+                       assert(tokens[token_to_insert_before].count >= count);
+                       tokens[token_to_insert_before].count -= count;
+               } else {
+                       /* if we ran off the end of the list, the token to insert after is the tail */
+                       token_to_insert_after = queue2->token_q_tail;
+
+                       tokens[token].next = 0;
+                       queue2->token_q_tail = token;
+
                        assert(queue2->new_pages >= (int32_t) count);
                        queue2->new_pages -= count;
+               }
+
+               if (token_to_insert_after != 0) {
+                       tokens[token].prev = token_to_insert_after;
+                       tokens[token_to_insert_after].next = token;
                } else {
-                       assert(tokens[*token_in_queue2].count >= count);
-                       tokens[*token_in_queue2].count -= count;
+                       /* is this case possible? */
+                       tokens[token].prev = 0;
+                       queue2->token_q_head = token;
                }
-               *token_in_queue2 = token;
 
 #if MACH_ASSERT
                queue2->debug_count_tokens++;
@@ -400,54 +645,204 @@ vm_purgeable_token_choose_and_delete_ripe(purgeable_q_t queue, purgeable_q_t que
 }
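
Token counts are stored as deltas relative to the preceding token, so migrating a token into queue2 means walking until the carried count fits, then subtracting the remainder from the neighbour it is inserted before (or from new_pages when it becomes the new tail) so the running totals are preserved. A sketch of that count-preserving insert on a doubly linked index list (unripe-pointer bookkeeping omitted; illustrative names):

#include <assert.h>
#include <stdint.h>

struct dtok { uint32_t count; uint32_t prev, next; };
static struct dtok dt[16];              /* slot 0 reserved as "none" */
static uint32_t q_head = 0, q_tail = 0;
static int32_t  q_new_pages = 0;

static void
insert_by_count(uint32_t token, uint32_t count)
{
        uint32_t before = q_head;               /* walk until the delta fits */
        uint32_t after;

        while (before != 0 && count > dt[before].count) {
                count -= dt[before].count;
                before = dt[before].next;
        }

        dt[token].count = count;

        if (before != 0) {                      /* insert in front of `before` */
                after = dt[before].prev;
                dt[token].next = before;
                dt[before].prev = token;
                assert(dt[before].count >= count);
                dt[before].count -= count;      /* neighbour absorbs the delta */
        } else {                                /* ran off the end: new tail */
                after = q_tail;
                dt[token].next = 0;
                q_tail = token;
                assert(q_new_pages >= (int32_t) count);
                q_new_pages -= (int32_t) count; /* delta charged to new_pages */
        }

        dt[token].prev = after;
        if (after != 0)
                dt[after].next = token;
        else
                q_head = token;
}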
 
 /* Find an object that can be locked. Returns locked object. */
-static          vm_object_t
-vm_purgeable_object_find_and_lock(purgeable_q_t queue, int group)
+/* Call with purgeable queue locked. */
+static vm_object_t
+vm_purgeable_object_find_and_lock(
+       purgeable_q_t   queue,
+       int             group,
+       boolean_t       pick_ripe)
 {
+       vm_object_t     object, best_object;
+       int             object_task_importance;
+       int             best_object_task_importance;
+       int             best_object_skipped;
+       int             num_objects_skipped;
+       task_t          owner;
+
+       best_object = VM_OBJECT_NULL;
+       best_object_task_importance = INT_MAX;
+
+       lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
        /*
         * Usually we would pick the first element from a queue. However, we
         * might not be able to get a lock on it, in which case we try the
         * remaining elements in order.
         */
 
-       vm_object_t     object;
+       num_objects_skipped = -1;
        for (object = (vm_object_t) queue_first(&queue->objq[group]);
             !queue_end(&queue->objq[group], (queue_entry_t) object);
-            object = (vm_object_t) queue_next(&object->objq)) {
-               if (vm_object_lock_try(object)) {
-                       /* Locked. Great. We'll take it. Remove and return. */
-                       queue_remove(&queue->objq[group], object,
-                                    vm_object_t, objq);
-                       object->objq.next = 0;
-                       object->objq.prev = 0;
+            object = (vm_object_t) queue_next(&object->objq),
+               num_objects_skipped++) {
+
+               if (pick_ripe &&
+                   ! object->purgeable_when_ripe) {
+                       /* we want an object that has a ripe token */
+                       continue;
+               }
+
+               object_task_importance = 0;
+               owner = object->vo_purgeable_owner;
+               if (owner) {
+                       object_task_importance = task_importance_estimate(owner);
+               }
+               if (object_task_importance < best_object_task_importance) {
+                       if (vm_object_lock_try(object)) {
+                               if (best_object != VM_OBJECT_NULL) {
+                                       /* forget about previous best object */
+                                       vm_object_unlock(best_object);
+                               }
+                               best_object = object;
+                               best_object_task_importance = object_task_importance;
+                               best_object_skipped = num_objects_skipped;
+                               if (best_object_task_importance == 0) {
+                                       /* can't get any better: stop looking */
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (best_object) {
+               /* Locked. Great. We'll take it. Remove and return. */
+//             printf("FOUND PURGEABLE object %p skipped %d\n", object, num_objects_skipped);
+
+               /* clear ownership when dequeueing purgeable object */
+               owner = best_object->vo_purgeable_owner;
+               if (owner) {
+                       assert(owner->task_volatile_objects > 0);
+                       OSAddAtomic(-1, &owner->task_volatile_objects);
+                       best_object->vo_purgeable_owner = NULL;
+               }
+
+               queue_remove(&queue->objq[group], best_object,
+                            vm_object_t, objq);
+               best_object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
+               best_object->purgeable_queue_group = 0;
+               best_object->objq.next = NULL;
+               best_object->objq.prev = NULL;
 #if MACH_ASSERT
-                       queue->debug_count_objects--;
+               queue->debug_count_objects--;
 #endif
-                       return object;
-               }
+               return best_object;
        }
 
        return 0;
 }
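
vm_purgeable_object_find_and_lock no longer takes the first lockable object; it scans the group for the object whose owning task has the lowest importance, keeping only candidates whose trylock succeeds and stopping early at importance 0. A simplified sketch of that selection loop over a plain pointer list (hypothetical types and lock helpers):

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

struct obj { int importance; bool locked; struct obj *next; };

static bool try_lock(struct obj *o) { if (o->locked) return false; o->locked = true; return true; }
static void unlock(struct obj *o)   { o->locked = false; }

static struct obj *
pick_best(struct obj *head)
{
        struct obj *best = NULL;
        int best_importance = INT_MAX;

        for (struct obj *o = head; o != NULL; o = o->next) {
                if (o->importance < best_importance && try_lock(o)) {
                        if (best != NULL)
                                unlock(best);           /* drop the old candidate */
                        best = o;
                        best_importance = o->importance;
                        if (best_importance == 0)
                                break;                  /* cannot do better */
                }
        }
        return best;                                    /* locked, or NULL */
}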
 
+/* Can be called without holding locks */
 void
-vm_purgeable_object_purge_one(void)
+vm_purgeable_object_purge_all(void)
 {
        enum purgeable_q_type i;
        int             group;
-       vm_object_t     object = 0;
+       vm_object_t     object;
+       unsigned int    purged_count;
+       uint32_t        collisions;
+
+       purged_count = 0;
+       collisions = 0;
 
-       mutex_lock(&vm_purgeable_queue_lock);
+restart:
+       lck_mtx_lock(&vm_purgeable_queue_lock);
        /* Cycle through all queues */
        for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
-               purgeable_q_t   queue = &purgeable_queues[i];
+               purgeable_q_t   queue;
+
+               queue = &purgeable_queues[i];
 
                /*
-                * Are there any ripe tokens on this queue? If yes, we'll
-                * find an object to purge there
+                * Look through all groups, starting from the lowest. If
+                * we find an object in that group, try to lock it (this can
+                * fail). If locking is successful, we can drop the queue
+                * lock, remove a token and then purge the object.
                 */
-               if (!(queue->token_q_head && tokens[queue->token_q_head].count == 0))
-                       continue;       /* no token? Look at next purgeable
-                                        * queue */
+               for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
+                       while (!queue_empty(&queue->objq[group])) {
+                               object = vm_purgeable_object_find_and_lock(queue, group, FALSE);
+                               if (object == VM_OBJECT_NULL) {
+                                       lck_mtx_unlock(&vm_purgeable_queue_lock);
+                                       mutex_pause(collisions++);
+                                       goto restart;
+                               }
+
+                               lck_mtx_unlock(&vm_purgeable_queue_lock);
+                               
+                               /* Lock the page queue here so we don't hold it
+                                * over the whole, lengthy operation */
+                               if (object->purgeable_when_ripe) {
+                                       vm_page_lock_queues();
+                                       vm_purgeable_token_remove_first(queue);
+                                       vm_page_unlock_queues();
+                               }
+                               
+                               assert(object->purgable == VM_PURGABLE_VOLATILE);
+                               (void) vm_object_purge(object);
+                               vm_object_unlock(object);
+                               purged_count++;
+                               goto restart;
+                       }
+                       assert(queue->debug_count_objects >= 0);
+               }
+       }
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ALL)),
+                             purged_count, /* # of purged objects */
+                             0,
+                             available_for_purge,
+                             0,
+                             0);
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
+       return;
+}
+
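
vm_purgeable_object_purge_all uses a restart-with-backoff pattern: if nothing in a group can be locked it drops the queue lock, pauses (mutex_pause with an increasing collision count), and rescans from the top; after each successful purge it also restarts, since the queues may have changed while unlocked. A self-contained sketch of that control flow with POSIX stand-ins and trivial stub helpers (not the kernel's functions):

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Trivial stand-ins so the sketch compiles on its own; the kernel versions
 * are queue_empty(), vm_purgeable_object_find_and_lock() and vm_object_purge(). */
static int   remaining = 3;
static int   queue_is_empty(void)      { return remaining == 0; }
static void *find_and_lock_one(void)   { return remaining ? (void *)&remaining : (void *)0; }
static void  purge_and_unlock(void *o) { (void)o; remaining--; }

static void
purge_all(void)
{
        unsigned collisions = 0;
restart:
        pthread_mutex_lock(&queue_lock);
        while (!queue_is_empty()) {
                void *obj = find_and_lock_one();
                if (obj == 0) {                          /* lost a lock race */
                        pthread_mutex_unlock(&queue_lock);
                        usleep(1u << (collisions < 10 ? collisions++ : 10));
                        goto restart;                    /* rescan from the top */
                }
                pthread_mutex_unlock(&queue_lock);       /* don't hold it while purging */
                purge_and_unlock(obj);
                goto restart;                            /* queues may have changed */
        }
        pthread_mutex_unlock(&queue_lock);
}
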
+boolean_t
+vm_purgeable_object_purge_one_unlocked(
+       int     force_purge_below_group)
+{
+       boolean_t       retval;
+
+       vm_page_lock_queues();
+       retval = vm_purgeable_object_purge_one(force_purge_below_group);
+       vm_page_unlock_queues();
+
+       return retval;
+}
+
+boolean_t
+vm_purgeable_object_purge_one(
+       int     force_purge_below_group)
+{
+       enum purgeable_q_type i;
+       int             group;
+       vm_object_t     object = 0;
+       purgeable_q_t   queue, queue2;
+       boolean_t       forced_purge;
+
+       /* Need the page queue lock since we'll be changing the token queue. */
+#if MACH_ASSERT
+       lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
+#endif
+       lck_mtx_lock(&vm_purgeable_queue_lock);
+       
+       /* Cycle through all queues */
+       for (i = PURGEABLE_Q_TYPE_OBSOLETE; i < PURGEABLE_Q_TYPE_MAX; i++) {
+               queue = &purgeable_queues[i];
+
+               if (force_purge_below_group == 0) {
+                       /*
+                        * Are there any ripe tokens on this queue? If yes,
+                        * we'll find an object to purge there
+                        */
+                       if (!queue->token_q_head) {
+                               /* no token: look at next purgeable queue */
+                               continue;
+                       }
+
+                       if (tokens[queue->token_q_head].count != 0) {
+                               /* no ripe token: next queue */
+                               continue;
+                       }
+               }
 
                /*
                 * Now look through all groups, starting from the lowest. If
@@ -456,19 +851,55 @@ vm_purgeable_object_purge_one(void)
                 * lock, remove a token and then purge the object.
                 */
                for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
-                       if (!queue_empty(&queue->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue, group))) {
-                               mutex_unlock(&vm_purgeable_queue_lock);
-                               vm_purgeable_token_choose_and_delete_ripe(queue, 0);
+                       if (!queue->token_q_head ||
+                           tokens[queue->token_q_head].count != 0) {
+                               /* no tokens or no ripe tokens */
+
+                               if (group >= force_purge_below_group) {
+                                       /* no more groups to force-purge */
+                                       break;
+                               }
+
+                               /*
+                                * Try and purge an object in this group
+                                * even though no tokens are ripe.
+                                */
+                               if (!queue_empty(&queue->objq[group]) &&
+                                   (object = vm_purgeable_object_find_and_lock(queue, group, FALSE))) {
+                                       lck_mtx_unlock(&vm_purgeable_queue_lock);
+                                       if (object->purgeable_when_ripe) {
+                                               vm_purgeable_token_delete_first(queue);
+                                       }
+                                       forced_purge = TRUE;
+                                       goto purge_now;
+                               }
+
+                               /* nothing to purge in this group: next group */
+                               continue;
+                       }
+                       if (!queue_empty(&queue->objq[group]) && 
+                           (object = vm_purgeable_object_find_and_lock(queue, group, TRUE))) {
+                               lck_mtx_unlock(&vm_purgeable_queue_lock);
+                               if (object->purgeable_when_ripe) {
+                                       vm_purgeable_token_choose_and_delete_ripe(queue, 0);
+                               }
+                               forced_purge = FALSE;
                                goto purge_now;
-                       } else {
-                               assert(i != PURGEABLE_Q_TYPE_OBSOLETE); /* obsolete queue must
-                                                                        * have all objects in
-                                                                        * group 0 */
-                               purgeable_q_t   queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? PURGEABLE_Q_TYPE_FIFO : PURGEABLE_Q_TYPE_LIFO];
-
-                               if (!queue_empty(&queue2->objq[group]) && (object = vm_purgeable_object_find_and_lock(queue2, group))) {
-                                       mutex_unlock(&vm_purgeable_queue_lock);
-                                       vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
+                       }
+                       if (i != PURGEABLE_Q_TYPE_OBSOLETE) { 
+                               /* This is the token migration case, and it works between
+                                * FIFO and LIFO only */
+                               queue2 = &purgeable_queues[i != PURGEABLE_Q_TYPE_FIFO ? 
+                                                          PURGEABLE_Q_TYPE_FIFO : 
+                                                          PURGEABLE_Q_TYPE_LIFO];
+
+                               if (!queue_empty(&queue2->objq[group]) && 
+                                   (object = vm_purgeable_object_find_and_lock(queue2, group, TRUE))) {
+                                       lck_mtx_unlock(&vm_purgeable_queue_lock);
+                                       if (object->purgeable_when_ripe) {
+                                               vm_purgeable_token_choose_and_delete_ripe(queue2, queue);
+                                       }
+                                       forced_purge = FALSE;
                                        goto purge_now;
                                }
                        }
@@ -480,39 +911,62 @@ vm_purgeable_object_purge_one(void)
          * we could end up with no object to purge at this time, even though
          * we have objects in a purgeable state
          */
-       mutex_unlock(&vm_purgeable_queue_lock);
-       return;
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
+       return FALSE;
 
 purge_now:
 
        assert(object);
+       assert(object->purgable == VM_PURGABLE_VOLATILE);
+       vm_page_unlock_queues();  /* Unlock for call to vm_object_purge() */
+//     printf("%sPURGING object %p task %p importance %d queue %d group %d force_purge_below_group %d memorystatus_vm_pressure_level %d\n", forced_purge ? "FORCED " : "", object, object->vo_purgeable_owner, task_importance_estimate(object->vo_purgeable_owner), i, group, force_purge_below_group, memorystatus_vm_pressure_level);
        (void) vm_object_purge(object);
        vm_object_unlock(object);
+       vm_page_lock_queues();
 
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, TOKEN_OBJECT_PURGED)),
-                             (unsigned int) object,    /* purged object */
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
+                             object,   /* purged object */
                              0,
                              available_for_purge,
                              0,
                              0);
+       
+       return TRUE;
 }
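
vm_purgeable_object_purge_one now takes force_purge_below_group: with a ripe token, any group may give up an object as before; without one, only groups numerically below the threshold may be force-purged, and the purge is flagged as forced. A hypothetical condensation of that per-group decision:

#include <stdbool.h>

static bool
should_purge_group(bool queue_has_ripe_token, int group,
    int force_purge_below_group, bool *forced)
{
        if (queue_has_ripe_token) {
                *forced = false;        /* normal, token-driven purge */
                return true;
        }
        if (group < force_purge_below_group) {
                *forced = true;         /* purge even without a ripe token */
                return true;
        }
        return false;                   /* skip this group */
}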
 
+/* Called with object lock held */
 void
 vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
 {
-       mutex_lock(&vm_purgeable_queue_lock);
+       task_t  owner;
+
+       vm_object_lock_assert_exclusive(object);
+       lck_mtx_lock(&vm_purgeable_queue_lock);
 
        if (queue->type == PURGEABLE_Q_TYPE_OBSOLETE)
                group = 0;
+
        if (queue->type != PURGEABLE_Q_TYPE_LIFO)       /* fifo and obsolete are
                                                         * fifo-queued */
                queue_enter(&queue->objq[group], object, vm_object_t, objq);    /* last to die */
        else
                queue_enter_first(&queue->objq[group], object, vm_object_t, objq);      /* first to die */
 
+       object->purgeable_queue_type = queue->type;
+       object->purgeable_queue_group = group;
+
+       /* set ownership when enqueueing purgeable object */
+       assert(object->vo_purgeable_owner == NULL);
+       owner = current_task();
+       if (current_task() != kernel_task) {
+               OSAddAtomic(+1, &owner->task_volatile_objects);
+               assert(owner->task_volatile_objects > 0);
+               object->vo_purgeable_owner = owner;
+       }
+
 #if MACH_ASSERT
        queue->debug_count_objects++;
-       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADDED)),
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_ADD)),
                              0,
                              tokens[queue->token_q_head].count,
                              queue->type,
@@ -520,44 +974,160 @@ vm_purgeable_object_add(vm_object_t object, purgeable_q_t queue, int group)
                              0);
 #endif
 
-       mutex_unlock(&vm_purgeable_queue_lock);
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
 }
 
 /* Look for object. If found, remove from purgeable queue. */
+/* Called with object lock held */
 purgeable_q_t
 vm_purgeable_object_remove(vm_object_t object)
 {
-       enum purgeable_q_type i;
-       int             group;
+       int group;
+       task_t owner;
+       enum purgeable_q_type type;
+       purgeable_q_t queue;
+
+       vm_object_lock_assert_exclusive(object);
+
+       type = object->purgeable_queue_type;
+       group = object->purgeable_queue_group;
+
+       if (type == PURGEABLE_Q_TYPE_MAX) {
+               if (object->objq.prev || object->objq.next)
+                       panic("unmarked object on purgeable q");
+
+               return NULL;
+       } else if (!(object->objq.prev && object->objq.next))
+               panic("marked object not on purgeable q");
+
+       lck_mtx_lock(&vm_purgeable_queue_lock);
+
+       queue = &purgeable_queues[type];
+
+       /* clear ownership when dequeueing purgeable object */
+       owner = object->vo_purgeable_owner;
+       if (owner) {
+               assert(owner->task_volatile_objects > 0);
+               OSAddAtomic(-1, &owner->task_volatile_objects);
+               object->vo_purgeable_owner = NULL;
+       }
+
+       queue_remove(&queue->objq[group], object, vm_object_t, objq);
 
-       mutex_lock(&vm_purgeable_queue_lock);
-       for (i = PURGEABLE_Q_TYPE_FIFO; i < PURGEABLE_Q_TYPE_MAX; i++) {
-               purgeable_q_t   queue = &purgeable_queues[i];
-               for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
-                       vm_object_t     o;
-                       for (o = (vm_object_t) queue_first(&queue->objq[group]);
-                        !queue_end(&queue->objq[group], (queue_entry_t) o);
-                            o = (vm_object_t) queue_next(&o->objq)) {
-                               if (o == object) {
-                                       queue_remove(&queue->objq[group], object,
-                                                    vm_object_t, objq);
 #if MACH_ASSERT
-                                       queue->debug_count_objects--;
-                                       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVED)),
-                                                             0,
-                                         tokens[queue->token_q_head].count,
-                                                             queue->type,
-                                                             group,
-                                                             0);
+       queue->debug_count_objects--;
+       KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_REMOVE)),
+                             0,
+                             tokens[queue->token_q_head].count,
+                             queue->type,
+                             group,
+                             0);
 #endif
-                                       mutex_unlock(&vm_purgeable_queue_lock);
-                                       object->objq.next = 0;
-                                       object->objq.prev = 0;
-                                       return &purgeable_queues[i];
-                               }
+
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
+
+       object->purgeable_queue_type = PURGEABLE_Q_TYPE_MAX;
+       object->purgeable_queue_group = 0;
+
+       object->objq.next = NULL;
+       object->objq.prev = NULL;
+
+       return &purgeable_queues[type];
+}
+
+void
+vm_purgeable_stats_helper(vm_purgeable_stat_t *stat, purgeable_q_t queue, int group, task_t target_task)
+{
+       lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       stat->count = stat->size = 0;
+       vm_object_t     object;
+       for (object = (vm_object_t) queue_first(&queue->objq[group]);
+            !queue_end(&queue->objq[group], (queue_entry_t) object);
+            object = (vm_object_t) queue_next(&object->objq)) {
+                       if (!target_task || object->vo_purgeable_owner == target_task) {
+                               stat->count++;
+                               stat->size += (object->resident_page_count * PAGE_SIZE);
                        }
+       }
+       return;
+}
+
+void
+vm_purgeable_stats(vm_purgeable_info_t info, task_t target_task)
+{
+       purgeable_q_t   queue;
+       int             group;
+
+       lck_mtx_lock(&vm_purgeable_queue_lock);
+       
+       /* Populate fifo_data */
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+       for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+               vm_purgeable_stats_helper(&(info->fifo_data[group]), queue, group, target_task);
+       
+       /* Populate lifo_data */
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+       for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+               vm_purgeable_stats_helper(&(info->lifo_data[group]), queue, group, target_task);
+
+       /* Populate obsolete data */
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+       vm_purgeable_stats_helper(&(info->obsolete_data), queue, 0, target_task);
+
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
+       return;
+}
+       
+
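
vm_purgeable_stats_helper walks one group of one queue and accumulates an object count and resident-page footprint, optionally restricted to a single owning task. A small stand-alone sketch of that aggregation over a plain list (illustrative types and a hypothetical page-size constant):

#include <stddef.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096

struct st_task;
struct st_object { struct st_task *owner; uint32_t resident_pages; struct st_object *next; };
struct st_stat   { uint64_t count; uint64_t size; };

static void
collect_stats(const struct st_object *head,
    const struct st_task *filter,                        /* NULL = all owners */
    struct st_stat *stat)
{
        stat->count = 0;
        stat->size = 0;
        for (const struct st_object *o = head; o != NULL; o = o->next) {
                if (filter == NULL || o->owner == filter) {
                        stat->count++;
                        stat->size += (uint64_t)o->resident_pages * SKETCH_PAGE_SIZE;
                }
        }
}
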
+static void
+vm_purgeable_queue_disown(
+       purgeable_q_t   queue,
+       int             group,
+       task_t          task)
+{
+       vm_object_t     object;
+       int             num_objects;
+
+       lck_mtx_assert(&vm_purgeable_queue_lock, LCK_MTX_ASSERT_OWNED);
+
+       num_objects = 0;
+       for (object = (vm_object_t) queue_first(&queue->objq[group]);
+            !queue_end(&queue->objq[group], (queue_entry_t) object);
+            object = (vm_object_t) queue_next(&object->objq)) {
+               if (object->vo_purgeable_owner == task) {
+                       object->vo_purgeable_owner = NULL;
+                       num_objects++;
                }
        }
-       mutex_unlock(&vm_purgeable_queue_lock);
-       return 0;
+       assert(task->task_volatile_objects >= num_objects);
+       OSAddAtomic(-num_objects, &task->task_volatile_objects);
+       return;
+}
+
+void
+vm_purgeable_disown(
+       task_t  task)
+{
+       purgeable_q_t   queue;
+       int             group;
+
+       if (task == NULL) {
+               return;
+       }
+
+       lck_mtx_lock(&vm_purgeable_queue_lock);
+       
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
+       vm_purgeable_queue_disown(queue, 0, task);
+
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
+       for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+               vm_purgeable_queue_disown(queue, group, task);
+       
+       queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
+       for (group = 0; group < NUM_VOLATILE_GROUPS; group++)
+               vm_purgeable_queue_disown(queue, group, task);
+
+       lck_mtx_unlock(&vm_purgeable_queue_lock);
 }