+/*
+ * Free a list of pages. The list can be up to several hundred pages,
+ * as blocked up by vm_pageout_scan().
+ * The big win is taking the page queue and free-list locks only once per
+ * batch instead of once per page. We sort the incoming pages into n
+ * lists, one for each color.
+ *
+ * The page queues must be locked, and are kept locked.
+ */
+void
+vm_page_free_list(
+	vm_page_t mem)
+{
+	vm_page_t nxt;
+	int pg_count = 0;
+	int color;
+	int inuse_list_head = -1;
+
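+	/*
+	 * free_list[] stages the freed pages locally, one queue per color.
+	 * inuse[] and inuse_list_head thread together the colors that actually
+	 * receive pages, so the splice pass below only visits non-empty queues.
+	 */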
+	queue_head_t free_list[MAX_COLORS];
+	int inuse[MAX_COLORS];
+
+	for (color = 0; color < (signed) vm_colors; color++) {
+		queue_init(&free_list[color]);
+	}
+
+#if DEBUG
+	_mutex_assert(&vm_page_queue_lock, MA_OWNED);
+#endif
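+	/*
+	 * Pass 1: walk the caller's singly linked list without taking the
+	 * free-list lock, dispatching each page as we go.
+	 */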
+	while (mem) {
+#if DEBUG
+		if (mem->tabled || mem->object)
+			panic("vm_page_free_list: freeing tabled page\n");
+		if (mem->inactive || mem->active || mem->throttled || mem->free)
+			panic("vm_page_free_list: freeing page on list\n");
+		if (vm_page_free_verify && !mem->fictitious && !mem->private) {
+			assert(pmap_verify_free(mem->phys_page));
+		}
+#endif
+		assert(mem->pageq.prev == NULL);
+		assert(mem->busy);
+		assert(!mem->free);
+		nxt = (vm_page_t)(mem->pageq.next);
+
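+		/*
+		 * Fictitious pages and pages belonging to the low-memory pool
+		 * are released individually; everything else is staged on the
+		 * local queue for its color.
+		 */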
+		if (!mem->fictitious) {
+			if (mem->phys_page <= vm_lopage_poolend && mem->phys_page >= vm_lopage_poolstart) {
+				mem->pageq.next = NULL;
+				vm_page_release(mem);
+			} else {
+				mem->free = TRUE;
+
+				color = mem->phys_page & vm_color_mask;
+				if (queue_empty(&free_list[color])) {
+					inuse[color] = inuse_list_head;
+					inuse_list_head = color;
+				}
+				queue_enter_first(&free_list[color],
+						  mem,
+						  vm_page_t,
+						  pageq);
+				pg_count++;
+			}
+		} else {
+			assert(mem->phys_page == vm_page_fictitious_addr ||
+			       mem->phys_page == vm_page_guard_addr);
+			vm_page_release_fictitious(mem);
+		}
+		mem = nxt;
+	}
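+	/*
+	 * Pass 2: take the free-list lock once for the whole batch and splice
+	 * the staged pages onto the global free queues.
+	 */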
+	if (pg_count) {
+		unsigned int avail_free_count;
+
+		mutex_lock(&vm_page_queue_free_lock);
+
+		color = inuse_list_head;
+
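+		/*
+		 * Prepend each non-empty local color queue to the head of the
+		 * matching global free queue; only colors threaded on inuse[]
+		 * are visited.
+		 */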
+		while( color != -1 ) {
+			vm_page_t first, last;
+			vm_page_t first_free;
+
+			first = (vm_page_t) queue_first(&free_list[color]);
+			last = (vm_page_t) queue_last(&free_list[color]);
+			first_free = (vm_page_t) queue_first(&vm_page_queue_free[color]);
+
+			if (queue_empty(&vm_page_queue_free[color])) {
+				queue_last(&vm_page_queue_free[color]) =
+					(queue_entry_t) last;
+			} else {
+				queue_prev(&first_free->pageq) =
+					(queue_entry_t) last;
+			}
+			queue_first(&vm_page_queue_free[color]) =
+				(queue_entry_t) first;
+			queue_prev(&first->pageq) =
+				(queue_entry_t) &vm_page_queue_free[color];
+			queue_next(&last->pageq) =
+				(queue_entry_t) first_free;
+			color = inuse[color];
+		}
+
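+		/*
+		 * Update the global free count once for the whole batch, then
+		 * wake any privileged waiters, one page per waiter.
+		 */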
+		vm_page_free_count += pg_count;
+		avail_free_count = vm_page_free_count;
+
+		while ((vm_page_free_wanted_privileged > 0) && avail_free_count) {
+			vm_page_free_wanted_privileged--;
+			avail_free_count--;
+
+			thread_wakeup_one((event_t) &vm_page_free_wanted_privileged);
+		}
+
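+		/*
+		 * Ordinary waiters are only woken when the free count is at
+		 * least vm_page_free_reserved: wake them all if there are
+		 * enough surplus pages for everyone, otherwise one waiter per
+		 * surplus page.
+		 */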
+		if ((vm_page_free_wanted > 0) &&
+		    (avail_free_count >= vm_page_free_reserved)) {
+			unsigned int available_pages;
+
+			if (avail_free_count >= vm_page_free_reserved) {
+				available_pages = (avail_free_count - vm_page_free_reserved);
+			} else {
+				available_pages = 0;
+			}
+
+			if (available_pages >= vm_page_free_wanted) {
+				vm_page_free_wanted = 0;
+				thread_wakeup((event_t) &vm_page_free_count);
+			} else {
+				while (available_pages--) {
+					vm_page_free_wanted--;
+					thread_wakeup_one((event_t) &vm_page_free_count);
+				}
+			}
+		}
+		mutex_unlock(&vm_page_queue_free_lock);
+
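+		/*
+		 * This runs after the free-list lock has been dropped; note
+		 * that purgeable pages only count as available when no
+		 * default pager is registered.
+		 */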
+#if CONFIG_EMBEDDED
+		{
+			int percent_avail;
+
+			/*
+			 * Decide if we need to poke the memorystatus notification thread.
+			 */
+			percent_avail =
+				(vm_page_active_count + vm_page_inactive_count +
+				 vm_page_speculative_count + vm_page_free_count +
+				 (IP_VALID(memory_manager_default)?0:vm_page_purgeable_count) ) * 100 /
+				atop_64(max_mem);
+			if (percent_avail >= (kern_memorystatus_level + 5)) {
+				kern_memorystatus_level = percent_avail;
+				thread_wakeup((event_t)&kern_memorystatus_wakeup);
+			}
+		}
+#endif
+	}
+}
+
+