+#endif /* 0 */
+
+#if CONFIG_JETSAM
+/* Coalition support */
+
+/* sorting info for a particular priority bucket */
+typedef struct memstat_sort_info {
+ coalition_t msi_coal; /* jetsam coalition */
+ uint64_t msi_page_count; /* total page count of all tasks in the coalition */
+ pid_t msi_pid; /* pid of the coalition leader */
+ int msi_ntasks; /* number of tasks in the coalition */
+} memstat_sort_info_t;
+
+/*
+ * qsort comparator: sorts from smallest page count to largest.
+ *
+ * return < 0 for a < b
+ *          0 for a == b
+ *        > 0 for a > b
+ */
+static int memstat_asc_cmp(const void *a, const void *b)
+{
+ const memstat_sort_info_t *msA = (const memstat_sort_info_t *)a;
+ const memstat_sort_info_t *msB = (const memstat_sort_info_t *)b;
+
+ /*
+ * Compare explicitly rather than subtracting: truncating the
+ * unsigned 64-bit difference to int can discard the sign and
+ * violate the contract above.
+ */
+ if (msA->msi_page_count < msB->msi_page_count) {
+ return -1;
+ } else if (msA->msi_page_count > msB->msi_page_count) {
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Return the number of pids rearranged during this sort.
+ */
+static int
+memorystatus_sort_by_largest_coalition_locked(unsigned int bucket_index, int coal_sort_order)
+{
+#define MAX_SORT_PIDS 80
+#define MAX_COAL_LEADERS 10
+
+ unsigned int b = bucket_index;
+ int nleaders = 0;
+ int ntasks = 0;
+ proc_t p = NULL;
+ coalition_t coal = COALITION_NULL;
+ int pids_moved = 0;
+ int total_pids_moved = 0;
+ int i;
+
+ /*
+ * The system is typically under memory pressure when it reaches this
+ * path, so we avoid dynamic memory allocation and use fixed-size
+ * stack arrays instead.
+ */
+ memstat_sort_info_t leaders[MAX_COAL_LEADERS];
+ pid_t pid_list[MAX_SORT_PIDS];
+
+ if (bucket_index >= MEMSTAT_BUCKET_COUNT) {
+ return(0);
+ }
+
+ /*
+ * Clear the array that holds coalition leader information
+ */
+ for (i = 0; i < MAX_COAL_LEADERS; i++) {
+ leaders[i].msi_coal = COALITION_NULL;
+ leaders[i].msi_page_count = 0; /* will hold total coalition page count */
+ leaders[i].msi_pid = 0; /* will hold coalition leader pid */
+ leaders[i].msi_ntasks = 0; /* will hold the number of tasks in a coalition */
+ }
+
+ p = memorystatus_get_first_proc_locked(&b, FALSE);
+ while (p) {
+ if (coalition_is_leader(p->task, COALITION_TYPE_JETSAM, &coal)) {
+ if (nleaders < MAX_COAL_LEADERS) {
+ int coal_ntasks = 0;
+ uint64_t coal_page_count = coalition_get_page_count(coal, &coal_ntasks);
+ leaders[nleaders].msi_coal = coal;
+ leaders[nleaders].msi_page_count = coal_page_count;
+ leaders[nleaders].msi_pid = p->p_pid; /* the coalition leader */
+ leaders[nleaders].msi_ntasks = coal_ntasks;
+ nleaders++;
+ } else {
+ /*
+ * We've hit MAX_COAL_LEADERS and can track no more coalitions.
+ * Any remaining coalitions are left unsorted at the tail of the
+ * priority band when this sort session ends.
+ * TODO: should this be an assert?
+ */
+ printf("%s: WARNING: more than %d leaders in priority band [%d]\n",
+ __FUNCTION__, MAX_COAL_LEADERS, bucket_index);
+ break;
+ }
+ }
+ p = memorystatus_get_next_proc_locked(&b, p, FALSE);
+ }
+
+ if (nleaders == 0) {
+ /* Nothing to sort */
+ return(0);
+ }
+
+ /*
+ * Sort the coalition leader array from smallest coalition page count
+ * to largest. Because the smallest coalition is re-inserted into the
+ * priority bucket first, it ends up deepest toward the tail and is
+ * jetsammed last.
+ */
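+ /*
+ * Illustrative example (hypothetical page counts): leaders with
+ * counts {500, 100, 900} sort to {100, 500, 900}. The 100-page
+ * coalition is processed first and so jetsams last, while the
+ * 900-page coalition is processed last and jetsams first.
+ */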
+ if (nleaders > 1) {
+ qsort(leaders, nleaders, sizeof(memstat_sort_info_t), memstat_asc_cmp);
+ }
+
+#if 0
+ for (i = 0; i < nleaders; i++) {
+ printf("%s: coal_leader[%d of %d] pid[%d] pages[%llu] ntasks[%d]\n",
+ __FUNCTION__, i, nleaders, leaders[i].msi_pid, leaders[i].msi_page_count,
+ leaders[i].msi_ntasks);
+ }
+#endif
+
+ /*
+ * During coalition sorting, processes in a priority band are rearranged
+ * by being re-inserted at the head of the queue. Each subsequent
+ * insertion pushes the earlier ones toward the tail, so the first
+ * process moved to the head ultimately jetsams last.
+ *
+ * For example, the coalition leader is expected to jetsam last, after
+ * its coalition members, so the leader is inserted at the head first.
+ *
+ * After processing a coalition, the jetsam order is:
+ * undefs (jetsam first), extensions, xpc services, leader (jetsam last).
+ */
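+ /*
+ * Worked example (hypothetical roles and pids): for a coalition with
+ * leader L, xpc service X, extension E, and undefined member U, the
+ * moves below insert L, then X, then E, then U at the head. The
+ * resulting head-to-tail order is U, E, X, L, so U jetsams first
+ * and L last.
+ */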
+
+ /*
+ * Coalition members are rearranged in the priority bucket here,
+ * based on their coalition role.
+ */
+ total_pids_moved = 0;
+ for (i = 0; i < nleaders; i++) {
+
+ /* a bit of bookkeeping */
+ pids_moved = 0;
+
+ /* Coalition leaders are jetsammed last, so move into place first */
+ pid_list[0] = leaders[i].msi_pid;
+ pids_moved += memorystatus_move_list_locked(bucket_index, pid_list, 1);
+
+ /* xpc services should jetsam after extensions */
+ ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_XPC,
+ coal_sort_order, pid_list, MAX_SORT_PIDS);
+
+ if (ntasks > 0) {
+ pids_moved += memorystatus_move_list_locked(bucket_index, pid_list,
+ (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
+ }
+
+ /* extensions should jetsam after undefined coalition members */
+ ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_EXT,
+ coal_sort_order, pid_list, MAX_SORT_PIDS);
+
+ if (ntasks > 0) {
+ pids_moved += memorystatus_move_list_locked(bucket_index, pid_list,
+ (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
+ }
+
+ /* undefined coalition members should be the first to jetsam */
+ ntasks = coalition_get_pid_list(leaders[i].msi_coal, COALITION_ROLEMASK_UNDEF,
+ coal_sort_order, pid_list, MAX_SORT_PIDS);
+
+ if (ntasks > 0) {
+ pids_moved += memorystatus_move_list_locked(bucket_index, pid_list,
+ (ntasks <= MAX_SORT_PIDS ? ntasks : MAX_SORT_PIDS));
+ }
+
+#if 0
+ if (pids_moved == leaders[i].msi_ntasks) {
+ /*
+ * All the pids in the coalition were found in this band.
+ */
+ printf("%s: pids_moved[%d] equal total coalition ntasks[%d] \n", __FUNCTION__,
+ pids_moved, leaders[i].msi_ntasks);
+ } else if (pids_moved > leaders[i].msi_ntasks) {
+ /*
+ * Apparently new coalition members showed up during the sort?
+ */
+ printf("%s: pids_moved[%d] were greater than expected coalition ntasks[%d] \n", __FUNCTION__,
+ pids_moved, leaders[i].msi_ntasks);
+ } else {
+ /*
+ * Apparently not all the pids in the coalition were found in this band?
+ */
+ printf("%s: pids_moved[%d] were less than expected coalition ntasks[%d] \n", __FUNCTION__,
+ pids_moved, leaders[i].msi_ntasks);
+ }
+#endif
+
+ total_pids_moved += pids_moved;
+
+ } /* end for */
+
+ return(total_pids_moved);
+}
+
+
+/*
+ * Traverse a list of pids, searching for each within the priority band provided.
+ * If a pid is found, move it to the front of the priority band.
+ * Never searches outside the priority band provided.
+ *
+ * Input:
+ * bucket_index - jetsam priority band.
+ * pid_list - pointer to a list of pids.
+ * list_sz - number of pids in the list.
+ *
+ * Pid list ordering matters: pid_list[n] is expected to jetsam ahead
+ * of pid_list[n+1]. The sort_order is set by the coalition default.
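+ *
+ * Illustrative example (hypothetical pids): given pid_list = {10, 20, 30},
+ * the loop below walks the list in reverse, so 30 is moved to the head
+ * first and 10 last; the resulting head-to-tail order is 10, 20, 30,
+ * and 10 jetsams ahead of 20 and 30.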
+ *
+ * Return:
+ * the number of pids found and hence moved within the priority band.
+ */
+static int
+memorystatus_move_list_locked(unsigned int bucket_index, pid_t *pid_list, int list_sz)
+{
+ memstat_bucket_t *current_bucket;
+ int i;
+ int found_pids = 0;
+
+ if ((pid_list == NULL) || (list_sz <= 0)) {
+ return(0);
+ }
+
+ if (bucket_index >= MEMSTAT_BUCKET_COUNT) {
+ return(0);
+ }
+
+ current_bucket = &memstat_bucket[bucket_index];
+ for (i = 0; i < list_sz; i++) {
+ unsigned int b = bucket_index;
+ proc_t p = NULL;
+ proc_t aProc = NULL;
+ pid_t aPid;
+ int list_index;
+
+ list_index = ((list_sz - 1) - i);
+ aPid = pid_list[list_index];
+
+ /* never search beyond bucket_index provided */
+ p = memorystatus_get_first_proc_locked(&b, FALSE);
+ while (p) {
+ if (p->p_pid == aPid) {
+ aProc = p;
+ break;
+ }
+ p = memorystatus_get_next_proc_locked(&b, p, FALSE);
+ }
+
+ if (aProc == NULL) {
+ /* pid not found in this band, just skip it */
+ continue;
+ } else {
+ TAILQ_REMOVE(¤t_bucket->list, aProc, p_memstat_list);
+ TAILQ_INSERT_HEAD(¤t_bucket->list, aProc, p_memstat_list);
+ found_pids++;
+ }
+ }
+ return(found_pids);
+}
+#endif /* CONFIG_JETSAM */