- if (p == NULL) {
- goto exit;
- }
-
- if (memorystatus_freeze_enabled == FALSE) {
- goto exit;
- }
-
- if (!memorystatus_can_freeze(&memorystatus_freeze_swap_low)) {
- goto exit;
- }
-
- if (memorystatus_freeze_update_throttle()) {
- printf("memorystatus_freeze_process_sync: in throttle, ignorning freeze\n");
- memorystatus_freeze_throttle_count++;
- goto exit;
- }
-
- proc_list_lock();
-
- if (p != NULL) {
- uint32_t purgeable, wired, clean, dirty, state;
- uint32_t max_pages, pages, i;
- boolean_t shared;
-
- aPid = p->p_pid;
- state = p->p_memstat_state;
-
- /* Ensure the process is eligible for freezing */
- if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FROZEN)) || !(state & P_MEMSTAT_SUSPENDED)) {
- proc_list_unlock();
- goto exit;
- }
-
- /* Only freeze processes meeting our minimum resident page criteria */
- memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL);
- if (pages < memorystatus_freeze_pages_min) {
- proc_list_unlock();
- goto exit;
- }
-
- if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
-
- unsigned int avail_swap_space = 0; /* in pages. */
-
- /*
- * Freezer backed by the compressor and swap file(s)
- * which will hold compressed data.
- */
- avail_swap_space = vm_swap_get_free_space() / PAGE_SIZE_64;
-
- max_pages = MIN(avail_swap_space, memorystatus_freeze_pages_max);
-
- if (max_pages < memorystatus_freeze_pages_min) {
- proc_list_unlock();
- goto exit;
- }
- } else {
- /*
- * We only have the compressor without any swap.
- */
- max_pages = UINT32_MAX - 1;
- }
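-
- /*
- * Illustrative arithmetic (hypothetical values): with 4 KB pages,
- * 256 MB of free swap gives avail_swap_space = (256 << 20) / 4096
- * = 65536 pages; with memorystatus_freeze_pages_max = 32768, the
- * budget for this freeze is MIN(65536, 32768) = 32768 pages.
- */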
-
- /* Mark as locked temporarily to avoid kill */
- p->p_memstat_state |= P_MEMSTAT_LOCKED;
- proc_list_unlock();
-
- ret = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE);
-
- DTRACE_MEMORYSTATUS6(memorystatus_freeze, proc_t, p, unsigned int, memorystatus_available_pages, boolean_t, purgeable, unsigned int, wired, uint32_t, clean, uint32_t, dirty);
-
- MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_process_sync: task_freeze %s for pid %d [%s] - "
- "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n",
- (ret == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"),
- memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared);
-
- proc_list_lock();
- p->p_memstat_state &= ~P_MEMSTAT_LOCKED;
-
- if (ret == KERN_SUCCESS) {
- memorystatus_freeze_entry_t data = { aPid, TRUE, dirty };
-
- memorystatus_frozen_count++;
-
- p->p_memstat_state |= (P_MEMSTAT_FROZEN | (shared ? 0 : P_MEMSTAT_NORECLAIM));
-
- if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
- /* Update stats */
- for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
- throttle_intervals[i].pageouts += dirty;
- }
- }
-
- memorystatus_freeze_pageouts += dirty;
- memorystatus_freeze_count++;
-
- proc_list_unlock();
-
- memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data));
- } else {
- proc_list_unlock();
- }
- }
-
-exit:
- lck_mtx_unlock(&freezer_mutex);
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END,
- memorystatus_available_pages, aPid, 0, 0, 0);
-
- return ret;
-}
-
-static int
-memorystatus_freeze_top_process(boolean_t *memorystatus_freeze_swap_low)
-{
- pid_t aPid = 0;
- int ret = -1;
- proc_t p = PROC_NULL, next_p = PROC_NULL;
- unsigned int i = 0;
-
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START,
- memorystatus_available_pages, 0, 0, 0, 0);
-
- proc_list_lock();
-
- next_p = memorystatus_get_first_proc_locked(&i, TRUE);
- while (next_p) {
- kern_return_t kr;
- uint32_t purgeable, wired, clean, dirty;
- boolean_t shared;
- uint32_t pages;
- uint32_t max_pages = 0;
- uint32_t state;
-
- p = next_p;
- next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
-
- aPid = p->p_pid;
- state = p->p_memstat_state;
-
- /* Ensure the process is eligible for freezing */
- if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FROZEN)) || !(state & P_MEMSTAT_SUSPENDED)) {
- continue; // with lock held
- }
-
- /* Only freeze processes meeting our minimum resident page criteria */
- memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL);
- if (pages < memorystatus_freeze_pages_min) {
- continue; // with lock held
- }
-
- if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
-
- /* Ensure there's enough free space to freeze this process. */
-
- unsigned int avail_swap_space = 0; /* in pages. */
-
- /*
- * Freezer backed by the compressor and swap file(s)
- * which will hold compressed data.
- */
- avail_swap_space = vm_swap_get_free_space() / PAGE_SIZE_64;
-
- max_pages = MIN(avail_swap_space, memorystatus_freeze_pages_max);
-
- if (max_pages < memorystatus_freeze_pages_min) {
- *memorystatus_freeze_swap_low = TRUE;
- proc_list_unlock();
- goto exit;
- }
- } else {
- /*
- * We only have the compressor pool.
- */
- max_pages = UINT32_MAX - 1;
- }
-
- /* Mark as locked temporarily to avoid kill */
- p->p_memstat_state |= P_MEMSTAT_LOCKED;
-
- p = proc_ref_locked(p);
- proc_list_unlock();
- if (!p) {
- goto exit;
- }
-
- kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE);
-
- MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_top_process: task_freeze %s for pid %d [%s] - "
- "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, max_pages %d, shared %d\n",
- (kr == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (*p->p_name ? p->p_name : "(unknown)"),
- memorystatus_available_pages, purgeable, wired, clean, dirty, max_pages, shared);
-
- proc_list_lock();
- p->p_memstat_state &= ~P_MEMSTAT_LOCKED;
-
- /* Success? */
- if (KERN_SUCCESS == kr) {
- memorystatus_freeze_entry_t data = { aPid, TRUE, dirty };
-
- memorystatus_frozen_count++;
-
- p->p_memstat_state |= (P_MEMSTAT_FROZEN | (shared ? 0 : P_MEMSTAT_NORECLAIM));
-
- if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
- /* Update stats */
- for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
- throttle_intervals[i].pageouts += dirty;
- }
- }
-
- memorystatus_freeze_pageouts += dirty;
- memorystatus_freeze_count++;
-
- proc_list_unlock();
-
- memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data));
-
- /* Return KERN_SUCCESS */
- ret = kr;
-
- } else {
- proc_list_unlock();
- }
-
- proc_rele(p);
- goto exit;
- }
-
- proc_list_unlock();
-
-exit:
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END,
- memorystatus_available_pages, aPid, 0, 0, 0);
-
- return ret;
-}
-
-static inline boolean_t
-memorystatus_can_freeze_processes(void)
-{
- boolean_t ret;
-
- proc_list_lock();
-
- if (memorystatus_suspended_count) {
- uint32_t average_resident_pages, estimated_processes;
-
- /* Estimate the number of suspended processes we can fit */
- average_resident_pages = memorystatus_suspended_footprint_total / memorystatus_suspended_count;
- estimated_processes = memorystatus_suspended_count +
- ((memorystatus_available_pages - memorystatus_available_pages_critical) / average_resident_pages);
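-
- /*
- * Illustrative example (hypothetical values): 10 suspended processes
- * with 50000 resident pages total give average_resident_pages = 5000;
- * if 20000 pages separate the available and critical marks, we expect
- * room for 20000 / 5000 = 4 more, i.e. estimated_processes = 14.
- */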
-
- /* If it's predicted that no freeze will occur, lower the threshold temporarily */
- if (estimated_processes <= FREEZE_SUSPENDED_THRESHOLD_DEFAULT) {
- memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_LOW;
- } else {
- memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT;
- }
-
- MEMORYSTATUS_DEBUG(1, "memorystatus_can_freeze_processes: %d suspended processes, %d average resident pages / process, %d suspended processes estimated\n",
- memorystatus_suspended_count, average_resident_pages, estimated_processes);
-
- if ((memorystatus_suspended_count - memorystatus_frozen_count) > memorystatus_freeze_suspended_threshold) {
- ret = TRUE;
- } else {
- ret = FALSE;
- }
- } else {
- ret = FALSE;
- }
-
- proc_list_unlock();
-
- return ret;
-}
-
-static boolean_t
-memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low)
-{
- boolean_t can_freeze = TRUE;
-
- /* Only freeze if we're sufficiently low on memory; this holds off freeze right
- after boot, and is generally a no-op once we've reached steady state. */
- if (memorystatus_available_pages > memorystatus_freeze_threshold) {
- return FALSE;
- }
-
- /* Check minimum suspended process threshold. */
- if (!memorystatus_can_freeze_processes()) {
- return FALSE;
- }
- assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
-
- if (!VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
- /*
- * In-core compressor used for freezing WITHOUT on-disk swap support.
- */
- if (vm_compressor_low_on_space()) {
- /* NULL-check the out-parameter before writing through it */
- if (memorystatus_freeze_swap_low) {
- *memorystatus_freeze_swap_low = TRUE;
- }
-
- can_freeze = FALSE;
-
- } else {
- if (memorystatus_freeze_swap_low) {
- *memorystatus_freeze_swap_low = FALSE;
- }
-
- can_freeze = TRUE;
- }
- } else {
- /*
- * Freezing WITH on-disk swap support.
- *
- * In-core compressor fronts the swap.
- */
- if (vm_swap_low_on_space()) {
- if (memorystatus_freeze_swap_low) {
- *memorystatus_freeze_swap_low = TRUE;
- }
-
- can_freeze = FALSE;
- }
-
- }
-
- return can_freeze;
-}
-
-static void
-memorystatus_freeze_update_throttle_interval(mach_timespec_t *ts, struct throttle_interval_t *interval)
-{
- unsigned int freeze_daily_pageouts_max = memorystatus_freeze_daily_mb_max * (1024 * 1024 / PAGE_SIZE);
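-
- /*
- * Illustrative arithmetic (hypothetical values): with 4 KB pages and
- * memorystatus_freeze_daily_mb_max = 1024, freeze_daily_pageouts_max =
- * 1024 * 256 = 262144 pages/day; a 60-minute interval with
- * burst_multiple 2 is then budgeted 2 * ((60 * 262144) / 1440) = 21844
- * pages below.
- */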
- if (CMP_MACH_TIMESPEC(ts, &interval->ts) >= 0) {
- if (!interval->max_pageouts) {
- interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * freeze_daily_pageouts_max) / (24 * 60)));
- } else {
- printf("memorystatus_freeze_update_throttle_interval: %d minute throttle timeout, resetting\n", interval->mins);
- }
- interval->ts.tv_sec = interval->mins * 60;
- interval->ts.tv_nsec = 0;
- ADD_MACH_TIMESPEC(&interval->ts, ts);
- /* Since we update the throttle stats pre-freeze, adjust for overshoot here */
- if (interval->pageouts > interval->max_pageouts) {
- interval->pageouts -= interval->max_pageouts;
- } else {
- interval->pageouts = 0;
- }
- interval->throttle = FALSE;
- } else if (!interval->throttle && interval->pageouts >= interval->max_pageouts) {
- printf("memorystatus_freeze_update_throttle_interval: %d minute pageout limit exceeded; enabling throttle\n", interval->mins);
- interval->throttle = TRUE;
- }
-
- MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n",
- interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60,
- interval->throttle ? "on" : "off");
-}
-
-static boolean_t
-memorystatus_freeze_update_throttle(void)
-{
- clock_sec_t sec;
- clock_nsec_t nsec;
- mach_timespec_t ts;
- uint32_t i;
- boolean_t throttled = FALSE;
-
-#if DEVELOPMENT || DEBUG
- if (!memorystatus_freeze_throttle_enabled) {
- return FALSE;
- }
-#endif
-
- clock_get_system_nanotime(&sec, &nsec);
- ts.tv_sec = sec;
- ts.tv_nsec = nsec;
-
- /* Check freeze pageouts over multiple intervals and throttle if we've exceeded our budget.
- *
- * This ensures that periods of inactivity can't be used as 'credit' towards freeze if the device has
- * remained dormant for a long period. We do, however, allow increased thresholds for shorter intervals in
- * order to allow for bursts of activity.
- */
- for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
- memorystatus_freeze_update_throttle_interval(&ts, &throttle_intervals[i]);
- if (throttle_intervals[i].throttle == TRUE) {
- throttled = TRUE;
- }
- }
-
- return throttled;
-}
-
-static void
-memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused)
-{
- static boolean_t memorystatus_freeze_swap_low = FALSE;
-
- lck_mtx_lock(&freezer_mutex);
- if (memorystatus_freeze_enabled) {
- if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) {
- /* Only freeze if we've not exceeded our pageout budgets.*/
- if (!memorystatus_freeze_update_throttle()) {
- memorystatus_freeze_top_process(&memorystatus_freeze_swap_low);
- } else {
- printf("memorystatus_freeze_thread: in throttle, ignoring freeze\n");
- memorystatus_freeze_throttle_count++; /* Throttled, update stats */
- }
- }
- }
- lck_mtx_unlock(&freezer_mutex);
-
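- /*
- * Sleep until memorystatus_freeze_wakeup is signalled; the continuation
- * restarts memorystatus_freeze_thread() from the top instead of
- * returning here.
- */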
- assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT);
- thread_block((thread_continue_t) memorystatus_freeze_thread);
-}
-
-static int
-sysctl_memorystatus_do_fastwake_warmup_all SYSCTL_HANDLER_ARGS
-{
-#pragma unused(oidp, req, arg1, arg2)
-
- /* Need to be root or have entitlement */
- if (!kauth_cred_issuser(kauth_cred_get()) && !IOTaskHasEntitlement(current_task(), MEMORYSTATUS_ENTITLEMENT)) {
- return EPERM;
- }
-
- if (memorystatus_freeze_enabled == FALSE) {
- return ENOTSUP;
- }
-
- do_fastwake_warmup_all();
-
- return 0;
-}
-
-SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
- 0, 0, &sysctl_memorystatus_do_fastwake_warmup_all, "I", "");
-
-#endif /* CONFIG_FREEZE */
-
-#if VM_PRESSURE_EVENTS
-
-#if CONFIG_MEMORYSTATUS
-
-static int
-memorystatus_send_note(int event_code, void *data, size_t data_length) {
- int ret;
- struct kev_msg ev_msg;
-
- ev_msg.vendor_code = KEV_VENDOR_APPLE;
- ev_msg.kev_class = KEV_SYSTEM_CLASS;
- ev_msg.kev_subclass = KEV_MEMORYSTATUS_SUBCLASS;
-
- ev_msg.event_code = event_code;
-
- ev_msg.dv[0].data_length = data_length;
- ev_msg.dv[0].data_ptr = data;
- ev_msg.dv[1].data_length = 0;
-
- ret = kev_post_msg(&ev_msg);
- if (ret) {
- printf("%s: kev_post_msg() failed, err %d\n", __func__, ret);
- }
-
- return ret;
-}
-
-boolean_t
-memorystatus_warn_process(pid_t pid, __unused boolean_t is_active, __unused boolean_t is_fatal, boolean_t limit_exceeded) {
-
- boolean_t ret = FALSE;
- boolean_t found_knote = FALSE;
- struct knote *kn = NULL;
- int send_knote_count = 0;
-
- /*
- * See comment in sysctl_memorystatus_vm_pressure_send.
- */
-
- memorystatus_klist_lock();
-
- SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) {
- proc_t knote_proc = knote_get_kq(kn)->kq_p;
- pid_t knote_pid = knote_proc->p_pid;
-
- if (knote_pid == pid) {
- /*
- * By setting the "fflags" here, we are forcing
- * a process to deal with the case where it's
- * bumping up into its memory limits. If we don't
- * do this here, we will end up depending on the
- * system pressure snapshot evaluation in
- * filt_memorystatus().
- */
-
-#if CONFIG_EMBEDDED
- if (!limit_exceeded) {
- /*
- * Intentionally set either the unambiguous limit warning,
- * the system-wide critical or the system-wide warning
- * notification bit.
- */
-
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) {
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN;
- found_knote = TRUE;
- send_knote_count++;
- } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) {
- kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL;
- found_knote = TRUE;
- send_knote_count++;
- } else if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_WARN) {
- kn->kn_fflags = NOTE_MEMORYSTATUS_PRESSURE_WARN;
- found_knote = TRUE;
- send_knote_count++;
- }
- } else {
- /*
- * Send this notification when a process has exceeded a soft limit.
- */
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) {
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL;
- found_knote = TRUE;
- send_knote_count++;
- }
- }
-#else /* CONFIG_EMBEDDED */
- if (!limit_exceeded) {
-
- /*
- * Processes on desktop are not expecting to handle a system-wide
- * critical or system-wide warning notification from this path.
- * Intentionally set only the unambiguous limit warning here.
- *
- * If the limit is soft, however, limit this to one notification per
- * active/inactive limit (per each registered listener).
- */
-
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN) {
- found_knote = TRUE;
- if (!is_fatal) {
- /*
- * Restrict proc_limit_warn notifications when
- * non-fatal (soft) limit is at play.
- */
- if (is_active) {
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE) {
- /*
- * Mark this knote for delivery.
- */
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN;
- /*
- * And suppress it from future notifications.
- */
- kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_ACTIVE;
- send_knote_count++;
- }
- } else {
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE) {
- /*
- * Mark this knote for delivery.
- */
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN;
- /*
- * And suppress it from future notifications.
- */
- kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_WARN_INACTIVE;
- send_knote_count++;
- }
- }
- } else {
- /*
- * No restriction on proc_limit_warn notifications when
- * fatal (hard) limit is at play.
- */
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_WARN;
- send_knote_count++;
- }
- }
- } else {
- /*
- * Send this notification when a process has exceeded a soft limit.
- */
-
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL) {
- found_knote = TRUE;
- if (!is_fatal) {
- /*
- * Restrict critical notifications for soft limits.
- */
-
- if (is_active) {
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE) {
- /*
- * Suppress future proc_limit_critical notifications
- * for the active soft limit.
- */
- kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_ACTIVE;
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL;
- send_knote_count++;
-
- }
- } else {
- if (kn->kn_sfflags & NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE) {
- /*
- * Suppress future proc_limit_critical notifications
- * for the inactive soft limit.
- */
- kn->kn_sfflags &= ~NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL_INACTIVE;
- kn->kn_fflags = NOTE_MEMORYSTATUS_PROC_LIMIT_CRITICAL;
- send_knote_count++;
- }
- }
- } else {
- /*
- * We should never be trying to send a critical notification for
- * a hard limit... the process would be killed before it could be
- * received.
- */
- panic("Caught sending pid %d a critical warning for a fatal limit.\n", pid);
- }
- }
- }
-#endif /* CONFIG_EMBEDDED */
- }
- }
-
- if (found_knote) {
- if (send_knote_count > 0) {
- KNOTE(&memorystatus_klist, 0);
- }
- ret = TRUE;
- }
-
- memorystatus_klist_unlock();
-
- return ret;
-}
-
-/*
- * Can only be set by the current task on itself.
- */
-int
-memorystatus_low_mem_privileged_listener(uint32_t op_flags)
-{
- boolean_t set_privilege = FALSE;
- /*
- * Need an entitlement check here?
- */
- if (op_flags == MEMORYSTATUS_CMD_PRIVILEGED_LISTENER_ENABLE) {
- set_privilege = TRUE;
- } else if (op_flags == MEMORYSTATUS_CMD_PRIVILEGED_LISTENER_DISABLE) {
- set_privilege = FALSE;
- } else {
- return EINVAL;
- }
-
- return (task_low_mem_privileged_listener(current_task(), set_privilege, NULL));
-}
-
-int
-memorystatus_send_pressure_note(pid_t pid) {
- MEMORYSTATUS_DEBUG(1, "memorystatus_send_pressure_note(): pid %d\n", pid);
- return memorystatus_send_note(kMemorystatusPressureNote, &pid, sizeof(pid));
-}
-
-void
-memorystatus_send_low_swap_note(void) {
-
- struct knote *kn = NULL;
-
- memorystatus_klist_lock();
- SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) {
- /* We call is_knote_registered_modify_task_pressure_bits to check if the sfflags for the
- * current note contain NOTE_MEMORYSTATUS_LOW_SWAP. Once we find one note in the memorystatus_klist
- * that has the NOTE_MEMORYSTATUS_LOW_SWAP flag in its sfflags set, we call KNOTE with
- * kMemorystatusLowSwap as the hint to process and update all knotes on the memorystatus_klist accordingly. */
- if (is_knote_registered_modify_task_pressure_bits(kn, NOTE_MEMORYSTATUS_LOW_SWAP, NULL, 0, 0) == TRUE) {
- KNOTE(&memorystatus_klist, kMemorystatusLowSwap);
- break;
- }
- }
-
- memorystatus_klist_unlock();
-}
-
-boolean_t
-memorystatus_bg_pressure_eligible(proc_t p) {
- boolean_t eligible = FALSE;
-
- proc_list_lock();
-
- MEMORYSTATUS_DEBUG(1, "memorystatus_bg_pressure_eligible: pid %d, state 0x%x\n", p->p_pid, p->p_memstat_state);
-
- /* Foreground processes have already been dealt with at this point, so just test for eligibility */
- if (!(p->p_memstat_state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN))) {
- eligible = TRUE;
- }
-
- proc_list_unlock();
-
- return eligible;
-}
-
-boolean_t
-memorystatus_is_foreground_locked(proc_t p) {
- return ((p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND) ||
- (p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND_SUPPORT));
-}
-
-/*
- * This is meant for stackshot and kperf -- it does not take the proc_list_lock
- * to access the p_memstat_dirty field.
- */
-boolean_t
-memorystatus_proc_is_dirty_unsafe(void *v)
-{
- if (!v) {
- return FALSE;
- }
- proc_t p = (proc_t)v;
- return (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) != 0;
-}
-
-#endif /* CONFIG_MEMORYSTATUS */
-
-/*
- * Trigger levels to test the mechanism.
- * Can be used via a sysctl.
- */
-#define TEST_LOW_MEMORY_TRIGGER_ONE 1
-#define TEST_LOW_MEMORY_TRIGGER_ALL 2
-#define TEST_PURGEABLE_TRIGGER_ONE 3
-#define TEST_PURGEABLE_TRIGGER_ALL 4
-#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE 5
-#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL 6
-
-boolean_t memorystatus_manual_testing_on = FALSE;
-vm_pressure_level_t memorystatus_manual_testing_level = kVMPressureNormal;
-
-extern struct knote *
-vm_pressure_select_optimal_candidate_to_notify(struct klist *, int, boolean_t);
-
-/*
- * This value is the threshold that a process must meet to be considered for scavenging.
- */
-#if CONFIG_EMBEDDED
-#define VM_PRESSURE_MINIMUM_RSIZE 1 /* MB */
-#else /* CONFIG_EMBEDDED */
-#define VM_PRESSURE_MINIMUM_RSIZE 10 /* MB */
-#endif /* CONFIG_EMBEDDED */
-
-#define VM_PRESSURE_NOTIFY_WAIT_PERIOD 10000 /* milliseconds */
-
-#if DEBUG
-#define VM_PRESSURE_DEBUG(cond, format, ...) \
-do { \
- if (cond) { printf(format, ##__VA_ARGS__); } \
-} while(0)
-#else
-#define VM_PRESSURE_DEBUG(cond, format, ...)
-#endif
-
-#define INTER_NOTIFICATION_DELAY (250000) /* .25 second */
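-
-/*
- * INTER_NOTIFICATION_DELAY is in microseconds (delay() takes usecs); the
- * resting-window checks below multiply it by 4 to sleep roughly one second
- * between polls.
- */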
-
-void memorystatus_on_pageout_scan_end(void) {
- /* No-op */
-}
-
-/*
- * kn_max - knote
- *
- * knote_pressure_level - to check if the knote is registered for this notification level.
- *
- * task - task whose bits we'll be modifying
- *
- * pressure_level_to_clear - if the task has been notified of this past level, clear that notification bit so that if/when we revert to that level, the task will be notified again.
- *
- * pressure_level_to_set - the task is about to be notified of this new level. Update the task's bit notification information appropriately.
- *
- */
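-
-/*
- * For example, the warning path in memorystatus_update_vm_pressure() below
- * calls this as
- *
- * is_knote_registered_modify_task_pressure_bits(kn_max,
- * NOTE_MEMORYSTATUS_PRESSURE_WARN, task, 0, kVMPressureWarning)
- *
- * i.e. "if this knote wants warning notifications, mark the task as having
- * been warned" (there is no prior level to clear on that path).
- */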
-
-boolean_t
-is_knote_registered_modify_task_pressure_bits(struct knote *kn_max, int knote_pressure_level, task_t task, vm_pressure_level_t pressure_level_to_clear, vm_pressure_level_t pressure_level_to_set)
-{
- if (kn_max->kn_sfflags & knote_pressure_level) {
-
- if (pressure_level_to_clear && task_has_been_notified(task, pressure_level_to_clear) == TRUE) {
-
- task_clear_has_been_notified(task, pressure_level_to_clear);
- }
-
- task_mark_has_been_notified(task, pressure_level_to_set);
- return TRUE;
- }
-
- return FALSE;
-}
-
-void
-memorystatus_klist_reset_all_for_level(vm_pressure_level_t pressure_level_to_clear)
-{
- struct knote *kn = NULL;
-
- memorystatus_klist_lock();
- SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) {
-
- proc_t p = PROC_NULL;
- struct task* t = TASK_NULL;
-
- p = knote_get_kq(kn)->kq_p;
- proc_list_lock();
- if (p != proc_ref_locked(p)) {
- p = PROC_NULL;
- proc_list_unlock();
- continue;
- }
- proc_list_unlock();
-
- t = (struct task *)(p->task);
-
- task_clear_has_been_notified(t, pressure_level_to_clear);
-
- proc_rele(p);
- }
-
- memorystatus_klist_unlock();
-}
-
-extern kern_return_t vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process);
-
-struct knote *
-vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process);
-
-/*
- * Used by the vm_pressure_thread which is
- * signalled from within vm_pageout_scan().
- */
-static void vm_dispatch_memory_pressure(void);
-void consider_vm_pressure_events(void);
-
-void consider_vm_pressure_events(void)
-{
- vm_dispatch_memory_pressure();
-}
-static void vm_dispatch_memory_pressure(void)
-{
- memorystatus_update_vm_pressure(FALSE);
-}
-
-extern vm_pressure_level_t
-convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t);
-
-struct knote *
-vm_pressure_select_optimal_candidate_to_notify(struct klist *candidate_list, int level, boolean_t target_foreground_process)
-{
- struct knote *kn = NULL, *kn_max = NULL;
- uint64_t resident_max = 0; /* MB */
- struct timeval curr_tstamp = {0, 0};
- int elapsed_msecs = 0;
- int selected_task_importance = 0;
- static int pressure_snapshot = -1;
- boolean_t pressure_increase = FALSE;
-
- if (pressure_snapshot == -1) {
- /*
- * Initial snapshot.
- */
- pressure_snapshot = level;
- pressure_increase = TRUE;
- } else {
-
- if (level && (level >= pressure_snapshot)) {
- pressure_increase = TRUE;
- } else {
- pressure_increase = FALSE;
- }
-
- pressure_snapshot = level;
- }
-
- if (pressure_increase == TRUE) {
- /*
- * We'll start by considering the largest
- * unimportant task in our list.
- */
- selected_task_importance = INT_MAX;
- } else {
- /*
- * We'll start by considering the largest
- * important task in our list.
- */
- selected_task_importance = 0;
- }
-
- microuptime(&curr_tstamp);
-
- SLIST_FOREACH(kn, candidate_list, kn_selnext) {
-
- uint64_t resident_size = 0; /* MB */
- proc_t p = PROC_NULL;
- struct task* t = TASK_NULL;
- int curr_task_importance = 0;
- boolean_t consider_knote = FALSE;
- boolean_t privileged_listener = FALSE;
-
- p = knote_get_kq(kn)->kq_p;
- proc_list_lock();
- if (p != proc_ref_locked(p)) {
- p = PROC_NULL;
- proc_list_unlock();
- continue;
- }
- proc_list_unlock();
-
-#if CONFIG_MEMORYSTATUS
- if (target_foreground_process == TRUE && !memorystatus_is_foreground_locked(p)) {
- /*
- * Skip process not marked foreground.
- */
- proc_rele(p);
- continue;
- }
-#endif /* CONFIG_MEMORYSTATUS */
-
- t = (struct task *)(p->task);
-
- timevalsub(&curr_tstamp, &p->vm_pressure_last_notify_tstamp);
- elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000;
-
- vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(level);
-
- if ((kn->kn_sfflags & dispatch_level) == 0) {
- proc_rele(p);
- continue;
- }
-
-#if CONFIG_MEMORYSTATUS
- if (target_foreground_process == FALSE && !memorystatus_bg_pressure_eligible(p)) {
- VM_PRESSURE_DEBUG(1, "[vm_pressure] skipping process %d\n", p->p_pid);
- proc_rele(p);
- continue;
- }
-#endif /* CONFIG_MEMORYSTATUS */
-
-#if CONFIG_EMBEDDED
- curr_task_importance = p->p_memstat_effectivepriority;
-#else /* CONFIG_EMBEDDED */
- curr_task_importance = task_importance_estimate(t);
-#endif /* CONFIG_EMBEDDED */
-
- /*
- * Privileged listeners are only considered in the multi-level pressure scheme
- * AND only if the pressure is increasing.
- */
- if (level > 0) {
-
- if (task_has_been_notified(t, level) == FALSE) {
-
- /*
- * Is this a privileged listener?
- */
- if (task_low_mem_privileged_listener(t, FALSE, &privileged_listener) == 0) {
-
- if (privileged_listener) {
- kn_max = kn;
- proc_rele(p);
- goto done_scanning;
- }
- }
- } else {
- proc_rele(p);
- continue;
- }
- } else if (level == 0) {
-
- /*
- * Task wasn't notified when the pressure was increasing and so
- * no need to notify it that the pressure is decreasing.
- */
- if ((task_has_been_notified(t, kVMPressureWarning) == FALSE) && (task_has_been_notified(t, kVMPressureCritical) == FALSE)) {
- proc_rele(p);
- continue;
- }
- }
-
- /*
- * We don't want a small process to block large processes from
- * being notified again. <rdar://problem/7955532>
- */
- resident_size = (get_task_phys_footprint(t))/(1024*1024ULL); /* MB */
-
- if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
-
- if (level > 0) {
- /*
- * Warning or Critical Pressure.
- */
- if (pressure_increase) {
- if ((curr_task_importance < selected_task_importance) ||
- ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {
-
- /*
- * We have found a candidate process which is:
- * a) at a lower importance than the current selected process
- * OR
- * b) has importance equal to that of the current selected process but is larger
- */
-
- consider_knote = TRUE;
- }
- } else {
- if ((curr_task_importance > selected_task_importance) ||
- ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {
-
- /*
- * We have found a candidate process which is:
- * a) at a higher importance than the current selected process
- * OR
- * b) has importance equal to that of the current selected process but is larger
- */
-
- consider_knote = TRUE;
- }
- }
- } else if (level == 0) {
- /*
- * Pressure back to normal.
- */
- if ((curr_task_importance > selected_task_importance) ||
- ((curr_task_importance == selected_task_importance) && (resident_size > resident_max))) {
-
- consider_knote = TRUE;
- }
- }
-
- if (consider_knote) {
- resident_max = resident_size;
- kn_max = kn;
- selected_task_importance = curr_task_importance;
- consider_knote = FALSE; /* reset for the next candidate */
- }
- } else {
- /* There was no candidate with enough resident memory to scavenge */
- VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %llu resident...\n", p->p_pid, resident_size);
- }
- proc_rele(p);
- }
-
-done_scanning:
- if (kn_max) {
- VM_DEBUG_CONSTANT_EVENT(vm_pressure_event, VM_PRESSURE_EVENT, DBG_FUNC_NONE, knote_get_kq(kn_max)->kq_p->p_pid, resident_max, 0, 0);
- VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %llu resident\n", knote_get_kq(kn_max)->kq_p->p_pid, resident_max);
- }
-
- return kn_max;
-}
-
-#define VM_PRESSURE_DECREASED_SMOOTHING_PERIOD 5000 /* milliseconds */
-#define WARNING_NOTIFICATION_RESTING_PERIOD 25 /* seconds */
-#define CRITICAL_NOTIFICATION_RESTING_PERIOD 25 /* seconds */
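-
-/*
- * Once every interested client at a given level has been notified, no client
- * is re-notified for that level until the matching resting period, tracked
- * via the next_*_notification_sent_at_ts values below, has elapsed.
- */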
-
-uint64_t next_warning_notification_sent_at_ts = 0;
-uint64_t next_critical_notification_sent_at_ts = 0;
-
-kern_return_t
-memorystatus_update_vm_pressure(boolean_t target_foreground_process)
-{
- struct knote *kn_max = NULL;
- struct knote *kn_cur = NULL, *kn_temp = NULL; /* for safe list traversal */
- pid_t target_pid = -1;
- struct klist dispatch_klist = { NULL };
- proc_t target_proc = PROC_NULL;
- struct task *task = NULL;
- boolean_t found_candidate = FALSE;
-
- static vm_pressure_level_t level_snapshot = kVMPressureNormal;
- static vm_pressure_level_t prev_level_snapshot = kVMPressureNormal;
- boolean_t smoothing_window_started = FALSE;
- struct timeval smoothing_window_start_tstamp = {0, 0};
- struct timeval curr_tstamp = {0, 0};
- int elapsed_msecs = 0;
- uint64_t curr_ts = mach_absolute_time();
-
-#if !CONFIG_JETSAM
-#define MAX_IDLE_KILLS 100 /* limit the number of idle kills allowed */
-
- int idle_kill_counter = 0;
-
- /*
- * On desktop we take this opportunity to free up memory pressure
- * by immediately killing idle exitable processes. We use a delay
- * to avoid overkill, and we impose a max counter as a failsafe
- * in case daemons re-launch too fast.
- */
- while ((memorystatus_vm_pressure_level != kVMPressureNormal) && (idle_kill_counter < MAX_IDLE_KILLS)) {
- if (memorystatus_idle_exit_from_VM() == FALSE) {
- /* No idle exitable processes left to kill */
- break;
- }
- idle_kill_counter++;
-
- if (memorystatus_manual_testing_on == TRUE) {
- /*
- * Skip the delay when testing
- * the pressure notification scheme.
- */
- } else {
- delay(1000000); /* 1 second */
- }
- }
-#endif /* !CONFIG_JETSAM */
-
- if (level_snapshot != kVMPressureNormal) {
-
- /*
- * Check to see if we are still in the 'resting' period
- * after having notified all clients interested in
- * a particular pressure level.
- */
-
- level_snapshot = memorystatus_vm_pressure_level;
-
- if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) {
-
- if (next_warning_notification_sent_at_ts) {
- if (curr_ts < next_warning_notification_sent_at_ts) {
- delay(INTER_NOTIFICATION_DELAY * 4 /* 1 sec */);
- return KERN_SUCCESS;
- }
-
- next_warning_notification_sent_at_ts = 0;
- memorystatus_klist_reset_all_for_level(kVMPressureWarning);
- }
- } else if (level_snapshot == kVMPressureCritical) {
-
- if (next_critical_notification_sent_at_ts) {
- if (curr_ts < next_critical_notification_sent_at_ts) {
- delay(INTER_NOTIFICATION_DELAY * 4 /* 1 sec */);
- return KERN_SUCCESS;
- }
- next_critical_notification_sent_at_ts = 0;
- memorystatus_klist_reset_all_for_level(kVMPressureCritical);
- }
- }
- }
-
- while (1) {
-
- /*
- * There is a race window here. But it's not clear
- * how much we benefit from having extra synchronization.
- */
- level_snapshot = memorystatus_vm_pressure_level;
-
- if (prev_level_snapshot > level_snapshot) {
- /*
- * Pressure decreased? Let's take a little breather
- * and see if this condition stays.
- */
- if (smoothing_window_started == FALSE) {
-
- smoothing_window_started = TRUE;
- microuptime(&smoothing_window_start_tstamp);
- }
-
- microuptime(&curr_tstamp);
- timevalsub(&curr_tstamp, &smoothing_window_start_tstamp);
- elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000;
-
- if (elapsed_msecs < VM_PRESSURE_DECREASED_SMOOTHING_PERIOD) {
-
- delay(INTER_NOTIFICATION_DELAY);
- continue;
- }
- }
-
- prev_level_snapshot = level_snapshot;
- smoothing_window_started = FALSE;
-
- memorystatus_klist_lock();
- kn_max = vm_pressure_select_optimal_candidate_to_notify(&memorystatus_klist, level_snapshot, target_foreground_process);
-
- if (kn_max == NULL) {
- memorystatus_klist_unlock();
-
- /*
- * No more level-based clients to notify.
- *
- * Start the 'resting' window within which clients will not be re-notified.
- */
-
- if (level_snapshot != kVMPressureNormal) {
- if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) {
- nanoseconds_to_absolutetime(WARNING_NOTIFICATION_RESTING_PERIOD * NSEC_PER_SEC, &curr_ts);
-
- /* Next warning notification (if nothing changes) won't be sent before...*/
- next_warning_notification_sent_at_ts = mach_absolute_time() + curr_ts;
- }
-
- if (level_snapshot == kVMPressureCritical) {
- nanoseconds_to_absolutetime(CRITICAL_NOTIFICATION_RESTING_PERIOD * NSEC_PER_SEC, &curr_ts);
-
- /* Next critical notification (if nothing changes) won't be sent before...*/
- next_critical_notification_sent_at_ts = mach_absolute_time() + curr_ts;
- }
- }
- return KERN_FAILURE;
- }
-
- target_proc = knote_get_kq(kn_max)->kq_p;
-
- proc_list_lock();
- if (target_proc != proc_ref_locked(target_proc)) {
- target_proc = PROC_NULL;
- proc_list_unlock();
- memorystatus_klist_unlock();
- continue;
- }
- proc_list_unlock();
-
- target_pid = target_proc->p_pid;
-
- task = (struct task *)(target_proc->task);
-
- if (level_snapshot != kVMPressureNormal) {
-
- if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) {
-
- if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_WARN, task, 0, kVMPressureWarning) == TRUE) {
- found_candidate = TRUE;
- }
- } else {
- if (level_snapshot == kVMPressureCritical) {
-
- if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_CRITICAL, task, 0, kVMPressureCritical) == TRUE) {
- found_candidate = TRUE;
- }
- }
- }
- } else {
- if (kn_max->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
-
- task_clear_has_been_notified(task, kVMPressureWarning);
- task_clear_has_been_notified(task, kVMPressureCritical);
-
- found_candidate = TRUE;
- }
- }
-
- if (found_candidate == FALSE) {
- proc_rele(target_proc);
- memorystatus_klist_unlock();
- continue;
- }
-
- SLIST_FOREACH_SAFE(kn_cur, &memorystatus_klist, kn_selnext, kn_temp) {
-
- int knote_pressure_level = convert_internal_pressure_level_to_dispatch_level(level_snapshot);
-
- if (is_knote_registered_modify_task_pressure_bits(kn_cur, knote_pressure_level, task, 0, level_snapshot) == TRUE) {
- proc_t knote_proc = knote_get_kq(kn_cur)->kq_p;
- pid_t knote_pid = knote_proc->p_pid;
- if (knote_pid == target_pid) {
- KNOTE_DETACH(&memorystatus_klist, kn_cur);
- KNOTE_ATTACH(&dispatch_klist, kn_cur);
- }
- }
- }
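-
- /*
- * KNOTE() fires every knote on the list it is handed, so the target's
- * knotes were moved onto the private dispatch_klist above; this
- * delivers the event to the chosen process only, after which the
- * knotes are reattached to memorystatus_klist.
- */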
-
- KNOTE(&dispatch_klist, (level_snapshot != kVMPressureNormal) ? kMemorystatusPressure : kMemorystatusNoPressure);
-
- SLIST_FOREACH_SAFE(kn_cur, &dispatch_klist, kn_selnext, kn_temp) {
- KNOTE_DETACH(&dispatch_klist, kn_cur);
- KNOTE_ATTACH(&memorystatus_klist, kn_cur);
- }
-
- memorystatus_klist_unlock();
-
- microuptime(&target_proc->vm_pressure_last_notify_tstamp);
- proc_rele(target_proc);
-
- if (memorystatus_manual_testing_on == TRUE && target_foreground_process == TRUE) {
- break;
- }
-
- if (memorystatus_manual_testing_on == TRUE) {
- /*
- * Testing out the pressure notification scheme.
- * No need for delays etc.
- */
- } else {
-
- uint32_t sleep_interval = INTER_NOTIFICATION_DELAY;
-#if CONFIG_JETSAM
- unsigned int page_delta = 0;
- unsigned int skip_delay_page_threshold = 0;
-
- assert(memorystatus_available_pages_pressure >= memorystatus_available_pages_critical_base);
-
- page_delta = (memorystatus_available_pages_pressure - memorystatus_available_pages_critical_base) / 2;
- skip_delay_page_threshold = memorystatus_available_pages_pressure - page_delta;
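-
- /*
- * Illustrative arithmetic (hypothetical values): a pressure mark of
- * 10000 pages and a critical base of 4000 give page_delta = 3000, so
- * the inter-notification delay is skipped once available pages drop
- * to 7000 or below.
- */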
-
- if (memorystatus_available_pages <= skip_delay_page_threshold) {
- /*
- * We are nearing the critical mark fast and can't afford to wait between
- * notifications.
- */
- sleep_interval = 0;
- }
-#endif /* CONFIG_JETSAM */
-
- if (sleep_interval) {
- delay(sleep_interval);
- }
- }
- }
-
- return KERN_SUCCESS;
-}
-
-vm_pressure_level_t
-convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t internal_pressure_level)
-{
- vm_pressure_level_t dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL;
-
- switch (internal_pressure_level) {
-
- case kVMPressureNormal:
- {
- dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL;
- break;
- }
-
- case kVMPressureWarning:
- case kVMPressureUrgent:
- {
- dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_WARN;
- break;
- }
-
- case kVMPressureCritical:
- {
- dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL;
- break;
- }
-
- default:
- break;
- }
-
- return dispatch_level;
-}
-
-static int
-sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2, oidp)
-#if CONFIG_EMBEDDED
- int error = 0;
-
- error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0);
- if (error) {
- return (error);
- }
-
-#endif /* CONFIG_EMBEDDED */
- vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(memorystatus_vm_pressure_level);
-
- return SYSCTL_OUT(req, &dispatch_level, sizeof(dispatch_level));
-}
-
-#if DEBUG || DEVELOPMENT
-
-SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED,
- 0, 0, &sysctl_memorystatus_vm_pressure_level, "I", "");
-
-#else /* DEBUG || DEVELOPMENT */
-
-SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED|CTLFLAG_MASKED,
- 0, 0, &sysctl_memorystatus_vm_pressure_level, "I", "");
-
-#endif /* DEBUG || DEVELOPMENT */
-
-extern int memorystatus_purge_on_warning;
-extern int memorystatus_purge_on_critical;
-
-static int
-sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2)
-
- int level = 0;
- int error = 0;
- int pressure_level = 0;
- int trigger_request = 0;
- int force_purge;
-
- error = sysctl_handle_int(oidp, &level, 0, req);
- if (error || !req->newptr) {
- return (error);
- }
-
- memorystatus_manual_testing_on = TRUE;
-
- trigger_request = (level >> 16) & 0xFFFF;
- pressure_level = (level & 0xFFFF);
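-
- /*
- * The written value packs both fields; for example,
- * (TEST_LOW_MEMORY_TRIGGER_ONE << 16) | NOTE_MEMORYSTATUS_PRESSURE_WARN
- * requests a single low-memory notification at the warning level.
- */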
-
- if (trigger_request < TEST_LOW_MEMORY_TRIGGER_ONE ||
- trigger_request > TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL) {
- return EINVAL;
- }
- switch (pressure_level) {
- case NOTE_MEMORYSTATUS_PRESSURE_NORMAL:
- case NOTE_MEMORYSTATUS_PRESSURE_WARN:
- case NOTE_MEMORYSTATUS_PRESSURE_CRITICAL:
- break;
- default:
- return EINVAL;
- }
-
- /*
- * The pressure level is being set from user-space.
- * And user-space uses the constants in sys/event.h
- * So we translate those events to our internal levels here.
- */
- if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
-
- memorystatus_manual_testing_level = kVMPressureNormal;
- force_purge = 0;
-
- } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_WARN) {
-
- memorystatus_manual_testing_level = kVMPressureWarning;
- force_purge = memorystatus_purge_on_warning;
-
- } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) {
-
- memorystatus_manual_testing_level = kVMPressureCritical;
- force_purge = memorystatus_purge_on_critical;
- }
-
- memorystatus_vm_pressure_level = memorystatus_manual_testing_level;
-
- /* purge according to the new pressure level */
- switch (trigger_request) {
- case TEST_PURGEABLE_TRIGGER_ONE:
- case TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE:
- if (force_purge == 0) {
- /* no purging requested */
- break;
- }
- vm_purgeable_object_purge_one_unlocked(force_purge);
- break;
- case TEST_PURGEABLE_TRIGGER_ALL:
- case TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL:
- if (force_purge == 0) {
- /* no purging requested */
- break;
- }
- while (vm_purgeable_object_purge_one_unlocked(force_purge)) {
- /* keep purging until no eligible purgeable object remains */
- }
- break;
- }
-
- if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ONE) ||
- (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE)) {
-
- memorystatus_update_vm_pressure(TRUE);
- }
-
- if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ALL) ||
- (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL)) {
-
- while (memorystatus_update_vm_pressure(FALSE) == KERN_SUCCESS) {
- continue;
- }
- }
-
- if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
- memorystatus_manual_testing_on = FALSE;
- }
-
- return 0;
-}
-
-SYSCTL_PROC(_kern, OID_AUTO, memorypressure_manual_trigger, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
- 0, 0, &sysctl_memorypressure_manual_trigger, "I", "");
-
-
-extern int memorystatus_purge_on_urgent;
-
-SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_warning, 0, "");
-SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_urgent, 0, "");
-SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_critical, 0, "");
-
-
-#endif /* VM_PRESSURE_EVENTS */
-
-/* Return both allocated and actual size, since there's a race between allocation and list compilation */
-static int
-memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t *buffer_size, size_t *list_size, boolean_t size_only)
-{
- uint32_t list_count, i = 0;
- memorystatus_priority_entry_t *list_entry;
- proc_t p;
-
- list_count = memorystatus_list_count;
- *list_size = sizeof(memorystatus_priority_entry_t) * list_count;
-
- /* Just a size check? */
- if (size_only) {
- return 0;
- }
-