+ proc_list_unlock();
+
+exit:
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && !killed) {
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0);
+
+ return killed;
+}
+
+#if LEGACY_HIWATER
+
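+/*
+ * Scan the priority bands for a process that has exceeded its high-water
+ * memory limit and kill it (or, under the diagnostic policy, suspend it).
+ * Returns TRUE if a process was killed or suspended.
+ */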
+static boolean_t
+memorystatus_kill_hiwat_proc(uint32_t *errors)
+{
+ pid_t aPid = 0;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ boolean_t new_snapshot = FALSE, killed = FALSE;
+ unsigned int i = 0;
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_START,
+ memorystatus_available_pages, 0, 0, 0, 0);
+
+ proc_list_lock();
+ memorystatus_sort_by_largest_process_locked(JETSAM_PRIORITY_FOREGROUND);
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (next_p) {
+ uint32_t footprint;
+ boolean_t skip;
+
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+
+ aPid = p->p_pid;
+
+ if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) {
+ continue;
+ }
+
+ /* skip if no limit set */
+ if (p->p_memstat_memlimit <= 0) {
+ continue;
+ }
+
+ /* skip if a currently inapplicable limit is encountered */
+ if ((p->p_memstat_state & P_MEMSTAT_MEMLIMIT_BACKGROUND) && (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND)) {
+ continue;
+ }
+
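+ /* The footprint is tracked in bytes; p_memstat_memlimit is expressed in MB */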
+ footprint = (uint32_t)(get_task_phys_footprint(p->task) / (1024 * 1024));
+ skip = (((int32_t)footprint) <= p->p_memstat_memlimit);
+#if DEVELOPMENT || DEBUG
+ if (!skip && (memorystatus_jetsam_policy & kPolicyDiagnoseActive)) {
+ if (p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED) {
+ continue;
+ }
+ }
+#endif /* DEVELOPMENT || DEBUG */
+
+#if CONFIG_FREEZE
+ if (!skip) {
+ if (p->p_memstat_state & P_MEMSTAT_LOCKED) {
+ skip = TRUE;
+ } else {
+ skip = FALSE;
+ }
+ }
+#endif
+
+ if (skip) {
+ continue;
+ } else {
+ MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %d Mb > 1 (%d Mb)\n",
+ (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", aPid, p->p_comm, footprint, p->p_memstat_memlimit);
+
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ memorystatus_jetsam_snapshot_procs_locked();
+ new_snapshot = TRUE;
+ }
+
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+
+#if DEVELOPMENT || DEBUG
+ if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
+ MEMORYSTATUS_DEBUG(1, "jetsam: pid %d suspended for diagnosis - memorystatus_available_pages: %d\n", aPid, memorystatus_available_pages);
+ memorystatus_update_snapshot_locked(p, kMemorystatusKilledDiagnostic);
+ p->p_memstat_state |= P_MEMSTAT_DIAG_SUSPENDED;
+
+ p = proc_ref_locked(p);
+ proc_list_unlock();
+ if (p) {
+ task_suspend(p->task);
+ proc_rele(p);
+ killed = TRUE;
+ }
+
+ goto exit;
+ } else
+#endif /* DEVELOPMENT || DEBUG */
+ {
+ memorystatus_update_snapshot_locked(p, kMemorystatusKilledHiwat);
+
+ p = proc_ref_locked(p);
+ proc_list_unlock();
+ if (p) {
+ printf("memorystatus: jetsam killing pid %d [%s] (highwater) - memorystatus_available_pages: %d\n",
+ aPid, (p->p_comm ? p->p_comm : "(unknown)"), memorystatus_available_pages);
+ killed = memorystatus_do_kill(p, kMemorystatusKilledHiwat);
+ }
+
+ /* Success? */
+ if (killed) {
+ proc_rele(p);
+ goto exit;
+ }
+
+ /* Failure - unwind and restart. */
+ proc_list_lock();
+ proc_rele_locked(p);
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ p->p_memstat_state |= P_MEMSTAT_ERROR;
+ *errors += 1;
+ i = 0;
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ }
+ }
+ }
+
+ proc_list_unlock();
+
+exit:
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && !killed) {
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0);
+
+ return killed;
+}
+
+#endif /* LEGACY_HIWATER */
+
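+/*
+ * Asynchronous kill: only supported for "kill the first suitable process"
+ * requests (victim_pid == -1) caused by VM page shortage, VM thrashing or
+ * FC thrashing; the cause is recorded and the memorystatus thread is woken
+ * to perform the kill.
+ */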
+static boolean_t
+memorystatus_kill_process_async(pid_t victim_pid, uint32_t cause) {
+ /* TODO: allow a general async path */
+ if ((victim_pid != -1) || (cause != kMemorystatusKilledVMPageShortage && cause != kMemorystatusKilledVMThrashing &&
+ cause != kMemorystatusKilledFCThrashing)) {
+ return FALSE;
+ }
+
+ kill_under_pressure_cause = cause;
+ memorystatus_thread_wake();
+ return TRUE;
+}
+
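+/*
+ * Synchronous kill: kills the given pid, or the first suitable process when
+ * victim_pid is -1, and posts a jetsam snapshot notification on success.
+ */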
+static boolean_t
+memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause) {
+ boolean_t res;
+ uint32_t errors = 0;
+
+ if (victim_pid == -1) {
+ /* No pid, so kill first process */
+ res = memorystatus_kill_top_process(TRUE, cause, NULL, &errors);
+ } else {
+ res = memorystatus_kill_specific_process(victim_pid, cause);
+ }
+
+ if (errors) {
+ memorystatus_clear_errors();
+ }
+
+ if (res == TRUE) {
+ /* Fire off snapshot notification */
+ size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) +
+ sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count;
+ memorystatus_jetsam_snapshot->notification_time = mach_absolute_time();
+ memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
+ }
+
+ return res;
+}
+
+boolean_t
+memorystatus_kill_on_VM_page_shortage(boolean_t async) {
+ if (async) {
+ return memorystatus_kill_process_async(-1, kMemorystatusKilledVMPageShortage);
+ } else {
+ return memorystatus_kill_process_sync(-1, kMemorystatusKilledVMPageShortage);
+ }
+}
+
+boolean_t
+memorystatus_kill_on_VM_thrashing(boolean_t async) {
+ if (async) {
+ return memorystatus_kill_process_async(-1, kMemorystatusKilledVMThrashing);
+ } else {
+ return memorystatus_kill_process_sync(-1, kMemorystatusKilledVMThrashing);
+ }
+}
+
+boolean_t
+memorystatus_kill_on_FC_thrashing(boolean_t async) {
+ if (async) {
+ return memorystatus_kill_process_async(-1, kMemorystatusKilledFCThrashing);
+ } else {
+ return memorystatus_kill_process_sync(-1, kMemorystatusKilledFCThrashing);
+ }
+}
+
+boolean_t
+memorystatus_kill_on_vnode_limit(void) {
+ return memorystatus_kill_process_sync(-1, kMemorystatusKilledVnodes);
+}
+
+#endif /* CONFIG_JETSAM */
+
+#if CONFIG_FREEZE
+
+__private_extern__ void
+memorystatus_freeze_init(void)
+{
+ kern_return_t result;
+ thread_t thread;
+
+ result = kernel_thread_start(memorystatus_freeze_thread, NULL, &thread);
+ if (result == KERN_SUCCESS) {
+ thread_deallocate(thread);
+ } else {
+ panic("Could not create memorystatus_freeze_thread");
+ }
+}
+
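+/*
+ * Find the first suspended process that is eligible for freezing and freeze
+ * it. Returns the number of pages reclaimed (the dirty page count) on
+ * success, or -1 if nothing was frozen.
+ */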
+static int
+memorystatus_freeze_top_process(boolean_t *memorystatus_freeze_swap_low)
+{
+ pid_t aPid = 0;
+ int ret = -1;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ unsigned int i = 0;
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_START,
+ memorystatus_available_pages, 0, 0, 0, 0);
+
+ proc_list_lock();
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (next_p) {
+ kern_return_t kr;
+ uint32_t purgeable, wired, clean, dirty;
+ boolean_t shared;
+ uint32_t pages;
+ uint32_t max_pages = 0;
+ uint32_t state;
+
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+
+ aPid = p->p_pid;
+ state = p->p_memstat_state;
+
+ /* Ensure the process is eligible for freezing */
+ if ((state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FROZEN)) || !(state & P_MEMSTAT_SUSPENDED)) {
+ continue; // with lock held
+ }
+
+ /* Only freeze processes meeting our minimum resident page criteria */
+ memorystatus_get_task_page_counts(p->task, &pages, NULL, NULL, NULL);
+ if (pages < memorystatus_freeze_pages_min) {
+ continue; // with lock held
+ }
+
+ if (DEFAULT_FREEZER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) {
+ /* Ensure there's enough free space to freeze this process. */
+ max_pages = MIN(default_pager_swap_pages_free(), memorystatus_freeze_pages_max);
+ if (max_pages < memorystatus_freeze_pages_min) {
+ *memorystatus_freeze_swap_low = TRUE;
+ proc_list_unlock();
+ goto exit;
+ }
+ } else {
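+ /* The compressor is not swap-backed, so there is no backing-store budget to respect */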
+ max_pages = UINT32_MAX - 1;
+ }
+
+ /* Mark as locked temporarily to avoid kill */
+ p->p_memstat_state |= P_MEMSTAT_LOCKED;
+
+ p = proc_ref_locked(p);
+ proc_list_unlock();
+ if (!p) {
+ goto exit;
+ }
+
+ kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE);
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_top_process: task_freeze %s for pid %d [%s] - "
+ "memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, shared %d, free swap: %d\n",
+ (kr == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (p->p_comm ? p->p_comm : "(unknown)"),
+ memorystatus_available_pages, purgeable, wired, clean, dirty, shared, default_pager_swap_pages_free());
+
+ proc_list_lock();
+ p->p_memstat_state &= ~P_MEMSTAT_LOCKED;
+
+ /* Success? */
+ if (KERN_SUCCESS == kr) {
+ memorystatus_freeze_entry_t data = { aPid, TRUE, dirty };
+
+ memorystatus_frozen_count++;
+
+ p->p_memstat_state |= (P_MEMSTAT_FROZEN | (shared ? 0: P_MEMSTAT_NORECLAIM));
+
+ /* Update stats */
+ for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
+ throttle_intervals[i].pageouts += dirty;
+ }
+
+ memorystatus_freeze_pageouts += dirty;
+ memorystatus_freeze_count++;
+
+ proc_list_unlock();
+
+ memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data));
+
+ /* Return the number of reclaimed pages */
+ ret = dirty;
+
+ } else {
+ proc_list_unlock();
+ }
+
+ proc_rele(p);
+ goto exit;
+ }
+
+ proc_list_unlock();
+
+exit:
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE) | DBG_FUNC_END,
+ memorystatus_available_pages, aPid, 0, 0, 0);
+
+ return ret;
+}
+
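+/*
+ * Heuristic: freezing is only worthwhile if enough suspended-but-unfrozen
+ * processes remain; the threshold is lowered when available memory suggests
+ * few additional suspended processes can be accommodated.
+ */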
+static inline boolean_t
+memorystatus_can_freeze_processes(void)
+{
+ boolean_t ret;
+
+ proc_list_lock();
+
+ if (memorystatus_suspended_count) {
+ uint32_t average_resident_pages, estimated_processes;
+
+ /* Estimate the number of suspended processes we can fit */
+ average_resident_pages = memorystatus_suspended_footprint_total / memorystatus_suspended_count;
+ estimated_processes = memorystatus_suspended_count +
+ ((memorystatus_available_pages - memorystatus_available_pages_critical) / average_resident_pages);
+
+ /* If it's predicted that no freeze will occur, lower the threshold temporarily */
+ if (estimated_processes <= FREEZE_SUSPENDED_THRESHOLD_DEFAULT) {
+ memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_LOW;
+ } else {
+ memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT;
+ }
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus_can_freeze_processes: %d suspended processes, %d average resident pages / process, %d suspended processes estimated\n",
+ memorystatus_suspended_count, average_resident_pages, estimated_processes);
+
+ if ((memorystatus_suspended_count - memorystatus_frozen_count) > memorystatus_freeze_suspended_threshold) {
+ ret = TRUE;
+ } else {
+ ret = FALSE;
+ }
+ } else {
+ ret = FALSE;
+ }
+
+ proc_list_unlock();
+
+ return ret;
+}
+
+static boolean_t
+memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low)
+{
+ /* Only freeze if we're sufficiently low on memory; this holds off freeze right
+ after boot, and is generally a no-op once we've reached steady state. */
+ if (memorystatus_available_pages > memorystatus_freeze_threshold) {
+ return FALSE;
+ }
+
+ /* Check minimum suspended process threshold. */
+ if (!memorystatus_can_freeze_processes()) {
+ return FALSE;
+ }
+
+ /* Is swap running low? */
+ if (*memorystatus_freeze_swap_low) {
+ /* If there's been no movement in free swap pages since we last attempted freeze, return. */
+ if (default_pager_swap_pages_free() < memorystatus_freeze_pages_min) {
+ return FALSE;
+ }
+
+ /* Pages have been freed - we can retry. */
+ *memorystatus_freeze_swap_low = FALSE;
+ }
+
+ /* OK */
+ return TRUE;
+}
+
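+/*
+ * Roll a throttle interval forward: once the interval has expired, start a
+ * new window (carrying over any pageout overshoot) and clear the throttle;
+ * within an active window, throttle once the pageout budget is exhausted.
+ */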
+static void
+memorystatus_freeze_update_throttle_interval(mach_timespec_t *ts, struct throttle_interval_t *interval)
+{
+ if (CMP_MACH_TIMESPEC(ts, &interval->ts) >= 0) {
+ if (!interval->max_pageouts) {
+ interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * FREEZE_DAILY_PAGEOUTS_MAX) / (24 * 60)));
+ } else {
+ printf("memorystatus_freeze_update_throttle_interval: %d minute throttle timeout, resetting\n", interval->mins);
+ }
+ interval->ts.tv_sec = interval->mins * 60;
+ interval->ts.tv_nsec = 0;
+ ADD_MACH_TIMESPEC(&interval->ts, ts);
+ /* Since we update the throttle stats pre-freeze, adjust for overshoot here */
+ if (interval->pageouts > interval->max_pageouts) {
+ interval->pageouts -= interval->max_pageouts;
+ } else {
+ interval->pageouts = 0;
+ }
+ interval->throttle = FALSE;
+ } else if (!interval->throttle && interval->pageouts >= interval->max_pageouts) {
+ printf("memorystatus_freeze_update_throttle_interval: %d minute pageout limit exceeded; enabling throttle\n", interval->mins);
+ interval->throttle = TRUE;
+ }
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n",
+ interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60,
+ interval->throttle ? "on" : "off");
+}
+
+static boolean_t
+memorystatus_freeze_update_throttle(void)
+{
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ mach_timespec_t ts;
+ uint32_t i;
+ boolean_t throttled = FALSE;
+
+#if DEVELOPMENT || DEBUG
+ if (!memorystatus_freeze_throttle_enabled)
+ return FALSE;
+#endif
+
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = sec;
+ ts.tv_nsec = nsec;
+
+ /* Check freeze pageouts over multiple intervals and throttle if we've exceeded our budget.
+ *
+ * This ensures that periods of inactivity can't be used as 'credit' towards freeze if the device has
+ * remained dormant for a long period. We do, however, allow increased thresholds for shorter intervals in
+ * order to allow for bursts of activity.
+ */
+ for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
+ memorystatus_freeze_update_throttle_interval(&ts, &throttle_intervals[i]);
+ if (throttle_intervals[i].throttle == TRUE)
+ throttled = TRUE;
+ }
+
+ return throttled;
+}
+
+static void
+memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused)
+{
+ static boolean_t memorystatus_freeze_swap_low = FALSE;
+
+ if (memorystatus_freeze_enabled) {
+ if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) {
+ /* Only freeze if we've not exceeded our pageout budgets or we're not backed by swap. */
+ if (DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS ||
+ !memorystatus_freeze_update_throttle()) {
+ memorystatus_freeze_top_process(&memorystatus_freeze_swap_low);
+ } else {
+ printf("memorystatus_freeze_thread: in throttle, ignoring freeze\n");
+ memorystatus_freeze_throttle_count++; /* Throttled, update stats */
+ }
+ }
+ }
+
+ assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT);
+ thread_block((thread_continue_t) memorystatus_freeze_thread);
+}
+
+#endif /* CONFIG_FREEZE */
+
+#if VM_PRESSURE_EVENTS
+
+#if CONFIG_MEMORYSTATUS
+
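+/* Post a kernel event on the memorystatus subclass (snapshot, freeze and pressure notes) */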
+static int
+memorystatus_send_note(int event_code, void *data, size_t data_length) {
+ int ret;
+ struct kev_msg ev_msg;
+
+ ev_msg.vendor_code = KEV_VENDOR_APPLE;
+ ev_msg.kev_class = KEV_SYSTEM_CLASS;
+ ev_msg.kev_subclass = KEV_MEMORYSTATUS_SUBCLASS;
+
+ ev_msg.event_code = event_code;
+
+ ev_msg.dv[0].data_length = data_length;
+ ev_msg.dv[0].data_ptr = data;
+ ev_msg.dv[1].data_length = 0;
+
+ ret = kev_post_msg(&ev_msg);
+ if (ret) {
+ printf("%s: kev_post_msg() failed, err %d\n", __func__, ret);
+ }
+
+ return ret;
+}
+
+boolean_t
+memorystatus_warn_process(pid_t pid, boolean_t critical) {
+
+ boolean_t ret = FALSE;
+ struct knote *kn = NULL;
+
+ /*
+ * See comment in sysctl_memorystatus_vm_pressure_send.
+ */
+
+ memorystatus_klist_lock();
+ kn = vm_find_knote_from_pid(pid, &memorystatus_klist);
+ if (kn) {
+ /*
+ * By setting the "fflags" here, we are forcing
+ * a process to deal with the case where it's
+ * bumping up into its memory limits. If we don't
+ * do this here, we will end up depending on the
+ * system pressure snapshot evaluation in
+ * filt_memorystatus().
+ */
+
+ if (critical) {
+ kn->kn_fflags |= NOTE_MEMORYSTATUS_PRESSURE_CRITICAL;
+ } else {
+ kn->kn_fflags |= NOTE_MEMORYSTATUS_PRESSURE_WARN;
+ }
+ KNOTE(&memorystatus_klist, kMemorystatusPressure);
+ ret = TRUE;
+ } else {
+ if (vm_dispatch_pressure_note_to_pid(pid, FALSE) == 0) {
+ ret = TRUE;
+ }
+ }
+ memorystatus_klist_unlock();
+
+ return ret;
+}
+
+int
+memorystatus_send_pressure_note(pid_t pid) {
+ MEMORYSTATUS_DEBUG(1, "memorystatus_send_pressure_note(): pid %d\n", pid);
+ return memorystatus_send_note(kMemorystatusPressureNote, &pid, sizeof(pid));
+}
+
+void
+memorystatus_send_low_swap_note(void) {
+
+ struct knote *kn = NULL;
+
+ memorystatus_klist_lock();
+ SLIST_FOREACH(kn, &memorystatus_klist, kn_selnext) {
+ if (is_knote_registered_modify_task_pressure_bits(kn, NOTE_MEMORYSTATUS_LOW_SWAP, NULL, 0, 0) == TRUE) {
+ KNOTE(&memorystatus_klist, kMemorystatusLowSwap);
+ }
+ }
+ memorystatus_klist_unlock();
+}
+
+boolean_t
+memorystatus_bg_pressure_eligible(proc_t p) {
+ boolean_t eligible = FALSE;
+
+ proc_list_lock();
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus_bg_pressure_eligible: pid %d, state 0x%x\n", p->p_pid, p->p_memstat_state);
+
+ /* Foreground processes have already been dealt with at this point, so just test for eligibility */
+ if (!(p->p_memstat_state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_SUSPENDED | P_MEMSTAT_FROZEN))) {
+ eligible = TRUE;
+ }
+
+ proc_list_unlock();
+
+ return eligible;
+}
+
+boolean_t
+memorystatus_is_foreground_locked(proc_t p) {
+ return ((p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND) ||
+ (p->p_memstat_effectivepriority == JETSAM_PRIORITY_FOREGROUND_SUPPORT));
+}
+#endif /* CONFIG_MEMORYSTATUS */
+
+/*
+ * Trigger levels to test the mechanism.
+ * Can be used via a sysctl.
+ */
+#define TEST_LOW_MEMORY_TRIGGER_ONE 1
+#define TEST_LOW_MEMORY_TRIGGER_ALL 2
+#define TEST_PURGEABLE_TRIGGER_ONE 3
+#define TEST_PURGEABLE_TRIGGER_ALL 4
+#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE 5
+#define TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL 6
+
+boolean_t memorystatus_manual_testing_on = FALSE;
+vm_pressure_level_t memorystatus_manual_testing_level = kVMPressureNormal;
+
+extern struct knote *
+vm_pressure_select_optimal_candidate_to_notify(struct klist *, int, boolean_t);
+
+extern
+kern_return_t vm_pressure_notification_without_levels(boolean_t);
+
+extern void vm_pressure_klist_lock(void);
+extern void vm_pressure_klist_unlock(void);
+
+extern void vm_reset_active_list(void);
+
+extern void delay(int);
+
+#define INTER_NOTIFICATION_DELAY (250000) /* .25 second */
+
+void memorystatus_on_pageout_scan_end(void) {
+ /* No-op */
+}
+
+/*
+ * kn_max - knote
+ *
+ * knote_pressure_level - to check if the knote is registered for this notification level.
+ *
+ * task - task whose bits we'll be modifying
+ *
+ * pressure_level_to_clear - if the task has been notified of this past level, clear that notification bit so that if/when we revert to that level, the task will be notified again.
+ *
+ * pressure_level_to_set - the task is about to be notified of this new level. Update the task's bit notification information appropriately.
+ *
+ */
+
+boolean_t
+is_knote_registered_modify_task_pressure_bits(struct knote *kn_max, int knote_pressure_level, task_t task, vm_pressure_level_t pressure_level_to_clear, vm_pressure_level_t pressure_level_to_set)
+{
+ if (kn_max->kn_sfflags & knote_pressure_level) {
+
+ if (task_has_been_notified(task, pressure_level_to_clear) == TRUE) {
+
+ task_clear_has_been_notified(task, pressure_level_to_clear);
+ }
+
+ task_mark_has_been_notified(task, pressure_level_to_set);
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+extern kern_return_t vm_pressure_notify_dispatch_vm_clients(boolean_t target_foreground_process);
+
+#define VM_PRESSURE_DECREASED_SMOOTHING_PERIOD 5000 /* milliseconds */
+
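+/*
+ * Deliver the current pressure level to interested clients: pick the best
+ * candidate knote per iteration, notify it, and (normally) pause between
+ * notifications. When no level-aware knote is left, fall back to the
+ * non-level-based dispatch clients.
+ */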
+kern_return_t
+memorystatus_update_vm_pressure(boolean_t target_foreground_process)
+{
+ struct knote *kn_max = NULL;
+ pid_t target_pid = -1;
+ struct klist dispatch_klist = { NULL };
+ proc_t target_proc = PROC_NULL;
+ struct task *task = NULL;
+ boolean_t found_candidate = FALSE;
+
+ static vm_pressure_level_t level_snapshot = kVMPressureNormal;
+ static vm_pressure_level_t prev_level_snapshot = kVMPressureNormal;
+ boolean_t smoothing_window_started = FALSE;
+ struct timeval smoothing_window_start_tstamp = {0, 0};
+ struct timeval curr_tstamp = {0, 0};
+ int elapsed_msecs = 0;
+
+#if !CONFIG_JETSAM
+#define MAX_IDLE_KILLS 100 /* limit the number of idle kills allowed */
+
+ int idle_kill_counter = 0;
+
+ /*
+ * On desktop we take this opportunity to relieve memory pressure
+ * by immediately killing idle exitable processes. We use a delay
+ * to avoid overkill. And we impose a max counter as a fail safe
+ * in case daemons re-launch too fast.
+ */
+ while ((memorystatus_vm_pressure_level != kVMPressureNormal) && (idle_kill_counter < MAX_IDLE_KILLS)) {
+ if (memorystatus_idle_exit_from_VM() == FALSE) {
+ /* No idle exitable processes left to kill */
+ break;
+ }
+ idle_kill_counter++;
+ delay(1000000); /* 1 second */
+ }
+#endif /* !CONFIG_JETSAM */
+
+ while (1) {
+
+ /*
+ * There is a race window here. But it's not clear
+ * how much we benefit from having extra synchronization.
+ */
+ level_snapshot = memorystatus_vm_pressure_level;
+
+ if (prev_level_snapshot > level_snapshot) {
+ /*
+ * Pressure decreased? Let's take a little breather
+ * and see if this condition stays.
+ */
+ if (smoothing_window_started == FALSE) {
+
+ smoothing_window_started = TRUE;
+ microuptime(&smoothing_window_start_tstamp);
+ }
+
+ microuptime(&curr_tstamp);
+ timevalsub(&curr_tstamp, &smoothing_window_start_tstamp);
+ elapsed_msecs = curr_tstamp.tv_sec * 1000 + curr_tstamp.tv_usec / 1000;
+
+ if (elapsed_msecs < VM_PRESSURE_DECREASED_SMOOTHING_PERIOD) {
+
+ delay(INTER_NOTIFICATION_DELAY);
+ continue;
+ }
+ }
+
+ prev_level_snapshot = level_snapshot;
+ smoothing_window_started = FALSE;
+
+ memorystatus_klist_lock();
+ kn_max = vm_pressure_select_optimal_candidate_to_notify(&memorystatus_klist, level_snapshot, target_foreground_process);
+
+ if (kn_max == NULL) {
+ memorystatus_klist_unlock();
+
+ /*
+ * No more level-based clients to notify.
+ * Try the non-level based notification clients.
+ *
+ * However, these non-level clients don't understand
+ * the "return-to-normal" notification.
+ *
+ * So don't consider them for those notifications. Just
+ * return instead.
+ *
+ */
+
+ if (level_snapshot != kVMPressureNormal) {
+ goto try_dispatch_vm_clients;
+ } else {
+ return KERN_FAILURE;
+ }
+ }
+
+ target_proc = kn_max->kn_kq->kq_p;
+
+ proc_list_lock();
+ if (target_proc != proc_ref_locked(target_proc)) {
+ target_proc = PROC_NULL;
+ proc_list_unlock();
+ memorystatus_klist_unlock();
+ continue;
+ }
+ proc_list_unlock();
+ memorystatus_klist_unlock();
+
+ target_pid = target_proc->p_pid;
+
+ task = (struct task *)(target_proc->task);
+
+ if (level_snapshot != kVMPressureNormal) {
+
+ if (level_snapshot == kVMPressureWarning || level_snapshot == kVMPressureUrgent) {
+
+ if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_WARN, task, kVMPressureCritical, kVMPressureWarning) == TRUE) {
+ found_candidate = TRUE;
+ }
+ } else {
+ if (level_snapshot == kVMPressureCritical) {
+
+ if (is_knote_registered_modify_task_pressure_bits(kn_max, NOTE_MEMORYSTATUS_PRESSURE_CRITICAL, task, kVMPressureWarning, kVMPressureCritical) == TRUE) {
+ found_candidate = TRUE;
+ }
+ }
+ }
+ } else {
+ if (kn_max->kn_sfflags & NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
+
+ task_clear_has_been_notified(task, kVMPressureWarning);
+ task_clear_has_been_notified(task, kVMPressureCritical);
+
+ found_candidate = TRUE;
+ }
+ }
+
+ if (found_candidate == FALSE) {
+ continue;
+ }
+
+ memorystatus_klist_lock();
+ KNOTE_DETACH(&memorystatus_klist, kn_max);
+ KNOTE_ATTACH(&dispatch_klist, kn_max);
+ memorystatus_klist_unlock();
+
+ KNOTE(&dispatch_klist, (level_snapshot != kVMPressureNormal) ? kMemorystatusPressure : kMemorystatusNoPressure);
+
+ memorystatus_klist_lock();
+ KNOTE_DETACH(&dispatch_klist, kn_max);
+ KNOTE_ATTACH(&memorystatus_klist, kn_max);
+ memorystatus_klist_unlock();
+
+ microuptime(&target_proc->vm_pressure_last_notify_tstamp);
+ proc_rele(target_proc);
+
+ if (memorystatus_manual_testing_on == TRUE && target_foreground_process == TRUE) {
+ break;
+ }
+
+try_dispatch_vm_clients:
+ if (kn_max == NULL && level_snapshot != kVMPressureNormal) {
+ /*
+ * We will exit this loop when we are done with
+ * notification clients (level and non-level based).
+ */
+ if ((vm_pressure_notify_dispatch_vm_clients(target_foreground_process) == KERN_FAILURE) && (kn_max == NULL)) {
+ /*
+ * kn_max == NULL i.e. we didn't find any eligible clients for the level-based notifications
+ * AND
+ * we have failed to find any eligible clients for the non-level based notifications too.
+ * So, we are done.
+ */
+
+ return KERN_FAILURE;
+ }
+ }
+
+ /*
+ * LD: This block of code below used to be invoked in the older memory notification scheme on embedded every time
+ * a process was sent a memory pressure notification. The "memorystatus_klist" list was used to hold these
+ * privileged listeners. But now we have moved to the newer scheme and are trying to move away from the extra
+ * notifications. So the code is here in case we break compat. and need to send out notifications to the privileged
+ * apps.
+ */
+#if 0
+#endif /* 0 */
+
+ if (memorystatus_manual_testing_on == TRUE) {
+ /*
+ * Testing out the pressure notification scheme.
+ * No need for delays etc.
+ */
+ } else {
+
+ uint32_t sleep_interval = INTER_NOTIFICATION_DELAY;
+#if CONFIG_JETSAM
+ unsigned int page_delta = 0;
+ unsigned int skip_delay_page_threshold = 0;
+
+ assert(memorystatus_available_pages_pressure >= memorystatus_available_pages_critical_base);
+
+ page_delta = (memorystatus_available_pages_pressure - memorystatus_available_pages_critical_base) / 2;
+ skip_delay_page_threshold = memorystatus_available_pages_pressure - page_delta;
+
+ if (memorystatus_available_pages <= skip_delay_page_threshold) {
+ /*
+ * We are nearing the critical mark fast and can't afford to wait between
+ * notifications.
+ */
+ sleep_interval = 0;
+ }
+#endif /* CONFIG_JETSAM */
+
+ if (sleep_interval) {
+ delay(sleep_interval);
+ }
+ }
+ }
+
+ return KERN_SUCCESS;
+}
+
+vm_pressure_level_t
+convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t);
+
+vm_pressure_level_t
+convert_internal_pressure_level_to_dispatch_level(vm_pressure_level_t internal_pressure_level)
+{
+ vm_pressure_level_t dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL;
+
+ switch (internal_pressure_level) {
+
+ case kVMPressureNormal:
+ {
+ dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_NORMAL;
+ break;
+ }
+
+ case kVMPressureWarning:
+ case kVMPressureUrgent:
+ {
+ dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_WARN;
+ break;
+ }
+
+ case kVMPressureCritical:
+ {
+ dispatch_level = NOTE_MEMORYSTATUS_PRESSURE_CRITICAL;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return dispatch_level;
+}
+
+static int
+sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2, oidp)
+ vm_pressure_level_t dispatch_level = convert_internal_pressure_level_to_dispatch_level(memorystatus_vm_pressure_level);
+
+ return SYSCTL_OUT(req, &dispatch_level, sizeof(dispatch_level));
+}
+
+#if DEBUG || DEVELOPMENT
+
+SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED,
+ 0, 0, &sysctl_memorystatus_vm_pressure_level, "I", "");
+
+#else /* DEBUG || DEVELOPMENT */
+
+SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+ 0, 0, &sysctl_memorystatus_vm_pressure_level, "I", "");
+
+#endif /* DEBUG || DEVELOPMENT */
+
+extern int memorystatus_purge_on_warning;
+extern int memorystatus_purge_on_critical;
+
+static int
+sysctl_memorypressure_manual_trigger SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+
+ int level = 0;
+ int error = 0;
+ int pressure_level = 0;
+ int trigger_request = 0;
+ int force_purge;
+
+ error = sysctl_handle_int(oidp, &level, 0, req);
+ if (error || !req->newptr) {
+ return (error);
+ }
+
+ memorystatus_manual_testing_on = TRUE;
+
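+ /* The value packs the trigger request into the upper 16 bits and the pressure level (a sys/event.h constant) into the lower 16 bits */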
+ trigger_request = (level >> 16) & 0xFFFF;
+ pressure_level = (level & 0xFFFF);
+
+ if (trigger_request < TEST_LOW_MEMORY_TRIGGER_ONE ||
+ trigger_request > TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL) {
+ return EINVAL;
+ }
+ switch (pressure_level) {
+ case NOTE_MEMORYSTATUS_PRESSURE_NORMAL:
+ case NOTE_MEMORYSTATUS_PRESSURE_WARN:
+ case NOTE_MEMORYSTATUS_PRESSURE_CRITICAL:
+ break;
+ default:
+ return EINVAL;
+ }
+
+ /*
+ * The pressure level is being set from user-space.
+ * And user-space uses the constants in sys/event.h
+ * So we translate those events to our internal levels here.
+ */
+ if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
+
+ memorystatus_manual_testing_level = kVMPressureNormal;
+ force_purge = 0;
+
+ } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_WARN) {
+
+ memorystatus_manual_testing_level = kVMPressureWarning;
+ force_purge = memorystatus_purge_on_warning;
+
+ } else if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_CRITICAL) {
+
+ memorystatus_manual_testing_level = kVMPressureCritical;
+ force_purge = memorystatus_purge_on_critical;
+ }
+
+ memorystatus_vm_pressure_level = memorystatus_manual_testing_level;
+
+ /* purge according to the new pressure level */
+ switch (trigger_request) {
+ case TEST_PURGEABLE_TRIGGER_ONE:
+ case TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE:
+ if (force_purge == 0) {
+ /* no purging requested */
+ break;
+ }
+ vm_purgeable_object_purge_one_unlocked(force_purge);
+ break;
+ case TEST_PURGEABLE_TRIGGER_ALL:
+ case TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL:
+ if (force_purge == 0) {
+ /* no purging requested */
+ break;
+ }
+ while (vm_purgeable_object_purge_one_unlocked(force_purge));
+ break;
+ }
+
+ if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ONE) ||
+ (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ONE)) {
+
+ memorystatus_update_vm_pressure(TRUE);
+ }
+
+ if ((trigger_request == TEST_LOW_MEMORY_TRIGGER_ALL) ||
+ (trigger_request == TEST_LOW_MEMORY_PURGEABLE_TRIGGER_ALL)) {
+
+ while (memorystatus_update_vm_pressure(FALSE) == KERN_SUCCESS) {
+ continue;
+ }
+ }
+
+ if (pressure_level == NOTE_MEMORYSTATUS_PRESSURE_NORMAL) {
+ memorystatus_manual_testing_on = FALSE;
+
+ vm_pressure_klist_lock();
+ vm_reset_active_list();
+ vm_pressure_klist_unlock();
+ } else {
+
+ vm_pressure_klist_lock();
+ vm_pressure_notification_without_levels(FALSE);
+ vm_pressure_klist_unlock();
+ }
+
+ return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, memorypressure_manual_trigger, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+ 0, 0, &sysctl_memorypressure_manual_trigger, "I", "");
+
+
+extern int memorystatus_purge_on_warning;
+extern int memorystatus_purge_on_urgent;
+extern int memorystatus_purge_on_critical;
+
+SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_warning, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_warning, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_urgent, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_urgent, 0, "");
+SYSCTL_INT(_kern, OID_AUTO, memorystatus_purge_on_critical, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_purge_on_critical, 0, "");
+
+
+#endif /* VM_PRESSURE_EVENTS */
+
+/* Return both allocated and actual size, since there's a race between allocation and list compilation */
+static int
+memorystatus_get_priority_list(memorystatus_priority_entry_t **list_ptr, size_t *buffer_size, size_t *list_size, boolean_t size_only)
+{
+ uint32_t list_count, i = 0;
+ memorystatus_priority_entry_t *list_entry;
+ proc_t p;
+
+ list_count = memorystatus_list_count;
+ *list_size = sizeof(memorystatus_priority_entry_t) * list_count;
+
+ /* Just a size check? */
+ if (size_only) {
+ return 0;
+ }
+
+ /* Otherwise, validate the size of the buffer */
+ if (*buffer_size < *list_size) {
+ return EINVAL;
+ }
+
+ *list_ptr = (memorystatus_priority_entry_t*)kalloc(*list_size);
+ if (!*list_ptr) {
+ return ENOMEM;
+ }
+
+ memset(*list_ptr, 0, *list_size);
+
+ *buffer_size = *list_size;
+ *list_size = 0;
+
+ list_entry = *list_ptr;
+
+ proc_list_lock();
+
+ p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (p && (*list_size < *buffer_size)) {
+ list_entry->pid = p->p_pid;
+ list_entry->priority = p->p_memstat_effectivepriority;
+ list_entry->user_data = p->p_memstat_userdata;
+#if LEGACY_HIWATER
+ if (((p->p_memstat_state & P_MEMSTAT_MEMLIMIT_BACKGROUND) && (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND)) ||
+ (p->p_memstat_memlimit <= 0)) {
+ task_get_phys_footprint_limit(p->task, &list_entry->limit);
+ } else {
+ list_entry->limit = p->p_memstat_memlimit;
+ }
+#else
+ task_get_phys_footprint_limit(p->task, &list_entry->limit);
+#endif
+ list_entry->state = memorystatus_build_state(p);
+ list_entry++;
+
+ *list_size += sizeof(memorystatus_priority_entry_t);
+
+ p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+ }
+
+ proc_list_unlock();
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus_get_priority_list: returning %lu for size\n", (unsigned long)*list_size);
+
+ return 0;
+}
+
+static int
+memorystatus_cmd_get_priority_list(user_addr_t buffer, size_t buffer_size, int32_t *retval) {
+ int error = EINVAL;
+ boolean_t size_only;
+ memorystatus_priority_entry_t *list = NULL;
+ size_t list_size;
+
+ size_only = ((buffer == USER_ADDR_NULL) ? TRUE: FALSE);
+
+ error = memorystatus_get_priority_list(&list, &buffer_size, &list_size, size_only);
+ if (error) {
+ goto out;
+ }
+
+ if (!size_only) {
+ error = copyout(list, buffer, list_size);
+ }
+
+ if (error == 0) {
+ *retval = list_size;
+ }
+out:
+
+ if (list) {
+ kfree(list, buffer_size);
+ }
+
+ return error;
+}
+
+#if CONFIG_JETSAM
+
+static void
+memorystatus_clear_errors(void)
+{
+ proc_t p;
+ unsigned int i = 0;
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_START, 0, 0, 0, 0, 0);
+
+ proc_list_lock();
+
+ p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (p) {
+ if (p->p_memstat_state & P_MEMSTAT_ERROR) {
+ p->p_memstat_state &= ~P_MEMSTAT_ERROR;
+ }
+ p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+ }
+
+ proc_list_unlock();
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_CLEAR_ERRORS) | DBG_FUNC_END, 0, 0, 0, 0, 0);
+}
+
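+/*
+ * Recompute the critical (and, unless critical_only is set, the pressure)
+ * available-page thresholds, accounting for idle processes and any active
+ * diagnostic jetsam policy.
+ */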
+static void
+memorystatus_update_levels_locked(boolean_t critical_only) {
+
+ memorystatus_available_pages_critical = memorystatus_available_pages_critical_base;
+
+ /*
+ * If there's an entry in the first bucket, we have idle processes.
+ */
+ memstat_bucket_t *first_bucket = &memstat_bucket[JETSAM_PRIORITY_IDLE];
+ if (first_bucket->count) {
+ memorystatus_available_pages_critical += memorystatus_available_pages_critical_idle_offset;
+
+ if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure ) {
+ /*
+ * The critical threshold must never exceed the pressure threshold
+ */
+ memorystatus_available_pages_critical = memorystatus_available_pages_pressure;
+ }
+ }
+
+#if DEBUG || DEVELOPMENT
+ if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
+ memorystatus_available_pages_critical += memorystatus_jetsam_policy_offset_pages_diagnostic;
+
+ if (memorystatus_available_pages_critical > memorystatus_available_pages_pressure ) {
+ /*
+ * The critical threshold must never exceed the pressure threshold
+ */
+ memorystatus_available_pages_critical = memorystatus_available_pages_pressure;
+ }
+ }
+#endif
+
+ if (critical_only) {
+ return;
+ }
+
+#if VM_PRESSURE_EVENTS
+ memorystatus_available_pages_pressure = (pressure_threshold_percentage / delta_percentage) * memorystatus_delta;
+#if DEBUG || DEVELOPMENT
+ if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
+ memorystatus_available_pages_pressure += memorystatus_jetsam_policy_offset_pages_diagnostic;
+ }
+#endif
+#endif
+}
+
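+/* Compute the size of the current jetsam snapshot (header plus one entry per captured process) */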
+static int
+memorystatus_get_snapshot(memorystatus_jetsam_snapshot_t **snapshot, size_t *snapshot_size, boolean_t size_only) {
+ size_t input_size = *snapshot_size;
+
+ if (memorystatus_jetsam_snapshot_count > 0) {
+ *snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + (sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_count));
+ } else {
+ *snapshot_size = 0;
+ }
+
+ if (size_only) {
+ return 0;
+ }