+
+ if (memlimit_is_fatal) {
+ /*
+ * If this process has no high watermark or has a fatal task limit, then we have been invoked because the task
+ * has violated either the system-wide per-task memory limit OR its own task limit.
+ */
+ jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_PERPROCESSLIMIT);
+ if (jetsam_reason == NULL) {
+ printf("task_exceeded footprint: failed to allocate jetsam reason\n");
+ } else if (corpse_for_fatal_memkill != 0 && proc_send_synchronous_EXC_RESOURCE(p) == FALSE) {
+ /* Set OS_REASON_FLAG_GENERATE_CRASH_REPORT to generate corpse */
+ jetsam_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
+ }
+
+ if (memorystatus_kill_process_sync(p->p_pid, kMemorystatusKilledPerProcessLimit, jetsam_reason) != TRUE) {
+ printf("task_exceeded_footprint: failed to kill the current task (exiting?).\n");
+ }
+ } else {
+ /*
+ * HWM offender exists. Done without locks or synchronization.
+ * See comment near its declaration for more details.
+ */
+ memorystatus_hwm_candidates = TRUE;
+
+#if VM_PRESSURE_EVENTS
+ /*
+ * The current process is not in the warning path.
+ * This path implies the current process has exceeded a non-fatal (soft) memory limit.
+ * Failure to send note is ignored here.
+ */
+ (void)memorystatus_warn_process(p->p_pid, memlimit_is_active, memlimit_is_fatal, TRUE /* exceeded */);
+
+#endif /* VM_PRESSURE_EVENTS */
+ }
+}
+
+void
+memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal)
+{
+ proc_t p = current_proc();
+
+ /*
+ * The limit violation is logged here, but only once per process per limit.
+ * A soft memory limit is a non-fatal high-water-mark.
+ * A hard memory limit is a fatal custom-task-limit or system-wide per-task memory limit.
+ */
+
+ os_log_with_startup_serial(OS_LOG_DEFAULT, "EXC_RESOURCE -> %s[%d] exceeded mem limit: %s%s %d MB (%s)\n",
+ (*p->p_name ? p->p_name : "unknown"), p->p_pid, (memlimit_is_active ? "Active" : "Inactive"),
+ (memlimit_is_fatal ? "Hard" : "Soft"), max_footprint_mb,
+ (memlimit_is_fatal ? "fatal" : "non-fatal"));
+
+ return;
+}
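+
+/*
+ * Example (illustrative name/pid): a fatal, active 200 MB limit violation by
+ * "MyApp" (pid 123) produces a log line of the form:
+ *
+ * EXC_RESOURCE -> MyApp[123] exceeded mem limit: ActiveHard 200 MB (fatal)
+ */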
+
+
+/*
+ * Description:
+ * Evaluates process state to determine which limit
+ * should be applied (active vs. inactive limit).
+ *
+ * Processes that have the 'elevated inactive jetsam band' attribute
+ * are first evaluated based on their current priority band.
+ * presently elevated ==> active
+ *
+ * Processes that opt into dirty tracking are evaluated
+ * based on clean vs dirty state.
+ * dirty ==> active
+ * clean ==> inactive
+ *
+ * Processes that do not opt into dirty tracking are
+ * evaluated based on priority level.
+ * Foreground or above ==> active
+ * Below Foreground ==> inactive
+ *
+ * Return: TRUE if active
+ * FALSE if inactive
+ */
+
+static boolean_t
+proc_jetsam_state_is_active_locked(proc_t p)
+{
+ if ((p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND) &&
+ (p->p_memstat_effectivepriority == JETSAM_PRIORITY_ELEVATED_INACTIVE)) {
+ /*
+ * process has the 'elevated inactive jetsam band' attribute
+ * and process is present in the elevated band
+ * implies active state
+ */
+ return TRUE;
+ } else if (p->p_memstat_dirty & P_DIRTY_TRACK) {
+ /*
+ * process has opted into dirty tracking
+ * active state is based on dirty vs. clean
+ */
+ if (p->p_memstat_dirty & P_DIRTY_IS_DIRTY) {
+ /*
+ * process is dirty
+ * implies active state
+ */
+ return TRUE;
+ } else {
+ /*
+ * process is clean
+ * implies inactive state
+ */
+ return FALSE;
+ }
+ } else if (p->p_memstat_effectivepriority >= JETSAM_PRIORITY_FOREGROUND) {
+ /*
+ * process is Foreground or higher
+ * implies active state
+ */
+ return TRUE;
+ } else {
+ /*
+ * process found below Foreground
+ * implies inactive state
+ */
+ return FALSE;
+ }
+}
+
+static boolean_t
+memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason)
+{
+ boolean_t res;
+
+ uint32_t errors = 0;
+
+ if (victim_pid == -1) {
+ /* No pid, so kill first process */
+ res = memorystatus_kill_top_process(TRUE, TRUE, cause, jetsam_reason, NULL, &errors);
+ } else {
+ res = memorystatus_kill_specific_process(victim_pid, cause, jetsam_reason);
+ }
+
+ if (errors) {
+ memorystatus_clear_errors();
+ }
+
+ if (res == TRUE) {
+ /* Fire off snapshot notification */
+ proc_list_lock();
+ size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) +
+ sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count;
+ uint64_t timestamp_now = mach_absolute_time();
+ memorystatus_jetsam_snapshot->notification_time = timestamp_now;
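+ /*
+ * Throttle the notification: only post a snapshot note when the snapshot
+ * has entries and either no note is outstanding (last_timestamp == 0) or
+ * the snapshot timeout has elapsed since the last note.
+ */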
+ if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 ||
+ timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) {
+ proc_list_unlock();
+ int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
+ if (!ret) {
+ proc_list_lock();
+ memorystatus_jetsam_snapshot_last_timestamp = timestamp_now;
+ proc_list_unlock();
+ }
+ } else {
+ proc_list_unlock();
+ }
+ }
+
+ return res;
+}
+
+/*
+ * Jetsam a specific process.
+ */
+static boolean_t
+memorystatus_kill_specific_process(pid_t victim_pid, uint32_t cause, os_reason_t jetsam_reason)
+{
+ boolean_t killed;
+ proc_t p;
+ uint64_t killtime = 0;
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+ uint32_t tv_msec;
+
+ /* TODO - add a victim queue and push this into the main jetsam thread */
+
+ p = proc_find(victim_pid);
+ if (!p) {
+ os_reason_free(jetsam_reason);
+ return FALSE;
+ }
+
+ proc_list_lock();
+
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ memorystatus_init_jetsam_snapshot_locked(NULL, 0);
+ }
+
+ killtime = mach_absolute_time();
+ absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
+ tv_msec = tv_usec / 1000;
+
+ memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
+
+ proc_list_unlock();
+
+ os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_specific_process pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n",
+ (unsigned long)tv_sec, tv_msec, victim_pid, (*p->p_name ? p->p_name : "unknown"),
+ memorystatus_kill_cause_name[cause], p->p_memstat_effectivepriority, (uint64_t)memorystatus_available_pages);
+
+ killed = memorystatus_do_kill(p, cause, jetsam_reason);
+ proc_rele(p);
+
+ return killed;
+}
+
+
+/*
+ * Toggle the P_MEMSTAT_TERMINATED state.
+ * Takes the proc_list_lock; a no-op on RELEASE kernels (DEVELOPMENT || DEBUG only).
+ */
+void
+proc_memstat_terminated(proc_t p, boolean_t set)
+{
+#if DEVELOPMENT || DEBUG
+ if (p) {
+ proc_list_lock();
+ if (set == TRUE) {
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+ } else {
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ }
+ proc_list_unlock();
+ }
+#else
+#pragma unused(p, set)
+ /*
+ * do nothing
+ */
+#endif /* DEVELOPMENT || DEBUG */
+ return;
+}
+
+
+#if CONFIG_JETSAM
+/*
+ * This is invoked when cpulimits have been exceeded while in fatal mode.
+ * The jetsam_flags do not apply as those are for memory related kills.
+ * We call this routine so that the offending process is killed with
+ * a non-zero exit status.
+ */
+void
+jetsam_on_ledger_cpulimit_exceeded(void)
+{
+ int retval = 0;
+ int jetsam_flags = 0; /* make it obvious */
+ proc_t p = current_proc();
+ os_reason_t jetsam_reason = OS_REASON_NULL;
+
+ printf("task_exceeded_cpulimit: killing pid %d [%s]\n",
+ p->p_pid, (*p->p_name ? p->p_name : "(unknown)"));
+
+ jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_CPULIMIT);
+ if (jetsam_reason == OS_REASON_NULL) {
+ printf("task_exceeded_cpulimit: unable to allocate memory for jetsam reason\n");
+ }
+
+ retval = jetsam_do_kill(p, jetsam_flags, jetsam_reason);
+
+ if (retval) {
+ printf("task_exceeded_cpulimit: failed to kill current task (exiting?).\n");
+ }
+}
+
+#endif /* CONFIG_JETSAM */
+
+static void
+memorystatus_get_task_memory_region_count(task_t task, uint64_t *count)
+{
+ assert(task);
+ assert(count);
+
+ *count = get_task_memory_region_count(task);
+}
+
+
+#define MEMORYSTATUS_VM_MAP_FORK_ALLOWED 0x100000000
+#define MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED 0x200000000
+
+#if DEVELOPMENT || DEBUG
+
+/*
+ * Sysctl only used to test memorystatus_allowed_vm_map_fork() path.
+ * set a new pidwatch value
+ * or
+ * get the current pidwatch value
+ *
+ * The pidwatch_val starts out with a PID to watch for in the map_fork path.
+ * Its value is:
+ * - OR'd with MEMORYSTATUS_VM_MAP_FORK_ALLOWED if we allow the map_fork.
+ * - OR'd with MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED if we disallow the map_fork.
+ * - set to -1ull if the map_fork() is aborted for other reasons.
+ */
+
+uint64_t memorystatus_vm_map_fork_pidwatch_val = 0;
+
+static int sysctl_memorystatus_vm_map_fork_pidwatch SYSCTL_HANDLER_ARGS {
+#pragma unused(oidp, arg1, arg2)
+
+ uint64_t new_value = 0;
+ uint64_t old_value = 0;
+ int error = 0;
+
+ /*
+ * The pid is held in the low 32 bits.
+ * The 'allowed' flags are in the upper 32 bits.
+ */
+ old_value = memorystatus_vm_map_fork_pidwatch_val;
+
+ error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL);
+
+ if (error || !req->newptr) {
+ /*
+ * No new value passed in.
+ */
+ return error;
+ }
+
+ /*
+ * A new pid was passed in via req->newptr.
+ * Ignore any attempt to set the higher order bits.
+ */
+ memorystatus_vm_map_fork_pidwatch_val = new_value & 0xFFFFFFFF;
+ printf("memorystatus: pidwatch old_value = 0x%llx, new_value = 0x%llx \n", old_value, new_value);
+
+ return error;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_map_fork_pidwatch, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_MASKED,
+ 0, 0, sysctl_memorystatus_vm_map_fork_pidwatch, "Q", "get/set pid watched for in vm_map_fork");
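+
+/*
+ * Illustrative usage from user space on a DEVELOPMENT/DEBUG kernel
+ * (pid 1234 is an arbitrary example):
+ *
+ * sysctl kern.memorystatus_vm_map_fork_pidwatch=1234 # watch pid 1234
+ * # ...trigger an EXC_RESOURCE corpse fork for that pid...
+ * sysctl kern.memorystatus_vm_map_fork_pidwatch # read back the result
+ *
+ * The read-back value is the pid OR'd with MEMORYSTATUS_VM_MAP_FORK_ALLOWED
+ * or MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED, or -1 if the map_fork was aborted.
+ */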
+
+
+/*
+ * Record if a watched process fails to qualify for a vm_map_fork().
+ */
+void
+memorystatus_abort_vm_map_fork(task_t task)
+{
+ if (memorystatus_vm_map_fork_pidwatch_val != 0) {
+ proc_t p = get_bsdtask_info(task);
+ if (p != NULL && memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid) {
+ memorystatus_vm_map_fork_pidwatch_val = -1ull;
+ }
+ }
+}
+
+static void
+set_vm_map_fork_pidwatch(task_t task, uint64_t x)
+{
+ if (memorystatus_vm_map_fork_pidwatch_val != 0) {
+ proc_t p = get_bsdtask_info(task);
+ if (p && (memorystatus_vm_map_fork_pidwatch_val == (uint64_t)p->p_pid)) {
+ memorystatus_vm_map_fork_pidwatch_val |= x;
+ }
+ }
+}
+
+#else /* DEVELOPMENT || DEBUG */
+
+
+static void
+set_vm_map_fork_pidwatch(task_t task, uint64_t x)
+{
+#pragma unused(task)
+#pragma unused(x)
+}
+
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * Called during EXC_RESOURCE handling when a process exceeds a soft
+ * memory limit. This is the corpse fork path and here we decide if
+ * vm_map_fork will be allowed when creating the corpse.
+ * The task being considered is suspended.
+ *
+ * By default, a vm_map_fork is allowed to proceed.
+ *
+ * A few simple policy assumptions:
+ * Desktop platform is not considered in this path.
+ * The vm_map_fork is always allowed.
+ *
+ * If the device has a zero system-wide task limit,
+ * then the vm_map_fork is allowed.
+ *
+ * And if a process's memory footprint calculates less
+ * than or equal to one quarter of the system-wide task limit,
+ * then the vm_map_fork is allowed. This calculation
+ * is based on the assumption that a process can
+ * munch memory up to the system-wide task limit.
+ */
+boolean_t
+memorystatus_allowed_vm_map_fork(task_t task)
+{
+ boolean_t is_allowed = TRUE; /* default */
+
+#if CONFIG_EMBEDDED
+
+ uint64_t footprint_in_bytes;
+ uint64_t max_allowed_bytes;
+
+ if (max_task_footprint_mb == 0) {
+ set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED);
+ return is_allowed;
+ }
+
+ footprint_in_bytes = get_task_phys_footprint(task);
+
+ /*
+ * Maximum is 1/4 of the system-wide task limit.
+ */
+ max_allowed_bytes = ((uint64_t)max_task_footprint_mb * 1024 * 1024) >> 2;
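+ /*
+ * For example (illustrative limit): with max_task_footprint_mb = 2048,
+ * max_allowed_bytes = 2048 MB >> 2 = 512 MB, so the corpse fork proceeds
+ * only while the task's footprint is at or below 512 MB.
+ */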
+
+ if (footprint_in_bytes > max_allowed_bytes) {
+ printf("memorystatus disallowed vm_map_fork %lld %lld\n", footprint_in_bytes, max_allowed_bytes);
+ set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_NOT_ALLOWED);
+ return !is_allowed;
+ }
+#endif /* CONFIG_EMBEDDED */
+
+ set_vm_map_fork_pidwatch(task, MEMORYSTATUS_VM_MAP_FORK_ALLOWED);
+ return is_allowed;
+}
+
+static void
+memorystatus_get_task_page_counts(task_t task, uint32_t *footprint, uint32_t *max_footprint_lifetime, uint32_t *purgeable_pages)
+{
+ assert(task);
+ assert(footprint);
+
+ uint64_t pages;
+
+ pages = (get_task_phys_footprint(task) / PAGE_SIZE_64);
+ assert(((uint32_t)pages) == pages);
+ *footprint = (uint32_t)pages;
+
+ if (max_footprint_lifetime) {
+ pages = (get_task_phys_footprint_lifetime_max(task) / PAGE_SIZE_64);
+ assert(((uint32_t)pages) == pages);
+ *max_footprint_lifetime = (uint32_t)pages;
+ }
+ if (purgeable_pages) {
+ pages = (get_task_purgeable_size(task) / PAGE_SIZE_64);
+ assert(((uint32_t)pages) == pages);
+ *purgeable_pages = (uint32_t)pages;
+ }
+}
+
+static void
+memorystatus_get_task_phys_footprint_page_counts(task_t task,
+ uint64_t *internal_pages, uint64_t *internal_compressed_pages,
+ uint64_t *purgeable_nonvolatile_pages, uint64_t *purgeable_nonvolatile_compressed_pages,
+ uint64_t *alternate_accounting_pages, uint64_t *alternate_accounting_compressed_pages,
+ uint64_t *iokit_mapped_pages, uint64_t *page_table_pages)
+{
+ assert(task);
+
+ if (internal_pages) {
+ *internal_pages = (get_task_internal(task) / PAGE_SIZE_64);
+ }
+
+ if (internal_compressed_pages) {
+ *internal_compressed_pages = (get_task_internal_compressed(task) / PAGE_SIZE_64);
+ }
+
+ if (purgeable_nonvolatile_pages) {
+ *purgeable_nonvolatile_pages = (get_task_purgeable_nonvolatile(task) / PAGE_SIZE_64);
+ }
+
+ if (purgeable_nonvolatile_compressed_pages) {
+ *purgeable_nonvolatile_compressed_pages = (get_task_purgeable_nonvolatile_compressed(task) / PAGE_SIZE_64);
+ }
+
+ if (alternate_accounting_pages) {
+ *alternate_accounting_pages = (get_task_alternate_accounting(task) / PAGE_SIZE_64);
+ }
+
+ if (alternate_accounting_compressed_pages) {
+ *alternate_accounting_compressed_pages = (get_task_alternate_accounting_compressed(task) / PAGE_SIZE_64);
+ }
+
+ if (iokit_mapped_pages) {
+ *iokit_mapped_pages = (get_task_iokit_mapped(task) / PAGE_SIZE_64);
+ }
+
+ if (page_table_pages) {
+ *page_table_pages = (get_task_page_table(task) / PAGE_SIZE_64);
+ }
+}
+
+/*
+ * This routine only acts on the global jetsam event snapshot.
+ * Updating the process's entry can race when the memorystatus_thread
+ * has chosen to kill a process that is racing to exit on another core.
+ */
+static void
+memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, uint64_t killtime)
+{
+ memorystatus_jetsam_snapshot_entry_t *entry = NULL;
+ memorystatus_jetsam_snapshot_t *snapshot = NULL;
+ memorystatus_jetsam_snapshot_entry_t *snapshot_list = NULL;
+
+ unsigned int i;
+
+ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED);
+
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ /*
+ * No active snapshot.
+ * Nothing to do.
+ */
+ return;
+ }
+
+ /*
+ * Sanity check as this routine should only be called
+ * from a jetsam kill path.
+ */
+ assert(kill_cause != 0 && killtime != 0);
+
+ snapshot = memorystatus_jetsam_snapshot;
+ snapshot_list = memorystatus_jetsam_snapshot->entries;
+
+ for (i = 0; i < memorystatus_jetsam_snapshot_count; i++) {
+ if (snapshot_list[i].pid == p->p_pid) {
+ entry = &snapshot_list[i];
+
+ if (entry->killed || entry->jse_killtime) {
+ /*
+ * We apparently raced on the exit path
+ * for this process, as its snapshot entry
+ * has already recorded a kill.
+ */
+ assert(entry->killed && entry->jse_killtime);
+ break;
+ }
+
+ /*
+ * Update the entry we just found in the snapshot.
+ */
+
+ entry->killed = kill_cause;
+ entry->jse_killtime = killtime;
+ entry->jse_gencount = snapshot->js_gencount;
+ entry->jse_idle_delta = p->p_memstat_idle_delta;
+#if CONFIG_FREEZE
+ entry->jse_thaw_count = p->p_memstat_thaw_count;
+#else /* CONFIG_FREEZE */
+ entry->jse_thaw_count = 0;
+#endif /* CONFIG_FREEZE */
+
+ /*
+ * If a process has moved between bands since snapshot was
+ * initialized, then likely these fields changed too.
+ */
+ if (entry->priority != p->p_memstat_effectivepriority) {
+ strlcpy(entry->name, p->p_name, sizeof(entry->name));
+ entry->priority = p->p_memstat_effectivepriority;
+ entry->state = memorystatus_build_state(p);
+ entry->user_data = p->p_memstat_userdata;
+ entry->fds = p->p_fd->fd_nfiles;
+ }
+
+ /*
+ * Always update the page counts on a kill.
+ */
+
+ uint32_t pages = 0;
+ uint32_t max_pages_lifetime = 0;
+ uint32_t purgeable_pages = 0;
+
+ memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages);
+ entry->pages = (uint64_t)pages;
+ entry->max_pages_lifetime = (uint64_t)max_pages_lifetime;
+ entry->purgeable_pages = (uint64_t)purgeable_pages;
+
+ uint64_t internal_pages = 0;
+ uint64_t internal_compressed_pages = 0;
+ uint64_t purgeable_nonvolatile_pages = 0;
+ uint64_t purgeable_nonvolatile_compressed_pages = 0;
+ uint64_t alternate_accounting_pages = 0;
+ uint64_t alternate_accounting_compressed_pages = 0;
+ uint64_t iokit_mapped_pages = 0;
+ uint64_t page_table_pages = 0;
+
+ memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages,
+ &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages,
+ &alternate_accounting_pages, &alternate_accounting_compressed_pages,
+ &iokit_mapped_pages, &page_table_pages);
+
+ entry->jse_internal_pages = internal_pages;
+ entry->jse_internal_compressed_pages = internal_compressed_pages;
+ entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages;
+ entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages;
+ entry->jse_alternate_accounting_pages = alternate_accounting_pages;
+ entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages;
+ entry->jse_iokit_mapped_pages = iokit_mapped_pages;
+ entry->jse_page_table_pages = page_table_pages;
+
+ uint64_t region_count = 0;
+ memorystatus_get_task_memory_region_count(p->task, &region_count);
+ entry->jse_memory_region_count = region_count;
+
+ goto exit;
+ }
+ }
+
+ if (entry == NULL) {
+ /*
+ * The entry was not found in the snapshot, so the process must have
+ * launched after the snapshot was initialized.
+ * Let's try to append the new entry.
+ */
+ if (memorystatus_jetsam_snapshot_count < memorystatus_jetsam_snapshot_max) {
+ /*
+ * A populated snapshot buffer exists
+ * and there is room to init a new entry.
+ */
+ assert(memorystatus_jetsam_snapshot_count == snapshot->entry_count);
+
+ unsigned int next = memorystatus_jetsam_snapshot_count;
+
+ if (memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[next], (snapshot->js_gencount)) == TRUE) {
+ entry = &snapshot_list[next];
+ entry->killed = kill_cause;
+ entry->jse_killtime = killtime;
+
+ snapshot->entry_count = ++next;
+ memorystatus_jetsam_snapshot_count = next;
+
+ if (memorystatus_jetsam_snapshot_count >= memorystatus_jetsam_snapshot_max) {
+ /*
+ * We just used the last slot in the snapshot buffer.
+ * We only want to log it once... so we do it here
+ * when we notice we've hit the max.
+ */
+ printf("memorystatus: WARNING snapshot buffer is full, count %d\n",
+ memorystatus_jetsam_snapshot_count);
+ }
+ }
+ }
+ }
+
+exit:
+ if (entry == NULL) {
+ /*
+ * If we reach here, the snapshot buffer could not be updated.
+ * Most likely, the buffer is full, in which case we would have
+ * logged a warning in the previous call.
+ *
+ * For now, we will stop appending snapshot entries.
+ * When the buffer is consumed, the snapshot state will reset.
+ */
+
+ MEMORYSTATUS_DEBUG(4, "memorystatus_update_jetsam_snapshot_entry_locked: failed to update pid %d, priority %d, count %d\n",
+ p->p_pid, p->p_memstat_effectivepriority, memorystatus_jetsam_snapshot_count);
+ }
+
+ return;
+}
+
+#if CONFIG_JETSAM
+void
+memorystatus_pages_update(unsigned int pages_avail)
+{
+ memorystatus_available_pages = pages_avail;
+
+#if VM_PRESSURE_EVENTS
+ /*
+ * Since memorystatus_available_pages changes, we should
+ * re-evaluate the pressure levels on the system and
+ * check if we need to wake the pressure thread.
+ * We also update memorystatus_level in that routine.
+ */
+ vm_pressure_response();
+
+ if (memorystatus_available_pages <= memorystatus_available_pages_pressure) {
+ if (memorystatus_hwm_candidates || (memorystatus_available_pages <= memorystatus_available_pages_critical)) {
+ memorystatus_thread_wake();
+ }
+ }
+#if CONFIG_FREEZE
+ /*
+ * We can't grab the freezer_mutex here even though that synchronization would be correct to inspect
+ * the # of frozen processes and wakeup the freezer thread. Reason being that we come here into this
+ * code with (possibly) the page-queue locks held and preemption disabled. So trying to grab a mutex here
+ * will result in the "mutex with preemption disabled" panic.
+ */
+
+ if (memorystatus_freeze_thread_should_run() == TRUE) {
+ /*
+ * The freezer thread is usually woken up by some user-space call i.e. pid_hibernate(any process).
+ * That trigger isn't invoked often enough and so we are enabling this explicit wakeup here.
+ */
+ if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
+ thread_wakeup((event_t)&memorystatus_freeze_wakeup);
+ }
+ }
+#endif /* CONFIG_FREEZE */
+
+#else /* VM_PRESSURE_EVENTS */
+
+ boolean_t critical, delta;
+
+ if (!memorystatus_delta) {
+ return;
+ }
+
+ critical = (pages_avail < memorystatus_available_pages_critical) ? TRUE : FALSE;
+ delta = ((pages_avail >= (memorystatus_available_pages + memorystatus_delta))
+ || (memorystatus_available_pages >= (pages_avail + memorystatus_delta))) ? TRUE : FALSE;
+
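+ /*
+ * Wake the jetsam thread when we cross the critical threshold, or when
+ * the available-page count has moved by at least memorystatus_delta pages
+ * in either direction.
+ */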
+ if (critical || delta) {
+ unsigned int total_pages;
+
+ total_pages = (unsigned int) atop_64(max_mem);
+#if CONFIG_SECLUDED_MEMORY
+ total_pages -= vm_page_secluded_count;
+#endif /* CONFIG_SECLUDED_MEMORY */
+ memorystatus_level = memorystatus_available_pages * 100 / total_pages;
+ memorystatus_thread_wake();
+ }
+#endif /* VM_PRESSURE_EVENTS */
+}
+#endif /* CONFIG_JETSAM */
+
+static boolean_t
+memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_snapshot_entry_t *entry, uint64_t gencount)
+{
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+ uint32_t pages = 0;
+ uint32_t max_pages_lifetime = 0;
+ uint32_t purgeable_pages = 0;
+ uint64_t internal_pages = 0;
+ uint64_t internal_compressed_pages = 0;
+ uint64_t purgeable_nonvolatile_pages = 0;
+ uint64_t purgeable_nonvolatile_compressed_pages = 0;
+ uint64_t alternate_accounting_pages = 0;
+ uint64_t alternate_accounting_compressed_pages = 0;
+ uint64_t iokit_mapped_pages = 0;
+ uint64_t page_table_pages = 0;
+ uint64_t region_count = 0;
+ uint64_t cids[COALITION_NUM_TYPES];
+
+ memset(entry, 0, sizeof(memorystatus_jetsam_snapshot_entry_t));
+
+ entry->pid = p->p_pid;
+ strlcpy(&entry->name[0], p->p_name, sizeof(entry->name));
+ entry->priority = p->p_memstat_effectivepriority;
+
+ memorystatus_get_task_page_counts(p->task, &pages, &max_pages_lifetime, &purgeable_pages);
+ entry->pages = (uint64_t)pages;
+ entry->max_pages_lifetime = (uint64_t)max_pages_lifetime;
+ entry->purgeable_pages = (uint64_t)purgeable_pages;
+
+ memorystatus_get_task_phys_footprint_page_counts(p->task, &internal_pages, &internal_compressed_pages,
+ &purgeable_nonvolatile_pages, &purgeable_nonvolatile_compressed_pages,
+ &alternate_accounting_pages, &alternate_accounting_compressed_pages,
+ &iokit_mapped_pages, &page_table_pages);
+
+ entry->jse_internal_pages = internal_pages;
+ entry->jse_internal_compressed_pages = internal_compressed_pages;
+ entry->jse_purgeable_nonvolatile_pages = purgeable_nonvolatile_pages;
+ entry->jse_purgeable_nonvolatile_compressed_pages = purgeable_nonvolatile_compressed_pages;
+ entry->jse_alternate_accounting_pages = alternate_accounting_pages;
+ entry->jse_alternate_accounting_compressed_pages = alternate_accounting_compressed_pages;
+ entry->jse_iokit_mapped_pages = iokit_mapped_pages;
+ entry->jse_page_table_pages = page_table_pages;
+
+ memorystatus_get_task_memory_region_count(p->task, &region_count);
+ entry->jse_memory_region_count = region_count;
+
+ entry->state = memorystatus_build_state(p);
+ entry->user_data = p->p_memstat_userdata;
+ memcpy(&entry->uuid[0], &p->p_uuid[0], sizeof(p->p_uuid));
+ entry->fds = p->p_fd->fd_nfiles;
+
+ absolutetime_to_microtime(get_task_cpu_time(p->task), &tv_sec, &tv_usec);
+ entry->cpu_time.tv_sec = (int64_t)tv_sec;
+ entry->cpu_time.tv_usec = (int64_t)tv_usec;
+
+ assert(p->p_stats != NULL);
+ entry->jse_starttime = p->p_stats->ps_start; /* abstime process started */
+ entry->jse_killtime = 0; /* abstime jetsam chose to kill process */
+ entry->killed = 0; /* the jetsam kill cause */
+ entry->jse_gencount = gencount; /* indicates a pass through jetsam thread, when process was targeted to be killed */
+
+ entry->jse_idle_delta = p->p_memstat_idle_delta; /* Most recent timespan spent in idle-band */
+
+#if CONFIG_FREEZE
+ entry->jse_thaw_count = p->p_memstat_thaw_count;
+#else /* CONFIG_FREEZE */
+ entry->jse_thaw_count = 0;
+#endif /* CONFIG_FREEZE */
+
+ proc_coalitionids(p, cids);
+ entry->jse_coalition_jetsam_id = cids[COALITION_TYPE_JETSAM];
+
+ return TRUE;
+}
+
+static void
+memorystatus_init_snapshot_vmstats(memorystatus_jetsam_snapshot_t *snapshot)
+{
+ kern_return_t kr = KERN_SUCCESS;
+ mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
+ vm_statistics64_data_t vm_stat;
+
+ if ((kr = host_statistics64(host_self(), HOST_VM_INFO64, (host_info64_t)&vm_stat, &count)) != KERN_SUCCESS) {
+ printf("memorystatus_init_jetsam_snapshot_stats: host_statistics64 failed with %d\n", kr);
+ memset(&snapshot->stats, 0, sizeof(snapshot->stats));
+ } else {
+ snapshot->stats.free_pages = vm_stat.free_count;
+ snapshot->stats.active_pages = vm_stat.active_count;
+ snapshot->stats.inactive_pages = vm_stat.inactive_count;
+ snapshot->stats.throttled_pages = vm_stat.throttled_count;
+ snapshot->stats.purgeable_pages = vm_stat.purgeable_count;
+ snapshot->stats.wired_pages = vm_stat.wire_count;
+
+ snapshot->stats.speculative_pages = vm_stat.speculative_count;
+ snapshot->stats.filebacked_pages = vm_stat.external_page_count;
+ snapshot->stats.anonymous_pages = vm_stat.internal_page_count;
+ snapshot->stats.compressions = vm_stat.compressions;
+ snapshot->stats.decompressions = vm_stat.decompressions;
+ snapshot->stats.compressor_pages = vm_stat.compressor_page_count;
+ snapshot->stats.total_uncompressed_pages_in_compressor = vm_stat.total_uncompressed_pages_in_compressor;
+ }
+
+ get_zone_map_size(&snapshot->stats.zone_map_size, &snapshot->stats.zone_map_capacity);
+ get_largest_zone_info(snapshot->stats.largest_zone_name, sizeof(snapshot->stats.largest_zone_name),
+ &snapshot->stats.largest_zone_size);
+}
+
+/*
+ * Collect vm statistics at boot.
+ * Called only once (see kern_exec.c)
+ * Data can be consumed at any time.
+ */
+void
+memorystatus_init_at_boot_snapshot()
+{
+ memorystatus_init_snapshot_vmstats(&memorystatus_at_boot_snapshot);
+ memorystatus_at_boot_snapshot.entry_count = 0;
+ memorystatus_at_boot_snapshot.notification_time = 0; /* updated when consumed */
+ memorystatus_at_boot_snapshot.snapshot_time = mach_absolute_time();
+}
+
+static void
+memorystatus_init_jetsam_snapshot_locked(memorystatus_jetsam_snapshot_t *od_snapshot, uint32_t ods_list_count)
+{
+ proc_t p, next_p;
+ unsigned int b = 0, i = 0;
+
+ memorystatus_jetsam_snapshot_t *snapshot = NULL;
+ memorystatus_jetsam_snapshot_entry_t *snapshot_list = NULL;
+ unsigned int snapshot_max = 0;
+
+ LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED);
+
+ if (od_snapshot) {
+ /*
+ * This is an on_demand snapshot
+ */
+ snapshot = od_snapshot;
+ snapshot_list = od_snapshot->entries;
+ snapshot_max = ods_list_count;
+ } else {
+ /*
+ * This is a jetsam event snapshot
+ */
+ snapshot = memorystatus_jetsam_snapshot;
+ snapshot_list = memorystatus_jetsam_snapshot->entries;
+ snapshot_max = memorystatus_jetsam_snapshot_max;
+ }
+
+ /*
+ * Init the snapshot header information
+ */
+ memorystatus_init_snapshot_vmstats(snapshot);
+ snapshot->snapshot_time = mach_absolute_time();
+ snapshot->notification_time = 0;
+ snapshot->js_gencount = 0;
+
+ next_p = memorystatus_get_first_proc_locked(&b, TRUE);
+ while (next_p) {
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&b, p, TRUE);
+
+ if (FALSE == memorystatus_init_jetsam_snapshot_entry_locked(p, &snapshot_list[i], snapshot->js_gencount)) {
+ continue;
+ }
+
+ MEMORYSTATUS_DEBUG(0, "jetsam snapshot pid %d, uuid = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ p->p_pid,
+ p->p_uuid[0], p->p_uuid[1], p->p_uuid[2], p->p_uuid[3], p->p_uuid[4], p->p_uuid[5], p->p_uuid[6], p->p_uuid[7],
+ p->p_uuid[8], p->p_uuid[9], p->p_uuid[10], p->p_uuid[11], p->p_uuid[12], p->p_uuid[13], p->p_uuid[14], p->p_uuid[15]);
+
+ if (++i == snapshot_max) {
+ break;
+ }
+ }
+
+ snapshot->entry_count = i;
+
+ if (!od_snapshot) {
+ /* update the system buffer count */
+ memorystatus_jetsam_snapshot_count = i;
+ }
+}
+
+#if DEVELOPMENT || DEBUG
+
+#if CONFIG_JETSAM
+static int
+memorystatus_cmd_set_panic_bits(user_addr_t buffer, uint32_t buffer_size)
+{
+ int ret;
+ memorystatus_jetsam_panic_options_t debug;
+
+ if (buffer_size != sizeof(memorystatus_jetsam_panic_options_t)) {
+ return EINVAL;
+ }
+
+ ret = copyin(buffer, &debug, buffer_size);
+ if (ret) {
+ return ret;
+ }
+
+ /* Panic bits match kMemorystatusKilled* enum */
+ memorystatus_jetsam_panic_debug = (memorystatus_jetsam_panic_debug & ~debug.mask) | (debug.data & debug.mask);
+
+ /* Copyout new value */
+ debug.data = memorystatus_jetsam_panic_debug;
+ ret = copyout(&debug, buffer, sizeof(memorystatus_jetsam_panic_options_t));
+
+ return ret;
+}
+#endif /* CONFIG_JETSAM */
+
+/*
+ * Triggers a sort_order on a specified jetsam priority band.
+ * This is for testing only, used to force a path through the sort
+ * function.
+ */
+static int
+memorystatus_cmd_test_jetsam_sort(int priority, int sort_order)
+{
+ int error = 0;
+
+ unsigned int bucket_index = 0;
+
+ if (priority == -1) {
+ /* Use as shorthand for default priority */
+ bucket_index = JETSAM_PRIORITY_DEFAULT;
+ } else {
+ bucket_index = (unsigned int)priority;
+ }
+
+ error = memorystatus_sort_bucket(bucket_index, sort_order);
+
+ return error;
+}
+
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * Prepare the process to be killed (set state, update snapshot) and kill it.
+ */
+static uint64_t memorystatus_purge_before_jetsam_success = 0;
+
+static boolean_t
+memorystatus_kill_proc(proc_t p, uint32_t cause, os_reason_t jetsam_reason, boolean_t *killed)
+{
+ pid_t aPid = 0;
+ uint32_t aPid_ep = 0;
+
+ uint64_t killtime = 0;
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+ uint32_t tv_msec;
+ boolean_t retval = FALSE;
+ uint64_t num_pages_purged = 0;
+
+ aPid = p->p_pid;
+ aPid_ep = p->p_memstat_effectivepriority;
+
+ if (cause != kMemorystatusKilledVnodes && cause != kMemorystatusKilledZoneMapExhaustion) {
+ /*
+ * Genuine memory pressure and not other (vnode/zone) resource exhaustion.
+ */
+ boolean_t success = FALSE;
+
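+ /*
+ * Give the networking stack a chance to react to the impending kill,
+ * then try to purge the task's owned volatile purgeable memory before
+ * resorting to killing it.
+ */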
+ networking_memstatus_callout(p, cause);
+ num_pages_purged = vm_purgeable_purge_task_owned(p->task);
+
+ if (num_pages_purged) {
+ /*
+ * We actually purged something and so let's
+ * check if we need to continue with the kill.
+ */
+ if (cause == kMemorystatusKilledHiwat) {
+ uint64_t footprint_in_bytes = get_task_phys_footprint(p->task);
+ uint64_t memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */
+ success = (footprint_in_bytes <= memlimit_in_bytes);
+ } else {
+ success = (memorystatus_avail_pages_below_pressure() == FALSE);
+ }
+
+ if (success) {
+ memorystatus_purge_before_jetsam_success++;
+
+ os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: purged %llu pages from pid %d [%s] and avoided %s\n",
+ num_pages_purged, aPid, (*p->p_name ? p->p_name : "unknown"), memorystatus_kill_cause_name[cause]);
+
+ *killed = FALSE;
+
+ return TRUE;
+ }
+ }
+ }
+
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+ /* footprint_in_bytes above was scoped to the hiwat case; recompute it here */
+ uint64_t footprint_in_bytes = get_task_phys_footprint(p->task);
+ MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %lld Mb > 1 (%d Mb)\n",
+ (memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending" : "killing",
+ aPid, (*p->p_name ? p->p_name : "unknown"),
+ (footprint_in_bytes / (1024ULL * 1024ULL)), /* converted bytes to MB */
+ p->p_memstat_memlimit);
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+
+ killtime = mach_absolute_time();
+ absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
+ tv_msec = tv_usec / 1000;
+
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+ if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
+ if (cause == kMemorystatusKilledHiwat) {
+ MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] for diagnosis - memorystatus_available_pages: %d\n",
+ aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages);
+ } else {
+ int activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND;
+ if (activeProcess) {
+ MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] (active) for diagnosis - memorystatus_available_pages: %d\n",
+ aPid, (*p->p_name ? p->p_name: "(unknown)"), memorystatus_available_pages);
+
+ if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) {
+ jetsam_diagnostic_suspended_one_active_proc = 1;
+ printf("jetsam: returning after suspending first active proc - %d\n", aPid);
+ }
+ }
+ }
+
+ proc_list_lock();
+ /* This diagnostic code is going away soon. Ignore the kMemorystatusInvalid cause here. */
+ memorystatus_update_jetsam_snapshot_entry_locked(p, kMemorystatusInvalid, killtime);
+ proc_list_unlock();
+
+ p->p_memstat_state |= P_MEMSTAT_DIAG_SUSPENDED;
+
+ /* 'p' is non-NULL here; it was dereferenced just above */
+ task_suspend(p->task);
+ *killed = TRUE;
+ } else
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+ {
+ proc_list_lock();
+ memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
+ proc_list_unlock();
+
+ char kill_reason_string[128];
+
+ if (cause == kMemorystatusKilledHiwat) {
+ strlcpy(kill_reason_string, "killing_highwater_process", 128);
+ } else {
+ if (aPid_ep == JETSAM_PRIORITY_IDLE) {
+ strlcpy(kill_reason_string, "killing_idle_process", 128);
+ } else {
+ strlcpy(kill_reason_string, "killing_top_process", 128);
+ }
+ }
+
+ os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: %s pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n",
+ (unsigned long)tv_sec, tv_msec, kill_reason_string,
+ aPid, (*p->p_name ? p->p_name : "unknown"),
+ memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages);
+
+ /*
+ * memorystatus_do_kill drops a reference, so take another one so we can
+ * continue to use this exit reason even after memorystatus_do_kill()
+ * returns
+ */
+ os_reason_ref(jetsam_reason);
+
+ retval = memorystatus_do_kill(p, cause, jetsam_reason);
+
+ *killed = retval;
+ }
+
+ return retval;
+}
+
+/*
+ * Jetsam the first process in the queue.
+ */
+static boolean_t
+memorystatus_kill_top_process(boolean_t any, boolean_t sort_flag, uint32_t cause, os_reason_t jetsam_reason,
+ int32_t *priority, uint32_t *errors)
+{
+ pid_t aPid;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ boolean_t new_snapshot = FALSE, force_new_snapshot = FALSE, killed = FALSE, freed_mem = FALSE;
+ unsigned int i = 0;
+ uint32_t aPid_ep;
+ int32_t local_max_kill_prio = JETSAM_PRIORITY_IDLE;
+
+#ifndef CONFIG_FREEZE
+#pragma unused(any)
+#endif
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START,
+ memorystatus_available_pages, 0, 0, 0, 0);
+
+
+#if CONFIG_JETSAM
+ if (sort_flag == TRUE) {
+ (void)memorystatus_sort_bucket(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_DEFAULT);
+ }
+
+ local_max_kill_prio = max_kill_priority;
+
+ force_new_snapshot = FALSE;
+
+#else /* CONFIG_JETSAM */
+
+ if (sort_flag == TRUE) {
+ (void)memorystatus_sort_bucket(JETSAM_PRIORITY_IDLE, JETSAM_SORT_DEFAULT);
+ }
+
+ /*
+ * On macOS, we currently only have 2 reasons to be here:
+ *
+ * kMemorystatusKilledZoneMapExhaustion
+ * AND
+ * kMemorystatusKilledVMCompressorSpaceShortage
+ *
+ * If we are here because of kMemorystatusKilledZoneMapExhaustion, we will consider
+ * any and all processes as eligible kill candidates since we need to avoid a panic.
+ *
+ * Since this function can be called asynchronously, it is harder to toggle the max_kill_priority
+ * value before and after a call. And so we use this local variable to set the upper band
+ * on the eligible kill bands.
+ */
+ if (cause == kMemorystatusKilledZoneMapExhaustion) {
+ local_max_kill_prio = JETSAM_PRIORITY_MAX;
+ } else {
+ local_max_kill_prio = max_kill_priority;
+ }
+
+ /*
+ * And, because we are here under extreme circumstances, we force a snapshot even for
+ * IDLE kills.
+ */
+ force_new_snapshot = TRUE;
+
+#endif /* CONFIG_JETSAM */
+
+ proc_list_lock();
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (next_p && (next_p->p_memstat_effectivepriority <= local_max_kill_prio)) {
+#if DEVELOPMENT || DEBUG
+ int procSuspendedForDiagnosis;
+#endif /* DEVELOPMENT || DEBUG */
+
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+
+#if DEVELOPMENT || DEBUG
+ procSuspendedForDiagnosis = p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED;
+#endif /* DEVELOPMENT || DEBUG */
+
+ aPid = p->p_pid;
+ aPid_ep = p->p_memstat_effectivepriority;
+
+ if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) {
+ continue; /* with lock held */
+ }
+
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+ if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && procSuspendedForDiagnosis) {
+ printf("jetsam: continuing after ignoring proc suspended already for diagnosis - %d\n", aPid);
+ continue;
+ }
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+
+ if (cause == kMemorystatusKilledVnodes) {
+ /*
+ * If the system runs out of vnodes, we systematically jetsam
+ * processes in hopes of stumbling onto a vnode gain that helps
+ * the system recover. The process that happens to trigger
+ * this path has no known relationship to the vnode shortage.
+ * Deadlock avoidance: attempt to safeguard the caller.
+ */
+
+ if (p == current_proc()) {
+ /* do not jetsam the current process */
+ continue;
+ }
+ }
+
+#if CONFIG_FREEZE
+ boolean_t skip;
+ boolean_t reclaim_proc = !(p->p_memstat_state & P_MEMSTAT_LOCKED);
+ if (any || reclaim_proc) {
+ skip = FALSE;
+ } else {
+ skip = TRUE;
+ }
+
+ if (skip) {
+ continue;
+ } else
+#endif
+ {
+ if (proc_ref_locked(p) == p) {
+ /*
+ * Mark as terminated so that if exit1() indicates success, but the process (for example)
+ * is blocked in task_exception_notify(), it'll be skipped if encountered again - see
+ * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the
+ * acquisition of the proc lock.
+ */
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+ } else {
+ /*
+ * We need to restart the search again because
+ * proc_ref_locked _can_ drop the proc_list lock
+ * and we could have lost our stored next_p via
+ * an exit() on another core.
+ */
+ i = 0;
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ continue;
+ }
+
+ /*
+ * Capture a snapshot if none exists and any of the following holds:
+ * - we are forcing a new snapshot creation, either because:
+ * - on a particular platform we need these snapshots every time, OR
+ * - a boot-arg/embedded device tree property has been set.
+ * - priority was not requested (this is something other than an ambient kill)
+ * - the priority was requested *and* the targeted process is not at idle priority
+ */
+ if ((memorystatus_jetsam_snapshot_count == 0) &&
+ (force_new_snapshot || memorystatus_idle_snapshot || ((!priority) || (priority && (aPid_ep != JETSAM_PRIORITY_IDLE))))) {
+ memorystatus_init_jetsam_snapshot_locked(NULL, 0);
+ new_snapshot = TRUE;
+ }
+
+ proc_list_unlock();
+
+ freed_mem = memorystatus_kill_proc(p, cause, jetsam_reason, &killed); /* purged and/or killed 'p' */
+ /* Success? */
+ if (freed_mem) {
+ if (killed) {
+ if (priority) {
+ *priority = aPid_ep;
+ }
+ } else {
+ /* purged */
+ proc_list_lock();
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ proc_list_unlock();
+ }
+ proc_rele(p);
+ goto exit;
+ }
+
+ /*
+ * Failure - first unwind the state,
+ * then fall through to restart the search.
+ */
+ proc_list_lock();
+ proc_rele_locked(p);
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ p->p_memstat_state |= P_MEMSTAT_ERROR;
+ *errors += 1;
+
+ i = 0;
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ }
+ }
+
+ proc_list_unlock();
+
+exit:
+ os_reason_free(jetsam_reason);
+
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && !killed) {
+ proc_list_lock();
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ proc_list_unlock();
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0);
+
+ return killed;
+}
+
+/*
+ * Jetsam aggressively
+ */
+static boolean_t
+memorystatus_kill_top_process_aggressive(uint32_t cause, int aggr_count,
+ int32_t priority_max, uint32_t *errors)
+{
+ pid_t aPid;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ boolean_t new_snapshot = FALSE, killed = FALSE;
+ int kill_count = 0;
+ unsigned int i = 0;
+ int32_t aPid_ep = 0;
+ unsigned int memorystatus_level_snapshot = 0;
+ uint64_t killtime = 0;
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+ uint32_t tv_msec;
+ os_reason_t jetsam_reason = OS_REASON_NULL;
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START,
+ memorystatus_available_pages, priority_max, 0, 0, 0);
+
+ memorystatus_sort_bucket(JETSAM_PRIORITY_FOREGROUND, JETSAM_SORT_DEFAULT);
+
+ jetsam_reason = os_reason_create(OS_REASON_JETSAM, cause);
+ if (jetsam_reason == OS_REASON_NULL) {
+ printf("memorystatus_kill_top_process_aggressive: failed to allocate exit reason\n");
+ }
+
+ proc_list_lock();
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (next_p) {
+#if DEVELOPMENT || DEBUG
+ int activeProcess;
+ int procSuspendedForDiagnosis;
+#endif /* DEVELOPMENT || DEBUG */
+
+ if (((next_p->p_listflag & P_LIST_EXITED) != 0) ||
+ ((unsigned int)(next_p->p_memstat_effectivepriority) != i)) {
+ /*
+ * We have raced with next_p running on another core.
+ * It may be exiting or it may have moved to a different
+ * jetsam priority band. This means we have lost our
+ * place in line while traversing the jetsam list. We
+ * attempt to recover by rewinding to the beginning of the band
+ * we were already traversing. By doing this, we do not guarantee
+ * that no process escapes this aggressive march, but we can make
+ * skipping an entire range of processes less likely. (PR-21069019)
+ */
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus: aggressive%d: rewinding band %d, %s(%d) moved or exiting.\n",
+ aggr_count, i, (*next_p->p_name ? next_p->p_name : "unknown"), next_p->p_pid);
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ continue;
+ }
+
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+
+ if (p->p_memstat_effectivepriority > priority_max) {
+ /*
+ * Bail out of this killing spree if we have
+ * reached beyond the priority_max jetsam band.
+ * That is, we kill up to and through the
+ * priority_max jetsam band.
+ */
+ proc_list_unlock();
+ goto exit;
+ }
+
+#if DEVELOPMENT || DEBUG
+ activeProcess = p->p_memstat_state & P_MEMSTAT_FOREGROUND;
+ procSuspendedForDiagnosis = p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED;
+#endif /* DEVELOPMENT || DEBUG */
+
+ aPid = p->p_pid;
+ aPid_ep = p->p_memstat_effectivepriority;
+
+ if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) {
+ continue;
+ }
+
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+ if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && procSuspendedForDiagnosis) {
+ printf("jetsam: continuing after ignoring proc suspended already for diagnosis - %d\n", aPid);
+ continue;
+ }
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+
+ /*
+ * Capture a snapshot if none exists.
+ */
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ memorystatus_init_jetsam_snapshot_locked(NULL, 0);
+ new_snapshot = TRUE;
+ }
+
+ /*
+ * Mark as terminated so that if exit1() indicates success, but the process (for example)
+ * is blocked in task_exception_notify(), it'll be skipped if encountered again - see
+ * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the
+ * acquisition of the proc lock.
+ */
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+
+ killtime = mach_absolute_time();
+ absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
+ tv_msec = tv_usec / 1000;
+
+ /* Shift queue, update stats */
+ memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
+
+ /*
+ * In order to kill the target process, we will drop the proc_list_lock.
+ * To guarantee that p and next_p don't disappear out from under the lock,
+ * we must take a ref on both.
+ * If we cannot get a reference, then it's likely we've raced with
+ * that process exiting on another core.
+ */
+ if (proc_ref_locked(p) == p) {
+ if (next_p) {
+ while (next_p && (proc_ref_locked(next_p) != next_p)) {
+ proc_t temp_p;
+
+ /*
+ * We must have raced with next_p exiting on another core.
+ * Recover by getting the next eligible process in the band.
+ */
+
+ MEMORYSTATUS_DEBUG(1, "memorystatus: aggressive%d: skipping %d [%s] (exiting?)\n",
+ aggr_count, next_p->p_pid, (*next_p->p_name ? next_p->p_name : "(unknown)"));
+
+ temp_p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, temp_p, TRUE);
+ }
+ }
+ proc_list_unlock();
+
+ printf("%lu.%03d memorystatus: %s%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n",
+ (unsigned long)tv_sec, tv_msec,
+ ((aPid_ep == JETSAM_PRIORITY_IDLE) ? "killing_idle_process_aggressive" : "killing_top_process_aggressive"),
+ aggr_count, aPid, (*p->p_name ? p->p_name : "unknown"),
+ memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages);
+
+ memorystatus_level_snapshot = memorystatus_level;
+
+ /*
+ * memorystatus_do_kill() drops a reference, so take another one so we can
+ * continue to use this exit reason even after memorystatus_do_kill()
+ * returns.
+ */
+ os_reason_ref(jetsam_reason);
+ killed = memorystatus_do_kill(p, cause, jetsam_reason);
+
+ /* Success? */
+ if (killed) {
+ proc_rele(p);
+ kill_count++;
+ p = NULL;
+ killed = FALSE;
+
+ /*
+ * Continue the killing spree.
+ */
+ proc_list_lock();
+ if (next_p) {
+ proc_rele_locked(next_p);
+ }
+
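+ /*
+ * Lenient-mode check: if killing this foreground process recovered at
+ * least AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD percentage points of
+ * memorystatus_level, the one-shot lenient mode has served its purpose,
+ * so disable it and end the aggressive spree.
+ */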
+ if (aPid_ep == JETSAM_PRIORITY_FOREGROUND && memorystatus_aggressive_jetsam_lenient == TRUE) {
+ if (memorystatus_level > memorystatus_level_snapshot && ((memorystatus_level - memorystatus_level_snapshot) >= AGGRESSIVE_JETSAM_LENIENT_MODE_THRESHOLD)) {
+#if DEVELOPMENT || DEBUG
+ printf("Disabling Lenient mode after one-time deployment.\n");
+#endif /* DEVELOPMENT || DEBUG */
+ memorystatus_aggressive_jetsam_lenient = FALSE;
+ break;
+ }
+ }
+
+ continue;
+ }
+
+ /*
+ * Failure - first unwind the state,
+ * then fall through to restart the search.
+ */
+ proc_list_lock();
+ proc_rele_locked(p);
+ if (next_p) {
+ proc_rele_locked(next_p);
+ }
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ p->p_memstat_state |= P_MEMSTAT_ERROR;
+ *errors += 1;
+ p = NULL;
+ }
+
+ /*
+ * Failure - restart the search at the beginning of
+ * the band we were already traversing.
+ *
+ * We might have raced with "p" exiting on another core, resulting in no
+ * ref on "p". Or, we may have failed to kill "p".
+ *
+ * Either way, we fall thru to here, leaving the proc in the
+ * P_MEMSTAT_TERMINATED or P_MEMSTAT_ERROR state.
+ *
+ * And, we hold the proc_list_lock at this point.
+ */
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ }
+
+ proc_list_unlock();
+
+exit:
+ os_reason_free(jetsam_reason);
+
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && (kill_count == 0)) {
+ proc_list_lock();
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ proc_list_unlock();
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0);
+
+ if (kill_count > 0) {
+ return TRUE;
+ } else {
+ return FALSE;
+ }
+}
+
+static boolean_t
+memorystatus_kill_hiwat_proc(uint32_t *errors, boolean_t *purged)
+{
+ pid_t aPid = 0;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ boolean_t new_snapshot = FALSE, killed = FALSE, freed_mem = FALSE;
+ unsigned int i = 0;
+ uint32_t aPid_ep;
+ os_reason_t jetsam_reason = OS_REASON_NULL;
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_START,
+ memorystatus_available_pages, 0, 0, 0, 0);
+
+ jetsam_reason = os_reason_create(OS_REASON_JETSAM, JETSAM_REASON_MEMORY_HIGHWATER);
+ if (jetsam_reason == OS_REASON_NULL) {
+ printf("memorystatus_kill_hiwat_proc: failed to allocate exit reason\n");
+ }
+
+ proc_list_lock();
+
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ while (next_p) {
+ uint64_t footprint_in_bytes = 0;
+ uint64_t memlimit_in_bytes = 0;
+ boolean_t skip = 0;
+
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&i, p, TRUE);
+
+ aPid = p->p_pid;
+ aPid_ep = p->p_memstat_effectivepriority;
+
+ if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) {
+ continue;
+ }
+
+ /* skip if no limit set */
+ if (p->p_memstat_memlimit <= 0) {
+ continue;
+ }
+
+ footprint_in_bytes = get_task_phys_footprint(p->task);
+ memlimit_in_bytes = (((uint64_t)p->p_memstat_memlimit) * 1024ULL * 1024ULL); /* convert MB to bytes */
+ skip = (footprint_in_bytes <= memlimit_in_bytes);
+
+#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
+ if (!skip && (memorystatus_jetsam_policy & kPolicyDiagnoseActive)) {
+ if (p->p_memstat_state & P_MEMSTAT_DIAG_SUSPENDED) {
+ continue;
+ }
+ }
+#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */
+
+#if CONFIG_FREEZE
+ if (!skip) {
+ if (p->p_memstat_state & P_MEMSTAT_LOCKED) {
+ skip = TRUE;
+ } else {
+ skip = FALSE;
+ }
+ }
+#endif
+
+ if (skip) {
+ continue;
+ } else {
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ memorystatus_init_jetsam_snapshot_locked(NULL, 0);
+ new_snapshot = TRUE;
+ }
+
+ if (proc_ref_locked(p) == p) {
+ /*
+ * Mark as terminated so that if exit1() indicates success, but the process (for example)
+ * is blocked in task_exception_notify(), it'll be skipped if encountered again - see
+ * <rdar://problem/13553476>. This is cheaper than examining P_LEXIT, which requires the
+ * acquisition of the proc lock.
+ */
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+
+ proc_list_unlock();
+ } else {
+ /*
+ * We need to restart the search again because
+ * proc_ref_locked _can_ drop the proc_list lock
+ * and we could have lost our stored next_p via
+ * an exit() on another core.
+ */
+ i = 0;
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ continue;
+ }
+
+ freed_mem = memorystatus_kill_proc(p, kMemorystatusKilledHiwat, jetsam_reason, &killed); /* purged and/or killed 'p' */
+
+ /* Success? */
+ if (freed_mem) {
+ if (killed == FALSE) {
+ /* purged 'p'; don't reset the HWM candidate count */
+ *purged = TRUE;
+
+ proc_list_lock();
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ proc_list_unlock();
+ }
+ proc_rele(p);
+ goto exit;
+ }
+ /*
+ * Failure - first unwind the state,
+ * then fall through to restart the search.
+ */
+ proc_list_lock();
+ proc_rele_locked(p);
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ p->p_memstat_state |= P_MEMSTAT_ERROR;
+ *errors += 1;
+
+ i = 0;
+ next_p = memorystatus_get_first_proc_locked(&i, TRUE);
+ }
+ }
+
+ proc_list_unlock();
+
+exit:
+ os_reason_free(jetsam_reason);
+
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && !killed) {
+ proc_list_lock();
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ proc_list_unlock();
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, 0, 0, 0);
+
+ return killed;
+}
+
+/*
+ * Jetsam a process pinned in the elevated band.
+ *
+ * Return: true -- at least one pinned process was jetsammed
+ * false -- no pinned process was jetsammed
+ */
+static boolean_t
+memorystatus_kill_elevated_process(uint32_t cause, os_reason_t jetsam_reason, unsigned int band, int aggr_count, uint32_t *errors)
+{
+ pid_t aPid = 0;
+ proc_t p = PROC_NULL, next_p = PROC_NULL;
+ boolean_t new_snapshot = FALSE, killed = FALSE;
+ int kill_count = 0;
+ uint32_t aPid_ep;
+ uint64_t killtime = 0;
+ clock_sec_t tv_sec;
+ clock_usec_t tv_usec;
+ uint32_t tv_msec;
+
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_START,
+ memorystatus_available_pages, 0, 0, 0, 0);
+
+#if CONFIG_FREEZE
+ boolean_t consider_frozen_only = FALSE;
+
+ if (band == (unsigned int) memorystatus_freeze_jetsam_band) {
+ consider_frozen_only = TRUE;
+ }
+#endif /* CONFIG_FREEZE */
+
+ proc_list_lock();
+
+ next_p = memorystatus_get_first_proc_locked(&band, FALSE);
+ while (next_p) {
+ p = next_p;
+ next_p = memorystatus_get_next_proc_locked(&band, p, FALSE);
+
+ aPid = p->p_pid;
+ aPid_ep = p->p_memstat_effectivepriority;
+
+ /*
+ * Only pick a process pinned in this elevated band
+ */
+ if (!(p->p_memstat_state & P_MEMSTAT_USE_ELEVATED_INACTIVE_BAND)) {
+ continue;
+ }
+
+ if (p->p_memstat_state & (P_MEMSTAT_ERROR | P_MEMSTAT_TERMINATED)) {
+ continue;
+ }
+
+#if CONFIG_FREEZE
+ if (consider_frozen_only && !(p->p_memstat_state & P_MEMSTAT_FROZEN)) {
+ continue;
+ }
+
+ if (p->p_memstat_state & P_MEMSTAT_LOCKED) {
+ continue;
+ }
+#endif /* CONFIG_FREEZE */
+
+#if DEVELOPMENT || DEBUG
+ MEMORYSTATUS_DEBUG(1, "jetsam: elevated%d process pid %d [%s] - memorystatus_available_pages: %d\n",
+ aggr_count,
+ aPid, (*p->p_name ? p->p_name : "unknown"),
+ memorystatus_available_pages);
+#endif /* DEVELOPMENT || DEBUG */
+
+ if (memorystatus_jetsam_snapshot_count == 0) {
+ memorystatus_init_jetsam_snapshot_locked(NULL, 0);
+ new_snapshot = TRUE;
+ }
+
+ p->p_memstat_state |= P_MEMSTAT_TERMINATED;
+
+ killtime = mach_absolute_time();
+ absolutetime_to_microtime(killtime, &tv_sec, &tv_usec);
+ tv_msec = tv_usec / 1000;
+
+ memorystatus_update_jetsam_snapshot_entry_locked(p, cause, killtime);
+
+ if (proc_ref_locked(p) == p) {
+ proc_list_unlock();
+
+ os_log_with_startup_serial(OS_LOG_DEFAULT, "%lu.%03d memorystatus: killing_top_process_elevated%d pid %d [%s] (%s %d) - memorystatus_available_pages: %llu\n",
+ (unsigned long)tv_sec, tv_msec,
+ aggr_count,
+ aPid, (*p->p_name ? p->p_name : "unknown"),
+ memorystatus_kill_cause_name[cause], aPid_ep, (uint64_t)memorystatus_available_pages);
+
+ /*
+ * memorystatus_do_kill drops a reference, so take another one so we can
+ * continue to use this exit reason even after memorystatus_do_kill()
+ * returns
+ */
+ os_reason_ref(jetsam_reason);
+ killed = memorystatus_do_kill(p, cause, jetsam_reason);
+
+ /* Success? */
+ if (killed) {
+ proc_rele(p);
+ kill_count++;
+ goto exit;
+ }
+
+ /*
+ * Failure - first unwind the state,
+ * then fall through to restart the search.
+ */
+ proc_list_lock();
+ proc_rele_locked(p);
+ p->p_memstat_state &= ~P_MEMSTAT_TERMINATED;
+ p->p_memstat_state |= P_MEMSTAT_ERROR;
+ *errors += 1;
+ }
+
+ /*
+ * Failure - restart the search.
+ *
+ * We might have raced with "p" exiting on another core, resulting in no
+ * ref on "p". Or, we may have failed to kill "p".
+ *
+ * Either way, we fall thru to here, leaving the proc in the
+ * P_MEMSTAT_TERMINATED state or P_MEMSTAT_ERROR state.
+ *
+ * And, we hold the proc_list_lock at this point.
+ */
+
+ next_p = memorystatus_get_first_proc_locked(&band, FALSE);
+ }
+
+ proc_list_unlock();
+
+exit:
+ os_reason_free(jetsam_reason);
+
+ /* Clear snapshot if freshly captured and no target was found */
+ if (new_snapshot && (kill_count == 0)) {
+ proc_list_lock();
+ memorystatus_jetsam_snapshot->entry_count = memorystatus_jetsam_snapshot_count = 0;
+ proc_list_unlock();
+ }
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM) | DBG_FUNC_END,
+ memorystatus_available_pages, killed ? aPid : 0, kill_count, 0, 0);
+
+ return killed;