+#if CONFIG_FREEZE
+
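+/* Initialize hibernation locking and the candidate tree, then start the hibernation thread. */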
+__private_extern__ void
+kern_hibernation_init(void)
+{
+ hibernation_lck_attr = lck_attr_alloc_init();
+ hibernation_lck_grp_attr = lck_grp_attr_alloc_init();
+ hibernation_lck_grp = lck_grp_alloc_init("hibernation", hibernation_lck_grp_attr);
+ hibernation_mlock = lck_mtx_alloc_init(hibernation_lck_grp, hibernation_lck_attr);
+
+ RB_INIT(&hibernation_tree_head);
+
+ (void)kernel_thread(kernel_task, kern_hibernation_thread);
+}
+
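+/* TRUE if there are enough suspended-but-not-yet-hibernated processes to be worth hibernating. */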
+static inline boolean_t
+kern_hibernation_can_hibernate_processes(void)
+{
+ boolean_t ret;
+
+ lck_mtx_lock_spin(hibernation_mlock);
+ ret = (kern_memorystatus_suspended_count - kern_memorystatus_hibernated_count) >
+ kern_memorystatus_hibernation_suspended_minimum ? TRUE : FALSE;
+ lck_mtx_unlock(hibernation_mlock);
+
+ return ret;
+}
+
+static boolean_t
+kern_hibernation_can_hibernate(void)
+{
+ /* Only hibernate if we're sufficiently low on memory; this holds off hibernation right after boot,
+ and is generally a no-op once we've reached steady state. */
+ if (kern_memorystatus_level > kern_memorystatus_level_hibernate) {
+ return FALSE;
+ }
+
+ /* Check minimum suspended process threshold. */
+ if (!kern_hibernation_can_hibernate_processes()) {
+ return FALSE;
+ }
+
+ /* Is swap running low? */
+ if (kern_memorystatus_low_swap_pages) {
+ /* If there's been no movement in free swap pages since we last attempted hibernation, return. */
+ if (default_pager_swap_pages_free() <= kern_memorystatus_low_swap_pages) {
+ return FALSE;
+ }
+
+ /* Pages have been freed, so we can retry. */
+ kern_memorystatus_low_swap_pages = 0;
+ }
+
+ /* OK */
+ return TRUE;
+}
+
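+/* Track a new suspended process in the hibernation tree. */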
+static void
+kern_hibernation_add_node(hibernation_node *node)
+{
+ lck_mtx_lock_spin(hibernation_mlock);
+
+ RB_INSERT(hibernation_tree, &hibernation_tree_head, node);
+ kern_memorystatus_suspended_count++;
+
+ lck_mtx_unlock(hibernation_mlock);
+}
+
+/* Returns with the hibernation lock taken */
+static hibernation_node *
+kern_hibernation_get_node(pid_t pid)
+{
+ hibernation_node sought, *found;
+ sought.pid = pid;
+ lck_mtx_lock_spin(hibernation_mlock);
+ found = RB_FIND(hibernation_tree, &hibernation_tree_head, &sought);
+ if (!found) {
+ lck_mtx_unlock(hibernation_mlock);
+ }
+ return found;
+}
+
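+/* Drop the hibernation lock taken by kern_hibernation_get_node() */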
+static void
+kern_hibernation_release_node(hibernation_node *node)
+{
+#pragma unused(node)
+ lck_mtx_unlock(hibernation_mlock);
+}
+
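+/* Remove a node from the tree, update the counts and free it; optionally drops the hibernation lock. */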
+static void
+kern_hibernation_free_node(hibernation_node *node, boolean_t unlock)
+{
+ /* make sure we're called with the hibernation_mlock held */
+ lck_mtx_assert(hibernation_mlock, LCK_MTX_ASSERT_OWNED);
+
+ if (node->state & (kProcessHibernated | kProcessIgnored)) {
+ kern_memorystatus_hibernated_count--;
+ }
+
+ kern_memorystatus_suspended_count--;
+
+ RB_REMOVE(hibernation_tree, &hibernation_tree_head, node);
+ kfree(node, sizeof(hibernation_node));
+
+ if (unlock) {
+ lck_mtx_unlock(hibernation_mlock);
+ }
+}
+
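+/* Begin tracking a newly suspended process as a hibernation candidate, recording the suspension time. */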
+static void
+kern_hibernation_register_pid(pid_t pid)
+{
+ hibernation_node *node;
+
+#if DEVELOPMENT || DEBUG
+ node = kern_hibernation_get_node(pid);
+ if (node) {
+ printf("kern_hibernation_register_pid: pid %d already registered!\n", pid);
+ kern_hibernation_release_node(node);
+ return;
+ }
+#endif
+
+ /* Register as a candidate for hibernation */
+ node = (hibernation_node *)kalloc(sizeof(hibernation_node));
+ if (node) {
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ mach_timespec_t ts;
+
+ memset(node, 0, sizeof(hibernation_node));
+
+ node->pid = pid;
+ node->state = kProcessSuspended;
+
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = sec;
+ ts.tv_nsec = nsec;
+
+ node->hibernation_ts = ts;
+
+ kern_hibernation_add_node(node);
+ }
+}
+
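+/* Stop tracking a process. */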
+static void
+kern_hibernation_unregister_pid(pid_t pid)
+{
+ hibernation_node *node;
+
+ node = kern_hibernation_get_node(pid);
+ if (node) {
+ kern_hibernation_free_node(node, TRUE);
+ }
+}
+
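+/* Suspension hook: register the process as a hibernation candidate. */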
+void
+kern_hibernation_on_pid_suspend(pid_t pid)
+{
+ kern_hibernation_register_pid(pid);
+}
+
+/* If enabled, we bring all the hibernated pages back prior to resumption; otherwise, they're faulted back in on demand */
+#define THAW_ON_RESUME 1
+
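+/* Resumption hook: thaw the process if it was hibernated (with THAW_ON_RESUME), then stop tracking it. */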
+void
+kern_hibernation_on_pid_resume(pid_t pid, task_t task)
+{
+#if THAW_ON_RESUME
+ hibernation_node *node;
+ if ((node = kern_hibernation_get_node(pid))) {
+ if (node->state & kProcessHibernated) {
+ node->state |= kProcessBusy;
+ kern_hibernation_release_node(node);
+ task_thaw(task);
+ jetsam_send_hibernation_note(kJetsamFlagsThawed, pid, 0);
+ } else {
+ kern_hibernation_release_node(node);
+ }
+ }
+#else
+#pragma unused(task)
+#endif
+ kern_hibernation_unregister_pid(pid);
+}
+
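+/* Hibernation request hook: wake the hibernation thread to do the work. */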
+void
+kern_hibernation_on_pid_hibernate(pid_t pid)
+{
+#pragma unused(pid)
+
+ /* Wake the hibernation thread */
+ thread_wakeup((event_t)&kern_hibernation_wakeup);
+}
+
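+/* Look up the tracked state (and, optionally, the suspension timestamp) for a pid; returns ESRCH if not tracked. */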
+static int
+kern_hibernation_get_process_state(pid_t pid, uint32_t *state, mach_timespec_t *ts)
+{
+ hibernation_node *found;
+ int err = ESRCH;
+
+ *state = 0;
+
+ found = kern_hibernation_get_node(pid);
+ if (found) {
+ *state = found->state;
+ if (ts) {
+ *ts = found->hibernation_ts;
+ }
+ err = 0;
+ kern_hibernation_release_node(found);
+ }
+
+ return err;
+}
+
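+/* Replace the tracked state for a pid; returns ESRCH if not tracked. */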
+static int
+kern_hibernation_set_process_state(pid_t pid, uint32_t state)
+{
+ hibernation_node *found;
+ int err = ESRCH;
+
+ found = kern_hibernation_get_node(pid);
+ if (found) {
+ found->state = state;
+ err = 0;
+ kern_hibernation_release_node(found);
+ }
+
+ return err;
+}
+
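+/* Roll a throttle interval forward once it expires, and flag it as throttled if its pageout budget is exhausted. */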
+static void
+kern_hibernation_update_throttle_interval(mach_timespec_t *ts, struct throttle_interval_t *interval)
+{
+ if (CMP_MACH_TIMESPEC(ts, &interval->ts) >= 0) {
+ if (!interval->max_pageouts) {
+ interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * HIBERNATION_DAILY_PAGEOUTS_MAX) / (24 * 60)));
+ } else {
+ printf("jetsam: %d minute throttle timeout, resetting\n", interval->mins);
+ }
+ interval->ts.tv_sec = interval->mins * 60;
+ interval->ts.tv_nsec = 0;
+ ADD_MACH_TIMESPEC(&interval->ts, ts);
+ /* Since we update the throttle stats pre-hibernation, adjust for overshoot here */
+ if (interval->pageouts > interval->max_pageouts) {
+ interval->pageouts -= interval->max_pageouts;
+ } else {
+ interval->pageouts = 0;
+ }
+ interval->throttle = FALSE;
+ } else if (!interval->throttle && interval->pageouts >= interval->max_pageouts) {
+ printf("jetsam: %d minute pageout limit exceeded; enabling throttle\n", interval->mins);
+ interval->throttle = TRUE;
+ }
+#ifdef DEBUG
+ printf("jetsam: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n",
+ interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60,
+ interval->throttle ? "on" : "off");
+#endif
+}
+
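+/* Returns TRUE if any throttle interval has exceeded its hibernation pageout budget. */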
+static boolean_t
+kern_hibernation_throttle_update(void)
+{
+ clock_sec_t sec;
+ clock_nsec_t nsec;
+ mach_timespec_t ts;
+ uint32_t i;
+ boolean_t throttled = FALSE;
+
+#if DEVELOPMENT || DEBUG
+ if (!kern_memorystatus_hibernation_throttle_enabled)
+ return FALSE;
+#endif
+
+ clock_get_system_nanotime(&sec, &nsec);
+ ts.tv_sec = sec;
+ ts.tv_nsec = nsec;
+
+ /* Check hibernation pageouts over multiple intervals and throttle if we've exceeded our budget.
+ *
+ * This ensures that periods of inactivity can't be used as 'credit' towards hibernation if the device has
+ * remained dormant for a long period. We do, however, allow increased thresholds for shorter intervals in
+ * order to allow for bursts of activity.
+ */
+ for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
+ kern_hibernation_update_throttle_interval(&ts, &throttle_intervals[i]);
+ if (throttle_intervals[i].throttle == TRUE)
+ throttled = TRUE;
+ }
+
+ return throttled;
+}
+
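+/* Prune tree entries for processes that have exited. */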
+static void
+kern_hibernation_cull(void)
+{
+ hibernation_node *node, *next;
+ lck_mtx_lock(hibernation_mlock);
+
+ for (node = RB_MIN(hibernation_tree, &hibernation_tree_head); node != NULL; node = next) {
+ proc_t p;
+
+ next = RB_NEXT(hibernation_tree, &hibernation_tree_head, node);
+
+ /* TODO: probably suboptimal, so revisit should it cause a performance issue */
+ p = proc_find(node->pid);
+ if (p) {
+ proc_rele(p);
+ } else {
+ kern_hibernation_free_node(node, FALSE);
+ }
+ }
+
+ lck_mtx_unlock(hibernation_mlock);
+}
+
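+/* Hibernation thread: cull dead entries and hibernate the top candidate when memory, swap and pageout budgets allow. */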
+static void
+kern_hibernation_thread(void)
+{
+ if (vm_freeze_enabled) {
+ if (kern_hibernation_can_hibernate()) {
+
+ /* Cull dead processes */
+ kern_hibernation_cull();
+
+ /* Only hibernate if we've not exceeded our pageout budgets */
+ if (!kern_hibernation_throttle_update()) {
+ jetsam_hibernate_top_proc();
+ } else {
+ printf("kern_hibernation_thread: in throttle, ignoring hibernation\n");
+ kern_memorystatus_hibernation_throttle_count++; /* Throttled, update stats */
+ }
+ }
+ }
+
+ assert_wait((event_t) &kern_hibernation_wakeup, THREAD_UNINT);
+ thread_block((thread_continue_t) kern_hibernation_thread);
+}
+
+#endif /* CONFIG_FREEZE */
+