+static void vm_reset_active_list(void) {
+    /* No-op when CONFIG_MEMORYSTATUS is enabled. */
+}
+
+#if DEVELOPMENT || DEBUG
+
+/* For test purposes only. */
+boolean_t vm_dispatch_pressure_note_to_pid(pid_t pid) {
+    struct knote *kn;
+
+    vm_pressure_klist_lock();
+
+    kn = vm_find_knote_from_pid(pid);
+    if (kn) {
+        KNOTE(&vm_pressure_klist, pid);
+    }
+
+    vm_pressure_klist_unlock();
+
+    return kn ? TRUE : FALSE;
+}
+
+#endif /* DEVELOPMENT || DEBUG */
+
+#else /* CONFIG_MEMORYSTATUS */
+
+static kern_return_t vm_try_pressure_candidates(void)
+{
+    struct knote *kn = NULL, *kn_max = NULL;
+    unsigned int resident_max = 0;
+    pid_t target_pid = -1;
+    struct klist dispatch_klist = { NULL };
+    kern_return_t kr = KERN_SUCCESS;
+    struct timeval curr_tstamp = {0, 0};
+    int elapsed_msecs = 0;
+    proc_t target_proc = PROC_NULL;
+
+    microuptime(&curr_tstamp);
+
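+    /*
+     * Walk every registered knote and pick the eligible process with the
+     * largest resident size as the candidate to notify.
+     */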
+    SLIST_FOREACH(kn, &vm_pressure_klist, kn_selnext) {
+        struct mach_task_basic_info basic_info;
+        mach_msg_type_number_t size = MACH_TASK_BASIC_INFO_COUNT;
+        unsigned int resident_size = 0;
+        proc_t p = PROC_NULL;
+        struct task *t = TASK_NULL;
+        struct timeval tv_delta;
+
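+        /*
+         * Take a reference on the process so it cannot exit underneath us;
+         * skip it if the reference cannot be taken.
+         */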
+        p = kn->kn_kq->kq_p;
+        proc_list_lock();
+        if (p != proc_ref_locked(p)) {
+            p = PROC_NULL;
+            proc_list_unlock();
+            continue;
+        }
+        proc_list_unlock();
+
+        t = (struct task *)(p->task);
+
+        /*
+         * Compute the time elapsed since this process was last notified.
+         * Subtract into a scratch copy so curr_tstamp is not clobbered
+         * for subsequent iterations of the loop.
+         */
+        tv_delta = curr_tstamp;
+        timevalsub(&tv_delta, &p->vm_pressure_last_notify_tstamp);
+        elapsed_msecs = tv_delta.tv_sec * 1000 + tv_delta.tv_usec / 1000;
+
+        if (elapsed_msecs < VM_PRESSURE_NOTIFY_WAIT_PERIOD) {
+            proc_rele(p);
+            continue;
+        }
+
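+        /* Sample the task's basic info; resident_size is what candidates are ranked by. */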
+        if ((kr = task_info(t, MACH_TASK_BASIC_INFO, (task_info_t)(&basic_info), &size)) != KERN_SUCCESS) {
+            VM_PRESSURE_DEBUG(1, "[vm_pressure] task_info for pid %d failed with %d\n", p->p_pid, kr);
+            proc_rele(p);
+            continue;
+        }
+
+        /*
+         * We don't want a small process to block large processes from
+         * being notified again. <rdar://problem/7955532>
+         */
+        resident_size = (basic_info.resident_size) / (MB);
+        if (resident_size >= VM_PRESSURE_MINIMUM_RSIZE) {
+            /* Track the largest qualifying process seen so far. */
+            if (resident_size > resident_max) {
+                resident_max = resident_size;
+                kn_max = kn;
+                target_pid = p->p_pid;
+            }
+        } else {
+            /* This process is below the minimum resident size to qualify as a candidate. */
+            VM_PRESSURE_DEBUG(0, "[vm_pressure] threshold failed for pid %d with %u resident...\n", p->p_pid, resident_size);
+        }
+        proc_rele(p);
+    }
+
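+    /* No eligible candidate was found on this pass. */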
+    if (kn_max == NULL || target_pid == -1) {
+        return KERN_FAILURE;
+    }
+
+    VM_DEBUG_EVENT(vm_pageout_scan, VM_PRESSURE_EVENT, DBG_FUNC_NONE, target_pid, resident_max, 0, 0);
+    VM_PRESSURE_DEBUG(1, "[vm_pressure] sending event to pid %d with %u resident\n", target_pid, resident_max);
+
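+    /*
+     * Detach the winning knote from the active list; it is parked on the
+     * dormant list below so it is not considered again on the next pass.
+     */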
+    KNOTE_DETACH(&vm_pressure_klist, kn_max);
+
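+    /* Re-look-up the target by pid; it may have exited since its reference was dropped. */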
+    target_proc = proc_find(target_pid);
+    if (target_proc != PROC_NULL) {
+        KNOTE_ATTACH(&dispatch_klist, kn_max);
+        KNOTE(&dispatch_klist, target_pid);
+        KNOTE_ATTACH(&vm_pressure_klist_dormant, kn_max);
+
+        microuptime(&target_proc->vm_pressure_last_notify_tstamp);
+        proc_rele(target_proc);
+    }
+
+    return KERN_SUCCESS;
+}