/* apple/xnu (tag xnu-2050.48.11) - bsd/kern/kern_memorystatus.c */
/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 */

#include <kern/sched_prim.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/host.h>
#include <libkern/libkern.h>
#include <mach/mach_time.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/host_priv.h>
#include <sys/kern_event.h>
#include <sys/proc.h>
#include <sys/signal.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/wait.h>
#include <sys/tree.h>
#include <sys/priv.h>
#include <pexpert/pexpert.h>

#if CONFIG_FREEZE
#include <vm/vm_protos.h>
#include <vm/vm_map.h>
#endif

#include <sys/kern_memorystatus.h>

/* These are very verbose printf()s; enable with
 * MEMORYSTATUS_DEBUG_LOG
 */
#if MEMORYSTATUS_DEBUG_LOG
#define MEMORYSTATUS_DEBUG(cond, format, ...) \
do { \
	if (cond) { printf(format, ##__VA_ARGS__); } \
} while(0)
#else
#define MEMORYSTATUS_DEBUG(cond, format, ...)
#endif
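
/*
 * Example usage (compiled in only when MEMORYSTATUS_DEBUG_LOG is defined):
 *
 *	MEMORYSTATUS_DEBUG(1, "memorystatus: pid %d changed priority\n", pid);
 *
 * The first argument gates the printf() per call site, so individual
 * messages can be silenced without rebuilding the file.
 */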

/* General memorystatus stuff */

static void memorystatus_add_node(memorystatus_node *node);
static void memorystatus_remove_node(memorystatus_node *node);
static memorystatus_node *memorystatus_get_node(pid_t pid);
static void memorystatus_release_node(memorystatus_node *node);

int memorystatus_wakeup = 0;

static void memorystatus_thread(void *param __unused, wait_result_t wr __unused);

static memorystatus_node *next_memorystatus_node = NULL;

static int memorystatus_list_count = 0;

static lck_mtx_t * memorystatus_list_mlock;
static lck_attr_t * memorystatus_lck_attr;
static lck_grp_t * memorystatus_lck_grp;
static lck_grp_attr_t * memorystatus_lck_grp_attr;

static TAILQ_HEAD(memorystatus_list_head, memorystatus_node) memorystatus_list;

static uint64_t memorystatus_idle_delay_time = 0;

static unsigned int memorystatus_dirty_count = 0;

extern void proc_dirty_start(struct proc *p);
extern void proc_dirty_end(struct proc *p);

/* Jetsam */

#if CONFIG_JETSAM

extern unsigned int vm_page_free_count;
extern unsigned int vm_page_active_count;
extern unsigned int vm_page_inactive_count;
extern unsigned int vm_page_throttled_count;
extern unsigned int vm_page_purgeable_count;
extern unsigned int vm_page_wire_count;

static lck_mtx_t * exit_list_mlock;

static TAILQ_HEAD(exit_list_head, memorystatus_node) exit_list;

static unsigned int memorystatus_kev_failure_count = 0;

/* Counted in pages... */
unsigned int memorystatus_delta = 0;

unsigned int memorystatus_available_pages = (unsigned int)-1;
unsigned int memorystatus_available_pages_critical = 0;
unsigned int memorystatus_available_pages_highwater = 0;

/* ...with the exception of the legacy level in percent. */
unsigned int memorystatus_level = 0;

SYSCTL_UINT(_kern, OID_AUTO, memorystatus_kev_failure_count, CTLFLAG_RD, &memorystatus_kev_failure_count, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_level, CTLFLAG_RD, &memorystatus_level, 0, "");

unsigned int memorystatus_jetsam_policy = kPolicyDefault;

unsigned int memorystatus_jetsam_policy_offset_pages_more_free = 0;
#if DEVELOPMENT || DEBUG
unsigned int memorystatus_jetsam_policy_offset_pages_diagnostic = 0;
#endif

static memorystatus_jetsam_snapshot_t memorystatus_jetsam_snapshot;
#define memorystatus_jetsam_snapshot_list memorystatus_jetsam_snapshot.entries

static int memorystatus_jetsam_snapshot_list_count = 0;

int memorystatus_jetsam_wakeup = 0;
unsigned int memorystatus_jetsam_running = 1;

static uint32_t memorystatus_task_page_count(task_t task);

static void memorystatus_move_node_to_exit_list(memorystatus_node *node);

static void memorystatus_update_levels_locked(void);

static void memorystatus_jetsam_thread_block(void);
static void memorystatus_jetsam_thread(void *param __unused, wait_result_t wr __unused);

static int memorystatus_send_note(int event_code, void *data, size_t data_length);

static uint32_t memorystatus_build_flags_from_state(uint32_t state);

/* VM pressure */

#if VM_PRESSURE_EVENTS

typedef enum vm_pressure_level {
	kVMPressureNormal = 0,
	kVMPressureWarning = 1,
	kVMPressureUrgent = 2,
	kVMPressureCritical = 3,
} vm_pressure_level_t;

static vm_pressure_level_t memorystatus_vm_pressure_level = kVMPressureNormal;

unsigned int memorystatus_available_pages_pressure = 0;

static inline vm_pressure_level_t memorystatus_get_pressure_locked(void);
static void memorystatus_check_pressure_reset(void);

#endif /* VM_PRESSURE_EVENTS */

#endif /* CONFIG_JETSAM */

/* Freeze */

#if CONFIG_FREEZE

static unsigned int memorystatus_suspended_resident_count = 0;
static unsigned int memorystatus_suspended_count = 0;

boolean_t memorystatus_freeze_enabled = FALSE;
int memorystatus_freeze_wakeup = 0;

static inline boolean_t memorystatus_can_freeze_processes(void);
static boolean_t memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low);

static void memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused);

/* Thresholds */
static unsigned int memorystatus_freeze_threshold = 0;

static unsigned int memorystatus_freeze_pages_min = FREEZE_PAGES_MIN;
static unsigned int memorystatus_freeze_pages_max = FREEZE_PAGES_MAX;

static unsigned int memorystatus_frozen_count = 0;

static unsigned int memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT;

/* Stats */
static uint64_t memorystatus_freeze_count = 0;
static uint64_t memorystatus_freeze_pageouts = 0;

/* Throttling */
static throttle_interval_t throttle_intervals[] = {
	{ 60, 8, 0, 0, { 0, 0 }, FALSE }, /* 1 hour intermediate interval, 8x burst */
	{ 24 * 60, 1, 0, 0, { 0, 0 }, FALSE }, /* 24 hour long interval, no burst */
};

static uint64_t memorystatus_freeze_throttle_count = 0;

#endif /* CONFIG_FREEZE */

#if CONFIG_JETSAM

/* Debug */

#if DEVELOPMENT || DEBUG

SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages, CTLFLAG_RD, &memorystatus_available_pages, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_critical, CTLFLAG_RW, &memorystatus_available_pages_critical, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_highwater, CTLFLAG_RW, &memorystatus_available_pages_highwater, 0, "");
#if VM_PRESSURE_EVENTS
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_available_pages_pressure, CTLFLAG_RW, &memorystatus_available_pages_pressure, 0, "");
#endif /* VM_PRESSURE_EVENTS */

/* Diagnostic code */
enum {
	kJetsamDiagnosticModeNone =              0,
	kJetsamDiagnosticModeAll =               1,
	kJetsamDiagnosticModeStopAtFirstActive = 2,
	kJetsamDiagnosticModeCount
} jetsam_diagnostic_mode = kJetsamDiagnosticModeNone;

static int jetsam_diagnostic_suspended_one_active_proc = 0;

static int
sysctl_jetsam_diagnostic_mode SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	const char *diagnosticStrings[] = {
		"jetsam: diagnostic mode: resetting critical level.",
		"jetsam: diagnostic mode: will examine all processes",
		"jetsam: diagnostic mode: will stop at first active process"
	};

	int error, val = jetsam_diagnostic_mode;
	boolean_t changed = FALSE;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if ((val < 0) || (val >= kJetsamDiagnosticModeCount)) {
		printf("jetsam: diagnostic mode: invalid value - %d\n", val);
		return EINVAL;
	}

	lck_mtx_lock(memorystatus_list_mlock);

	if ((unsigned int) val != jetsam_diagnostic_mode) {
		jetsam_diagnostic_mode = val;

		memorystatus_jetsam_policy &= ~kPolicyDiagnoseActive;

		switch (jetsam_diagnostic_mode) {
		case kJetsamDiagnosticModeNone:
			/* Already cleared */
			break;
		case kJetsamDiagnosticModeAll:
			memorystatus_jetsam_policy |= kPolicyDiagnoseAll;
			break;
		case kJetsamDiagnosticModeStopAtFirstActive:
			memorystatus_jetsam_policy |= kPolicyDiagnoseFirst;
			break;
		default:
			/* Already validated */
			break;
		}

		memorystatus_update_levels_locked();
		changed = TRUE;
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	if (changed) {
		printf("%s\n", diagnosticStrings[val]);
	}

	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, jetsam_diagnostic_mode, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_ANYBODY,
	&jetsam_diagnostic_mode, 0, sysctl_jetsam_diagnostic_mode, "I", "Jetsam Diagnostic Mode");

SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jetsam_policy_offset_pages_more_free, CTLFLAG_RW, &memorystatus_jetsam_policy_offset_pages_more_free, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_jetsam_policy_offset_pages_diagnostic, CTLFLAG_RW, &memorystatus_jetsam_policy_offset_pages_diagnostic, 0, "");

#if VM_PRESSURE_EVENTS

#include "vm_pressure.h"

static int
sysctl_memorystatus_vm_pressure_level SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int error = 0;

	error = priv_check_cred(kauth_cred_get(), PRIV_VM_PRESSURE, 0);
	if (error)
		return (error);

	return SYSCTL_OUT(req, &memorystatus_vm_pressure_level, sizeof(memorystatus_vm_pressure_level));
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_level, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_LOCKED|CTLFLAG_MASKED,
	0, 0, &sysctl_memorystatus_vm_pressure_level, "I", "");

static int
sysctl_memorystatus_vm_pressure_send SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error, pid = 0;

	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);

	if (vm_dispatch_pressure_note_to_pid(pid)) {
		return 0;
	}

	return EINVAL;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_vm_pressure_send, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
	0, 0, &sysctl_memorystatus_vm_pressure_send, "I", "");

#endif /* VM_PRESSURE_EVENTS */

#endif /* CONFIG_JETSAM */

#if CONFIG_FREEZE

SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_threshold, CTLFLAG_RW, &memorystatus_freeze_threshold, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_min, CTLFLAG_RW, &memorystatus_freeze_pages_min, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_pages_max, CTLFLAG_RW, &memorystatus_freeze_pages_max, 0, "");

SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_count, CTLFLAG_RD, &memorystatus_freeze_count, "");
SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_pageouts, CTLFLAG_RD, &memorystatus_freeze_pageouts, "");
SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freeze_throttle_count, CTLFLAG_RD, &memorystatus_freeze_throttle_count, "");
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_min_processes, CTLFLAG_RW, &memorystatus_freeze_suspended_threshold, 0, "");

boolean_t memorystatus_freeze_throttle_enabled = TRUE;
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_throttle_enabled, CTLFLAG_RW, &memorystatus_freeze_throttle_enabled, 0, "");

/*
 * Manual trigger of freeze and thaw for dev / debug kernels only.
 */
static int
sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error, pid = 0;
	proc_t p;

	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);

	p = proc_find(pid);
	if (p != NULL) {
		uint32_t purgeable, wired, clean, dirty;
		boolean_t shared;
		uint32_t max_pages = MIN(default_pager_swap_pages_free(), memorystatus_freeze_pages_max);
		task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE);
		proc_rele(p);
		return 0;
	}

	return EINVAL;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_freeze, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
	0, 0, &sysctl_memorystatus_freeze, "I", "");

static int
sysctl_memorystatus_available_pages_thaw SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)

	int error, pid = 0;
	proc_t p;

	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);

	p = proc_find(pid);
	if (p != NULL) {
		task_thaw(p->task);
		proc_rele(p);
		return 0;
	}

	return EINVAL;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_thaw, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
	0, 0, &sysctl_memorystatus_available_pages_thaw, "I", "");

#endif /* CONFIG_FREEZE */

#endif /* DEVELOPMENT || DEBUG */

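/*
 * One-time setup: allocate the lock group and list locks, initialize the
 * process list, derive the page-count thresholds from the size of physical
 * memory, and start the idle-exit thread (plus the jetsam thread on
 * CONFIG_JETSAM kernels).
 */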
__private_extern__ void
memorystatus_init(void)
{
	thread_t thread = THREAD_NULL;
	kern_return_t result;

	memorystatus_lck_attr = lck_attr_alloc_init();
	memorystatus_lck_grp_attr = lck_grp_attr_alloc_init();
	memorystatus_lck_grp = lck_grp_alloc_init("memorystatus", memorystatus_lck_grp_attr);
	memorystatus_list_mlock = lck_mtx_alloc_init(memorystatus_lck_grp, memorystatus_lck_attr);
	TAILQ_INIT(&memorystatus_list);

#if CONFIG_JETSAM
	exit_list_mlock = lck_mtx_alloc_init(memorystatus_lck_grp, memorystatus_lck_attr);
	TAILQ_INIT(&exit_list);

	memorystatus_delta = DELTA_PERCENT * atop_64(max_mem) / 100;
#endif

#if CONFIG_FREEZE
	memorystatus_freeze_threshold = (FREEZE_PERCENT / DELTA_PERCENT) * memorystatus_delta;
#endif

	nanoseconds_to_absolutetime((uint64_t)IDLE_EXIT_TIME_SECS * NSEC_PER_SEC, &memorystatus_idle_delay_time);

	result = kernel_thread_start(memorystatus_thread, NULL, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create memorystatus_thread");
	}

#if CONFIG_JETSAM
	memorystatus_jetsam_policy_offset_pages_more_free = (POLICY_MORE_FREE_OFFSET_PERCENT / DELTA_PERCENT) * memorystatus_delta;
#if DEVELOPMENT || DEBUG
	memorystatus_jetsam_policy_offset_pages_diagnostic = (POLICY_DIAGNOSTIC_OFFSET_PERCENT / DELTA_PERCENT) * memorystatus_delta;
#endif

	/* No contention at this point */
	memorystatus_update_levels_locked();

	result = kernel_thread_start(memorystatus_jetsam_thread, NULL, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create memorystatus_jetsam_thread");
	}
#endif
}

/*
 * Node manipulation
 */

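/*
 * Insert a node into the list, which is kept sorted by descending priority
 * value; a new node lands ahead of existing nodes of equal priority.
 * The caller must hold memorystatus_list_mlock.
 */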
static void
memorystatus_add_node(memorystatus_node *new_node)
{
	memorystatus_node *node;

	/* Make sure we're called with the list lock held */
	lck_mtx_assert(memorystatus_list_mlock, LCK_MTX_ASSERT_OWNED);

	TAILQ_FOREACH(node, &memorystatus_list, link) {
		if (node->priority <= new_node->priority) {
			break;
		}
	}

	if (node) {
		TAILQ_INSERT_BEFORE(node, new_node, link);
	} else {
		TAILQ_INSERT_TAIL(&memorystatus_list, new_node, link);
	}

	next_memorystatus_node = TAILQ_FIRST(&memorystatus_list);

	memorystatus_list_count++;
}

static void
memorystatus_remove_node(memorystatus_node *node)
{
	/* Make sure we're called with the list lock held */
	lck_mtx_assert(memorystatus_list_mlock, LCK_MTX_ASSERT_OWNED);

	TAILQ_REMOVE(&memorystatus_list, node, link);
	next_memorystatus_node = TAILQ_FIRST(&memorystatus_list);

#if CONFIG_FREEZE
	if (node->state & (kProcessFrozen)) {
		memorystatus_frozen_count--;
	}

	if (node->state & kProcessSuspended) {
		memorystatus_suspended_resident_count -= node->resident_pages;
		memorystatus_suspended_count--;
	}
#endif

	memorystatus_list_count--;
}

/* Returns with the lock taken if found */
static memorystatus_node *
memorystatus_get_node(pid_t pid)
{
	memorystatus_node *node;

	lck_mtx_lock(memorystatus_list_mlock);

	TAILQ_FOREACH(node, &memorystatus_list, link) {
		if (node->pid == pid) {
			break;
		}
	}

	if (!node) {
		lck_mtx_unlock(memorystatus_list_mlock);
	}

	return node;
}

static void
memorystatus_release_node(memorystatus_node *node)
{
#pragma unused(node)
	lck_mtx_unlock(memorystatus_list_mlock);
}

/*
 * List manipulation
 */

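/*
 * Register a new process with the memorystatus list at the given priority.
 * The high water mark (in pages) is only tracked on CONFIG_JETSAM kernels.
 */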
kern_return_t
memorystatus_list_add(pid_t pid, int priority, int high_water_mark)
{

#if !CONFIG_JETSAM
#pragma unused(high_water_mark)
#endif

	memorystatus_node *new_node;

	new_node = (memorystatus_node*)kalloc(sizeof(memorystatus_node));
	if (!new_node) {
		assert(FALSE);
	}
	memset(new_node, 0, sizeof(memorystatus_node));

	MEMORYSTATUS_DEBUG(1, "memorystatus_list_add: adding process %d with priority %d, high water mark %d.\n", pid, priority, high_water_mark);

	new_node->pid = pid;
	new_node->priority = priority;
#if CONFIG_JETSAM
	new_node->hiwat_pages = high_water_mark;
#endif

	lck_mtx_lock(memorystatus_list_mlock);

	memorystatus_add_node(new_node);

	lck_mtx_unlock(memorystatus_list_mlock);

	return KERN_SUCCESS;
}

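/*
 * Update a process's priority, state flags and/or high water mark, and
 * re-sort it into the list. With 'effective' set, the request is honoured
 * only if no prior change has already touched the process. Passing -1 for
 * state_flags or high_water_mark leaves that field unchanged.
 */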
kern_return_t
memorystatus_list_change(boolean_t effective, pid_t pid, int priority, int state_flags, int high_water_mark)
{

#if !CONFIG_JETSAM
#pragma unused(high_water_mark)
#endif

	kern_return_t ret;
	memorystatus_node *node, *search;

	MEMORYSTATUS_DEBUG(1, "memorystatus_list_change: changing process %d to priority %d with flags %d\n", pid, priority, state_flags);

	lck_mtx_lock(memorystatus_list_mlock);

	TAILQ_FOREACH(node, &memorystatus_list, link) {
		if (node->pid == pid) {
			break;
		}
	}

	if (!node) {
		ret = KERN_FAILURE;
		goto out;
	}

	if (effective && (node->state & kProcessPriorityUpdated)) {
		MEMORYSTATUS_DEBUG(1, "memorystatus_list_change: effective change specified for pid %d, but change already occurred.\n", pid);
		ret = KERN_FAILURE;
		goto out;
	}

	node->state |= kProcessPriorityUpdated;

	if (state_flags != -1) {
		node->state &= ~(kProcessActive|kProcessForeground);
		if (state_flags & kMemorystatusFlagsFrontmost) {
			node->state |= kProcessForeground;
		}
		if (state_flags & kMemorystatusFlagsActive) {
			node->state |= kProcessActive;
		}
	}

#if CONFIG_JETSAM
	if (high_water_mark != -1) {
		node->hiwat_pages = high_water_mark;
	}
#endif

	if (node->priority == priority) {
		/* Priority unchanged */
		MEMORYSTATUS_DEBUG(1, "memorystatus_list_change: same priority set for pid %d\n", pid);
		ret = KERN_SUCCESS;
		goto out;
	}

	if (node->priority < priority) {
		/* Higher priority value (ie less important) - search backwards */
		search = TAILQ_PREV(node, memorystatus_list_head, link);
		TAILQ_REMOVE(&memorystatus_list, node, link);

		node->priority = priority;
		while (search && (search->priority <= node->priority)) {
			search = TAILQ_PREV(search, memorystatus_list_head, link);
		}
		if (search) {
			TAILQ_INSERT_AFTER(&memorystatus_list, search, node, link);
		} else {
			TAILQ_INSERT_HEAD(&memorystatus_list, node, link);
		}
	} else {
		/* Lower priority value (ie more important) - search forwards */
		search = TAILQ_NEXT(node, link);
		TAILQ_REMOVE(&memorystatus_list, node, link);

		node->priority = priority;
		while (search && (search->priority >= node->priority)) {
			search = TAILQ_NEXT(search, link);
		}
		if (search) {
			TAILQ_INSERT_BEFORE(search, node, link);
		} else {
			TAILQ_INSERT_TAIL(&memorystatus_list, node, link);
		}
	}

	next_memorystatus_node = TAILQ_FIRST(&memorystatus_list);
	ret = KERN_SUCCESS;

out:
	lck_mtx_unlock(memorystatus_list_mlock);
	return ret;
}

kern_return_t memorystatus_list_remove(pid_t pid)
{
	kern_return_t ret;
	memorystatus_node *node = NULL;

	MEMORYSTATUS_DEBUG(1, "memorystatus_list_remove: removing process %d\n", pid);

#if CONFIG_JETSAM
	/* Did we mark this as an exited process? */
	lck_mtx_lock(exit_list_mlock);

	TAILQ_FOREACH(node, &exit_list, link) {
		if (node->pid == pid) {
			/* We did, so remove it from the list. The stats were updated when the queues were shifted. */
			TAILQ_REMOVE(&exit_list, node, link);
			break;
		}
	}

	lck_mtx_unlock(exit_list_mlock);
#endif

	/* If not, search the main list */
	if (!node) {
		lck_mtx_lock(memorystatus_list_mlock);

		TAILQ_FOREACH(node, &memorystatus_list, link) {
			if (node->pid == pid) {
				/* Remove from the list, and update accounting accordingly */
				memorystatus_remove_node(node);
				break;
			}
		}

		lck_mtx_unlock(memorystatus_list_mlock);
	}

	if (node) {
		kfree(node, sizeof(memorystatus_node));
		ret = KERN_SUCCESS;
	} else {
		ret = KERN_FAILURE;
	}

	return ret;
}

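/*
 * Dirty-state tracking for the idle-exit mechanism: a process that opts in
 * (track != FALSE) is marked as supporting idle exit and given a clean-time
 * deadline; memorystatus_on_dirty() then toggles the dirty bit as the
 * process reports its transitions. A process that stays clean past its
 * deadline becomes an idle-exit candidate for memorystatus_thread().
 */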
kern_return_t
memorystatus_on_track_dirty(int pid, boolean_t track)
{
	kern_return_t ret = KERN_FAILURE;
	memorystatus_node *node;

	node = memorystatus_get_node((pid_t)pid);
	if (!node) {
		return KERN_FAILURE;
	}

	if (track && !(node->state & kProcessSupportsIdleExit)) {
		node->state |= kProcessSupportsIdleExit;
		node->clean_time = mach_absolute_time() + memorystatus_idle_delay_time;
		ret = KERN_SUCCESS;
	} else if (!track && (node->state & kProcessSupportsIdleExit)) {
		node->state &= ~kProcessSupportsIdleExit;
		node->clean_time = 0;
		ret = KERN_SUCCESS;
	}

	memorystatus_release_node(node);

	return ret;
}

kern_return_t
memorystatus_on_dirty(int pid, boolean_t dirty)
{
	kern_return_t ret = KERN_FAILURE;
	memorystatus_node *node;

	node = memorystatus_get_node((pid_t)pid);
	if (!node) {
		return KERN_FAILURE;
	}

	if (dirty) {
		if (!(node->state & kProcessDirty)) {
			node->state |= kProcessDirty;
			node->clean_time = 0;
			memorystatus_dirty_count++;
			ret = KERN_SUCCESS;
		}
	} else {
		if (node->state & kProcessDirty) {
			node->state &= ~kProcessDirty;
			node->clean_time = mach_absolute_time() + memorystatus_idle_delay_time;
			memorystatus_dirty_count--;
			ret = KERN_SUCCESS;
		}
	}

	memorystatus_release_node(node);

	return ret;
}

void
memorystatus_on_suspend(int pid)
{
	memorystatus_node *node = memorystatus_get_node((pid_t)pid);

	if (node) {
#if CONFIG_FREEZE
		proc_t p;

		p = proc_find(pid);
		if (p != NULL) {
			uint32_t pages = memorystatus_task_page_count(p->task);
			proc_rele(p);
			node->resident_pages = pages;
			memorystatus_suspended_resident_count += pages;
		}
		memorystatus_suspended_count++;
#endif

		node->state |= kProcessSuspended;

		memorystatus_release_node(node);
	}
}

void
memorystatus_on_resume(int pid)
{
	memorystatus_node *node = memorystatus_get_node((pid_t)pid);

	if (node) {
#if CONFIG_FREEZE
		boolean_t frozen = (node->state & kProcessFrozen);
		if (node->state & (kProcessFrozen)) {
			memorystatus_frozen_count--;
		}
		memorystatus_suspended_resident_count -= node->resident_pages;
		memorystatus_suspended_count--;
#endif

		node->state &= ~(kProcessSuspended | kProcessFrozen | kProcessIgnored);

		memorystatus_release_node(node);

#if CONFIG_FREEZE
		if (frozen) {
			memorystatus_freeze_entry_t data = { pid, kMemorystatusFlagsThawed, 0 };
			memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data));
		}
#endif
	}
}

void
memorystatus_on_inactivity(int pid)
{
#pragma unused(pid)
#if CONFIG_FREEZE
	/* Wake the freeze thread */
	thread_wakeup((event_t)&memorystatus_freeze_wakeup);
#endif
}

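/*
 * Idle-exit thread: woken via memorystatus_wakeup, it scans the list for a
 * process that supports idle exit, is clean, and has been clean past its
 * deadline, then terminates it with SIGKILL. One victim per wakeup.
 */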
static void
memorystatus_thread(void *param __unused, wait_result_t wr __unused)
{
	static boolean_t initialized = FALSE;
	memorystatus_node *node;
	uint64_t current_time;
	pid_t victim_pid = -1;

	if (initialized == FALSE) {
		initialized = TRUE;
		assert_wait(&memorystatus_wakeup, THREAD_UNINT);
		(void)thread_block((thread_continue_t)memorystatus_thread);
	}

	/* Pick the next idle exit victim. For now, just iterate through; ideally, this would be more intelligent. */
	current_time = mach_absolute_time();

	/* Set a cutoff so that we don't idle-exit processes that only recently went clean */

	lck_mtx_lock(memorystatus_list_mlock);

	if (memorystatus_dirty_count) {
		TAILQ_FOREACH(node, &memorystatus_list, link) {
			if ((node->state & kProcessSupportsIdleExit) && !(node->state & (kProcessDirty|kProcessIgnoreIdleExit))) {
				if (current_time >= node->clean_time) {
					victim_pid = node->pid;
					break;
				}
			}
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	if (-1 != victim_pid) {
		proc_t p = proc_find(victim_pid);
		if (p != NULL) {
			boolean_t kill = FALSE;
			proc_dirty_start(p);
			/* Ensure process is still marked for idle exit and is clean */
			if ((p->p_dirty & (P_DIRTY_ALLOW_IDLE_EXIT|P_DIRTY_IS_DIRTY|P_DIRTY_TERMINATED)) == (P_DIRTY_ALLOW_IDLE_EXIT)) {
				/* Clean; issue SIGKILL */
				p->p_dirty |= P_DIRTY_TERMINATED;
				kill = TRUE;
			}
			proc_dirty_end(p);
			if (TRUE == kill) {
				printf("memorystatus_thread: idle exiting pid %d [%s]\n", victim_pid, (p->p_comm ? p->p_comm : "(unknown)"));
				psignal(p, SIGKILL);
			}
			proc_rele(p);
		}
	}

	assert_wait(&memorystatus_wakeup, THREAD_UNINT);
	(void)thread_block((thread_continue_t)memorystatus_thread);
}

#if CONFIG_JETSAM

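/*
 * Resident page count for a task, via TASK_BASIC_INFO. The static buffers
 * are shared; this is safe here only because every caller runs with
 * memorystatus_list_mlock held.
 */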
static uint32_t
memorystatus_task_page_count(task_t task)
{
	kern_return_t ret;
	static task_info_data_t data;
	static struct task_basic_info *info = (struct task_basic_info *)&data;
	static mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;

	ret = task_info(task, TASK_BASIC_INFO, (task_info_t)&data, &count);
	if (ret == KERN_SUCCESS) {
		return info->resident_size / PAGE_SIZE;
	}
	return 0;
}

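/*
 * Post a memorystatus kernel event (KEV_MEMORYSTATUS_SUBCLASS) carrying the
 * given payload; userspace observers pick these up over the kernel event
 * socket. Failures are counted in memorystatus_kev_failure_count.
 */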
static int
memorystatus_send_note(int event_code, void *data, size_t data_length) {
	int ret;
	struct kev_msg ev_msg;

	ev_msg.vendor_code = KEV_VENDOR_APPLE;
	ev_msg.kev_class = KEV_SYSTEM_CLASS;
	ev_msg.kev_subclass = KEV_MEMORYSTATUS_SUBCLASS;

	ev_msg.event_code = event_code;

	ev_msg.dv[0].data_length = data_length;
	ev_msg.dv[0].data_ptr = data;
	ev_msg.dv[1].data_length = 0;

	ret = kev_post_msg(&ev_msg);
	if (ret) {
		memorystatus_kev_failure_count++;
		printf("%s: kev_post_msg() failed, err %d\n", __func__, ret);
	}

	return ret;
}

static uint32_t
memorystatus_build_flags_from_state(uint32_t state) {
	uint32_t flags = 0;

	if (state & kProcessForeground) {
		flags |= kMemorystatusFlagsFrontmost;
	}
	if (state & kProcessActive) {
		flags |= kMemorystatusFlagsActive;
	}
	if (state & kProcessSupportsIdleExit) {
		flags |= kMemorystatusFlagsSupportsIdleExit;
	}
	if (state & kProcessDirty) {
		flags |= kMemorystatusFlagsDirty;
	}

	return flags;
}

static void
memorystatus_move_node_to_exit_list(memorystatus_node *node)
{
	/* Make sure we're called with the list lock held */
	lck_mtx_assert(memorystatus_list_mlock, LCK_MTX_ASSERT_OWNED);

	/* Now, acquire the exit list lock... */
	lck_mtx_lock(exit_list_mlock);

	/* Remove from list + update accounting... */
	memorystatus_remove_node(node);

	/* ...then insert at the end of the exit queue */
	TAILQ_INSERT_TAIL(&exit_list, node, link);

	/* And relax */
	lck_mtx_unlock(exit_list_mlock);
}

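/*
 * Called by the VM layer with the current free page count. The count is
 * latched, and the jetsam thread woken, only when it crosses the critical
 * threshold or moves by at least memorystatus_delta pages in either
 * direction; smaller fluctuations are ignored to avoid thrashing.
 */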
void memorystatus_update(unsigned int pages_avail)
{
	if (!memorystatus_delta) {
		return;
	}

	if ((pages_avail < memorystatus_available_pages_critical) ||
		(pages_avail >= (memorystatus_available_pages + memorystatus_delta)) ||
		(memorystatus_available_pages >= (pages_avail + memorystatus_delta))) {
		memorystatus_available_pages = pages_avail;
		memorystatus_level = memorystatus_available_pages * 100 / atop_64(max_mem);
		/* Only wake the thread if currently blocked */
		if (OSCompareAndSwap(0, 1, &memorystatus_jetsam_running)) {
			thread_wakeup((event_t)&memorystatus_jetsam_wakeup);
		}
	}
}

static boolean_t
memorystatus_get_snapshot_properties_for_proc_locked(proc_t p, memorystatus_jetsam_snapshot_entry_t *entry)
{
	memorystatus_node *node;

	TAILQ_FOREACH(node, &memorystatus_list, link) {
		if (node->pid == p->p_pid) {
			break;
		}
	}

	if (!node) {
		return FALSE;
	}

	entry->pid = p->p_pid;
	strlcpy(&entry->name[0], p->p_comm, MAXCOMLEN+1);
	entry->priority = node->priority;
	entry->pages = memorystatus_task_page_count(p->task);
	entry->flags = memorystatus_build_flags_from_state(node->state);
	memcpy(&entry->uuid[0], &p->p_uuid[0], sizeof(p->p_uuid));

	return TRUE;
}

static void
memorystatus_jetsam_snapshot_procs_locked(void)
{
	proc_t p;
	int i = 0;

	memorystatus_jetsam_snapshot.stats.free_pages = vm_page_free_count;
	memorystatus_jetsam_snapshot.stats.active_pages = vm_page_active_count;
	memorystatus_jetsam_snapshot.stats.inactive_pages = vm_page_inactive_count;
	memorystatus_jetsam_snapshot.stats.throttled_pages = vm_page_throttled_count;
	memorystatus_jetsam_snapshot.stats.purgeable_pages = vm_page_purgeable_count;
	memorystatus_jetsam_snapshot.stats.wired_pages = vm_page_wire_count;
	proc_list_lock();
	LIST_FOREACH(p, &allproc, p_list) {
		if (FALSE == memorystatus_get_snapshot_properties_for_proc_locked(p, &memorystatus_jetsam_snapshot_list[i])) {
			continue;
		}

		MEMORYSTATUS_DEBUG(0, "jetsam snapshot pid = %d, uuid = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			p->p_pid,
			p->p_uuid[0], p->p_uuid[1], p->p_uuid[2], p->p_uuid[3], p->p_uuid[4], p->p_uuid[5], p->p_uuid[6], p->p_uuid[7],
			p->p_uuid[8], p->p_uuid[9], p->p_uuid[10], p->p_uuid[11], p->p_uuid[12], p->p_uuid[13], p->p_uuid[14], p->p_uuid[15]);

		if (++i == kMaxSnapshotEntries) {
			break;
		}
	}
	proc_list_unlock();
	memorystatus_jetsam_snapshot.snapshot_time = mach_absolute_time();
	/* 'i' holds the number of entries filled in above */
	memorystatus_jetsam_snapshot.entry_count = memorystatus_jetsam_snapshot_list_count = i;
}

static void
memorystatus_mark_pid_in_snapshot(pid_t pid, int flags)
{
	int i = 0;

	for (i = 0; i < memorystatus_jetsam_snapshot_list_count; i++) {
		if (memorystatus_jetsam_snapshot_list[i].pid == pid) {
			memorystatus_jetsam_snapshot_list[i].flags |= flags;
			return;
		}
	}
}

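/*
 * Kill (or, in diagnostic mode, suspend) the first eligible process in
 * priority order, tagging it in the snapshot with 'cause'. With 'any'
 * FALSE on CONFIG_FREEZE kernels, locked and not-worth-reclaiming
 * processes are skipped. Returns 0 on success, -1 if no victim was found.
 */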
int
memorystatus_kill_top_proc(boolean_t any, uint32_t cause)
{
	proc_t p;
	int pending_snapshot = 0;

#if !CONFIG_FREEZE
#pragma unused(any)
#endif

	lck_mtx_lock(memorystatus_list_mlock);

	if (memorystatus_jetsam_snapshot_list_count == 0) {
		memorystatus_jetsam_snapshot_procs_locked();
	} else {
		pending_snapshot = 1;
	}

	while (next_memorystatus_node) {
		memorystatus_node *node;
		pid_t aPid;
#if DEVELOPMENT || DEBUG
		int activeProcess;
		int procSuspendedForDiagnosis;
#endif /* DEVELOPMENT || DEBUG */

		node = next_memorystatus_node;
		next_memorystatus_node = TAILQ_NEXT(next_memorystatus_node, link);

#if DEVELOPMENT || DEBUG
		activeProcess = node->state & kProcessForeground;
		procSuspendedForDiagnosis = node->state & kProcessSuspendedForDiag;
#endif /* DEVELOPMENT || DEBUG */

		aPid = node->pid;

		/* skip empty slots in the list */
		if (aPid == 0 || (node->state & kProcessKilled)) {
			continue; // with lock held
		}

		p = proc_find(aPid);
		if (p != NULL) {
			int flags = cause;

#if DEVELOPMENT || DEBUG
			if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && procSuspendedForDiagnosis) {
				printf("jetsam: continuing after ignoring proc suspended already for diagnosis - %d\n", aPid);
				proc_rele(p);
				continue;
			}
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
			boolean_t skip;
			boolean_t reclaim_proc = !(node->state & (kProcessLocked | kProcessNoReclaimWorth));
			if (any || reclaim_proc) {
				if (node->state & kProcessFrozen) {
					flags |= kMemorystatusFlagsFrozen;
				}
				skip = FALSE;
			} else {
				skip = TRUE;
			}

			if (skip) {
				proc_rele(p);
			} else
#endif
			{
#if DEVELOPMENT || DEBUG
				if ((memorystatus_jetsam_policy & kPolicyDiagnoseActive) && activeProcess) {
					MEMORYSTATUS_DEBUG(1, "jetsam: suspending pid %d [%s] (active) for diagnosis - memory_status_level: %d\n",
						aPid, (p->p_comm ? p->p_comm: "(unknown)"), memorystatus_level);
					memorystatus_mark_pid_in_snapshot(aPid, kMemorystatusFlagsSuspForDiagnosis);
					node->state |= kProcessSuspendedForDiag;
					if (memorystatus_jetsam_policy & kPolicyDiagnoseFirst) {
						jetsam_diagnostic_suspended_one_active_proc = 1;
						printf("jetsam: returning after suspending first active proc - %d\n", aPid);
					}
					lck_mtx_unlock(memorystatus_list_mlock);
					task_suspend(p->task);
					proc_rele(p);
					return 0;
				} else
#endif /* DEVELOPMENT || DEBUG */
				{
					printf("memorystatus: jetsam killing pid %d [%s] - memorystatus_available_pages: %d\n",
						aPid, (p->p_comm ? p->p_comm : "(unknown)"), memorystatus_available_pages);
					/* Shift queue, update stats */
					memorystatus_move_node_to_exit_list(node);
					memorystatus_mark_pid_in_snapshot(aPid, flags);
					lck_mtx_unlock(memorystatus_list_mlock);
					exit1_internal(p, W_EXITCODE(0, SIGKILL), (int *)NULL, FALSE, FALSE);
					proc_rele(p);
					return 0;
				}
			}
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	// If we didn't kill anything, toss any newly-created snapshot
	if (!pending_snapshot) {
		memorystatus_jetsam_snapshot.entry_count = memorystatus_jetsam_snapshot_list_count = 0;
	}

	return -1;
}

int memorystatus_kill_top_proc_from_VM(void) {
	return memorystatus_kill_top_proc(TRUE, kMemorystatusFlagsKilledVM);
}

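/*
 * High-water-mark pass: walk the list and kill the first process whose
 * resident page count exceeds its per-process high water mark. Returns 0
 * if a process was killed (or suspended for diagnosis), -1 otherwise.
 */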
static int
memorystatus_kill_hiwat_proc(void)
{
	proc_t p;
	int pending_snapshot = 0;
	memorystatus_node *next_hiwat_node;

	lck_mtx_lock(memorystatus_list_mlock);

	if (memorystatus_jetsam_snapshot_list_count == 0) {
		memorystatus_jetsam_snapshot_procs_locked();
	} else {
		pending_snapshot = 1;
	}

	next_hiwat_node = next_memorystatus_node;

	while (next_hiwat_node) {
		pid_t aPid;
		int32_t hiwat;
		memorystatus_node *node;

		node = next_hiwat_node;
		next_hiwat_node = TAILQ_NEXT(next_hiwat_node, link);

		aPid = node->pid;
		hiwat = node->hiwat_pages;

		/* skip empty or non-hiwat slots in the list */
		if (aPid == 0 || (hiwat < 0) || (node->state & kProcessKilled)) {
			continue; // with lock held
		}

		p = proc_find(aPid);
		if (p != NULL) {
			int32_t pages = (int32_t)memorystatus_task_page_count(p->task);
			boolean_t skip = (pages <= hiwat);
#if DEVELOPMENT || DEBUG
			if (!skip && (memorystatus_jetsam_policy & kPolicyDiagnoseActive)) {
				if (node->state & kProcessSuspendedForDiag) {
					proc_rele(p);
					continue;
				}
			}
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_FREEZE
			if (!skip) {
				if (node->state & kProcessLocked) {
					skip = TRUE;
				} else {
					skip = FALSE;
				}
			}
#endif

			if (!skip) {
				MEMORYSTATUS_DEBUG(1, "jetsam: %s pid %d [%s] - %d pages > hiwat (%d)\n",
					(memorystatus_jetsam_policy & kPolicyDiagnoseActive) ? "suspending": "killing", aPid, p->p_comm, pages, hiwat);
#if DEVELOPMENT || DEBUG
				if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
					memorystatus_mark_pid_in_snapshot(aPid, kMemorystatusFlagsSuspForDiagnosis);
					node->state |= kProcessSuspendedForDiag;
					lck_mtx_unlock(memorystatus_list_mlock);
					task_suspend(p->task);
					proc_rele(p);
					MEMORYSTATUS_DEBUG(1, "jetsam: pid %d suspended for diagnosis - memorystatus_available_pages: %d\n", aPid, memorystatus_available_pages);
				} else
#endif /* DEVELOPMENT || DEBUG */
				{
					printf("memorystatus: jetsam killing pid %d [%s] (highwater) - memorystatus_available_pages: %d\n",
						aPid, (p->p_comm ? p->p_comm : "(unknown)"), memorystatus_available_pages);
					/* Shift queue, update stats */
					memorystatus_move_node_to_exit_list(node);
					memorystatus_mark_pid_in_snapshot(aPid, kMemorystatusFlagsKilledHiwat);
					lck_mtx_unlock(memorystatus_list_mlock);
					exit1(p, W_EXITCODE(0, SIGKILL), (int *)NULL);
					proc_rele(p);
				}
				return 0;
			} else {
				proc_rele(p);
			}

		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	// If we didn't kill anything, toss any newly-created snapshot
	if (!pending_snapshot) {
		memorystatus_jetsam_snapshot.entry_count = memorystatus_jetsam_snapshot_list_count = 0;
	}

	return -1;
}

static void
memorystatus_jetsam_thread_block(void)
{
	assert_wait(&memorystatus_jetsam_wakeup, THREAD_UNINT);
	assert(memorystatus_jetsam_running == 1);
	OSDecrementAtomic(&memorystatus_jetsam_running);
	(void)thread_block((thread_continue_t)memorystatus_jetsam_thread);
}

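/*
 * Main jetsam loop: on each wakeup, first clear any high-water-mark
 * offenders, then kill in priority order until the free page count is back
 * above the critical threshold, posting a snapshot notification if anything
 * was killed. Blocks again once the page count has stabilized.
 */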
static void
memorystatus_jetsam_thread(void *param __unused, wait_result_t wr __unused)
{
	boolean_t post_snapshot = FALSE;
	static boolean_t is_vm_privileged = FALSE;

	if (is_vm_privileged == FALSE) {
		/*
		 * It's the first time the thread has run, so just mark the thread as privileged and block.
		 * This avoids a spurious pass with unset variables, as set out in <rdar://problem/9609402>.
		 */
		thread_wire(host_priv_self(), current_thread(), TRUE);
		is_vm_privileged = TRUE;
		memorystatus_jetsam_thread_block();
	}

	assert(memorystatus_available_pages != (unsigned)-1);

	while(1) {
		unsigned int last_available_pages;

#if DEVELOPMENT || DEBUG
		jetsam_diagnostic_suspended_one_active_proc = 0;
#endif /* DEVELOPMENT || DEBUG */

		while (memorystatus_available_pages <= memorystatus_available_pages_highwater) {
			if (memorystatus_kill_hiwat_proc() < 0) {
				break;
			}
			post_snapshot = TRUE;
		}

		while (memorystatus_available_pages <= memorystatus_available_pages_critical) {
			if (memorystatus_kill_top_proc(FALSE, kMemorystatusFlagsKilled) < 0) {
				/* No victim was found - panic */
				panic("memorystatus_jetsam_thread: no victim! available pages:%d, critical page level: %d\n",
					memorystatus_available_pages, memorystatus_available_pages_critical);
			}
			post_snapshot = TRUE;
#if DEVELOPMENT || DEBUG
			if ((memorystatus_jetsam_policy & kPolicyDiagnoseFirst) && jetsam_diagnostic_suspended_one_active_proc) {
				printf("jetsam: stopping killing since 1 active proc suspended already for diagnosis\n");
				break; // we found first active proc, let's not kill any more
			}
#endif /* DEVELOPMENT || DEBUG */
		}

		last_available_pages = memorystatus_available_pages;

		if (post_snapshot) {
			size_t snapshot_size = sizeof(memorystatus_jetsam_snapshot_t) + sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_list_count - 1);
			memorystatus_jetsam_snapshot.notification_time = mach_absolute_time();
			memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
		}

		if (memorystatus_available_pages >= (last_available_pages + memorystatus_delta) ||
			last_available_pages >= (memorystatus_available_pages + memorystatus_delta)) {
			continue;
		}

#if VM_PRESSURE_EVENTS
		memorystatus_check_pressure_reset();
#endif

		memorystatus_jetsam_thread_block();
	}
}

#endif /* CONFIG_JETSAM */

#if CONFIG_FREEZE

__private_extern__ void
memorystatus_freeze_init(void)
{
	kern_return_t result;
	thread_t thread;

	result = kernel_thread_start(memorystatus_freeze_thread, NULL, &thread);
	if (result == KERN_SUCCESS) {
		thread_deallocate(thread);
	} else {
		panic("Could not create memorystatus_freeze_thread");
	}
}

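/*
 * Freeze the first suspended, unfrozen, unlocked process that meets the
 * minimum resident-size criteria. Returns the number of dirty pages frozen
 * on success, 0 if swap space ran low (also flagging that for the caller),
 * or -1 if no process was frozen.
 */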
static int
memorystatus_freeze_top_proc(boolean_t *memorystatus_freeze_swap_low)
{
	proc_t p;
	uint32_t i;
	memorystatus_node *next_freeze_node;

	lck_mtx_lock(memorystatus_list_mlock);

	next_freeze_node = next_memorystatus_node;

	while (next_freeze_node) {
		memorystatus_node *node;
		pid_t aPid;
		uint32_t state;

		node = next_freeze_node;
		next_freeze_node = TAILQ_NEXT(next_freeze_node, link);

		aPid = node->pid;
		state = node->state;

		/* skip empty slots in the list */
		if (aPid == 0) {
			continue; // with lock held
		}

		/* Ensure the process is eligible for freezing */
		if ((state & (kProcessKilled | kProcessLocked | kProcessFrozen)) || !(state & kProcessSuspended)) {
			continue; // with lock held
		}

		p = proc_find(aPid);
		if (p != NULL) {
			kern_return_t kr;
			uint32_t purgeable, wired, clean, dirty;
			boolean_t shared;
			uint32_t max_pages = 0;

			/* Only freeze processes meeting our minimum resident page criteria */
			if (memorystatus_task_page_count(p->task) < memorystatus_freeze_pages_min) {
				proc_rele(p);
				continue;
			}

			/* Ensure there's enough free space to freeze this process. */
			max_pages = MIN(default_pager_swap_pages_free(), memorystatus_freeze_pages_max);
			if (max_pages < memorystatus_freeze_pages_min) {
				*memorystatus_freeze_swap_low = TRUE;
				proc_rele(p);
				lck_mtx_unlock(memorystatus_list_mlock);
				return 0;
			}

			/* Mark as locked temporarily to avoid kill */
			node->state |= kProcessLocked;

			kr = task_freeze(p->task, &purgeable, &wired, &clean, &dirty, max_pages, &shared, FALSE);

			MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_top_proc: task_freeze %s for pid %d [%s] - "
				"memorystatus_pages: %d, purgeable: %d, wired: %d, clean: %d, dirty: %d, shared %d, free swap: %d\n",
				(kr == KERN_SUCCESS) ? "SUCCEEDED" : "FAILED", aPid, (p->p_comm ? p->p_comm : "(unknown)"),
				memorystatus_available_pages, purgeable, wired, clean, dirty, shared, default_pager_swap_pages_free());

			proc_rele(p);

			node->state &= ~kProcessLocked;

			if (KERN_SUCCESS == kr) {
				memorystatus_freeze_entry_t data = { aPid, kMemorystatusFlagsFrozen, dirty };

				memorystatus_frozen_count++;

				node->state |= (kProcessFrozen | (shared ? 0: kProcessNoReclaimWorth));

				/* Update stats */
				for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
					throttle_intervals[i].pageouts += dirty;
				}

				memorystatus_freeze_pageouts += dirty;
				memorystatus_freeze_count++;

				lck_mtx_unlock(memorystatus_list_mlock);

				memorystatus_send_note(kMemorystatusFreezeNote, &data, sizeof(data));

				return dirty;
			}

			/* Failed; go round again */
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	return -1;
}

static inline boolean_t
memorystatus_can_freeze_processes(void)
{
	boolean_t ret;

	lck_mtx_lock(memorystatus_list_mlock);

	if (memorystatus_suspended_count) {
		uint32_t average_resident_pages, estimated_processes;

		/* Estimate the number of suspended processes we can fit */
		average_resident_pages = memorystatus_suspended_resident_count / memorystatus_suspended_count;
		estimated_processes = memorystatus_suspended_count +
			((memorystatus_available_pages - memorystatus_available_pages_critical) / average_resident_pages);

		/* If it's predicted that no freeze will occur, lower the threshold temporarily */
		if (estimated_processes <= FREEZE_SUSPENDED_THRESHOLD_DEFAULT) {
			memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_LOW;
		} else {
			memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT;
		}

		MEMORYSTATUS_DEBUG(1, "memorystatus_can_freeze_processes: %d suspended processes, %d average resident pages / process, %d suspended processes estimated\n",
			memorystatus_suspended_count, average_resident_pages, estimated_processes);

		if ((memorystatus_suspended_count - memorystatus_frozen_count) > memorystatus_freeze_suspended_threshold) {
			ret = TRUE;
		} else {
			ret = FALSE;
		}
	} else {
		ret = FALSE;
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	return ret;
}

static boolean_t
memorystatus_can_freeze(boolean_t *memorystatus_freeze_swap_low)
{
	/* Only freeze if we're sufficiently low on memory; this holds off freeze right
	   after boot, and is generally a no-op once we've reached steady state. */
	if (memorystatus_available_pages > memorystatus_freeze_threshold) {
		return FALSE;
	}

	/* Check minimum suspended process threshold. */
	if (!memorystatus_can_freeze_processes()) {
		return FALSE;
	}

	/* Is swap running low? */
	if (*memorystatus_freeze_swap_low) {
		/* If there's been no movement in free swap pages since we last attempted freeze, return. */
		if (default_pager_swap_pages_free() < memorystatus_freeze_pages_min) {
			return FALSE;
		}

		/* Pages have been freed - we can retry. */
		*memorystatus_freeze_swap_low = FALSE;
	}

	/* OK */
	return TRUE;
}

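/*
 * Roll a throttle interval forward once it has expired and compute its
 * pageout budget:
 *
 *	max_pageouts = burst_multiple * (mins * FREEZE_DAILY_PAGEOUTS_MAX) / (24 * 60)
 *
 * For the default intervals above, the 60-minute interval therefore gets
 * 8 * (FREEZE_DAILY_PAGEOUTS_MAX / 24) pages, while the 24-hour interval
 * gets exactly FREEZE_DAILY_PAGEOUTS_MAX. An interval that exceeds its
 * budget before expiring is marked throttled until it rolls over.
 */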
static void
memorystatus_freeze_update_throttle_interval(mach_timespec_t *ts, struct throttle_interval_t *interval)
{
	if (CMP_MACH_TIMESPEC(ts, &interval->ts) >= 0) {
		if (!interval->max_pageouts) {
			interval->max_pageouts = (interval->burst_multiple * (((uint64_t)interval->mins * FREEZE_DAILY_PAGEOUTS_MAX) / (24 * 60)));
		} else {
			printf("memorystatus_freeze_update_throttle_interval: %d minute throttle timeout, resetting\n", interval->mins);
		}
		interval->ts.tv_sec = interval->mins * 60;
		interval->ts.tv_nsec = 0;
		ADD_MACH_TIMESPEC(&interval->ts, ts);
		/* Since we update the throttle stats pre-freeze, adjust for overshoot here */
		if (interval->pageouts > interval->max_pageouts) {
			interval->pageouts -= interval->max_pageouts;
		} else {
			interval->pageouts = 0;
		}
		interval->throttle = FALSE;
	} else if (!interval->throttle && interval->pageouts >= interval->max_pageouts) {
		printf("memorystatus_freeze_update_throttle_interval: %d minute pageout limit exceeded; enabling throttle\n", interval->mins);
		interval->throttle = TRUE;
	}

	MEMORYSTATUS_DEBUG(1, "memorystatus_freeze_update_throttle_interval: throttle updated - %d frozen (%d max) within %dm; %dm remaining; throttle %s\n",
		interval->pageouts, interval->max_pageouts, interval->mins, (interval->ts.tv_sec - ts->tv_sec) / 60,
		interval->throttle ? "on" : "off");
}

static boolean_t
memorystatus_freeze_update_throttle(void)
{
	clock_sec_t sec;
	clock_nsec_t nsec;
	mach_timespec_t ts;
	uint32_t i;
	boolean_t throttled = FALSE;

#if DEVELOPMENT || DEBUG
	if (!memorystatus_freeze_throttle_enabled)
		return FALSE;
#endif

	clock_get_system_nanotime(&sec, &nsec);
	ts.tv_sec = sec;
	ts.tv_nsec = nsec;

	/* Check freeze pageouts over multiple intervals and throttle if we've exceeded our budget.
	 *
	 * This ensures that periods of inactivity can't be used as 'credit' towards freeze if the device has
	 * remained dormant for a long period. We do, however, allow increased thresholds for shorter intervals in
	 * order to allow for bursts of activity.
	 */
	for (i = 0; i < sizeof(throttle_intervals) / sizeof(struct throttle_interval_t); i++) {
		memorystatus_freeze_update_throttle_interval(&ts, &throttle_intervals[i]);
		if (throttle_intervals[i].throttle == TRUE)
			throttled = TRUE;
	}

	return throttled;
}

static void
memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused)
{
	static boolean_t memorystatus_freeze_swap_low = FALSE;

	if (memorystatus_freeze_enabled) {
		if (memorystatus_can_freeze(&memorystatus_freeze_swap_low)) {
			/* Only freeze if we've not exceeded our pageout budgets */
			if (!memorystatus_freeze_update_throttle()) {
				memorystatus_freeze_top_proc(&memorystatus_freeze_swap_low);
			} else {
				printf("memorystatus_freeze_thread: in throttle, ignoring freeze\n");
				memorystatus_freeze_throttle_count++; /* Throttled, update stats */
			}
		}
	}

	assert_wait((event_t) &memorystatus_freeze_wakeup, THREAD_UNINT);
	thread_block((thread_continue_t) memorystatus_freeze_thread);
}

#endif /* CONFIG_FREEZE */

#if CONFIG_JETSAM

#if VM_PRESSURE_EVENTS

static inline vm_pressure_level_t
memorystatus_get_pressure_locked(void) {
	if (memorystatus_available_pages > memorystatus_available_pages_pressure) {
		/* Too many free pages */
		return kVMPressureNormal;
	}

#if CONFIG_FREEZE
	if (memorystatus_frozen_count > 0) {
		/* Frozen processes exist */
		return kVMPressureNormal;
	}
#endif

	if (memorystatus_suspended_count > MEMORYSTATUS_SUSPENDED_THRESHOLD) {
		/* Too many suspended processes */
		return kVMPressureNormal;
	}

	if (memorystatus_suspended_count > 0) {
		/* Some suspended processes - warn */
		return kVMPressureWarning;
	}

	/* Otherwise, pressure level is urgent */
	return kVMPressureUrgent;
}

pid_t
memorystatus_request_vm_pressure_candidate(void) {
	memorystatus_node *node;
	pid_t pid = -1;

	lck_mtx_lock(memorystatus_list_mlock);

	/* Are we in a low memory state? */
	memorystatus_vm_pressure_level = memorystatus_get_pressure_locked();
	if (kVMPressureNormal != memorystatus_vm_pressure_level) {
		TAILQ_FOREACH(node, &memorystatus_list, link) {
			/* Skip ineligible processes */
			if (node->state & (kProcessKilled | kProcessLocked | kProcessSuspended | kProcessFrozen | kProcessNotifiedForPressure)) {
				continue;
			}
			node->state |= kProcessNotifiedForPressure;
			pid = node->pid;
			break;
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	return pid;
}

void
memorystatus_send_pressure_note(pid_t pid) {
	memorystatus_send_note(kMemorystatusPressureNote, &pid, sizeof(pid));
}

static void
memorystatus_check_pressure_reset() {
	lck_mtx_lock(memorystatus_list_mlock);

	if (kVMPressureNormal != memorystatus_vm_pressure_level) {
		memorystatus_vm_pressure_level = memorystatus_get_pressure_locked();
		if (kVMPressureNormal == memorystatus_vm_pressure_level) {
			memorystatus_node *node;
			TAILQ_FOREACH(node, &memorystatus_list, link) {
				node->state &= ~kProcessNotifiedForPressure;
			}
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);
}

#endif /* VM_PRESSURE_EVENTS */

/* Sysctls... */

static int
sysctl_memorystatus_list_change SYSCTL_HANDLER_ARGS
{
	int ret;
	memorystatus_priority_entry_t entry;

#pragma unused(oidp, arg1, arg2)

	if (!req->newptr || req->newlen > sizeof(entry)) {
		return EINVAL;
	}

	ret = SYSCTL_IN(req, &entry, req->newlen);
	if (ret) {
		return ret;
	}

	memorystatus_list_change(FALSE, entry.pid, entry.priority, entry.flags, -1);

	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_jetsam_change, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
	0, 0, &sysctl_memorystatus_list_change, "I", "");

static int
sysctl_memorystatus_priority_list(__unused struct sysctl_oid *oid, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int ret;
	size_t allocated_size, list_size = 0;
	memorystatus_priority_entry_t *list;
	uint32_t list_count, i = 0;
	memorystatus_node *node;

	/* Races, but this is only for diagnostic purposes */
	list_count = memorystatus_list_count;
	allocated_size = sizeof(memorystatus_priority_entry_t) * list_count;
	list = kalloc(allocated_size);
	if (!list) {
		return ENOMEM;
	}

	memset(list, 0, allocated_size);

	lck_mtx_lock(memorystatus_list_mlock);

	TAILQ_FOREACH(node, &memorystatus_list, link) {
		list[i].pid = node->pid;
		list[i].priority = node->priority;
		list[i].flags = memorystatus_build_flags_from_state(node->state);
		list[i].hiwat_pages = node->hiwat_pages;
		list_size += sizeof(memorystatus_priority_entry_t);
		if (++i >= list_count) {
			break;
		}
	}

	lck_mtx_unlock(memorystatus_list_mlock);

	if (!list_size) {
		if (req->oldptr) {
			MEMORYSTATUS_DEBUG(1, "kern.memorystatus_priority_list returning EINVAL\n");
			kfree(list, allocated_size);
			return EINVAL;
		}
		else {
			MEMORYSTATUS_DEBUG(1, "kern.memorystatus_priority_list returning 0 for size\n");
		}
	} else {
		MEMORYSTATUS_DEBUG(1, "kern.memorystatus_priority_list returning %ld for size\n", (long)list_size);
	}

	ret = SYSCTL_OUT(req, list, list_size);

	kfree(list, allocated_size);

	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_priority_list, CTLTYPE_OPAQUE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_memorystatus_priority_list, "S,jetsam_priorities", "");

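/*
 * Recompute the page thresholds (critical, highwater and, where enabled,
 * pressure) from memorystatus_delta, applying the diagnostic and
 * "more free" policy offsets when those modes are active. Callers hold
 * memorystatus_list_mlock, except during init when there is no contention.
 */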
static void
memorystatus_update_levels_locked(void) {
	/* Set the baseline levels in pages */
	memorystatus_available_pages_critical = (CRITICAL_PERCENT / DELTA_PERCENT) * memorystatus_delta;
	memorystatus_available_pages_highwater = (HIGHWATER_PERCENT / DELTA_PERCENT) * memorystatus_delta;
#if VM_PRESSURE_EVENTS
	memorystatus_available_pages_pressure = (PRESSURE_PERCENT / DELTA_PERCENT) * memorystatus_delta;
#endif

#if DEBUG || DEVELOPMENT
	if (memorystatus_jetsam_policy & kPolicyDiagnoseActive) {
		memorystatus_available_pages_critical += memorystatus_jetsam_policy_offset_pages_diagnostic;
		memorystatus_available_pages_highwater += memorystatus_jetsam_policy_offset_pages_diagnostic;
#if VM_PRESSURE_EVENTS
		memorystatus_available_pages_pressure += memorystatus_jetsam_policy_offset_pages_diagnostic;
#endif
	}
#endif

	/* Only boost the critical level - it's more important to kill right away than issue warnings */
	if (memorystatus_jetsam_policy & kPolicyMoreFree) {
		memorystatus_available_pages_critical += memorystatus_jetsam_policy_offset_pages_more_free;
	}
}

static int
sysctl_memorystatus_jetsam_policy_more_free SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
	int error, more_free = 0;

	error = priv_check_cred(kauth_cred_get(), PRIV_VM_JETSAM, 0);
	if (error)
		return (error);

	error = sysctl_handle_int(oidp, &more_free, 0, req);
	if (error || !req->newptr)
		return (error);

	lck_mtx_lock(memorystatus_list_mlock);

	if (more_free) {
		memorystatus_jetsam_policy |= kPolicyMoreFree;
	} else {
		memorystatus_jetsam_policy &= ~kPolicyMoreFree;
	}

	memorystatus_update_levels_locked();

	lck_mtx_unlock(memorystatus_list_mlock);

	return 0;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_jetsam_policy_more_free, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED|CTLFLAG_ANYBODY,
	0, 0, &sysctl_memorystatus_jetsam_policy_more_free, "I", "");

static int
sysctl_handle_memorystatus_snapshot(__unused struct sysctl_oid *oid, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int ret;
	size_t currentsize = 0;

	if (memorystatus_jetsam_snapshot_list_count > 0) {
		currentsize = sizeof(memorystatus_jetsam_snapshot_t) + sizeof(memorystatus_jetsam_snapshot_entry_t) * (memorystatus_jetsam_snapshot_list_count - 1);
	}
	if (!currentsize) {
		if (req->oldptr) {
			MEMORYSTATUS_DEBUG(1, "kern.memorystatus_snapshot returning EINVAL\n");
			return EINVAL;
		}
		else {
			MEMORYSTATUS_DEBUG(1, "kern.memorystatus_snapshot returning 0 for size\n");
		}
	} else {
		MEMORYSTATUS_DEBUG(1, "kern.memorystatus_snapshot returning %ld for size\n", (long)currentsize);
	}
	ret = SYSCTL_OUT(req, &memorystatus_jetsam_snapshot, currentsize);
	if (!ret && req->oldptr) {
		memorystatus_jetsam_snapshot.entry_count = memorystatus_jetsam_snapshot_list_count = 0;
	}
	return ret;
}

SYSCTL_PROC(_kern, OID_AUTO, memorystatus_snapshot, CTLTYPE_OPAQUE|CTLFLAG_RD, 0, 0, sysctl_handle_memorystatus_snapshot, "S,memorystatus_snapshot", "");
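
/*
 * Userspace consumption sketch (illustrative, not part of this file): the
 * snapshot is a memorystatus_jetsam_snapshot_t from <sys/kern_memorystatus.h>
 * followed by entry_count entries, and a successful read resets it, so a
 * monitor should read it at most once per jetsam notification:
 *
 *	size_t size = 0;
 *	if (sysctlbyname("kern.memorystatus_snapshot", NULL, &size, NULL, 0) == 0 && size) {
 *		memorystatus_jetsam_snapshot_t *snapshot = malloc(size);
 *		if (snapshot && sysctlbyname("kern.memorystatus_snapshot", snapshot, &size, NULL, 0) == 0) {
 *			// snapshot->entry_count entries are in snapshot->entries[]
 *		}
 *		free(snapshot);
 *	}
 */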

#endif /* CONFIG_JETSAM */