1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * File: kern/task.c
58 * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
59 * David Black
60 *
61 * Task management primitives implementation.
62 */
63 /*
64 * Copyright (c) 1993 The University of Utah and
65 * the Computer Systems Laboratory (CSL). All rights reserved.
66 *
67 * Permission to use, copy, modify and distribute this software and its
68 * documentation is hereby granted, provided that both the copyright
69 * notice and this permission notice appear in all copies of the
70 * software, derivative works or modified versions, and any portions
71 * thereof, and that both notices appear in supporting documentation.
72 *
73 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
74 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
75 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
76 *
77 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
78 * improvements that they make and grant CSL redistribution rights.
79 *
80 */
81 /*
82 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
83 * support for mandatory and extensible security protections. This notice
84 * is included in support of clause 2.2 (b) of the Apple Public License,
85 * Version 2.0.
86 * Copyright (c) 2005 SPARTA, Inc.
87 */
88
89 #include <mach/mach_types.h>
90 #include <mach/boolean.h>
91 #include <mach/host_priv.h>
92 #include <mach/machine/vm_types.h>
93 #include <mach/vm_param.h>
94 #include <mach/mach_vm.h>
95 #include <mach/semaphore.h>
96 #include <mach/task_info.h>
97 #include <mach/task_special_ports.h>
98 #include <mach/sdt.h>
99
100 #include <ipc/ipc_importance.h>
101 #include <ipc/ipc_types.h>
102 #include <ipc/ipc_space.h>
103 #include <ipc/ipc_entry.h>
104 #include <ipc/ipc_hash.h>
105
106 #include <kern/kern_types.h>
107 #include <kern/mach_param.h>
108 #include <kern/misc_protos.h>
109 #include <kern/task.h>
110 #include <kern/thread.h>
111 #include <kern/coalition.h>
112 #include <kern/zalloc.h>
113 #include <kern/kalloc.h>
114 #include <kern/kern_cdata.h>
115 #include <kern/processor.h>
116 #include <kern/sched_prim.h> /* for thread_wakeup */
117 #include <kern/ipc_tt.h>
118 #include <kern/host.h>
119 #include <kern/clock.h>
120 #include <kern/timer.h>
121 #include <kern/assert.h>
122 #include <kern/sync_lock.h>
123 #include <kern/affinity.h>
124 #include <kern/exc_resource.h>
125 #include <kern/machine.h>
126 #include <kern/policy_internal.h>
127
128 #include <corpses/task_corpse.h>
129 #if CONFIG_TELEMETRY
130 #include <kern/telemetry.h>
131 #endif
132
133 #include <vm/pmap.h>
134 #include <vm/vm_map.h>
135 #include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
136 #include <vm/vm_pageout.h>
137 #include <vm/vm_protos.h>
138 #include <vm/vm_purgeable_internal.h>
139
140 #include <sys/resource.h>
141 #include <sys/signalvar.h> /* for coredump */
142
143 /*
144 * Exported interfaces
145 */
146
147 #include <mach/task_server.h>
148 #include <mach/mach_host_server.h>
149 #include <mach/host_security_server.h>
150 #include <mach/mach_port_server.h>
151
152 #include <vm/vm_shared_region.h>
153
154 #include <libkern/OSDebug.h>
155 #include <libkern/OSAtomic.h>
156
157 #if CONFIG_ATM
158 #include <atm/atm_internal.h>
159 #endif
160
161 #include <kern/sfi.h> /* picks up ledger.h */
162
163 #if CONFIG_MACF
164 #include <security/mac_mach_internal.h>
165 #endif
166
167 #if KPERF
168 extern int kpc_force_all_ctrs(task_t, int);
169 #endif
170
171 task_t kernel_task;
172 zone_t task_zone;
173 lck_attr_t task_lck_attr;
174 lck_grp_t task_lck_grp;
175 lck_grp_attr_t task_lck_grp_attr;
176
177 extern int exc_via_corpse_forking;
178 extern int unify_corpse_blob_alloc;
179 extern int corpse_for_fatal_memkill;
180
181 /* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
182 int audio_active = 0;
183
184 zinfo_usage_store_t tasks_tkm_private;
185 zinfo_usage_store_t tasks_tkm_shared;
186
187 /* A container to accumulate statistics for expired tasks */
188 expired_task_statistics_t dead_task_statistics;
189 lck_spin_t dead_task_statistics_lock;
190
191 ledger_template_t task_ledger_template = NULL;
192
193 struct _task_ledger_indices task_ledgers __attribute__((used)) =
194 {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
195 { 0 /* initialized at runtime */},
196 #ifdef CONFIG_BANK
197 -1, -1,
198 #endif
199 -1, -1,
200 };
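/*
 * Each index above starts out as -1 ("not yet registered") and is filled in
 * with a real ledger entry index by init_task_ledgers(), which panics if any
 * required entry is still negative once the template has been built.
 */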
201
202 /* System sleep state */
203 boolean_t tasks_suspend_state;
204
205
206 void init_task_ledgers(void);
207 void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
208 void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
209 void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
210 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
211 void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
212 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);
213
214 kern_return_t task_suspend_internal(task_t);
215 kern_return_t task_resume_internal(task_t);
216 static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
217 int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);
218
219 extern kern_return_t iokit_task_terminate(task_t task);
220
221 extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
222 extern void bsd_copythreadname(void *dst_uth, void *src_uth);
223 extern kern_return_t thread_resume(thread_t thread);
224
225 // Warn tasks when they hit 80% of their memory limit.
226 #define PHYS_FOOTPRINT_WARNING_LEVEL 80
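/*
 * Example: with a 500 MB phys_footprint limit and this default 80% warning
 * level, the footprint warning fires once a task crosses 400 MB.
 */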
227
228 #define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT 150 /* wakeups per second */
229 #define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL 300 /* in seconds. */
230
231 /*
232 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
233 *
234 * (i.e. when the task's wakeup rate exceeds 70% of the limit, start taking user
235 * stacktraces, aka micro-stackshots)
236 */
237 #define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER 70
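/*
 * Worked example with the defaults above: the limit is 150 wakeups/sec observed
 * over a 300-second interval, so micro-stackshot telemetry starts once a task
 * exceeds 70% of that rate, i.e. 105 wakeups/sec, while EXC_RESOURCE itself is
 * only sent when the full 150 wakeups/sec limit is exceeded over the interval.
 */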
238
239 int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
240 int task_wakeups_monitor_rate; /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */
241
242 int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */
243
244 int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations. */
245
246 ledger_amount_t max_task_footprint = 0; /* Per-task limit on physical memory consumption in bytes */
247 int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */
248 int max_task_footprint_mb = 0; /* Per-task limit on physical memory consumption in megabytes */
249
250 /* I/O Monitor Limits */
251 #define IOMON_DEFAULT_LIMIT (20480ull) /* MB of logical/physical I/O */
252 #define IOMON_DEFAULT_INTERVAL (86400ull) /* in seconds */
253
254 uint64_t task_iomon_limit_mb; /* Per-task I/O monitor limit in MBs */
255 uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */
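/*
 * With the defaults above, a task may issue up to 20480 MB (20 GB) of logical/
 * physical I/O per 86400-second (24-hour) interval -- roughly 0.24 MB/sec
 * sustained -- before the I/O monitor's EXC_RESOURCE machinery is triggered.
 */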
256
257 #define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll)
258 int64_t io_telemetry_limit; /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
259 int64_t global_logical_writes_count = 0; /* Global count for logical writes */
260 static boolean_t global_update_logical_writes(int64_t);
261
262 #if MACH_ASSERT
263 int pmap_ledgers_panic = 1;
264 #endif /* MACH_ASSERT */
265
266 int task_max = CONFIG_TASK_MAX; /* Max number of tasks */
267
268 #if CONFIG_COREDUMP
269 int hwm_user_cores = 0; /* high watermark violations generate user core files */
270 #endif
271
272 #ifdef MACH_BSD
273 extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
274 extern int proc_pid(struct proc *p);
275 extern int proc_selfpid(void);
276 extern char *proc_name_address(struct proc *p);
277 extern uint64_t get_dispatchqueue_offset_from_proc(void *);
278
279 #if CONFIG_MEMORYSTATUS
280 extern void proc_memstat_terminated(struct proc* p, boolean_t set);
281 extern void memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
282 extern void memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
283 #endif /* CONFIG_MEMORYSTATUS */
284
285 #endif /* MACH_BSD */
286
287 /* Forwards */
288
289 static void task_hold_locked(task_t task);
290 static void task_wait_locked(task_t task, boolean_t until_not_runnable);
291 static void task_release_locked(task_t task);
292
293 static void task_synchronizer_destroy_all(task_t task);
294
295 void
296 task_backing_store_privileged(
297 task_t task)
298 {
299 task_lock(task);
300 task->priv_flags |= VM_BACKING_STORE_PRIV;
301 task_unlock(task);
302 return;
303 }
304
305
306 void
307 task_set_64bit(
308 task_t task,
309 boolean_t is64bit)
310 {
311 #if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
312 thread_t thread;
313 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */
314
315 task_lock(task);
316
317 if (is64bit) {
318 if (task_has_64BitAddr(task))
319 goto out;
320 task_set_64BitAddr(task);
321 } else {
322 if ( !task_has_64BitAddr(task))
323 goto out;
324 task_clear_64BitAddr(task);
325 }
326 /* FIXME: On x86, the thread save state flavor can diverge from the
327 * task's 64-bit feature flag due to the 32-bit/64-bit register save
328 * state dichotomy. Since we can be pre-empted in this interval,
329 * certain routines may observe the thread as being in an inconsistent
330 * state with respect to its task's 64-bitness.
331 */
332
333 #if defined(__x86_64__) || defined(__arm64__)
334 queue_iterate(&task->threads, thread, thread_t, task_threads) {
335 thread_mtx_lock(thread);
336 machine_thread_switch_addrmode(thread);
337 thread_mtx_unlock(thread);
338
339 if (thread == current_thread()) {
340 uint64_t arg1, arg2;
341 int urgency;
342 spl_t spl = splsched();
343 /*
344 * This call signals that the current thread changed its 32-bitness.
345 * The other threads were not on core when the 32-bitness was changed,
346 * but current_thread() is on core, and the previous call to
347 * machine_thread_going_on_core() reported a 32-bitness that is now wrong.
348 *
349 * This is needed for bring-up; a different callback should be used
350 * in the future.
351 */
352 thread_lock(thread);
353 urgency = thread_get_urgency(thread, &arg1, &arg2);
354 machine_thread_going_on_core(thread, urgency, 0, 0);
355 thread_unlock(thread);
356 splx(spl);
357 }
358 }
359 #endif /* defined(__x86_64__) || defined(__arm64__) */
360
361 out:
362 task_unlock(task);
363 }
364
365
366 void
367 task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
368 {
369 task_lock(task);
370 task->all_image_info_addr = addr;
371 task->all_image_info_size = size;
372 task_unlock(task);
373 }
374
375 void
376 task_atm_reset(__unused task_t task) {
377
378 #if CONFIG_ATM
379 if (task->atm_context != NULL) {
380 atm_task_descriptor_destroy(task->atm_context);
381 task->atm_context = NULL;
382 }
383 #endif
384
385 }
386
387 void
388 task_bank_reset(__unused task_t task) {
389
390 #if CONFIG_BANK
391 if (task->bank_context != NULL) {
392 bank_task_destroy(task);
393 }
394 #endif
395
396 }
397
398 /*
399 * NOTE: This should only be called when the P_LINTRANSIT
400 * flag is set (the proc_trans lock is held) on the
401 * proc associated with the task.
402 */
403 void
404 task_bank_init(__unused task_t task) {
405
406 #if CONFIG_BANK
407 if (task->bank_context != NULL) {
408 panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
409 }
410 bank_task_initialize(task);
411 #endif
412
413 }
414
415 void
416 task_set_did_exec_flag(task_t task)
417 {
418 task->t_procflags |= TPF_DID_EXEC;
419 }
420
421 void
422 task_clear_exec_copy_flag(task_t task)
423 {
424 task->t_procflags &= ~TPF_EXEC_COPY;
425 }
426
427 /*
428 * This wait event is t_procflags instead of t_flags because t_flags is volatile
429 *
430 * TODO: store the flags in the same place as the event
431 * rdar://problem/28501994
432 */
433 event_t
434 task_get_return_wait_event(task_t task)
435 {
436 return (event_t)&task->t_procflags;
437 }
438
439 void
440 task_clear_return_wait(task_t task)
441 {
442 task_lock(task);
443
444 task->t_flags &= ~TF_LRETURNWAIT;
445
446 if (task->t_flags & TF_LRETURNWAITER) {
447 thread_wakeup(task_get_return_wait_event(task));
448 task->t_flags &= ~TF_LRETURNWAITER;
449 }
450
451 task_unlock(task);
452 }
453
454 void
455 task_wait_to_return(void)
456 {
457 task_t task;
458
459 task = current_task();
460 task_lock(task);
461
462 if (task->t_flags & TF_LRETURNWAIT) {
463 do {
464 task->t_flags |= TF_LRETURNWAITER;
465 assert_wait(task_get_return_wait_event(task), THREAD_UNINT);
466 task_unlock(task);
467
468 thread_block(THREAD_CONTINUE_NULL);
469
470 task_lock(task);
471 } while (task->t_flags & TF_LRETURNWAIT);
472 }
473
474 task_unlock(task);
475
476 thread_bootstrap_return();
477 }
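/*
 * task_wait_to_return() and task_clear_return_wait() form a simple handshake:
 * the calling thread blocks (uninterruptibly) on task_get_return_wait_event()
 * for as long as TF_LRETURNWAIT is set on its task, and task_clear_return_wait()
 * clears that flag and wakes any waiter (tracked via TF_LRETURNWAITER) so the
 * blocked thread can proceed through thread_bootstrap_return().
 */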
478
479 boolean_t
480 task_is_exec_copy(task_t task)
481 {
482 return task_is_exec_copy_internal(task);
483 }
484
485 boolean_t
486 task_did_exec(task_t task)
487 {
488 return task_did_exec_internal(task);
489 }
490
491 boolean_t
492 task_is_active(task_t task)
493 {
494 return task->active;
495 }
496
497 #if TASK_REFERENCE_LEAK_DEBUG
498 #include <kern/btlog.h>
499
500 static btlog_t *task_ref_btlog;
501 #define TASK_REF_OP_INCR 0x1
502 #define TASK_REF_OP_DECR 0x2
503
504 #define TASK_REF_NUM_RECORDS 100000
505 #define TASK_REF_BTDEPTH 7
506
507 void
508 task_reference_internal(task_t task)
509 {
510 void * bt[TASK_REF_BTDEPTH];
511 int numsaved = 0;
512
513 numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
514
515 (void)hw_atomic_add(&(task)->ref_count, 1);
516 btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
517 bt, numsaved);
518 }
519
520 uint32_t
521 task_deallocate_internal(task_t task)
522 {
523 void * bt[TASK_REF_BTDEPTH];
524 int numsaved = 0;
525
526 numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
527
528 btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
529 bt, numsaved);
530 return hw_atomic_sub(&(task)->ref_count, 1);
531 }
532
533 #endif /* TASK_REFERENCE_LEAK_DEBUG */
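/*
 * When TASK_REFERENCE_LEAK_DEBUG is disabled, task_reference_internal() and
 * task_deallocate_internal() are presumably plain atomic increment/decrement
 * wrappers around ref_count (defined in the task header) without the btlog
 * backtrace recording shown above.
 */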
534
535 void
536 task_init(void)
537 {
538
539 lck_grp_attr_setdefault(&task_lck_grp_attr);
540 lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
541 lck_attr_setdefault(&task_lck_attr);
542 lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
543 lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr);
544
545 task_zone = zinit(
546 sizeof(struct task),
547 task_max * sizeof(struct task),
548 TASK_CHUNK * sizeof(struct task),
549 "tasks");
550
551 zone_change(task_zone, Z_NOENCRYPT, TRUE);
552
553
554 /*
555 * Configure per-task memory limit.
556 * The boot-arg is interpreted as Megabytes,
557 * and takes precedence over the device tree.
558 * Setting the boot-arg to 0 disables task limits.
559 */
560 if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
561 sizeof (max_task_footprint_mb))) {
562 /*
563 * No limit was found in boot-args, so go look in the device tree.
564 */
565 if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
566 sizeof(max_task_footprint_mb))) {
567 /*
568 * No limit was found in device tree.
569 */
570 max_task_footprint_mb = 0;
571 }
572 }
573
574 if (max_task_footprint_mb != 0) {
575 #if CONFIG_MEMORYSTATUS
576 if (max_task_footprint_mb < 50) {
577 printf("Warning: max_task_pmem %d below minimum.\n",
578 max_task_footprint_mb);
579 max_task_footprint_mb = 50;
580 }
581 printf("Limiting task physical memory footprint to %d MB\n",
582 max_task_footprint_mb);
583
584 max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes
585
586 /*
587 * Configure the per-task memory limit warning level.
588 * This is computed as a percentage.
589 */
590 max_task_footprint_warning_level = 0;
591
592 if (max_mem < 0x40000000) {
593 /*
594 * On devices with < 1GB of memory:
595 * -- set warnings to 50MB below the per-task limit.
596 */
597 if (max_task_footprint_mb > 50) {
598 max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
599 }
600 } else {
601 /*
602 * On devices with >= 1GB of memory:
603 * -- set warnings to 100MB below the per-task limit.
604 */
605 if (max_task_footprint_mb > 100) {
606 max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
607 }
608 }
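/*
 * Worked example: on a device with >= 1GB of memory and max_task_pmem=500,
 * this computes ((500 - 100) * 100) / 500 = 80%, i.e. the warning fires at
 * 400 MB, 100 MB below the 500 MB limit.
 */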
609
610 /*
611 * Never allow warning level to land below the default.
612 */
613 if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
614 max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
615 }
616
617 printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);
618
619 #else
620 printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
621 #endif /* CONFIG_MEMORYSTATUS */
622 }
623
624 #if MACH_ASSERT
625 PE_parse_boot_argn("pmap_ledgers_panic", &pmap_ledgers_panic,
626 sizeof (pmap_ledgers_panic));
627 #endif /* MACH_ASSERT */
628
629 #if CONFIG_COREDUMP
630 if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
631 sizeof (hwm_user_cores))) {
632 hwm_user_cores = 0;
633 }
634 #endif
635
636 proc_init_cpumon_params();
637
638 if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof (task_wakeups_monitor_rate))) {
639 task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
640 }
641
642 if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof (task_wakeups_monitor_interval))) {
643 task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
644 }
645
646 if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
647 sizeof (task_wakeups_monitor_ustackshots_trigger_pct))) {
648 task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
649 }
650
651 if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource,
652 sizeof (disable_exc_resource))) {
653 disable_exc_resource = 0;
654 }
655
656 if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof (task_iomon_limit_mb))) {
657 task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
658 }
659
660 if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof (task_iomon_interval_secs))) {
661 task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
662 }
663
664 if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof (io_telemetry_limit))) {
665 io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
666 }
667
668 /*
669 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
670 * sets up the ledgers for the default coalition. If we don't have coalitions,
671 * then we have to call it now.
672 */
673 #if CONFIG_COALITIONS
674 assert(task_ledger_template);
675 #else /* CONFIG_COALITIONS */
676 init_task_ledgers();
677 #endif /* CONFIG_COALITIONS */
678
679 #if TASK_REFERENCE_LEAK_DEBUG
680 task_ref_btlog = btlog_create(TASK_REF_NUM_RECORDS, TASK_REF_BTDEPTH, TRUE /* caller_will_remove_entries_for_element? */);
681 assert(task_ref_btlog);
682 #endif
683
684 /*
685 * Create the kernel task as the first task.
686 */
687 #ifdef __LP64__
688 if (task_create_internal(TASK_NULL, NULL, FALSE, TRUE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
689 #else
690 if (task_create_internal(TASK_NULL, NULL, FALSE, FALSE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
691 #endif
692 panic("task_init\n");
693
694 vm_map_deallocate(kernel_task->map);
695 kernel_task->map = kernel_map;
696 lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
697 }
698
699 /*
700 * Create a task running in the kernel address space. It may
701 * have its own map of size mem_size and may have ipc privileges.
702 */
703 kern_return_t
704 kernel_task_create(
705 __unused task_t parent_task,
706 __unused vm_offset_t map_base,
707 __unused vm_size_t map_size,
708 __unused task_t *child_task)
709 {
710 return (KERN_INVALID_ARGUMENT);
711 }
712
713 kern_return_t
714 task_create(
715 task_t parent_task,
716 __unused ledger_port_array_t ledger_ports,
717 __unused mach_msg_type_number_t num_ledger_ports,
718 __unused boolean_t inherit_memory,
719 __unused task_t *child_task) /* OUT */
720 {
721 if (parent_task == TASK_NULL)
722 return(KERN_INVALID_ARGUMENT);
723
724 /*
725 * No longer supported: too many calls assume that a task has a valid
726 * process attached.
727 */
728 return(KERN_FAILURE);
729 }
730
731 kern_return_t
732 host_security_create_task_token(
733 host_security_t host_security,
734 task_t parent_task,
735 __unused security_token_t sec_token,
736 __unused audit_token_t audit_token,
737 __unused host_priv_t host_priv,
738 __unused ledger_port_array_t ledger_ports,
739 __unused mach_msg_type_number_t num_ledger_ports,
740 __unused boolean_t inherit_memory,
741 __unused task_t *child_task) /* OUT */
742 {
743 if (parent_task == TASK_NULL)
744 return(KERN_INVALID_ARGUMENT);
745
746 if (host_security == HOST_NULL)
747 return(KERN_INVALID_SECURITY);
748
749 /*
750 * No longer supported.
751 */
752 return(KERN_FAILURE);
753 }
754
755 /*
756 * Task ledgers
757 * ------------
758 *
759 * phys_footprint
760 * Physical footprint: This is the sum of:
761 * + (internal - alternate_accounting)
762 * + (internal_compressed - alternate_accounting_compressed)
763 * + iokit_mapped
764 * + purgeable_nonvolatile
765 * + purgeable_nonvolatile_compressed
766 * + page_table
767 *
768 * internal
769 * The task's anonymous memory, which on iOS is always resident.
770 *
771 * internal_compressed
772 * Amount of this task's internal memory which is held by the compressor.
773 * Such memory is no longer actually resident for the task [i.e., resident in its pmap],
774 * and could be either decompressed back into memory, or paged out to storage, depending
775 * on our implementation.
776 *
777 * iokit_mapped
778 * IOKit mappings: The total size of all IOKit mappings in this task [regardless of
779 * clean/dirty or internal/external state].
780 *
781 * alternate_accounting
782 * The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
783 * are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
784 * double counting.
785 */
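/*
 * Worked example of the phys_footprint sum above (illustrative numbers only):
 * with internal = 100 MB, alternate_accounting = 10 MB, internal_compressed =
 * 20 MB, alternate_accounting_compressed = 0, iokit_mapped = 30 MB,
 * purgeable_nonvolatile = 5 MB, purgeable_nonvolatile_compressed = 0 and
 * page_table = 2 MB, the footprint is
 * (100 - 10) + (20 - 0) + 30 + 5 + 0 + 2 = 147 MB.
 */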
786 void
787 init_task_ledgers(void)
788 {
789 ledger_template_t t;
790
791 assert(task_ledger_template == NULL);
792 assert(kernel_task == TASK_NULL);
793
794 #if MACH_ASSERT
795 PE_parse_boot_argn("pmap_ledgers_panic", &pmap_ledgers_panic,
796 sizeof (pmap_ledgers_panic));
797 #endif /* MACH_ASSERT */
798
799 if ((t = ledger_template_create("Per-task ledger")) == NULL)
800 panic("couldn't create task ledger template");
801
802 task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
803 task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
804 "physmem", "bytes");
805 task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
806 "bytes");
807 task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
808 "bytes");
809 task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
810 "bytes");
811 task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
812 "bytes");
813 task_ledgers.iokit_mapped = ledger_entry_add(t, "iokit_mapped", "mappings",
814 "bytes");
815 task_ledgers.alternate_accounting = ledger_entry_add(t, "alternate_accounting", "physmem",
816 "bytes");
817 task_ledgers.alternate_accounting_compressed = ledger_entry_add(t, "alternate_accounting_compressed", "physmem",
818 "bytes");
819 task_ledgers.page_table = ledger_entry_add(t, "page_table", "physmem",
820 "bytes");
821 task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
822 "bytes");
823 task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
824 "bytes");
825 task_ledgers.purgeable_volatile = ledger_entry_add(t, "purgeable_volatile", "physmem", "bytes");
826 task_ledgers.purgeable_nonvolatile = ledger_entry_add(t, "purgeable_nonvolatile", "physmem", "bytes");
827 task_ledgers.purgeable_volatile_compressed = ledger_entry_add(t, "purgeable_volatile_compress", "physmem", "bytes");
828 task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add(t, "purgeable_nonvolatile_compress", "physmem", "bytes");
829 task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
830 "count");
831 task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
832 "count");
833
834 #if CONFIG_SCHED_SFI
835 sfi_class_id_t class_id, ledger_alias;
836 for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
837 task_ledgers.sfi_wait_times[class_id] = -1;
838 }
839
840 /* don't account for UNSPECIFIED */
841 for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
842 ledger_alias = sfi_get_ledger_alias_for_class(class_id);
843 if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
844 /* Check to see if alias has been registered yet */
845 if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
846 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
847 } else {
848 /* Otherwise, initialize it first */
849 task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
850 }
851 } else {
852 task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
853 }
854
855 if (task_ledgers.sfi_wait_times[class_id] < 0) {
856 panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
857 }
858 }
859
860 assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID -1] != -1);
861 #endif /* CONFIG_SCHED_SFI */
862
863 #ifdef CONFIG_BANK
864 task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
865 task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
866 #endif
867 task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
868 task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
869
870 if ((task_ledgers.cpu_time < 0) ||
871 (task_ledgers.tkm_private < 0) ||
872 (task_ledgers.tkm_shared < 0) ||
873 (task_ledgers.phys_mem < 0) ||
874 (task_ledgers.wired_mem < 0) ||
875 (task_ledgers.internal < 0) ||
876 (task_ledgers.iokit_mapped < 0) ||
877 (task_ledgers.alternate_accounting < 0) ||
878 (task_ledgers.alternate_accounting_compressed < 0) ||
879 (task_ledgers.page_table < 0) ||
880 (task_ledgers.phys_footprint < 0) ||
881 (task_ledgers.internal_compressed < 0) ||
882 (task_ledgers.purgeable_volatile < 0) ||
883 (task_ledgers.purgeable_nonvolatile < 0) ||
884 (task_ledgers.purgeable_volatile_compressed < 0) ||
885 (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
886 (task_ledgers.platform_idle_wakeups < 0) ||
887 (task_ledgers.interrupt_wakeups < 0) ||
888 #ifdef CONFIG_BANK
889 (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
890 #endif
891 (task_ledgers.physical_writes < 0) ||
892 (task_ledgers.logical_writes < 0)
893 ) {
894 panic("couldn't create entries for task ledger template");
895 }
896
897 ledger_track_credit_only(t, task_ledgers.phys_footprint);
898 ledger_track_credit_only(t, task_ledgers.page_table);
899 ledger_track_credit_only(t, task_ledgers.internal);
900 ledger_track_credit_only(t, task_ledgers.internal_compressed);
901 ledger_track_credit_only(t, task_ledgers.iokit_mapped);
902 ledger_track_credit_only(t, task_ledgers.alternate_accounting);
903 ledger_track_credit_only(t, task_ledgers.alternate_accounting_compressed);
904 ledger_track_credit_only(t, task_ledgers.purgeable_volatile);
905 ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile);
906 ledger_track_credit_only(t, task_ledgers.purgeable_volatile_compressed);
907 ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile_compressed);
908
909 ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
910 #if MACH_ASSERT
911 if (pmap_ledgers_panic) {
912 ledger_panic_on_negative(t, task_ledgers.phys_footprint);
913 ledger_panic_on_negative(t, task_ledgers.page_table);
914 ledger_panic_on_negative(t, task_ledgers.internal);
915 ledger_panic_on_negative(t, task_ledgers.internal_compressed);
916 ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
917 ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
918 ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
919 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
920 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
921 ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
922 ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
923 }
924 #endif /* MACH_ASSERT */
925
926 #if CONFIG_MEMORYSTATUS
927 ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
928 #endif /* CONFIG_MEMORYSTATUS */
929
930 ledger_set_callback(t, task_ledgers.interrupt_wakeups,
931 task_wakeups_rate_exceeded, NULL, NULL);
932 ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
933 ledger_set_callback(t, task_ledgers.logical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
934 task_ledger_template = t;
935 }
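/*
 * The template built above is instantiated once per task; task_create_internal()
 * below does, in essence:
 *
 *	ledger = ledger_instantiate(task_ledger_template,
 *	                            LEDGER_CREATE_ACTIVE_ENTRIES);
 *	new_task->ledger = ledger;
 *
 * so every task carries its own ledger whose entries line up with the indices
 * recorded in task_ledgers.
 */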
936
937 kern_return_t
938 task_create_internal(
939 task_t parent_task,
940 coalition_t *parent_coalitions __unused,
941 boolean_t inherit_memory,
942 __unused boolean_t is_64bit,
943 uint32_t t_flags,
944 uint32_t t_procflags,
945 task_t *child_task) /* OUT */
946 {
947 task_t new_task;
948 vm_shared_region_t shared_region;
949 ledger_t ledger = NULL;
950
951 new_task = (task_t) zalloc(task_zone);
952
953 if (new_task == TASK_NULL)
954 return(KERN_RESOURCE_SHORTAGE);
955
956 /* one ref for just being alive; one for our caller */
957 new_task->ref_count = 2;
958
959 /* allocate with active entries */
960 assert(task_ledger_template != NULL);
961 if ((ledger = ledger_instantiate(task_ledger_template,
962 LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
963 zfree(task_zone, new_task);
964 return(KERN_RESOURCE_SHORTAGE);
965 }
966
967 new_task->ledger = ledger;
968
969 #if defined(CONFIG_SCHED_MULTIQ)
970 new_task->sched_group = sched_group_create();
971 #endif
972
973 /* if inherit_memory is true, parent_task MUST not be NULL */
974 if (!(t_flags & TF_CORPSE_FORK) && inherit_memory)
975 new_task->map = vm_map_fork(ledger, parent_task->map, 0);
976 else
977 new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
978 (vm_map_offset_t)(VM_MIN_ADDRESS),
979 (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
980
981 /* Inherit memlock limit from parent */
982 if (parent_task)
983 vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
984
985 lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
986 queue_init(&new_task->threads);
987 new_task->suspend_count = 0;
988 new_task->thread_count = 0;
989 new_task->active_thread_count = 0;
990 new_task->user_stop_count = 0;
991 new_task->legacy_stop_count = 0;
992 new_task->active = TRUE;
993 new_task->halting = FALSE;
994 new_task->user_data = NULL;
995 new_task->priv_flags = 0;
996 new_task->t_flags = t_flags;
997 new_task->t_procflags = t_procflags;
998 new_task->importance = 0;
999 new_task->corpse_info_kernel = NULL;
1000 new_task->exec_token = 0;
1001
1002 #if CONFIG_ATM
1003 new_task->atm_context = NULL;
1004 #endif
1005 #if CONFIG_BANK
1006 new_task->bank_context = NULL;
1007 #endif
1008
1009 #ifdef MACH_BSD
1010 new_task->bsd_info = NULL;
1011 new_task->corpse_info = NULL;
1012 #endif /* MACH_BSD */
1013
1014 #if CONFIG_MACF
1015 new_task->crash_label = NULL;
1016 #endif
1017
1018 #if CONFIG_MEMORYSTATUS
1019 if (max_task_footprint != 0) {
1020 ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
1021 }
1022 #endif /* CONFIG_MEMORYSTATUS */
1023
1024 if (task_wakeups_monitor_rate != 0) {
1025 uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
1026 int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
1027 task_wakeups_monitor_ctl(new_task, &flags, &rate);
1028 }
1029
1030 #if CONFIG_IO_ACCOUNTING
1031 uint32_t flags = IOMON_ENABLE;
1032 task_io_monitor_ctl(new_task, &flags);
1033 #endif /* CONFIG_IO_ACCOUNTING */
1034
1035 #if defined(__i386__) || defined(__x86_64__)
1036 new_task->i386_ldt = 0;
1037 #endif
1038
1039 new_task->task_debug = NULL;
1040
1041 #if DEVELOPMENT || DEBUG
1042 new_task->task_unnested = FALSE;
1043 new_task->task_disconnected_count = 0;
1044 #endif
1045 queue_init(&new_task->semaphore_list);
1046 new_task->semaphores_owned = 0;
1047
1048 ipc_task_init(new_task, parent_task);
1049
1050 new_task->vtimers = 0;
1051
1052 new_task->shared_region = NULL;
1053
1054 new_task->affinity_space = NULL;
1055
1056 new_task->t_chud = 0;
1057
1058 new_task->pidsuspended = FALSE;
1059 new_task->frozen = FALSE;
1060 new_task->changing_freeze_state = FALSE;
1061 new_task->rusage_cpu_flags = 0;
1062 new_task->rusage_cpu_percentage = 0;
1063 new_task->rusage_cpu_interval = 0;
1064 new_task->rusage_cpu_deadline = 0;
1065 new_task->rusage_cpu_callt = NULL;
1066 #if MACH_ASSERT
1067 new_task->suspends_outstanding = 0;
1068 #endif
1069
1070 #if HYPERVISOR
1071 new_task->hv_task_target = NULL;
1072 #endif /* HYPERVISOR */
1073
1074
1075 new_task->mem_notify_reserved = 0;
1076 #if IMPORTANCE_INHERITANCE
1077 new_task->task_imp_base = NULL;
1078 #endif /* IMPORTANCE_INHERITANCE */
1079
1080 #if defined(__x86_64__)
1081 new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
1082 #endif
1083
1084 new_task->requested_policy = default_task_requested_policy;
1085 new_task->effective_policy = default_task_effective_policy;
1086
1087 if (parent_task != TASK_NULL) {
1088 new_task->sec_token = parent_task->sec_token;
1089 new_task->audit_token = parent_task->audit_token;
1090
1091 /* inherit the parent's shared region */
1092 shared_region = vm_shared_region_get(parent_task);
1093 vm_shared_region_set(new_task, shared_region);
1094
1095 if(task_has_64BitAddr(parent_task))
1096 task_set_64BitAddr(new_task);
1097 new_task->all_image_info_addr = parent_task->all_image_info_addr;
1098 new_task->all_image_info_size = parent_task->all_image_info_size;
1099
1100 #if defined(__i386__) || defined(__x86_64__)
1101 if (inherit_memory && parent_task->i386_ldt)
1102 new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
1103 #endif
1104 if (inherit_memory && parent_task->affinity_space)
1105 task_affinity_create(parent_task, new_task);
1106
1107 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
1108
1109 #if IMPORTANCE_INHERITANCE
1110 ipc_importance_task_t new_task_imp = IIT_NULL;
1111 boolean_t inherit_receive = TRUE;
1112
1113 if (task_is_marked_importance_donor(parent_task)) {
1114 new_task_imp = ipc_importance_for_task(new_task, FALSE);
1115 assert(IIT_NULL != new_task_imp);
1116 ipc_importance_task_mark_donor(new_task_imp, TRUE);
1117 }
1118
1119 if (inherit_receive) {
1120 if (task_is_marked_importance_receiver(parent_task)) {
1121 if (IIT_NULL == new_task_imp)
1122 new_task_imp = ipc_importance_for_task(new_task, FALSE);
1123 assert(IIT_NULL != new_task_imp);
1124 ipc_importance_task_mark_receiver(new_task_imp, TRUE);
1125 }
1126 if (task_is_marked_importance_denap_receiver(parent_task)) {
1127 if (IIT_NULL == new_task_imp)
1128 new_task_imp = ipc_importance_for_task(new_task, FALSE);
1129 assert(IIT_NULL != new_task_imp);
1130 ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
1131 }
1132 }
1133
1134 if (IIT_NULL != new_task_imp) {
1135 assert(new_task->task_imp_base == new_task_imp);
1136 ipc_importance_task_release(new_task_imp);
1137 }
1138 #endif /* IMPORTANCE_INHERITANCE */
1139
1140 new_task->priority = BASEPRI_DEFAULT;
1141 new_task->max_priority = MAXPRI_USER;
1142
1143 task_policy_create(new_task, parent_task);
1144 } else {
1145 new_task->sec_token = KERNEL_SECURITY_TOKEN;
1146 new_task->audit_token = KERNEL_AUDIT_TOKEN;
1147 #ifdef __LP64__
1148 if(is_64bit)
1149 task_set_64BitAddr(new_task);
1150 #endif
1151 new_task->all_image_info_addr = (mach_vm_address_t)0;
1152 new_task->all_image_info_size = (mach_vm_size_t)0;
1153
1154 new_task->pset_hint = PROCESSOR_SET_NULL;
1155
1156 if (kernel_task == TASK_NULL) {
1157 new_task->priority = BASEPRI_KERNEL;
1158 new_task->max_priority = MAXPRI_KERNEL;
1159 } else {
1160 new_task->priority = BASEPRI_DEFAULT;
1161 new_task->max_priority = MAXPRI_USER;
1162 }
1163 }
1164
1165 bzero(new_task->coalition, sizeof(new_task->coalition));
1166 for (int i = 0; i < COALITION_NUM_TYPES; i++)
1167 queue_chain_init(new_task->task_coalition[i]);
1168
1169 /* Allocate I/O Statistics */
1170 new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info));
1171 assert(new_task->task_io_stats != NULL);
1172 bzero(new_task->task_io_stats, sizeof(struct io_stat_info));
1173
1174 bzero(&(new_task->cpu_time_qos_stats), sizeof(struct _cpu_time_qos_stats));
1175
1176 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
1177
1178 /* Copy resource accounting info from the parent for a corpse-forked task. */
1179 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
1180 task_rollup_accounting_info(new_task, parent_task);
1181 } else {
1182 /* Initialize to zero for standard fork/spawn case */
1183 new_task->total_user_time = 0;
1184 new_task->total_system_time = 0;
1185 new_task->faults = 0;
1186 new_task->pageins = 0;
1187 new_task->cow_faults = 0;
1188 new_task->messages_sent = 0;
1189 new_task->messages_received = 0;
1190 new_task->syscalls_mach = 0;
1191 new_task->syscalls_unix = 0;
1192 new_task->c_switch = 0;
1193 new_task->p_switch = 0;
1194 new_task->ps_switch = 0;
1195 new_task->low_mem_notified_warn = 0;
1196 new_task->low_mem_notified_critical = 0;
1197 new_task->purged_memory_warn = 0;
1198 new_task->purged_memory_critical = 0;
1199 new_task->low_mem_privileged_listener = 0;
1200 new_task->memlimit_is_active = 0;
1201 new_task->memlimit_is_fatal = 0;
1202 new_task->memlimit_active_exc_resource = 0;
1203 new_task->memlimit_inactive_exc_resource = 0;
1204 new_task->task_timer_wakeups_bin_1 = 0;
1205 new_task->task_timer_wakeups_bin_2 = 0;
1206 new_task->task_gpu_ns = 0;
1207 new_task->task_immediate_writes = 0;
1208 new_task->task_deferred_writes = 0;
1209 new_task->task_invalidated_writes = 0;
1210 new_task->task_metadata_writes = 0;
1211 new_task->task_energy = 0;
1212 }
1213
1214
1215 #if CONFIG_COALITIONS
1216 if (!(t_flags & TF_CORPSE_FORK)) {
1217 /* TODO: there is no graceful failure path here... */
1218 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1219 coalitions_adopt_task(parent_coalitions, new_task);
1220 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1221 /*
1222 * all tasks at least have a resource coalition, so
1223 * if the parent has one then inherit all coalitions
1224 * the parent is a part of
1225 */
1226 coalitions_adopt_task(parent_task->coalition, new_task);
1227 } else {
1228 /* TODO: assert that new_task will be PID 1 (launchd) */
1229 coalitions_adopt_init_task(new_task);
1230 }
1231 } else {
1232 coalitions_adopt_corpse_task(new_task);
1233 }
1234
1235 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1236 panic("created task is not a member of a resource coalition");
1237 }
1238 #endif /* CONFIG_COALITIONS */
1239
1240 new_task->dispatchqueue_offset = 0;
1241 if (parent_task != NULL) {
1242 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1243 }
1244
1245 if (vm_backing_store_low && parent_task != NULL)
1246 new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);
1247
1248 new_task->task_volatile_objects = 0;
1249 new_task->task_nonvolatile_objects = 0;
1250 new_task->task_purgeable_disowning = FALSE;
1251 new_task->task_purgeable_disowned = FALSE;
1252
1253 #if CONFIG_SECLUDED_MEMORY
1254 new_task->task_can_use_secluded_mem = FALSE;
1255 new_task->task_could_use_secluded_mem = FALSE;
1256 new_task->task_could_also_use_secluded_mem = FALSE;
1257 #endif /* CONFIG_SECLUDED_MEMORY */
1258
1259 queue_init(&new_task->io_user_clients);
1260
1261 ipc_task_enable(new_task);
1262
1263 lck_mtx_lock(&tasks_threads_lock);
1264 queue_enter(&tasks, new_task, task_t, tasks);
1265 tasks_count++;
1266 if (tasks_suspend_state) {
1267 task_suspend_internal(new_task);
1268 }
1269 lck_mtx_unlock(&tasks_threads_lock);
1270
1271 *child_task = new_task;
1272 return(KERN_SUCCESS);
1273 }
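/*
 * Note on lifetime: task_create_internal() returns a task whose ref_count is 2,
 * one reference for simply being alive and one for the caller (see the comment
 * near the top of the function), so the caller's reference must eventually be
 * dropped with task_deallocate().
 */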
1274
1275 /*
1276 * task_rollup_accounting_info
1277 *
1278 * Roll up accounting stats. Used to rollup stats
1279 * for exec copy task and corpse fork.
1280 */
1281 void
1282 task_rollup_accounting_info(task_t to_task, task_t from_task)
1283 {
1284 assert(from_task != to_task);
1285
1286 to_task->total_user_time = from_task->total_user_time;
1287 to_task->total_system_time = from_task->total_system_time;
1288 to_task->faults = from_task->faults;
1289 to_task->pageins = from_task->pageins;
1290 to_task->cow_faults = from_task->cow_faults;
1291 to_task->messages_sent = from_task->messages_sent;
1292 to_task->messages_received = from_task->messages_received;
1293 to_task->syscalls_mach = from_task->syscalls_mach;
1294 to_task->syscalls_unix = from_task->syscalls_unix;
1295 to_task->c_switch = from_task->c_switch;
1296 to_task->p_switch = from_task->p_switch;
1297 to_task->ps_switch = from_task->ps_switch;
1298 to_task->extmod_statistics = from_task->extmod_statistics;
1299 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1300 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1301 to_task->purged_memory_warn = from_task->purged_memory_warn;
1302 to_task->purged_memory_critical = from_task->purged_memory_critical;
1303 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1304 *to_task->task_io_stats = *from_task->task_io_stats;
1305 to_task->cpu_time_qos_stats = from_task->cpu_time_qos_stats;
1306 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1307 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1308 to_task->task_gpu_ns = from_task->task_gpu_ns;
1309 to_task->task_immediate_writes = from_task->task_immediate_writes;
1310 to_task->task_deferred_writes = from_task->task_deferred_writes;
1311 to_task->task_invalidated_writes = from_task->task_invalidated_writes;
1312 to_task->task_metadata_writes = from_task->task_metadata_writes;
1313 to_task->task_energy = from_task->task_energy;
1314
1315 /* Skip ledger roll up for memory accounting entries */
1316 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1317 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1318 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1319 #if CONFIG_SCHED_SFI
1320 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1321 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1322 }
1323 #endif
1324 #if CONFIG_BANK
1325 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1326 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
1327 #endif
1328 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1329 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
1330 }
1331
1332 int task_dropped_imp_count = 0;
1333
1334 /*
1335 * task_deallocate:
1336 *
1337 * Drop a reference on a task.
1338 */
1339 void
1340 task_deallocate(
1341 task_t task)
1342 {
1343 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
1344 uint32_t refs;
1345
1346 if (task == TASK_NULL)
1347 return;
1348
1349 refs = task_deallocate_internal(task);
1350
1351 #if IMPORTANCE_INHERITANCE
1352 if (refs > 1)
1353 return;
1354
1355 if (refs == 1) {
1356 /*
1357 * If the last ref potentially comes from the task's importance,
1358 * disconnect it. But more task refs may be added before
1359 * that completes, so wait for the reference to go to zero
1360 * naturally (it may happen on a recursive task_deallocate()
1361 * from the ipc_importance_disconnect_task() call).
1362 */
1363 if (IIT_NULL != task->task_imp_base)
1364 ipc_importance_disconnect_task(task);
1365 return;
1366 }
1367 #else
1368 if (refs > 0)
1369 return;
1370 #endif /* IMPORTANCE_INHERITANCE */
1371
1372 lck_mtx_lock(&tasks_threads_lock);
1373 queue_remove(&terminated_tasks, task, task_t, tasks);
1374 terminated_tasks_count--;
1375 lck_mtx_unlock(&tasks_threads_lock);
1376
1377 /*
1378 * remove the reference on atm descriptor
1379 */
1380 task_atm_reset(task);
1381
1382 /*
1383 * remove the reference on bank context
1384 */
1385 task_bank_reset(task);
1386
1387 if (task->task_io_stats)
1388 kfree(task->task_io_stats, sizeof(struct io_stat_info));
1389
1390 /*
1391 * Give the machine dependent code a chance
1392 * to perform cleanup before ripping apart
1393 * the task.
1394 */
1395 machine_task_terminate(task);
1396
1397 ipc_task_terminate(task);
1398
1399 /* let iokit know */
1400 iokit_task_terminate(task);
1401
1402 if (task->affinity_space)
1403 task_affinity_deallocate(task);
1404
1405 #if MACH_ASSERT
1406 if (task->ledger != NULL &&
1407 task->map != NULL &&
1408 task->map->pmap != NULL &&
1409 task->map->pmap->ledger != NULL) {
1410 assert(task->ledger == task->map->pmap->ledger);
1411 }
1412 #endif /* MACH_ASSERT */
1413
1414 vm_purgeable_disown(task);
1415 assert(task->task_purgeable_disowned);
1416 if (task->task_volatile_objects != 0 ||
1417 task->task_nonvolatile_objects != 0) {
1418 panic("task_deallocate(%p): "
1419 "volatile_objects=%d nonvolatile_objects=%d\n",
1420 task,
1421 task->task_volatile_objects,
1422 task->task_nonvolatile_objects);
1423 }
1424
1425 vm_map_deallocate(task->map);
1426 is_release(task->itk_space);
1427
1428 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
1429 &interrupt_wakeups, &debit);
1430 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
1431 &platform_idle_wakeups, &debit);
1432
1433 #if defined(CONFIG_SCHED_MULTIQ)
1434 sched_group_destroy(task->sched_group);
1435 #endif
1436
1437 /* Accumulate statistics for dead tasks */
1438 lck_spin_lock(&dead_task_statistics_lock);
1439 dead_task_statistics.total_user_time += task->total_user_time;
1440 dead_task_statistics.total_system_time += task->total_system_time;
1441
1442 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
1443 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
1444
1445 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
1446 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
1447
1448 lck_spin_unlock(&dead_task_statistics_lock);
1449 lck_mtx_destroy(&task->lock, &task_lck_grp);
1450
1451 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
1452 &debit)) {
1453 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
1454 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
1455 }
1456 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
1457 &debit)) {
1458 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
1459 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
1460 }
1461 ledger_dereference(task->ledger);
1462
1463 #if TASK_REFERENCE_LEAK_DEBUG
1464 btlog_remove_entries_for_element(task_ref_btlog, task);
1465 #endif
1466
1467 #if CONFIG_COALITIONS
1468 task_release_coalitions(task);
1469 #endif /* CONFIG_COALITIONS */
1470
1471 bzero(task->coalition, sizeof(task->coalition));
1472
1473 #if MACH_BSD
1474 /* clean up collected information since last reference to task is gone */
1475 if (task->corpse_info) {
1476 task_crashinfo_destroy(task->corpse_info, RELEASE_CORPSE_REF);
1477 task->corpse_info = NULL;
1478 }
1479 #endif
1480 if (task->corpse_info_kernel) {
1481 kfree(task->corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
1482 }
1483
1484 #if CONFIG_MACF
1485 if (task->crash_label) {
1486 mac_exc_action_label_task_destroy(task);
1487 }
1488 #endif
1489
1490 zfree(task_zone, task);
1491 }
1492
1493 /*
1494 * task_name_deallocate:
1495 *
1496 * Drop a reference on a task name.
1497 */
1498 void
1499 task_name_deallocate(
1500 task_name_t task_name)
1501 {
1502 return(task_deallocate((task_t)task_name));
1503 }
1504
1505 /*
1506 * task_inspect_deallocate:
1507 *
1508 * Drop a task inspection reference.
1509 */
1510 void
1511 task_inspect_deallocate(
1512 task_inspect_t task_inspect)
1513 {
1514 return(task_deallocate((task_t)task_inspect));
1515 }
1516
1517 /*
1518 * task_suspension_token_deallocate:
1519 *
1520 * Drop a reference on a task suspension token.
1521 */
1522 void
1523 task_suspension_token_deallocate(
1524 task_suspension_token_t token)
1525 {
1526 return(task_deallocate((task_t)token));
1527 }
1528
1529
1530 /*
1531 * task_collect_crash_info:
1532 *
1533 * collect crash info from bsd and mach based data
1534 */
1535 kern_return_t
1536 task_collect_crash_info(task_t task, struct proc *proc, int is_corpse_fork)
1537 {
1538 kern_return_t kr = KERN_SUCCESS;
1539
1540 kcdata_descriptor_t crash_data = NULL;
1541 kcdata_descriptor_t crash_data_release = NULL;
1542 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
1543 mach_vm_offset_t crash_data_ptr = 0;
1544 void *crash_data_kernel = NULL;
1545 void *crash_data_kernel_release = NULL;
1546 int corpse_blob_kernel_alloc = (is_corpse_fork || unify_corpse_blob_alloc);
1547
1548 if (!corpses_enabled()) {
1549 return KERN_NOT_SUPPORTED;
1550 }
1551
1552 task_lock(task);
1553
1554 assert(is_corpse_fork || task->bsd_info != NULL);
1555 if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
1556 #if CONFIG_MACF
1557 /* Update the corpse label, used by the exception delivery mac hook */
1558 mac_exc_action_label_task_update(task, proc);
1559 #endif
1560 task_unlock(task);
1561
1562 if (!corpse_blob_kernel_alloc) {
1563 /* map crash data memory in task's vm map */
1564 kr = mach_vm_allocate(task->map, &crash_data_ptr, size, (VM_MAKE_TAG(VM_MEMORY_CORPSEINFO) | VM_FLAGS_ANYWHERE));
1565 } else {
1566 crash_data_kernel = (void *) kalloc(CORPSEINFO_ALLOCATION_SIZE);
1567 if (crash_data_kernel == NULL)
1568 kr = KERN_RESOURCE_SHORTAGE;
1569 else bzero(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
1570 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
1571 }
1572 if (kr != KERN_SUCCESS)
1573 goto out_no_lock;
1574
1575 /* Do not get a corpse ref for corpse fork */
1576 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size, is_corpse_fork ? !GET_CORPSE_REF : GET_CORPSE_REF, corpse_blob_kernel_alloc ? KCFLAG_USE_MEMCOPY: KCFLAG_USE_COPYOUT);
1577 if (crash_data) {
1578 task_lock(task);
1579 crash_data_release = task->corpse_info;
1580 crash_data_kernel_release = task->corpse_info_kernel;
1581 task->corpse_info = crash_data;
1582 task->corpse_info_kernel = crash_data_kernel;
1583
1584 task_unlock(task);
1585 kr = KERN_SUCCESS;
1586 } else {
1587 /* if failed to create corpse info, free the mapping */
1588 if (!corpse_blob_kernel_alloc) {
1589 if (KERN_SUCCESS != mach_vm_deallocate(task->map, crash_data_ptr, size)) {
1590 printf("mach_vm_deallocate failed to clear corpse_data for pid %d.\n", task_pid(task));
1591 }
1592 } else {
1593 kfree(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
1594 }
1595 kr = KERN_FAILURE;
1596 }
1597
1598 if (crash_data_release != NULL) {
1599 task_crashinfo_destroy(crash_data_release, is_corpse_fork ? !RELEASE_CORPSE_REF : RELEASE_CORPSE_REF);
1600 }
1601 if (crash_data_kernel_release != NULL) {
1602 kfree(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
1603 }
1604 } else {
1605 task_unlock(task);
1606 }
1607
1608 out_no_lock:
1609 return kr;
1610 }
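/*
 * Summary of the allocation strategy above: for corpse forks (or when
 * unify_corpse_blob_alloc is set) the crash blob lives in kernel memory
 * (kalloc'ed and tagged KCFLAG_USE_MEMCOPY); otherwise it is mapped into the
 * task's own address space with mach_vm_allocate() and tagged
 * KCFLAG_USE_COPYOUT. Any previously collected blob is released once the new
 * one has been installed.
 */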
1611
1612 /*
1613 * task_deliver_crash_notification:
1614 *
1615 * Makes outcall to registered host port for a corpse.
1616 */
1617 kern_return_t
1618 task_deliver_crash_notification(task_t task, thread_t thread, mach_exception_data_type_t subcode)
1619 {
1620 kcdata_descriptor_t crash_info = task->corpse_info;
1621 thread_t th_iter = NULL;
1622 kern_return_t kr = KERN_SUCCESS;
1623 wait_interrupt_t wsave;
1624 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
1625 ipc_port_t task_port, old_notify;
1626
1627 if (crash_info == NULL)
1628 return KERN_FAILURE;
1629
1630 task_lock(task);
1631 if (task_is_a_corpse_fork(task)) {
1632 /* Populate code with EXC_RESOURCE for corpse fork */
1633 code[0] = EXC_RESOURCE;
1634 code[1] = subcode;
1635 } else if (unify_corpse_blob_alloc) {
1636 /* Populate code with EXC_CRASH for corpses */
1637 code[0] = EXC_CRASH;
1638 code[1] = 0;
1639 /* Update the code[1] if the boot-arg corpse_for_fatal_memkill is set */
1640 if (corpse_for_fatal_memkill) {
1641 code[1] = subcode;
1642 }
1643 } else {
1644 /* Populate code with address and length for EXC_CRASH */
1645 code[0] = crash_info->kcd_addr_begin;
1646 code[1] = crash_info->kcd_length;
1647 }
1648 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
1649 {
1650 if (th_iter->corpse_dup == FALSE) {
1651 ipc_thread_reset(th_iter);
1652 }
1653 }
1654 task_unlock(task);
1655
1656 /* Arm the no-senders notification for the task port */
1657 task_reference(task);
1658 task_port = convert_task_to_port(task);
1659 ip_lock(task_port);
1660 assert(ip_active(task_port));
1661 ipc_port_nsrequest(task_port, task_port->ip_mscount, ipc_port_make_sonce_locked(task_port), &old_notify);
1662 /* port unlocked */
1663 assert(IP_NULL == old_notify);
1664
1665 wsave = thread_interrupt_level(THREAD_UNINT);
1666 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
1667 if (kr != KERN_SUCCESS) {
1668 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(task));
1669 }
1670
1671 (void)thread_interrupt_level(wsave);
1672
1673 /*
1674 * Drop the send right on the task port; this will fire the
1675 * no-senders notification if exception delivery failed.
1676 */
1677 ipc_port_release_send(task_port);
1678 return kr;
1679 }
1680
1681 /*
1682 * task_terminate:
1683 *
1684 * Terminate the specified task. See comments on thread_terminate
1685 * (kern/thread.c) about problems with terminating the "current task."
1686 */
1687
1688 kern_return_t
1689 task_terminate(
1690 task_t task)
1691 {
1692 if (task == TASK_NULL)
1693 return (KERN_INVALID_ARGUMENT);
1694
1695 if (task->bsd_info)
1696 return (KERN_FAILURE);
1697
1698 return (task_terminate_internal(task));
1699 }
1700
1701 #if MACH_ASSERT
1702 extern int proc_pid(struct proc *);
1703 extern void proc_name_kdp(task_t t, char *buf, int size);
1704 #endif /* MACH_ASSERT */
1705
1706 #define VM_MAP_PARTIAL_REAP 0x54 /* 0x150 */
1707 static void
1708 __unused task_partial_reap(task_t task, __unused int pid)
1709 {
1710 unsigned int reclaimed_resident = 0;
1711 unsigned int reclaimed_compressed = 0;
1712 uint64_t task_page_count;
1713
1714 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
1715
1716 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
1717 pid, task_page_count, 0, 0, 0);
1718
1719 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
1720
1721 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
1722 pid, reclaimed_resident, reclaimed_compressed, 0, 0);
1723 }
1724
1725 kern_return_t
1726 task_mark_corpse(task_t task)
1727 {
1728 kern_return_t kr = KERN_SUCCESS;
1729 thread_t self_thread;
1730 (void) self_thread;
1731 wait_interrupt_t wsave;
1732
1733 assert(task != kernel_task);
1734 assert(task == current_task());
1735 assert(!task_is_a_corpse(task));
1736
1737 kr = task_collect_crash_info(task, (struct proc*)task->bsd_info, FALSE);
1738 if (kr != KERN_SUCCESS) {
1739 return kr;
1740 }
1741
1742 self_thread = current_thread();
1743
1744 wsave = thread_interrupt_level(THREAD_UNINT);
1745 task_lock(task);
1746
1747 task_set_corpse_pending_report(task);
1748 task_set_corpse(task);
1749
1750 kr = task_start_halt_locked(task, TRUE);
1751 assert(kr == KERN_SUCCESS);
1752
1753 ipc_task_reset(task);
1754 /* Remove the naked send right for the task port, needed to arm the no-senders notification */
1755 task_set_special_port(task, TASK_KERNEL_PORT, IPC_PORT_NULL);
1756 ipc_task_enable(task);
1757
1758 task_unlock(task);
1759 /* terminate the ipc space */
1760 ipc_space_terminate(task->itk_space);
1761
1762 /* Add it to global corpse task list */
1763 task_add_to_corpse_task_list(task);
1764
1765 task_start_halt(task);
1766 thread_terminate_internal(self_thread);
1767
1768 (void) thread_interrupt_level(wsave);
1769 assert(task->halting == TRUE);
1770 return kr;
1771 }
1772
1773 /*
1774 * task_clear_corpse
1775 *
1776 * Clears the corpse-pending bit on the task.
1777 * Removes the inspection bit on its threads.
1778 */
1779 void
1780 task_clear_corpse(task_t task)
1781 {
1782 thread_t th_iter = NULL;
1783
1784 task_lock(task);
1785 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
1786 {
1787 thread_mtx_lock(th_iter);
1788 th_iter->inspection = FALSE;
1789 thread_mtx_unlock(th_iter);
1790 }
1791
1792 thread_terminate_crashed_threads();
1793 /* remove the pending corpse report flag */
1794 task_clear_corpse_pending_report(task);
1795
1796 task_unlock(task);
1797 }
1798
1799 /*
1800 * task_port_notify
1801 *
1802 * Called whenever the Mach port system detects no-senders on
1803 * the task port of a corpse.
1804 * Each notification that comes in should terminate the task (corpse).
1805 */
1806 void
1807 task_port_notify(mach_msg_header_t *msg)
1808 {
1809 mach_no_senders_notification_t *notification = (void *)msg;
1810 ipc_port_t port = notification->not_header.msgh_remote_port;
1811 task_t task;
1812
1813 assert(ip_active(port));
1814 assert(IKOT_TASK == ip_kotype(port));
1815 task = (task_t) port->ip_kobject;
1816
1817 assert(task_is_a_corpse(task));
1818
1819 /* Remove the task from global corpse task list */
1820 task_remove_from_corpse_task_list(task);
1821
1822 task_clear_corpse(task);
1823 task_terminate_internal(task);
1824 }
1825
1826 /*
1827 * task_wait_till_threads_terminate_locked
1828 *
1829 * Wait until all the threads in the task have terminated.
1830 * May release the task lock and re-acquire it.
1831 */
1832 void
1833 task_wait_till_threads_terminate_locked(task_t task)
1834 {
1835 /* wait for all the threads in the task to terminate */
1836 while (task->active_thread_count != 0) {
1837 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
1838 task_unlock(task);
1839 thread_block(THREAD_CONTINUE_NULL);
1840
1841 task_lock(task);
1842 }
1843 }
1844
1845 /*
1846 * task_duplicate_map_and_threads
1847 *
1848 * Copy the vm map of the source task.
1849 * Copy the active threads from the source task to the destination task.
1850 * The source task is suspended for the duration of the copy.
1851 */
1852 kern_return_t
1853 task_duplicate_map_and_threads(
1854 task_t task,
1855 void *p,
1856 task_t new_task,
1857 thread_t *thread_ret,
1858 uint64_t **udata_buffer,
1859 int *size,
1860 int *num_udata)
1861 {
1862 kern_return_t kr = KERN_SUCCESS;
1863 int active;
1864 thread_t thread, self, thread_return = THREAD_NULL;
1865 thread_t new_thread = THREAD_NULL;
1866 thread_t *thread_array;
1867 uint32_t active_thread_count = 0, array_count = 0, i;
1868 vm_map_t oldmap;
1869 uint64_t *buffer = NULL;
1870 int buf_size = 0;
1871 int est_knotes = 0, num_knotes = 0;
1872
1873 self = current_thread();
1874
1875 /*
1876 * Suspend the task to copy thread state; use the internal
1877 * variant so that no user-space process can resume
1878 * the task out from under us.
1879 */
1880 kr = task_suspend_internal(task);
1881 if (kr != KERN_SUCCESS) {
1882 return kr;
1883 }
1884
1885 if (task->map->disable_vmentry_reuse == TRUE) {
1886 /*
1887 * Quite likely GuardMalloc (or some debugging tool)
1888 * is being used on this task, and it has gone through
1889 * its limit. Making a corpse would likely encounter
1890 * a lot of VM entries that need COW.
1891 *
1892 * Skip it.
1893 */
1894 task_resume_internal(task);
1895 return KERN_FAILURE;
1896 }
1897
1898 /* Set up the new task's vm map: switch from the parent task's map to its COW copy */
1899 oldmap = new_task->map;
1900 new_task->map = vm_map_fork(new_task->ledger,
1901 task->map,
1902 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
1903 VM_MAP_FORK_PRESERVE_PURGEABLE));
1904 vm_map_deallocate(oldmap);
1905
1906 /* Get all the udata pointers from kqueue */
1907 est_knotes = proc_list_uptrs(p, NULL, 0);
1908 if (est_knotes > 0) {
1909 buf_size = (est_knotes + 32) * sizeof(uint64_t);
1910 buffer = (uint64_t *) kalloc(buf_size);
1911 num_knotes = proc_list_uptrs(p, buffer, buf_size);
1912 if (num_knotes > est_knotes + 32) {
1913 num_knotes = est_knotes + 32;
1914 }
1915 }
1916
1917 active_thread_count = task->active_thread_count;
1918 if (active_thread_count == 0) {
1919 if (buffer != NULL) {
1920 kfree(buffer, buf_size);
1921 }
1922 task_resume_internal(task);
1923 return KERN_FAILURE;
1924 }
1925
1926 thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count);
1927
1928 /* Take a reference on each active thread, then drop the task lock before calling thread_create_with_continuation */
1929 task_lock(task);
1930 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1931 /* Skip inactive threads */
1932 active = thread->active;
1933 if (!active) {
1934 continue;
1935 }
1936
1937 if (array_count >= active_thread_count) {
1938 break;
1939 }
1940
1941 thread_array[array_count++] = thread;
1942 thread_reference(thread);
1943 }
1944 task_unlock(task);
1945
1946 for (i = 0; i < array_count; i++) {
1947
1948 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
1949 if (kr != KERN_SUCCESS) {
1950 break;
1951 }
1952
1953 /* Equivalent of current thread in corpse */
1954 if (thread_array[i] == self) {
1955 thread_return = new_thread;
1956 } else {
1957 /* drop the extra ref returned by thread_create_with_continuation */
1958 thread_deallocate(new_thread);
1959 }
1960
1961 kr = thread_dup2(thread_array[i], new_thread);
1962 if (kr != KERN_SUCCESS) {
1963 thread_mtx_lock(new_thread);
1964 new_thread->corpse_dup = TRUE;
1965 thread_mtx_unlock(new_thread);
1966 continue;
1967 }
1968
1969 /* Copy thread name */
1970 bsd_copythreadname(new_thread->uthread, thread_array[i]->uthread);
1971 thread_copy_resource_info(new_thread, thread_array[i]);
1972 }
1973
1974 task_resume_internal(task);
1975
1976 for (i = 0; i < array_count; i++) {
1977 thread_deallocate(thread_array[i]);
1978 }
1979 kfree(thread_array, sizeof(thread_t) * active_thread_count);
1980
1981 if (kr == KERN_SUCCESS) {
1982 *thread_ret = thread_return;
1983 *udata_buffer = buffer;
1984 *size = buf_size;
1985 *num_udata = num_knotes;
1986 } else {
1987 if (thread_return != THREAD_NULL) {
1988 thread_deallocate(thread_return);
1989 }
1990 if (buffer != NULL) {
1991 kfree(buffer, buf_size);
1992 }
1993 }
1994
1995 return kr;
1996 }
1997
1998 #if CONFIG_SECLUDED_MEMORY
1999 extern void task_set_can_use_secluded_mem_locked(
2000 task_t task,
2001 boolean_t can_use_secluded_mem);
2002 #endif /* CONFIG_SECLUDED_MEMORY */
2003
2004 kern_return_t
2005 task_terminate_internal(
2006 task_t task)
2007 {
2008 thread_t thread, self;
2009 task_t self_task;
2010 boolean_t interrupt_save;
2011 int pid = 0;
2012
2013 assert(task != kernel_task);
2014
2015 self = current_thread();
2016 self_task = self->task;
2017
2018 /*
2019 * Get the task locked and make sure that we are not racing
2020 * with someone else trying to terminate us.
2021 */
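	/*
	 * When two distinct tasks must be locked, take them in ascending
	 * pointer order so that two concurrent terminators cannot deadlock
	 * against each other.
	 */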
2022 if (task == self_task)
2023 task_lock(task);
2024 else
2025 if (task < self_task) {
2026 task_lock(task);
2027 task_lock(self_task);
2028 }
2029 else {
2030 task_lock(self_task);
2031 task_lock(task);
2032 }
2033
2034 #if CONFIG_SECLUDED_MEMORY
2035 if (task->task_can_use_secluded_mem) {
2036 task_set_can_use_secluded_mem_locked(task, FALSE);
2037 }
2038 task->task_could_use_secluded_mem = FALSE;
2039 task->task_could_also_use_secluded_mem = FALSE;
2040 #endif /* CONFIG_SECLUDED_MEMORY */
2041
2042 if (!task->active) {
2043 /*
2044 * Task is already being terminated.
2045 * Just return an error. If we are dying, this will
2046 * just get us to our AST special handler and that
2047 * will get us to finalize the termination of ourselves.
2048 */
2049 task_unlock(task);
2050 if (self_task != task)
2051 task_unlock(self_task);
2052
2053 return (KERN_FAILURE);
2054 }
2055
2056 if (task_corpse_pending_report(task)) {
2057 /*
2058 * Task is marked for reporting as corpse.
2059 * Just return an error. This will
2060 * just get us to our AST special handler and that
2061 * will get us to finish the path to death
2062 */
2063 task_unlock(task);
2064 if (self_task != task)
2065 task_unlock(self_task);
2066
2067 return (KERN_FAILURE);
2068 }
2069
2070 if (self_task != task)
2071 task_unlock(self_task);
2072
2073 /*
2074 * Make sure the current thread does not get aborted out of
2075 * the waits inside these operations.
2076 */
2077 interrupt_save = thread_interrupt_level(THREAD_UNINT);
2078
2079 /*
2080 * Indicate that we want all the threads to stop executing
2081 * at user space by holding the task (we would have held
2082 * each thread independently in thread_terminate_internal -
2083 * but this way we may be more likely to already find it
2084 * held there). Mark the task inactive, and prevent
2085 * further task operations via the task port.
2086 */
2087 task_hold_locked(task);
2088 task->active = FALSE;
2089 ipc_task_disable(task);
2090
2091 #if CONFIG_TELEMETRY
2092 /*
2093 * Notify telemetry that this task is going away.
2094 */
2095 telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2096 #endif
2097
2098 /*
2099 * Terminate each thread in the task.
2100 */
2101 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2102 thread_terminate_internal(thread);
2103 }
2104
2105 #ifdef MACH_BSD
2106 if (task->bsd_info != NULL && !task_is_exec_copy(task)) {
2107 pid = proc_pid(task->bsd_info);
2108 }
2109 #endif /* MACH_BSD */
2110
2111 task_unlock(task);
2112
2113 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
2114 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
2115
2116 /* Early object reap phase */
2117
2118 // PR-17045188: Revisit implementation
2119 // task_partial_reap(task, pid);
2120
2121
2122 /*
2123 * Destroy all synchronizers owned by the task.
2124 */
2125 task_synchronizer_destroy_all(task);
2126
2127 /*
2128 * Destroy the IPC space, leaving just a reference for it.
2129 */
2130 ipc_space_terminate(task->itk_space);
2131
2132 #if 00
2133 /* if some ledgers go negative on tear-down again... */
2134 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2135 task_ledgers.phys_footprint);
2136 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2137 task_ledgers.internal);
2138 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2139 task_ledgers.internal_compressed);
2140 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2141 task_ledgers.iokit_mapped);
2142 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2143 task_ledgers.alternate_accounting);
2144 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2145 task_ledgers.alternate_accounting_compressed);
2146 #endif
2147
2148 /*
2149 * If the current thread is a member of the task
2150 * being terminated, then the last reference to
2151 * the task will not be dropped until the thread
2152 * is finally reaped. To avoid incurring the
2153 * expense of removing the address space regions
2154 * at reap time, we do it explicitly here.
2155 */
2156
2157 vm_map_lock(task->map);
2158 vm_map_disable_hole_optimization(task->map);
2159 vm_map_unlock(task->map);
2160
2161 vm_map_remove(task->map,
2162 task->map->min_offset,
2163 task->map->max_offset,
2164 /* no unnesting on final cleanup: */
2165 VM_MAP_REMOVE_NO_UNNESTING);
2166
2167 /* release our shared region */
2168 vm_shared_region_set(task, NULL);
2169
2170
2171 #if MACH_ASSERT
2172 /*
2173 * Identify the pmap's process, in case the pmap ledgers drift
2174 * and we have to report it.
2175 */
2176 char procname[17];
2177 if (task->bsd_info && !task_is_exec_copy(task)) {
2178 pid = proc_pid(task->bsd_info);
2179 proc_name_kdp(task, procname, sizeof (procname));
2180 } else {
2181 pid = 0;
2182 strlcpy(procname, "<unknown>", sizeof (procname));
2183 }
2184 pmap_set_process(task->map->pmap, pid, procname);
2185 #endif /* MACH_ASSERT */
2186
2187 lck_mtx_lock(&tasks_threads_lock);
2188 queue_remove(&tasks, task, task_t, tasks);
2189 queue_enter(&terminated_tasks, task, task_t, tasks);
2190 tasks_count--;
2191 terminated_tasks_count++;
2192 lck_mtx_unlock(&tasks_threads_lock);
2193
2194 /*
2195 * We no longer need to guard against being aborted, so restore
2196 * the previous interruptible state.
2197 */
2198 thread_interrupt_level(interrupt_save);
2199
2200 #if KPERF
2201 /* force the task to release all ctrs */
2202 if (task->t_chud & TASK_KPC_FORCED_ALL_CTRS)
2203 kpc_force_all_ctrs(task, 0);
2204 #endif
2205
2206 #if CONFIG_COALITIONS
2207 /*
2208 * Leave our coalitions. (drop activation but not reference)
2209 */
2210 coalitions_remove_task(task);
2211 #endif
2212
2213 /*
2214 * Get rid of the task active reference on itself.
2215 */
2216 task_deallocate(task);
2217
2218 return (KERN_SUCCESS);
2219 }
2220
2221 void
2222 tasks_system_suspend(boolean_t suspend)
2223 {
2224 task_t task;
2225
2226 lck_mtx_lock(&tasks_threads_lock);
2227 assert(tasks_suspend_state != suspend);
2228 tasks_suspend_state = suspend;
2229 queue_iterate(&tasks, task, task_t, tasks) {
2230 if (task == kernel_task) {
2231 continue;
2232 }
2233 suspend ? task_suspend_internal(task) : task_resume_internal(task);
2234 }
2235 lck_mtx_unlock(&tasks_threads_lock);
2236 }
2237
2238 /*
2239 * task_start_halt:
2240 *
2241 * Shut the current task down (except for the current thread) in
2242 * preparation for dramatic changes to the task (probably exec).
2243 * We hold the task and mark all other threads in the task for
2244 * termination.
2245 */
2246 kern_return_t
2247 task_start_halt(task_t task)
2248 {
2249 kern_return_t kr = KERN_SUCCESS;
2250 task_lock(task);
2251 kr = task_start_halt_locked(task, FALSE);
2252 task_unlock(task);
2253 return kr;
2254 }
2255
2256 static kern_return_t
2257 task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
2258 {
2259 thread_t thread, self;
2260 uint64_t dispatchqueue_offset;
2261
2262 assert(task != kernel_task);
2263
2264 self = current_thread();
2265
2266 if (task != self->task && !task_is_a_corpse_fork(task))
2267 return (KERN_INVALID_ARGUMENT);
2268
2269 if (task->halting || !task->active || !self->active) {
2270 /*
2271 * Task or current thread is already being terminated.
2272 * Hurry up and return out of the current kernel context
2273 * so that we run our AST special handler to terminate
2274 * ourselves.
2275 */
2276 return (KERN_FAILURE);
2277 }
2278
2279 task->halting = TRUE;
2280
2281 /*
2282 * Mark all the threads to keep them from starting any more
2283 * user-level execution. The thread_terminate_internal code
2284 * would do this on a thread by thread basis anyway, but this
2285 * gives us a better chance of not having to wait there.
2286 */
2287 task_hold_locked(task);
2288 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info);
2289
2290 /*
2291 * Terminate all the other threads in the task.
2292 */
2293 queue_iterate(&task->threads, thread, thread_t, task_threads)
2294 {
2295 if (should_mark_corpse) {
2296 thread_mtx_lock(thread);
2297 thread->inspection = TRUE;
2298 thread_mtx_unlock(thread);
2299 }
2300 if (thread != self)
2301 thread_terminate_internal(thread);
2302 }
2303 task->dispatchqueue_offset = dispatchqueue_offset;
2304
2305 task_release_locked(task);
2306
2307 return KERN_SUCCESS;
2308 }
2309
2310
2311 /*
2312 * task_complete_halt:
2313 *
2314 * Complete task halt by waiting for threads to terminate, then clean
2315 * up task resources (VM, port namespace, etc...) and then let the
2316 * current thread go in the (practically empty) task context.
2317 *
2318 * Note: the task->halting flag is not cleared, in order to avoid the creation
2319 * of new threads in the old exec'ed task.
2320 */
2321 void
2322 task_complete_halt(task_t task)
2323 {
2324 task_lock(task);
2325 assert(task->halting);
2326 assert(task == current_task());
2327
2328 /*
2329 * Wait for the other threads to get shut down.
2330 * When the last other thread is reaped, we'll be
2331 * woken up.
2332 */
2333 if (task->thread_count > 1) {
2334 assert_wait((event_t)&task->halting, THREAD_UNINT);
2335 task_unlock(task);
2336 thread_block(THREAD_CONTINUE_NULL);
2337 } else {
2338 task_unlock(task);
2339 }
2340
2341 /*
2342 * Give the machine dependent code a chance
2343 * to perform cleanup of task-level resources
2344 * associated with the current thread before
2345 * ripping apart the task.
2346 */
2347 machine_task_terminate(task);
2348
2349 /*
2350 * Destroy all synchronizers owned by the task.
2351 */
2352 task_synchronizer_destroy_all(task);
2353
2354 /*
2355 * Destroy the contents of the IPC space, leaving just
2356 * a reference for it.
2357 */
2358 ipc_space_clean(task->itk_space);
2359
2360 /*
2361 * Clean out the address space, as we are going to be
2362 * getting a new one.
2363 */
2364 vm_map_remove(task->map, task->map->min_offset,
2365 task->map->max_offset,
2366 /* no unnesting on final cleanup: */
2367 VM_MAP_REMOVE_NO_UNNESTING);
2368
2369 /*
2370 * Kick out any IOKitUser handles to the task. At best they're stale,
2371 * at worst someone is racing a SUID exec.
2372 */
2373 iokit_task_terminate(task);
2374 }
2375
2376 /*
2377 * task_hold_locked:
2378 *
2379 * Suspend execution of the specified task.
2380 * This is a recursive-style suspension of the task; a count of
2381 * suspends is maintained.
2382 *
2383 * CONDITIONS: the task is locked and active.
2384 */
2385 void
2386 task_hold_locked(
2387 task_t task)
2388 {
2389 thread_t thread;
2390
2391 assert(task->active);
2392
2393 if (task->suspend_count++ > 0)
2394 return;
2395
2396 /*
2397 * Iterate through all the threads and hold them.
2398 */
2399 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2400 thread_mtx_lock(thread);
2401 thread_hold(thread);
2402 thread_mtx_unlock(thread);
2403 }
2404 }
2405
2406 /*
2407 * task_hold:
2408 *
2409 * Same as the internal routine above, except that it must lock
2410 * and verify that the task is active. This differs from task_suspend
2411 * in that it places a kernel hold on the task rather than just a
2412 * user-level hold. This keeps users from over-resuming and setting
2413 * it running out from under the kernel.
2414 *
2415 * CONDITIONS: the caller holds a reference on the task
2416 */
2417 kern_return_t
2418 task_hold(
2419 task_t task)
2420 {
2421 if (task == TASK_NULL)
2422 return (KERN_INVALID_ARGUMENT);
2423
2424 task_lock(task);
2425
2426 if (!task->active) {
2427 task_unlock(task);
2428
2429 return (KERN_FAILURE);
2430 }
2431
2432 task_hold_locked(task);
2433 task_unlock(task);
2434
2435 return (KERN_SUCCESS);
2436 }
2437
2438 kern_return_t
2439 task_wait(
2440 task_t task,
2441 boolean_t until_not_runnable)
2442 {
2443 if (task == TASK_NULL)
2444 return (KERN_INVALID_ARGUMENT);
2445
2446 task_lock(task);
2447
2448 if (!task->active) {
2449 task_unlock(task);
2450
2451 return (KERN_FAILURE);
2452 }
2453
2454 task_wait_locked(task, until_not_runnable);
2455 task_unlock(task);
2456
2457 return (KERN_SUCCESS);
2458 }
2459
2460 /*
2461 * task_wait_locked:
2462 *
2463 * Wait for all threads in task to stop.
2464 *
2465 * Conditions:
2466 * Called with task locked, active, and held.
2467 */
2468 void
2469 task_wait_locked(
2470 task_t task,
2471 boolean_t until_not_runnable)
2472 {
2473 thread_t thread, self;
2474
2475 assert(task->active);
2476 assert(task->suspend_count > 0);
2477
2478 self = current_thread();
2479
2480 /*
2481 * Iterate through all the threads and wait for them to
2482 * stop. Do not wait for the current thread if it is within
2483 * the task.
2484 */
2485 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2486 if (thread != self)
2487 thread_wait(thread, until_not_runnable);
2488 }
2489 }
2490
2491 /*
2492 * task_release_locked:
2493 *
2494 * Release a kernel hold on a task.
2495 *
2496 * CONDITIONS: the task is locked and active
2497 */
2498 void
2499 task_release_locked(
2500 task_t task)
2501 {
2502 thread_t thread;
2503
2504 assert(task->active);
2505 assert(task->suspend_count > 0);
2506
2507 if (--task->suspend_count > 0)
2508 return;
2509
2510 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2511 thread_mtx_lock(thread);
2512 thread_release(thread);
2513 thread_mtx_unlock(thread);
2514 }
2515 }
2516
2517 /*
2518 * task_release:
2519 *
2520 * Same as the internal routine above, except that it must lock
2521 * and verify that the task is active.
2522 *
2523 * CONDITIONS: The caller holds a reference to the task
2524 */
2525 kern_return_t
2526 task_release(
2527 task_t task)
2528 {
2529 if (task == TASK_NULL)
2530 return (KERN_INVALID_ARGUMENT);
2531
2532 task_lock(task);
2533
2534 if (!task->active) {
2535 task_unlock(task);
2536
2537 return (KERN_FAILURE);
2538 }
2539
2540 task_release_locked(task);
2541 task_unlock(task);
2542
2543 return (KERN_SUCCESS);
2544 }
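
/*
 * Illustrative sketch (not compiled): one possible way a kernel caller
 * could use the hold primitives above to quiesce a task, wait for its
 * threads to stop running user code, and then let it continue.  The
 * helper name below is hypothetical and exists only for illustration.
 */
#if 0
static kern_return_t
example_quiesce_task(task_t target)
{
	kern_return_t kr;

	kr = task_hold(target);			/* kernel-level hold */
	if (kr != KERN_SUCCESS)
		return kr;

	/* wait until no thread in the task is runnable */
	kr = task_wait(target, TRUE);
	if (kr != KERN_SUCCESS) {
		(void) task_release(target);
		return kr;
	}

	/* ... inspect or operate on the stopped task here ... */

	return task_release(target);
}
#endif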
2545
2546 kern_return_t
2547 task_threads(
2548 task_t task,
2549 thread_act_array_t *threads_out,
2550 mach_msg_type_number_t *count)
2551 {
2552 mach_msg_type_number_t actual;
2553 thread_t *thread_list;
2554 thread_t thread;
2555 vm_size_t size, size_needed;
2556 void *addr;
2557 unsigned int i, j;
2558
2559 if (task == TASK_NULL)
2560 return (KERN_INVALID_ARGUMENT);
2561
2562 size = 0; addr = NULL;
2563
2564 for (;;) {
2565 task_lock(task);
2566 if (!task->active) {
2567 task_unlock(task);
2568
2569 if (size != 0)
2570 kfree(addr, size);
2571
2572 return (KERN_FAILURE);
2573 }
2574
2575 actual = task->thread_count;
2576
2577 /* do we have the memory we need? */
2578 size_needed = actual * sizeof (mach_port_t);
2579 if (size_needed <= size)
2580 break;
2581
2582 /* unlock the task and allocate more memory */
2583 task_unlock(task);
2584
2585 if (size != 0)
2586 kfree(addr, size);
2587
2588 assert(size_needed > 0);
2589 size = size_needed;
2590
2591 addr = kalloc(size);
2592 if (addr == 0)
2593 return (KERN_RESOURCE_SHORTAGE);
2594 }
2595
2596 /* OK, have memory and the task is locked & active */
2597 thread_list = (thread_t *)addr;
2598
2599 i = j = 0;
2600
2601 for (thread = (thread_t)queue_first(&task->threads); i < actual;
2602 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
2603 thread_reference_internal(thread);
2604 thread_list[j++] = thread;
2605 }
2606
2607 assert(queue_end(&task->threads, (queue_entry_t)thread));
2608
2609 actual = j;
2610 size_needed = actual * sizeof (mach_port_t);
2611
2612 /* can unlock task now that we've got the thread refs */
2613 task_unlock(task);
2614
2615 if (actual == 0) {
2616 /* no threads, so return null pointer and deallocate memory */
2617
2618 *threads_out = NULL;
2619 *count = 0;
2620
2621 if (size != 0)
2622 kfree(addr, size);
2623 }
2624 else {
2625 /* if we allocated too much, must copy */
2626
2627 if (size_needed < size) {
2628 void *newaddr;
2629
2630 newaddr = kalloc(size_needed);
2631 if (newaddr == 0) {
2632 for (i = 0; i < actual; ++i)
2633 thread_deallocate(thread_list[i]);
2634 kfree(addr, size);
2635 return (KERN_RESOURCE_SHORTAGE);
2636 }
2637
2638 bcopy(addr, newaddr, size_needed);
2639 kfree(addr, size);
2640 thread_list = (thread_t *)newaddr;
2641 }
2642
2643 *threads_out = thread_list;
2644 *count = actual;
2645
2646 /* do the conversion that Mig should handle */
2647
2648 for (i = 0; i < actual; ++i)
2649 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
2650 }
2651
2652 return (KERN_SUCCESS);
2653 }
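
/*
 * Illustrative sketch (not compiled): how a user-space caller of the
 * task_threads() MIG routine above typically consumes and releases the
 * result.  The array arrives as out-of-line memory and each entry is a
 * send right, so both must be deallocated by the caller.  The helper
 * name is hypothetical.
 */
#if 0
static void
example_list_threads(void)
{
	thread_act_array_t threads;
	mach_msg_type_number_t count, i;

	if (task_threads(mach_task_self(), &threads, &count) != KERN_SUCCESS)
		return;
	for (i = 0; i < count; i++)
		mach_port_deallocate(mach_task_self(), threads[i]);
	vm_deallocate(mach_task_self(), (vm_address_t)threads,
	    count * sizeof(threads[0]));
}
#endif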
2654
2655 #define TASK_HOLD_NORMAL 0
2656 #define TASK_HOLD_PIDSUSPEND 1
2657 #define TASK_HOLD_LEGACY 2
2658 #define TASK_HOLD_LEGACY_ALL 3
2659
2660 static kern_return_t
2661 place_task_hold (
2662 task_t task,
2663 int mode)
2664 {
2665 if (!task->active && !task_is_a_corpse(task)) {
2666 return (KERN_FAILURE);
2667 }
2668
2669 /* Return success for corpse task */
2670 if (task_is_a_corpse(task)) {
2671 return KERN_SUCCESS;
2672 }
2673
2674 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2675 MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_SUSPEND) | DBG_FUNC_NONE,
2676 task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
2677 task->user_stop_count, task->user_stop_count + 1, 0);
2678
2679 #if MACH_ASSERT
2680 current_task()->suspends_outstanding++;
2681 #endif
2682
2683 if (mode == TASK_HOLD_LEGACY)
2684 task->legacy_stop_count++;
2685
2686 if (task->user_stop_count++ > 0) {
2687 /*
2688 * If the stop count was positive, the task is
2689 * already stopped and we can exit.
2690 */
2691 return (KERN_SUCCESS);
2692 }
2693
2694 /*
2695 * Put a kernel-level hold on the threads in the task (all
2696 * user-level task suspensions added together represent a
2697 * single kernel-level hold). We then wait for the threads
2698 * to stop executing user code.
2699 */
2700 task_hold_locked(task);
2701 task_wait_locked(task, FALSE);
2702
2703 return (KERN_SUCCESS);
2704 }
2705
2706 static kern_return_t
2707 release_task_hold (
2708 task_t task,
2709 int mode)
2710 {
2711 boolean_t release = FALSE;
2712
2713 if (!task->active && !task_is_a_corpse(task)) {
2714 return (KERN_FAILURE);
2715 }
2716
2717 /* Return success for corpse task */
2718 if (task_is_a_corpse(task)) {
2719 return KERN_SUCCESS;
2720 }
2721
2722 if (mode == TASK_HOLD_PIDSUSPEND) {
2723 if (task->pidsuspended == FALSE) {
2724 return (KERN_FAILURE);
2725 }
2726 task->pidsuspended = FALSE;
2727 }
2728
2729 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
2730
2731 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2732 MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_RESUME) | DBG_FUNC_NONE,
2733 task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
2734 task->user_stop_count, mode, task->legacy_stop_count);
2735
2736 #if MACH_ASSERT
2737 /*
2738 * This is obviously not robust; if we suspend one task and then resume a different one,
2739 * we'll fly under the radar. This is only meant to catch the common case of a crashed
2740 * or buggy suspender.
2741 */
2742 current_task()->suspends_outstanding--;
2743 #endif
2744
2745 if (mode == TASK_HOLD_LEGACY_ALL) {
2746 if (task->legacy_stop_count >= task->user_stop_count) {
2747 task->user_stop_count = 0;
2748 release = TRUE;
2749 } else {
2750 task->user_stop_count -= task->legacy_stop_count;
2751 }
2752 task->legacy_stop_count = 0;
2753 } else {
2754 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0)
2755 task->legacy_stop_count--;
2756 if (--task->user_stop_count == 0)
2757 release = TRUE;
2758 }
2759 }
2760 else {
2761 return (KERN_FAILURE);
2762 }
2763
2764 /*
2765 * Release the task if necessary.
2766 */
2767 if (release)
2768 task_release_locked(task);
2769
2770 return (KERN_SUCCESS);
2771 }
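
/*
 * Summary of the accounting above: user_stop_count tracks every outstanding
 * user-level suspension, while legacy_stop_count tracks the subset taken via
 * task_suspend().  TASK_HOLD_LEGACY_ALL drops all remaining legacy holds at
 * once (used when the resume port loses all of its senders); the kernel-level
 * hold itself is released only when user_stop_count reaches zero.
 */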
2772
2773
2774 /*
2775 * task_suspend:
2776 *
2777 * Implement an (old-fashioned) user-level suspension on a task.
2778 *
2779 * Because the user isn't expecting to have to manage a suspension
2780 * token, we'll track it for the caller in the kernel in the form of a naked
2781 * send right to the task's resume port. All such send rights
2782 * account for a single suspension against the task (unlike task_suspend2()
2783 * where each caller gets a unique suspension count represented by a
2784 * unique send-once right).
2785 *
2786 * Conditions:
2787 * The caller holds a reference to the task
2788 */
2789 kern_return_t
2790 task_suspend(
2791 task_t task)
2792 {
2793 kern_return_t kr;
2794 mach_port_t port, send, old_notify;
2795 mach_port_name_t name;
2796
2797 if (task == TASK_NULL || task == kernel_task)
2798 return (KERN_INVALID_ARGUMENT);
2799
2800 task_lock(task);
2801
2802 /*
2803 * Claim a send right on the task resume port, and request a no-senders
2804 * notification on that port (if none outstanding).
2805 */
2806 if (task->itk_resume == IP_NULL) {
2807 task->itk_resume = ipc_port_alloc_kernel();
2808 if (!IP_VALID(task->itk_resume))
2809 panic("failed to create resume port");
2810 ipc_kobject_set(task->itk_resume, (ipc_kobject_t)task, IKOT_TASK_RESUME);
2811 }
2812
2813 port = task->itk_resume;
2814 ip_lock(port);
2815 assert(ip_active(port));
2816
2817 send = ipc_port_make_send_locked(port);
2818 assert(IP_VALID(send));
2819
2820 if (port->ip_nsrequest == IP_NULL) {
2821 ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
2822 assert(old_notify == IP_NULL);
2823 /* port unlocked */
2824 } else {
2825 ip_unlock(port);
2826 }
2827
2828 /*
2829 * place a legacy hold on the task.
2830 */
2831 kr = place_task_hold(task, TASK_HOLD_LEGACY);
2832 if (kr != KERN_SUCCESS) {
2833 task_unlock(task);
2834 ipc_port_release_send(send);
2835 return kr;
2836 }
2837
2838 task_unlock(task);
2839
2840 /*
2841 * Copyout the send right into the calling task's IPC space. It won't know it is there,
2842 * but we'll look it up when calling a traditional resume. Any IPC operations that
2843 * deallocate the send right will auto-release the suspension.
2844 */
2845 if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, (ipc_object_t)send,
2846 MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) {
2847 printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n",
2848 proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
2849 task_pid(task), kr);
2850 return (kr);
2851 }
2852
2853 return (kr);
2854 }
2855
2856 /*
2857 * task_resume:
2858 * Release a user hold on a task.
2859 *
2860 * Conditions:
2861 * The caller holds a reference to the task
2862 */
2863 kern_return_t
2864 task_resume(
2865 task_t task)
2866 {
2867 kern_return_t kr;
2868 mach_port_name_t resume_port_name;
2869 ipc_entry_t resume_port_entry;
2870 ipc_space_t space = current_task()->itk_space;
2871
2872 if (task == TASK_NULL || task == kernel_task )
2873 return (KERN_INVALID_ARGUMENT);
2874
2875 /* release a legacy task hold */
2876 task_lock(task);
2877 kr = release_task_hold(task, TASK_HOLD_LEGACY);
2878 task_unlock(task);
2879
2880 is_write_lock(space);
2881 if (is_active(space) && IP_VALID(task->itk_resume) &&
2882 ipc_hash_lookup(space, (ipc_object_t)task->itk_resume, &resume_port_name, &resume_port_entry) == TRUE) {
2883 /*
2884 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
2885 * we are holding one less legacy hold on the task from this caller. If the release failed,
2886 * go ahead and drop all the rights, as someone either already released our holds or the task
2887 * is gone.
2888 */
2889 if (kr == KERN_SUCCESS)
2890 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
2891 else
2892 ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
2893 /* space unlocked */
2894 } else {
2895 is_write_unlock(space);
2896 if (kr == KERN_SUCCESS)
2897 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
2898 proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
2899 task_pid(task));
2900 }
2901
2902 return kr;
2903 }
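
/*
 * Illustrative sketch (not compiled): the traditional user-space pairing of
 * task_suspend()/task_resume() implemented above.  The kernel copies a naked
 * send right to the task's resume port into the caller's IPC space; any IPC
 * operation that deallocates that right auto-releases the suspension.  The
 * helper name is hypothetical.
 */
#if 0
static kern_return_t
example_legacy_suspend(task_t target_port)	/* a task send right */
{
	kern_return_t kr;

	kr = task_suspend(target_port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... the target is stopped here ... */

	return task_resume(target_port);
}
#endif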
2904
2905 /*
2906 * Suspend the target task.
2907 * Making/holding a token/reference/port is the caller's responsibility.
2908 */
2909 kern_return_t
2910 task_suspend_internal(task_t task)
2911 {
2912 kern_return_t kr;
2913
2914 if (task == TASK_NULL || task == kernel_task)
2915 return (KERN_INVALID_ARGUMENT);
2916
2917 task_lock(task);
2918 kr = place_task_hold(task, TASK_HOLD_NORMAL);
2919 task_unlock(task);
2920 return (kr);
2921 }
2922
2923 /*
2924 * Suspend the target task, and return a suspension token. The token
2925 * represents a reference on the suspended task.
2926 */
2927 kern_return_t
2928 task_suspend2(
2929 task_t task,
2930 task_suspension_token_t *suspend_token)
2931 {
2932 kern_return_t kr;
2933
2934 kr = task_suspend_internal(task);
2935 if (kr != KERN_SUCCESS) {
2936 *suspend_token = TASK_NULL;
2937 return (kr);
2938 }
2939
2940 /*
2941 * Take a reference on the target task and return that to the caller
2942 * as a "suspension token," which can be converted into an SO right to
2943 * the now-suspended task's resume port.
2944 */
2945 task_reference_internal(task);
2946 *suspend_token = task;
2947
2948 return (KERN_SUCCESS);
2949 }
2950
2951 /*
2952 * Resume the task
2953 * (reference/token/port management is caller's responsibility).
2954 */
2955 kern_return_t
2956 task_resume_internal(
2957 task_suspension_token_t task)
2958 {
2959 kern_return_t kr;
2960
2961 if (task == TASK_NULL || task == kernel_task)
2962 return (KERN_INVALID_ARGUMENT);
2963
2964 task_lock(task);
2965 kr = release_task_hold(task, TASK_HOLD_NORMAL);
2966 task_unlock(task);
2967 return (kr);
2968 }
2969
2970 /*
2971 * Resume the task using a suspension token. Consumes the token's ref.
2972 */
2973 kern_return_t
2974 task_resume2(
2975 task_suspension_token_t task)
2976 {
2977 kern_return_t kr;
2978
2979 kr = task_resume_internal(task);
2980 task_suspension_token_deallocate(task);
2981
2982 return (kr);
2983 }
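
/*
 * Illustrative sketch (not compiled): a hypothetical in-kernel caller pairing
 * task_suspend2() with task_resume2().  The suspension token returned by
 * task_suspend2() carries its own task reference, which task_resume2()
 * consumes.  The helper name is made up for illustration.
 */
#if 0
static kern_return_t
example_suspend_and_inspect(task_t target)
{
	task_suspension_token_t token;
	kern_return_t kr;

	kr = task_suspend2(target, &token);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect the stopped task here ... */

	return task_resume2(token);	/* consumes the token's reference */
}
#endif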
2984
2985 boolean_t
2986 task_suspension_notify(mach_msg_header_t *request_header)
2987 {
2988 ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port;
2989 task_t task = convert_port_to_task_suspension_token(port);
2990 mach_msg_type_number_t not_count;
2991
2992 if (task == TASK_NULL || task == kernel_task)
2993 return TRUE; /* nothing to do */
2994
2995 switch (request_header->msgh_id) {
2996
2997 case MACH_NOTIFY_SEND_ONCE:
2998 /* release the hold held by this specific send-once right */
2999 task_lock(task);
3000 release_task_hold(task, TASK_HOLD_NORMAL);
3001 task_unlock(task);
3002 break;
3003
3004 case MACH_NOTIFY_NO_SENDERS:
3005 not_count = ((mach_no_senders_notification_t *)request_header)->not_count;
3006
3007 task_lock(task);
3008 ip_lock(port);
3009 if (port->ip_mscount == not_count) {
3010
3011 /* release all the [remaining] outstanding legacy holds */
3012 assert(port->ip_nsrequest == IP_NULL);
3013 ip_unlock(port);
3014 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
3015 task_unlock(task);
3016
3017 } else if (port->ip_nsrequest == IP_NULL) {
3018 ipc_port_t old_notify;
3019
3020 task_unlock(task);
3021 /* new send rights, re-arm notification at current make-send count */
3022 ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
3023 assert(old_notify == IP_NULL);
3024 /* port unlocked */
3025 } else {
3026 ip_unlock(port);
3027 task_unlock(task);
3028 }
3029 break;
3030
3031 default:
3032 break;
3033 }
3034
3035 task_suspension_token_deallocate(task); /* drop token reference */
3036 return TRUE;
3037 }
3038
3039 kern_return_t
3040 task_pidsuspend_locked(task_t task)
3041 {
3042 kern_return_t kr;
3043
3044 if (task->pidsuspended) {
3045 kr = KERN_FAILURE;
3046 goto out;
3047 }
3048
3049 task->pidsuspended = TRUE;
3050
3051 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
3052 if (kr != KERN_SUCCESS) {
3053 task->pidsuspended = FALSE;
3054 }
3055 out:
3056 return(kr);
3057 }
3058
3059
3060 /*
3061 * task_pidsuspend:
3062 *
3063 * Suspends a task by placing a hold on its threads.
3064 *
3065 * Conditions:
3066 * The caller holds a reference to the task
3067 */
3068 kern_return_t
3069 task_pidsuspend(
3070 task_t task)
3071 {
3072 kern_return_t kr;
3073
3074 if (task == TASK_NULL || task == kernel_task)
3075 return (KERN_INVALID_ARGUMENT);
3076
3077 task_lock(task);
3078
3079 kr = task_pidsuspend_locked(task);
3080
3081 task_unlock(task);
3082
3083 return (kr);
3084 }
3085
3086 /*
3087 * task_pidresume:
3088 * Resumes a previously suspended task.
3089 *
3090 * Conditions:
3091 * The caller holds a reference to the task
3092 */
3093 kern_return_t
3094 task_pidresume(
3095 task_t task)
3096 {
3097 kern_return_t kr;
3098
3099 if (task == TASK_NULL || task == kernel_task)
3100 return (KERN_INVALID_ARGUMENT);
3101
3102 task_lock(task);
3103
3104 #if CONFIG_FREEZE
3105
3106 while (task->changing_freeze_state) {
3107
3108 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3109 task_unlock(task);
3110 thread_block(THREAD_CONTINUE_NULL);
3111
3112 task_lock(task);
3113 }
3114 task->changing_freeze_state = TRUE;
3115 #endif
3116
3117 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
3118
3119 task_unlock(task);
3120
3121 #if CONFIG_FREEZE
3122
3123 task_lock(task);
3124
3125 if (kr == KERN_SUCCESS)
3126 task->frozen = FALSE;
3127 task->changing_freeze_state = FALSE;
3128 thread_wakeup(&task->changing_freeze_state);
3129
3130 task_unlock(task);
3131 #endif
3132
3133 return (kr);
3134 }
3135
3136
3137 #if DEVELOPMENT || DEBUG
3138
3139 extern void IOSleep(int);
3140
3141 kern_return_t
3142 task_disconnect_page_mappings(task_t task)
3143 {
3144 int n;
3145
3146 if (task == TASK_NULL || task == kernel_task)
3147 return (KERN_INVALID_ARGUMENT);
3148
3149 /*
3150 * This function is used to strip all of the mappings from
3151 * the pmap for the specified task, forcing the task to
3152 * re-fault all of the pages it is actively using. This
3153 * allows us to approximate the true working set of the
3154 * specified task. We only engage if at least one of the
3155 * threads in the task is runnable, but we want to continuously
3156 * sweep (at least for a while - I've arbitrarily set the limit at
3157 * 100 sweeps, to be revisited as we gain experience) to get a better
3158 * view into which areas within a page are being visited (as opposed to
3159 * only seeing the first fault of a page after the task becomes
3160 * runnable). In the future I may
3161 * try to block until awakened by a thread in this task
3162 * being made runnable, but for now we periodically poll from the
3163 * user-level debug tool driving the sysctl.
3164 */
3165 for (n = 0; n < 100; n++) {
3166 thread_t thread;
3167 boolean_t runnable;
3168 boolean_t do_unnest;
3169 int page_count;
3170
3171 runnable = FALSE;
3172 do_unnest = FALSE;
3173
3174 task_lock(task);
3175
3176 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3177
3178 if (thread->state & TH_RUN) {
3179 runnable = TRUE;
3180 break;
3181 }
3182 }
3183 if (n == 0)
3184 task->task_disconnected_count++;
3185
3186 if (task->task_unnested == FALSE) {
3187 if (runnable == TRUE) {
3188 task->task_unnested = TRUE;
3189 do_unnest = TRUE;
3190 }
3191 }
3192 task_unlock(task);
3193
3194 if (runnable == FALSE)
3195 break;
3196
3197 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
3198 task, do_unnest, task->task_disconnected_count, 0, 0);
3199
3200 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
3201
3202 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
3203 task, page_count, 0, 0, 0);
3204
3205 if ((n % 5) == 4)
3206 IOSleep(1);
3207 }
3208 return (KERN_SUCCESS);
3209 }
3210
3211 #endif
3212
3213
3214 #if CONFIG_FREEZE
3215
3216 /*
3217 * task_freeze:
3218 *
3219 * Freeze a task.
3220 *
3221 * Conditions:
3222 * The caller holds a reference to the task
3223 */
3224 extern void vm_wake_compactor_swapper();
3225 extern queue_head_t c_swapout_list_head;
3226
3227 kern_return_t
3228 task_freeze(
3229 task_t task,
3230 uint32_t *purgeable_count,
3231 uint32_t *wired_count,
3232 uint32_t *clean_count,
3233 uint32_t *dirty_count,
3234 uint32_t dirty_budget,
3235 boolean_t *shared,
3236 boolean_t walk_only)
3237 {
3238 kern_return_t kr = KERN_SUCCESS;
3239
3240 if (task == TASK_NULL || task == kernel_task)
3241 return (KERN_INVALID_ARGUMENT);
3242
3243 task_lock(task);
3244
3245 while (task->changing_freeze_state) {
3246
3247 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3248 task_unlock(task);
3249 thread_block(THREAD_CONTINUE_NULL);
3250
3251 task_lock(task);
3252 }
3253 if (task->frozen) {
3254 task_unlock(task);
3255 return (KERN_FAILURE);
3256 }
3257 task->changing_freeze_state = TRUE;
3258
3259 task_unlock(task);
3260
3261 if (walk_only) {
3262 panic("task_freeze - walk_only == TRUE");
3263 } else {
3264 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
3265 }
3266
3267 task_lock(task);
3268
3269 if (walk_only == FALSE && kr == KERN_SUCCESS)
3270 task->frozen = TRUE;
3271 task->changing_freeze_state = FALSE;
3272 thread_wakeup(&task->changing_freeze_state);
3273
3274 task_unlock(task);
3275
3276 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3277 vm_wake_compactor_swapper();
3278 /*
3279 * We do an explicit wakeup of the swapout thread here
3280 * because the compact_and_swap routines don't have
3281 * knowledge about these kinds of "per-task packed c_segs"
3282 * and so will not be evaluating whether we need to do
3283 * a wakeup there.
3284 */
3285 thread_wakeup((event_t)&c_swapout_list_head);
3286 }
3287
3288 return (kr);
3289 }
3290
3291 /*
3292 * task_thaw:
3293 *
3294 * Thaw a currently frozen task.
3295 *
3296 * Conditions:
3297 * The caller holds a reference to the task
3298 */
3299 kern_return_t
3300 task_thaw(
3301 task_t task)
3302 {
3303 if (task == TASK_NULL || task == kernel_task)
3304 return (KERN_INVALID_ARGUMENT);
3305
3306 task_lock(task);
3307
3308 while (task->changing_freeze_state) {
3309
3310 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3311 task_unlock(task);
3312 thread_block(THREAD_CONTINUE_NULL);
3313
3314 task_lock(task);
3315 }
3316 if (!task->frozen) {
3317 task_unlock(task);
3318 return (KERN_FAILURE);
3319 }
3320 task->frozen = FALSE;
3321
3322 task_unlock(task);
3323
3324 return (KERN_SUCCESS);
3325 }
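
/*
 * Illustrative sketch (not compiled): a hypothetical freeze/thaw cycle using
 * the two routines above.  The counters report what vm_map_freeze() found;
 * the dirty budget value shown is arbitrary.  All names other than
 * task_freeze()/task_thaw() are local to this example.
 */
#if 0
static kern_return_t
example_freeze_thaw(task_t target)
{
	uint32_t purgeable, wired, clean, dirty;
	boolean_t shared;
	kern_return_t kr;

	kr = task_freeze(target, &purgeable, &wired, &clean, &dirty,
	    UINT32_MAX /* arbitrary dirty budget */, &shared, FALSE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... the task's eligible pages have been handed to the compressor ... */

	return task_thaw(target);
}
#endif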
3326
3327 #endif /* CONFIG_FREEZE */
3328
3329 kern_return_t
3330 host_security_set_task_token(
3331 host_security_t host_security,
3332 task_t task,
3333 security_token_t sec_token,
3334 audit_token_t audit_token,
3335 host_priv_t host_priv)
3336 {
3337 ipc_port_t host_port;
3338 kern_return_t kr;
3339
3340 if (task == TASK_NULL)
3341 return(KERN_INVALID_ARGUMENT);
3342
3343 if (host_security == HOST_NULL)
3344 return(KERN_INVALID_SECURITY);
3345
3346 task_lock(task);
3347 task->sec_token = sec_token;
3348 task->audit_token = audit_token;
3349
3350 task_unlock(task);
3351
3352 if (host_priv != HOST_PRIV_NULL) {
3353 kr = host_get_host_priv_port(host_priv, &host_port);
3354 } else {
3355 kr = host_get_host_port(host_priv_self(), &host_port);
3356 }
3357 assert(kr == KERN_SUCCESS);
3358 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
3359 return(kr);
3360 }
3361
3362 kern_return_t
3363 task_send_trace_memory(
3364 task_t target_task,
3365 __unused uint32_t pid,
3366 __unused uint64_t uniqueid)
3367 {
3368 kern_return_t kr = KERN_INVALID_ARGUMENT;
3369 if (target_task == TASK_NULL)
3370 return (KERN_INVALID_ARGUMENT);
3371
3372 #if CONFIG_ATM
3373 kr = atm_send_proc_inspect_notification(target_task,
3374 pid,
3375 uniqueid);
3376
3377 #endif
3378 return (kr);
3379 }
3380 /*
3381 * This routine was added, pretty much exclusively, for registering the
3382 * RPC glue vector for in-kernel short circuited tasks. Rather than
3383 * removing it completely, I have only disabled that feature (which was
3384 * the only feature at the time). It just appears that we are going to
3385 * want to add some user data to tasks in the future (e.g. bsd info,
3386 * task names, etc...), so I left it in the formal task interface.
3387 */
3388 kern_return_t
3389 task_set_info(
3390 task_t task,
3391 task_flavor_t flavor,
3392 __unused task_info_t task_info_in, /* pointer to IN array */
3393 __unused mach_msg_type_number_t task_info_count)
3394 {
3395 if (task == TASK_NULL)
3396 return(KERN_INVALID_ARGUMENT);
3397
3398 switch (flavor) {
3399
3400 #if CONFIG_ATM
3401 case TASK_TRACE_MEMORY_INFO:
3402 {
3403 if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT)
3404 return (KERN_INVALID_ARGUMENT);
3405
3406 assert(task_info_in != NULL);
3407 task_trace_memory_info_t mem_info;
3408 mem_info = (task_trace_memory_info_t) task_info_in;
3409 kern_return_t kr = atm_register_trace_memory(task,
3410 mem_info->user_memory_address,
3411 mem_info->buffer_size);
3412 return kr;
3413 }
3414
3415 #endif
3416 default:
3417 return (KERN_INVALID_ARGUMENT);
3418 }
3419 return (KERN_SUCCESS);
3420 }
3421
3422 int radar_20146450 = 1;
3423 kern_return_t
3424 task_info(
3425 task_t task,
3426 task_flavor_t flavor,
3427 task_info_t task_info_out,
3428 mach_msg_type_number_t *task_info_count)
3429 {
3430 kern_return_t error = KERN_SUCCESS;
3431 mach_msg_type_number_t original_task_info_count;
3432
3433 if (task == TASK_NULL)
3434 return (KERN_INVALID_ARGUMENT);
3435
3436 original_task_info_count = *task_info_count;
3437 task_lock(task);
3438
3439 if ((task != current_task()) && (!task->active)) {
3440 task_unlock(task);
3441 return (KERN_INVALID_ARGUMENT);
3442 }
3443
3444 switch (flavor) {
3445
3446 case TASK_BASIC_INFO_32:
3447 case TASK_BASIC2_INFO_32:
3448 {
3449 task_basic_info_32_t basic_info;
3450 vm_map_t map;
3451 clock_sec_t secs;
3452 clock_usec_t usecs;
3453
3454 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
3455 error = KERN_INVALID_ARGUMENT;
3456 break;
3457 }
3458
3459 basic_info = (task_basic_info_32_t)task_info_out;
3460
3461 map = (task == kernel_task)? kernel_map: task->map;
3462 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
3463 if (flavor == TASK_BASIC2_INFO_32) {
3464 /*
3465 * The "BASIC2" flavor gets the maximum resident
3466 * size instead of the current resident size...
3467 */
3468 basic_info->resident_size = pmap_resident_max(map->pmap);
3469 } else {
3470 basic_info->resident_size = pmap_resident_count(map->pmap);
3471 }
3472 basic_info->resident_size *= PAGE_SIZE;
3473
3474 basic_info->policy = ((task != kernel_task)?
3475 POLICY_TIMESHARE: POLICY_RR);
3476 basic_info->suspend_count = task->user_stop_count;
3477
3478 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3479 basic_info->user_time.seconds =
3480 (typeof(basic_info->user_time.seconds))secs;
3481 basic_info->user_time.microseconds = usecs;
3482
3483 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3484 basic_info->system_time.seconds =
3485 (typeof(basic_info->system_time.seconds))secs;
3486 basic_info->system_time.microseconds = usecs;
3487
3488 *task_info_count = TASK_BASIC_INFO_32_COUNT;
3489 break;
3490 }
3491
3492 case TASK_BASIC_INFO_64:
3493 {
3494 task_basic_info_64_t basic_info;
3495 vm_map_t map;
3496 clock_sec_t secs;
3497 clock_usec_t usecs;
3498
3499 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
3500 error = KERN_INVALID_ARGUMENT;
3501 break;
3502 }
3503
3504 basic_info = (task_basic_info_64_t)task_info_out;
3505
3506 map = (task == kernel_task)? kernel_map: task->map;
3507 basic_info->virtual_size = map->size;
3508 basic_info->resident_size =
3509 (mach_vm_size_t)(pmap_resident_count(map->pmap))
3510 * PAGE_SIZE_64;
3511
3512 basic_info->policy = ((task != kernel_task)?
3513 POLICY_TIMESHARE: POLICY_RR);
3514 basic_info->suspend_count = task->user_stop_count;
3515
3516 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3517 basic_info->user_time.seconds =
3518 (typeof(basic_info->user_time.seconds))secs;
3519 basic_info->user_time.microseconds = usecs;
3520
3521 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3522 basic_info->system_time.seconds =
3523 (typeof(basic_info->system_time.seconds))secs;
3524 basic_info->system_time.microseconds = usecs;
3525
3526 *task_info_count = TASK_BASIC_INFO_64_COUNT;
3527 break;
3528 }
3529
3530 case MACH_TASK_BASIC_INFO:
3531 {
3532 mach_task_basic_info_t basic_info;
3533 vm_map_t map;
3534 clock_sec_t secs;
3535 clock_usec_t usecs;
3536
3537 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
3538 error = KERN_INVALID_ARGUMENT;
3539 break;
3540 }
3541
3542 basic_info = (mach_task_basic_info_t)task_info_out;
3543
3544 map = (task == kernel_task) ? kernel_map : task->map;
3545
3546 basic_info->virtual_size = map->size;
3547
3548 basic_info->resident_size =
3549 (mach_vm_size_t)(pmap_resident_count(map->pmap));
3550 basic_info->resident_size *= PAGE_SIZE_64;
3551
3552 basic_info->resident_size_max =
3553 (mach_vm_size_t)(pmap_resident_max(map->pmap));
3554 basic_info->resident_size_max *= PAGE_SIZE_64;
3555
3556 basic_info->policy = ((task != kernel_task) ?
3557 POLICY_TIMESHARE : POLICY_RR);
3558
3559 basic_info->suspend_count = task->user_stop_count;
3560
3561 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3562 basic_info->user_time.seconds =
3563 (typeof(basic_info->user_time.seconds))secs;
3564 basic_info->user_time.microseconds = usecs;
3565
3566 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3567 basic_info->system_time.seconds =
3568 (typeof(basic_info->system_time.seconds))secs;
3569 basic_info->system_time.microseconds = usecs;
3570
3571 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
3572 break;
3573 }
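
	/*
	 * Illustrative sketch (not compiled): a user-space fragment showing how
	 * a caller typically requests the MACH_TASK_BASIC_INFO flavor handled
	 * above.  The count is passed in/out and is trimmed to what was filled.
	 */
#if 0
	mach_task_basic_info_data_t info;
	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;

	kr = task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
	    (task_info_t)&info, &count);
	/* on success, info.resident_size, info.user_time, etc. are valid */
#endif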
3574
3575 case TASK_THREAD_TIMES_INFO:
3576 {
3577 task_thread_times_info_t times_info;
3578 thread_t thread;
3579
3580 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
3581 error = KERN_INVALID_ARGUMENT;
3582 break;
3583 }
3584
3585 times_info = (task_thread_times_info_t) task_info_out;
3586 times_info->user_time.seconds = 0;
3587 times_info->user_time.microseconds = 0;
3588 times_info->system_time.seconds = 0;
3589 times_info->system_time.microseconds = 0;
3590
3591
3592 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3593 time_value_t user_time, system_time;
3594
3595 if (thread->options & TH_OPT_IDLE_THREAD)
3596 continue;
3597
3598 thread_read_times(thread, &user_time, &system_time);
3599
3600 time_value_add(&times_info->user_time, &user_time);
3601 time_value_add(&times_info->system_time, &system_time);
3602 }
3603
3604 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
3605 break;
3606 }
3607
3608 case TASK_ABSOLUTETIME_INFO:
3609 {
3610 task_absolutetime_info_t info;
3611 thread_t thread;
3612
3613 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
3614 error = KERN_INVALID_ARGUMENT;
3615 break;
3616 }
3617
3618 info = (task_absolutetime_info_t)task_info_out;
3619 info->threads_user = info->threads_system = 0;
3620
3621
3622 info->total_user = task->total_user_time;
3623 info->total_system = task->total_system_time;
3624
3625 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3626 uint64_t tval;
3627 spl_t x;
3628
3629 if (thread->options & TH_OPT_IDLE_THREAD)
3630 continue;
3631
3632 x = splsched();
3633 thread_lock(thread);
3634
3635 tval = timer_grab(&thread->user_timer);
3636 info->threads_user += tval;
3637 info->total_user += tval;
3638
3639 tval = timer_grab(&thread->system_timer);
3640 if (thread->precise_user_kernel_time) {
3641 info->threads_system += tval;
3642 info->total_system += tval;
3643 } else {
3644 /* system_timer may represent either sys or user */
3645 info->threads_user += tval;
3646 info->total_user += tval;
3647 }
3648
3649 thread_unlock(thread);
3650 splx(x);
3651 }
3652
3653
3654 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
3655 break;
3656 }
3657
3658 case TASK_DYLD_INFO:
3659 {
3660 task_dyld_info_t info;
3661
3662 /*
3663 * We added the format field to TASK_DYLD_INFO output. For
3664 * temporary backward compatibility, accept the fact that
3665 * clients may ask for the old version - distinguished by the
3666 * size of the expected result structure.
3667 */
3668 #define TASK_LEGACY_DYLD_INFO_COUNT \
3669 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
3670
3671 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
3672 error = KERN_INVALID_ARGUMENT;
3673 break;
3674 }
3675
3676 info = (task_dyld_info_t)task_info_out;
3677 info->all_image_info_addr = task->all_image_info_addr;
3678 info->all_image_info_size = task->all_image_info_size;
3679
3680 /* only set format on output for those expecting it */
3681 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
3682 info->all_image_info_format = task_has_64BitAddr(task) ?
3683 TASK_DYLD_ALL_IMAGE_INFO_64 :
3684 TASK_DYLD_ALL_IMAGE_INFO_32 ;
3685 *task_info_count = TASK_DYLD_INFO_COUNT;
3686 } else {
3687 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
3688 }
3689 break;
3690 }
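
	/*
	 * Illustrative note on the backward compatibility handled above: a
	 * caller that passes a count of TASK_LEGACY_DYLD_INFO_COUNT receives
	 * only all_image_info_addr and all_image_info_size; a caller passing
	 * TASK_DYLD_INFO_COUNT (or more) also receives all_image_info_format.
	 */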
3691
3692 case TASK_EXTMOD_INFO:
3693 {
3694 task_extmod_info_t info;
3695 void *p;
3696
3697 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
3698 error = KERN_INVALID_ARGUMENT;
3699 break;
3700 }
3701
3702 info = (task_extmod_info_t)task_info_out;
3703
3704 p = get_bsdtask_info(task);
3705 if (p) {
3706 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
3707 } else {
3708 bzero(info->task_uuid, sizeof(info->task_uuid));
3709 }
3710 info->extmod_statistics = task->extmod_statistics;
3711 *task_info_count = TASK_EXTMOD_INFO_COUNT;
3712
3713 break;
3714 }
3715
3716 case TASK_KERNELMEMORY_INFO:
3717 {
3718 task_kernelmemory_info_t tkm_info;
3719 ledger_amount_t credit, debit;
3720
3721 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
3722 error = KERN_INVALID_ARGUMENT;
3723 break;
3724 }
3725
3726 tkm_info = (task_kernelmemory_info_t) task_info_out;
3727 tkm_info->total_palloc = 0;
3728 tkm_info->total_pfree = 0;
3729 tkm_info->total_salloc = 0;
3730 tkm_info->total_sfree = 0;
3731
3732 if (task == kernel_task) {
3733 /*
3734 * All shared allocs/frees from other tasks count against
3735 * the kernel private memory usage. If we are looking up
3736 * info for the kernel task, gather from everywhere.
3737 */
3738 task_unlock(task);
3739
3740 /* start by accounting for all the terminated tasks against the kernel */
3741 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
3742 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
3743
3744 /* count all other task/thread shared alloc/free against the kernel */
3745 lck_mtx_lock(&tasks_threads_lock);
3746
3747 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
3748 queue_iterate(&tasks, task, task_t, tasks) {
3749 if (task == kernel_task) {
3750 if (ledger_get_entries(task->ledger,
3751 task_ledgers.tkm_private, &credit,
3752 &debit) == KERN_SUCCESS) {
3753 tkm_info->total_palloc += credit;
3754 tkm_info->total_pfree += debit;
3755 }
3756 }
3757 if (!ledger_get_entries(task->ledger,
3758 task_ledgers.tkm_shared, &credit, &debit)) {
3759 tkm_info->total_palloc += credit;
3760 tkm_info->total_pfree += debit;
3761 }
3762 }
3763 lck_mtx_unlock(&tasks_threads_lock);
3764 } else {
3765 if (!ledger_get_entries(task->ledger,
3766 task_ledgers.tkm_private, &credit, &debit)) {
3767 tkm_info->total_palloc = credit;
3768 tkm_info->total_pfree = debit;
3769 }
3770 if (!ledger_get_entries(task->ledger,
3771 task_ledgers.tkm_shared, &credit, &debit)) {
3772 tkm_info->total_salloc = credit;
3773 tkm_info->total_sfree = debit;
3774 }
3775 task_unlock(task);
3776 }
3777
3778 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
3779 return KERN_SUCCESS;
3780 }
3781
3782 /* OBSOLETE */
3783 case TASK_SCHED_FIFO_INFO:
3784 {
3785
3786 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
3787 error = KERN_INVALID_ARGUMENT;
3788 break;
3789 }
3790
3791 error = KERN_INVALID_POLICY;
3792 break;
3793 }
3794
3795 /* OBSOLETE */
3796 case TASK_SCHED_RR_INFO:
3797 {
3798 policy_rr_base_t rr_base;
3799 uint32_t quantum_time;
3800 uint64_t quantum_ns;
3801
3802 if (*task_info_count < POLICY_RR_BASE_COUNT) {
3803 error = KERN_INVALID_ARGUMENT;
3804 break;
3805 }
3806
3807 rr_base = (policy_rr_base_t) task_info_out;
3808
3809 if (task != kernel_task) {
3810 error = KERN_INVALID_POLICY;
3811 break;
3812 }
3813
3814 rr_base->base_priority = task->priority;
3815
3816 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
3817 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
3818
3819 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
3820
3821 *task_info_count = POLICY_RR_BASE_COUNT;
3822 break;
3823 }
3824
3825 /* OBSOLETE */
3826 case TASK_SCHED_TIMESHARE_INFO:
3827 {
3828 policy_timeshare_base_t ts_base;
3829
3830 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
3831 error = KERN_INVALID_ARGUMENT;
3832 break;
3833 }
3834
3835 ts_base = (policy_timeshare_base_t) task_info_out;
3836
3837 if (task == kernel_task) {
3838 error = KERN_INVALID_POLICY;
3839 break;
3840 }
3841
3842 ts_base->base_priority = task->priority;
3843
3844 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
3845 break;
3846 }
3847
3848 case TASK_SECURITY_TOKEN:
3849 {
3850 security_token_t *sec_token_p;
3851
3852 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
3853 error = KERN_INVALID_ARGUMENT;
3854 break;
3855 }
3856
3857 sec_token_p = (security_token_t *) task_info_out;
3858
3859 *sec_token_p = task->sec_token;
3860
3861 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
3862 break;
3863 }
3864
3865 case TASK_AUDIT_TOKEN:
3866 {
3867 audit_token_t *audit_token_p;
3868
3869 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
3870 error = KERN_INVALID_ARGUMENT;
3871 break;
3872 }
3873
3874 audit_token_p = (audit_token_t *) task_info_out;
3875
3876 *audit_token_p = task->audit_token;
3877
3878 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
3879 break;
3880 }
3881
3882 case TASK_SCHED_INFO:
3883 error = KERN_INVALID_ARGUMENT;
3884 break;
3885
3886 case TASK_EVENTS_INFO:
3887 {
3888 task_events_info_t events_info;
3889 thread_t thread;
3890
3891 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
3892 error = KERN_INVALID_ARGUMENT;
3893 break;
3894 }
3895
3896 events_info = (task_events_info_t) task_info_out;
3897
3898
3899 events_info->faults = task->faults;
3900 events_info->pageins = task->pageins;
3901 events_info->cow_faults = task->cow_faults;
3902 events_info->messages_sent = task->messages_sent;
3903 events_info->messages_received = task->messages_received;
3904 events_info->syscalls_mach = task->syscalls_mach;
3905 events_info->syscalls_unix = task->syscalls_unix;
3906
3907 events_info->csw = task->c_switch;
3908
3909 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3910 events_info->csw += thread->c_switch;
3911 events_info->syscalls_mach += thread->syscalls_mach;
3912 events_info->syscalls_unix += thread->syscalls_unix;
3913 }
3914
3915
3916 *task_info_count = TASK_EVENTS_INFO_COUNT;
3917 break;
3918 }
3919 case TASK_AFFINITY_TAG_INFO:
3920 {
3921 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
3922 error = KERN_INVALID_ARGUMENT;
3923 break;
3924 }
3925
3926 error = task_affinity_info(task, task_info_out, task_info_count);
3927 break;
3928 }
3929 case TASK_POWER_INFO:
3930 {
3931 if (*task_info_count < TASK_POWER_INFO_COUNT) {
3932 error = KERN_INVALID_ARGUMENT;
3933 break;
3934 }
3935
3936 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL);
3937 break;
3938 }
3939
3940 case TASK_POWER_INFO_V2:
3941 {
3942 if (*task_info_count < TASK_POWER_INFO_V2_COUNT) {
3943 error = KERN_INVALID_ARGUMENT;
3944 break;
3945 }
3946 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
3947
3948 uint64_t *task_energy = NULL;
3949 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, task_energy);
3950 break;
3951 }
3952
3953 case TASK_VM_INFO:
3954 case TASK_VM_INFO_PURGEABLE:
3955 {
3956 task_vm_info_t vm_info;
3957 vm_map_t map;
3958
3959 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
3960 error = KERN_INVALID_ARGUMENT;
3961 break;
3962 }
3963
3964 vm_info = (task_vm_info_t)task_info_out;
3965
3966 if (task == kernel_task) {
3967 map = kernel_map;
3968 /* no lock */
3969 } else {
3970 map = task->map;
3971 vm_map_lock_read(map);
3972 }
3973
3974 vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size;
3975 vm_info->region_count = map->hdr.nentries;
3976 vm_info->page_size = vm_map_page_size(map);
3977
3978 vm_info->resident_size = pmap_resident_count(map->pmap);
3979 vm_info->resident_size *= PAGE_SIZE;
3980 vm_info->resident_size_peak = pmap_resident_max(map->pmap);
3981 vm_info->resident_size_peak *= PAGE_SIZE;
3982
3983 #define _VM_INFO(_name) \
3984 vm_info->_name = ((mach_vm_size_t) map->pmap->stats._name) * PAGE_SIZE
3985
3986 _VM_INFO(device);
3987 _VM_INFO(device_peak);
3988 _VM_INFO(external);
3989 _VM_INFO(external_peak);
3990 _VM_INFO(internal);
3991 _VM_INFO(internal_peak);
3992 _VM_INFO(reusable);
3993 _VM_INFO(reusable_peak);
3994 _VM_INFO(compressed);
3995 _VM_INFO(compressed_peak);
3996 _VM_INFO(compressed_lifetime);
3997
3998 vm_info->purgeable_volatile_pmap = 0;
3999 vm_info->purgeable_volatile_resident = 0;
4000 vm_info->purgeable_volatile_virtual = 0;
4001 if (task == kernel_task) {
4002 /*
4003 * We do not maintain the detailed stats for the
4004 * kernel_pmap, so just count everything as
4005 * "internal"...
4006 */
4007 vm_info->internal = vm_info->resident_size;
4008 /*
4009 * ... but since the memory held by the VM compressor
4010 * in the kernel address space ought to be attributed
4011 * to user-space tasks, we subtract it from "internal"
4012 * to give memory reporting tools a more accurate idea
4013 * of what the kernel itself is actually using, instead
4014 * of making it look like the kernel is leaking memory
4015 * when the system is under memory pressure.
4016 */
4017 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
4018 PAGE_SIZE);
4019 } else {
4020 mach_vm_size_t volatile_virtual_size;
4021 mach_vm_size_t volatile_resident_size;
4022 mach_vm_size_t volatile_compressed_size;
4023 mach_vm_size_t volatile_pmap_size;
4024 mach_vm_size_t volatile_compressed_pmap_size;
4025 kern_return_t kr;
4026
4027 if (flavor == TASK_VM_INFO_PURGEABLE) {
4028 kr = vm_map_query_volatile(
4029 map,
4030 &volatile_virtual_size,
4031 &volatile_resident_size,
4032 &volatile_compressed_size,
4033 &volatile_pmap_size,
4034 &volatile_compressed_pmap_size);
4035 if (kr == KERN_SUCCESS) {
4036 vm_info->purgeable_volatile_pmap =
4037 volatile_pmap_size;
4038 if (radar_20146450) {
4039 vm_info->compressed -=
4040 volatile_compressed_pmap_size;
4041 }
4042 vm_info->purgeable_volatile_resident =
4043 volatile_resident_size;
4044 vm_info->purgeable_volatile_virtual =
4045 volatile_virtual_size;
4046 }
4047 }
4048 }
4049 *task_info_count = TASK_VM_INFO_REV0_COUNT;
4050
4051 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
4052 vm_info->phys_footprint =
4053 (mach_vm_size_t) get_task_phys_footprint(task);
4054 *task_info_count = TASK_VM_INFO_REV1_COUNT;
4055 }
4056 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
4057 vm_info->min_address = map->min_offset;
4058 vm_info->max_address = map->max_offset;
4059 *task_info_count = TASK_VM_INFO_REV2_COUNT;
4060 }
4061
4062 if (task != kernel_task) {
4063 vm_map_unlock_read(map);
4064 }
4065
4066 break;
4067 }
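/*
 * Illustrative user-space sketch (not part of this file): the REV0/REV1/REV2
 * handling above means callers opt in to newer fields by passing a larger
 * count. Assuming the definitions in <mach/task_info.h>:
 *
 *     task_vm_info_data_t vm_info;
 *     mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
 *     kern_return_t kr = task_info(mach_task_self(), TASK_VM_INFO,
 *         (task_info_t)&vm_info, &count);
 *
 * A caller passing only TASK_VM_INFO_REV0_COUNT gets the original fields;
 * phys_footprint requires at least TASK_VM_INFO_REV1_COUNT, and
 * min_address/max_address require TASK_VM_INFO_REV2_COUNT.
 */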
4068
4069 case TASK_WAIT_STATE_INFO:
4070 {
4071 /*
4072 * Deprecated flavor. Currently allowing some results until all users
4073 * stop calling it. The results may not be accurate.
4074 */
4075 task_wait_state_info_t wait_state_info;
4076 uint64_t total_sfi_ledger_val = 0;
4077
4078 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
4079 error = KERN_INVALID_ARGUMENT;
4080 break;
4081 }
4082
4083 wait_state_info = (task_wait_state_info_t) task_info_out;
4084
4085 wait_state_info->total_wait_state_time = 0;
4086 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
4087
4088 #if CONFIG_SCHED_SFI
4089 int i, prev_lentry = -1;
4090 int64_t val_credit, val_debit;
4091
4092 for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
4093 val_credit = 0;
4094 /*
4095 * checking with prev_lentry != entry ensures adjacent classes
4096 * which share the same ledger do not add wait times twice.
4097 * Note: Use ledger() call to get data for each individual sfi class.
4098 */
4099 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
4100 KERN_SUCCESS == ledger_get_entries(task->ledger,
4101 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
4102 total_sfi_ledger_val += val_credit;
4103 }
4104 prev_lentry = task_ledgers.sfi_wait_times[i];
4105 }
4106
4107 #endif /* CONFIG_SCHED_SFI */
4108 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
4109 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
4110
4111 break;
4112 }
4113 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
4114 {
4115 #if DEVELOPMENT || DEBUG
4116 pvm_account_info_t acnt_info;
4117
4118 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
4119 error = KERN_INVALID_ARGUMENT;
4120 break;
4121 }
4122
4123 if (task_info_out == NULL) {
4124 error = KERN_INVALID_ARGUMENT;
4125 break;
4126 }
4127
4128 acnt_info = (pvm_account_info_t) task_info_out;
4129
4130 error = vm_purgeable_account(task, acnt_info);
4131
4132 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
4133
4134 break;
4135 #else /* DEVELOPMENT || DEBUG */
4136 error = KERN_NOT_SUPPORTED;
4137 break;
4138 #endif /* DEVELOPMENT || DEBUG */
4139 }
4140 case TASK_FLAGS_INFO:
4141 {
4142 task_flags_info_t flags_info;
4143
4144 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
4145 error = KERN_INVALID_ARGUMENT;
4146 break;
4147 }
4148
4149 flags_info = (task_flags_info_t)task_info_out;
4150
4151 /* only publish the 64-bit flag of the task */
4152 flags_info->flags = task->t_flags & TF_64B_ADDR;
4153
4154 *task_info_count = TASK_FLAGS_INFO_COUNT;
4155 break;
4156 }
4157
4158 case TASK_DEBUG_INFO_INTERNAL:
4159 {
4160 #if DEVELOPMENT || DEBUG
4161 task_debug_info_internal_t dbg_info;
4162 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
4163 error = KERN_NOT_SUPPORTED;
4164 break;
4165 }
4166
4167 if (task_info_out == NULL) {
4168 error = KERN_INVALID_ARGUMENT;
4169 break;
4170 }
4171 dbg_info = (task_debug_info_internal_t) task_info_out;
4172 dbg_info->ipc_space_size = 0;
4173 if (task->itk_space){
4174 dbg_info->ipc_space_size = task->itk_space->is_table_size;
4175 }
4176
4177 error = KERN_SUCCESS;
4178 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
4179 break;
4180 #else /* DEVELOPMENT || DEBUG */
4181 error = KERN_NOT_SUPPORTED;
4182 break;
4183 #endif /* DEVELOPMENT || DEBUG */
4184 }
4185 default:
4186 error = KERN_INVALID_ARGUMENT;
4187 }
4188
4189 task_unlock(task);
4190 return (error);
4191 }
4192
4193 /*
4194 * task_power_info
4195 *
4196 * Returns power stats for the task.
4197 * Note: Called with task locked.
4198 */
4199 void
4200 task_power_info_locked(
4201 task_t task,
4202 task_power_info_t info,
4203 gpu_energy_data_t ginfo,
4204 uint64_t *task_energy)
4205 {
4206 thread_t thread;
4207 ledger_amount_t tmp;
4208
4209 task_lock_assert_owned(task);
4210
4211 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
4212 (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
4213 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
4214 (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);
4215
4216 info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
4217 info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;
4218
4219 info->total_user = task->total_user_time;
4220 info->total_system = task->total_system_time;
4221
4222 if (task_energy) {
4223 *task_energy = task->task_energy;
4224 }
4225
4226 if (ginfo) {
4227 ginfo->task_gpu_utilisation = task->task_gpu_ns;
4228 }
4229
4230 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4231 uint64_t tval;
4232 spl_t x;
4233
4234 if (thread->options & TH_OPT_IDLE_THREAD)
4235 continue;
4236
4237 x = splsched();
4238 thread_lock(thread);
4239
4240 info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
4241 info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
4242
4243 if (task_energy) {
4244 *task_energy += ml_energy_stat(thread);
4245 }
4246
4247 tval = timer_grab(&thread->user_timer);
4248 info->total_user += tval;
4249
4250 tval = timer_grab(&thread->system_timer);
4251 if (thread->precise_user_kernel_time) {
4252 info->total_system += tval;
4253 } else {
4254 /* system_timer may represent either sys or user */
4255 info->total_user += tval;
4256 }
4257
4258 if (ginfo) {
4259 ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
4260 }
4261 thread_unlock(thread);
4262 splx(x);
4263 }
4264 }
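/*
 * Illustrative call pattern (an assumption, not copied from a caller in this
 * file): task_power_info_locked() expects the task lock to be held, as
 * asserted above, e.g.
 *
 *     task_power_info_data_t pinfo;
 *
 *     task_lock(task);
 *     task_power_info_locked(task, &pinfo, NULL, NULL);
 *     task_unlock(task);
 *
 * Passing NULL for ginfo and task_energy skips the GPU and energy
 * accumulation, which is what the TASK_POWER_INFO case above does.
 */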
4265
4266 /*
4267 * task_gpu_utilisation
4268 *
4269 * Returns the total gpu time used by all the threads of the task
4270 * (both dead and alive)
4271 */
4272 uint64_t
4273 task_gpu_utilisation(
4274 task_t task)
4275 {
4276 uint64_t gpu_time = 0;
4277 thread_t thread;
4278
4279 task_lock(task);
4280 gpu_time += task->task_gpu_ns;
4281
4282 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4283 spl_t x;
4284 x = splsched();
4285 thread_lock(thread);
4286 gpu_time += ml_gpu_stat(thread);
4287 thread_unlock(thread);
4288 splx(x);
4289 }
4290
4291 task_unlock(task);
4292 return gpu_time;
4293 }
4294
4295 /*
4296 * task_energy
4297 *
4298 * Returns the total energy used by all the threads of the task
4299 * (both dead and alive)
4300 */
4301 uint64_t
4302 task_energy(
4303 task_t task)
4304 {
4305 uint64_t energy = 0;
4306 thread_t thread;
4307
4308 task_lock(task);
4309 energy += task->task_energy;
4310
4311 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4312 spl_t x;
4313 x = splsched();
4314 thread_lock(thread);
4315 energy += ml_energy_stat(thread);
4316 thread_unlock(thread);
4317 splx(x);
4318 }
4319
4320 task_unlock(task);
4321 return energy;
4322 }
4323
4324 kern_return_t
4325 task_purgable_info(
4326 task_t task,
4327 task_purgable_info_t *stats)
4328 {
4329 if (task == TASK_NULL || stats == NULL)
4330 return KERN_INVALID_ARGUMENT;
4331 /* Take task reference */
4332 task_reference(task);
4333 vm_purgeable_stats((vm_purgeable_info_t)stats, task);
4334 /* Drop task reference */
4335 task_deallocate(task);
4336 return KERN_SUCCESS;
4337 }
4338
4339 void
4340 task_vtimer_set(
4341 task_t task,
4342 integer_t which)
4343 {
4344 thread_t thread;
4345 spl_t x;
4346
4347 task_lock(task);
4348
4349 task->vtimers |= which;
4350
4351 switch (which) {
4352
4353 case TASK_VTIMER_USER:
4354 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4355 x = splsched();
4356 thread_lock(thread);
4357 if (thread->precise_user_kernel_time)
4358 thread->vtimer_user_save = timer_grab(&thread->user_timer);
4359 else
4360 thread->vtimer_user_save = timer_grab(&thread->system_timer);
4361 thread_unlock(thread);
4362 splx(x);
4363 }
4364 break;
4365
4366 case TASK_VTIMER_PROF:
4367 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4368 x = splsched();
4369 thread_lock(thread);
4370 thread->vtimer_prof_save = timer_grab(&thread->user_timer);
4371 thread->vtimer_prof_save += timer_grab(&thread->system_timer);
4372 thread_unlock(thread);
4373 splx(x);
4374 }
4375 break;
4376
4377 case TASK_VTIMER_RLIM:
4378 queue_iterate(&task->threads, thread, thread_t, task_threads) {
4379 x = splsched();
4380 thread_lock(thread);
4381 thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
4382 thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
4383 thread_unlock(thread);
4384 splx(x);
4385 }
4386 break;
4387 }
4388
4389 task_unlock(task);
4390 }
4391
4392 void
4393 task_vtimer_clear(
4394 task_t task,
4395 integer_t which)
4396 {
4397 assert(task == current_task());
4398
4399 task_lock(task);
4400
4401 task->vtimers &= ~which;
4402
4403 task_unlock(task);
4404 }
4405
4406 void
4407 task_vtimer_update(
4408 __unused
4409 task_t task,
4410 integer_t which,
4411 uint32_t *microsecs)
4412 {
4413 thread_t thread = current_thread();
4414 uint32_t tdelt = 0;
4415 clock_sec_t secs = 0;
4416 uint64_t tsum;
4417
4418 assert(task == current_task());
4419
4420 spl_t s = splsched();
4421 thread_lock(thread);
4422
4423 if ((task->vtimers & which) != (uint32_t)which) {
4424 thread_unlock(thread);
4425 splx(s);
4426 return;
4427 }
4428
4429 switch (which) {
4430
4431 case TASK_VTIMER_USER:
4432 if (thread->precise_user_kernel_time) {
4433 tdelt = (uint32_t)timer_delta(&thread->user_timer,
4434 &thread->vtimer_user_save);
4435 } else {
4436 tdelt = (uint32_t)timer_delta(&thread->system_timer,
4437 &thread->vtimer_user_save);
4438 }
4439 absolutetime_to_microtime(tdelt, &secs, microsecs);
4440 break;
4441
4442 case TASK_VTIMER_PROF:
4443 tsum = timer_grab(&thread->user_timer);
4444 tsum += timer_grab(&thread->system_timer);
4445 tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
4446 absolutetime_to_microtime(tdelt, &secs, microsecs);
4447 /* if the time delta is smaller than a usec, ignore */
4448 if (*microsecs != 0)
4449 thread->vtimer_prof_save = tsum;
4450 break;
4451
4452 case TASK_VTIMER_RLIM:
4453 tsum = timer_grab(&thread->user_timer);
4454 tsum += timer_grab(&thread->system_timer);
4455 tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
4456 thread->vtimer_rlim_save = tsum;
4457 absolutetime_to_microtime(tdelt, &secs, microsecs);
4458 break;
4459 }
4460
4461 thread_unlock(thread);
4462 splx(s);
4463 }
4464
4465 /*
4466 * task_assign:
4467 *
4468 * Change the assigned processor set for the task
4469 */
4470 kern_return_t
4471 task_assign(
4472 __unused task_t task,
4473 __unused processor_set_t new_pset,
4474 __unused boolean_t assign_threads)
4475 {
4476 return(KERN_FAILURE);
4477 }
4478
4479 /*
4480 * task_assign_default:
4481 *
4482 * Version of task_assign to assign to default processor set.
4483 */
4484 kern_return_t
4485 task_assign_default(
4486 task_t task,
4487 boolean_t assign_threads)
4488 {
4489 return (task_assign(task, &pset0, assign_threads));
4490 }
4491
4492 /*
4493 * task_get_assignment
4494 *
4495 * Return name of processor set that task is assigned to.
4496 */
4497 kern_return_t
4498 task_get_assignment(
4499 task_t task,
4500 processor_set_t *pset)
4501 {
4502 if (!task || !task->active)
4503 return KERN_FAILURE;
4504
4505 *pset = &pset0;
4506
4507 return KERN_SUCCESS;
4508 }
4509
4510 uint64_t
4511 get_task_dispatchqueue_offset(
4512 task_t task)
4513 {
4514 return task->dispatchqueue_offset;
4515 }
4516
4517 /*
4518 * task_policy
4519 *
4520 * Set scheduling policy and parameters, both base and limit, for
4521 * the given task. Policy must be a policy which is enabled for the
4522 * processor set. Change contained threads if requested.
4523 */
4524 kern_return_t
4525 task_policy(
4526 __unused task_t task,
4527 __unused policy_t policy_id,
4528 __unused policy_base_t base,
4529 __unused mach_msg_type_number_t count,
4530 __unused boolean_t set_limit,
4531 __unused boolean_t change)
4532 {
4533 return(KERN_FAILURE);
4534 }
4535
4536 /*
4537 * task_set_policy
4538 *
4539 * Set scheduling policy and parameters, both base and limit, for
4540 * the given task. Policy can be any policy implemented by the
4541 * processor set, whether enabled or not. Change contained threads
4542 * if requested.
4543 */
4544 kern_return_t
4545 task_set_policy(
4546 __unused task_t task,
4547 __unused processor_set_t pset,
4548 __unused policy_t policy_id,
4549 __unused policy_base_t base,
4550 __unused mach_msg_type_number_t base_count,
4551 __unused policy_limit_t limit,
4552 __unused mach_msg_type_number_t limit_count,
4553 __unused boolean_t change)
4554 {
4555 return(KERN_FAILURE);
4556 }
4557
4558 kern_return_t
4559 task_set_ras_pc(
4560 __unused task_t task,
4561 __unused vm_offset_t pc,
4562 __unused vm_offset_t endpc)
4563 {
4564 return KERN_FAILURE;
4565 }
4566
4567 void
4568 task_synchronizer_destroy_all(task_t task)
4569 {
4570 /*
4571 * Destroy owned semaphores
4572 */
4573 semaphore_destroy_all(task);
4574 }
4575
4576 /*
4577 * Install default (machine-dependent) initial thread state
4578 * on the task. Subsequent thread creation will have this initial
4579 * state set on the thread by machine_thread_inherit_taskwide().
4580 * Flavors and structures are exactly the same as those to thread_set_state()
4581 */
4582 kern_return_t
4583 task_set_state(
4584 task_t task,
4585 int flavor,
4586 thread_state_t state,
4587 mach_msg_type_number_t state_count)
4588 {
4589 kern_return_t ret;
4590
4591 if (task == TASK_NULL) {
4592 return (KERN_INVALID_ARGUMENT);
4593 }
4594
4595 task_lock(task);
4596
4597 if (!task->active) {
4598 task_unlock(task);
4599 return (KERN_FAILURE);
4600 }
4601
4602 ret = machine_task_set_state(task, flavor, state, state_count);
4603
4604 task_unlock(task);
4605 return ret;
4606 }
4607
4608 /*
4609 * Examine the default (machine-dependent) initial thread state
4610 * on the task, as set by task_set_state(). Flavors and structures
4611 * are exactly the same as those passed to thread_get_state().
4612 */
4613 kern_return_t
4614 task_get_state(
4615 task_t task,
4616 int flavor,
4617 thread_state_t state,
4618 mach_msg_type_number_t *state_count)
4619 {
4620 kern_return_t ret;
4621
4622 if (task == TASK_NULL) {
4623 return (KERN_INVALID_ARGUMENT);
4624 }
4625
4626 task_lock(task);
4627
4628 if (!task->active) {
4629 task_unlock(task);
4630 return (KERN_FAILURE);
4631 }
4632
4633 ret = machine_task_get_state(task, flavor, state, state_count);
4634
4635 task_unlock(task);
4636 return ret;
4637 }
4638
4639 #if CONFIG_MEMORYSTATUS
4640
4641 boolean_t
4642 task_get_memlimit_is_active(task_t task)
4643 {
4644 assert (task != NULL);
4645
4646 return (task->memlimit_is_active ? TRUE : FALSE);
4647 }
4648
4649 void
4650 task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
4651 {
4652 assert (task != NULL);
4653
4654 task->memlimit_is_active = memlimit_is_active ? 1 : 0;
4655 }
4656
4657 boolean_t
4658 task_get_memlimit_is_fatal(task_t task)
4659 {
4660 assert(task != NULL);
4661
4662 return (task->memlimit_is_fatal ? TRUE : FALSE);
4663 }
4664
4665 void
4666 task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
4667 {
4668 assert (task != NULL);
4669
4670 task->memlimit_is_fatal = memlimit_is_fatal ? 1 : 0;
4671 }
4672
4673 boolean_t
4674 task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
4675 {
4676 boolean_t triggered = FALSE;
4677
4678 assert(task == current_task());
4679
4680 /*
4681 * Returns TRUE if the task has already triggered an exc_resource exception.
4682 */
4683
4684 if (memlimit_is_active) {
4685 triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
4686 } else {
4687 triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
4688 }
4689
4690 return(triggered);
4691 }
4692
4693 void
4694 task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
4695 {
4696 assert(task == current_task());
4697
4698 /*
4699 * We allow one exc_resource per process per active/inactive limit.
4700 * The limit's fatal attribute does not come into play.
4701 */
4702
4703 if (memlimit_is_active) {
4704 task->memlimit_active_exc_resource = 1;
4705 } else {
4706 task->memlimit_inactive_exc_resource = 1;
4707 }
4708 }
4709
4710 #define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation
4711
4712 void __attribute__((noinline))
4713 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
4714 {
4715 task_t task = current_task();
4716 int pid = 0;
4717 const char *procname = "unknown";
4718 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
4719
4720 #ifdef MACH_BSD
4721 pid = proc_selfpid();
4722
4723 if (pid == 1) {
4724 /*
4725 * Cannot have ReportCrash analyzing
4726 * a suspended initproc.
4727 */
4728 return;
4729 }
4730
4731 if (task->bsd_info != NULL)
4732 procname = proc_name_address(current_task()->bsd_info);
4733 #endif
4734 #if CONFIG_COREDUMP
4735 if (hwm_user_cores) {
4736 int error;
4737 uint64_t starttime, end;
4738 clock_sec_t secs = 0;
4739 uint32_t microsecs = 0;
4740
4741 starttime = mach_absolute_time();
4742 /*
4743 * Trigger a coredump of this process. Don't proceed unless we know we won't
4744 * be filling up the disk; and ignore the core size resource limit for this
4745 * core file.
4746 */
4747 if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
4748 printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
4749 }
4750 /*
4751 * coredump() leaves the task suspended.
4752 */
4753 task_resume_internal(current_task());
4754
4755 end = mach_absolute_time();
4756 absolutetime_to_microtime(end - starttime, &secs, &microsecs);
4757 printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
4758 proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
4759 }
4760 #endif /* CONFIG_COREDUMP */
4761
4762 if (disable_exc_resource) {
4763 printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
4764 "supressed by a boot-arg.\n", procname, pid, max_footprint_mb);
4765 return;
4766 }
4767
4768 /*
4769 * A task that has triggered an EXC_RESOURCE, should not be
4770 * jetsammed when the device is under memory pressure. Here
4771 * we set the P_MEMSTAT_TERMINATED flag so that the process
4772 * will be skipped if the memorystatus_thread wakes up.
4773 */
4774 proc_memstat_terminated(current_task()->bsd_info, TRUE);
4775
4776 printf("process %s[%d] crossed memory high watermark (%d MB); sending "
4777 "EXC_RESOURCE.\n", procname, pid, max_footprint_mb);
4778
4779 code[0] = code[1] = 0;
4780 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
4781 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
4782 EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);
4783
4784 /* Do not generate a corpse fork if the violation is a fatal one */
4785 if (is_fatal || exc_via_corpse_forking == 0) {
4786 /* Do not send an EXC_RESOURCE if corpse_for_fatal_memkill is set */
4787 if (corpse_for_fatal_memkill == 0) {
4788 /*
4789 * Use the _internal_ variant so that no user-space
4790 * process can resume our task from under us.
4791 */
4792 task_suspend_internal(task);
4793 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
4794 task_resume_internal(task);
4795 }
4796 } else {
4797 task_enqueue_exception_with_corpse(task, code, EXCEPTION_CODE_MAX);
4798 }
4799
4800 /*
4801 * After the EXC_RESOURCE has been handled, we must clear the
4802 * P_MEMSTAT_TERMINATED flag so that the process can again be
4803 * considered for jetsam if the memorystatus_thread wakes up.
4804 */
4805 proc_memstat_terminated(current_task()->bsd_info, FALSE); /* clear the flag */
4806 }
4807
4808 /*
4809 * Callback invoked when a task exceeds its physical footprint limit.
4810 */
4811 void
4812 task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
4813 {
4814 ledger_amount_t max_footprint, max_footprint_mb;
4815 task_t task;
4816 boolean_t is_warning;
4817 boolean_t memlimit_is_active;
4818 boolean_t memlimit_is_fatal;
4819
4820 if (warning == LEDGER_WARNING_DIPPED_BELOW) {
4821 /*
4822 * Task memory limits only provide a warning on the way up.
4823 */
4824 return;
4825 } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
4826 /*
4827 * This task is in danger of violating a memory limit.
4828 * It has exceeded a percentage level of the limit.
4829 */
4830 is_warning = TRUE;
4831 } else {
4832 /*
4833 * The task has exceeded the physical footprint limit.
4834 * This is not a warning but a true limit violation.
4835 */
4836 is_warning = FALSE;
4837 }
4838
4839 task = current_task();
4840
4841 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
4842 max_footprint_mb = max_footprint >> 20;
4843
4844 memlimit_is_active = task_get_memlimit_is_active(task);
4845 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
4846
4847 /*
4848 * If this is an actual violation (not a warning), then generate EXC_RESOURCE exception.
4849 * We only generate the exception once per process per memlimit (active/inactive limit).
4850 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
4851 * and we disable it by marking that memlimit as exception triggered.
4852 */
4853 if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
4854 PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
4855 memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
4856 task_mark_has_triggered_exc_resource(task, memlimit_is_active);
4857 }
4858
4859 memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
4860 }
4861
4862 extern int proc_check_footprint_priv(void);
4863
4864 kern_return_t
4865 task_set_phys_footprint_limit(
4866 task_t task,
4867 int new_limit_mb,
4868 int *old_limit_mb)
4869 {
4870 kern_return_t error;
4871
4872 boolean_t memlimit_is_active;
4873 boolean_t memlimit_is_fatal;
4874
4875 if ((error = proc_check_footprint_priv())) {
4876 return (KERN_NO_ACCESS);
4877 }
4878
4879 /*
4880 * This call should probably be obsoleted.
4881 * But for now, we default to current state.
4882 */
4883 memlimit_is_active = task_get_memlimit_is_active(task);
4884 memlimit_is_fatal = task_get_memlimit_is_fatal(task);
4885
4886 return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
4887 }
4888
4889 kern_return_t
4890 task_convert_phys_footprint_limit(
4891 int limit_mb,
4892 int *converted_limit_mb)
4893 {
4894 if (limit_mb == -1) {
4895 /*
4896 * No limit
4897 */
4898 if (max_task_footprint != 0) {
4899 *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024); /* bytes to MB */
4900 } else {
4901 *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
4902 }
4903 } else {
4904 /* nothing to convert */
4905 *converted_limit_mb = limit_mb;
4906 }
4907 return (KERN_SUCCESS);
4908 }
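/*
 * Worked example of the MB<->bytes convention used by the footprint limit
 * routines here: limits are stored in the ledger in bytes, so a 512 MB limit
 * becomes (ledger_amount_t)512 << 20 == 536870912 bytes, and reading it back
 * gives 536870912 >> 20 == 512 MB. A caller-supplied limit of -1 MB (or a
 * stored LEDGER_LIMIT_INFINITY) means "no limit".
 */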
4909
4910
4911 kern_return_t
4912 task_set_phys_footprint_limit_internal(
4913 task_t task,
4914 int new_limit_mb,
4915 int *old_limit_mb,
4916 boolean_t memlimit_is_active,
4917 boolean_t memlimit_is_fatal)
4918 {
4919 ledger_amount_t old;
4920
4921 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);
4922
4923 if (old_limit_mb) {
4924 /*
4925 * Check that limit >> 20 will not give an "unexpected" 32-bit
4926 * result. There are, however, implicit assumptions that -1 mb limit
4927 * equates to LEDGER_LIMIT_INFINITY.
4928 */
4929 assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
4930 *old_limit_mb = (int)(old >> 20);
4931 }
4932
4933 if (new_limit_mb == -1) {
4934 /*
4935 * Caller wishes to remove the limit.
4936 */
4937 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
4938 max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
4939 max_task_footprint ? max_task_footprint_warning_level : 0);
4940
4941 task_set_memlimit_is_active(task, memlimit_is_active);
4942 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
4943
4944 return (KERN_SUCCESS);
4945 }
4946
4947 #ifdef CONFIG_NOMONITORS
4948 return (KERN_SUCCESS);
4949 #endif /* CONFIG_NOMONITORS */
4950
4951 task_lock(task);
4952
4953 task_set_memlimit_is_active(task, memlimit_is_active);
4954 task_set_memlimit_is_fatal(task, memlimit_is_fatal);
4955
4956 ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
4957 (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);
4958
4959 if (task == current_task()) {
4960 ledger_check_new_balance(task->ledger, task_ledgers.phys_footprint);
4961 }
4962
4963 task_unlock(task);
4964
4965 return (KERN_SUCCESS);
4966 }
4967
4968 kern_return_t
4969 task_get_phys_footprint_limit(
4970 task_t task,
4971 int *limit_mb)
4972 {
4973 ledger_amount_t limit;
4974
4975 ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
4976 /*
4977 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
4978 * result. There are, however, implicit assumptions that -1 mb limit
4979 * equates to LEDGER_LIMIT_INFINITY.
4980 */
4981 assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
4982 *limit_mb = (int)(limit >> 20);
4983
4984 return (KERN_SUCCESS);
4985 }
4986 #else /* CONFIG_MEMORYSTATUS */
4987 kern_return_t
4988 task_set_phys_footprint_limit(
4989 __unused task_t task,
4990 __unused int new_limit_mb,
4991 __unused int *old_limit_mb)
4992 {
4993 return (KERN_FAILURE);
4994 }
4995
4996 kern_return_t
4997 task_get_phys_footprint_limit(
4998 __unused task_t task,
4999 __unused int *limit_mb)
5000 {
5001 return (KERN_FAILURE);
5002 }
5003 #endif /* CONFIG_MEMORYSTATUS */
5004
5005 /*
5006 * We need to export some functions to other components that
5007 * are currently implemented in macros within the osfmk
5008 * component. Just export them as functions of the same name.
5009 */
5010 boolean_t is_kerneltask(task_t t)
5011 {
5012 if (t == kernel_task)
5013 return (TRUE);
5014
5015 return (FALSE);
5016 }
5017
5018 boolean_t is_corpsetask(task_t t)
5019 {
5020 return (task_is_a_corpse(t));
5021 }
5022
5023 #undef current_task
5024 task_t current_task(void);
5025 task_t current_task(void)
5026 {
5027 return (current_task_fast());
5028 }
5029
5030 #undef task_reference
5031 void task_reference(task_t task);
5032 void
5033 task_reference(
5034 task_t task)
5035 {
5036 if (task != TASK_NULL)
5037 task_reference_internal(task);
5038 }
5039
5040 /* defined in bsd/kern/kern_prot.c */
5041 extern int get_audit_token_pid(audit_token_t *audit_token);
5042
5043 int task_pid(task_t task)
5044 {
5045 if (task)
5046 return get_audit_token_pid(&task->audit_token);
5047 return -1;
5048 }
5049
5050
5051 /*
5052 * This routine finds a thread in a task by its unique id
5053 * Returns a referenced thread or THREAD_NULL if the thread was not found
5054 *
5055 * TODO: This is super inefficient - it's an O(threads in task) list walk!
5056 * We should make a tid hash, or transition all tid clients to thread ports
5057 *
5058 * Precondition: No locks held (will take task lock)
5059 */
5060 thread_t
5061 task_findtid(task_t task, uint64_t tid)
5062 {
5063 thread_t self = current_thread();
5064 thread_t found_thread = THREAD_NULL;
5065 thread_t iter_thread = THREAD_NULL;
5066
5067 /* Short-circuit the lookup if we're looking up ourselves */
5068 if (tid == self->thread_id || tid == TID_NULL) {
5069 assert(self->task == task);
5070
5071 thread_reference(self);
5072
5073 return self;
5074 }
5075
5076 task_lock(task);
5077
5078 queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
5079 if (iter_thread->thread_id == tid) {
5080 found_thread = iter_thread;
5081 thread_reference(found_thread);
5082 break;
5083 }
5084 }
5085
5086 task_unlock(task);
5087
5088 return (found_thread);
5089 }
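/*
 * Usage sketch (an assumption, not a caller in this file): because
 * task_findtid() returns a referenced thread, callers must drop the
 * reference when they are done with it:
 *
 *     thread_t t = task_findtid(task, tid);
 *     if (t != THREAD_NULL) {
 *         ... inspect t ...
 *         thread_deallocate(t);
 *     }
 */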
5090
5091 int pid_from_task(task_t task)
5092 {
5093 int pid = -1;
5094
5095 if (task->bsd_info) {
5096 pid = proc_pid(task->bsd_info);
5097 } else {
5098 pid = task_pid(task);
5099 }
5100
5101 return pid;
5102 }
5103
5104 /*
5105 * Control the CPU usage monitor for a task.
5106 */
5107 kern_return_t
5108 task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
5109 {
5110 int error = KERN_SUCCESS;
5111
5112 if (*flags & CPUMON_MAKE_FATAL) {
5113 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
5114 } else {
5115 error = KERN_INVALID_ARGUMENT;
5116 }
5117
5118 return error;
5119 }
5120
5121 /*
5122 * Control the wakeups monitor for a task.
5123 */
5124 kern_return_t
5125 task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
5126 {
5127 ledger_t ledger = task->ledger;
5128
5129 task_lock(task);
5130 if (*flags & WAKEMON_GET_PARAMS) {
5131 ledger_amount_t limit;
5132 uint64_t period;
5133
5134 ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
5135 ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);
5136
5137 if (limit != LEDGER_LIMIT_INFINITY) {
5138 /*
5139 * An active limit means the wakeups monitor is enabled.
5140 */
5141 *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
5142 *flags = WAKEMON_ENABLE;
5143 if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
5144 *flags |= WAKEMON_MAKE_FATAL;
5145 }
5146 } else {
5147 *flags = WAKEMON_DISABLE;
5148 *rate_hz = -1;
5149 }
5150
5151 /*
5152 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
5153 */
5154 task_unlock(task);
5155 return KERN_SUCCESS;
5156 }
5157
5158 if (*flags & WAKEMON_ENABLE) {
5159 if (*flags & WAKEMON_SET_DEFAULTS) {
5160 *rate_hz = task_wakeups_monitor_rate;
5161 }
5162
5163 #ifndef CONFIG_NOMONITORS
5164 if (*flags & WAKEMON_MAKE_FATAL) {
5165 task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
5166 }
5167 #endif /* CONFIG_NOMONITORS */
5168
5169 if (*rate_hz <= 0) {
5170 task_unlock(task);
5171 return KERN_INVALID_ARGUMENT;
5172 }
5173
5174 #ifndef CONFIG_NOMONITORS
5175 ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
5176 task_wakeups_monitor_ustackshots_trigger_pct);
5177 ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
5178 ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
5179 #endif /* CONFIG_NOMONITORS */
5180 } else if (*flags & WAKEMON_DISABLE) {
5181 /*
5182 * Caller wishes to disable wakeups monitor on the task.
5183 *
5184 * Disable telemetry if it was triggered by the wakeups monitor, and
5185 * remove the limit & callback on the wakeups ledger entry.
5186 */
5187 #if CONFIG_TELEMETRY
5188 telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
5189 #endif
5190 ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
5191 ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
5192 }
5193
5194 task_unlock(task);
5195 return KERN_SUCCESS;
5196 }
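/*
 * Worked example of the wakeups-monitor arithmetic above (values chosen for
 * illustration, not necessarily the shipping defaults): with *rate_hz == 150
 * and task_wakeups_monitor_interval == 300 seconds, WAKEMON_ENABLE sets a
 * ledger limit of 150 * 300 == 45000 wakeups per refill period of
 * 300 * NSEC_PER_SEC nanoseconds. WAKEMON_GET_PARAMS then recovers
 * 45000 / (300000000000 / NSEC_PER_SEC) == 150 Hz.
 */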
5197
5198 void
5199 task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
5200 {
5201 if (warning == LEDGER_WARNING_ROSE_ABOVE) {
5202 #if CONFIG_TELEMETRY
5203 /*
5204 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
5205 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
5206 */
5207 telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
5208 #endif
5209 return;
5210 }
5211
5212 #if CONFIG_TELEMETRY
5213 /*
5214 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
5215 * exceeded the limit, turn telemetry off for the task.
5216 */
5217 telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
5218 #endif
5219
5220 if (warning == 0) {
5221 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
5222 }
5223 }
5224
5225 void __attribute__((noinline))
5226 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
5227 {
5228 task_t task = current_task();
5229 int pid = 0;
5230 const char *procname = "unknown";
5231 boolean_t fatal;
5232 kern_return_t kr;
5233 #ifdef EXC_RESOURCE_MONITORS
5234 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
5235 #endif /* EXC_RESOURCE_MONITORS */
5236 struct ledger_entry_info lei;
5237
5238 #ifdef MACH_BSD
5239 pid = proc_selfpid();
5240 if (task->bsd_info != NULL)
5241 procname = proc_name_address(current_task()->bsd_info);
5242 #endif
5243
5244 ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);
5245
5246 /*
5247 * Disable the exception notification so we don't overwhelm
5248 * the listener with an endless stream of redundant exceptions.
5249 * TODO: detect whether another thread is already reporting the violation.
5250 */
5251 uint32_t flags = WAKEMON_DISABLE;
5252 task_wakeups_monitor_ctl(task, &flags, NULL);
5253
5254 fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
5255 trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
5256 printf("process %s[%d] caught waking the CPU %llu times "
5257 "over ~%llu seconds, averaging %llu wakes / second and "
5258 "violating a %slimit of %llu wakes over %llu seconds.\n",
5259 procname, pid,
5260 lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
5261 lei.lei_last_refill == 0 ? 0 :
5262 (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
5263 fatal ? "FATAL " : "",
5264 lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);
5265
5266 kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
5267 fatal ? kRNFatalLimitFlag : 0);
5268 if (kr) {
5269 printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
5270 }
5271
5272 #ifdef EXC_RESOURCE_MONITORS
5273 if (disable_exc_resource) {
5274 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
5275 "supressed by a boot-arg\n", procname, pid);
5276 return;
5277 }
5278 if (audio_active) {
5279 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
5280 "supressed due to audio playback\n", procname, pid);
5281 return;
5282 }
5283 if (lei.lei_last_refill == 0) {
5284 printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
5285 "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
/* bail out: the wakeups-observed encoding below divides by lei_last_refill */
return;
5286 }
5287
5288 code[0] = code[1] = 0;
5289 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
5290 EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
5291 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
5292 NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
5293 EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
5294 lei.lei_last_refill);
5295 EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
5296 NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
5297 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
5298 #endif /* EXC_RESOURCE_MONITORS */
5299
5300 if (fatal) {
5301 task_terminate_internal(task);
5302 }
5303 }
5304
5305 static boolean_t
5306 global_update_logical_writes(int64_t io_delta)
5307 {
5308 int64_t old_count, new_count;
5309 boolean_t needs_telemetry;
5310
5311 do {
5312 new_count = old_count = global_logical_writes_count;
5313 new_count += io_delta;
5314 if (new_count >= io_telemetry_limit) {
5315 new_count = 0;
5316 needs_telemetry = TRUE;
5317 } else {
5318 needs_telemetry = FALSE;
5319 }
5320 } while(!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count));
5321 return needs_telemetry;
5322 }
5323
5324 void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
5325 {
5326 int64_t io_delta = 0;
5327 boolean_t needs_telemetry = FALSE;
5328
5329 if ((!task) || (!io_size) || (!vp))
5330 return;
5331
5332 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
5333 task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
5334 DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
5335 switch(flags) {
5336 case TASK_WRITE_IMMEDIATE:
5337 OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes));
5338 ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
5339 break;
5340 case TASK_WRITE_DEFERRED:
5341 OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes));
5342 ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
5343 break;
5344 case TASK_WRITE_INVALIDATED:
5345 OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes));
5346 ledger_debit(task->ledger, task_ledgers.logical_writes, io_size);
5347 break;
5348 case TASK_WRITE_METADATA:
5349 OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes));
5350 ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
5351 break;
5352 }
5353
5354 io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
5355 if (io_telemetry_limit != 0) {
5356 /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
5357 needs_telemetry = global_update_logical_writes(io_delta);
5358 if (needs_telemetry) {
5359 act_set_io_telemetry_ast(current_thread());
5360 }
5361 }
5362 }
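/*
 * Illustrative call sequence (an assumption; the real call sites live
 * outside this file, in the BSD/VFS layer): a writer whose buffered data is
 * later invalidated first credits and then debits the logical_writes ledger:
 *
 *     task_update_logical_writes(current_task(), io_size,
 *         TASK_WRITE_DEFERRED, vp);
 *     ...
 *     task_update_logical_writes(current_task(), io_size,
 *         TASK_WRITE_INVALIDATED, vp);
 *
 * Only TASK_WRITE_INVALIDATED debits the ledger; the other three flavors
 * credit it, as the switch above shows.
 */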
5363
5364 /*
5365 * Control the I/O monitor for a task.
5366 */
5367 kern_return_t
5368 task_io_monitor_ctl(task_t task, uint32_t *flags)
5369 {
5370 ledger_t ledger = task->ledger;
5371
5372 task_lock(task);
5373 if (*flags & IOMON_ENABLE) {
5374 /* Configure the physical I/O ledger */
5375 ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
5376 ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
5377
5378 /* Configure the logical I/O ledger */
5379 ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
5380 ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
5381
5382 } else if (*flags & IOMON_DISABLE) {
5383 /*
5384 * Caller wishes to disable I/O monitor on the task.
5385 */
5386 ledger_disable_refill(ledger, task_ledgers.physical_writes);
5387 ledger_disable_callback(ledger, task_ledgers.physical_writes);
5388 ledger_disable_refill(ledger, task_ledgers.logical_writes);
5389 ledger_disable_callback(ledger, task_ledgers.logical_writes);
5390 }
5391
5392 task_unlock(task);
5393 return KERN_SUCCESS;
5394 }
5395
5396 void
5397 task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
5398 {
5399 if (warning == 0) {
5400 SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
5401 }
5402 }
5403
5404 void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
5405 {
5406 int pid = 0;
5407 task_t task = current_task();
5408 #ifdef EXC_RESOURCE_MONITORS
5409 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
5410 #endif /* EXC_RESOURCE_MONITORS */
5411 struct ledger_entry_info lei;
5412 kern_return_t kr;
5413
5414 #ifdef MACH_BSD
5415 pid = proc_selfpid();
5416 #endif
5417 /*
5418 * Get the ledger entry info. We need to do this before disabling the exception
5419 * to get correct values for all fields.
5420 */
5421 switch(flavor) {
5422 case FLAVOR_IO_PHYSICAL_WRITES:
5423 ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
5424 break;
5425 case FLAVOR_IO_LOGICAL_WRITES:
5426 ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
5427 break;
5428 }
5429
5430
5431 /*
5432 * Disable the exception notification so we don't overwhelm
5433 * the listener with an endless stream of redundant exceptions.
5434 * TODO: detect whether another thread is already reporting the violation.
5435 */
5436 uint32_t flags = IOMON_DISABLE;
5437 task_io_monitor_ctl(task, &flags);
5438
5439 if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
5440 trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
5441 }
5442 printf("process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
5443 pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));
5444
5445 kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
5446 if (kr) {
5447 printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
5448 }
5449
5450 #ifdef EXC_RESOURCE_MONITORS
5451 code[0] = code[1] = 0;
5452 EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
5453 EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
5454 EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
5455 EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
5456 EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
5457 exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
5458 #endif /* EXC_RESOURCE_MONITORS */
5459 }
5460
5461 /* Placeholders for the task set/get voucher interfaces */
5462 kern_return_t
5463 task_get_mach_voucher(
5464 task_t task,
5465 mach_voucher_selector_t __unused which,
5466 ipc_voucher_t *voucher)
5467 {
5468 if (TASK_NULL == task)
5469 return KERN_INVALID_TASK;
5470
5471 *voucher = NULL;
5472 return KERN_SUCCESS;
5473 }
5474
5475 kern_return_t
5476 task_set_mach_voucher(
5477 task_t task,
5478 ipc_voucher_t __unused voucher)
5479 {
5480 if (TASK_NULL == task)
5481 return KERN_INVALID_TASK;
5482
5483 return KERN_SUCCESS;
5484 }
5485
5486 kern_return_t
5487 task_swap_mach_voucher(
5488 task_t task,
5489 ipc_voucher_t new_voucher,
5490 ipc_voucher_t *in_out_old_voucher)
5491 {
5492 if (TASK_NULL == task)
5493 return KERN_INVALID_TASK;
5494
5495 *in_out_old_voucher = new_voucher;
5496 return KERN_SUCCESS;
5497 }
5498
5499 void task_set_gpu_denied(task_t task, boolean_t denied)
5500 {
5501 task_lock(task);
5502
5503 if (denied) {
5504 task->t_flags |= TF_GPU_DENIED;
5505 } else {
5506 task->t_flags &= ~TF_GPU_DENIED;
5507 }
5508
5509 task_unlock(task);
5510 }
5511
5512 boolean_t task_is_gpu_denied(task_t task)
5513 {
5514 /* We don't need the lock to read this flag */
5515 return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
5516 }
5517
5518
5519 uint64_t get_task_memory_region_count(task_t task)
5520 {
5521 vm_map_t map;
5522 map = (task == kernel_task) ? kernel_map: task->map;
5523 return((uint64_t)get_map_nentries(map));
5524 }
5525
5526 static void
5527 kdebug_trace_dyld_internal(uint32_t base_code,
5528 struct dyld_kernel_image_info *info)
5529 {
5530 static_assert(sizeof(info->uuid) >= 16);
5531
5532 #if defined(__LP64__)
5533 uint64_t *uuid = (uint64_t *)&(info->uuid);
5534
5535 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5536 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
5537 uuid[1], info->load_addr,
5538 (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
5539 0);
5540 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5541 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
5542 (uint64_t)info->fsobjid.fid_objno |
5543 ((uint64_t)info->fsobjid.fid_generation << 32),
5544 0, 0, 0, 0);
5545 #else /* defined(__LP64__) */
5546 uint32_t *uuid = (uint32_t *)&(info->uuid);
5547
5548 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5549 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
5550 uuid[1], uuid[2], uuid[3], 0);
5551 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5552 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
5553 (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
5554 info->fsobjid.fid_objno, 0);
5555 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
5556 KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
5557 info->fsobjid.fid_generation, 0, 0, 0, 0);
5558 #endif /* !defined(__LP64__) */
5559 }
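/*
 * Worked example of the 64-bit packing above (LP64 path): an fsid of
 * { val[0] = 0x1234, val[1] = 0x5678 } is emitted as
 *
 *     (uint64_t)0x1234 | ((uint64_t)0x5678 << 32) == 0x0000567800001234
 *
 * so a single tracepoint argument carries both 32-bit halves; fsobjid's
 * fid_objno and fid_generation are combined the same way.
 */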
5560
5561 static kern_return_t
5562 kdebug_trace_dyld(task_t task, uint32_t base_code,
5563 vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
5564 {
5565 kern_return_t kr;
5566 dyld_kernel_image_info_array_t infos;
5567 vm_map_offset_t map_data;
5568 vm_offset_t data;
5569
5570 if (!kdebug_enable ||
5571 !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0)))
5572 {
5573 vm_map_copy_discard(infos_copy);
5574 return KERN_SUCCESS;
5575 }
5576
5577 assert(infos_copy != NULL);
5578
5579 if (task == NULL || task != current_task()) {
5580 return KERN_INVALID_TASK;
5581 }
5582
5583 kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
5584 if (kr != KERN_SUCCESS) {
5585 return kr;
5586 }
5587
5588 infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);
5589
5590 for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
5591 kdebug_trace_dyld_internal(base_code, &(infos[i]));
5592 }
5593
5594 data = CAST_DOWN(vm_offset_t, map_data);
5595 mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
5596 return KERN_SUCCESS;
5597 }
5598
5599 kern_return_t
5600 task_register_dyld_image_infos(task_t task,
5601 dyld_kernel_image_info_array_t infos_copy,
5602 mach_msg_type_number_t infos_len)
5603 {
5604 return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
5605 (vm_map_copy_t)infos_copy, infos_len);
5606 }
5607
5608 kern_return_t
5609 task_unregister_dyld_image_infos(task_t task,
5610 dyld_kernel_image_info_array_t infos_copy,
5611 mach_msg_type_number_t infos_len)
5612 {
5613 return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
5614 (vm_map_copy_t)infos_copy, infos_len);
5615 }
5616
5617 kern_return_t
5618 task_get_dyld_image_infos(__unused task_t task,
5619 __unused dyld_kernel_image_info_array_t * dyld_images,
5620 __unused mach_msg_type_number_t * dyld_imagesCnt)
5621 {
5622 return KERN_NOT_SUPPORTED;
5623 }
5624
5625 kern_return_t
5626 task_register_dyld_shared_cache_image_info(task_t task,
5627 dyld_kernel_image_info_t cache_img,
5628 __unused boolean_t no_cache,
5629 __unused boolean_t private_cache)
5630 {
5631 if (task == NULL || task != current_task()) {
5632 return KERN_INVALID_TASK;
5633 }
5634
5635 kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
5636 return KERN_SUCCESS;
5637 }
5638
5639 kern_return_t
5640 task_register_dyld_set_dyld_state(__unused task_t task,
5641 __unused uint8_t dyld_state)
5642 {
5643 return KERN_NOT_SUPPORTED;
5644 }
5645
5646 kern_return_t
5647 task_register_dyld_get_process_state(__unused task_t task,
5648 __unused dyld_kernel_process_info_t * dyld_process_state)
5649 {
5650 return KERN_NOT_SUPPORTED;
5651 }
5652
5653 #if CONFIG_SECLUDED_MEMORY
5654 int num_tasks_can_use_secluded_mem = 0;
5655
5656 void
5657 task_set_can_use_secluded_mem(
5658 task_t task,
5659 boolean_t can_use_secluded_mem)
5660 {
5661 if (!task->task_could_use_secluded_mem) {
5662 return;
5663 }
5664 task_lock(task);
5665 task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
5666 task_unlock(task);
5667 }
5668
5669 void
5670 task_set_can_use_secluded_mem_locked(
5671 task_t task,
5672 boolean_t can_use_secluded_mem)
5673 {
5674 assert(task->task_could_use_secluded_mem);
5675 if (can_use_secluded_mem &&
5676 secluded_for_apps && /* global boot-arg */
5677 !task->task_can_use_secluded_mem) {
5678 assert(num_tasks_can_use_secluded_mem >= 0);
5679 OSAddAtomic(+1,
5680 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
5681 task->task_can_use_secluded_mem = TRUE;
5682 } else if (!can_use_secluded_mem &&
5683 task->task_can_use_secluded_mem) {
5684 assert(num_tasks_can_use_secluded_mem > 0);
5685 OSAddAtomic(-1,
5686 (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
5687 task->task_can_use_secluded_mem = FALSE;
5688 }
5689 }
5690
5691 void
5692 task_set_could_use_secluded_mem(
5693 task_t task,
5694 boolean_t could_use_secluded_mem)
5695 {
5696 task->task_could_use_secluded_mem = could_use_secluded_mem;
5697 }
5698
5699 void
5700 task_set_could_also_use_secluded_mem(
5701 task_t task,
5702 boolean_t could_also_use_secluded_mem)
5703 {
5704 task->task_could_also_use_secluded_mem = could_also_use_secluded_mem;
5705 }
5706
5707 boolean_t
5708 task_can_use_secluded_mem(
5709 task_t task)
5710 {
5711 if (task->task_can_use_secluded_mem) {
5712 assert(task->task_could_use_secluded_mem);
5713 assert(num_tasks_can_use_secluded_mem > 0);
5714 return TRUE;
5715 }
5716 if (task->task_could_also_use_secluded_mem &&
5717 num_tasks_can_use_secluded_mem > 0) {
5718 assert(num_tasks_can_use_secluded_mem > 0);
5719 return TRUE;
5720 }
5721 return FALSE;
5722 }
5723
5724 boolean_t
5725 task_could_use_secluded_mem(
5726 task_t task)
5727 {
5728 return task->task_could_use_secluded_mem;
5729 }
5730 #endif /* CONFIG_SECLUDED_MEMORY */
5731
5732 queue_head_t *
5733 task_io_user_clients(task_t task)
5734 {
5735 return (&task->io_user_clients);
5736 }