/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_inspect.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>
#include <kern/restartable.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <os/log.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_pager.h>

#include <sys/resource.h>
#include <sys/signalvar.h>	/* for coredump */
#include <sys/bsdtask_info.h>
/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <libkern/section_keywords.h>

#include <mach-o/loader.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#include <kern/sfi.h>		/* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#include <IOKit/IOBSD.h>

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

SECURITY_READ_ONLY_LATE(task_t) kernel_task;
SECURITY_READ_ONLY_LATE(zone_t) task_zone;
lck_attr_t     task_lck_attr;
lck_grp_t      task_lck_grp;
lck_grp_attr_t task_lck_grp_attr;

extern int exc_via_corpse_forking;
extern int corpse_for_fatal_memkill;
extern boolean_t proc_send_synchronous_EXC_RESOURCE(void *p);

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t dead_task_statistics;
lck_spin_t dead_task_statistics_lock;

ledger_template_t task_ledger_template = NULL;

SECURITY_READ_ONLY_LATE(struct _task_ledger_indices) task_ledgers __attribute__((used)) =
{.cpu_time = -1,
 .tkm_private = -1,
 .tkm_shared = -1,
 .phys_mem = -1,
 .wired_mem = -1,
 .internal = -1,
 .iokit_mapped = -1,
 .alternate_accounting = -1,
 .alternate_accounting_compressed = -1,
 .page_table = -1,
 .phys_footprint = -1,
 .internal_compressed = -1,
 .purgeable_volatile = -1,
 .purgeable_nonvolatile = -1,
 .purgeable_volatile_compressed = -1,
 .purgeable_nonvolatile_compressed = -1,
 .tagged_nofootprint = -1,
 .tagged_footprint = -1,
 .tagged_nofootprint_compressed = -1,
 .tagged_footprint_compressed = -1,
 .network_volatile = -1,
 .network_nonvolatile = -1,
 .network_volatile_compressed = -1,
 .network_nonvolatile_compressed = -1,
 .media_nofootprint = -1,
 .media_footprint = -1,
 .media_nofootprint_compressed = -1,
 .media_footprint_compressed = -1,
 .graphics_nofootprint = -1,
 .graphics_footprint = -1,
 .graphics_nofootprint_compressed = -1,
 .graphics_footprint_compressed = -1,
 .neural_nofootprint = -1,
 .neural_footprint = -1,
 .neural_nofootprint_compressed = -1,
 .neural_footprint_compressed = -1,
 .platform_idle_wakeups = -1,
 .interrupt_wakeups = -1,
#if !CONFIG_EMBEDDED
 .sfi_wait_times = { 0 /* initialized at runtime */},
#endif /* !CONFIG_EMBEDDED */
 .cpu_time_billed_to_me = -1,
 .cpu_time_billed_to_others = -1,
 .physical_writes = -1,
 .logical_writes = -1,
 .logical_writes_to_external = -1,
#if DEBUG || DEVELOPMENT
 .pages_grabbed = -1,
 .pages_grabbed_kern = -1,
 .pages_grabbed_iopl = -1,
 .pages_grabbed_upl = -1,
#endif
 .energy_billed_to_me = -1,
 .energy_billed_to_others = -1};

/* System sleep state */
boolean_t tasks_suspend_state;


void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);

extern kern_return_t iokit_task_terminate(task_t task);
extern void iokit_task_app_suspended_changed(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

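/*
 * Worked example (illustrative; the numbers are assumptions, not from this
 * source): with a per-task limit of max_task_pmem = 500 MB, the default 80%
 * warning level means the footprint warning fires once the task's
 * phys_footprint reaches 500 * 80 / 100 = 400 MB.
 */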

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT	150	/* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL	300	/* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (i.e. when the task's wakeups rate exceeds 70% of the limit, start taking user
 * stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER	70

int task_wakeups_monitor_interval; /* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;     /* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations. */

ledger_amount_t max_task_footprint = 0;   /* Per-task limit on physical memory consumption in bytes */
int max_task_footprint_warning_level = 0; /* Per-task limit warning percentage */
int max_task_footprint_mb = 0;            /* Per-task limit on physical memory consumption in megabytes */

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT	(20480ull)	/* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL	(86400ull)	/* in seconds */

uint64_t task_iomon_limit_mb;      /* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs; /* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT (10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;              /* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0; /* Global count for logical writes */
int64_t global_logical_writes_to_external_count = 0; /* Global count for logical writes to external storage */
static boolean_t global_update_logical_writes(int64_t, int64_t *);

#define TASK_MAX_THREAD_LIMIT 256

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
int pmap_ledgers_panic_leeway = 3;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern uint32_t proc_platform(struct proc *);
extern uint32_t proc_sdk(struct proc *);
extern void proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int proc_pid(struct proc *p);
extern int proc_selfpid(void);
extern struct proc *current_proc(void);
extern char *proc_name_address(struct proc *p);
extern uint64_t get_dispatchqueue_offset_from_proc(void *);
extern int kevent_proc_copy_uptrs(void *proc, uint64_t *buf, int bufsize);
extern void workq_proc_suspended(struct proc *p);
extern void workq_proc_resumed(struct proc *p);

#if CONFIG_MEMORYSTATUS
extern void proc_memstat_terminated(struct proc* p, boolean_t set);
extern void memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern void memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern boolean_t memorystatus_allowed_vm_map_fork(task_t task);
extern uint64_t memorystatus_available_memory_internal(proc_t p);

#if DEVELOPMENT || DEBUG
extern void memorystatus_abort_vm_map_fork(task_t);
#endif

#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

#if DEVELOPMENT || DEBUG
int exc_resource_threads_enabled;
#endif /* DEVELOPMENT || DEBUG */

#if (DEVELOPMENT || DEBUG)
uint32_t task_exc_guard_default = TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_ONCE | TASK_EXC_GUARD_MP_CORPSE |
    TASK_EXC_GUARD_VM_DELIVER | TASK_EXC_GUARD_VM_ONCE | TASK_EXC_GUARD_VM_CORPSE;
#else
uint32_t task_exc_guard_default = 0;
#endif

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);

static void task_synchronizer_destroy_all(task_t task);
static os_ref_count_t
task_add_turnstile_watchports_locked(
	task_t                     task,
	struct task_watchports     *watchports,
	struct task_watchport_elem **previous_elem_array,
	ipc_port_t                 *portwatch_ports,
	uint32_t                   portwatch_count);

static os_ref_count_t
task_remove_turnstile_watchports_locked(
	task_t                 task,
	struct task_watchports *watchports,
	ipc_port_t             *port_freelist);

static struct task_watchports *
task_watchports_alloc_init(
	task_t   task,
	thread_t thread,
	uint32_t count);

static void
task_watchports_deallocate(
	struct task_watchports *watchports);

void
task_set_64bit(
	task_t    task,
	boolean_t is_64bit,
	boolean_t is_64bit_data)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	/*
	 * Switching to/from 64-bit address spaces
	 */
	if (is_64bit) {
		if (!task_has_64Bit_addr(task)) {
			task_set_64Bit_addr(task);
		}
	} else {
		if (task_has_64Bit_addr(task)) {
			task_clear_64Bit_addr(task);
		}
	}

	/*
	 * Switching to/from 64-bit register state.
	 */
	if (is_64bit_data) {
		if (task_has_64Bit_data(task)) {
			goto out;
		}

		task_set_64Bit_data(task);
	} else {
		if (!task_has_64Bit_data(task)) {
			goto out;
		}

		task_clear_64Bit_data(task);
	}

	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);

#if defined(__arm64__)
		/* specifically, if running on H9 */
		if (thread == current_thread()) {
			uint64_t arg1, arg2;
			int urgency;
			spl_t spl = splsched();
			/*
			 * This call reports that the current thread changed its
			 * 32-bitness. No other thread was on core when the
			 * 32-bitness was changed, but current_thread() is on core,
			 * and the previous call to machine_thread_going_on_core()
			 * reported a 32-bitness that is now wrong.
			 *
			 * This is needed for bring-up; a different callback should
			 * be used in the future.
			 *
			 * TODO: Remove this callout when we no longer support 32-bit code on H9
			 */
			thread_lock(thread);
			urgency = thread_get_urgency(thread, &arg1, &arg2);
			machine_thread_going_on_core(thread, urgency, 0, 0, mach_approximate_time());
			thread_unlock(thread);
			splx(spl);
		}
#endif /* defined(__arm64__) */
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}

boolean_t
task_get_64bit_data(task_t task)
{
	return task_has_64Bit_data(task);
}

void
task_set_platform_binary(
	task_t    task,
	boolean_t is_platform)
{
	task_lock(task);
	if (is_platform) {
		task->t_flags |= TF_PLATFORM;
		/* set exc guard default behavior for first-party code */
		task->task_exc_guard = (task_exc_guard_default & TASK_EXC_GUARD_ALL);
	} else {
		task->t_flags &= ~(TF_PLATFORM);
		/* set exc guard default behavior for third-party code */
		task->task_exc_guard = ((task_exc_guard_default >> TASK_EXC_GUARD_THIRD_PARTY_DEFAULT_SHIFT) & TASK_EXC_GUARD_ALL);
	}
	task_unlock(task);
}

/*
 * Set or clear the per-task TF_CA_CLIENT_WI flag according to the specified argument.
 * Returns "false" if the flag was already set, and "true" in all other cases.
 */
bool
task_set_ca_client_wi(
	task_t    task,
	boolean_t set_or_clear)
{
	bool ret = true;
	task_lock(task);
	if (set_or_clear) {
		/* Tasks can have only one CA_CLIENT work interval */
		if (task->t_flags & TF_CA_CLIENT_WI) {
			ret = false;
		} else {
			task->t_flags |= TF_CA_CLIENT_WI;
		}
	} else {
		task->t_flags &= ~TF_CA_CLIENT_WI;
	}
	task_unlock(task);
	return ret;
}

b0d623f7 543void
5ba3f43e 544task_set_dyld_info(
0a7de745
A
545 task_t task,
546 mach_vm_address_t addr,
547 mach_vm_size_t size)
b0d623f7
A
548{
549 task_lock(task);
550 task->all_image_info_addr = addr;
0a7de745
A
551 task->all_image_info_size = size;
552 task_unlock(task);
b0d623f7
A
553}
554
cb323159
A
555void
556task_set_mach_header_address(
557 task_t task,
558 mach_vm_address_t addr)
559{
560 task_lock(task);
561 task->mach_header_vm_address = addr;
562 task_unlock(task);
563}
564
fe8ab488 565void
0a7de745
A
566task_atm_reset(__unused task_t task)
567{
fe8ab488
A
568#if CONFIG_ATM
569 if (task->atm_context != NULL) {
0a7de745
A
570 atm_task_descriptor_destroy(task->atm_context);
571 task->atm_context = NULL;
fe8ab488
A
572 }
573#endif
fe8ab488
A
574}
575
490019cf 576void
0a7de745
A
577task_bank_reset(__unused task_t task)
578{
490019cf 579 if (task->bank_context != NULL) {
0a7de745 580 bank_task_destroy(task);
490019cf 581 }
490019cf
A
582}
583
584/*
585 * NOTE: This should only be called when the P_LINTRANSIT
586 * flag is set (the proc_trans lock is held) on the
587 * proc associated with the task.
588 */
589void
0a7de745
A
590task_bank_init(__unused task_t task)
591{
490019cf
A
592 if (task->bank_context != NULL) {
593 panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
594 }
595 bank_task_initialize(task);
490019cf
A
596}
597
743345f9
A
598void
599task_set_did_exec_flag(task_t task)
600{
601 task->t_procflags |= TPF_DID_EXEC;
602}
603
604void
605task_clear_exec_copy_flag(task_t task)
606{
607 task->t_procflags &= ~TPF_EXEC_COPY;
608}
609
743345f9
A
event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->returnwait_inheritor;
}

void
task_clear_return_wait(task_t task, uint32_t flags)
{
	if (flags & TCRW_CLEAR_INITIAL_WAIT) {
		thread_wakeup(task_get_return_wait_event(task));
	}

	if (flags & TCRW_CLEAR_FINAL_WAIT) {
		is_write_lock(task->itk_space);

		task->t_returnwaitflags &= ~TRW_LRETURNWAIT;
		task->returnwait_inheritor = NULL;

		if (task->t_returnwaitflags & TRW_LRETURNWAITER) {
			struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
			    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

			waitq_wakeup64_all(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_AWAKENED, 0);

			turnstile_update_inheritor(turnstile, NULL,
			    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_HELD);

			turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
			turnstile_cleanup();
			task->t_returnwaitflags &= ~TRW_LRETURNWAITER;
		}
		is_write_unlock(task->itk_space);
	}
}

void __attribute__((noreturn))
task_wait_to_return(void)
{
	task_t task = current_task();

	is_write_lock(task->itk_space);

	if (task->t_returnwaitflags & TRW_LRETURNWAIT) {
		struct turnstile *turnstile = turnstile_prepare((uintptr_t) task_get_return_wait_event(task),
		    NULL, TURNSTILE_NULL, TURNSTILE_ULOCK);

		do {
			task->t_returnwaitflags |= TRW_LRETURNWAITER;
			turnstile_update_inheritor(turnstile, task->returnwait_inheritor,
			    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

			waitq_assert_wait64(&turnstile->ts_waitq,
			    CAST_EVENT64_T(task_get_return_wait_event(task)),
			    THREAD_UNINT, TIMEOUT_WAIT_FOREVER);

			is_write_unlock(task->itk_space);

			turnstile_update_inheritor_complete(turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

			thread_block(THREAD_CONTINUE_NULL);

			is_write_lock(task->itk_space);
		} while (task->t_returnwaitflags & TRW_LRETURNWAIT);

		turnstile_complete((uintptr_t) task_get_return_wait_event(task), NULL, NULL, TURNSTILE_ULOCK);
	}

	is_write_unlock(task->itk_space);
	turnstile_cleanup();


#if CONFIG_MACF
	/*
	 * Before jumping to userspace and allowing this process to execute any code,
	 * notify any interested parties.
	 */
	mac_proc_notify_exec_complete(current_proc());
#endif

	thread_bootstrap_return();
}

#ifdef CONFIG_32BIT_TELEMETRY
boolean_t
task_consume_32bit_log_flag(task_t task)
{
	if ((task->t_procflags & TPF_LOG_32BIT_TELEMETRY) != 0) {
		task->t_procflags &= ~TPF_LOG_32BIT_TELEMETRY;
		return TRUE;
	} else {
		return FALSE;
	}
}

void
task_set_32bit_log_flag(task_t task)
{
	task->t_procflags |= TPF_LOG_32BIT_TELEMETRY;
}
#endif /* CONFIG_32BIT_TELEMETRY */

boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

boolean_t
task_is_halting(task_t task)
{
	return task->halting;
}

#if TASK_REFERENCE_LEAK_DEBUG
#include <kern/btlog.h>

static btlog_t *task_ref_btlog;
#define TASK_REF_OP_INCR	0x1
#define TASK_REF_OP_DECR	0x2

#define TASK_REF_NUM_RECORDS	100000
#define TASK_REF_BTDEPTH	7

void
task_reference_internal(task_t task)
{
	void * bt[TASK_REF_BTDEPTH];
	int numsaved = 0;

	zone_require(task, task_zone);
	os_ref_retain(&task->ref_count);

	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
	    bt, numsaved);
}

os_ref_count_t
task_deallocate_internal(task_t task)
{
	void * bt[TASK_REF_BTDEPTH];
	int numsaved = 0;

	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);
	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
	    bt, numsaved);

	return os_ref_release(&task->ref_count);
}

#endif /* TASK_REFERENCE_LEAK_DEBUG */

void
task_init(void)
{
	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
	lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr);

	task_zone = zinit(
		sizeof(struct task),
		task_max * sizeof(struct task),
		TASK_CHUNK * sizeof(struct task),
		"tasks");

	zone_change(task_zone, Z_NOENCRYPT, TRUE);

#if CONFIG_EMBEDDED
	task_watch_init();
#endif /* CONFIG_EMBEDDED */

	/*
	 * Configure the per-task memory limit.
	 * The boot-arg is interpreted as Megabytes,
	 * and takes precedence over the device tree.
	 * Setting the boot-arg to 0 disables task limits.
	 */
	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
	    sizeof(max_task_footprint_mb))) {
		/*
		 * No limit was found in boot-args, so go look in the device tree.
		 */
		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
		    sizeof(max_task_footprint_mb))) {
			/*
			 * No limit was found in the device tree either.
			 */
			max_task_footprint_mb = 0;
		}
	}

	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024;	// Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 * -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 * -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

		/*
		 * Never allow the warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}

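		/*
		 * Worked example (illustrative; the limit value is an assumption,
		 * not from this source): on a device with >= 1GB of memory and
		 * max_task_footprint_mb = 1000, the warning level computes to
		 * ((1000 - 100) * 100) / 1000 = 90%, i.e. the warning fires
		 * 100MB below the 1000MB limit and is above the 80% default,
		 * so the clamp above leaves it unchanged.
		 */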
		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}

#if DEVELOPMENT || DEBUG
	if (!PE_parse_boot_argn("exc_resource_threads",
	    &exc_resource_threads_enabled,
	    sizeof(exc_resource_threads_enabled))) {
		exc_resource_threads_enabled = 1;
	}
	PE_parse_boot_argn("task_exc_guard_default",
	    &task_exc_guard_default,
	    sizeof(task_exc_guard_default));
#endif /* DEVELOPMENT || DEBUG */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
	    sizeof(hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof(task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof(task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
	    sizeof(task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource,
	    sizeof(disable_exc_resource))) {
		disable_exc_resource = 0;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof(task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof(task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof(io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

#if TASK_REFERENCE_LEAK_DEBUG
	task_ref_btlog = btlog_create(TASK_REF_NUM_RECORDS, TASK_REF_BTDEPTH, TRUE /* caller_will_remove_entries_for_element? */);
	assert(task_ref_btlog);
#endif

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, NULL, FALSE, TRUE, TRUE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, NULL, FALSE, FALSE, FALSE, TF_NONE, TPF_NONE, TWF_NONE, &kernel_task) != KERN_SUCCESS)
#endif
	{ panic("task_init\n");}

#if defined(HAS_APPLE_PAC)
	kernel_task->rop_pid = KERNEL_ROP_ID;
	// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
	// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
	ml_task_set_disable_user_jop(kernel_task, FALSE);
#endif

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
	lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t      parent_task,
	__unused vm_offset_t map_base,
	__unused vm_size_t   map_size,
	__unused task_t      *child_task)
{
	return KERN_INVALID_ARGUMENT;
}

kern_return_t
task_create(
	task_t                          parent_task,
	__unused ledger_port_array_t    ledger_ports,
	__unused mach_msg_type_number_t num_ledger_ports,
	__unused boolean_t              inherit_memory,
	__unused task_t                 *child_task)	/* OUT */
{
	if (parent_task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return KERN_FAILURE;
}

kern_return_t
host_security_create_task_token(
	host_security_t                 host_security,
	task_t                          parent_task,
	__unused security_token_t       sec_token,
	__unused audit_token_t          audit_token,
	__unused host_priv_t            host_priv,
	__unused ledger_port_array_t    ledger_ports,
	__unused mach_msg_type_number_t num_ledger_ports,
	__unused boolean_t              inherit_memory,
	__unused task_t                 *child_task)	/* OUT */
{
	if (parent_task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (host_security == HOST_NULL) {
		return KERN_INVALID_SECURITY;
	}

	/*
	 * No longer supported.
	 */
	return KERN_FAILURE;
}

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *   + (internal - alternate_accounting)
 *   + (internal_compressed - alternate_accounting_compressed)
 *   + iokit_mapped
 *   + purgeable_nonvolatile
 *   + purgeable_nonvolatile_compressed
 *   + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task, regardless of
 *   clean/dirty or internal/external state.
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 *
 * pages_grabbed
 *   pages_grabbed counts all page grabs in a task. It is also broken out into three subtypes
 *   which track UPL, IOPL and Kernel page grabs.
 */
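
/*
 * Illustrative sketch (not part of XNU; the helper name and its use of raw
 * ledger balances are assumptions for illustration only): the phys_footprint
 * sum documented above, written out as plain arithmetic.
 */
#if 0
static ledger_amount_t
example_phys_footprint(
	ledger_amount_t internal,
	ledger_amount_t internal_compressed,
	ledger_amount_t alternate_accounting,
	ledger_amount_t alternate_accounting_compressed,
	ledger_amount_t iokit_mapped,
	ledger_amount_t purgeable_nonvolatile,
	ledger_amount_t purgeable_nonvolatile_compressed,
	ledger_amount_t page_table)
{
	/*
	 * alternate_accounting is subtracted so internal dirty pages that are
	 * part of IOKit mappings are not counted twice (once in internal and
	 * once in iokit_mapped); likewise for the compressed variants.
	 */
	return (internal - alternate_accounting) +
	       (internal_compressed - alternate_accounting_compressed) +
	       iokit_mapped +
	       purgeable_nonvolatile +
	       purgeable_nonvolatile_compressed +
	       page_table;
}
#endif
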
void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic",
	    &pmap_ledgers_panic,
	    sizeof(pmap_ledgers_panic));
	PE_parse_boot_argn("pmap_ledgers_panic_leeway",
	    &pmap_ledgers_panic_leeway,
	    sizeof(pmap_ledgers_panic_leeway));
#endif /* MACH_ASSERT */

	if ((t = ledger_template_create("Per-task ledger")) == NULL) {
		panic("couldn't create task ledger template");
	}

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
	    "bytes");
	task_ledgers.iokit_mapped = ledger_entry_add(t, "iokit_mapped", "mappings",
	    "bytes");
	task_ledgers.alternate_accounting = ledger_entry_add(t, "alternate_accounting", "physmem",
	    "bytes");
	task_ledgers.alternate_accounting_compressed = ledger_entry_add(t, "alternate_accounting_compressed", "physmem",
	    "bytes");
	task_ledgers.page_table = ledger_entry_add(t, "page_table", "physmem",
	    "bytes");
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
	task_ledgers.purgeable_volatile = ledger_entry_add(t, "purgeable_volatile", "physmem", "bytes");
	task_ledgers.purgeable_nonvolatile = ledger_entry_add(t, "purgeable_nonvolatile", "physmem", "bytes");
	task_ledgers.purgeable_volatile_compressed = ledger_entry_add(t, "purgeable_volatile_compress", "physmem", "bytes");
	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add(t, "purgeable_nonvolatile_compress", "physmem", "bytes");
#if DEBUG || DEVELOPMENT
	task_ledgers.pages_grabbed = ledger_entry_add(t, "pages_grabbed", "physmem", "count");
	task_ledgers.pages_grabbed_kern = ledger_entry_add(t, "pages_grabbed_kern", "physmem", "count");
	task_ledgers.pages_grabbed_iopl = ledger_entry_add(t, "pages_grabbed_iopl", "physmem", "count");
	task_ledgers.pages_grabbed_upl = ledger_entry_add(t, "pages_grabbed_upl", "physmem", "count");
#endif
	task_ledgers.tagged_nofootprint = ledger_entry_add(t, "tagged_nofootprint", "physmem", "bytes");
	task_ledgers.tagged_footprint = ledger_entry_add(t, "tagged_footprint", "physmem", "bytes");
	task_ledgers.tagged_nofootprint_compressed = ledger_entry_add(t, "tagged_nofootprint_compressed", "physmem", "bytes");
	task_ledgers.tagged_footprint_compressed = ledger_entry_add(t, "tagged_footprint_compressed", "physmem", "bytes");
	task_ledgers.network_volatile = ledger_entry_add(t, "network_volatile", "physmem", "bytes");
	task_ledgers.network_nonvolatile = ledger_entry_add(t, "network_nonvolatile", "physmem", "bytes");
	task_ledgers.network_volatile_compressed = ledger_entry_add(t, "network_volatile_compressed", "physmem", "bytes");
	task_ledgers.network_nonvolatile_compressed = ledger_entry_add(t, "network_nonvolatile_compressed", "physmem", "bytes");
	task_ledgers.media_nofootprint = ledger_entry_add(t, "media_nofootprint", "physmem", "bytes");
	task_ledgers.media_footprint = ledger_entry_add(t, "media_footprint", "physmem", "bytes");
	task_ledgers.media_nofootprint_compressed = ledger_entry_add(t, "media_nofootprint_compressed", "physmem", "bytes");
	task_ledgers.media_footprint_compressed = ledger_entry_add(t, "media_footprint_compressed", "physmem", "bytes");
	task_ledgers.graphics_nofootprint = ledger_entry_add(t, "graphics_nofootprint", "physmem", "bytes");
	task_ledgers.graphics_footprint = ledger_entry_add(t, "graphics_footprint", "physmem", "bytes");
	task_ledgers.graphics_nofootprint_compressed = ledger_entry_add(t, "graphics_nofootprint_compressed", "physmem", "bytes");
	task_ledgers.graphics_footprint_compressed = ledger_entry_add(t, "graphics_footprint_compressed", "physmem", "bytes");
	task_ledgers.neural_nofootprint = ledger_entry_add(t, "neural_nofootprint", "physmem", "bytes");
	task_ledgers.neural_footprint = ledger_entry_add(t, "neural_footprint", "physmem", "bytes");
	task_ledgers.neural_nofootprint_compressed = ledger_entry_add(t, "neural_nofootprint_compressed", "physmem", "bytes");
	task_ledgers.neural_footprint_compressed = ledger_entry_add(t, "neural_footprint_compressed", "physmem", "bytes");


	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

#if CONFIG_SCHED_SFI
	sfi_class_id_t class_id, ledger_alias;
	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		task_ledgers.sfi_wait_times[class_id] = -1;
	}

	/* don't account for UNSPECIFIED */
	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
			/* Check to see if alias has been registered yet */
			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
			} else {
				/* Otherwise, initialize it first */
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
			}
		} else {
			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
		}

		if (task_ledgers.sfi_wait_times[class_id] < 0) {
			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
		}
	}

	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID - 1] != -1);
#endif /* CONFIG_SCHED_SFI */

	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");
	task_ledgers.logical_writes_to_external = ledger_entry_add(t, "logical_writes_to_external", "res", "bytes");
	task_ledgers.energy_billed_to_me = ledger_entry_add(t, "energy_billed_to_me", "power", "nj");
	task_ledgers.energy_billed_to_others = ledger_entry_add(t, "energy_billed_to_others", "power", "nj");

	if ((task_ledgers.cpu_time < 0) ||
	    (task_ledgers.tkm_private < 0) ||
	    (task_ledgers.tkm_shared < 0) ||
	    (task_ledgers.phys_mem < 0) ||
	    (task_ledgers.wired_mem < 0) ||
	    (task_ledgers.internal < 0) ||
	    (task_ledgers.iokit_mapped < 0) ||
	    (task_ledgers.alternate_accounting < 0) ||
	    (task_ledgers.alternate_accounting_compressed < 0) ||
	    (task_ledgers.page_table < 0) ||
	    (task_ledgers.phys_footprint < 0) ||
	    (task_ledgers.internal_compressed < 0) ||
	    (task_ledgers.purgeable_volatile < 0) ||
	    (task_ledgers.purgeable_nonvolatile < 0) ||
	    (task_ledgers.purgeable_volatile_compressed < 0) ||
	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
	    (task_ledgers.tagged_nofootprint < 0) ||
	    (task_ledgers.tagged_footprint < 0) ||
	    (task_ledgers.tagged_nofootprint_compressed < 0) ||
	    (task_ledgers.tagged_footprint_compressed < 0) ||
	    (task_ledgers.network_volatile < 0) ||
	    (task_ledgers.network_nonvolatile < 0) ||
	    (task_ledgers.network_volatile_compressed < 0) ||
	    (task_ledgers.network_nonvolatile_compressed < 0) ||
	    (task_ledgers.media_nofootprint < 0) ||
	    (task_ledgers.media_footprint < 0) ||
	    (task_ledgers.media_nofootprint_compressed < 0) ||
	    (task_ledgers.media_footprint_compressed < 0) ||
	    (task_ledgers.graphics_nofootprint < 0) ||
	    (task_ledgers.graphics_footprint < 0) ||
	    (task_ledgers.graphics_nofootprint_compressed < 0) ||
	    (task_ledgers.graphics_footprint_compressed < 0) ||
	    (task_ledgers.neural_nofootprint < 0) ||
	    (task_ledgers.neural_footprint < 0) ||
	    (task_ledgers.neural_nofootprint_compressed < 0) ||
	    (task_ledgers.neural_footprint_compressed < 0) ||
	    (task_ledgers.platform_idle_wakeups < 0) ||
	    (task_ledgers.interrupt_wakeups < 0) ||
	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
	    (task_ledgers.physical_writes < 0) ||
	    (task_ledgers.logical_writes < 0) ||
	    (task_ledgers.logical_writes_to_external < 0) ||
	    (task_ledgers.energy_billed_to_me < 0) ||
	    (task_ledgers.energy_billed_to_others < 0)
	    ) {
		panic("couldn't create entries for task ledger template");
	}

	ledger_track_credit_only(t, task_ledgers.phys_footprint);
	ledger_track_credit_only(t, task_ledgers.page_table);
	ledger_track_credit_only(t, task_ledgers.internal);
	ledger_track_credit_only(t, task_ledgers.internal_compressed);
	ledger_track_credit_only(t, task_ledgers.iokit_mapped);
	ledger_track_credit_only(t, task_ledgers.alternate_accounting);
	ledger_track_credit_only(t, task_ledgers.alternate_accounting_compressed);
	ledger_track_credit_only(t, task_ledgers.purgeable_volatile);
	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile);
	ledger_track_credit_only(t, task_ledgers.purgeable_volatile_compressed);
	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile_compressed);
#if DEBUG || DEVELOPMENT
	ledger_track_credit_only(t, task_ledgers.pages_grabbed);
	ledger_track_credit_only(t, task_ledgers.pages_grabbed_kern);
	ledger_track_credit_only(t, task_ledgers.pages_grabbed_iopl);
	ledger_track_credit_only(t, task_ledgers.pages_grabbed_upl);
#endif
	ledger_track_credit_only(t, task_ledgers.tagged_nofootprint);
	ledger_track_credit_only(t, task_ledgers.tagged_footprint);
	ledger_track_credit_only(t, task_ledgers.tagged_nofootprint_compressed);
	ledger_track_credit_only(t, task_ledgers.tagged_footprint_compressed);
	ledger_track_credit_only(t, task_ledgers.network_volatile);
	ledger_track_credit_only(t, task_ledgers.network_nonvolatile);
	ledger_track_credit_only(t, task_ledgers.network_volatile_compressed);
	ledger_track_credit_only(t, task_ledgers.network_nonvolatile_compressed);
	ledger_track_credit_only(t, task_ledgers.media_nofootprint);
	ledger_track_credit_only(t, task_ledgers.media_footprint);
	ledger_track_credit_only(t, task_ledgers.media_nofootprint_compressed);
	ledger_track_credit_only(t, task_ledgers.media_footprint_compressed);
	ledger_track_credit_only(t, task_ledgers.graphics_nofootprint);
	ledger_track_credit_only(t, task_ledgers.graphics_footprint);
	ledger_track_credit_only(t, task_ledgers.graphics_nofootprint_compressed);
	ledger_track_credit_only(t, task_ledgers.graphics_footprint_compressed);
	ledger_track_credit_only(t, task_ledgers.neural_nofootprint);
	ledger_track_credit_only(t, task_ledgers.neural_footprint);
	ledger_track_credit_only(t, task_ledgers.neural_nofootprint_compressed);
	ledger_track_credit_only(t, task_ledgers.neural_footprint_compressed);

	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
#if MACH_ASSERT
	if (pmap_ledgers_panic) {
		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
		ledger_panic_on_negative(t, task_ledgers.page_table);
		ledger_panic_on_negative(t, task_ledgers.internal);
		ledger_panic_on_negative(t, task_ledgers.internal_compressed);
		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);

		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint);
		ledger_panic_on_negative(t, task_ledgers.tagged_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.tagged_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_volatile);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.network_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.network_nonvolatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.media_footprint);
		ledger_panic_on_negative(t, task_ledgers.media_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.media_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint);
		ledger_panic_on_negative(t, task_ledgers.graphics_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.graphics_footprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint);
		ledger_panic_on_negative(t, task_ledgers.neural_nofootprint_compressed);
		ledger_panic_on_negative(t, task_ledgers.neural_footprint_compressed);
	}
#endif /* MACH_ASSERT */

#if CONFIG_MEMORYSTATUS
	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
#endif /* CONFIG_MEMORYSTATUS */

	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
	    task_wakeups_rate_exceeded, NULL, NULL);
	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);

#if XNU_MONITOR
	ledger_template_complete_secure_alloc(t);
#else /* XNU_MONITOR */
	ledger_template_complete(t);
#endif /* XNU_MONITOR */
	task_ledger_template = t;
}

os_refgrp_decl(static, task_refgrp, "task", NULL);

kern_return_t
task_create_internal(
	task_t             parent_task,
	coalition_t        *parent_coalitions __unused,
	boolean_t          inherit_memory,
	__unused boolean_t is_64bit,
	boolean_t          is_64bit_data,
	uint32_t           t_flags,
	uint32_t           t_procflags,
	uint8_t            t_returnwaitflags,
	task_t             *child_task)	/* OUT */
{
	task_t             new_task;
	vm_shared_region_t shared_region;
	ledger_t           ledger = NULL;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* one ref for just being alive; one for our caller */
	os_ref_init_count(&new_task->ref_count, &task_refgrp, 2);

	/* allocate with active entries */
	assert(task_ledger_template != NULL);
	if ((ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
		zfree(task_zone, new_task);
		return KERN_RESOURCE_SHORTAGE;
	}

#if defined(HAS_APPLE_PAC)
	ml_task_set_rop_pid(new_task, parent_task, inherit_memory);
	ml_task_set_disable_user_jop(new_task, inherit_memory ? parent_task->disable_user_jop : FALSE);
#endif

	new_task->ledger = ledger;

#if defined(CONFIG_SCHED_MULTIQ)
	new_task->sched_group = sched_group_create();
#endif

	/* if inherit_memory is true, parent_task MUST not be NULL */
	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory) {
		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
	} else {
		unsigned int pmap_flags = is_64bit ? PMAP_CREATE_64BIT : 0;
		new_task->map = vm_map_create(pmap_create_options(ledger, 0, pmap_flags),
		    (vm_map_offset_t)(VM_MIN_ADDRESS),
		    (vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);
	}

	/* Inherit memlock limit from parent */
	if (parent_task) {
		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);
	}

	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->legacy_stop_count = 0;
	new_task->active = TRUE;
	new_task->halting = FALSE;
	new_task->priv_flags = 0;
	new_task->t_flags = t_flags;
	new_task->t_procflags = t_procflags;
	new_task->t_returnwaitflags = t_returnwaitflags;
	new_task->returnwait_inheritor = current_thread();
	new_task->importance = 0;
	new_task->crashed_thread_id = 0;
	new_task->exec_token = 0;
	new_task->watchports = NULL;
	new_task->restartable_ranges = NULL;
	new_task->task_exc_guard = 0;

#if CONFIG_ATM
	new_task->atm_context = NULL;
#endif
	new_task->bank_context = NULL;

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
	new_task->corpse_info = NULL;
#endif /* MACH_BSD */

#if CONFIG_MACF
	new_task->crash_label = NULL;
#endif

#if CONFIG_MEMORYSTATUS
	if (max_task_footprint != 0) {
		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
	}
#endif /* CONFIG_MEMORYSTATUS */

	if (task_wakeups_monitor_rate != 0) {
		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
		int32_t rate; // Ignored because of WAKEMON_SET_DEFAULTS
		task_wakeups_monitor_ctl(new_task, &flags, &rate);
	}

#if CONFIG_IO_ACCOUNTING
	uint32_t flags = IOMON_ENABLE;
	task_io_monitor_ctl(new_task, &flags);
#endif /* CONFIG_IO_ACCOUNTING */

	machine_task_init(new_task, parent_task, inherit_memory);

	new_task->task_debug = NULL;

#if DEVELOPMENT || DEBUG
	new_task->task_unnested = FALSE;
	new_task->task_disconnected_count = 0;
#endif
	queue_init(&new_task->semaphore_list);
	new_task->semaphores_owned = 0;

	ipc_task_init(new_task, parent_task);

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

	new_task->t_kpc = 0;

	new_task->pidsuspended = FALSE;
	new_task->frozen = FALSE;
	new_task->changing_freeze_state = FALSE;
	new_task->rusage_cpu_flags = 0;
	new_task->rusage_cpu_percentage = 0;
	new_task->rusage_cpu_interval = 0;
	new_task->rusage_cpu_deadline = 0;
	new_task->rusage_cpu_callt = NULL;
#if MACH_ASSERT
	new_task->suspends_outstanding = 0;
#endif

#if HYPERVISOR
	new_task->hv_task_target = NULL;
#endif /* HYPERVISOR */

#if CONFIG_EMBEDDED
	queue_init(&new_task->task_watchers);
	new_task->num_taskwatchers = 0;
	new_task->watchapplying = 0;
#endif /* CONFIG_EMBEDDED */

	new_task->mem_notify_reserved = 0;
	new_task->memlimit_attrs_reserved = 0;

	new_task->requested_policy = default_task_requested_policy;
	new_task->effective_policy = default_task_effective_policy;

	task_importance_init_from_parent(new_task, parent_task);

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		if (task_has_64Bit_addr(parent_task)) {
			task_set_64Bit_addr(new_task);
		}

		if (task_has_64Bit_data(parent_task)) {
			task_set_64Bit_data(new_task);
		}

		new_task->all_image_info_addr = parent_task->all_image_info_addr;
		new_task->all_image_info_size = parent_task->all_image_info_size;
cb323159 1491 new_task->mach_header_vm_address = 0;
0c530ab8 1492
0a7de745 1493 if (inherit_memory && parent_task->affinity_space) {
2d21ac55 1494 task_affinity_create(parent_task, new_task);
0a7de745 1495 }
b0d623f7
A
1496
1497 new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);
39236c6e 1498
0a7de745
A
1499#if DEBUG || DEVELOPMENT
1500 if (parent_task->t_flags & TF_NO_SMT) {
1501 new_task->t_flags |= TF_NO_SMT;
1502 }
1503#endif
1504
fe8ab488
A
1505 new_task->priority = BASEPRI_DEFAULT;
1506 new_task->max_priority = MAXPRI_USER;
1507
39037602 1508 task_policy_create(new_task, parent_task);
39236c6e 1509 } else {
1c79356b 1510 new_task->sec_token = KERNEL_SECURITY_TOKEN;
55e303ae 1511 new_task->audit_token = KERNEL_AUDIT_TOKEN;
b0d623f7 1512#ifdef __LP64__
0a7de745 1513 if (is_64bit) {
d9a64523
A
1514 task_set_64Bit_addr(new_task);
1515 }
b0d623f7 1516#endif
d9a64523 1517
0a7de745 1518 if (is_64bit_data) {
d9a64523
A
1519 task_set_64Bit_data(new_task);
1520 }
1521
6d2010ae
A
1522 new_task->all_image_info_addr = (mach_vm_address_t)0;
1523 new_task->all_image_info_size = (mach_vm_size_t)0;
b0d623f7
A
1524
1525 new_task->pset_hint = PROCESSOR_SET_NULL;
fe8ab488
A
1526
1527 if (kernel_task == TASK_NULL) {
1528 new_task->priority = BASEPRI_KERNEL;
1529 new_task->max_priority = MAXPRI_KERNEL;
1530 } else {
1531 new_task->priority = BASEPRI_DEFAULT;
1532 new_task->max_priority = MAXPRI_USER;
1533 }
1c79356b
A
1534 }
1535
3e170ce0 1536 bzero(new_task->coalition, sizeof(new_task->coalition));
0a7de745 1537 for (int i = 0; i < COALITION_NUM_TYPES; i++) {
3e170ce0 1538 queue_chain_init(new_task->task_coalition[i]);
0a7de745 1539 }
fe8ab488
A
1540
1541 /* Allocate I/O Statistics */
1542 new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info));
1543 assert(new_task->task_io_stats != NULL);
1544 bzero(new_task->task_io_stats, sizeof(struct io_stat_info));
1545
a39ff7e2
A
1546 bzero(&(new_task->cpu_time_eqos_stats), sizeof(new_task->cpu_time_eqos_stats));
1547 bzero(&(new_task->cpu_time_rqos_stats), sizeof(new_task->cpu_time_rqos_stats));
fe8ab488 1548
6d2010ae 1549 bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));
3e170ce0 1550
39037602
A
 1551 /* Copy resource accounting info from the parent for a corpse-forked task. */
1552 if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
743345f9 1553 task_rollup_accounting_info(new_task, parent_task);
39037602
A
1554 } else {
1555 /* Initialize to zero for standard fork/spawn case */
1556 new_task->total_user_time = 0;
1557 new_task->total_system_time = 0;
5ba3f43e 1558 new_task->total_ptime = 0;
d9a64523 1559 new_task->total_runnable_time = 0;
39037602
A
1560 new_task->faults = 0;
1561 new_task->pageins = 0;
1562 new_task->cow_faults = 0;
1563 new_task->messages_sent = 0;
1564 new_task->messages_received = 0;
1565 new_task->syscalls_mach = 0;
1566 new_task->syscalls_unix = 0;
1567 new_task->c_switch = 0;
1568 new_task->p_switch = 0;
1569 new_task->ps_switch = 0;
cb323159 1570 new_task->decompressions = 0;
39037602
A
1571 new_task->low_mem_notified_warn = 0;
1572 new_task->low_mem_notified_critical = 0;
1573 new_task->purged_memory_warn = 0;
1574 new_task->purged_memory_critical = 0;
1575 new_task->low_mem_privileged_listener = 0;
813fb2f6
A
1576 new_task->memlimit_is_active = 0;
1577 new_task->memlimit_is_fatal = 0;
1578 new_task->memlimit_active_exc_resource = 0;
1579 new_task->memlimit_inactive_exc_resource = 0;
39037602
A
1580 new_task->task_timer_wakeups_bin_1 = 0;
1581 new_task->task_timer_wakeups_bin_2 = 0;
1582 new_task->task_gpu_ns = 0;
cb323159
A
1583 new_task->task_writes_counters_internal.task_immediate_writes = 0;
1584 new_task->task_writes_counters_internal.task_deferred_writes = 0;
1585 new_task->task_writes_counters_internal.task_invalidated_writes = 0;
1586 new_task->task_writes_counters_internal.task_metadata_writes = 0;
1587 new_task->task_writes_counters_external.task_immediate_writes = 0;
1588 new_task->task_writes_counters_external.task_deferred_writes = 0;
1589 new_task->task_writes_counters_external.task_invalidated_writes = 0;
1590 new_task->task_writes_counters_external.task_metadata_writes = 0;
1591
39037602 1592 new_task->task_energy = 0;
5ba3f43e
A
1593#if MONOTONIC
1594 memset(&new_task->task_monotonic, 0, sizeof(new_task->task_monotonic));
1595#endif /* MONOTONIC */
39037602 1596 }
3e170ce0 1597
39037602
A
1598
1599#if CONFIG_COALITIONS
1600 if (!(t_flags & TF_CORPSE_FORK)) {
1601 /* TODO: there is no graceful failure path here... */
1602 if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
1603 coalitions_adopt_task(parent_coalitions, new_task);
1604 } else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
1605 /*
 1606 * all tasks have at least a resource coalition, so
 1607 * if the parent has one, inherit all coalitions
 1608 * the parent is a part of
1609 */
1610 coalitions_adopt_task(parent_task->coalition, new_task);
1611 } else {
1612 /* TODO: assert that new_task will be PID 1 (launchd) */
1613 coalitions_adopt_init_task(new_task);
1614 }
5ba3f43e
A
1615 /*
1616 * on exec, we need to transfer the coalition roles from the
1617 * parent task to the exec copy task.
1618 */
1619 if (parent_task && (t_procflags & TPF_EXEC_COPY)) {
1620 int coal_roles[COALITION_NUM_TYPES];
1621 task_coalition_roles(parent_task, coal_roles);
1622 (void)coalitions_set_roles(new_task->coalition, new_task, coal_roles);
1623 }
3e170ce0 1624 } else {
39037602 1625 coalitions_adopt_corpse_task(new_task);
3e170ce0
A
1626 }
1627
1628 if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
1629 panic("created task is not a member of a resource coalition");
1630 }
1631#endif /* CONFIG_COALITIONS */
1632
1633 new_task->dispatchqueue_offset = 0;
1634 if (parent_task != NULL) {
1635 new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
1636 }
1c79356b 1637
cb323159 1638 new_task->task_can_transfer_memory_ownership = FALSE;
39236c6e 1639 new_task->task_volatile_objects = 0;
fe8ab488 1640 new_task->task_nonvolatile_objects = 0;
cb323159
A
1641 new_task->task_objects_disowning = FALSE;
1642 new_task->task_objects_disowned = FALSE;
1643 new_task->task_owned_objects = 0;
a39ff7e2
A
1644 queue_init(&new_task->task_objq);
1645 task_objq_lock_init(new_task);
1646
d9a64523
A
1647#if __arm64__
1648 new_task->task_legacy_footprint = FALSE;
cb323159 1649 new_task->task_extra_footprint_limit = FALSE;
94ff46dc 1650 new_task->task_ios13extended_footprint_limit = FALSE;
d9a64523 1651#endif /* __arm64__ */
a39ff7e2 1652 new_task->task_region_footprint = FALSE;
d9a64523
A
1653 new_task->task_has_crossed_thread_limit = FALSE;
1654 new_task->task_thread_limit = 0;
39037602
A
1655#if CONFIG_SECLUDED_MEMORY
1656 new_task->task_can_use_secluded_mem = FALSE;
1657 new_task->task_could_use_secluded_mem = FALSE;
1658 new_task->task_could_also_use_secluded_mem = FALSE;
d9a64523 1659 new_task->task_suppressed_secluded = FALSE;
39037602
A
1660#endif /* CONFIG_SECLUDED_MEMORY */
1661
d9a64523
A
1662 /*
 1663 * t_flags is set up above, but since we don't
 1664 * currently support darkwake mode being set that
 1665 * way, we clear it out here explicitly.
1666 */
1667 new_task->t_flags &= ~(TF_DARKWAKE_MODE);
1668
0a7de745 1669 queue_init(&new_task->io_user_clients);
cb323159 1670 new_task->loadTag = 0;
7e41aa88 1671
1c79356b
A
1672 ipc_task_enable(new_task);
1673
3e170ce0
A
1674 lck_mtx_lock(&tasks_threads_lock);
1675 queue_enter(&tasks, new_task, task_t, tasks);
1676 tasks_count++;
0a7de745
A
1677 if (tasks_suspend_state) {
1678 task_suspend_internal(new_task);
1679 }
3e170ce0
A
1680 lck_mtx_unlock(&tasks_threads_lock);
1681
1c79356b 1682 *child_task = new_task;
0a7de745 1683 return KERN_SUCCESS;
1c79356b
A
1684}
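
/*
 * Hedged usage sketch (not part of the original source; names below are
 * illustrative): task_create_internal() returns the child with two
 * references -- one for being alive and one owned by the caller, per the
 * os_ref_init_count() above -- so a caller that abandons the child on an
 * error path must drop its own reference explicitly.
 */
static kern_return_t
__unused example_abandon_child(task_t child)
{
	/*
	 * Drop the caller-owned reference; the "alive" reference is
	 * surrendered later, when the task terminates.
	 */
	task_deallocate(child);
	return KERN_FAILURE;
}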
1685
743345f9
A
1686/*
1687 * task_rollup_accounting_info
1688 *
 1689 * Roll up accounting stats. Used to roll up stats
 1690 * for the exec copy task and corpse fork.
1691 */
1692void
1693task_rollup_accounting_info(task_t to_task, task_t from_task)
1694{
1695 assert(from_task != to_task);
1696
1697 to_task->total_user_time = from_task->total_user_time;
1698 to_task->total_system_time = from_task->total_system_time;
5ba3f43e 1699 to_task->total_ptime = from_task->total_ptime;
d9a64523 1700 to_task->total_runnable_time = from_task->total_runnable_time;
743345f9
A
1701 to_task->faults = from_task->faults;
1702 to_task->pageins = from_task->pageins;
1703 to_task->cow_faults = from_task->cow_faults;
cb323159 1704 to_task->decompressions = from_task->decompressions;
743345f9
A
1705 to_task->messages_sent = from_task->messages_sent;
1706 to_task->messages_received = from_task->messages_received;
1707 to_task->syscalls_mach = from_task->syscalls_mach;
1708 to_task->syscalls_unix = from_task->syscalls_unix;
1709 to_task->c_switch = from_task->c_switch;
1710 to_task->p_switch = from_task->p_switch;
1711 to_task->ps_switch = from_task->ps_switch;
1712 to_task->extmod_statistics = from_task->extmod_statistics;
1713 to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
1714 to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
1715 to_task->purged_memory_warn = from_task->purged_memory_warn;
1716 to_task->purged_memory_critical = from_task->purged_memory_critical;
1717 to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
1718 *to_task->task_io_stats = *from_task->task_io_stats;
a39ff7e2
A
1719 to_task->cpu_time_eqos_stats = from_task->cpu_time_eqos_stats;
1720 to_task->cpu_time_rqos_stats = from_task->cpu_time_rqos_stats;
743345f9
A
1721 to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
1722 to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
1723 to_task->task_gpu_ns = from_task->task_gpu_ns;
cb323159
A
1724 to_task->task_writes_counters_internal.task_immediate_writes = from_task->task_writes_counters_internal.task_immediate_writes;
1725 to_task->task_writes_counters_internal.task_deferred_writes = from_task->task_writes_counters_internal.task_deferred_writes;
1726 to_task->task_writes_counters_internal.task_invalidated_writes = from_task->task_writes_counters_internal.task_invalidated_writes;
1727 to_task->task_writes_counters_internal.task_metadata_writes = from_task->task_writes_counters_internal.task_metadata_writes;
1728 to_task->task_writes_counters_external.task_immediate_writes = from_task->task_writes_counters_external.task_immediate_writes;
1729 to_task->task_writes_counters_external.task_deferred_writes = from_task->task_writes_counters_external.task_deferred_writes;
1730 to_task->task_writes_counters_external.task_invalidated_writes = from_task->task_writes_counters_external.task_invalidated_writes;
1731 to_task->task_writes_counters_external.task_metadata_writes = from_task->task_writes_counters_external.task_metadata_writes;
743345f9
A
1732 to_task->task_energy = from_task->task_energy;
1733
1734 /* Skip ledger roll up for memory accounting entries */
1735 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
1736 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
1737 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
1738#if CONFIG_SCHED_SFI
1739 for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
1740 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
1741 }
1742#endif
743345f9
A
1743 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
1744 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
743345f9
A
1745 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
1746 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
5ba3f43e
A
1747 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_me);
1748 ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.energy_billed_to_others);
743345f9
A
1749}
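
/*
 * Hedged note on the rollup above: the memory-accounting ledger entries
 * are deliberately skipped; for corpse forks the memory footprint is
 * presumably reconstructed separately via vm_map_copy_footprint_ledgers()
 * in task_duplicate_map_and_threads() below.
 */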
1750
fe8ab488
A
1751int task_dropped_imp_count = 0;
1752
1c79356b 1753/*
91447636 1754 * task_deallocate:
1c79356b 1755 *
91447636 1756 * Drop a reference on a task.
1c79356b
A
1757 */
1758void
9bccf70c 1759task_deallocate(
0a7de745 1760 task_t task)
1c79356b 1761{
4b17d6b6 1762 ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
e8c3f781 1763 os_ref_count_t refs;
316670eb 1764
0a7de745
A
1765 if (task == TASK_NULL) {
1766 return;
1767 }
9bccf70c 1768
fe8ab488
A
1769 refs = task_deallocate_internal(task);
1770
1771#if IMPORTANCE_INHERITANCE
fe8ab488
A
1772 if (refs == 1) {
1773 /*
1774 * If last ref potentially comes from the task's importance,
1775 * disconnect it. But more task refs may be added before
1776 * that completes, so wait for the reference to go to zero
e8c3f781 1777 * naturally (it may happen on a recursive task_deallocate()
fe8ab488
A
1778 * from the ipc_importance_disconnect_task() call).
1779 */
0a7de745 1780 if (IIT_NULL != task->task_imp_base) {
fe8ab488 1781 ipc_importance_disconnect_task(task);
0a7de745 1782 }
fe8ab488
A
1783 return;
1784 }
fe8ab488 1785#endif /* IMPORTANCE_INHERITANCE */
1c79356b 1786
e8c3f781
A
1787 if (refs > 0) {
1788 return;
1789 }
1790
cb323159
A
1791 /*
 1792 * The task should be dead at this point. Ensure other resources,
1793 * like threads, are gone before we trash the world.
1794 */
1795 assert(queue_empty(&task->threads));
1796 assert(task->bsd_info == NULL);
1797 assert(!is_active(task->itk_space));
1798 assert(!task->active);
1799 assert(task->active_thread_count == 0);
1800
6d2010ae 1801 lck_mtx_lock(&tasks_threads_lock);
cb323159 1802 assert(terminated_tasks_count > 0);
6d2010ae 1803 queue_remove(&terminated_tasks, task, task_t, tasks);
39236c6e 1804 terminated_tasks_count--;
6d2010ae
A
1805 lck_mtx_unlock(&tasks_threads_lock);
1806
fe8ab488
A
1807 /*
 1808 * remove the reference on the ATM descriptor
1809 */
490019cf 1810 task_atm_reset(task);
fe8ab488 1811
fe8ab488
A
1812 /*
 1813 * remove the reference on the bank context
1814 */
490019cf 1815 task_bank_reset(task);
fe8ab488 1816
0a7de745 1817 if (task->task_io_stats) {
fe8ab488 1818 kfree(task->task_io_stats, sizeof(struct io_stat_info));
0a7de745 1819 }
fe8ab488 1820
316670eb
A
1821 /*
1822 * Give the machine dependent code a chance
1823 * to perform cleanup before ripping apart
1824 * the task.
1825 */
1826 machine_task_terminate(task);
1827
9bccf70c
A
1828 ipc_task_terminate(task);
1829
7e41aa88
A
 1830 /* let IOKit know */
1831 iokit_task_terminate(task);
1832
0a7de745 1833 if (task->affinity_space) {
2d21ac55 1834 task_affinity_deallocate(task);
0a7de745 1835 }
2d21ac55 1836
fe8ab488
A
1837#if MACH_ASSERT
1838 if (task->ledger != NULL &&
1839 task->map != NULL &&
1840 task->map->pmap != NULL &&
1841 task->map->pmap->ledger != NULL) {
1842 assert(task->ledger == task->map->pmap->ledger);
1843 }
1844#endif /* MACH_ASSERT */
1845
cb323159
A
1846 vm_owned_objects_disown(task);
1847 assert(task->task_objects_disowned);
fe8ab488 1848 if (task->task_volatile_objects != 0 ||
cb323159
A
1849 task->task_nonvolatile_objects != 0 ||
1850 task->task_owned_objects != 0) {
fe8ab488 1851 panic("task_deallocate(%p): "
cb323159 1852 "volatile_objects=%d nonvolatile_objects=%d owned=%d\n",
0a7de745
A
1853 task,
1854 task->task_volatile_objects,
cb323159
A
1855 task->task_nonvolatile_objects,
1856 task->task_owned_objects);
fe8ab488
A
1857 }
1858
1c79356b
A
1859 vm_map_deallocate(task->map);
1860 is_release(task->itk_space);
cb323159
A
1861 if (task->restartable_ranges) {
1862 restartable_ranges_release(task->restartable_ranges);
1863 }
1c79356b 1864
4b17d6b6 1865 ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
0a7de745 1866 &interrupt_wakeups, &debit);
4b17d6b6 1867 ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
0a7de745 1868 &platform_idle_wakeups, &debit);
4b17d6b6 1869
fe8ab488
A
1870#if defined(CONFIG_SCHED_MULTIQ)
1871 sched_group_destroy(task->sched_group);
1872#endif
1873
4b17d6b6
A
1874 /* Accumulate statistics for dead tasks */
1875 lck_spin_lock(&dead_task_statistics_lock);
1876 dead_task_statistics.total_user_time += task->total_user_time;
1877 dead_task_statistics.total_system_time += task->total_system_time;
1878
1879 dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
1880 dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;
1881
1882 dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
1883 dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;
5ba3f43e
A
1884 dead_task_statistics.total_ptime += task->total_ptime;
1885 dead_task_statistics.total_pset_switches += task->ps_switch;
1886 dead_task_statistics.task_gpu_ns += task->task_gpu_ns;
1887 dead_task_statistics.task_energy += task->task_energy;
4b17d6b6
A
1888
1889 lck_spin_unlock(&dead_task_statistics_lock);
b0d623f7
A
1890 lck_mtx_destroy(&task->lock, &task_lck_grp);
1891
316670eb
A
1892 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
1893 &debit)) {
1894 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
1895 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
1896 }
1897 if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
1898 &debit)) {
1899 OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
1900 OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
1901 }
1902 ledger_dereference(task->ledger);
39236c6e
A
1903
1904#if TASK_REFERENCE_LEAK_DEBUG
1905 btlog_remove_entries_for_element(task_ref_btlog, task);
1906#endif
1907
fe8ab488 1908#if CONFIG_COALITIONS
3e170ce0 1909 task_release_coalitions(task);
fe8ab488
A
1910#endif /* CONFIG_COALITIONS */
1911
3e170ce0
A
1912 bzero(task->coalition, sizeof(task->coalition));
1913
1914#if MACH_BSD
 1915 /* clean up collected information since the last reference to the task is gone */
1916 if (task->corpse_info) {
5ba3f43e
A
1917 void *corpse_info_kernel = kcdata_memory_get_begin_addr(task->corpse_info);
1918 task_crashinfo_destroy(task->corpse_info);
3e170ce0 1919 task->corpse_info = NULL;
5ba3f43e
A
1920 if (corpse_info_kernel) {
1921 kfree(corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
1922 }
3e170ce0
A
1923 }
1924#endif
39037602
A
1925
1926#if CONFIG_MACF
1927 if (task->crash_label) {
5ba3f43e
A
1928 mac_exc_free_label(task->crash_label);
1929 task->crash_label = NULL;
39037602
A
1930 }
1931#endif
39236c6e 1932
a39ff7e2
A
1933 assert(queue_empty(&task->task_objq));
1934
91447636 1935 zfree(task_zone, task);
1c79356b
A
1936}
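
/*
 * Hedged usage sketch (helper name is illustrative): holders of a task
 * pointer pair task_reference() with task_deallocate(); the final
 * deallocate is what runs the teardown above.
 */
static void
__unused example_use_task(task_t task)
{
	if (task == TASK_NULL) {
		return;
	}
	task_reference(task);		/* keep the task alive across the use */
	/* ... inspect or operate on the task ... */
	task_deallocate(task);		/* may tear the task down on the last ref */
}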
1937
0c530ab8
A
1938/*
1939 * task_name_deallocate:
1940 *
1941 * Drop a reference on a task name.
1942 */
1943void
1944task_name_deallocate(
0a7de745 1945 task_name_t task_name)
0c530ab8 1946{
0a7de745 1947 return task_deallocate((task_t)task_name);
0c530ab8
A
1948}
1949
813fb2f6
A
1950/*
1951 * task_inspect_deallocate:
1952 *
1953 * Drop a task inspection reference.
1954 */
1955void
1956task_inspect_deallocate(
0a7de745 1957 task_inspect_t task_inspect)
813fb2f6 1958{
0a7de745 1959 return task_deallocate((task_t)task_inspect);
813fb2f6
A
1960}
1961
39236c6e
A
1962/*
1963 * task_suspension_token_deallocate:
1964 *
1965 * Drop a reference on a task suspension token.
1966 */
1967void
1968task_suspension_token_deallocate(
0a7de745 1969 task_suspension_token_t token)
39236c6e 1970{
0a7de745 1971 return task_deallocate((task_t)token);
39236c6e 1972}
0c530ab8 1973
3e170ce0
A
1974
1975/*
1976 * task_collect_crash_info:
1977 *
 1978 * Collect crash info from BSD- and Mach-based data.
1979 */
1980kern_return_t
5ba3f43e
A
1981task_collect_crash_info(
1982 task_t task,
1983#ifdef CONFIG_MACF
1984 struct label *crash_label,
1985#endif
1986 int is_corpse_fork)
3e170ce0
A
1987{
1988 kern_return_t kr = KERN_SUCCESS;
1989
1990 kcdata_descriptor_t crash_data = NULL;
1991 kcdata_descriptor_t crash_data_release = NULL;
1992 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
39037602
A
1993 mach_vm_offset_t crash_data_ptr = 0;
1994 void *crash_data_kernel = NULL;
1995 void *crash_data_kernel_release = NULL;
5ba3f43e
A
1996#if CONFIG_MACF
1997 struct label *label, *free_label;
1998#endif
3e170ce0
A
1999
2000 if (!corpses_enabled()) {
2001 return KERN_NOT_SUPPORTED;
2002 }
2003
5ba3f43e
A
2004#if CONFIG_MACF
2005 free_label = label = mac_exc_create_label();
2006#endif
0a7de745 2007
3e170ce0 2008 task_lock(task);
39037602
A
2009
2010 assert(is_corpse_fork || task->bsd_info != NULL);
2011 if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
2012#if CONFIG_MACF
5ba3f43e
A
 2013 /* Set the crash label, used by the exception delivery MAC hook */
2014 free_label = task->crash_label; // Most likely NULL.
2015 task->crash_label = label;
2016 mac_exc_update_task_crash_label(task, crash_label);
39037602 2017#endif
3e170ce0 2018 task_unlock(task);
3e170ce0 2019
5ba3f43e
A
2020 crash_data_kernel = (void *) kalloc(CORPSEINFO_ALLOCATION_SIZE);
2021 if (crash_data_kernel == NULL) {
2022 kr = KERN_RESOURCE_SHORTAGE;
3e170ce0 2023 goto out_no_lock;
5ba3f43e
A
2024 }
2025 bzero(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
2026 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
3e170ce0 2027
39037602 2028 /* Do not get a corpse ref for corpse fork */
5ba3f43e 2029 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size,
0a7de745
A
2030 is_corpse_fork ? 0 : CORPSE_CRASHINFO_HAS_REF,
2031 KCFLAG_USE_MEMCOPY);
3e170ce0
A
2032 if (crash_data) {
2033 task_lock(task);
2034 crash_data_release = task->corpse_info;
5ba3f43e 2035 crash_data_kernel_release = kcdata_memory_get_begin_addr(crash_data_release);
3e170ce0 2036 task->corpse_info = crash_data;
39037602 2037
3e170ce0
A
2038 task_unlock(task);
2039 kr = KERN_SUCCESS;
2040 } else {
5ba3f43e 2041 kfree(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
3e170ce0
A
2042 kr = KERN_FAILURE;
2043 }
2044
2045 if (crash_data_release != NULL) {
5ba3f43e 2046 task_crashinfo_destroy(crash_data_release);
39037602
A
2047 }
2048 if (crash_data_kernel_release != NULL) {
2049 kfree(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
3e170ce0
A
2050 }
2051 } else {
2052 task_unlock(task);
2053 }
2054
2055out_no_lock:
5ba3f43e
A
2056#if CONFIG_MACF
2057 if (free_label != NULL) {
2058 mac_exc_free_label(free_label);
2059 }
2060#endif
3e170ce0
A
2061 return kr;
2062}
2063
2064/*
2065 * task_deliver_crash_notification:
2066 *
 2067 * Makes an outcall to the registered host port for a corpse.
2068 */
2069kern_return_t
5ba3f43e
A
2070task_deliver_crash_notification(
2071 task_t task,
2072 thread_t thread,
2073 exception_type_t etype,
2074 mach_exception_subcode_t subcode)
3e170ce0
A
2075{
2076 kcdata_descriptor_t crash_info = task->corpse_info;
2077 thread_t th_iter = NULL;
2078 kern_return_t kr = KERN_SUCCESS;
2079 wait_interrupt_t wsave;
2080 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
39037602 2081 ipc_port_t task_port, old_notify;
3e170ce0 2082
0a7de745 2083 if (crash_info == NULL) {
3e170ce0 2084 return KERN_FAILURE;
0a7de745 2085 }
3e170ce0 2086
3e170ce0 2087 task_lock(task);
39037602 2088 if (task_is_a_corpse_fork(task)) {
5ba3f43e
A
2089 /* Populate code with EXC_{RESOURCE,GUARD} for corpse fork */
2090 code[0] = etype;
39037602 2091 code[1] = subcode;
5ba3f43e 2092 } else {
39037602
A
2093 /* Populate code with EXC_CRASH for corpses */
2094 code[0] = EXC_CRASH;
2095 code[1] = 0;
2096 /* Update the code[1] if the boot-arg corpse_for_fatal_memkill is set */
2097 if (corpse_for_fatal_memkill) {
2098 code[1] = subcode;
2099 }
39037602 2100 }
5ba3f43e 2101
3e170ce0
A
2102 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2103 {
39037602
A
2104 if (th_iter->corpse_dup == FALSE) {
2105 ipc_thread_reset(th_iter);
2106 }
3e170ce0
A
2107 }
2108 task_unlock(task);
2109
39037602
A
 2110 /* Arm the no-sender notification for the task port */
2111 task_reference(task);
2112 task_port = convert_task_to_port(task);
2113 ip_lock(task_port);
cb323159 2114 require_ip_active(task_port);
39037602
A
2115 ipc_port_nsrequest(task_port, task_port->ip_mscount, ipc_port_make_sonce_locked(task_port), &old_notify);
2116 /* port unlocked */
2117 assert(IP_NULL == old_notify);
2118
3e170ce0 2119 wsave = thread_interrupt_level(THREAD_UNINT);
39037602 2120 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
3e170ce0
A
2121 if (kr != KERN_SUCCESS) {
2122 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(task));
2123 }
2124
3e170ce0 2125 (void)thread_interrupt_level(wsave);
3e170ce0 2126
39037602
A
2127 /*
 2128 * Drop the send right on the task port; this will fire the
 2129 * no-sender notification if exception delivery failed.
2130 */
2131 ipc_port_release_send(task_port);
3e170ce0
A
2132 return kr;
2133}
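
/*
 * Hedged summary of the no-senders protocol above: the routine takes a
 * task reference, arms a send-once notification on the corpse's task
 * port with ipc_port_nsrequest(), and then drops its own send right.
 * If exception delivery fails -- or once every other sender goes away --
 * the notification fires and task_port_notify() below reaps the corpse.
 */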
2134
1c79356b
A
2135/*
2136 * task_terminate:
2137 *
2138 * Terminate the specified task. See comments on thread_terminate
2139 * (kern/thread.c) about problems with terminating the "current task."
2140 */
2141
2142kern_return_t
2143task_terminate(
0a7de745 2144 task_t task)
1c79356b 2145{
0a7de745
A
2146 if (task == TASK_NULL) {
2147 return KERN_INVALID_ARGUMENT;
2148 }
91447636 2149
0a7de745
A
2150 if (task->bsd_info) {
2151 return KERN_FAILURE;
2152 }
91447636 2153
0a7de745 2154 return task_terminate_internal(task);
1c79356b
A
2155}
2156
fe8ab488
A
2157#if MACH_ASSERT
2158extern int proc_pid(struct proc *);
2159extern void proc_name_kdp(task_t t, char *buf, int size);
2160#endif /* MACH_ASSERT */
2161
2162#define VM_MAP_PARTIAL_REAP 0x54 /* 0x150 */
2163static void
2164__unused task_partial_reap(task_t task, __unused int pid)
2165{
0a7de745
A
2166 unsigned int reclaimed_resident = 0;
2167 unsigned int reclaimed_compressed = 0;
fe8ab488
A
2168 uint64_t task_page_count;
2169
2170 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
2171
2172 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
0a7de745 2173 pid, task_page_count, 0, 0, 0);
fe8ab488
A
2174
2175 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
2176
0a7de745
A
2177 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
2178 pid, reclaimed_resident, reclaimed_compressed, 0, 0);
fe8ab488
A
2179}
2180
3e170ce0
A
2181kern_return_t
2182task_mark_corpse(task_t task)
2183{
2184 kern_return_t kr = KERN_SUCCESS;
2185 thread_t self_thread;
2186 (void) self_thread;
2187 wait_interrupt_t wsave;
5ba3f43e
A
2188#if CONFIG_MACF
2189 struct label *crash_label = NULL;
2190#endif
3e170ce0
A
2191
2192 assert(task != kernel_task);
2193 assert(task == current_task());
2194 assert(!task_is_a_corpse(task));
2195
5ba3f43e
A
2196#if CONFIG_MACF
2197 crash_label = mac_exc_create_label_for_proc((struct proc*)task->bsd_info);
2198#endif
0a7de745 2199
5ba3f43e
A
2200 kr = task_collect_crash_info(task,
2201#if CONFIG_MACF
0a7de745 2202 crash_label,
5ba3f43e 2203#endif
0a7de745 2204 FALSE);
3e170ce0 2205 if (kr != KERN_SUCCESS) {
5ba3f43e 2206 goto out;
3e170ce0
A
2207 }
2208
2209 self_thread = current_thread();
2210
2211 wsave = thread_interrupt_level(THREAD_UNINT);
2212 task_lock(task);
2213
2214 task_set_corpse_pending_report(task);
2215 task_set_corpse(task);
5ba3f43e 2216 task->crashed_thread_id = thread_tid(self_thread);
3e170ce0
A
2217
2218 kr = task_start_halt_locked(task, TRUE);
2219 assert(kr == KERN_SUCCESS);
39037602 2220
3e170ce0 2221 ipc_task_reset(task);
39037602 2222 /* Remove the naked send right for the task port, needed to arm the no-sender notification */
4ba76501 2223 task_set_special_port_internal(task, TASK_KERNEL_PORT, IPC_PORT_NULL);
3e170ce0
A
2224 ipc_task_enable(task);
2225
2226 task_unlock(task);
2227 /* terminate the ipc space */
2228 ipc_space_terminate(task->itk_space);
39037602
A
2229
2230 /* Add it to global corpse task list */
2231 task_add_to_corpse_task_list(task);
0a7de745 2232
3e170ce0
A
2233 task_start_halt(task);
2234 thread_terminate_internal(self_thread);
39037602 2235
3e170ce0
A
2236 (void) thread_interrupt_level(wsave);
2237 assert(task->halting == TRUE);
5ba3f43e
A
2238
2239out:
2240#if CONFIG_MACF
2241 mac_exc_free_label(crash_label);
2242#endif
3e170ce0
A
2243 return kr;
2244}
2245
39037602
A
2246/*
2247 * task_clear_corpse
2248 *
 2249 * Clears the corpse pending bit on the task.
 2250 * Removes the inspection bit on the threads.
2251 */
2252void
2253task_clear_corpse(task_t task)
2254{
2255 thread_t th_iter = NULL;
2256
2257 task_lock(task);
2258 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
2259 {
2260 thread_mtx_lock(th_iter);
2261 th_iter->inspection = FALSE;
2262 thread_mtx_unlock(th_iter);
2263 }
2264
2265 thread_terminate_crashed_threads();
2266 /* remove the pending corpse report flag */
2267 task_clear_corpse_pending_report(task);
2268
2269 task_unlock(task);
2270}
2271
2272/*
2273 * task_port_notify
2274 *
2275 * Called whenever the Mach port system detects no-senders on
2276 * the task port of a corpse.
2277 * Each notification that comes in should terminate the task (corpse).
2278 */
2279void
2280task_port_notify(mach_msg_header_t *msg)
2281{
2282 mach_no_senders_notification_t *notification = (void *)msg;
2283 ipc_port_t port = notification->not_header.msgh_remote_port;
2284 task_t task;
2285
cb323159 2286 require_ip_active(port);
39037602 2287 assert(IKOT_TASK == ip_kotype(port));
ea3f0419 2288 task = (task_t) ip_get_kobject(port);
39037602
A
2289
2290 assert(task_is_a_corpse(task));
2291
2292 /* Remove the task from global corpse task list */
2293 task_remove_from_corpse_task_list(task);
2294
2295 task_clear_corpse(task);
2296 task_terminate_internal(task);
2297}
2298
2299/*
2300 * task_wait_till_threads_terminate_locked
2301 *
 2302 * Wait until all the threads in the task are terminated.
2303 * Might release the task lock and re-acquire it.
2304 */
2305void
2306task_wait_till_threads_terminate_locked(task_t task)
2307{
2308 /* wait for all the threads in the task to terminate */
2309 while (task->active_thread_count != 0) {
2310 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
2311 task_unlock(task);
2312 thread_block(THREAD_CONTINUE_NULL);
2313
2314 task_lock(task);
2315 }
2316}
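
/*
 * Hedged note on the wait loop above: the condition is rechecked with
 * the task lock held after every wakeup, since an assert_wait() +
 * thread_block() pair can be satisfied spuriously. The waking side is
 * presumably the thread-reap path, issuing a thread_wakeup() on
 * &task->active_thread_count as the count drops.
 */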
2317
2318/*
2319 * task_duplicate_map_and_threads
2320 *
2321 * Copy vmmap of source task.
2322 * Copy active threads from source task to destination task.
 2323 * The source task is suspended during the copy.
2324 */
2325kern_return_t
2326task_duplicate_map_and_threads(
0a7de745
A
2327 task_t task,
2328 void *p,
2329 task_t new_task,
2330 thread_t *thread_ret,
2331 uint64_t **udata_buffer,
2332 int *size,
2333 int *num_udata)
39037602
A
2334{
2335 kern_return_t kr = KERN_SUCCESS;
2336 int active;
2337 thread_t thread, self, thread_return = THREAD_NULL;
d9a64523 2338 thread_t new_thread = THREAD_NULL, first_thread = THREAD_NULL;
39037602
A
2339 thread_t *thread_array;
2340 uint32_t active_thread_count = 0, array_count = 0, i;
2341 vm_map_t oldmap;
2342 uint64_t *buffer = NULL;
2343 int buf_size = 0;
2344 int est_knotes = 0, num_knotes = 0;
2345
2346 self = current_thread();
2347
2348 /*
 2349 * Suspend the task to copy thread state; use the internal
 2350 * variant so that no user-space process can resume
 2351 * the task from under us.
2352 */
2353 kr = task_suspend_internal(task);
2354 if (kr != KERN_SUCCESS) {
2355 return kr;
2356 }
2357
2358 if (task->map->disable_vmentry_reuse == TRUE) {
2359 /*
2360 * Quite likely GuardMalloc (or some debugging tool)
 2361 * is being used on this task, and it has gone past
 2362 * its limit. Making a corpse will likely encounter
2363 * a lot of VM entries that will need COW.
2364 *
2365 * Skip it.
2366 */
a39ff7e2
A
2367#if DEVELOPMENT || DEBUG
2368 memorystatus_abort_vm_map_fork(task);
2369#endif
39037602
A
2370 task_resume_internal(task);
2371 return KERN_FAILURE;
2372 }
2373
5ba3f43e 2374 /* Check with VM if vm_map_fork is allowed for this task */
a39ff7e2 2375 if (memorystatus_allowed_vm_map_fork(task)) {
5ba3f43e
A
 2376 /* Set up the new task's vmmap; switch from the parent task's map to its COW map */
2377 oldmap = new_task->map;
2378 new_task->map = vm_map_fork(new_task->ledger,
0a7de745
A
2379 task->map,
2380 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
2381 VM_MAP_FORK_PRESERVE_PURGEABLE |
2382 VM_MAP_FORK_CORPSE_FOOTPRINT));
5ba3f43e
A
2383 vm_map_deallocate(oldmap);
2384
d9a64523
A
2385 /* copy ledgers that impact the memory footprint */
2386 vm_map_copy_footprint_ledgers(task, new_task);
2387
5ba3f43e
A
2388 /* Get all the udata pointers from kqueue */
2389 est_knotes = kevent_proc_copy_uptrs(p, NULL, 0);
2390 if (est_knotes > 0) {
2391 buf_size = (est_knotes + 32) * sizeof(uint64_t);
2392 buffer = (uint64_t *) kalloc(buf_size);
2393 num_knotes = kevent_proc_copy_uptrs(p, buffer, buf_size);
2394 if (num_knotes > est_knotes + 32) {
2395 num_knotes = est_knotes + 32;
2396 }
39037602
A
2397 }
2398 }
2399
2400 active_thread_count = task->active_thread_count;
2401 if (active_thread_count == 0) {
2402 if (buffer != NULL) {
2403 kfree(buffer, buf_size);
2404 }
2405 task_resume_internal(task);
2406 return KERN_FAILURE;
2407 }
2408
2409 thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count);
2410
2411 /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
2412 task_lock(task);
2413 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2414 /* Skip inactive threads */
2415 active = thread->active;
2416 if (!active) {
2417 continue;
2418 }
2419
2420 if (array_count >= active_thread_count) {
2421 break;
2422 }
2423
2424 thread_array[array_count++] = thread;
2425 thread_reference(thread);
2426 }
2427 task_unlock(task);
2428
2429 for (i = 0; i < array_count; i++) {
39037602
A
2430 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
2431 if (kr != KERN_SUCCESS) {
2432 break;
2433 }
2434
2435 /* Equivalent of current thread in corpse */
2436 if (thread_array[i] == self) {
2437 thread_return = new_thread;
5ba3f43e 2438 new_task->crashed_thread_id = thread_tid(new_thread);
d9a64523
A
2439 } else if (first_thread == NULL) {
2440 first_thread = new_thread;
39037602
A
2441 } else {
2442 /* drop the extra ref returned by thread_create_with_continuation */
2443 thread_deallocate(new_thread);
2444 }
2445
2446 kr = thread_dup2(thread_array[i], new_thread);
2447 if (kr != KERN_SUCCESS) {
2448 thread_mtx_lock(new_thread);
2449 new_thread->corpse_dup = TRUE;
2450 thread_mtx_unlock(new_thread);
2451 continue;
2452 }
2453
2454 /* Copy thread name */
2455 bsd_copythreadname(new_thread->uthread, thread_array[i]->uthread);
d9a64523 2456 new_thread->thread_tag = thread_array[i]->thread_tag;
39037602
A
2457 thread_copy_resource_info(new_thread, thread_array[i]);
2458 }
2459
d9a64523
A
2460 /* return the first thread if we couldn't find the equivalent of current */
2461 if (thread_return == THREAD_NULL) {
2462 thread_return = first_thread;
0a7de745 2463 } else if (first_thread != THREAD_NULL) {
d9a64523
A
2464 /* drop the extra ref returned by thread_create_with_continuation */
2465 thread_deallocate(first_thread);
2466 }
2467
39037602
A
2468 task_resume_internal(task);
2469
2470 for (i = 0; i < array_count; i++) {
2471 thread_deallocate(thread_array[i]);
2472 }
2473 kfree(thread_array, sizeof(thread_t) * active_thread_count);
2474
2475 if (kr == KERN_SUCCESS) {
2476 *thread_ret = thread_return;
2477 *udata_buffer = buffer;
2478 *size = buf_size;
2479 *num_udata = num_knotes;
2480 } else {
2481 if (thread_return != THREAD_NULL) {
2482 thread_deallocate(thread_return);
2483 }
2484 if (buffer != NULL) {
2485 kfree(buffer, buf_size);
2486 }
2487 }
2488
2489 return kr;
2490}
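
/*
 * Hedged summary of the thread-snapshot pattern above: the thread list
 * is captured under the task lock with a reference taken on each thread,
 * the lock is dropped, and the expensive per-thread work (creation plus
 * thread_dup2()) runs unlocked against the referenced snapshot; the
 * snapshot references are dropped once the copies are complete.
 */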
2491
2492#if CONFIG_SECLUDED_MEMORY
2493extern void task_set_can_use_secluded_mem_locked(
0a7de745
A
2494 task_t task,
2495 boolean_t can_use_secluded_mem);
39037602
A
2496#endif /* CONFIG_SECLUDED_MEMORY */
2497
1c79356b
A
2498kern_return_t
2499task_terminate_internal(
0a7de745 2500 task_t task)
1c79356b 2501{
0a7de745
A
2502 thread_t thread, self;
2503 task_t self_task;
2504 boolean_t interrupt_save;
2505 int pid = 0;
1c79356b
A
2506
2507 assert(task != kernel_task);
2508
91447636
A
2509 self = current_thread();
2510 self_task = self->task;
1c79356b
A
2511
2512 /*
2513 * Get the task locked and make sure that we are not racing
2514 * with someone else trying to terminate us.
2515 */
0a7de745 2516 if (task == self_task) {
1c79356b 2517 task_lock(task);
0a7de745 2518 } else if (task < self_task) {
1c79356b 2519 task_lock(task);
91447636 2520 task_lock(self_task);
0a7de745 2521 } else {
91447636 2522 task_lock(self_task);
1c79356b
A
2523 task_lock(task);
2524 }
2525
39037602
A
2526#if CONFIG_SECLUDED_MEMORY
2527 if (task->task_can_use_secluded_mem) {
2528 task_set_can_use_secluded_mem_locked(task, FALSE);
2529 }
2530 task->task_could_use_secluded_mem = FALSE;
2531 task->task_could_also_use_secluded_mem = FALSE;
d9a64523
A
2532
2533 if (task->task_suppressed_secluded) {
2534 stop_secluded_suppression(task);
2535 }
39037602
A
2536#endif /* CONFIG_SECLUDED_MEMORY */
2537
6d2010ae 2538 if (!task->active) {
1c79356b 2539 /*
6d2010ae 2540 * Task is already being terminated.
1c79356b
A
2541 * Just return an error. If we are dying, this will
2542 * just get us to our AST special handler and that
2543 * will get us to finalize the termination of ourselves.
2544 */
2545 task_unlock(task);
0a7de745 2546 if (self_task != task) {
91447636 2547 task_unlock(self_task);
0a7de745 2548 }
91447636 2549
0a7de745 2550 return KERN_FAILURE;
1c79356b 2551 }
91447636 2552
3e170ce0
A
2553 if (task_corpse_pending_report(task)) {
2554 /*
2555 * Task is marked for reporting as corpse.
2556 * Just return an error. This will
2557 * just get us to our AST special handler and that
 2558 * will get us to finish the path to death.
2559 */
2560 task_unlock(task);
0a7de745 2561 if (self_task != task) {
3e170ce0 2562 task_unlock(self_task);
0a7de745 2563 }
3e170ce0 2564
0a7de745 2565 return KERN_FAILURE;
3e170ce0
A
2566 }
2567
0a7de745 2568 if (self_task != task) {
91447636 2569 task_unlock(self_task);
0a7de745 2570 }
1c79356b 2571
e7c99d92
A
2572 /*
2573 * Make sure the current thread does not get aborted out of
2574 * the waits inside these operations.
2575 */
9bccf70c 2576 interrupt_save = thread_interrupt_level(THREAD_UNINT);
e7c99d92 2577
1c79356b
A
2578 /*
2579 * Indicate that we want all the threads to stop executing
2580 * at user space by holding the task (we would have held
2581 * each thread independently in thread_terminate_internal -
2582 * but this way we may be more likely to already find it
2583 * held there). Mark the task inactive, and prevent
2584 * further task operations via the task port.
2585 */
2586 task_hold_locked(task);
2587 task->active = FALSE;
2588 ipc_task_disable(task);
2589
39236c6e
A
2590#if CONFIG_TELEMETRY
2591 /*
2592 * Notify telemetry that this task is going away.
2593 */
2594 telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2595#endif
2596
1c79356b 2597 /*
91447636
A
2598 * Terminate each thread in the task.
2599 */
2600 queue_iterate(&task->threads, thread, thread_t, task_threads) {
0a7de745 2601 thread_terminate_internal(thread);
1c79356b 2602 }
e7c99d92 2603
fe8ab488 2604#ifdef MACH_BSD
743345f9 2605 if (task->bsd_info != NULL && !task_is_exec_copy(task)) {
fe8ab488
A
2606 pid = proc_pid(task->bsd_info);
2607 }
2608#endif /* MACH_BSD */
2609
316670eb
A
2610 task_unlock(task);
2611
39037602 2612 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
0a7de745 2613 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
a1c7dba1 2614
0a7de745 2615 /* Early object reap phase */
fe8ab488
A
2616
2617// PR-17045188: Revisit implementation
2618// task_partial_reap(task, pid);
2619
5ba3f43e
A
2620#if CONFIG_EMBEDDED
2621 /*
0a7de745 2622 * remove all task watchers
5ba3f43e
A
2623 */
2624 task_removewatchers(task);
2625
2626#endif /* CONFIG_EMBEDDED */
1c79356b
A
2627
2628 /*
2629 * Destroy all synchronizers owned by the task.
2630 */
2631 task_synchronizer_destroy_all(task);
2632
cb323159
A
2633 /*
2634 * Clear the watchport boost on the task.
2635 */
2636 task_remove_turnstile_watchports(task);
2637
1c79356b
A
2638 /*
2639 * Destroy the IPC space, leaving just a reference for it.
2640 */
316670eb 2641 ipc_space_terminate(task->itk_space);
1c79356b 2642
fe8ab488
A
2643#if 00
2644 /* if some ledgers go negative on tear-down again... */
2645 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2646 task_ledgers.phys_footprint);
fe8ab488 2647 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2648 task_ledgers.internal);
fe8ab488 2649 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2650 task_ledgers.internal_compressed);
fe8ab488 2651 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2652 task_ledgers.iokit_mapped);
fe8ab488 2653 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2654 task_ledgers.alternate_accounting);
3e170ce0 2655 ledger_disable_panic_on_negative(task->map->pmap->ledger,
0a7de745 2656 task_ledgers.alternate_accounting_compressed);
fe8ab488 2657#endif
91447636 2658
1c79356b
A
2659 /*
2660 * If the current thread is a member of the task
2661 * being terminated, then the last reference to
2662 * the task will not be dropped until the thread
2663 * is finally reaped. To avoid incurring the
2664 * expense of removing the address space regions
 2665 * at reap time, we do it explicitly here.
2666 */
3e170ce0
A
2667
2668 vm_map_lock(task->map);
2669 vm_map_disable_hole_optimization(task->map);
2670 vm_map_unlock(task->map);
2671
fe8ab488
A
2672#if MACH_ASSERT
2673 /*
2674 * Identify the pmap's process, in case the pmap ledgers drift
2675 * and we have to report it.
2676 */
2677 char procname[17];
743345f9 2678 if (task->bsd_info && !task_is_exec_copy(task)) {
fe8ab488 2679 pid = proc_pid(task->bsd_info);
0a7de745 2680 proc_name_kdp(task, procname, sizeof(procname));
fe8ab488
A
2681 } else {
2682 pid = 0;
0a7de745 2683 strlcpy(procname, "<unknown>", sizeof(procname));
fe8ab488
A
2684 }
2685 pmap_set_process(task->map->pmap, pid, procname);
2686#endif /* MACH_ASSERT */
2687
ea3f0419 2688 vm_map_terminate(task->map);
5c9f4661
A
2689
2690 /* release our shared region */
2691 vm_shared_region_set(task, NULL);
2692
2693
b0d623f7 2694 lck_mtx_lock(&tasks_threads_lock);
2d21ac55 2695 queue_remove(&tasks, task, task_t, tasks);
6d2010ae 2696 queue_enter(&terminated_tasks, task, task_t, tasks);
2d21ac55 2697 tasks_count--;
39236c6e 2698 terminated_tasks_count++;
b0d623f7 2699 lck_mtx_unlock(&tasks_threads_lock);
9bccf70c 2700
1c79356b 2701 /*
e7c99d92
A
2702 * We no longer need to guard against being aborted, so restore
2703 * the previous interruptible state.
2704 */
9bccf70c 2705 thread_interrupt_level(interrupt_save);
e7c99d92 2706
a39ff7e2 2707#if KPC
fe8ab488 2708 /* force the task to release all ctrs */
0a7de745 2709 if (task->t_kpc & TASK_KPC_FORCED_ALL_CTRS) {
fe8ab488 2710 kpc_force_all_ctrs(task, 0);
0a7de745 2711 }
a39ff7e2 2712#endif /* KPC */
fe8ab488
A
2713
2714#if CONFIG_COALITIONS
2715 /*
3e170ce0 2716 * Leave our coalitions. (drop activation but not reference)
fe8ab488 2717 */
3e170ce0 2718 coalitions_remove_task(task);
fe8ab488
A
2719#endif
2720
e7c99d92
A
2721 /*
2722 * Get rid of the task active reference on itself.
1c79356b 2723 */
1c79356b
A
2724 task_deallocate(task);
2725
0a7de745 2726 return KERN_SUCCESS;
1c79356b
A
2727}
2728
4bd07ac2
A
2729void
2730tasks_system_suspend(boolean_t suspend)
2731{
2732 task_t task;
2733
2734 lck_mtx_lock(&tasks_threads_lock);
2735 assert(tasks_suspend_state != suspend);
2736 tasks_suspend_state = suspend;
2737 queue_iterate(&tasks, task, task_t, tasks) {
2738 if (task == kernel_task) {
2739 continue;
2740 }
2741 suspend ? task_suspend_internal(task) : task_resume_internal(task);
2742 }
2743 lck_mtx_unlock(&tasks_threads_lock);
2744}
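
/*
 * Companion detail: task_create_internal() above checks
 * tasks_suspend_state under tasks_threads_lock and suspends newly
 * created tasks, so a system-wide suspend also covers tasks that race
 * with this loop.
 */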
2745
1c79356b 2746/*
b0d623f7 2747 * task_start_halt:
91447636 2748 *
0a7de745 2749 * Shut the current task down (except for the current thread) in
91447636 2750 * preparation for dramatic changes to the task (probably exec).
b0d623f7
A
2751 * We hold the task and mark all other threads in the task for
2752 * termination.
1c79356b
A
2753 */
2754kern_return_t
3e170ce0
A
2755task_start_halt(task_t task)
2756{
2757 kern_return_t kr = KERN_SUCCESS;
2758 task_lock(task);
2759 kr = task_start_halt_locked(task, FALSE);
2760 task_unlock(task);
2761 return kr;
2762}
2763
2764static kern_return_t
2765task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
1c79356b 2766{
3e170ce0
A
2767 thread_t thread, self;
2768 uint64_t dispatchqueue_offset;
1c79356b
A
2769
2770 assert(task != kernel_task);
2771
91447636 2772 self = current_thread();
1c79356b 2773
0a7de745
A
2774 if (task != self->task && !task_is_a_corpse_fork(task)) {
2775 return KERN_INVALID_ARGUMENT;
2776 }
1c79356b 2777
b0d623f7 2778 if (task->halting || !task->active || !self->active) {
1c79356b 2779 /*
3e170ce0
A
2780 * Task or current thread is already being terminated.
2781 * Hurry up and return out of the current kernel context
2782 * so that we run our AST special handler to terminate
2783 * ourselves.
1c79356b 2784 */
0a7de745 2785 return KERN_FAILURE;
1c79356b
A
2786 }
2787
b0d623f7
A
2788 task->halting = TRUE;
2789
3e170ce0
A
2790 /*
2791 * Mark all the threads to keep them from starting any more
2792 * user-level execution. The thread_terminate_internal code
2793 * would do this on a thread by thread basis anyway, but this
2794 * gives us a better chance of not having to wait there.
2795 */
2796 task_hold_locked(task);
2797 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info);
1c79356b 2798
3e170ce0
A
2799 /*
2800 * Terminate all the other threads in the task.
2801 */
2802 queue_iterate(&task->threads, thread, thread_t, task_threads)
2803 {
2804 if (should_mark_corpse) {
2805 thread_mtx_lock(thread);
2806 thread->inspection = TRUE;
2807 thread_mtx_unlock(thread);
1c79356b 2808 }
0a7de745 2809 if (thread != self) {
3e170ce0 2810 thread_terminate_internal(thread);
0a7de745 2811 }
1c79356b 2812 }
3e170ce0
A
2813 task->dispatchqueue_offset = dispatchqueue_offset;
2814
2815 task_release_locked(task);
2816
b0d623f7
A
2817 return KERN_SUCCESS;
2818}
2819
2820
2821/*
2822 * task_complete_halt:
2823 *
2824 * Complete task halt by waiting for threads to terminate, then clean
2825 * up task resources (VM, port namespace, etc...) and then let the
2826 * current thread go in the (practically empty) task context.
743345f9
A
2827 *
 2828 * Note: the task->halting flag is not cleared, in order to avoid
 2829 * creation of a new thread in the old exec'ed task.
b0d623f7
A
2830 */
2831void
2832task_complete_halt(task_t task)
2833{
2834 task_lock(task);
2835 assert(task->halting);
2836 assert(task == current_task());
e7c99d92 2837
b0d623f7
A
2838 /*
2839 * Wait for the other threads to get shut down.
2840 * When the last other thread is reaped, we'll be
316670eb 2841 * woken up.
b0d623f7
A
2842 */
2843 if (task->thread_count > 1) {
2844 assert_wait((event_t)&task->halting, THREAD_UNINT);
2845 task_unlock(task);
2846 thread_block(THREAD_CONTINUE_NULL);
2847 } else {
2848 task_unlock(task);
2849 }
1c79356b 2850
316670eb
A
2851 /*
2852 * Give the machine dependent code a chance
2853 * to perform cleanup of task-level resources
2854 * associated with the current thread before
2855 * ripping apart the task.
2856 */
2857 machine_task_terminate(task);
2858
1c79356b
A
2859 /*
2860 * Destroy all synchronizers owned by the task.
2861 */
2862 task_synchronizer_destroy_all(task);
2863
2864 /*
9bccf70c
A
2865 * Destroy the contents of the IPC space, leaving just
2866 * a reference for it.
e7c99d92 2867 */
55e303ae 2868 ipc_space_clean(task->itk_space);
1c79356b
A
2869
2870 /*
2871 * Clean out the address space, as we are going to be
2872 * getting a new one.
2873 */
91447636 2874 vm_map_remove(task->map, task->map->min_offset,
0a7de745
A
2875 task->map->max_offset,
2876 /*
2877 * Final cleanup:
2878 * + no unnesting
2879 * + remove immutable mappings
2880 * + allow gaps in the range
2881 */
2882 (VM_MAP_REMOVE_NO_UNNESTING |
2883 VM_MAP_REMOVE_IMMUTABLE |
2884 VM_MAP_REMOVE_GAPS_OK));
1c79356b 2885
39037602
A
2886 /*
2887 * Kick out any IOKitUser handles to the task. At best they're stale,
2888 * at worst someone is racing a SUID exec.
2889 */
2890 iokit_task_terminate(task);
1c79356b
A
2891}
2892
2893/*
2894 * task_hold_locked:
2895 *
2896 * Suspend execution of the specified task.
 2897 * This is a recursive-style suspension of the task; a count of
 2898 * suspends is maintained.
2899 *
d9a64523 2900 * CONDITIONS: the task is locked and active.
1c79356b
A
2901 */
2902void
2903task_hold_locked(
0a7de745 2904 task_t task)
1c79356b 2905{
0a7de745 2906 thread_t thread;
1c79356b
A
2907
2908 assert(task->active);
2909
0a7de745 2910 if (task->suspend_count++ > 0) {
9bccf70c 2911 return;
0a7de745 2912 }
1c79356b 2913
d9a64523
A
2914 if (task->bsd_info) {
2915 workq_proc_suspended(task->bsd_info);
2916 }
2917
1c79356b 2918 /*
91447636 2919 * Iterate through all the threads and hold them.
1c79356b 2920 */
91447636
A
2921 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2922 thread_mtx_lock(thread);
2923 thread_hold(thread);
2924 thread_mtx_unlock(thread);
1c79356b
A
2925 }
2926}
2927
2928/*
2929 * task_hold:
2930 *
 2931 * Same as the internal routine above, except that it must lock
2932 * and verify that the task is active. This differs from task_suspend
0a7de745 2933 * in that it places a kernel hold on the task rather than just a
1c79356b
A
 2934 * user-level hold. This keeps users from over-resuming and setting
2935 * it running out from under the kernel.
2936 *
0a7de745 2937 * CONDITIONS: the caller holds a reference on the task
1c79356b
A
2938 */
2939kern_return_t
91447636 2940task_hold(
0a7de745 2941 task_t task)
1c79356b 2942{
0a7de745
A
2943 if (task == TASK_NULL) {
2944 return KERN_INVALID_ARGUMENT;
2945 }
91447636 2946
1c79356b 2947 task_lock(task);
91447636 2948
1c79356b
A
2949 if (!task->active) {
2950 task_unlock(task);
91447636 2951
0a7de745 2952 return KERN_FAILURE;
1c79356b 2953 }
1c79356b 2954
91447636
A
2955 task_hold_locked(task);
2956 task_unlock(task);
2957
0a7de745 2958 return KERN_SUCCESS;
1c79356b
A
2959}
2960
316670eb
A
2961kern_return_t
2962task_wait(
0a7de745
A
2963 task_t task,
2964 boolean_t until_not_runnable)
316670eb 2965{
0a7de745
A
2966 if (task == TASK_NULL) {
2967 return KERN_INVALID_ARGUMENT;
2968 }
316670eb
A
2969
2970 task_lock(task);
2971
2972 if (!task->active) {
2973 task_unlock(task);
2974
0a7de745 2975 return KERN_FAILURE;
316670eb
A
2976 }
2977
2978 task_wait_locked(task, until_not_runnable);
2979 task_unlock(task);
2980
0a7de745 2981 return KERN_SUCCESS;
316670eb
A
2982}
2983
1c79356b 2984/*
91447636
A
2985 * task_wait_locked:
2986 *
1c79356b
A
2987 * Wait for all threads in task to stop.
2988 *
2989 * Conditions:
2990 * Called with task locked, active, and held.
2991 */
2992void
2993task_wait_locked(
0a7de745
A
2994 task_t task,
2995 boolean_t until_not_runnable)
1c79356b 2996{
0a7de745 2997 thread_t thread, self;
1c79356b
A
2998
2999 assert(task->active);
3000 assert(task->suspend_count > 0);
3001
91447636
A
3002 self = current_thread();
3003
1c79356b 3004 /*
91447636 3005 * Iterate through all the threads and wait for them to
1c79356b
A
3006 * stop. Do not wait for the current thread if it is within
3007 * the task.
3008 */
91447636 3009 queue_iterate(&task->threads, thread, thread_t, task_threads) {
0a7de745 3010 if (thread != self) {
316670eb 3011 thread_wait(thread, until_not_runnable);
0a7de745 3012 }
1c79356b
A
3013 }
3014}
3015
cb323159
A
3016boolean_t
3017task_is_app_suspended(task_t task)
3018{
3019 return task->pidsuspended;
3020}
3021
/*
 * task_release_locked:
 *
 * Release a kernel hold on a task.
 *
 * CONDITIONS: the task is locked and active
 */
void
task_release_locked(
    task_t task)
{
    thread_t thread;

    assert(task->active);
    assert(task->suspend_count > 0);

    if (--task->suspend_count > 0) {
        return;
    }

    if (task->bsd_info) {
        workq_proc_resumed(task->bsd_info);
    }

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        thread_mtx_lock(thread);
        thread_release(thread);
        thread_mtx_unlock(thread);
    }
}

/*
 * task_release:
 *
 * Same as the internal routine above, except that it must lock
 * and verify that the task is active.
 *
 * CONDITIONS: The caller holds a reference to the task
 */
kern_return_t
task_release(
    task_t task)
{
    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

    if (!task->active) {
        task_unlock(task);

        return KERN_FAILURE;
    }

    task_release_locked(task);
    task_unlock(task);

    return KERN_SUCCESS;
}

kern_return_t
task_threads(
    task_t                  task,
    thread_act_array_t     *threads_out,
    mach_msg_type_number_t *count)
{
    mach_msg_type_number_t actual;
    thread_t              *thread_list;
    thread_t               thread;
    vm_size_t              size, size_needed;
    void                  *addr;
    unsigned int           i, j;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    size = 0; addr = NULL;

    for (;;) {
        task_lock(task);
        if (!task->active) {
            task_unlock(task);

            if (size != 0) {
                kfree(addr, size);
            }

            return KERN_FAILURE;
        }

        actual = task->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof(mach_port_t);
        if (size_needed <= size) {
            break;
        }

        /* unlock the task and allocate more memory */
        task_unlock(task);

        if (size != 0) {
            kfree(addr, size);
        }

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0) {
            return KERN_RESOURCE_SHORTAGE;
        }
    }

    /* OK, have memory and the task is locked & active */
    thread_list = (thread_t *)addr;

    i = j = 0;

    for (thread = (thread_t)queue_first(&task->threads); i < actual;
        ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
        thread_reference_internal(thread);
        thread_list[j++] = thread;
    }

    assert(queue_end(&task->threads, (queue_entry_t)thread));

    actual = j;
    size_needed = actual * sizeof(mach_port_t);

    /* can unlock task now that we've got the thread refs */
    task_unlock(task);

    if (actual == 0) {
        /* no threads, so return null pointer and deallocate memory */

        *threads_out = NULL;
        *count = 0;

        if (size != 0) {
            kfree(addr, size);
        }
    } else {
        /* if we allocated too much, must copy */

        if (size_needed < size) {
            void *newaddr;

            newaddr = kalloc(size_needed);
            if (newaddr == 0) {
                for (i = 0; i < actual; ++i) {
                    thread_deallocate(thread_list[i]);
                }
                kfree(addr, size);
                return KERN_RESOURCE_SHORTAGE;
            }

            bcopy(addr, newaddr, size_needed);
            kfree(addr, size);
            thread_list = (thread_t *)newaddr;
        }

        *threads_out = thread_list;
        *count = actual;

        /* do the conversion that MIG should handle */

        for (i = 0; i < actual; ++i) {
            ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
        }
    }

    return KERN_SUCCESS;
}
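
/*
 * For reference, a minimal user-space sketch of calling this routine via
 * MIG. This is illustrative only; error handling is reduced to the bare
 * minimum, and the caller owns both the returned send rights and the
 * out-of-line array:
 *
 *      thread_act_array_t threads;
 *      mach_msg_type_number_t count = 0;
 *
 *      if (task_threads(mach_task_self(), &threads, &count) == KERN_SUCCESS) {
 *          for (mach_msg_type_number_t i = 0; i < count; i++) {
 *              mach_port_deallocate(mach_task_self(), threads[i]);
 *          }
 *          vm_deallocate(mach_task_self(), (vm_address_t)threads,
 *              count * sizeof(threads[0]));
 *      }
 */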

#define TASK_HOLD_NORMAL        0
#define TASK_HOLD_PIDSUSPEND    1
#define TASK_HOLD_LEGACY        2
#define TASK_HOLD_LEGACY_ALL    3

static kern_return_t
place_task_hold(
    task_t task,
    int    mode)
{
    if (!task->active && !task_is_a_corpse(task)) {
        return KERN_FAILURE;
    }

    /* Return success for corpse task */
    if (task_is_a_corpse(task)) {
        return KERN_SUCCESS;
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND) | DBG_FUNC_NONE,
        task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
        task->user_stop_count, task->user_stop_count + 1, 0);

#if MACH_ASSERT
    current_task()->suspends_outstanding++;
#endif

    if (mode == TASK_HOLD_LEGACY) {
        task->legacy_stop_count++;
    }

    if (task->user_stop_count++ > 0) {
        /*
         * If the stop count was positive, the task is
         * already stopped and we can exit.
         */
        return KERN_SUCCESS;
    }

    /*
     * Put a kernel-level hold on the threads in the task (all
     * user-level task suspensions added together represent a
     * single kernel-level hold). We then wait for the threads
     * to stop executing user code.
     */
    task_hold_locked(task);
    task_wait_locked(task, FALSE);

    return KERN_SUCCESS;
}

static kern_return_t
release_task_hold(
    task_t task,
    int    mode)
{
    boolean_t release = FALSE;

    if (!task->active && !task_is_a_corpse(task)) {
        return KERN_FAILURE;
    }

    /* Return success for corpse task */
    if (task_is_a_corpse(task)) {
        return KERN_SUCCESS;
    }

    if (mode == TASK_HOLD_PIDSUSPEND) {
        if (task->pidsuspended == FALSE) {
            return KERN_FAILURE;
        }
        task->pidsuspended = FALSE;
    }

    if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
            task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
            task->user_stop_count, mode, task->legacy_stop_count);

#if MACH_ASSERT
        /*
         * This is obviously not robust; if we suspend one task and then resume a different one,
         * we'll fly under the radar. This is only meant to catch the common case of a crashed
         * or buggy suspender.
         */
        current_task()->suspends_outstanding--;
#endif

        if (mode == TASK_HOLD_LEGACY_ALL) {
            if (task->legacy_stop_count >= task->user_stop_count) {
                task->user_stop_count = 0;
                release = TRUE;
            } else {
                task->user_stop_count -= task->legacy_stop_count;
            }
            task->legacy_stop_count = 0;
        } else {
            if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
                task->legacy_stop_count--;
            }
            if (--task->user_stop_count == 0) {
                release = TRUE;
            }
        }
    } else {
        return KERN_FAILURE;
    }

    /*
     * Release the task if necessary.
     */
    if (release) {
        task_release_locked(task);
    }

    return KERN_SUCCESS;
}
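
/*
 * The bookkeeping above means user-level suspensions nest: only the
 * transition of user_stop_count away from / back to zero touches the
 * kernel-level hold. A minimal user-space sketch, assuming `target` is
 * a task port obtained elsewhere:
 *
 *      (void)task_suspend(target);    user_stop_count 0 -> 1, threads held
 *      (void)task_suspend(target);    user_stop_count 1 -> 2, already stopped
 *      (void)task_resume(target);     user_stop_count 2 -> 1, still suspended
 *      (void)task_resume(target);     user_stop_count 1 -> 0, task runs again
 */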

boolean_t
get_task_suspended(task_t task)
{
    return 0 != task->user_stop_count;
}

/*
 * task_suspend:
 *
 * Implement an (old-fashioned) user-level suspension on a task.
 *
 * Because the user isn't expecting to have to manage a suspension
 * token, we'll track it for him in the kernel in the form of a naked
 * send right to the task's resume port. All such send rights
 * account for a single suspension against the task (unlike task_suspend2()
 * where each caller gets a unique suspension count represented by a
 * unique send-once right).
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_suspend(
    task_t task)
{
    kern_return_t    kr;
    mach_port_t      port;
    mach_port_name_t name;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

    /*
     * place a legacy hold on the task.
     */
    kr = place_task_hold(task, TASK_HOLD_LEGACY);
    if (kr != KERN_SUCCESS) {
        task_unlock(task);
        return kr;
    }

    /*
     * Claim a send right on the task resume port, and request a no-senders
     * notification on that port (if none outstanding).
     */
    (void)ipc_kobject_make_send_lazy_alloc_port(&task->itk_resume,
        (ipc_kobject_t)task, IKOT_TASK_RESUME);
    port = task->itk_resume;

    task_unlock(task);

    /*
     * Copyout the send right into the calling task's IPC space. It won't know it is there,
     * but we'll look it up when calling a traditional resume. Any IPC operations that
     * deallocate the send right will auto-release the suspension.
     */
    if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, ip_to_object(port),
        MACH_MSG_TYPE_MOVE_SEND, NULL, NULL, &name)) != KERN_SUCCESS) {
        printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n",
            proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
            task_pid(task), kr);
        return kr;
    }

    return kr;
}
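
/*
 * A minimal user-space sketch of the legacy interface described above.
 * task_for_pid() is an assumption here (it requires appropriate
 * privileges/entitlements), and all pid validation is elided:
 *
 *      task_t target;
 *      if (task_for_pid(mach_task_self(), pid, &target) == KERN_SUCCESS) {
 *          if (task_suspend(target) == KERN_SUCCESS) {
 *              ... inspect the stopped task ...
 *              task_resume(target);
 *          }
 *          mach_port_deallocate(mach_task_self(), target);
 *      }
 *
 * The suspension token (the naked send right described above) sits in the
 * caller's IPC space invisibly; destroying that right by any IPC means
 * also releases the corresponding legacy hold.
 */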

/*
 * task_resume:
 *      Release a user hold on a task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_resume(
    task_t task)
{
    kern_return_t    kr;
    mach_port_name_t resume_port_name;
    ipc_entry_t      resume_port_entry;
    ipc_space_t      space = current_task()->itk_space;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    /* release a legacy task hold */
    task_lock(task);
    kr = release_task_hold(task, TASK_HOLD_LEGACY);
    task_unlock(task);

    is_write_lock(space);
    if (is_active(space) && IP_VALID(task->itk_resume) &&
        ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
        /*
         * We found a suspension token in the caller's IPC space. Release a send right to indicate that
         * we are holding one less legacy hold on the task from this caller. If the release failed,
         * go ahead and drop all the rights, as someone either already released our holds or the task
         * is gone.
         */
        if (kr == KERN_SUCCESS) {
            ipc_right_dealloc(space, resume_port_name, resume_port_entry);
        } else {
            ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
        }
        /* space unlocked */
    } else {
        is_write_unlock(space);
        if (kr == KERN_SUCCESS) {
            printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
                proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
                task_pid(task));
        }
    }

    return kr;
}

/*
 * Suspend the target task.
 * Making/holding a token/reference/port is the caller's responsibility.
 */
kern_return_t
task_suspend_internal(task_t task)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);
    kr = place_task_hold(task, TASK_HOLD_NORMAL);
    task_unlock(task);
    return kr;
}

/*
 * Suspend the target task, and return a suspension token. The token
 * represents a reference on the suspended task.
 */
kern_return_t
task_suspend2(
    task_t                   task,
    task_suspension_token_t *suspend_token)
{
    kern_return_t kr;

    kr = task_suspend_internal(task);
    if (kr != KERN_SUCCESS) {
        *suspend_token = TASK_NULL;
        return kr;
    }

    /*
     * Take a reference on the target task and return that to the caller
     * as a "suspension token," which can be converted into an SO right to
     * the now-suspended task's resume port.
     */
    task_reference_internal(task);
    *suspend_token = task;

    return KERN_SUCCESS;
}

/*
 * Resume the task
 * (reference/token/port management is caller's responsibility).
 */
kern_return_t
task_resume_internal(
    task_suspension_token_t task)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);
    kr = release_task_hold(task, TASK_HOLD_NORMAL);
    task_unlock(task);
    return kr;
}

/*
 * Resume the task using a suspension token. Consumes the token's ref.
 */
kern_return_t
task_resume2(
    task_suspension_token_t task)
{
    kern_return_t kr;

    kr = task_resume_internal(task);
    task_suspension_token_deallocate(task);

    return kr;
}
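
/*
 * The token-based counterpart from user space, a minimal sketch: unlike
 * task_suspend(), each task_suspend2() caller gets its own token and must
 * hand it back explicitly (`target` is assumed obtained elsewhere):
 *
 *      task_suspension_token_t token;
 *      if (task_suspend2(target, &token) == KERN_SUCCESS) {
 *          ... target is suspended ...
 *          task_resume2(token);    consumes the token's reference
 *      }
 */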

boolean_t
task_suspension_notify(mach_msg_header_t *request_header)
{
    ipc_port_t port = request_header->msgh_remote_port;
    task_t task = convert_port_to_task_suspension_token(port);
    mach_msg_type_number_t not_count;

    if (task == TASK_NULL || task == kernel_task) {
        return TRUE; /* nothing to do */
    }

    switch (request_header->msgh_id) {
    case MACH_NOTIFY_SEND_ONCE:
        /* release the hold held by this specific send-once right */
        task_lock(task);
        release_task_hold(task, TASK_HOLD_NORMAL);
        task_unlock(task);
        break;

    case MACH_NOTIFY_NO_SENDERS:
        not_count = ((mach_no_senders_notification_t *)request_header)->not_count;

        task_lock(task);
        ip_lock(port);
        if (port->ip_mscount == not_count) {
            /* release all the [remaining] outstanding legacy holds */
            assert(port->ip_nsrequest == IP_NULL);
            ip_unlock(port);
            release_task_hold(task, TASK_HOLD_LEGACY_ALL);
            task_unlock(task);
        } else if (port->ip_nsrequest == IP_NULL) {
            ipc_port_t old_notify;

            task_unlock(task);
            /* new send rights, re-arm notification at current make-send count */
            ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
            assert(old_notify == IP_NULL);
            /* port unlocked */
        } else {
            ip_unlock(port);
            task_unlock(task);
        }
        break;

    default:
        break;
    }

    task_suspension_token_deallocate(task); /* drop token reference */
    return TRUE;
}

static kern_return_t
task_pidsuspend_locked(task_t task)
{
    kern_return_t kr;

    if (task->pidsuspended) {
        kr = KERN_FAILURE;
        goto out;
    }

    task->pidsuspended = TRUE;

    kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
    if (kr != KERN_SUCCESS) {
        task->pidsuspended = FALSE;
    }
out:
    return kr;
}

/*
 * task_pidsuspend:
 *
 * Suspends a task by placing a hold on its threads.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_pidsuspend(
    task_t task)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

    kr = task_pidsuspend_locked(task);

    task_unlock(task);

    if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
        iokit_task_app_suspended_changed(task);
    }

    return kr;
}

/*
 * task_pidresume:
 *      Resumes a previously suspended task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_pidresume(
    task_t task)
{
    kern_return_t kr;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

#if CONFIG_FREEZE

    while (task->changing_freeze_state) {
        assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
        task_unlock(task);
        thread_block(THREAD_CONTINUE_NULL);

        task_lock(task);
    }
    task->changing_freeze_state = TRUE;
#endif

    kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);

    task_unlock(task);

    if ((KERN_SUCCESS == kr) && task->message_app_suspended) {
        iokit_task_app_suspended_changed(task);
    }

#if CONFIG_FREEZE

    task_lock(task);

    if (kr == KERN_SUCCESS) {
        task->frozen = FALSE;
    }
    task->changing_freeze_state = FALSE;
    thread_wakeup(&task->changing_freeze_state);

    task_unlock(task);
#endif

    return kr;
}

os_refgrp_decl(static, task_watchports_refgrp, "task_watchports", NULL);

/*
 * task_add_turnstile_watchports:
 *      Setup watchports to boost the main thread of the task.
 *
 * Arguments:
 *      task: task being spawned
 *      thread: main thread of task
 *      portwatch_ports: array of watchports
 *      portwatch_count: number of watchports
 *
 * Conditions:
 *      Nothing locked.
 */
void
task_add_turnstile_watchports(
    task_t      task,
    thread_t    thread,
    ipc_port_t *portwatch_ports,
    uint32_t    portwatch_count)
{
    struct task_watchports *watchports = NULL;
    struct task_watchport_elem *previous_elem_array[TASK_MAX_WATCHPORT_COUNT] = {};
    os_ref_count_t refs;

    /* Check if the task has terminated */
    if (!task->active) {
        return;
    }

    assert(portwatch_count <= TASK_MAX_WATCHPORT_COUNT);

    watchports = task_watchports_alloc_init(task, thread, portwatch_count);

    /* Lock the ipc space */
    is_write_lock(task->itk_space);

    /* Setup watchports to boost the main thread */
    refs = task_add_turnstile_watchports_locked(task,
        watchports, previous_elem_array, portwatch_ports,
        portwatch_count);

    /* Drop the space lock */
    is_write_unlock(task->itk_space);

    if (refs == 0) {
        task_watchports_deallocate(watchports);
    }

    /* Drop the ref on previous_elem_array */
    for (uint32_t i = 0; i < portwatch_count && previous_elem_array[i] != NULL; i++) {
        task_watchport_elem_deallocate(previous_elem_array[i]);
    }
}

/*
 * task_remove_turnstile_watchports:
 *      Clear all turnstile boost on the task from watchports.
 *
 * Arguments:
 *      task: task being terminated
 *
 * Conditions:
 *      Nothing locked.
 */
void
task_remove_turnstile_watchports(
    task_t task)
{
    os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
    struct task_watchports *watchports = NULL;
    ipc_port_t port_freelist[TASK_MAX_WATCHPORT_COUNT] = {};
    uint32_t portwatch_count;

    /* Lock the ipc space */
    is_write_lock(task->itk_space);

    /* Check if a watchport boost exists */
    if (task->watchports == NULL) {
        is_write_unlock(task->itk_space);
        return;
    }
    watchports = task->watchports;
    portwatch_count = watchports->tw_elem_array_count;

    refs = task_remove_turnstile_watchports_locked(task, watchports,
        port_freelist);

    is_write_unlock(task->itk_space);

    /* Drop all the port references */
    for (uint32_t i = 0; i < portwatch_count && port_freelist[i] != NULL; i++) {
        ip_release(port_freelist[i]);
    }

    /* Clear the task and thread references for task_watchport */
    if (refs == 0) {
        task_watchports_deallocate(watchports);
    }
}

/*
 * task_transfer_turnstile_watchports:
 *      Transfer all watchport turnstile boost from old task to new task.
 *
 * Arguments:
 *      old_task: task calling exec
 *      new_task: new exec'ed task
 *      thread: main thread of new task
 *
 * Conditions:
 *      Nothing locked.
 */
void
task_transfer_turnstile_watchports(
    task_t   old_task,
    task_t   new_task,
    thread_t new_thread)
{
    struct task_watchports *old_watchports = NULL;
    struct task_watchports *new_watchports = NULL;
    os_ref_count_t old_refs = TASK_MAX_WATCHPORT_COUNT;
    os_ref_count_t new_refs = TASK_MAX_WATCHPORT_COUNT;
    uint32_t portwatch_count;

    if (old_task->watchports == NULL || !new_task->active) {
        return;
    }

    /* Get the watch port count from the old task */
    is_write_lock(old_task->itk_space);
    if (old_task->watchports == NULL) {
        is_write_unlock(old_task->itk_space);
        return;
    }

    portwatch_count = old_task->watchports->tw_elem_array_count;
    is_write_unlock(old_task->itk_space);

    new_watchports = task_watchports_alloc_init(new_task, new_thread, portwatch_count);

    /* Lock the ipc space for old task */
    is_write_lock(old_task->itk_space);

    /* Lock the ipc space for new task */
    is_write_lock(new_task->itk_space);

    /* Check if a watchport boost exists */
    if (old_task->watchports == NULL || !new_task->active) {
        is_write_unlock(new_task->itk_space);
        is_write_unlock(old_task->itk_space);
        (void)task_watchports_release(new_watchports);
        task_watchports_deallocate(new_watchports);
        return;
    }

    old_watchports = old_task->watchports;
    assert(portwatch_count == old_task->watchports->tw_elem_array_count);

    /* Setup new task watchports */
    new_task->watchports = new_watchports;

    for (uint32_t i = 0; i < portwatch_count; i++) {
        ipc_port_t port = old_watchports->tw_elem[i].twe_port;

        if (port == NULL) {
            task_watchport_elem_clear(&new_watchports->tw_elem[i]);
            continue;
        }

        /* Lock the port and check if it has the entry */
        ip_lock(port);
        imq_lock(&port->ip_messages);

        task_watchport_elem_init(&new_watchports->tw_elem[i], new_task, port);

        if (ipc_port_replace_watchport_elem_conditional_locked(port,
            &old_watchports->tw_elem[i], &new_watchports->tw_elem[i]) == KERN_SUCCESS) {
            task_watchport_elem_clear(&old_watchports->tw_elem[i]);

            task_watchports_retain(new_watchports);
            old_refs = task_watchports_release(old_watchports);

            /* Check if all ports are cleaned */
            if (old_refs == 0) {
                old_task->watchports = NULL;
            }
        } else {
            task_watchport_elem_clear(&new_watchports->tw_elem[i]);
        }
        /* mqueue and port unlocked by ipc_port_replace_watchport_elem_conditional_locked */
    }

    /* Drop the reference on new task_watchports struct returned by task_watchports_alloc_init */
    new_refs = task_watchports_release(new_watchports);
    if (new_refs == 0) {
        new_task->watchports = NULL;
    }

    is_write_unlock(new_task->itk_space);
    is_write_unlock(old_task->itk_space);

    /* Clear the task and thread references for old_watchport */
    if (old_refs == 0) {
        task_watchports_deallocate(old_watchports);
    }

    /* Clear the task and thread references for new_watchport */
    if (new_refs == 0) {
        task_watchports_deallocate(new_watchports);
    }
}

/*
 * task_add_turnstile_watchports_locked:
 *      Setup watchports to boost the main thread of the task.
 *
 * Arguments:
 *      task: task to boost
 *      watchports: watchport structure to be attached to the task
 *      previous_elem_array: an array of old watchport_elem to be returned to caller
 *      portwatch_ports: array of watchports
 *      portwatch_count: number of watchports
 *
 * Conditions:
 *      ipc space of the task locked.
 *      returns array of old watchport_elem in previous_elem_array
 */
static os_ref_count_t
task_add_turnstile_watchports_locked(
    task_t                       task,
    struct task_watchports      *watchports,
    struct task_watchport_elem **previous_elem_array,
    ipc_port_t                  *portwatch_ports,
    uint32_t                     portwatch_count)
{
    os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;

    /* Check if the task is still active */
    if (!task->active) {
        refs = task_watchports_release(watchports);
        return refs;
    }

    assert(task->watchports == NULL);
    task->watchports = watchports;

    for (uint32_t i = 0, j = 0; i < portwatch_count; i++) {
        ipc_port_t port = portwatch_ports[i];

        task_watchport_elem_init(&watchports->tw_elem[i], task, port);
        if (port == NULL) {
            task_watchport_elem_clear(&watchports->tw_elem[i]);
            continue;
        }

        ip_lock(port);
        imq_lock(&port->ip_messages);

        /* Check if port is in valid state to be setup as watchport */
        if (ipc_port_add_watchport_elem_locked(port, &watchports->tw_elem[i],
            &previous_elem_array[j]) != KERN_SUCCESS) {
            task_watchport_elem_clear(&watchports->tw_elem[i]);
            continue;
        }
        /* port and mqueue unlocked on return */

        ip_reference(port);
        task_watchports_retain(watchports);
        if (previous_elem_array[j] != NULL) {
            j++;
        }
    }

    /* Drop the reference on task_watchport struct returned by os_ref_init */
    refs = task_watchports_release(watchports);
    if (refs == 0) {
        task->watchports = NULL;
    }

    return refs;
}

/*
 * task_remove_turnstile_watchports_locked:
 *      Clear all turnstile boost on the task from watchports.
 *
 * Arguments:
 *      task: task to remove watchports from
 *      watchports: watchports structure for the task
 *      port_freelist: array of ports returned with ref to caller
 *
 * Conditions:
 *      ipc space of the task locked.
 *      array of ports with refs are returned in port_freelist
 */
static os_ref_count_t
task_remove_turnstile_watchports_locked(
    task_t                  task,
    struct task_watchports *watchports,
    ipc_port_t             *port_freelist)
{
    os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;

    for (uint32_t i = 0, j = 0; i < watchports->tw_elem_array_count; i++) {
        ipc_port_t port = watchports->tw_elem[i].twe_port;
        if (port == NULL) {
            continue;
        }

        /* Lock the port and check if it has the entry */
        ip_lock(port);
        imq_lock(&port->ip_messages);
        if (ipc_port_clear_watchport_elem_internal_conditional_locked(port,
            &watchports->tw_elem[i]) == KERN_SUCCESS) {
            task_watchport_elem_clear(&watchports->tw_elem[i]);
            port_freelist[j++] = port;
            refs = task_watchports_release(watchports);

            /* Check if all ports are cleaned */
            if (refs == 0) {
                task->watchports = NULL;
                break;
            }
        }
        /* mqueue and port unlocked by ipc_port_clear_watchport_elem_internal_conditional_locked */
    }
    return refs;
}

/*
 * task_watchports_alloc_init:
 *      Allocate and initialize task watchport struct.
 *
 * Conditions:
 *      Nothing locked.
 */
static struct task_watchports *
task_watchports_alloc_init(
    task_t   task,
    thread_t thread,
    uint32_t count)
{
    struct task_watchports *watchports = kalloc(sizeof(struct task_watchports) +
        count * sizeof(struct task_watchport_elem));

    task_reference(task);
    thread_reference(thread);
    watchports->tw_task = task;
    watchports->tw_thread = thread;
    watchports->tw_elem_array_count = count;
    os_ref_init(&watchports->tw_refcount, &task_watchports_refgrp);

    return watchports;
}

/*
 * task_watchports_deallocate:
 *      Deallocate task watchport struct.
 *
 * Conditions:
 *      Nothing locked.
 */
static void
task_watchports_deallocate(
    struct task_watchports *watchports)
{
    uint32_t portwatch_count = watchports->tw_elem_array_count;

    task_deallocate(watchports->tw_task);
    thread_deallocate(watchports->tw_thread);
    kfree(watchports, sizeof(struct task_watchports) + portwatch_count * sizeof(struct task_watchport_elem));
}
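
/*
 * The element array is allocated inline with the header, so a single
 * kalloc/kfree pair covers both. A generic sketch of the same
 * trailing-array layout (field and type names here are illustrative,
 * not the actual task_watchports definition):
 *
 *      struct vec {
 *          uint32_t    count;
 *          struct elem items[];            flexible array member
 *      };
 *
 *      struct vec *v = kalloc(sizeof(*v) + n * sizeof(struct elem));
 *      v->count = n;
 *      ...
 *      kfree(v, sizeof(*v) + n * sizeof(struct elem));
 */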

/*
 * task_watchport_elem_deallocate:
 *      Deallocate task watchport element and release its ref on task_watchport.
 *
 * Conditions:
 *      Nothing locked.
 */
void
task_watchport_elem_deallocate(
    struct task_watchport_elem *watchport_elem)
{
    os_ref_count_t refs = TASK_MAX_WATCHPORT_COUNT;
    task_t task = watchport_elem->twe_task;
    struct task_watchports *watchports = NULL;
    ipc_port_t port = NULL;

    assert(task != NULL);

    /* Take the space lock to modify the element */
    is_write_lock(task->itk_space);

    watchports = task->watchports;
    assert(watchports != NULL);

    port = watchport_elem->twe_port;
    assert(port != NULL);

    task_watchport_elem_clear(watchport_elem);
    refs = task_watchports_release(watchports);

    if (refs == 0) {
        task->watchports = NULL;
    }

    is_write_unlock(task->itk_space);

    ip_release(port);
    if (refs == 0) {
        task_watchports_deallocate(watchports);
    }
}

/*
 * task_has_watchports:
 *      Return TRUE if task has watchport boosts.
 *
 * Conditions:
 *      Nothing locked.
 */
boolean_t
task_has_watchports(task_t task)
{
    return task->watchports != NULL;
}

#if DEVELOPMENT || DEBUG

extern void IOSleep(int);

kern_return_t
task_disconnect_page_mappings(task_t task)
{
    int n;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    /*
     * This function strips all of the mappings from the pmap for the
     * specified task, forcing the task to re-fault every page it is
     * actively using; this lets us approximate the task's true working
     * set. We only engage if at least one thread in the task is
     * runnable, but we keep sweeping for a while (the limit is
     * arbitrarily set at 100 sweeps, to be revisited as we gain
     * experience) to get a better view of which areas are being
     * revisited, as opposed to only seeing the first fault of a page
     * after the task becomes runnable. In the future this may block
     * until awakened by a thread in this task being made runnable; for
     * now the user-level debug tool driving the sysctl polls
     * periodically.
     */
    for (n = 0; n < 100; n++) {
        thread_t  thread;
        boolean_t runnable;
        boolean_t do_unnest;
        int       page_count;

        runnable = FALSE;
        do_unnest = FALSE;

        task_lock(task);

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            if (thread->state & TH_RUN) {
                runnable = TRUE;
                break;
            }
        }
        if (n == 0) {
            task->task_disconnected_count++;
        }

        if (task->task_unnested == FALSE) {
            if (runnable == TRUE) {
                task->task_unnested = TRUE;
                do_unnest = TRUE;
            }
        }
        task_unlock(task);

        if (runnable == FALSE) {
            break;
        }

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
            task, do_unnest, task->task_disconnected_count, 0, 0);

        page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
            task, page_count, 0, 0, 0);

        /* Throttle: sleep for 1 ms after every fifth sweep. */
        if ((n % 5) == 4) {
            IOSleep(1);
        }
    }
    return KERN_SUCCESS;
}

#endif

#if CONFIG_FREEZE

/*
 * task_freeze:
 *
 * Freeze a task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
extern void vm_wake_compactor_swapper(void);
extern queue_head_t c_swapout_list_head;

kern_return_t
task_freeze(
    task_t    task,
    uint32_t *purgeable_count,
    uint32_t *wired_count,
    uint32_t *clean_count,
    uint32_t *dirty_count,
    uint32_t  dirty_budget,
    uint32_t *shared_count,
    int      *freezer_error_code,
    boolean_t eval_only)
{
    kern_return_t kr = KERN_SUCCESS;

    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

    while (task->changing_freeze_state) {
        assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
        task_unlock(task);
        thread_block(THREAD_CONTINUE_NULL);

        task_lock(task);
    }
    if (task->frozen) {
        task_unlock(task);
        return KERN_FAILURE;
    }
    task->changing_freeze_state = TRUE;

    task_unlock(task);

    kr = vm_map_freeze(task,
        purgeable_count,
        wired_count,
        clean_count,
        dirty_count,
        dirty_budget,
        shared_count,
        freezer_error_code,
        eval_only);

    task_lock(task);

    if ((kr == KERN_SUCCESS) && (eval_only == FALSE)) {
        task->frozen = TRUE;
    }

    task->changing_freeze_state = FALSE;
    thread_wakeup(&task->changing_freeze_state);

    task_unlock(task);

    if (VM_CONFIG_COMPRESSOR_IS_PRESENT &&
        (kr == KERN_SUCCESS) &&
        (eval_only == FALSE)) {
        vm_wake_compactor_swapper();
        /*
         * We do an explicit wakeup of the swapout thread here
         * because the compact_and_swap routines don't have
         * knowledge about this kind of "per-task packed c_segs"
         * and so will not be evaluating whether we need to do
         * a wakeup there.
         */
        thread_wakeup((event_t)&c_swapout_list_head);
    }

    return kr;
}

/*
 * task_thaw:
 *
 * Thaw a currently frozen task.
 *
 * Conditions:
 *      The caller holds a reference to the task
 */
kern_return_t
task_thaw(
    task_t task)
{
    if (task == TASK_NULL || task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    task_lock(task);

    while (task->changing_freeze_state) {
        assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
        task_unlock(task);
        thread_block(THREAD_CONTINUE_NULL);

        task_lock(task);
    }
    if (!task->frozen) {
        task_unlock(task);
        return KERN_FAILURE;
    }
    task->frozen = FALSE;

    task_unlock(task);

    return KERN_SUCCESS;
}

#endif /* CONFIG_FREEZE */
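
/*
 * The changing_freeze_state handshake used by task_freeze()/task_thaw()
 * above is the standard assert_wait()/thread_block()/thread_wakeup()
 * idiom: sleep on the flag's address while it is set, and have the owner
 * clear it and wake any sleepers. A minimal sketch of the pattern (the
 * `obj->busy` field and lock/unlock helpers are illustrative):
 *
 *      while (obj->busy) {                     waiter, obj locked
 *          assert_wait((event_t)&obj->busy, THREAD_UNINT);
 *          unlock(obj);
 *          thread_block(THREAD_CONTINUE_NULL);
 *          lock(obj);
 *      }
 *      obj->busy = TRUE;                       now the owner
 *      ...
 *      obj->busy = FALSE;                      done; obj locked
 *      thread_wakeup(&obj->busy);
 */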

kern_return_t
host_security_set_task_token(
    host_security_t  host_security,
    task_t           task,
    security_token_t sec_token,
    audit_token_t    audit_token,
    host_priv_t      host_priv)
{
    ipc_port_t    host_port;
    kern_return_t kr;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if (host_security == HOST_NULL) {
        return KERN_INVALID_SECURITY;
    }

    task_lock(task);
    task->sec_token = sec_token;
    task->audit_token = audit_token;
    task_unlock(task);

    if (host_priv != HOST_PRIV_NULL) {
        kr = host_get_host_priv_port(host_priv, &host_port);
    } else {
        kr = host_get_host_port(host_priv_self(), &host_port);
    }
    assert(kr == KERN_SUCCESS);

    kr = task_set_special_port_internal(task, TASK_HOST_PORT, host_port);
    return kr;
}

kern_return_t
task_send_trace_memory(
    __unused task_t   target_task,
    __unused uint32_t pid,
    __unused uint64_t uniqueid)
{
    return KERN_INVALID_ARGUMENT;
}

/*
 * This routine was added, pretty much exclusively, for registering the
 * RPC glue vector for in-kernel short-circuited tasks. Rather than
 * removing it completely, I have only disabled that feature (which was
 * the only feature at the time). It just appears that we are going to
 * want to add some user data to tasks in the future (i.e. bsd info,
 * task names, etc...), so I left it in the formal task interface.
 */
kern_return_t
task_set_info(
    task_t                          task,
    task_flavor_t                   flavor,
    __unused task_info_t            task_info_in, /* pointer to IN array */
    __unused mach_msg_type_number_t task_info_count)
{
    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    switch (flavor) {
#if CONFIG_ATM
    case TASK_TRACE_MEMORY_INFO:
    {
        if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        assert(task_info_in != NULL);
        task_trace_memory_info_t mem_info;
        mem_info = (task_trace_memory_info_t) task_info_in;
        kern_return_t kr = atm_register_trace_memory(task,
            mem_info->user_memory_address,
            mem_info->buffer_size);
        return kr;
    }

#endif
    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}

int radar_20146450 = 1;
kern_return_t
task_info(
    task_t                  task,
    task_flavor_t           flavor,
    task_info_t             task_info_out,
    mach_msg_type_number_t *task_info_count)
{
    kern_return_t error = KERN_SUCCESS;
    mach_msg_type_number_t original_task_info_count;

    if (task == TASK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    original_task_info_count = *task_info_count;
    task_lock(task);

    if ((task != current_task()) && (!task->active)) {
        task_unlock(task);
        return KERN_INVALID_ARGUMENT;
    }

    switch (flavor) {
    case TASK_BASIC_INFO_32:
    case TASK_BASIC2_INFO_32:
#if defined(__arm__) || defined(__arm64__)
    case TASK_BASIC_INFO_64:
#endif
    {
        task_basic_info_32_t basic_info;
        vm_map_t             map;
        clock_sec_t          secs;
        clock_usec_t         usecs;

        if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (task_basic_info_32_t)task_info_out;

        map = (task == kernel_task) ? kernel_map : task->map;
        basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
        if (flavor == TASK_BASIC2_INFO_32) {
            /*
             * The "BASIC2" flavor gets the maximum resident
             * size instead of the current resident size...
             */
            basic_info->resident_size = pmap_resident_max(map->pmap);
        } else {
            basic_info->resident_size = pmap_resident_count(map->pmap);
        }
        basic_info->resident_size *= PAGE_SIZE;

        basic_info->policy = ((task != kernel_task) ?
            POLICY_TIMESHARE : POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = TASK_BASIC_INFO_32_COUNT;
        break;
    }

#if defined(__arm__) || defined(__arm64__)
    case TASK_BASIC_INFO_64_2:
    {
        task_basic_info_64_2_t basic_info;
        vm_map_t               map;
        clock_sec_t            secs;
        clock_usec_t           usecs;

        if (*task_info_count < TASK_BASIC_INFO_64_2_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (task_basic_info_64_2_t)task_info_out;

        map = (task == kernel_task) ? kernel_map : task->map;
        basic_info->virtual_size = map->size;
        basic_info->resident_size =
            (mach_vm_size_t)(pmap_resident_count(map->pmap))
            * PAGE_SIZE_64;

        basic_info->policy = ((task != kernel_task) ?
            POLICY_TIMESHARE : POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = TASK_BASIC_INFO_64_2_COUNT;
        break;
    }

#else /* defined(__arm__) || defined(__arm64__) */
    case TASK_BASIC_INFO_64:
    {
        task_basic_info_64_t basic_info;
        vm_map_t             map;
        clock_sec_t          secs;
        clock_usec_t         usecs;

        if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (task_basic_info_64_t)task_info_out;

        map = (task == kernel_task) ? kernel_map : task->map;
        basic_info->virtual_size = map->size;
        basic_info->resident_size =
            (mach_vm_size_t)(pmap_resident_count(map->pmap))
            * PAGE_SIZE_64;

        basic_info->policy = ((task != kernel_task) ?
            POLICY_TIMESHARE : POLICY_RR);
        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = TASK_BASIC_INFO_64_COUNT;
        break;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    case MACH_TASK_BASIC_INFO:
    {
        mach_task_basic_info_t basic_info;
        vm_map_t               map;
        clock_sec_t            secs;
        clock_usec_t           usecs;

        if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        basic_info = (mach_task_basic_info_t)task_info_out;

        map = (task == kernel_task) ? kernel_map : task->map;

        basic_info->virtual_size = map->size;

        basic_info->resident_size =
            (mach_vm_size_t)(pmap_resident_count(map->pmap));
        basic_info->resident_size *= PAGE_SIZE_64;

        basic_info->resident_size_max =
            (mach_vm_size_t)(pmap_resident_max(map->pmap));
        basic_info->resident_size_max *= PAGE_SIZE_64;

        basic_info->policy = ((task != kernel_task) ?
            POLICY_TIMESHARE : POLICY_RR);

        basic_info->suspend_count = task->user_stop_count;

        absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
        basic_info->user_time.seconds =
            (typeof(basic_info->user_time.seconds))secs;
        basic_info->user_time.microseconds = usecs;

        absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
        basic_info->system_time.seconds =
            (typeof(basic_info->system_time.seconds))secs;
        basic_info->system_time.microseconds = usecs;

        *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
        break;
    }
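
    /*
     * A typical user-space query for the flavor above, a minimal sketch
     * (no error handling beyond the return check):
     *
     *      mach_task_basic_info_data_t info;
     *      mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
     *
     *      if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO,
     *          (task_info_t)&info, &count) == KERN_SUCCESS) {
     *          resident size in bytes is info.resident_size,
     *          peak resident size is info.resident_size_max
     *      }
     */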

    case TASK_THREAD_TIMES_INFO:
    {
        task_thread_times_info_t times_info;
        thread_t                 thread;

        if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        times_info = (task_thread_times_info_t) task_info_out;
        times_info->user_time.seconds = 0;
        times_info->user_time.microseconds = 0;
        times_info->system_time.seconds = 0;
        times_info->system_time.microseconds = 0;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            time_value_t user_time, system_time;

            if (thread->options & TH_OPT_IDLE_THREAD) {
                continue;
            }

            thread_read_times(thread, &user_time, &system_time, NULL);

            time_value_add(&times_info->user_time, &user_time);
            time_value_add(&times_info->system_time, &system_time);
        }

        *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
        break;
    }

    case TASK_ABSOLUTETIME_INFO:
    {
        task_absolutetime_info_t info;
        thread_t                 thread;

        if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_absolutetime_info_t)task_info_out;
        info->threads_user = info->threads_system = 0;

        info->total_user = task->total_user_time;
        info->total_system = task->total_system_time;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            uint64_t tval;
            spl_t    x;

            if (thread->options & TH_OPT_IDLE_THREAD) {
                continue;
            }

            x = splsched();
            thread_lock(thread);

            tval = timer_grab(&thread->user_timer);
            info->threads_user += tval;
            info->total_user += tval;

            tval = timer_grab(&thread->system_timer);
            if (thread->precise_user_kernel_time) {
                info->threads_system += tval;
                info->total_system += tval;
            } else {
                /* system_timer may represent either sys or user */
                info->threads_user += tval;
                info->total_user += tval;
            }

            thread_unlock(thread);
            splx(x);
        }

        *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
        break;
    }
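
    /*
     * The absolutetime flavor reports Mach absolute time units, which
     * user space converts with mach_timebase_info(). A minimal sketch:
     *
     *      task_absolutetime_info_data_t ab;
     *      mach_msg_type_number_t cnt = TASK_ABSOLUTETIME_INFO_COUNT;
     *      mach_timebase_info_data_t tb;
     *
     *      if (task_info(mach_task_self(), TASK_ABSOLUTETIME_INFO,
     *          (task_info_t)&ab, &cnt) == KERN_SUCCESS &&
     *          mach_timebase_info(&tb) == KERN_SUCCESS) {
     *          total user time in ns ~= ab.total_user * tb.numer / tb.denom
     *      }
     */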

    case TASK_DYLD_INFO:
    {
        task_dyld_info_t info;

        /*
         * We added the format field to TASK_DYLD_INFO output. For
         * temporary backward compatibility, accept the fact that
         * clients may ask for the old version - distinguished by the
         * size of the expected result structure.
         */
#define TASK_LEGACY_DYLD_INFO_COUNT \
        offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)

        if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_dyld_info_t)task_info_out;
        info->all_image_info_addr = task->all_image_info_addr;
        info->all_image_info_size = task->all_image_info_size;

        /* only set format on output for those expecting it */
        if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
            info->all_image_info_format = task_has_64Bit_addr(task) ?
                TASK_DYLD_ALL_IMAGE_INFO_64 :
                TASK_DYLD_ALL_IMAGE_INFO_32;
            *task_info_count = TASK_DYLD_INFO_COUNT;
        } else {
            *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
        }
        break;
    }
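
    /*
     * Debuggers use this flavor to locate dyld's all-image-info structure
     * in a target. A minimal user-space sketch, assuming `target` is a
     * task port with read access:
     *
     *      task_dyld_info_data_t dyld_info;
     *      mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
     *
     *      if (task_info(target, TASK_DYLD_INFO,
     *          (task_info_t)&dyld_info, &count) == KERN_SUCCESS) {
     *          read dyld_info.all_image_info_addr out of the target
     *          (e.g. with mach_vm_read), interpreting the data per
     *          dyld_info.all_image_info_format
     *      }
     */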

    case TASK_EXTMOD_INFO:
    {
        task_extmod_info_t info;
        void *p;

        if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        info = (task_extmod_info_t)task_info_out;

        p = get_bsdtask_info(task);
        if (p) {
            proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
        } else {
            bzero(info->task_uuid, sizeof(info->task_uuid));
        }
        info->extmod_statistics = task->extmod_statistics;
        *task_info_count = TASK_EXTMOD_INFO_COUNT;

        break;
    }

    case TASK_KERNELMEMORY_INFO:
    {
        task_kernelmemory_info_t tkm_info;
        ledger_amount_t          credit, debit;

        if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        tkm_info = (task_kernelmemory_info_t) task_info_out;
        tkm_info->total_palloc = 0;
        tkm_info->total_pfree = 0;
        tkm_info->total_salloc = 0;
        tkm_info->total_sfree = 0;

        if (task == kernel_task) {
            /*
             * All shared allocs/frees from other tasks count against
             * the kernel private memory usage. If we are looking up
             * info for the kernel task, gather from everywhere.
             */
            task_unlock(task);

            /* start by accounting for all the terminated tasks against the kernel */
            tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
            tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;

            /* count all other task/thread shared alloc/free against the kernel */
            lck_mtx_lock(&tasks_threads_lock);

            /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
            queue_iterate(&tasks, task, task_t, tasks) {
                if (task == kernel_task) {
                    if (ledger_get_entries(task->ledger,
                        task_ledgers.tkm_private, &credit,
                        &debit) == KERN_SUCCESS) {
                        tkm_info->total_palloc += credit;
                        tkm_info->total_pfree += debit;
                    }
                }
                if (!ledger_get_entries(task->ledger,
                    task_ledgers.tkm_shared, &credit, &debit)) {
                    tkm_info->total_palloc += credit;
                    tkm_info->total_pfree += debit;
                }
            }
            lck_mtx_unlock(&tasks_threads_lock);
        } else {
            if (!ledger_get_entries(task->ledger,
                task_ledgers.tkm_private, &credit, &debit)) {
                tkm_info->total_palloc = credit;
                tkm_info->total_pfree = debit;
            }
            if (!ledger_get_entries(task->ledger,
                task_ledgers.tkm_shared, &credit, &debit)) {
                tkm_info->total_salloc = credit;
                tkm_info->total_sfree = debit;
            }
            task_unlock(task);
        }

        *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
        return KERN_SUCCESS;
    }

    /* OBSOLETE */
    case TASK_SCHED_FIFO_INFO:
    {
        if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        error = KERN_INVALID_POLICY;
        break;
    }

    /* OBSOLETE */
    case TASK_SCHED_RR_INFO:
    {
        policy_rr_base_t rr_base;
        uint32_t quantum_time;
        uint64_t quantum_ns;

        if (*task_info_count < POLICY_RR_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        rr_base = (policy_rr_base_t) task_info_out;

        if (task != kernel_task) {
            error = KERN_INVALID_POLICY;
            break;
        }

        rr_base->base_priority = task->priority;

        quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
        absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

        rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

        *task_info_count = POLICY_RR_BASE_COUNT;
        break;
    }

    /* OBSOLETE */
    case TASK_SCHED_TIMESHARE_INFO:
    {
        policy_timeshare_base_t ts_base;

        if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        ts_base = (policy_timeshare_base_t) task_info_out;

        if (task == kernel_task) {
            error = KERN_INVALID_POLICY;
            break;
        }

        ts_base->base_priority = task->priority;

        *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
        break;
    }

1c79356b 4873
91447636
A
4874 case TASK_SECURITY_TOKEN:
4875 {
0a7de745 4876 security_token_t *sec_token_p;
1c79356b 4877
b0d623f7 4878 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
0a7de745
A
4879 error = KERN_INVALID_ARGUMENT;
4880 break;
b0d623f7 4881 }
1c79356b
A
4882
4883 sec_token_p = (security_token_t *) task_info_out;
4884
1c79356b 4885 *sec_token_p = task->sec_token;
1c79356b
A
4886
4887 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
91447636
A
4888 break;
4889 }
0a7de745 4890
91447636
A
4891 case TASK_AUDIT_TOKEN:
4892 {
0a7de745 4893 audit_token_t *audit_token_p;
55e303ae 4894
b0d623f7 4895 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
0a7de745
A
4896 error = KERN_INVALID_ARGUMENT;
4897 break;
b0d623f7 4898 }
55e303ae
A
4899
4900 audit_token_p = (audit_token_t *) task_info_out;
4901
55e303ae 4902 *audit_token_p = task->audit_token;
55e303ae
A
4903
4904 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
91447636
A
4905 break;
4906 }
0a7de745 4907
91447636 4908 case TASK_SCHED_INFO:
b0d623f7 4909 error = KERN_INVALID_ARGUMENT;
6d2010ae 4910 break;
1c79356b 4911
    case TASK_EVENTS_INFO:
    {
        task_events_info_t events_info;
        thread_t           thread;

        if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        events_info = (task_events_info_t) task_info_out;

        events_info->faults = task->faults;
        events_info->pageins = task->pageins;
        events_info->cow_faults = task->cow_faults;
        events_info->messages_sent = task->messages_sent;
        events_info->messages_received = task->messages_received;
        events_info->syscalls_mach = task->syscalls_mach;
        events_info->syscalls_unix = task->syscalls_unix;

        events_info->csw = task->c_switch;

        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            events_info->csw += thread->c_switch;
            events_info->syscalls_mach += thread->syscalls_mach;
            events_info->syscalls_unix += thread->syscalls_unix;
        }

        *task_info_count = TASK_EVENTS_INFO_COUNT;
        break;
    }

    case TASK_AFFINITY_TAG_INFO:
    {
        if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
            error = KERN_INVALID_ARGUMENT;
            break;
        }

        error = task_affinity_info(task, task_info_out, task_info_count);
        break;
    }
4b17d6b6
A
4955 case TASK_POWER_INFO:
4956 {
4b17d6b6
A
4957 if (*task_info_count < TASK_POWER_INFO_COUNT) {
4958 error = KERN_INVALID_ARGUMENT;
4959 break;
4960 }
4961
cb323159 4962 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL, NULL);
fe8ab488
A
4963 break;
4964 }
4965
4966 case TASK_POWER_INFO_V2:
4967 {
5ba3f43e 4968 if (*task_info_count < TASK_POWER_INFO_V2_COUNT_OLD) {
fe8ab488
A
4969 error = KERN_INVALID_ARGUMENT;
4970 break;
4971 }
4972 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
cb323159 4973 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, tpiv2, NULL);
39236c6e
A
4974 break;
4975 }
4b17d6b6 4976
39236c6e
A
4977 case TASK_VM_INFO:
4978 case TASK_VM_INFO_PURGEABLE:
4979 {
0a7de745
A
4980 task_vm_info_t vm_info;
4981 vm_map_t map;
4b17d6b6 4982
cb323159
A
4983#if __arm64__
4984 struct proc *p;
4985 uint32_t platform, sdk;
4986 p = current_proc();
4987 platform = proc_platform(p);
4988 sdk = proc_sdk(p);
4989 if (original_task_info_count > TASK_VM_INFO_REV2_COUNT &&
4990 platform == PLATFORM_IOS &&
4991 sdk != 0 &&
4992 (sdk >> 16) <= 12) {
4993 /*
4994 * Some iOS apps pass an incorrect value for
4995 * task_info_count, expressed in number of bytes
4996 * instead of number of "natural_t" elements.
4997 * For the sake of backwards binary compatibility
4998 * for apps built with an iOS12 or older SDK and using
4999 * the "rev2" data structure, let's fix task_info_count
5000 * for them, to avoid stomping past the actual end
5001 * of their buffer.
5002 */
5003#if DEVELOPMENT || DEBUG
5004 printf("%s:%d %d[%s] rdar://49484582 task_info_count %d -> %d platform %d sdk %d.%d.%d\n", __FUNCTION__, __LINE__, proc_pid(p), proc_name_address(p), original_task_info_count, TASK_VM_INFO_REV2_COUNT, platform, (sdk >> 16), ((sdk >> 8) & 0xff), (sdk & 0xff));
5005#endif /* DEVELOPMENT || DEBUG */
5006 DTRACE_VM4(workaround_task_vm_info_count,
5007 mach_msg_type_number_t, original_task_info_count,
5008 mach_msg_type_number_t, TASK_VM_INFO_REV2_COUNT,
5009 uint32_t, platform,
5010 uint32_t, sdk);
5011 original_task_info_count = TASK_VM_INFO_REV2_COUNT;
5012 *task_info_count = original_task_info_count;
5013 }
5014#endif /* __arm64__ */
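		/*
		 * Editor's aside (not part of the original source): the
		 * user-space bug the workaround above compensates for,
		 * sketched assuming the public <mach/task_info.h> constants.
		 * The count argument is in natural_t elements, not bytes.
		 */
#if 0 /* illustrative sketch only */
		task_vm_info_data_t vmi;
		mach_msg_type_number_t count;

		/* WRONG (some pre-iOS13 apps): a byte count, ~4x too large */
		count = sizeof(vmi);
		(void)task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vmi, &count);

		/* RIGHT: an element count; the kernel clamps it to the revision served */
		count = TASK_VM_INFO_COUNT;
		(void)task_info(mach_task_self(), TASK_VM_INFO, (task_info_t)&vmi, &count);
#endif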

		if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		vm_info = (task_vm_info_t)task_info_out;

		if (task == kernel_task) {
			map = kernel_map;
			/* no lock */
		} else {
			map = task->map;
			vm_map_lock_read(map);
		}

		vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size;
		vm_info->region_count = map->hdr.nentries;
		vm_info->page_size = vm_map_page_size(map);

		vm_info->resident_size = pmap_resident_count(map->pmap);
		vm_info->resident_size *= PAGE_SIZE;
		vm_info->resident_size_peak = pmap_resident_max(map->pmap);
		vm_info->resident_size_peak *= PAGE_SIZE;

#define _VM_INFO(_name) \
	vm_info->_name = ((mach_vm_size_t) map->pmap->stats._name) * PAGE_SIZE

		_VM_INFO(device);
		_VM_INFO(device_peak);
		_VM_INFO(external);
		_VM_INFO(external_peak);
		_VM_INFO(internal);
		_VM_INFO(internal_peak);
		_VM_INFO(reusable);
		_VM_INFO(reusable_peak);
		_VM_INFO(compressed);
		_VM_INFO(compressed_peak);
		_VM_INFO(compressed_lifetime);

		vm_info->purgeable_volatile_pmap = 0;
		vm_info->purgeable_volatile_resident = 0;
		vm_info->purgeable_volatile_virtual = 0;
		if (task == kernel_task) {
			/*
			 * We do not maintain the detailed stats for the
			 * kernel_pmap, so just count everything as
			 * "internal"...
			 */
			vm_info->internal = vm_info->resident_size;
			/*
			 * ... but since the memory held by the VM compressor
			 * in the kernel address space ought to be attributed
			 * to user-space tasks, we subtract it from "internal"
			 * to give memory reporting tools a more accurate idea
			 * of what the kernel itself is actually using, instead
			 * of making it look like the kernel is leaking memory
			 * when the system is under memory pressure.
			 */
			vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
			    PAGE_SIZE);
		} else {
			mach_vm_size_t volatile_virtual_size;
			mach_vm_size_t volatile_resident_size;
			mach_vm_size_t volatile_compressed_size;
			mach_vm_size_t volatile_pmap_size;
			mach_vm_size_t volatile_compressed_pmap_size;
			kern_return_t kr;

			if (flavor == TASK_VM_INFO_PURGEABLE) {
				kr = vm_map_query_volatile(
					map,
					&volatile_virtual_size,
					&volatile_resident_size,
					&volatile_compressed_size,
					&volatile_pmap_size,
					&volatile_compressed_pmap_size);
				if (kr == KERN_SUCCESS) {
					vm_info->purgeable_volatile_pmap =
					    volatile_pmap_size;
					if (radar_20146450) {
						vm_info->compressed -=
						    volatile_compressed_pmap_size;
					}
					vm_info->purgeable_volatile_resident =
					    volatile_resident_size;
					vm_info->purgeable_volatile_virtual =
					    volatile_virtual_size;
				}
			}
		}
		*task_info_count = TASK_VM_INFO_REV0_COUNT;

		if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
			vm_info->phys_footprint =
			    (mach_vm_size_t) get_task_phys_footprint(task);
			*task_info_count = TASK_VM_INFO_REV1_COUNT;
		}
		if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
			vm_info->min_address = map->min_offset;
			vm_info->max_address = map->max_offset;
			*task_info_count = TASK_VM_INFO_REV2_COUNT;
		}
		if (original_task_info_count >= TASK_VM_INFO_REV3_COUNT) {
			ledger_get_lifetime_max(task->ledger,
			    task_ledgers.phys_footprint,
			    &vm_info->ledger_phys_footprint_peak);
			ledger_get_balance(task->ledger,
			    task_ledgers.purgeable_nonvolatile,
			    &vm_info->ledger_purgeable_nonvolatile);
			ledger_get_balance(task->ledger,
			    task_ledgers.purgeable_nonvolatile_compressed,
			    &vm_info->ledger_purgeable_novolatile_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.purgeable_volatile,
			    &vm_info->ledger_purgeable_volatile);
			ledger_get_balance(task->ledger,
			    task_ledgers.purgeable_volatile_compressed,
			    &vm_info->ledger_purgeable_volatile_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.network_nonvolatile,
			    &vm_info->ledger_tag_network_nonvolatile);
			ledger_get_balance(task->ledger,
			    task_ledgers.network_nonvolatile_compressed,
			    &vm_info->ledger_tag_network_nonvolatile_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.network_volatile,
			    &vm_info->ledger_tag_network_volatile);
			ledger_get_balance(task->ledger,
			    task_ledgers.network_volatile_compressed,
			    &vm_info->ledger_tag_network_volatile_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.media_footprint,
			    &vm_info->ledger_tag_media_footprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.media_footprint_compressed,
			    &vm_info->ledger_tag_media_footprint_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.media_nofootprint,
			    &vm_info->ledger_tag_media_nofootprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.media_nofootprint_compressed,
			    &vm_info->ledger_tag_media_nofootprint_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.graphics_footprint,
			    &vm_info->ledger_tag_graphics_footprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.graphics_footprint_compressed,
			    &vm_info->ledger_tag_graphics_footprint_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.graphics_nofootprint,
			    &vm_info->ledger_tag_graphics_nofootprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.graphics_nofootprint_compressed,
			    &vm_info->ledger_tag_graphics_nofootprint_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.neural_footprint,
			    &vm_info->ledger_tag_neural_footprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.neural_footprint_compressed,
			    &vm_info->ledger_tag_neural_footprint_compressed);
			ledger_get_balance(task->ledger,
			    task_ledgers.neural_nofootprint,
			    &vm_info->ledger_tag_neural_nofootprint);
			ledger_get_balance(task->ledger,
			    task_ledgers.neural_nofootprint_compressed,
			    &vm_info->ledger_tag_neural_nofootprint_compressed);
			*task_info_count = TASK_VM_INFO_REV3_COUNT;
		}
		if (original_task_info_count >= TASK_VM_INFO_REV4_COUNT) {
			if (task->bsd_info) {
				vm_info->limit_bytes_remaining =
				    memorystatus_available_memory_internal(task->bsd_info);
			} else {
				vm_info->limit_bytes_remaining = 0;
			}
			*task_info_count = TASK_VM_INFO_REV4_COUNT;
		}
		if (original_task_info_count >= TASK_VM_INFO_REV5_COUNT) {
			thread_t thread;
			integer_t total = task->decompressions;
			queue_iterate(&task->threads, thread, thread_t, task_threads) {
				total += thread->decompressions;
			}
			vm_info->decompressions = total;
			*task_info_count = TASK_VM_INFO_REV5_COUNT;
		}

		if (task != kernel_task) {
			vm_map_unlock_read(map);
		}

		break;
	}
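	/*
	 * Editor's aside: the revision negotiation above, seen from the caller.
	 * The count passed in caps the newest structure revision the caller
	 * can accept; the count passed back reports the revision actually
	 * filled in. (A sketch assuming the TASK_VM_INFO_REV*_COUNT constants
	 * from <mach/task_info.h>.)
	 */
#if 0 /* illustrative sketch only */
	task_vm_info_data_t vmi;
	mach_msg_type_number_t count = TASK_VM_INFO_COUNT; /* newest known to caller */

	if (task_info(mach_task_self(), TASK_VM_INFO,
	    (task_info_t)&vmi, &count) == KERN_SUCCESS) {
		/* phys_footprint is only valid if the kernel served rev1 or later */
		if (count >= TASK_VM_INFO_REV1_COUNT) {
			printf("phys_footprint: %llu bytes\n", vmi.phys_footprint);
		}
	}
#endif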

	case TASK_WAIT_STATE_INFO:
	{
		/*
		 * Deprecated flavor. Currently allowing some results until all users
		 * stop calling it. The results may not be accurate.
		 */
		task_wait_state_info_t wait_state_info;
		uint64_t total_sfi_ledger_val = 0;

		if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		wait_state_info = (task_wait_state_info_t) task_info_out;

		wait_state_info->total_wait_state_time = 0;
		bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));

#if CONFIG_SCHED_SFI
		int i, prev_lentry = -1;
		int64_t val_credit, val_debit;

		for (i = 0; i < MAX_SFI_CLASS_ID; i++) {
			val_credit = 0;
			/*
			 * Checking prev_lentry != entry ensures that adjacent classes
			 * which share the same ledger do not add wait times twice.
			 * Note: use the ledger call to get data for each individual sfi class.
			 */
			if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
			    KERN_SUCCESS == ledger_get_entries(task->ledger,
			    task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
				total_sfi_ledger_val += val_credit;
			}
			prev_lentry = task_ledgers.sfi_wait_times[i];
		}

#endif /* CONFIG_SCHED_SFI */
		wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
		*task_info_count = TASK_WAIT_STATE_INFO_COUNT;

		break;
	}
	case TASK_VM_INFO_PURGEABLE_ACCOUNT:
	{
#if DEVELOPMENT || DEBUG
		pvm_account_info_t acnt_info;

		if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		if (task_info_out == NULL) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		acnt_info = (pvm_account_info_t) task_info_out;

		error = vm_purgeable_account(task, acnt_info);

		*task_info_count = PVM_ACCOUNT_INFO_COUNT;

		break;
#else /* DEVELOPMENT || DEBUG */
		error = KERN_NOT_SUPPORTED;
		break;
#endif /* DEVELOPMENT || DEBUG */
	}
	case TASK_FLAGS_INFO:
	{
		task_flags_info_t flags_info;

		if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}

		flags_info = (task_flags_info_t)task_info_out;

		/* only publish the 64-bit flags of the task */
		flags_info->flags = task->t_flags & (TF_64B_ADDR | TF_64B_DATA);

		*task_info_count = TASK_FLAGS_INFO_COUNT;
		break;
	}

	case TASK_DEBUG_INFO_INTERNAL:
	{
#if DEVELOPMENT || DEBUG
		task_debug_info_internal_t dbg_info;
		ipc_space_t space = task->itk_space;
		if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
			error = KERN_NOT_SUPPORTED;
			break;
		}

		if (task_info_out == NULL) {
			error = KERN_INVALID_ARGUMENT;
			break;
		}
		dbg_info = (task_debug_info_internal_t) task_info_out;
		dbg_info->ipc_space_size = 0;

		if (space) {
			is_read_lock(space);
			dbg_info->ipc_space_size = space->is_table_size;
			is_read_unlock(space);
		}

		dbg_info->suspend_count = task->suspend_count;

		error = KERN_SUCCESS;
		*task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
		break;
#else /* DEVELOPMENT || DEBUG */
		error = KERN_NOT_SUPPORTED;
		break;
#endif /* DEVELOPMENT || DEBUG */
	}
	default:
		error = KERN_INVALID_ARGUMENT;
	}

	task_unlock(task);
	return error;
}

/*
 * task_info_from_user
 *
 * When task_info is called from user space, this function runs as the
 * MIG server-side routine instead of dispatching straight into task_info,
 * which makes it possible to perform additional security checks on
 * task_port.
 *
 * In the case of TASK_DYLD_INFO, we require the more privileged
 * task_port, not the less-privileged task_name_port.
 */
kern_return_t
task_info_from_user(
	mach_port_t             task_port,
	task_flavor_t           flavor,
	task_info_t             task_info_out,
	mach_msg_type_number_t  *task_info_count)
{
	task_t task;
	kern_return_t ret;

	if (flavor == TASK_DYLD_INFO) {
		task = convert_port_to_task(task_port);
	} else {
		task = convert_port_to_task_name(task_port);
	}

	ret = task_info(task, flavor, task_info_out, task_info_count);

	task_deallocate(task);

	return ret;
}
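
/*
 * Editor's aside: the port distinction enforced above, from user space.
 * TASK_DYLD_INFO is served only for the more privileged task control port;
 * a task_name_port holder can still query flavors such as TASK_BASIC_INFO.
 * (Sketch; assumes only the public Mach task_info() API.)
 */
#if 0 /* illustrative sketch only */
task_dyld_info_data_t dyld_info;
mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;

/* mach_task_self() returns a control port, so this is allowed */
kern_return_t kr = task_info(mach_task_self(), TASK_DYLD_INFO,
    (task_info_t)&dyld_info, &count);
#endif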

/*
 * task_power_info_locked
 *
 * Returns power stats for the task.
 * Note: Called with task locked.
 */
void
task_power_info_locked(
	task_t                  task,
	task_power_info_t       info,
	gpu_energy_data_t       ginfo,
	task_power_info_v2_t    infov2,
	uint64_t                *runnable_time)
{
	thread_t thread;
	ledger_amount_t tmp;

	uint64_t runnable_time_sum = 0;

	task_lock_assert_owned(task);

	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
	    (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
	    (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);

	info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
	info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;

	info->total_user = task->total_user_time;
	info->total_system = task->total_system_time;
	runnable_time_sum = task->total_runnable_time;

#if CONFIG_EMBEDDED
	if (infov2) {
		infov2->task_energy = task->task_energy;
	}
#endif

	if (ginfo) {
		ginfo->task_gpu_utilisation = task->task_gpu_ns;
	}

	if (infov2) {
		infov2->task_ptime = task->total_ptime;
		infov2->task_pset_switches = task->ps_switch;
	}

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		uint64_t tval;
		spl_t x;

		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		x = splsched();
		thread_lock(thread);

		info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
		info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;

#if CONFIG_EMBEDDED
		if (infov2) {
			infov2->task_energy += ml_energy_stat(thread);
		}
#endif

		tval = timer_grab(&thread->user_timer);
		info->total_user += tval;

		if (infov2) {
			tval = timer_grab(&thread->ptime);
			infov2->task_ptime += tval;
			infov2->task_pset_switches += thread->ps_switch;
		}

		tval = timer_grab(&thread->system_timer);
		if (thread->precise_user_kernel_time) {
			info->total_system += tval;
		} else {
			/* system_timer may represent either sys or user */
			info->total_user += tval;
		}

		tval = timer_grab(&thread->runnable_timer);

		runnable_time_sum += tval;

		if (ginfo) {
			ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
		}
		thread_unlock(thread);
		splx(x);
	}

	if (runnable_time) {
		*runnable_time = runnable_time_sum;
	}
}
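
/*
 * Editor's aside: fetching the data the routine above assembles, via the
 * TASK_POWER_INFO_V2 flavor. The v2 structure embeds the v1 layout as its
 * cpu_energy field, which is why one locked pass fills both. (Sketch;
 * assumes the public task_power_info_v2 definitions.)
 */
#if 0 /* illustrative sketch only */
task_power_info_v2_data_t pinfo;
mach_msg_type_number_t count = TASK_POWER_INFO_V2_COUNT;

if (task_info(mach_task_self(), TASK_POWER_INFO_V2,
    (task_info_t)&pinfo, &count) == KERN_SUCCESS) {
	printf("interrupt wakeups: %llu\n",
	    (unsigned long long)pinfo.cpu_energy.task_interrupt_wakeups);
}
#endif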

/*
 * task_gpu_utilisation
 *
 * Returns the total gpu time used by all the threads of the task
 * (both dead and alive)
 */
uint64_t
task_gpu_utilisation(
	task_t  task)
{
	uint64_t gpu_time = 0;
#if !CONFIG_EMBEDDED
	thread_t thread;

	task_lock(task);
	gpu_time += task->task_gpu_ns;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;
		x = splsched();
		thread_lock(thread);
		gpu_time += ml_gpu_stat(thread);
		thread_unlock(thread);
		splx(x);
	}

	task_unlock(task);
#else /* CONFIG_EMBEDDED */
	/* silence compiler warning */
	(void)task;
#endif /* !CONFIG_EMBEDDED */
	return gpu_time;
}

/*
 * task_energy
 *
 * Returns the total energy used by all the threads of the task
 * (both dead and alive)
 */
uint64_t
task_energy(
	task_t  task)
{
	uint64_t energy = 0;
	thread_t thread;

	task_lock(task);
	energy += task->task_energy;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		spl_t x;
		x = splsched();
		thread_lock(thread);
		energy += ml_energy_stat(thread);
		thread_unlock(thread);
		splx(x);
	}

	task_unlock(task);
	return energy;
}

#if __AMP__

uint64_t
task_cpu_ptime(
	task_t  task)
{
	uint64_t cpu_ptime = 0;
	thread_t thread;

	task_lock(task);
	cpu_ptime += task->total_ptime;

	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		cpu_ptime += timer_grab(&thread->ptime);
	}

	task_unlock(task);
	return cpu_ptime;
}

#else /* __AMP__ */

uint64_t
task_cpu_ptime(
	__unused task_t  task)
{
	return 0;
}

#endif /* __AMP__ */

/* This function updates the cpu time in the arrays for each
 * effective and requested QoS class
 */
void
task_update_cpu_time_qos_stats(
	task_t  task,
	uint64_t        *eqos_stats,
	uint64_t        *rqos_stats)
{
	if (!eqos_stats && !rqos_stats) {
		return;
	}

	task_lock(task);
	thread_t thread;
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		if (thread->options & TH_OPT_IDLE_THREAD) {
			continue;
		}

		thread_update_qos_cpu_time(thread);
	}

	if (eqos_stats) {
		eqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_eqos_stats.cpu_time_qos_default;
		eqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_eqos_stats.cpu_time_qos_maintenance;
		eqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_eqos_stats.cpu_time_qos_background;
		eqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_eqos_stats.cpu_time_qos_utility;
		eqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_eqos_stats.cpu_time_qos_legacy;
		eqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_eqos_stats.cpu_time_qos_user_initiated;
		eqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_eqos_stats.cpu_time_qos_user_interactive;
	}

	if (rqos_stats) {
		rqos_stats[THREAD_QOS_DEFAULT] += task->cpu_time_rqos_stats.cpu_time_qos_default;
		rqos_stats[THREAD_QOS_MAINTENANCE] += task->cpu_time_rqos_stats.cpu_time_qos_maintenance;
		rqos_stats[THREAD_QOS_BACKGROUND] += task->cpu_time_rqos_stats.cpu_time_qos_background;
		rqos_stats[THREAD_QOS_UTILITY] += task->cpu_time_rqos_stats.cpu_time_qos_utility;
		rqos_stats[THREAD_QOS_LEGACY] += task->cpu_time_rqos_stats.cpu_time_qos_legacy;
		rqos_stats[THREAD_QOS_USER_INITIATED] += task->cpu_time_rqos_stats.cpu_time_qos_user_initiated;
		rqos_stats[THREAD_QOS_USER_INTERACTIVE] += task->cpu_time_rqos_stats.cpu_time_qos_user_interactive;
	}

	task_unlock(task);
}
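
/*
 * Editor's aside: a caller of the QoS accounting routine above supplies
 * accumulation arrays indexed by the THREAD_QOS_* constants, so they must
 * have at least THREAD_QOS_LAST entries (an assumption based on
 * <mach/thread_policy.h>).
 */
#if 0 /* illustrative sketch only */
uint64_t eqos[THREAD_QOS_LAST] = { 0 };
uint64_t rqos[THREAD_QOS_LAST] = { 0 };

task_update_cpu_time_qos_stats(current_task(), eqos, rqos);
/* eqos[THREAD_QOS_UTILITY] now holds CPU time charged at utility QoS */
#endif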

kern_return_t
task_purgable_info(
	task_t                  task,
	task_purgable_info_t    *stats)
{
	if (task == TASK_NULL || stats == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* Take task reference */
	task_reference(task);
	vm_purgeable_stats((vm_purgeable_info_t)stats, task);
	/* Drop task reference */
	task_deallocate(task);
	return KERN_SUCCESS;
}

void
task_vtimer_set(
	task_t          task,
	integer_t       which)
{
	thread_t thread;
	spl_t x;

	task_lock(task);

	task->vtimers |= which;

	switch (which) {
	case TASK_VTIMER_USER:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			if (thread->precise_user_kernel_time) {
				thread->vtimer_user_save = timer_grab(&thread->user_timer);
			} else {
				thread->vtimer_user_save = timer_grab(&thread->system_timer);
			}
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_PROF:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			thread->vtimer_prof_save = timer_grab(&thread->user_timer);
			thread->vtimer_prof_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;

	case TASK_VTIMER_RLIM:
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			x = splsched();
			thread_lock(thread);
			thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
			thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
			thread_unlock(thread);
			splx(x);
		}
		break;
	}

	task_unlock(task);
}

void
task_vtimer_clear(
	task_t          task,
	integer_t       which)
{
	assert(task == current_task());

	task_lock(task);

	task->vtimers &= ~which;

	task_unlock(task);
}

void
task_vtimer_update(
	__unused
	task_t          task,
	integer_t       which,
	uint32_t        *microsecs)
{
	thread_t thread = current_thread();
	uint32_t tdelt = 0;
	clock_sec_t secs = 0;
	uint64_t tsum;

	assert(task == current_task());

	spl_t s = splsched();
	thread_lock(thread);

	if ((task->vtimers & which) != (uint32_t)which) {
		thread_unlock(thread);
		splx(s);
		return;
	}

	switch (which) {
	case TASK_VTIMER_USER:
		if (thread->precise_user_kernel_time) {
			tdelt = (uint32_t)timer_delta(&thread->user_timer,
			    &thread->vtimer_user_save);
		} else {
			tdelt = (uint32_t)timer_delta(&thread->system_timer,
			    &thread->vtimer_user_save);
		}
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;

	case TASK_VTIMER_PROF:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		/* if the time delta is smaller than a usec, ignore */
		if (*microsecs != 0) {
			thread->vtimer_prof_save = tsum;
		}
		break;

	case TASK_VTIMER_RLIM:
		tsum = timer_grab(&thread->user_timer);
		tsum += timer_grab(&thread->system_timer);
		tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
		thread->vtimer_rlim_save = tsum;
		absolutetime_to_microtime(tdelt, &secs, microsecs);
		break;
	}

	thread_unlock(thread);
	splx(s);
}
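
/*
 * Editor's aside: the vtimer protocol tying the three routines above
 * together, roughly as a BSD interval-timer (setitimer-style) client would
 * drive it. A sketch, not lifted from the BSD side.
 */
#if 0 /* illustrative sketch only */
uint32_t usecs = 0;

task_vtimer_set(current_task(), TASK_VTIMER_PROF);   /* arm: snapshot timers */
/* ... current thread runs for a while ... */
task_vtimer_update(current_task(), TASK_VTIMER_PROF, &usecs);
/* usecs = user+system CPU time accrued by this thread since the snapshot */
task_vtimer_clear(current_task(), TASK_VTIMER_PROF);
#endif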

/*
 * task_assign:
 *
 * Change the assigned processor set for the task
 */
kern_return_t
task_assign(
	__unused task_t         task,
	__unused processor_set_t        new_pset,
	__unused boolean_t      assign_threads)
{
	return KERN_FAILURE;
}

/*
 * task_assign_default:
 *
 * Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t          task,
	boolean_t       assign_threads)
{
	return task_assign(task, &pset0, assign_threads);
}

/*
 * task_get_assignment
 *
 * Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
	task_t          task,
	processor_set_t *pset)
{
	if (!task || !task->active) {
		return KERN_FAILURE;
	}

	*pset = &pset0;

	return KERN_SUCCESS;
}

uint64_t
get_task_dispatchqueue_offset(
	task_t          task)
{
	return task->dispatchqueue_offset;
}

/*
 * task_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task. Policy must be a policy which is enabled for the
 * processor set. Change contained threads if requested.
 */
kern_return_t
task_policy(
	__unused task_t                 task,
	__unused policy_t               policy_id,
	__unused policy_base_t          base,
	__unused mach_msg_type_number_t count,
	__unused boolean_t              set_limit,
	__unused boolean_t              change)
{
	return KERN_FAILURE;
}

/*
 * task_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task. Policy can be any policy implemented by the
 * processor set, whether enabled or not. Change contained threads
 * if requested.
 */
kern_return_t
task_set_policy(
	__unused task_t                 task,
	__unused processor_set_t        pset,
	__unused policy_t               policy_id,
	__unused policy_base_t          base,
	__unused mach_msg_type_number_t base_count,
	__unused policy_limit_t         limit,
	__unused mach_msg_type_number_t limit_count,
	__unused boolean_t              change)
{
	return KERN_FAILURE;
}

kern_return_t
task_set_ras_pc(
	__unused task_t task,
	__unused vm_offset_t    pc,
	__unused vm_offset_t    endpc)
{
	return KERN_FAILURE;
}

void
task_synchronizer_destroy_all(task_t task)
{
	/*
	 * Destroy owned semaphores
	 */
	semaphore_destroy_all(task);
}

/*
 * Install default (machine-dependent) initial thread state
 * on the task. Subsequent thread creation will have this initial
 * state set on the thread by machine_thread_inherit_taskwide().
 * Flavors and structures are exactly the same as those to thread_set_state()
 */
kern_return_t
task_set_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return KERN_FAILURE;
	}

	ret = machine_task_set_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}

/*
 * Examine the default (machine-dependent) initial thread state
 * on the task, as set by task_set_state(). Flavors and structures
 * are exactly the same as those passed to thread_get_state().
 */
kern_return_t
task_get_state(
	task_t  task,
	int     flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count)
{
	kern_return_t ret;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);

	if (!task->active) {
		task_unlock(task);
		return KERN_FAILURE;
	}

	ret = machine_task_get_state(task, flavor, state, state_count);

	task_unlock(task);
	return ret;
}
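
/*
 * Editor's aside: the pair above mirrors thread_set_state()/thread_get_state()
 * flavors, letting a debugger-style client seed every future thread of a task
 * with one initial register state. The flavor below is an arbitrary example,
 * not the only one machine_task_set_state() accepts.
 */
#if 0 /* illustrative sketch only */
x86_debug_state64_t dbg = { 0 };    /* e.g. hardware watchpoint registers */

kern_return_t kr = task_set_state(task, x86_DEBUG_STATE64,
    (thread_state_t)&dbg, x86_DEBUG_STATE64_COUNT);
#endif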


static kern_return_t __attribute__((noinline, not_tail_called))
PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(
	mach_exception_code_t code,
	mach_exception_subcode_t subcode,
	void *reason)
{
#ifdef MACH_BSD
	if (1 == proc_selfpid()) {
		return KERN_NOT_SUPPORTED; // initproc is immune
	}
#endif
	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX] = {
		[0] = code,
		[1] = subcode,
	};
	task_t task = current_task();
	kern_return_t kr;

	/* (See jetsam-related comments below) */

	proc_memstat_terminated(task->bsd_info, TRUE);
	kr = task_enqueue_exception_with_corpse(task, EXC_GUARD, codes, 2, reason);
	proc_memstat_terminated(task->bsd_info, FALSE);
	return kr;
}

kern_return_t
task_violated_guard(
	mach_exception_code_t code,
	mach_exception_subcode_t subcode,
	void *reason)
{
	return PROC_VIOLATED_GUARD__SEND_EXC_GUARD_AND_SUSPEND(code, subcode, reason);
}


#if CONFIG_MEMORYSTATUS

boolean_t
task_get_memlimit_is_active(task_t task)
{
	assert(task != NULL);

	if (task->memlimit_is_active == 1) {
		return TRUE;
	} else {
		return FALSE;
	}
}

void
task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
{
	assert(task != NULL);

	if (memlimit_is_active) {
		task->memlimit_is_active = 1;
	} else {
		task->memlimit_is_active = 0;
	}
}

boolean_t
task_get_memlimit_is_fatal(task_t task)
{
	assert(task != NULL);

	if (task->memlimit_is_fatal == 1) {
		return TRUE;
	} else {
		return FALSE;
	}
}

void
task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
{
	assert(task != NULL);

	if (memlimit_is_fatal) {
		task->memlimit_is_fatal = 1;
	} else {
		task->memlimit_is_fatal = 0;
	}
}

boolean_t
task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
{
	boolean_t triggered = FALSE;

	assert(task == current_task());

	/*
	 * Returns TRUE if the task has already triggered an exc_resource exception.
	 */

	if (memlimit_is_active) {
		triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
	} else {
		triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
	}

	return triggered;
}

void
task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
{
	assert(task == current_task());

	/*
	 * We allow one exc_resource per process per active/inactive limit.
	 * The limit's fatal attribute does not come into play.
	 */

	if (memlimit_is_active) {
		task->memlimit_active_exc_resource = 1;
	} else {
		task->memlimit_inactive_exc_resource = 1;
	}
}

#define HWM_USERCORE_MINSPACE   250 // free space (in MB) required *after* core file creation

void __attribute__((noinline))
PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
{
	task_t task = current_task();
	int pid = 0;
	const char *procname = "unknown";
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
	boolean_t send_sync_exc_resource = FALSE;

#ifdef MACH_BSD
	pid = proc_selfpid();

	if (pid == 1) {
		/*
		 * Cannot have ReportCrash analyzing
		 * a suspended initproc.
		 */
		return;
	}

	if (task->bsd_info != NULL) {
		procname = proc_name_address(current_task()->bsd_info);
		send_sync_exc_resource = proc_send_synchronous_EXC_RESOURCE(current_task()->bsd_info);
	}
#endif
#if CONFIG_COREDUMP
	if (hwm_user_cores) {
		int error;
		uint64_t starttime, end;
		clock_sec_t secs = 0;
		uint32_t microsecs = 0;

		starttime = mach_absolute_time();
		/*
		 * Trigger a coredump of this process. Don't proceed unless we know we won't
		 * be filling up the disk; and ignore the core size resource limit for this
		 * core file.
		 */
		if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
			printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
		}
		/*
		 * coredump() leaves the task suspended.
		 */
		task_resume_internal(current_task());

		end = mach_absolute_time();
		absolutetime_to_microtime(end - starttime, &secs, &microsecs);
		printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
		    proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
	}
#endif /* CONFIG_COREDUMP */

	if (disable_exc_resource) {
		printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
		    "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
		return;
	}

	/*
	 * A task that has triggered an EXC_RESOURCE should not be
	 * jetsammed when the device is under memory pressure. Here
	 * we set the P_MEMSTAT_TERMINATED flag so that the process
	 * will be skipped if the memorystatus_thread wakes up.
	 */
	proc_memstat_terminated(current_task()->bsd_info, TRUE);

	code[0] = code[1] = 0;
	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
	EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);

	/*
	 * Do not generate a corpse fork if the violation is a fatal one
	 * or the process wants synchronous EXC_RESOURCE exceptions.
	 */
	if (is_fatal || send_sync_exc_resource || exc_via_corpse_forking == 0) {
		/* Do not send an EXC_RESOURCE if corpse_for_fatal_memkill is set */
		if (send_sync_exc_resource || corpse_for_fatal_memkill == 0) {
			/*
			 * Use the _internal_ variant so that no user-space
			 * process can resume our task from under us.
			 */
			task_suspend_internal(task);
			exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
			task_resume_internal(task);
		}
	} else {
		if (audio_active) {
			printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
			    "suppressed due to audio playback.\n", procname, pid, max_footprint_mb);
		} else {
			task_enqueue_exception_with_corpse(task, EXC_RESOURCE,
			    code, EXCEPTION_CODE_MAX, NULL);
		}
	}

	/*
	 * After the EXC_RESOURCE has been handled, we must clear the
	 * P_MEMSTAT_TERMINATED flag so that the process can again be
	 * considered for jetsam if the memorystatus_thread wakes up.
	 */
	proc_memstat_terminated(current_task()->bsd_info, FALSE); /* clear the flag */
}

/*
 * Callback invoked when a task exceeds its physical footprint limit.
 */
void
task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
{
	ledger_amount_t max_footprint, max_footprint_mb;
	task_t task;
	boolean_t is_warning;
	boolean_t memlimit_is_active;
	boolean_t memlimit_is_fatal;

	if (warning == LEDGER_WARNING_DIPPED_BELOW) {
		/*
		 * Task memory limits only provide a warning on the way up.
		 */
		return;
	} else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
		/*
		 * This task is in danger of violating a memory limit;
		 * it has exceeded a percentage level of the limit.
		 */
		is_warning = TRUE;
	} else {
		/*
		 * The task has exceeded the physical footprint limit.
		 * This is not a warning but a true limit violation.
		 */
		is_warning = FALSE;
	}

	task = current_task();

	ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
	max_footprint_mb = max_footprint >> 20;

	memlimit_is_active = task_get_memlimit_is_active(task);
	memlimit_is_fatal = task_get_memlimit_is_fatal(task);

	/*
	 * If this is an actual violation (not a warning), then generate an EXC_RESOURCE exception.
	 * We only generate the exception once per process per memlimit (active/inactive limit).
	 * To enforce this, we monitor state based on the memlimit's active/inactive attribute
	 * and we disable it by marking that memlimit as exception triggered.
	 */
	if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
		PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
		memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
		task_mark_has_triggered_exc_resource(task, memlimit_is_active);
	}

	memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
}

extern int proc_check_footprint_priv(void);

kern_return_t
task_set_phys_footprint_limit(
	task_t task,
	int new_limit_mb,
	int *old_limit_mb)
{
	kern_return_t error;

	boolean_t memlimit_is_active;
	boolean_t memlimit_is_fatal;

	if ((error = proc_check_footprint_priv())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * This call should probably be obsoleted.
	 * But for now, we default to current state.
	 */
	memlimit_is_active = task_get_memlimit_is_active(task);
	memlimit_is_fatal = task_get_memlimit_is_fatal(task);

	return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
}

kern_return_t
task_convert_phys_footprint_limit(
	int limit_mb,
	int *converted_limit_mb)
{
	if (limit_mb == -1) {
		/*
		 * No limit
		 */
		if (max_task_footprint != 0) {
			*converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);   /* bytes to MB */
		} else {
			*converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
		}
	} else {
		/* nothing to convert */
		*converted_limit_mb = limit_mb;
	}
	return KERN_SUCCESS;
}


kern_return_t
task_set_phys_footprint_limit_internal(
	task_t task,
	int new_limit_mb,
	int *old_limit_mb,
	boolean_t memlimit_is_active,
	boolean_t memlimit_is_fatal)
{
	ledger_amount_t old;
	kern_return_t ret;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);

	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/*
	 * Check that limit >> 20 will not give an "unexpected" 32-bit
	 * result. There are, however, implicit assumptions that -1 mb limit
	 * equates to LEDGER_LIMIT_INFINITY.
	 */
	assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));

	if (old_limit_mb) {
		*old_limit_mb = (int)(old >> 20);
	}

	if (new_limit_mb == -1) {
		/*
		 * Caller wishes to remove the limit.
		 */
		ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
		    max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
		    max_task_footprint ? max_task_footprint_warning_level : 0);

		task_lock(task);
		task_set_memlimit_is_active(task, memlimit_is_active);
		task_set_memlimit_is_fatal(task, memlimit_is_fatal);
		task_unlock(task);

		return KERN_SUCCESS;
	}

#ifdef CONFIG_NOMONITORS
	return KERN_SUCCESS;
#endif /* CONFIG_NOMONITORS */

	task_lock(task);

	if ((memlimit_is_active == task_get_memlimit_is_active(task)) &&
	    (memlimit_is_fatal == task_get_memlimit_is_fatal(task)) &&
	    (((ledger_amount_t)new_limit_mb << 20) == old)) {
		/*
		 * memlimit state is not changing
		 */
		task_unlock(task);
		return KERN_SUCCESS;
	}

	task_set_memlimit_is_active(task, memlimit_is_active);
	task_set_memlimit_is_fatal(task, memlimit_is_fatal);

	ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
	    (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);

	if (task == current_task()) {
		ledger_check_new_balance(current_thread(), task->ledger,
		    task_ledgers.phys_footprint);
	}

	task_unlock(task);

	return KERN_SUCCESS;
}

kern_return_t
task_get_phys_footprint_limit(
	task_t task,
	int *limit_mb)
{
	ledger_amount_t limit;
	kern_return_t ret;

	ret = ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/*
	 * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
	 * result. There are, however, implicit assumptions that -1 mb limit
	 * equates to LEDGER_LIMIT_INFINITY.
	 */
	assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
	*limit_mb = (int)(limit >> 20);

	return KERN_SUCCESS;
}
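
/*
 * Editor's aside: the set/get pair above in round-trip form. Passing
 * new_limit_mb == -1 removes the per-task limit, falling back to the global
 * max_task_footprint default (or no limit at all). A sketch only; it also
 * assumes the caller holds the footprint privilege.
 */
#if 0 /* illustrative sketch only */
int old_mb = 0, cur_mb = 0;

(void)task_set_phys_footprint_limit(task, 512 /* MB */, &old_mb);
(void)task_get_phys_footprint_limit(task, &cur_mb);
assert(cur_mb == 512);
#endif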
#else /* CONFIG_MEMORYSTATUS */
kern_return_t
task_set_phys_footprint_limit(
	__unused task_t task,
	__unused int new_limit_mb,
	__unused int *old_limit_mb)
{
	return KERN_FAILURE;
}

kern_return_t
task_get_phys_footprint_limit(
	__unused task_t task,
	__unused int *limit_mb)
{
	return KERN_FAILURE;
}
#endif /* CONFIG_MEMORYSTATUS */

void
task_set_thread_limit(task_t task, uint16_t thread_limit)
{
	assert(task != kernel_task);
	if (thread_limit <= TASK_MAX_THREAD_LIMIT) {
		task_lock(task);
		task->task_thread_limit = thread_limit;
		task_unlock(task);
	}
}

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component. Just export them as functions of the same name.
 */
boolean_t
is_kerneltask(task_t t)
{
	if (t == kernel_task) {
		return TRUE;
	}

	return FALSE;
}

boolean_t
is_corpsetask(task_t t)
{
	return task_is_a_corpse(t);
}

#undef current_task
task_t current_task(void);
task_t
current_task(void)
{
	return current_task_fast();
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
	task_t task)
{
	if (task != TASK_NULL) {
		task_reference_internal(task);
	}
}

/* defined in bsd/kern/kern_prot.c */
extern int get_audit_token_pid(audit_token_t *audit_token);

int
task_pid(task_t task)
{
	if (task) {
		return get_audit_token_pid(&task->audit_token);
	}
	return -1;
}


/*
 * This routine finds a thread in a task by its unique id
 * Returns a referenced thread or THREAD_NULL if the thread was not found
 *
 * TODO: This is super inefficient - it's an O(threads in task) list walk!
 * We should make a tid hash, or transition all tid clients to thread ports
 *
 * Precondition: No locks held (will take task lock)
 */
thread_t
task_findtid(task_t task, uint64_t tid)
{
	thread_t self = current_thread();
	thread_t found_thread = THREAD_NULL;
	thread_t iter_thread = THREAD_NULL;

	/* Short-circuit the lookup if we're looking up ourselves */
	if (tid == self->thread_id || tid == TID_NULL) {
		assert(self->task == task);

		thread_reference(self);

		return self;
	}

	task_lock(task);

	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
		if (iter_thread->thread_id == tid) {
			found_thread = iter_thread;
			thread_reference(found_thread);
			break;
		}
	}

	task_unlock(task);

	return found_thread;
}
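
/*
 * Editor's aside: callers of the O(threads) lookup above own the returned
 * reference and must drop it when done.
 */
#if 0 /* illustrative sketch only */
thread_t t = task_findtid(task, tid);
if (t != THREAD_NULL) {
	/* ... inspect the thread ... */
	thread_deallocate(t);
}
#endif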

int
pid_from_task(task_t task)
{
	int pid = -1;

	if (task->bsd_info) {
		pid = proc_pid(task->bsd_info);
	} else {
		pid = task_pid(task);
	}

	return pid;
}

/*
 * Control the CPU usage monitor for a task.
 */
kern_return_t
task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
{
	int error = KERN_SUCCESS;

	if (*flags & CPUMON_MAKE_FATAL) {
		task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
	} else {
		error = KERN_INVALID_ARGUMENT;
	}

	return error;
}

/*
 * Control the wakeups monitor for a task.
 */
kern_return_t
task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
{
	ledger_t ledger = task->ledger;

	task_lock(task);
	if (*flags & WAKEMON_GET_PARAMS) {
		ledger_amount_t limit;
		uint64_t period;

		ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
		ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);

		if (limit != LEDGER_LIMIT_INFINITY) {
			/*
			 * An active limit means the wakeups monitor is enabled.
			 */
			*rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
			*flags = WAKEMON_ENABLE;
			if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
				*flags |= WAKEMON_MAKE_FATAL;
			}
		} else {
			*flags = WAKEMON_DISABLE;
			*rate_hz = -1;
		}

		/*
		 * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
		 */
		task_unlock(task);
		return KERN_SUCCESS;
	}

	if (*flags & WAKEMON_ENABLE) {
		if (*flags & WAKEMON_SET_DEFAULTS) {
			*rate_hz = task_wakeups_monitor_rate;
		}

#ifndef CONFIG_NOMONITORS
		if (*flags & WAKEMON_MAKE_FATAL) {
			task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
		}
#endif /* CONFIG_NOMONITORS */

		if (*rate_hz <= 0) {
			task_unlock(task);
			return KERN_INVALID_ARGUMENT;
		}

#ifndef CONFIG_NOMONITORS
		ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
		    task_wakeups_monitor_ustackshots_trigger_pct);
		ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
		ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
#endif /* CONFIG_NOMONITORS */
	} else if (*flags & WAKEMON_DISABLE) {
		/*
		 * Caller wishes to disable wakeups monitor on the task.
		 *
		 * Disable telemetry if it was triggered by the wakeups monitor, and
		 * remove the limit & callback on the wakeups ledger entry.
		 */
#if CONFIG_TELEMETRY
		telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
#endif
		ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
		ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
	}

	task_unlock(task);
	return KERN_SUCCESS;
}
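
/*
 * Editor's note on the arithmetic above: the ledger stores the monitor as an
 * absolute wakeup budget per refill period, so WAKEMON_GET_PARAMS recovers
 * the rate as limit / (period in seconds). With the defaults assumed here
 * (150 wakes/sec measured over a 300-second interval), enabling the monitor
 * sets limit = 150 * 300 = 45000 wakeups and period = 300 * NSEC_PER_SEC,
 * and the GET path returns 45000 / 300 = 150 Hz.
 */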

void
task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
{
	if (warning == LEDGER_WARNING_ROSE_ABOVE) {
#if CONFIG_TELEMETRY
		/*
		 * This task is in danger of violating the wakeups monitor. Enable telemetry on this task
		 * so there are micro-stackshots available if and when EXC_RESOURCE is triggered.
		 */
		telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
#endif
		return;
	}

#if CONFIG_TELEMETRY
	/*
	 * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
	 * exceeded the limit, turn telemetry off for the task.
	 */
	telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
#endif

	if (warning == 0) {
		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
	}
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
{
	task_t task = current_task();
	int pid = 0;
	const char *procname = "unknown";
	boolean_t fatal;
	kern_return_t kr;
#ifdef EXC_RESOURCE_MONITORS
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
	struct ledger_entry_info lei;

#ifdef MACH_BSD
	pid = proc_selfpid();
	if (task->bsd_info != NULL) {
		procname = proc_name_address(current_task()->bsd_info);
	}
#endif

	ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);

	/*
	 * Disable the exception notification so we don't overwhelm
	 * the listener with an endless stream of redundant exceptions.
	 * TODO: detect whether another thread is already reporting the violation.
	 */
	uint32_t flags = WAKEMON_DISABLE;
	task_wakeups_monitor_ctl(task, &flags, NULL);

	fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
	trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
	os_log(OS_LOG_DEFAULT, "process %s[%d] caught waking the CPU %llu times "
	    "over ~%llu seconds, averaging %llu wakes / second and "
	    "violating a %slimit of %llu wakes over %llu seconds.\n",
	    procname, pid,
	    lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
	    lei.lei_last_refill == 0 ? 0 :
	    (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
	    fatal ? "FATAL " : "",
	    lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);

	kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
	    fatal ? kRNFatalLimitFlag : 0);
	if (kr) {
		printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
	}

#ifdef EXC_RESOURCE_MONITORS
	if (disable_exc_resource) {
		printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
		    "suppressed by a boot-arg\n", procname, pid);
		return;
	}
	if (audio_active) {
		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
		    "suppressed due to audio playback\n", procname, pid);
		return;
	}
	if (lei.lei_last_refill == 0) {
		os_log(OS_LOG_DEFAULT, "process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
		    "suppressed due to lei.lei_last_refill = 0\n", procname, pid);
	}

	code[0] = code[1] = 0;
	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
	EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
	    NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
	EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
	    lei.lei_last_refill);
	EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
	    NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
#endif /* EXC_RESOURCE_MONITORS */

	if (fatal) {
		task_terminate_internal(task);
	}
}
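
/*
 * Editor's aside: how an EXC_RESOURCE handler would unpack the code words
 * built above. The decode macros are assumed to come from
 * <kern/exc_resource.h>, mirroring the encode macros used here.
 */
#if 0 /* illustrative sketch only */
if (EXC_RESOURCE_DECODE_RESOURCE_TYPE(code[0]) == RESOURCE_TYPE_WAKEUPS &&
    EXC_RESOURCE_DECODE_FLAVOR(code[0]) == FLAVOR_WAKEUPS_MONITOR) {
	uint64_t permitted = EXC_RESOURCE_CPUMONITOR_DECODE_WAKEUPS_PERMITTED(code[0]);
	uint64_t observed = EXC_RESOURCE_CPUMONITOR_DECODE_WAKEUPS_OBSERVED(code[1]);
	/* permitted/observed are wakes per second */
}
#endif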

static boolean_t
global_update_logical_writes(int64_t io_delta, int64_t *global_write_count)
{
	int64_t old_count, new_count;
	boolean_t needs_telemetry;

	do {
		new_count = old_count = *global_write_count;
		new_count += io_delta;
		if (new_count >= io_telemetry_limit) {
			new_count = 0;
			needs_telemetry = TRUE;
		} else {
			needs_telemetry = FALSE;
		}
	} while (!OSCompareAndSwap64(old_count, new_count, global_write_count));
	return needs_telemetry;
}
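
/*
 * Editor's note: the loop above is a lock-free accumulate-and-reset. Each
 * iteration snapshots the shared counter, computes the new total, and
 * publishes it only if no other writer raced in between (OSCompareAndSwap64
 * returns TRUE only when it swaps). Exactly one racing caller observes a
 * threshold crossing, because that caller also atomically resets the count
 * to zero. The bare pattern, as a sketch:
 */
#if 0 /* illustrative sketch only */
int64_t *global_write_count = &global_logical_writes_count;
int64_t io_delta = 4096;
int64_t snap;

do {
	snap = *global_write_count;                      /* snapshot */
} while (!OSCompareAndSwap64(snap, snap + io_delta,  /* publish or retry */
    (UInt64 *)global_write_count));
#endif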
void
task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
{
	int64_t io_delta = 0;
	int64_t * global_counter_to_update;
	boolean_t needs_telemetry = FALSE;
	boolean_t is_external_device = FALSE;
	int ledger_to_update = 0;
	struct task_writes_counters * writes_counters_to_update;

	if ((!task) || (!io_size) || (!vp)) {
		return;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
	    task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
	DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);

	// Is the drive backing this vnode internal or external to the system?
	if (vnode_isonexternalstorage(vp) == false) {
		global_counter_to_update = &global_logical_writes_count;
		ledger_to_update = task_ledgers.logical_writes;
		writes_counters_to_update = &task->task_writes_counters_internal;
		is_external_device = FALSE;
	} else {
		global_counter_to_update = &global_logical_writes_to_external_count;
		ledger_to_update = task_ledgers.logical_writes_to_external;
		writes_counters_to_update = &task->task_writes_counters_external;
		is_external_device = TRUE;
	}

	switch (flags) {
	case TASK_WRITE_IMMEDIATE:
		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_immediate_writes));
		ledger_credit(task->ledger, ledger_to_update, io_size);
		if (!is_external_device) {
			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
		}
		break;
	case TASK_WRITE_DEFERRED:
		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_deferred_writes));
		ledger_credit(task->ledger, ledger_to_update, io_size);
		if (!is_external_device) {
			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
		}
		break;
	case TASK_WRITE_INVALIDATED:
		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_invalidated_writes));
		ledger_debit(task->ledger, ledger_to_update, io_size);
		if (!is_external_device) {
			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, FALSE, io_size);
		}
		break;
	case TASK_WRITE_METADATA:
		OSAddAtomic64(io_size, (SInt64 *)&(writes_counters_to_update->task_metadata_writes));
		ledger_credit(task->ledger, ledger_to_update, io_size);
		if (!is_external_device) {
			coalition_io_ledger_update(task, FLAVOR_IO_LOGICAL_WRITES, TRUE, io_size);
		}
		break;
	}

	io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
	if (io_telemetry_limit != 0) {
		/* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
		needs_telemetry = global_update_logical_writes(io_delta, global_counter_to_update);
		if (needs_telemetry && !is_external_device) {
			act_set_io_telemetry_ast(current_thread());
		}
	}
}

/*
 * Control the I/O monitor for a task.
 */
kern_return_t
task_io_monitor_ctl(task_t task, uint32_t *flags)
{
	ledger_t ledger = task->ledger;

	task_lock(task);
	if (*flags & IOMON_ENABLE) {
		/* Configure the physical I/O ledger */
		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
	} else if (*flags & IOMON_DISABLE) {
		/*
		 * Caller wishes to disable I/O monitor on the task.
		 */
		ledger_disable_refill(ledger, task_ledgers.physical_writes);
		ledger_disable_callback(ledger, task_ledgers.physical_writes);
	}

	task_unlock(task);
	return KERN_SUCCESS;
}
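
/*
 * Ledger callback for the physical-writes I/O monitor; the notification is
 * sent only on an actual limit hit (warning == 0), not on warnings.
 */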
void
task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
{
	if (warning == 0) {
		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
	}
}
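
/*
 * The conspicuous all-caps name below is deliberate: it is what shows up
 * in backtraces when the kernel posts this EXC_RESOURCE notification.
 */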
void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
	int pid = 0;
	task_t task = current_task();
#ifdef EXC_RESOURCE_MONITORS
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
	struct ledger_entry_info lei;
	kern_return_t kr;

#ifdef MACH_BSD
	pid = proc_selfpid();
#endif
	/*
	 * Get the ledger entry info. We need to do this before disabling the exception
	 * to get correct values for all fields.
	 */
	switch (flavor) {
	case FLAVOR_IO_PHYSICAL_WRITES:
		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
		break;
	}

	/*
	 * Disable the exception notification so we don't overwhelm
	 * the listener with an endless stream of redundant exceptions.
	 * TODO: detect whether another thread is already reporting the violation.
	 */
	uint32_t flags = IOMON_DISABLE;
	task_io_monitor_ctl(task, &flags);

	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
	}
	os_log(OS_LOG_DEFAULT, "process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit : %lld MB per %lld secs]\n",
	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));

	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
	if (kr) {
		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
	}

#ifdef EXC_RESOURCE_MONITORS
	code[0] = code[1] = 0;
	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
#endif /* EXC_RESOURCE_MONITORS */
}

/* Placeholders for the task set/get voucher interfaces */
kern_return_t
task_get_mach_voucher(
	task_t task,
	mach_voucher_selector_t __unused which,
	ipc_voucher_t *voucher)
{
	if (TASK_NULL == task) {
		return KERN_INVALID_TASK;
	}

	*voucher = NULL;
	return KERN_SUCCESS;
}

kern_return_t
task_set_mach_voucher(
	task_t task,
	ipc_voucher_t __unused voucher)
{
	if (TASK_NULL == task) {
		return KERN_INVALID_TASK;
	}

	return KERN_SUCCESS;
}

kern_return_t
task_swap_mach_voucher(
	__unused task_t task,
	__unused ipc_voucher_t new_voucher,
	ipc_voucher_t *in_out_old_voucher)
{
	/*
	 * Currently this function is only called from a MIG generated
	 * routine which doesn't release the reference on the voucher
	 * addressed by in_out_old_voucher. To avoid leaking this reference,
	 * a call to release it has been added here.
	 */
	ipc_voucher_release(*in_out_old_voucher);
	return KERN_NOT_SUPPORTED;
}
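
/*
 * Allow or deny the task's access to the GPU. The setter takes the task
 * lock; the flag can be read without it.
 */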
void
task_set_gpu_denied(task_t task, boolean_t denied)
{
	task_lock(task);

	if (denied) {
		task->t_flags |= TF_GPU_DENIED;
	} else {
		task->t_flags &= ~TF_GPU_DENIED;
	}

	task_unlock(task);
}

boolean_t
task_is_gpu_denied(task_t task)
{
	/* We don't need the lock to read this flag */
	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
}
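
/*
 * Return the number of VM map entries in the task's address map (the
 * kernel map for the kernel task).
 */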
uint64_t
get_task_memory_region_count(task_t task)
{
	vm_map_t map;
	map = (task == kernel_task) ? kernel_map : task->map;
	return (uint64_t)get_map_nentries(map);
}
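
/*
 * Emit kdebug events describing one dyld image: its UUID, load address,
 * and filesystem/object IDs, packed differently for LP64 and ILP32
 * tracepoint payloads.
 */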
static void
kdebug_trace_dyld_internal(uint32_t base_code,
    struct dyld_kernel_image_info *info)
{
	static_assert(sizeof(info->uuid) >= 16);

#if defined(__LP64__)
	uint64_t *uuid = (uint64_t *)&(info->uuid);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
	    uuid[1], info->load_addr,
	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
	    0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
	    (uint64_t)info->fsobjid.fid_objno |
	    ((uint64_t)info->fsobjid.fid_generation << 32),
	    0, 0, 0, 0);
#else /* defined(__LP64__) */
	uint32_t *uuid = (uint32_t *)&(info->uuid);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
	    uuid[1], uuid[2], uuid[3], 0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
	    info->fsobjid.fid_objno, 0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
	    info->fsobjid.fid_generation, 0, 0, 0, 0);
#endif /* !defined(__LP64__) */
}
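
/*
 * Validate and copy in an array of dyld image infos from the incoming VM
 * copy object, then trace each entry. When the relevant kdebug events are
 * disabled, the copy is discarded and the call succeeds trivially; only
 * the current task may trace its own images.
 */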
static kern_return_t
kdebug_trace_dyld(task_t task, uint32_t base_code,
    vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
{
	kern_return_t kr;
	dyld_kernel_image_info_array_t infos;
	vm_map_offset_t map_data;
	vm_offset_t data;

	if (!infos_copy) {
		return KERN_INVALID_ADDRESS;
	}

	if (!kdebug_enable ||
	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) {
		vm_map_copy_discard(infos_copy);
		return KERN_SUCCESS;
	}

	if (task == NULL || task != current_task()) {
		return KERN_INVALID_TASK;
	}

	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);

	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
		kdebug_trace_dyld_internal(base_code, &(infos[i]));
	}

	data = CAST_DOWN(vm_offset_t, map_data);
	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
	return KERN_SUCCESS;
}
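
/*
 * MIG entry points used by dyld to record image map/unmap activity and
 * shared-cache registration in the kdebug stream.
 */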
kern_return_t
task_register_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
	    (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_unregister_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
	    (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_get_dyld_image_infos(__unused task_t task,
    __unused dyld_kernel_image_info_array_t * dyld_images,
    __unused mach_msg_type_number_t * dyld_imagesCnt)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_shared_cache_image_info(task_t task,
    dyld_kernel_image_info_t cache_img,
    __unused boolean_t no_cache,
    __unused boolean_t private_cache)
{
	if (task == NULL || task != current_task()) {
		return KERN_INVALID_TASK;
	}

	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
	return KERN_SUCCESS;
}

kern_return_t
task_register_dyld_set_dyld_state(__unused task_t task,
    __unused uint8_t dyld_state)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_get_process_state(__unused task_t task,
    __unused dyld_kernel_process_info_t * dyld_process_state)
{
	return KERN_NOT_SUPPORTED;
}
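
/*
 * Return basic hardware counter totals for a task. With MONOTONIC
 * configured, TASK_INSPECT_BASIC_COUNTS reports the task's fixed
 * instruction and cycle counts; otherwise the call is unsupported.
 */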
kern_return_t
task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor,
    task_inspect_info_t info_out, mach_msg_type_number_t *size_in_out)
{
#if MONOTONIC
	task_t task = (task_t)task_insp;
	kern_return_t kr = KERN_SUCCESS;
	mach_msg_type_number_t size;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	size = *size_in_out;

	switch (flavor) {
	case TASK_INSPECT_BASIC_COUNTS: {
		struct task_inspect_basic_counts *bc;
		uint64_t task_counts[MT_CORE_NFIXED] = { 0 };

		if (size < TASK_INSPECT_BASIC_COUNTS_COUNT) {
			kr = KERN_INVALID_ARGUMENT;
			break;
		}

		mt_fixed_task_counts(task, task_counts);
		bc = (struct task_inspect_basic_counts *)info_out;
#ifdef MT_CORE_INSTRS
		bc->instructions = task_counts[MT_CORE_INSTRS];
#else /* defined(MT_CORE_INSTRS) */
		bc->instructions = 0;
#endif /* !defined(MT_CORE_INSTRS) */
		bc->cycles = task_counts[MT_CORE_CYCLES];
		size = TASK_INSPECT_BASIC_COUNTS_COUNT;
		break;
	}
	default:
		kr = KERN_INVALID_ARGUMENT;
		break;
	}

	if (kr == KERN_SUCCESS) {
		*size_in_out = size;
	}
	return kr;
#else /* MONOTONIC */
#pragma unused(task_insp, flavor, info_out, size_in_out)
	return KERN_NOT_SUPPORTED;
#endif /* !MONOTONIC */
}
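
/*
 * Secluded memory: tasks marked eligible (task_could_use_secluded_mem) can
 * be switched into or out of using the secluded pool; the number of tasks
 * currently allowed to use it is tracked in num_tasks_can_use_secluded_mem.
 */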
#if CONFIG_SECLUDED_MEMORY
int num_tasks_can_use_secluded_mem = 0;

void
task_set_can_use_secluded_mem(
	task_t task,
	boolean_t can_use_secluded_mem)
{
	if (!task->task_could_use_secluded_mem) {
		return;
	}
	task_lock(task);
	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
	task_unlock(task);
}

void
task_set_can_use_secluded_mem_locked(
	task_t task,
	boolean_t can_use_secluded_mem)
{
	assert(task->task_could_use_secluded_mem);
	if (can_use_secluded_mem &&
	    secluded_for_apps && /* global boot-arg */
	    !task->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem >= 0);
		OSAddAtomic(+1,
		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
		task->task_can_use_secluded_mem = TRUE;
	} else if (!can_use_secluded_mem &&
	    task->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem > 0);
		OSAddAtomic(-1,
		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
		task->task_can_use_secluded_mem = FALSE;
	}
}

void
task_set_could_use_secluded_mem(
	task_t task,
	boolean_t could_use_secluded_mem)
{
	task->task_could_use_secluded_mem = could_use_secluded_mem;
}

void
task_set_could_also_use_secluded_mem(
	task_t task,
	boolean_t could_also_use_secluded_mem)
{
	task->task_could_also_use_secluded_mem = could_also_use_secluded_mem;
}

boolean_t
task_can_use_secluded_mem(
	task_t task,
	boolean_t is_alloc)
{
	if (task->task_can_use_secluded_mem) {
		assert(task->task_could_use_secluded_mem);
		assert(num_tasks_can_use_secluded_mem > 0);
		return TRUE;
	}
	if (task->task_could_also_use_secluded_mem &&
	    num_tasks_can_use_secluded_mem > 0) {
		assert(num_tasks_can_use_secluded_mem > 0);
		return TRUE;
	}

	/*
	 * If a single task is using more than some amount of
	 * memory, allow it to dip into secluded and also begin
	 * suppression of secluded memory until the task exits.
	 */
	if (is_alloc && secluded_shutoff_trigger != 0) {
		uint64_t phys_used = get_task_phys_footprint(task);
		if (phys_used > secluded_shutoff_trigger) {
			start_secluded_suppression(task);
			return TRUE;
		}
	}

	return FALSE;
}

boolean_t
task_could_use_secluded_mem(
	task_t task)
{
	return task->task_could_use_secluded_mem;
}

boolean_t
task_could_also_use_secluded_mem(
	task_t task)
{
	return task->task_could_also_use_secluded_mem;
}
#endif /* CONFIG_SECLUDED_MEMORY */

queue_head_t *
task_io_user_clients(task_t task)
{
	return &task->io_user_clients;
}
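
/*
 * Record whether this task has asked to be sent a message when it is
 * suspended as an app.
 */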
void
task_set_message_app_suspended(task_t task, boolean_t enable)
{
	task->message_app_suspended = enable;
}
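
/*
 * Carry fields that must survive exec from the old task to its
 * replacement; currently just the virtual timer state.
 */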
void
task_copy_fields_for_exec(task_t dst_task, task_t src_task)
{
	dst_task->vtimers = src_task->vtimers;
}

#if DEVELOPMENT || DEBUG
int vm_region_footprint = 0;
#endif /* DEVELOPMENT || DEBUG */

boolean_t
task_self_region_footprint(void)
{
#if DEVELOPMENT || DEBUG
	if (vm_region_footprint) {
		/* system-wide override */
		return TRUE;
	}
#endif /* DEVELOPMENT || DEBUG */
	return current_task()->task_region_footprint;
}

void
task_self_region_footprint_set(
	boolean_t newval)
{
	task_t curtask;

	curtask = current_task();
	task_lock(curtask);
	if (newval) {
		curtask->task_region_footprint = TRUE;
	} else {
		curtask->task_region_footprint = FALSE;
	}
	task_unlock(curtask);
}
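
/*
 * Set or clear TF_DARKWAKE_MODE, which tags the task as running during a
 * dark (maintenance) wake.
 */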
void
task_set_darkwake_mode(task_t task, boolean_t set_mode)
{
	assert(task);

	task_lock(task);

	if (set_mode) {
		task->t_flags |= TF_DARKWAKE_MODE;
	} else {
		task->t_flags &= ~(TF_DARKWAKE_MODE);
	}

	task_unlock(task);
}

boolean_t
task_get_darkwake_mode(task_t task)
{
	assert(task);
	return (task->t_flags & TF_DARKWAKE_MODE) != 0;
}
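
/*
 * Get/set the task's EXC_GUARD behavior, constrained to the bits permitted
 * by TASK_EXC_GUARD_ALL.
 */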
kern_return_t
task_get_exc_guard_behavior(
	task_t task,
	task_exc_guard_behavior_t *behaviorp)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_TASK;
	}
	*behaviorp = task->task_exc_guard;
	return KERN_SUCCESS;
}

#ifndef TASK_EXC_GUARD_ALL
/* Temporary define until two branches are merged */
#define TASK_EXC_GUARD_ALL (TASK_EXC_GUARD_VM_ALL | 0xf0)
#endif

kern_return_t
task_set_exc_guard_behavior(
	task_t task,
	task_exc_guard_behavior_t behavior)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_TASK;
	}
	if (behavior & ~TASK_EXC_GUARD_ALL) {
		return KERN_INVALID_VALUE;
	}
	task->task_exc_guard = behavior;
	return KERN_SUCCESS;
}
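
/*
 * Footprint entitlement handling (arm64): one-way switches that relax a
 * task's memory footprint limit; the flag is re-checked under the task
 * lock so the memorystatus side effects are applied only once.
 */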
#if __arm64__
extern int legacy_footprint_entitlement_mode;
extern void memorystatus_act_on_legacy_footprint_entitlement(proc_t, boolean_t);
extern void memorystatus_act_on_ios13extended_footprint_entitlement(proc_t);

void
task_set_legacy_footprint(
	task_t task)
{
	task_lock(task);
	task->task_legacy_footprint = TRUE;
	task_unlock(task);
}

void
task_set_extra_footprint_limit(
	task_t task)
{
	if (task->task_extra_footprint_limit) {
		return;
	}
	task_lock(task);
	if (task->task_extra_footprint_limit) {
		task_unlock(task);
		return;
	}
	task->task_extra_footprint_limit = TRUE;
	task_unlock(task);
	memorystatus_act_on_legacy_footprint_entitlement(task->bsd_info, TRUE);
}

void
task_set_ios13extended_footprint_limit(
	task_t task)
{
	if (task->task_ios13extended_footprint_limit) {
		return;
	}
	task_lock(task);
	if (task->task_ios13extended_footprint_limit) {
		task_unlock(task);
		return;
	}
	task->task_ios13extended_footprint_limit = TRUE;
	task_unlock(task);
	memorystatus_act_on_ios13extended_footprint_entitlement(task->bsd_info);
}
#endif /* __arm64__ */

static inline ledger_amount_t
task_ledger_get_balance(
	ledger_t ledger,
	int ledger_idx)
{
	ledger_amount_t amount;
	amount = 0;
	ledger_get_balance(ledger, ledger_idx, &amount);
	return amount;
}

/*
 * Gather the amount of memory counted in a task's footprint due to
 * being in a specific set of ledgers.
 */
void
task_ledgers_footprint(
	ledger_t ledger,
	ledger_amount_t *ledger_resident,
	ledger_amount_t *ledger_compressed)
{
	*ledger_resident = 0;
	*ledger_compressed = 0;

	/* purgeable non-volatile memory */
	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile);
	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.purgeable_nonvolatile_compressed);

	/* "default" tagged memory */
	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint);
	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.tagged_footprint_compressed);

	/* "network" currently never counts in the footprint... */

	/* "media" tagged memory */
	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.media_footprint);
	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.media_footprint_compressed);

	/* "graphics" tagged memory */
	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint);
	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.graphics_footprint_compressed);

	/* "neural" tagged memory */
	*ledger_resident += task_ledger_get_balance(ledger, task_ledgers.neural_footprint);
	*ledger_compressed += task_ledger_get_balance(ledger, task_ledgers.neural_footprint_compressed);
}

void
task_set_memory_ownership_transfer(
	task_t task,
	boolean_t value)
{
	task_lock(task);
	task->task_can_transfer_memory_ownership = value;
	task_unlock(task);
}
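
/*
 * Snapshot the VM objects owned by a task into the caller-supplied query
 * buffer (len bytes). With a NULL buffer, only the count of owned objects
 * is returned through num.
 */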
void
task_copy_vmobjects(task_t task, vm_object_query_t query, int len, int64_t* num)
{
	vm_object_t find_vmo;
	unsigned int i = 0;
	unsigned int vmobj_limit = len / sizeof(vm_object_query_data_t);

	task_objq_lock(task);
	if (query != NULL) {
		queue_iterate(&task->task_objq, find_vmo, vm_object_t, task_objq)
		{
			vm_object_query_t p = &query[i];

			/*
			 * Clear the entire vm_object_query_t struct as we are using
			 * only the first 6 bits in the uint64_t bitfield for this
			 * anonymous struct member.
			 */
			bzero(p, sizeof(*p));

			p->object_id = (vm_object_id_t) VM_KERNEL_ADDRPERM(find_vmo);
			p->virtual_size = find_vmo->internal ? find_vmo->vo_size : 0;
			p->resident_size = find_vmo->resident_page_count * PAGE_SIZE;
			p->wired_size = find_vmo->wired_page_count * PAGE_SIZE;
			p->reusable_size = find_vmo->reusable_page_count * PAGE_SIZE;
			p->vo_no_footprint = find_vmo->vo_no_footprint;
			p->vo_ledger_tag = find_vmo->vo_ledger_tag;
			p->purgable = find_vmo->purgable;

			if (find_vmo->internal && find_vmo->pager_created && find_vmo->pager != NULL) {
				p->compressed_size = vm_compressor_pager_get_count(find_vmo->pager) * PAGE_SIZE;
			} else {
				p->compressed_size = 0;
			}

			i++;

			/* Make sure to not overrun */
			if (i == vmobj_limit) {
				break;
			}
		}
	} else {
		i = task->task_owned_objects;
	}
	task_objq_unlock(task);

	*num = i;
}
#if __has_feature(ptrauth_calls)

#define PAC_EXCEPTION_ENTITLEMENT "com.apple.private.pac.exception"

void
task_set_pac_exception_fatal_flag(
	task_t task)
{
	assert(task != TASK_NULL);

	if (!IOTaskHasEntitlement(task, PAC_EXCEPTION_ENTITLEMENT)) {
		return;
	}

	task_lock(task);
	task->t_flags |= TF_PAC_EXC_FATAL;
	task_unlock(task);
}

bool
task_is_pac_exception_fatal(
	task_t task)
{
	uint32_t flags = 0;

	assert(task != TASK_NULL);

	flags = os_atomic_load(&task->t_flags, relaxed);
	return (bool)(flags & TF_PAC_EXC_FATAL);
}
#endif /* __has_feature(ptrauth_calls) */