/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/task.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 * Task management primitives implementation.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/host_priv.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/semaphore.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach/sdt.h>

#include <ipc/ipc_importance.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_hash.h>

#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/coalition.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>	/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/host.h>
#include <kern/clock.h>
#include <kern/timer.h>
#include <kern/assert.h>
#include <kern/sync_lock.h>
#include <kern/affinity.h>
#include <kern/exc_resource.h>
#include <kern/machine.h>
#include <kern/policy_internal.h>

#include <corpses/task_corpse.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>		/* for kernel_map, ipc_kernel_map */
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/resource.h>
#include <sys/signalvar.h>	/* for coredump */

/*
 * Exported interfaces
 */

#include <mach/task_server.h>
#include <mach/mach_host_server.h>
#include <mach/host_security_server.h>
#include <mach/mach_port_server.h>

#include <vm/vm_shared_region.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>

#if CONFIG_ATM
#include <atm/atm_internal.h>
#endif

#include <kern/sfi.h>		/* picks up ledger.h */

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

#if KPERF
extern int kpc_force_all_ctrs(task_t, int);
#endif

task_t			kernel_task;
zone_t			task_zone;
lck_attr_t		task_lck_attr;
lck_grp_t		task_lck_grp;
lck_grp_attr_t		task_lck_grp_attr;

extern int exc_via_corpse_forking;
extern int unify_corpse_blob_alloc;
extern int corpse_for_fatal_memkill;

/* Flag set by core audio when audio is playing. Used to stifle EXC_RESOURCE generation when active. */
int audio_active = 0;

zinfo_usage_store_t tasks_tkm_private;
zinfo_usage_store_t tasks_tkm_shared;

/* A container to accumulate statistics for expired tasks */
expired_task_statistics_t	dead_task_statistics;
lck_spin_t			dead_task_statistics_lock;

ledger_template_t task_ledger_template = NULL;

struct _task_ledger_indices task_ledgers __attribute__((used)) =
	{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	 { 0 /* initialized at runtime */},
#ifdef CONFIG_BANK
	 -1, -1,
#endif
	 -1, -1,
	};

/* System sleep state */
boolean_t tasks_suspend_state;


void init_task_ledgers(void);
void task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1);
void task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void);
void __attribute__((noinline)) PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal);
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor);

kern_return_t task_suspend_internal(task_t);
kern_return_t task_resume_internal(task_t);
static kern_return_t task_start_halt_locked(task_t task, boolean_t should_mark_corpse);
int proc_list_uptrs(void *p, uint64_t *udata_buffer, int size);

extern kern_return_t iokit_task_terminate(task_t task);

extern kern_return_t exception_deliver(thread_t, exception_type_t, mach_exception_data_t, mach_msg_type_number_t, struct exception_action *, lck_mtx_t *);
extern void bsd_copythreadname(void *dst_uth, void *src_uth);
extern kern_return_t thread_resume(thread_t thread);

// Warn tasks when they hit 80% of their memory limit.
#define PHYS_FOOTPRINT_WARNING_LEVEL 80

#define TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT	150	/* wakeups per second */
#define TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL	300	/* in seconds. */

/*
 * Level (in terms of percentage of the limit) at which the wakeups monitor triggers telemetry.
 *
 * (i.e. when the task's wakeups rate exceeds 70% of the limit, start taking user
 *  stacktraces, aka micro-stackshots)
 */
#define TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER	70
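
/*
 * Illustrative arithmetic (not from the original source): at the default
 * limit of 150 wakeups/sec, the 70% trigger means micro-stackshot telemetry
 * begins once a task sustains 150 * 70 / 100 = 105 wakeups/sec.
 */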

int task_wakeups_monitor_interval;	/* In seconds. Time period over which wakeups rate is observed */
int task_wakeups_monitor_rate;		/* In hz. Maximum allowable wakeups per task before EXC_RESOURCE is sent */

int task_wakeups_monitor_ustackshots_trigger_pct; /* Percentage. Level at which we start gathering telemetry. */

int disable_exc_resource; /* Global override to suppress EXC_RESOURCE for resource monitor violations. */

ledger_amount_t max_task_footprint = 0;		/* Per-task limit on physical memory consumption in bytes */
int max_task_footprint_warning_level = 0;	/* Per-task limit warning percentage */
int max_task_footprint_mb = 0;			/* Per-task limit on physical memory consumption in megabytes */

/* I/O Monitor Limits */
#define IOMON_DEFAULT_LIMIT	(20480ull)	/* MB of logical/physical I/O */
#define IOMON_DEFAULT_INTERVAL	(86400ull)	/* in seconds */

uint64_t task_iomon_limit_mb;		/* Per-task I/O monitor limit in MBs */
uint64_t task_iomon_interval_secs;	/* Per-task I/O monitor interval in secs */

#define IO_TELEMETRY_DEFAULT_LIMIT	(10ll * 1024ll * 1024ll)
int64_t io_telemetry_limit;			/* Threshold to take a microstackshot (0 indicates I/O telemetry is turned off) */
int64_t global_logical_writes_count = 0;	/* Global count for logical writes */
static boolean_t global_update_logical_writes(int64_t);

#if MACH_ASSERT
int pmap_ledgers_panic = 1;
#endif /* MACH_ASSERT */

int task_max = CONFIG_TASK_MAX; /* Max number of tasks */

#if CONFIG_COREDUMP
int hwm_user_cores = 0; /* high watermark violations generate user core files */
#endif

#ifdef MACH_BSD
extern void	proc_getexecutableuuid(void *, unsigned char *, unsigned long);
extern int	proc_pid(struct proc *p);
extern int	proc_selfpid(void);
extern char	*proc_name_address(struct proc *p);
extern uint64_t	get_dispatchqueue_offset_from_proc(void *);

#if CONFIG_MEMORYSTATUS
extern void	proc_memstat_terminated(struct proc* p, boolean_t set);
extern void	memorystatus_on_ledger_footprint_exceeded(int warning, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
extern void	memorystatus_log_exception(const int max_footprint_mb, boolean_t memlimit_is_active, boolean_t memlimit_is_fatal);
#endif /* CONFIG_MEMORYSTATUS */

#endif /* MACH_BSD */

/* Forwards */

static void task_hold_locked(task_t task);
static void task_wait_locked(task_t task, boolean_t until_not_runnable);
static void task_release_locked(task_t task);

static void task_synchronizer_destroy_all(task_t task);

void
task_backing_store_privileged(
			task_t task)
{
	task_lock(task);
	task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(task);
	return;
}


void
task_set_64bit(
		task_t task,
		boolean_t is64bit)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm64__)
	thread_t thread;
#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm64__) */

	task_lock(task);

	if (is64bit) {
		if (task_has_64BitAddr(task))
			goto out;
		task_set_64BitAddr(task);
	} else {
		if ( !task_has_64BitAddr(task))
			goto out;
		task_clear_64BitAddr(task);
	}
	/* FIXME: On x86, the thread save state flavor can diverge from the
	 * task's 64-bit feature flag due to the 32-bit/64-bit register save
	 * state dichotomy. Since we can be pre-empted in this interval,
	 * certain routines may observe the thread as being in an inconsistent
	 * state with respect to its task's 64-bitness.
	 */

#if defined(__x86_64__) || defined(__arm64__)
	queue_iterate(&task->threads, thread, thread_t, task_threads) {
		thread_mtx_lock(thread);
		machine_thread_switch_addrmode(thread);
		thread_mtx_unlock(thread);

		if (thread == current_thread()) {
			uint64_t arg1, arg2;
			int urgency;
			spl_t spl = splsched();
			/*
			 * This call tells the platform layer that the current
			 * thread changed its 32-bitness. Other threads were no
			 * longer on core when their 32-bitness changed, but
			 * current_thread() is on core, and the previous call to
			 * machine_thread_going_on_core() reported a 32-bitness
			 * that is now wrong.
			 *
			 * This is needed for bring-up; a different callback
			 * should be used in the future.
			 */
			thread_lock(thread);
			urgency = thread_get_urgency(thread, &arg1, &arg2);
			machine_thread_going_on_core(thread, urgency, 0, 0);
			thread_unlock(thread);
			splx(spl);
		}
	}
#endif /* defined(__x86_64__) || defined(__arm64__) */

out:
	task_unlock(task);
}


void
task_set_dyld_info(task_t task, mach_vm_address_t addr, mach_vm_size_t size)
{
	task_lock(task);
	task->all_image_info_addr = addr;
	task->all_image_info_size = size;
	task_unlock(task);
}

void
task_atm_reset(__unused task_t task) {

#if CONFIG_ATM
	if (task->atm_context != NULL) {
		atm_task_descriptor_destroy(task->atm_context);
		task->atm_context = NULL;
	}
#endif

}

void
task_bank_reset(__unused task_t task) {

#if CONFIG_BANK
	if (task->bank_context != NULL) {
		bank_task_destroy(task);
	}
#endif

}

/*
 * NOTE: This should only be called when the P_LINTRANSIT
 *	 flag is set (the proc_trans lock is held) on the
 *	 proc associated with the task.
 */
void
task_bank_init(__unused task_t task) {

#if CONFIG_BANK
	if (task->bank_context != NULL) {
		panic("Task bank init called with non null bank context for task: %p and bank_context: %p", task, task->bank_context);
	}
	bank_task_initialize(task);
#endif

}

void
task_set_did_exec_flag(task_t task)
{
	task->t_procflags |= TPF_DID_EXEC;
}

void
task_clear_exec_copy_flag(task_t task)
{
	task->t_procflags &= ~TPF_EXEC_COPY;
}

/*
 * This wait event is t_procflags instead of t_flags because t_flags is volatile
 *
 * TODO: store the flags in the same place as the event
 * rdar://problem/28501994
 */
event_t
task_get_return_wait_event(task_t task)
{
	return (event_t)&task->t_procflags;
}

void
task_clear_return_wait(task_t task)
{
	task_lock(task);

	task->t_flags &= ~TF_LRETURNWAIT;

	if (task->t_flags & TF_LRETURNWAITER) {
		thread_wakeup(task_get_return_wait_event(task));
		task->t_flags &= ~TF_LRETURNWAITER;
	}

	task_unlock(task);
}

void
task_wait_to_return(void)
{
	task_t task;

	task = current_task();
	task_lock(task);

	if (task->t_flags & TF_LRETURNWAIT) {
		do {
			task->t_flags |= TF_LRETURNWAITER;
			assert_wait(task_get_return_wait_event(task), THREAD_UNINT);
			task_unlock(task);

			thread_block(THREAD_CONTINUE_NULL);

			task_lock(task);
		} while (task->t_flags & TF_LRETURNWAIT);
	}

	task_unlock(task);

	thread_bootstrap_return();
}
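
/*
 * Usage sketch (illustrative, not a kernel entry point): the two functions
 * above form a rendezvous. A newly created task's first thread parks in
 * task_wait_to_return() until some other context releases it:
 *
 *	task_wait_to_return();		// new thread blocks on TF_LRETURNWAIT ...
 *	...
 *	task_clear_return_wait(task);	// ... until this wakes it up
 */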

boolean_t
task_is_exec_copy(task_t task)
{
	return task_is_exec_copy_internal(task);
}

boolean_t
task_did_exec(task_t task)
{
	return task_did_exec_internal(task);
}

boolean_t
task_is_active(task_t task)
{
	return task->active;
}

#if TASK_REFERENCE_LEAK_DEBUG
#include <kern/btlog.h>

static btlog_t *task_ref_btlog;
#define TASK_REF_OP_INCR	0x1
#define TASK_REF_OP_DECR	0x2

#define TASK_REF_NUM_RECORDS	100000
#define TASK_REF_BTDEPTH	7

void
task_reference_internal(task_t task)
{
	void *bt[TASK_REF_BTDEPTH];
	int numsaved = 0;

	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);

	(void)hw_atomic_add(&(task)->ref_count, 1);
	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_INCR,
			bt, numsaved);
}

uint32_t
task_deallocate_internal(task_t task)
{
	void *bt[TASK_REF_BTDEPTH];
	int numsaved = 0;

	numsaved = OSBacktrace(bt, TASK_REF_BTDEPTH);

	btlog_add_entry(task_ref_btlog, task, TASK_REF_OP_DECR,
			bt, numsaved);
	return hw_atomic_sub(&(task)->ref_count, 1);
}

#endif /* TASK_REFERENCE_LEAK_DEBUG */
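
/*
 * Illustrative note (added commentary): with TASK_REFERENCE_LEAK_DEBUG
 * enabled, every reference-count transition is recorded with a backtrace
 * in task_ref_btlog, so a task that never reaches a zero reference count
 * can be diagnosed by pairing its TASK_REF_OP_INCR records against its
 * TASK_REF_OP_DECR records for that task pointer.
 */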

void
task_init(void)
{

	lck_grp_attr_setdefault(&task_lck_grp_attr);
	lck_grp_init(&task_lck_grp, "task", &task_lck_grp_attr);
	lck_attr_setdefault(&task_lck_attr);
	lck_mtx_init(&tasks_threads_lock, &task_lck_grp, &task_lck_attr);
	lck_mtx_init(&tasks_corpse_lock, &task_lck_grp, &task_lck_attr);

	task_zone = zinit(
			sizeof(struct task),
			task_max * sizeof(struct task),
			TASK_CHUNK * sizeof(struct task),
			"tasks");

	zone_change(task_zone, Z_NOENCRYPT, TRUE);


	/*
	 * Configure per-task memory limit.
	 * The boot-arg is interpreted as Megabytes,
	 * and takes precedence over the device tree.
	 * Setting the boot-arg to 0 disables task limits.
	 */
	if (!PE_parse_boot_argn("max_task_pmem", &max_task_footprint_mb,
			sizeof (max_task_footprint_mb))) {
		/*
		 * No limit was found in boot-args, so go look in the device tree.
		 */
		if (!PE_get_default("kern.max_task_pmem", &max_task_footprint_mb,
				sizeof(max_task_footprint_mb))) {
			/*
			 * No limit was found in device tree.
			 */
			max_task_footprint_mb = 0;
		}
	}

	if (max_task_footprint_mb != 0) {
#if CONFIG_MEMORYSTATUS
		if (max_task_footprint_mb < 50) {
			printf("Warning: max_task_pmem %d below minimum.\n",
			    max_task_footprint_mb);
			max_task_footprint_mb = 50;
		}
		printf("Limiting task physical memory footprint to %d MB\n",
		    max_task_footprint_mb);

		max_task_footprint = (ledger_amount_t)max_task_footprint_mb * 1024 * 1024; // Convert MB to bytes

		/*
		 * Configure the per-task memory limit warning level.
		 * This is computed as a percentage.
		 */
		max_task_footprint_warning_level = 0;

		if (max_mem < 0x40000000) {
			/*
			 * On devices with < 1GB of memory:
			 * -- set warnings to 50MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 50) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 50) * 100) / max_task_footprint_mb;
			}
		} else {
			/*
			 * On devices with >= 1GB of memory:
			 * -- set warnings to 100MB below the per-task limit.
			 */
			if (max_task_footprint_mb > 100) {
				max_task_footprint_warning_level = ((max_task_footprint_mb - 100) * 100) / max_task_footprint_mb;
			}
		}

		/*
		 * Never allow warning level to land below the default.
		 */
		if (max_task_footprint_warning_level < PHYS_FOOTPRINT_WARNING_LEVEL) {
			max_task_footprint_warning_level = PHYS_FOOTPRINT_WARNING_LEVEL;
		}
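
		/*
		 * Illustrative arithmetic (added commentary): with a 600 MB
		 * per-task limit on a device with >= 1GB of memory, the
		 * warning level computes to ((600 - 100) * 100) / 600 = 83%,
		 * i.e. the warning fires at roughly 500 MB of footprint.
		 */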

		printf("Limiting task physical memory warning to %d%%\n", max_task_footprint_warning_level);

#else
		printf("Warning: max_task_pmem specified, but jetsam not configured; ignoring.\n");
#endif /* CONFIG_MEMORYSTATUS */
	}

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic", &pmap_ledgers_panic,
			sizeof (pmap_ledgers_panic));
#endif /* MACH_ASSERT */

#if CONFIG_COREDUMP
	if (!PE_parse_boot_argn("hwm_user_cores", &hwm_user_cores,
			sizeof (hwm_user_cores))) {
		hwm_user_cores = 0;
	}
#endif

	proc_init_cpumon_params();

	if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof (task_wakeups_monitor_rate))) {
		task_wakeups_monitor_rate = TASK_WAKEUPS_MONITOR_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_interval", &task_wakeups_monitor_interval, sizeof (task_wakeups_monitor_interval))) {
		task_wakeups_monitor_interval = TASK_WAKEUPS_MONITOR_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("task_wakeups_monitor_ustackshots_trigger_pct", &task_wakeups_monitor_ustackshots_trigger_pct,
		sizeof (task_wakeups_monitor_ustackshots_trigger_pct))) {
		task_wakeups_monitor_ustackshots_trigger_pct = TASK_WAKEUPS_MONITOR_DEFAULT_USTACKSHOTS_TRIGGER;
	}

	if (!PE_parse_boot_argn("disable_exc_resource", &disable_exc_resource,
		sizeof (disable_exc_resource))) {
		disable_exc_resource = 0;
	}

	if (!PE_parse_boot_argn("task_iomon_limit_mb", &task_iomon_limit_mb, sizeof (task_iomon_limit_mb))) {
		task_iomon_limit_mb = IOMON_DEFAULT_LIMIT;
	}

	if (!PE_parse_boot_argn("task_iomon_interval_secs", &task_iomon_interval_secs, sizeof (task_iomon_interval_secs))) {
		task_iomon_interval_secs = IOMON_DEFAULT_INTERVAL;
	}

	if (!PE_parse_boot_argn("io_telemetry_limit", &io_telemetry_limit, sizeof (io_telemetry_limit))) {
		io_telemetry_limit = IO_TELEMETRY_DEFAULT_LIMIT;
	}
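
	/*
	 * Illustrative example (added commentary): each monitor above is
	 * tunable from the boot command line, e.g. adding
	 *	task_wakeups_monitor_rate=300 io_telemetry_limit=0
	 * to boot-args raises the wakeups limit to 300/sec and turns I/O
	 * telemetry off (a zero io_telemetry_limit disables it, per the
	 * declaration earlier in this file).
	 */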

/*
 * If we have coalitions, coalition_init() will call init_task_ledgers() as it
 * sets up the ledgers for the default coalition. If we don't have coalitions,
 * then we have to call it now.
 */
#if CONFIG_COALITIONS
	assert(task_ledger_template);
#else /* CONFIG_COALITIONS */
	init_task_ledgers();
#endif /* CONFIG_COALITIONS */

#if TASK_REFERENCE_LEAK_DEBUG
	task_ref_btlog = btlog_create(TASK_REF_NUM_RECORDS, TASK_REF_BTDEPTH, TRUE /* caller_will_remove_entries_for_element? */);
	assert(task_ref_btlog);
#endif

	/*
	 * Create the kernel task as the first task.
	 */
#ifdef __LP64__
	if (task_create_internal(TASK_NULL, NULL, FALSE, TRUE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
#else
	if (task_create_internal(TASK_NULL, NULL, FALSE, FALSE, TF_NONE, TPF_NONE, &kernel_task) != KERN_SUCCESS)
#endif
		panic("task_init\n");

	vm_map_deallocate(kernel_task->map);
	kernel_task->map = kernel_map;
	lck_spin_init(&dead_task_statistics_lock, &task_lck_grp, &task_lck_attr);
}

/*
 * Create a task running in the kernel address space.  It may
 * have its own map of size mem_size and may have ipc privileges.
 */
kern_return_t
kernel_task_create(
	__unused task_t		parent_task,
	__unused vm_offset_t	map_base,
	__unused vm_size_t	map_size,
	__unused task_t		*child_task)
{
	return (KERN_INVALID_ARGUMENT);
}

kern_return_t
task_create(
	task_t				parent_task,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return(KERN_FAILURE);
}

kern_return_t
host_security_create_task_token(
	host_security_t			host_security,
	task_t				parent_task,
	__unused security_token_t	sec_token,
	__unused audit_token_t		audit_token,
	__unused host_priv_t		host_priv,
	__unused ledger_port_array_t	ledger_ports,
	__unused mach_msg_type_number_t	num_ledger_ports,
	__unused boolean_t		inherit_memory,
	__unused task_t			*child_task)	/* OUT */
{
	if (parent_task == TASK_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (host_security == HOST_NULL)
		return(KERN_INVALID_SECURITY);

	/*
	 * No longer supported.
	 */
	return(KERN_FAILURE);
}

/*
 * Task ledgers
 * ------------
 *
 * phys_footprint
 *   Physical footprint: This is the sum of:
 *     + (internal - alternate_accounting)
 *     + (internal_compressed - alternate_accounting_compressed)
 *     + iokit_mapped
 *     + purgeable_nonvolatile
 *     + purgeable_nonvolatile_compressed
 *     + page_table
 *
 * internal
 *   The task's anonymous memory, which on iOS is always resident.
 *
 * internal_compressed
 *   Amount of this task's internal memory which is held by the compressor.
 *   Such memory is no longer actually resident for the task [i.e., resident in its pmap],
 *   and could be either decompressed back into memory, or paged out to storage, depending
 *   on our implementation.
 *
 * iokit_mapped
 *   IOKit mappings: The total size of all IOKit mappings in this task [regardless of
 *   clean/dirty or internal/external state].
 *
 * alternate_accounting
 *   The number of internal dirty pages which are part of IOKit mappings. By definition, these pages
 *   are counted in both internal *and* iokit_mapped, so we must subtract them from the total to avoid
 *   double counting.
 */
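
/*
 * Illustrative sketch only (not part of the original kernel source): a
 * hypothetical helper that recomputes the documented phys_footprint formula
 * from the individual ledger entries, to make the arithmetic above concrete.
 * It assumes only ledger_get_entries() (used elsewhere in this file) and the
 * task_ledgers indices declared above; an entry's balance is credit - debit.
 */
static ledger_amount_t __unused
task_phys_footprint_recomputed(task_t task)
{
	ledger_amount_t credit, debit, total = 0;
	int add[] = {
		task_ledgers.internal,
		task_ledgers.internal_compressed,
		task_ledgers.iokit_mapped,
		task_ledgers.purgeable_nonvolatile,
		task_ledgers.purgeable_nonvolatile_compressed,
		task_ledgers.page_table,
	};
	int subtract[] = {
		task_ledgers.alternate_accounting,
		task_ledgers.alternate_accounting_compressed,
	};
	unsigned int i;

	/* sum the balances of the contributing entries */
	for (i = 0; i < sizeof(add) / sizeof(add[0]); i++) {
		if (ledger_get_entries(task->ledger, add[i], &credit, &debit) == KERN_SUCCESS)
			total += credit - debit;
	}
	/* alternate accounting is already counted in the sums above, so back it out */
	for (i = 0; i < sizeof(subtract) / sizeof(subtract[0]); i++) {
		if (ledger_get_entries(task->ledger, subtract[i], &credit, &debit) == KERN_SUCCESS)
			total -= credit - debit;
	}
	return total;
}
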
void
init_task_ledgers(void)
{
	ledger_template_t t;

	assert(task_ledger_template == NULL);
	assert(kernel_task == TASK_NULL);

#if MACH_ASSERT
	PE_parse_boot_argn("pmap_ledgers_panic", &pmap_ledgers_panic,
			sizeof (pmap_ledgers_panic));
#endif /* MACH_ASSERT */

	if ((t = ledger_template_create("Per-task ledger")) == NULL)
		panic("couldn't create task ledger template");

	task_ledgers.cpu_time = ledger_entry_add(t, "cpu_time", "sched", "ns");
	task_ledgers.tkm_private = ledger_entry_add(t, "tkm_private",
	    "physmem", "bytes");
	task_ledgers.tkm_shared = ledger_entry_add(t, "tkm_shared", "physmem",
	    "bytes");
	task_ledgers.phys_mem = ledger_entry_add(t, "phys_mem", "physmem",
	    "bytes");
	task_ledgers.wired_mem = ledger_entry_add(t, "wired_mem", "physmem",
	    "bytes");
	task_ledgers.internal = ledger_entry_add(t, "internal", "physmem",
	    "bytes");
	task_ledgers.iokit_mapped = ledger_entry_add(t, "iokit_mapped", "mappings",
	    "bytes");
	task_ledgers.alternate_accounting = ledger_entry_add(t, "alternate_accounting", "physmem",
	    "bytes");
	task_ledgers.alternate_accounting_compressed = ledger_entry_add(t, "alternate_accounting_compressed", "physmem",
	    "bytes");
	task_ledgers.page_table = ledger_entry_add(t, "page_table", "physmem",
	    "bytes");
	task_ledgers.phys_footprint = ledger_entry_add(t, "phys_footprint", "physmem",
	    "bytes");
	task_ledgers.internal_compressed = ledger_entry_add(t, "internal_compressed", "physmem",
	    "bytes");
	task_ledgers.purgeable_volatile = ledger_entry_add(t, "purgeable_volatile", "physmem", "bytes");
	task_ledgers.purgeable_nonvolatile = ledger_entry_add(t, "purgeable_nonvolatile", "physmem", "bytes");
	task_ledgers.purgeable_volatile_compressed = ledger_entry_add(t, "purgeable_volatile_compress", "physmem", "bytes");
	task_ledgers.purgeable_nonvolatile_compressed = ledger_entry_add(t, "purgeable_nonvolatile_compress", "physmem", "bytes");
	task_ledgers.platform_idle_wakeups = ledger_entry_add(t, "platform_idle_wakeups", "power",
	    "count");
	task_ledgers.interrupt_wakeups = ledger_entry_add(t, "interrupt_wakeups", "power",
	    "count");

#if CONFIG_SCHED_SFI
	sfi_class_id_t class_id, ledger_alias;
	for (class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		task_ledgers.sfi_wait_times[class_id] = -1;
	}

	/* don't account for UNSPECIFIED */
	for (class_id = SFI_CLASS_UNSPECIFIED + 1; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_alias = sfi_get_ledger_alias_for_class(class_id);
		if (ledger_alias != SFI_CLASS_UNSPECIFIED) {
			/* Check to see if alias has been registered yet */
			if (task_ledgers.sfi_wait_times[ledger_alias] != -1) {
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias];
			} else {
				/* Otherwise, initialize it first */
				task_ledgers.sfi_wait_times[class_id] = task_ledgers.sfi_wait_times[ledger_alias] = sfi_ledger_entry_add(t, ledger_alias);
			}
		} else {
			task_ledgers.sfi_wait_times[class_id] = sfi_ledger_entry_add(t, class_id);
		}

		if (task_ledgers.sfi_wait_times[class_id] < 0) {
			panic("couldn't create entries for task ledger template for SFI class 0x%x", class_id);
		}
	}

	assert(task_ledgers.sfi_wait_times[MAX_SFI_CLASS_ID -1] != -1);
#endif /* CONFIG_SCHED_SFI */

#ifdef CONFIG_BANK
	task_ledgers.cpu_time_billed_to_me = ledger_entry_add(t, "cpu_time_billed_to_me", "sched", "ns");
	task_ledgers.cpu_time_billed_to_others = ledger_entry_add(t, "cpu_time_billed_to_others", "sched", "ns");
#endif
	task_ledgers.physical_writes = ledger_entry_add(t, "physical_writes", "res", "bytes");
	task_ledgers.logical_writes = ledger_entry_add(t, "logical_writes", "res", "bytes");

	if ((task_ledgers.cpu_time < 0) ||
	    (task_ledgers.tkm_private < 0) ||
	    (task_ledgers.tkm_shared < 0) ||
	    (task_ledgers.phys_mem < 0) ||
	    (task_ledgers.wired_mem < 0) ||
	    (task_ledgers.internal < 0) ||
	    (task_ledgers.iokit_mapped < 0) ||
	    (task_ledgers.alternate_accounting < 0) ||
	    (task_ledgers.alternate_accounting_compressed < 0) ||
	    (task_ledgers.page_table < 0) ||
	    (task_ledgers.phys_footprint < 0) ||
	    (task_ledgers.internal_compressed < 0) ||
	    (task_ledgers.purgeable_volatile < 0) ||
	    (task_ledgers.purgeable_nonvolatile < 0) ||
	    (task_ledgers.purgeable_volatile_compressed < 0) ||
	    (task_ledgers.purgeable_nonvolatile_compressed < 0) ||
	    (task_ledgers.platform_idle_wakeups < 0) ||
	    (task_ledgers.interrupt_wakeups < 0) ||
#ifdef CONFIG_BANK
	    (task_ledgers.cpu_time_billed_to_me < 0) || (task_ledgers.cpu_time_billed_to_others < 0) ||
#endif
	    (task_ledgers.physical_writes < 0) ||
	    (task_ledgers.logical_writes < 0)
	    ) {
		panic("couldn't create entries for task ledger template");
	}

	ledger_track_credit_only(t, task_ledgers.phys_footprint);
	ledger_track_credit_only(t, task_ledgers.page_table);
	ledger_track_credit_only(t, task_ledgers.internal);
	ledger_track_credit_only(t, task_ledgers.internal_compressed);
	ledger_track_credit_only(t, task_ledgers.iokit_mapped);
	ledger_track_credit_only(t, task_ledgers.alternate_accounting);
	ledger_track_credit_only(t, task_ledgers.alternate_accounting_compressed);
	ledger_track_credit_only(t, task_ledgers.purgeable_volatile);
	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile);
	ledger_track_credit_only(t, task_ledgers.purgeable_volatile_compressed);
	ledger_track_credit_only(t, task_ledgers.purgeable_nonvolatile_compressed);

	ledger_track_maximum(t, task_ledgers.phys_footprint, 60);
#if MACH_ASSERT
	if (pmap_ledgers_panic) {
		ledger_panic_on_negative(t, task_ledgers.phys_footprint);
		ledger_panic_on_negative(t, task_ledgers.page_table);
		ledger_panic_on_negative(t, task_ledgers.internal);
		ledger_panic_on_negative(t, task_ledgers.internal_compressed);
		ledger_panic_on_negative(t, task_ledgers.iokit_mapped);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting);
		ledger_panic_on_negative(t, task_ledgers.alternate_accounting_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile);
		ledger_panic_on_negative(t, task_ledgers.purgeable_volatile_compressed);
		ledger_panic_on_negative(t, task_ledgers.purgeable_nonvolatile_compressed);
	}
#endif /* MACH_ASSERT */

#if CONFIG_MEMORYSTATUS
	ledger_set_callback(t, task_ledgers.phys_footprint, task_footprint_exceeded, NULL, NULL);
#endif /* CONFIG_MEMORYSTATUS */

	ledger_set_callback(t, task_ledgers.interrupt_wakeups,
		task_wakeups_rate_exceeded, NULL, NULL);
	ledger_set_callback(t, task_ledgers.physical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_PHYSICAL_WRITES, NULL);
	ledger_set_callback(t, task_ledgers.logical_writes, task_io_rate_exceeded, (void *)FLAVOR_IO_LOGICAL_WRITES, NULL);
	task_ledger_template = t;
}

kern_return_t
task_create_internal(
	task_t		parent_task,
	coalition_t	*parent_coalitions __unused,
	boolean_t	inherit_memory,
	__unused boolean_t	is_64bit,
	uint32_t	t_flags,
	uint32_t	t_procflags,
	task_t		*child_task)		/* OUT */
{
	task_t			new_task;
	vm_shared_region_t	shared_region;
	ledger_t		ledger = NULL;

	new_task = (task_t) zalloc(task_zone);

	if (new_task == TASK_NULL)
		return(KERN_RESOURCE_SHORTAGE);

	/* one ref for just being alive; one for our caller */
	new_task->ref_count = 2;

	/* allocate with active entries */
	assert(task_ledger_template != NULL);
	if ((ledger = ledger_instantiate(task_ledger_template,
			LEDGER_CREATE_ACTIVE_ENTRIES)) == NULL) {
		zfree(task_zone, new_task);
		return(KERN_RESOURCE_SHORTAGE);
	}

	new_task->ledger = ledger;

#if defined(CONFIG_SCHED_MULTIQ)
	new_task->sched_group = sched_group_create();
#endif

	/* if inherit_memory is true, parent_task MUST not be NULL */
	if (!(t_flags & TF_CORPSE_FORK) && inherit_memory)
		new_task->map = vm_map_fork(ledger, parent_task->map, 0);
	else
		new_task->map = vm_map_create(pmap_create(ledger, 0, is_64bit),
				(vm_map_offset_t)(VM_MIN_ADDRESS),
				(vm_map_offset_t)(VM_MAX_ADDRESS), TRUE);

	/* Inherit memlock limit from parent */
	if (parent_task)
		vm_map_set_user_wire_limit(new_task->map, (vm_size_t)parent_task->map->user_wire_limit);

	lck_mtx_init(&new_task->lock, &task_lck_grp, &task_lck_attr);
	queue_init(&new_task->threads);
	new_task->suspend_count = 0;
	new_task->thread_count = 0;
	new_task->active_thread_count = 0;
	new_task->user_stop_count = 0;
	new_task->legacy_stop_count = 0;
	new_task->active = TRUE;
	new_task->halting = FALSE;
	new_task->user_data = NULL;
	new_task->priv_flags = 0;
	new_task->t_flags = t_flags;
	new_task->t_procflags = t_procflags;
	new_task->importance = 0;
	new_task->corpse_info_kernel = NULL;
	new_task->exec_token = 0;

#if CONFIG_ATM
	new_task->atm_context = NULL;
#endif
#if CONFIG_BANK
	new_task->bank_context = NULL;
#endif

#ifdef MACH_BSD
	new_task->bsd_info = NULL;
	new_task->corpse_info = NULL;
#endif /* MACH_BSD */

#if CONFIG_MACF
	new_task->crash_label = NULL;
#endif

#if CONFIG_MEMORYSTATUS
	if (max_task_footprint != 0) {
		ledger_set_limit(ledger, task_ledgers.phys_footprint, max_task_footprint, PHYS_FOOTPRINT_WARNING_LEVEL);
	}
#endif /* CONFIG_MEMORYSTATUS */

	if (task_wakeups_monitor_rate != 0) {
		uint32_t flags = WAKEMON_ENABLE | WAKEMON_SET_DEFAULTS;
		int32_t  rate; // Ignored because of WAKEMON_SET_DEFAULTS
		task_wakeups_monitor_ctl(new_task, &flags, &rate);
	}

#if CONFIG_IO_ACCOUNTING
	uint32_t flags = IOMON_ENABLE;
	task_io_monitor_ctl(new_task, &flags);
#endif /* CONFIG_IO_ACCOUNTING */

#if defined(__i386__) || defined(__x86_64__)
	new_task->i386_ldt = 0;
#endif

	new_task->task_debug = NULL;

#if DEVELOPMENT || DEBUG
	new_task->task_unnested = FALSE;
	new_task->task_disconnected_count = 0;
#endif
	queue_init(&new_task->semaphore_list);
	new_task->semaphores_owned = 0;

	ipc_task_init(new_task, parent_task);

	new_task->vtimers = 0;

	new_task->shared_region = NULL;

	new_task->affinity_space = NULL;

	new_task->t_chud = 0;

	new_task->pidsuspended = FALSE;
	new_task->frozen = FALSE;
	new_task->changing_freeze_state = FALSE;
	new_task->rusage_cpu_flags = 0;
	new_task->rusage_cpu_percentage = 0;
	new_task->rusage_cpu_interval = 0;
	new_task->rusage_cpu_deadline = 0;
	new_task->rusage_cpu_callt = NULL;
#if MACH_ASSERT
	new_task->suspends_outstanding = 0;
#endif

#if HYPERVISOR
	new_task->hv_task_target = NULL;
#endif /* HYPERVISOR */


	new_task->mem_notify_reserved = 0;
#if IMPORTANCE_INHERITANCE
	new_task->task_imp_base = NULL;
#endif /* IMPORTANCE_INHERITANCE */

#if defined(__x86_64__)
	new_task->uexc_range_start = new_task->uexc_range_size = new_task->uexc_handler = 0;
#endif

	new_task->requested_policy = default_task_requested_policy;
	new_task->effective_policy = default_task_effective_policy;

	if (parent_task != TASK_NULL) {
		new_task->sec_token = parent_task->sec_token;
		new_task->audit_token = parent_task->audit_token;

		/* inherit the parent's shared region */
		shared_region = vm_shared_region_get(parent_task);
		vm_shared_region_set(new_task, shared_region);

		if(task_has_64BitAddr(parent_task))
			task_set_64BitAddr(new_task);
		new_task->all_image_info_addr = parent_task->all_image_info_addr;
		new_task->all_image_info_size = parent_task->all_image_info_size;

#if defined(__i386__) || defined(__x86_64__)
		if (inherit_memory && parent_task->i386_ldt)
			new_task->i386_ldt = user_ldt_copy(parent_task->i386_ldt);
#endif
		if (inherit_memory && parent_task->affinity_space)
			task_affinity_create(parent_task, new_task);

		new_task->pset_hint = parent_task->pset_hint = task_choose_pset(parent_task);

#if IMPORTANCE_INHERITANCE
		ipc_importance_task_t new_task_imp = IIT_NULL;
		boolean_t inherit_receive = TRUE;

		if (task_is_marked_importance_donor(parent_task)) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
			assert(IIT_NULL != new_task_imp);
			ipc_importance_task_mark_donor(new_task_imp, TRUE);
		}

		if (inherit_receive) {
			if (task_is_marked_importance_receiver(parent_task)) {
				if (IIT_NULL == new_task_imp)
					new_task_imp = ipc_importance_for_task(new_task, FALSE);
				assert(IIT_NULL != new_task_imp);
				ipc_importance_task_mark_receiver(new_task_imp, TRUE);
			}
			if (task_is_marked_importance_denap_receiver(parent_task)) {
				if (IIT_NULL == new_task_imp)
					new_task_imp = ipc_importance_for_task(new_task, FALSE);
				assert(IIT_NULL != new_task_imp);
				ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
			}
		}

		if (IIT_NULL != new_task_imp) {
			assert(new_task->task_imp_base == new_task_imp);
			ipc_importance_task_release(new_task_imp);
		}
#endif /* IMPORTANCE_INHERITANCE */

		new_task->priority = BASEPRI_DEFAULT;
		new_task->max_priority = MAXPRI_USER;

		task_policy_create(new_task, parent_task);
	} else {
		new_task->sec_token = KERNEL_SECURITY_TOKEN;
		new_task->audit_token = KERNEL_AUDIT_TOKEN;
#ifdef __LP64__
		if(is_64bit)
			task_set_64BitAddr(new_task);
#endif
		new_task->all_image_info_addr = (mach_vm_address_t)0;
		new_task->all_image_info_size = (mach_vm_size_t)0;

		new_task->pset_hint = PROCESSOR_SET_NULL;

		if (kernel_task == TASK_NULL) {
			new_task->priority = BASEPRI_KERNEL;
			new_task->max_priority = MAXPRI_KERNEL;
		} else {
			new_task->priority = BASEPRI_DEFAULT;
			new_task->max_priority = MAXPRI_USER;
		}
	}

	bzero(new_task->coalition, sizeof(new_task->coalition));
	for (int i = 0; i < COALITION_NUM_TYPES; i++)
		queue_chain_init(new_task->task_coalition[i]);

	/* Allocate I/O Statistics */
	new_task->task_io_stats = (io_stat_info_t)kalloc(sizeof(struct io_stat_info));
	assert(new_task->task_io_stats != NULL);
	bzero(new_task->task_io_stats, sizeof(struct io_stat_info));

	bzero(&(new_task->cpu_time_qos_stats), sizeof(struct _cpu_time_qos_stats));

	bzero(&new_task->extmod_statistics, sizeof(new_task->extmod_statistics));

	/* Copy resource accounting info from parent for corpse-forked task. */
	if (parent_task != NULL && (t_flags & TF_CORPSE_FORK)) {
		task_rollup_accounting_info(new_task, parent_task);
	} else {
		/* Initialize to zero for standard fork/spawn case */
		new_task->total_user_time = 0;
		new_task->total_system_time = 0;
		new_task->faults = 0;
		new_task->pageins = 0;
		new_task->cow_faults = 0;
		new_task->messages_sent = 0;
		new_task->messages_received = 0;
		new_task->syscalls_mach = 0;
		new_task->syscalls_unix = 0;
		new_task->c_switch = 0;
		new_task->p_switch = 0;
		new_task->ps_switch = 0;
		new_task->low_mem_notified_warn = 0;
		new_task->low_mem_notified_critical = 0;
		new_task->purged_memory_warn = 0;
		new_task->purged_memory_critical = 0;
		new_task->low_mem_privileged_listener = 0;
		new_task->memlimit_is_active = 0;
		new_task->memlimit_is_fatal = 0;
		new_task->memlimit_active_exc_resource = 0;
		new_task->memlimit_inactive_exc_resource = 0;
		new_task->task_timer_wakeups_bin_1 = 0;
		new_task->task_timer_wakeups_bin_2 = 0;
		new_task->task_gpu_ns = 0;
		new_task->task_immediate_writes = 0;
		new_task->task_deferred_writes = 0;
		new_task->task_invalidated_writes = 0;
		new_task->task_metadata_writes = 0;
		new_task->task_energy = 0;
	}


#if CONFIG_COALITIONS
	if (!(t_flags & TF_CORPSE_FORK)) {
		/* TODO: there is no graceful failure path here... */
		if (parent_coalitions && parent_coalitions[COALITION_TYPE_RESOURCE]) {
			coalitions_adopt_task(parent_coalitions, new_task);
		} else if (parent_task && parent_task->coalition[COALITION_TYPE_RESOURCE]) {
			/*
			 * all tasks at least have a resource coalition, so
			 * if the parent has one then inherit all coalitions
			 * the parent is a part of
			 */
			coalitions_adopt_task(parent_task->coalition, new_task);
		} else {
			/* TODO: assert that new_task will be PID 1 (launchd) */
			coalitions_adopt_init_task(new_task);
		}
	} else {
		coalitions_adopt_corpse_task(new_task);
	}

	if (new_task->coalition[COALITION_TYPE_RESOURCE] == COALITION_NULL) {
		panic("created task is not a member of a resource coalition");
	}
#endif /* CONFIG_COALITIONS */

	new_task->dispatchqueue_offset = 0;
	if (parent_task != NULL) {
		new_task->dispatchqueue_offset = parent_task->dispatchqueue_offset;
	}

	if (vm_backing_store_low && parent_task != NULL)
		new_task->priv_flags |= (parent_task->priv_flags&VM_BACKING_STORE_PRIV);

	new_task->task_volatile_objects = 0;
	new_task->task_nonvolatile_objects = 0;
	new_task->task_purgeable_disowning = FALSE;
	new_task->task_purgeable_disowned = FALSE;

#if CONFIG_SECLUDED_MEMORY
	new_task->task_can_use_secluded_mem = FALSE;
	new_task->task_could_use_secluded_mem = FALSE;
	new_task->task_could_also_use_secluded_mem = FALSE;
#endif /* CONFIG_SECLUDED_MEMORY */

	queue_init(&new_task->io_user_clients);

	ipc_task_enable(new_task);

	lck_mtx_lock(&tasks_threads_lock);
	queue_enter(&tasks, new_task, task_t, tasks);
	tasks_count++;
	if (tasks_suspend_state) {
		task_suspend_internal(new_task);
	}
	lck_mtx_unlock(&tasks_threads_lock);

	*child_task = new_task;
	return(KERN_SUCCESS);
}

/*
 * task_rollup_accounting_info
 *
 * Roll up accounting stats. Used to rollup stats
 * for exec copy task and corpse fork.
 */
void
task_rollup_accounting_info(task_t to_task, task_t from_task)
{
	assert(from_task != to_task);

	to_task->total_user_time = from_task->total_user_time;
	to_task->total_system_time = from_task->total_system_time;
	to_task->faults = from_task->faults;
	to_task->pageins = from_task->pageins;
	to_task->cow_faults = from_task->cow_faults;
	to_task->messages_sent = from_task->messages_sent;
	to_task->messages_received = from_task->messages_received;
	to_task->syscalls_mach = from_task->syscalls_mach;
	to_task->syscalls_unix = from_task->syscalls_unix;
	to_task->c_switch = from_task->c_switch;
	to_task->p_switch = from_task->p_switch;
	to_task->ps_switch = from_task->ps_switch;
	to_task->extmod_statistics = from_task->extmod_statistics;
	to_task->low_mem_notified_warn = from_task->low_mem_notified_warn;
	to_task->low_mem_notified_critical = from_task->low_mem_notified_critical;
	to_task->purged_memory_warn = from_task->purged_memory_warn;
	to_task->purged_memory_critical = from_task->purged_memory_critical;
	to_task->low_mem_privileged_listener = from_task->low_mem_privileged_listener;
	*to_task->task_io_stats = *from_task->task_io_stats;
	to_task->cpu_time_qos_stats = from_task->cpu_time_qos_stats;
	to_task->task_timer_wakeups_bin_1 = from_task->task_timer_wakeups_bin_1;
	to_task->task_timer_wakeups_bin_2 = from_task->task_timer_wakeups_bin_2;
	to_task->task_gpu_ns = from_task->task_gpu_ns;
	to_task->task_immediate_writes = from_task->task_immediate_writes;
	to_task->task_deferred_writes = from_task->task_deferred_writes;
	to_task->task_invalidated_writes = from_task->task_invalidated_writes;
	to_task->task_metadata_writes = from_task->task_metadata_writes;
	to_task->task_energy = from_task->task_energy;

	/* Skip ledger roll up for memory accounting entries */
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time);
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.platform_idle_wakeups);
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.interrupt_wakeups);
#if CONFIG_SCHED_SFI
	for (sfi_class_id_t class_id = SFI_CLASS_UNSPECIFIED; class_id < MAX_SFI_CLASS_ID; class_id++) {
		ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.sfi_wait_times[class_id]);
	}
#endif
#if CONFIG_BANK
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_me);
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.cpu_time_billed_to_others);
#endif
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.physical_writes);
	ledger_rollup_entry(to_task->ledger, from_task->ledger, task_ledgers.logical_writes);
}

int task_dropped_imp_count = 0;

/*
 *	task_deallocate:
 *
 *	Drop a reference on a task.
 */
void
task_deallocate(
	task_t		task)
{
	ledger_amount_t credit, debit, interrupt_wakeups, platform_idle_wakeups;
	uint32_t refs;

	if (task == TASK_NULL)
		return;

	refs = task_deallocate_internal(task);

#if IMPORTANCE_INHERITANCE
	if (refs > 1)
		return;

	if (refs == 1) {
		/*
		 * If last ref potentially comes from the task's importance,
		 * disconnect it.  But more task refs may be added before
		 * that completes, so wait for the reference to go to zero
		 * naturally (it may happen on a recursive task_deallocate()
		 * from the ipc_importance_disconnect_task() call).
		 */
		if (IIT_NULL != task->task_imp_base)
			ipc_importance_disconnect_task(task);
		return;
	}
#else
	if (refs > 0)
		return;
#endif /* IMPORTANCE_INHERITANCE */

	lck_mtx_lock(&tasks_threads_lock);
	queue_remove(&terminated_tasks, task, task_t, tasks);
	terminated_tasks_count--;
	lck_mtx_unlock(&tasks_threads_lock);

	/*
	 * remove the reference on atm descriptor
	 */
	task_atm_reset(task);

	/*
	 * remove the reference on bank context
	 */
	task_bank_reset(task);

	if (task->task_io_stats)
		kfree(task->task_io_stats, sizeof(struct io_stat_info));

	/*
	 *	Give the machine dependent code a chance
	 *	to perform cleanup before ripping apart
	 *	the task.
	 */
	machine_task_terminate(task);

	ipc_task_terminate(task);

	/* let iokit know */
	iokit_task_terminate(task);

	if (task->affinity_space)
		task_affinity_deallocate(task);

#if MACH_ASSERT
	if (task->ledger != NULL &&
	    task->map != NULL &&
	    task->map->pmap != NULL &&
	    task->map->pmap->ledger != NULL) {
		assert(task->ledger == task->map->pmap->ledger);
	}
#endif /* MACH_ASSERT */

	vm_purgeable_disown(task);
	assert(task->task_purgeable_disowned);
	if (task->task_volatile_objects != 0 ||
	    task->task_nonvolatile_objects != 0) {
		panic("task_deallocate(%p): "
		      "volatile_objects=%d nonvolatile_objects=%d\n",
		      task,
		      task->task_volatile_objects,
		      task->task_nonvolatile_objects);
	}

	vm_map_deallocate(task->map);
	is_release(task->itk_space);

	ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
			&interrupt_wakeups, &debit);
	ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
			&platform_idle_wakeups, &debit);

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_destroy(task->sched_group);
#endif

	/* Accumulate statistics for dead tasks */
	lck_spin_lock(&dead_task_statistics_lock);
	dead_task_statistics.total_user_time += task->total_user_time;
	dead_task_statistics.total_system_time += task->total_system_time;

	dead_task_statistics.task_interrupt_wakeups += interrupt_wakeups;
	dead_task_statistics.task_platform_idle_wakeups += platform_idle_wakeups;

	dead_task_statistics.task_timer_wakeups_bin_1 += task->task_timer_wakeups_bin_1;
	dead_task_statistics.task_timer_wakeups_bin_2 += task->task_timer_wakeups_bin_2;

	lck_spin_unlock(&dead_task_statistics_lock);
	lck_mtx_destroy(&task->lock, &task_lck_grp);

	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_private, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_private.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_private.free);
	}
	if (!ledger_get_entries(task->ledger, task_ledgers.tkm_shared, &credit,
	    &debit)) {
		OSAddAtomic64(credit, (int64_t *)&tasks_tkm_shared.alloc);
		OSAddAtomic64(debit, (int64_t *)&tasks_tkm_shared.free);
	}
	ledger_dereference(task->ledger);

#if TASK_REFERENCE_LEAK_DEBUG
	btlog_remove_entries_for_element(task_ref_btlog, task);
#endif

#if CONFIG_COALITIONS
	task_release_coalitions(task);
#endif /* CONFIG_COALITIONS */

	bzero(task->coalition, sizeof(task->coalition));

#if MACH_BSD
	/* clean up collected information since last reference to task is gone */
	if (task->corpse_info) {
		task_crashinfo_destroy(task->corpse_info, RELEASE_CORPSE_REF);
		task->corpse_info = NULL;
	}
#endif
	if (task->corpse_info_kernel) {
		kfree(task->corpse_info_kernel, CORPSEINFO_ALLOCATION_SIZE);
	}

#if CONFIG_MACF
	if (task->crash_label) {
		mac_exc_action_label_task_destroy(task);
	}
#endif

	zfree(task_zone, task);
}
1492
0c530ab8
A
1493/*
1494 * task_name_deallocate:
1495 *
1496 * Drop a reference on a task name.
1497 */
1498void
1499task_name_deallocate(
1500 task_name_t task_name)
1501{
1502 return(task_deallocate((task_t)task_name));
1503}
1504
813fb2f6
A
1505/*
1506 * task_inspect_deallocate:
1507 *
1508 * Drop a task inspection reference.
1509 */
1510void
1511task_inspect_deallocate(
1512 task_inspect_t task_inspect)
1513{
1514 return(task_deallocate((task_t)task_inspect));
1515}
1516
39236c6e
A
1517/*
1518 * task_suspension_token_deallocate:
1519 *
1520 * Drop a reference on a task suspension token.
1521 */
1522void
1523task_suspension_token_deallocate(
1524 task_suspension_token_t token)
1525{
1526 return(task_deallocate((task_t)token));
1527}
0c530ab8 1528
3e170ce0
A
1529
1530/*
1531 * task_collect_crash_info:
1532 *
1533 * Collect crash info from BSD- and Mach-based data.
1534 */
1535kern_return_t
39037602 1536task_collect_crash_info(task_t task, struct proc *proc, int is_corpse_fork)
1537{
1538 kern_return_t kr = KERN_SUCCESS;
1539
1540 kcdata_descriptor_t crash_data = NULL;
1541 kcdata_descriptor_t crash_data_release = NULL;
1542 mach_msg_type_number_t size = CORPSEINFO_ALLOCATION_SIZE;
1543 mach_vm_offset_t crash_data_ptr = 0;
1544 void *crash_data_kernel = NULL;
1545 void *crash_data_kernel_release = NULL;
1546 int corpse_blob_kernel_alloc = (is_corpse_fork || unify_corpse_blob_alloc);
1547
1548 if (!corpses_enabled()) {
1549 return KERN_NOT_SUPPORTED;
1550 }
1551
1552 task_lock(task);
1553
1554 assert(is_corpse_fork || task->bsd_info != NULL);
1555 if (task->corpse_info == NULL && (is_corpse_fork || task->bsd_info != NULL)) {
1556#if CONFIG_MACF
1557 /* Update the corpse label, used by the exception delivery mac hook */
1558 mac_exc_action_label_task_update(task, proc);
1559#endif
3e170ce0 1560 task_unlock(task);
3e170ce0 1561
1562 if (!corpse_blob_kernel_alloc) {
1563 /* map crash data memory in task's vm map */
1564 kr = mach_vm_allocate(task->map, &crash_data_ptr, size, (VM_MAKE_TAG(VM_MEMORY_CORPSEINFO) | VM_FLAGS_ANYWHERE));
1565 } else {
1566 crash_data_kernel = (void *) kalloc(CORPSEINFO_ALLOCATION_SIZE);
1567 if (crash_data_kernel == NULL)
1568 return KERN_RESOURCE_SHORTAGE; /* don't fall through and bzero() a NULL buffer */
1569 bzero(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
1570 crash_data_ptr = (mach_vm_offset_t) crash_data_kernel;
1571 }
1572 if (kr != KERN_SUCCESS)
1573 goto out_no_lock;
1574
1575 /* Do not get a corpse ref for corpse fork */
1576 crash_data = task_crashinfo_alloc_init((mach_vm_address_t)crash_data_ptr, size, is_corpse_fork ? !GET_CORPSE_REF : GET_CORPSE_REF, corpse_blob_kernel_alloc ? KCFLAG_USE_MEMCOPY: KCFLAG_USE_COPYOUT);
1577 if (crash_data) {
1578 task_lock(task);
1579 crash_data_release = task->corpse_info;
39037602 1580 crash_data_kernel_release = task->corpse_info_kernel;
3e170ce0 1581 task->corpse_info = crash_data;
1582 task->corpse_info_kernel = crash_data_kernel;
1583
1584 task_unlock(task);
1585 kr = KERN_SUCCESS;
1586 } else {
1587 /* if failed to create corpse info, free the mapping */
1588 if (!corpse_blob_kernel_alloc) {
1589 if (KERN_SUCCESS != mach_vm_deallocate(task->map, crash_data_ptr, size)) {
1590 printf("mach_vm_deallocate failed to clear corpse_data for pid %d.\n", task_pid(task));
1591 }
1592 } else {
1593 kfree(crash_data_kernel, CORPSEINFO_ALLOCATION_SIZE);
1594 }
1595 kr = KERN_FAILURE;
1596 }
1597
1598 if (crash_data_release != NULL) {
1599 task_crashinfo_destroy(crash_data_release, is_corpse_fork ? !RELEASE_CORPSE_REF : RELEASE_CORPSE_REF);
1600 }
1601 if (crash_data_kernel_release != NULL) {
1602 kfree(crash_data_kernel_release, CORPSEINFO_ALLOCATION_SIZE);
1603 }
1604 } else {
1605 task_unlock(task);
1606 }
1607
1608out_no_lock:
1609 return kr;
1610}
1611
1612/*
1613 * task_deliver_crash_notification:
1614 *
1615 * Makes an outcall to the registered host port for a corpse.
1616 */
1617kern_return_t
39037602 1618task_deliver_crash_notification(task_t task, thread_t thread, mach_exception_data_type_t subcode)
1619{
1620 kcdata_descriptor_t crash_info = task->corpse_info;
1621 thread_t th_iter = NULL;
1622 kern_return_t kr = KERN_SUCCESS;
1623 wait_interrupt_t wsave;
1624 mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
39037602 1625 ipc_port_t task_port, old_notify;
1626
1627 if (crash_info == NULL)
1628 return KERN_FAILURE;
1629
3e170ce0 1630 task_lock(task);
1631 if (task_is_a_corpse_fork(task)) {
1632 /* Populate code with EXC_RESOURCE for corpse fork */
1633 code[0] = EXC_RESOURCE;
1634 code[1] = subcode;
1635 } else if (unify_corpse_blob_alloc) {
1636 /* Populate code with EXC_CRASH for corpses */
1637 code[0] = EXC_CRASH;
1638 code[1] = 0;
1639 /* Update the code[1] if the boot-arg corpse_for_fatal_memkill is set */
1640 if (corpse_for_fatal_memkill) {
1641 code[1] = subcode;
1642 }
1643 } else {
1644 /* Populate code with address and length for EXC_CRASH */
1645 code[0] = crash_info->kcd_addr_begin;
1646 code[1] = crash_info->kcd_length;
1647 }
1648 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
1649 {
1650 if (th_iter->corpse_dup == FALSE) {
1651 ipc_thread_reset(th_iter);
1652 }
1653 }
1654 task_unlock(task);
1655
1656 /* Arm the no-senders notification for the task port */
1657 task_reference(task);
1658 task_port = convert_task_to_port(task);
1659 ip_lock(task_port);
1660 assert(ip_active(task_port));
1661 ipc_port_nsrequest(task_port, task_port->ip_mscount, ipc_port_make_sonce_locked(task_port), &old_notify);
1662 /* port unlocked */
1663 assert(IP_NULL == old_notify);
1664
3e170ce0 1665 wsave = thread_interrupt_level(THREAD_UNINT);
39037602 1666 kr = exception_triage_thread(EXC_CORPSE_NOTIFY, code, EXCEPTION_CODE_MAX, thread);
1667 if (kr != KERN_SUCCESS) {
1668 printf("Failed to send exception EXC_CORPSE_NOTIFY. error code: %d for pid %d\n", kr, task_pid(task));
1669 }
1670
3e170ce0 1671 (void)thread_interrupt_level(wsave);
3e170ce0 1672
1673 /*
1674 * Drop the send right on the task port; this will fire the
1675 * no-senders notification if exception delivery failed.
1676 */
1677 ipc_port_release_send(task_port);
1678 return kr;
1679}
1680
1681/*
1682 * task_terminate:
1683 *
1684 * Terminate the specified task. See comments on thread_terminate
1685 * (kern/thread.c) about problems with terminating the "current task."
1686 */
1687
1688kern_return_t
1689task_terminate(
1690 task_t task)
1691{
1692 if (task == TASK_NULL)
1693 return (KERN_INVALID_ARGUMENT);
1694
1c79356b 1695 if (task->bsd_info)
1696 return (KERN_FAILURE);
1697
1698 return (task_terminate_internal(task));
1699}
1700
1701#if MACH_ASSERT
1702extern int proc_pid(struct proc *);
1703extern void proc_name_kdp(task_t t, char *buf, int size);
1704#endif /* MACH_ASSERT */
1705
1706#define VM_MAP_PARTIAL_REAP 0x54 /* 0x150 */
1707static void
1708__unused task_partial_reap(task_t task, __unused int pid)
1709{
1710 unsigned int reclaimed_resident = 0;
1711 unsigned int reclaimed_compressed = 0;
1712 uint64_t task_page_count;
1713
1714 task_page_count = (get_task_phys_footprint(task) / PAGE_SIZE_64);
1715
1716 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_START),
1717 pid, task_page_count, 0, 0, 0);
1718
1719 vm_map_partial_reap(task->map, &reclaimed_resident, &reclaimed_compressed);
1720
1721 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_MAP_PARTIAL_REAP) | DBG_FUNC_END),
1722 pid, reclaimed_resident, reclaimed_compressed, 0, 0);
1723}
1724
1725kern_return_t
1726task_mark_corpse(task_t task)
1727{
1728 kern_return_t kr = KERN_SUCCESS;
1729 thread_t self_thread;
1730 (void) self_thread;
1731 wait_interrupt_t wsave;
1732
1733 assert(task != kernel_task);
1734 assert(task == current_task());
1735 assert(!task_is_a_corpse(task));
1736
39037602 1737 kr = task_collect_crash_info(task, (struct proc*)task->bsd_info, FALSE);
1738 if (kr != KERN_SUCCESS) {
1739 return kr;
1740 }
1741
1742 self_thread = current_thread();
1743
1744 wsave = thread_interrupt_level(THREAD_UNINT);
1745 task_lock(task);
1746
1747 task_set_corpse_pending_report(task);
1748 task_set_corpse(task);
1749
1750 kr = task_start_halt_locked(task, TRUE);
1751 assert(kr == KERN_SUCCESS);
39037602 1752
3e170ce0 1753 ipc_task_reset(task);
1754 /* Remove the naked send right for the task port; this is needed to arm the no-senders notification */
1755 task_set_special_port(task, TASK_KERNEL_PORT, IPC_PORT_NULL);
1756 ipc_task_enable(task);
1757
1758 task_unlock(task);
1759 /* terminate the ipc space */
1760 ipc_space_terminate(task->itk_space);
1761
1762 /* Add it to global corpse task list */
1763 task_add_to_corpse_task_list(task);
1764
1765 task_start_halt(task);
1766 thread_terminate_internal(self_thread);
39037602 1767
1768 (void) thread_interrupt_level(wsave);
1769 assert(task->halting == TRUE);
1770 return kr;
1771}
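/*
 * Editor's sketch (not part of the original source): the corpse-marking
 * sequence above, in call order, is roughly:
 *
 *	task_collect_crash_info()                  - snapshot the crash kcdata
 *	task_set_corpse_pending_report()/_corpse() - flag the task as a corpse
 *	task_start_halt_locked()                   - halt and mark threads for inspection
 *	ipc_task_reset() + ipc_space_terminate()   - tear down the old IPC state
 *	task_add_to_corpse_task_list()             - publish the corpse globally
 *	thread_terminate_internal(self_thread)     - let the calling thread die
 */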
1772
1773/*
1774 * task_clear_corpse
1775 *
1776 * Clears the corpse-pending bit on the task.
1777 * Removes the inspection bit from its threads.
1778 */
1779void
1780task_clear_corpse(task_t task)
1781{
1782 thread_t th_iter = NULL;
1783
1784 task_lock(task);
1785 queue_iterate(&task->threads, th_iter, thread_t, task_threads)
1786 {
1787 thread_mtx_lock(th_iter);
1788 th_iter->inspection = FALSE;
1789 thread_mtx_unlock(th_iter);
1790 }
1791
1792 thread_terminate_crashed_threads();
1793 /* remove the pending corpse report flag */
1794 task_clear_corpse_pending_report(task);
1795
1796 task_unlock(task);
1797}
1798
1799/*
1800 * task_port_notify
1801 *
1802 * Called whenever the Mach port system detects no-senders on
1803 * the task port of a corpse.
1804 * Each notification that comes in should terminate the task (corpse).
1805 */
1806void
1807task_port_notify(mach_msg_header_t *msg)
1808{
1809 mach_no_senders_notification_t *notification = (void *)msg;
1810 ipc_port_t port = notification->not_header.msgh_remote_port;
1811 task_t task;
1812
1813 assert(ip_active(port));
1814 assert(IKOT_TASK == ip_kotype(port));
1815 task = (task_t) port->ip_kobject;
1816
1817 assert(task_is_a_corpse(task));
1818
1819 /* Remove the task from global corpse task list */
1820 task_remove_from_corpse_task_list(task);
1821
1822 task_clear_corpse(task);
1823 task_terminate_internal(task);
1824}
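/*
 * (Editor's note: this handler services the no-senders notification
 * armed in task_deliver_crash_notification() above; once the last send
 * right to a corpse's task port is released, the corpse reaps itself.)
 */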
1825
1826/*
1827 * task_wait_till_threads_terminate_locked
1828 *
1829 * Wait till all the threads in the task are terminated.
1830 * Might release the task lock and re-acquire it.
1831 */
1832void
1833task_wait_till_threads_terminate_locked(task_t task)
1834{
1835 /* wait for all the threads in the task to terminate */
1836 while (task->active_thread_count != 0) {
1837 assert_wait((event_t)&task->active_thread_count, THREAD_UNINT);
1838 task_unlock(task);
1839 thread_block(THREAD_CONTINUE_NULL);
1840
1841 task_lock(task);
1842 }
1843}
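/*
 * Editor's sketch of the wakeup side (an assumption about the thread
 * reaping path, not code from this file): the wait above is satisfied
 * by something equivalent to
 *
 *	if (--task->active_thread_count == 0)
 *		thread_wakeup((event_t)&task->active_thread_count);
 */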
1844
1845/*
1846 * task_duplicate_map_and_threads
1847 *
1848 * Copy the VM map of the source task.
1849 * Copy the active threads from the source task to the destination task.
1850 * The source task is suspended for the duration of the copy.
1851 */
1852kern_return_t
1853task_duplicate_map_and_threads(
1854 task_t task,
1855 void *p,
1856 task_t new_task,
1857 thread_t *thread_ret,
1858 uint64_t **udata_buffer,
1859 int *size,
1860 int *num_udata)
1861{
1862 kern_return_t kr = KERN_SUCCESS;
1863 int active;
1864 thread_t thread, self, thread_return = THREAD_NULL;
1865 thread_t new_thread = THREAD_NULL;
1866 thread_t *thread_array;
1867 uint32_t active_thread_count = 0, array_count = 0, i;
1868 vm_map_t oldmap;
1869 uint64_t *buffer = NULL;
1870 int buf_size = 0;
1871 int est_knotes = 0, num_knotes = 0;
1872
1873 self = current_thread();
1874
1875 /*
1876 * Suspend the task to copy thread state, use the internal
1877 * variant so that no user-space process can resume
1878 * the task from under us
1879 */
1880 kr = task_suspend_internal(task);
1881 if (kr != KERN_SUCCESS) {
1882 return kr;
1883 }
1884
1885 if (task->map->disable_vmentry_reuse == TRUE) {
1886 /*
1887 * Quite likely GuardMalloc (or some debugging tool)
1888 * is being used on this task. And it has gone through
1889 * its limit. Making a corpse will likely encounter
1890 * a lot of VM entries that will need COW.
1891 *
1892 * Skip it.
1893 */
1894 task_resume_internal(task);
1895 return KERN_FAILURE;
1896 }
1897
1898 /* Set up the new task's VM map: switch from the parent task's map to its COW copy */
1899 oldmap = new_task->map;
1900 new_task->map = vm_map_fork(new_task->ledger,
1901 task->map,
1902 (VM_MAP_FORK_SHARE_IF_INHERIT_NONE |
1903 VM_MAP_FORK_PRESERVE_PURGEABLE));
1904 vm_map_deallocate(oldmap);
1905
1906 /* Get all the udata pointers from kqueue */
1907 est_knotes = proc_list_uptrs(p, NULL, 0);
1908 if (est_knotes > 0) {
1909 buf_size = (est_knotes + 32) * sizeof(uint64_t);
1910 buffer = (uint64_t *) kalloc(buf_size);
1911 num_knotes = proc_list_uptrs(p, buffer, buf_size);
1912 if (num_knotes > est_knotes + 32) {
1913 num_knotes = est_knotes + 32;
1914 }
1915 }
1916
1917 active_thread_count = task->active_thread_count;
1918 if (active_thread_count == 0) {
1919 if (buffer != NULL) {
1920 kfree(buffer, buf_size);
1921 }
1922 task_resume_internal(task);
1923 return KERN_FAILURE;
1924 }
1925
1926 thread_array = (thread_t *) kalloc(sizeof(thread_t) * active_thread_count);
1927
1928 /* Iterate all the threads and drop the task lock before calling thread_create_with_continuation */
1929 task_lock(task);
1930 queue_iterate(&task->threads, thread, thread_t, task_threads) {
1931 /* Skip inactive threads */
1932 active = thread->active;
1933 if (!active) {
1934 continue;
1935 }
1936
1937 if (array_count >= active_thread_count) {
1938 break;
1939 }
1940
1941 thread_array[array_count++] = thread;
1942 thread_reference(thread);
1943 }
1944 task_unlock(task);
1945
1946 for (i = 0; i < array_count; i++) {
1947
1948 kr = thread_create_with_continuation(new_task, &new_thread, (thread_continue_t)thread_corpse_continue);
1949 if (kr != KERN_SUCCESS) {
1950 break;
1951 }
1952
1953 /* Equivalent of current thread in corpse */
1954 if (thread_array[i] == self) {
1955 thread_return = new_thread;
1956 } else {
1957 /* drop the extra ref returned by thread_create_with_continuation */
1958 thread_deallocate(new_thread);
1959 }
1960
1961 kr = thread_dup2(thread_array[i], new_thread);
1962 if (kr != KERN_SUCCESS) {
1963 thread_mtx_lock(new_thread);
1964 new_thread->corpse_dup = TRUE;
1965 thread_mtx_unlock(new_thread);
1966 continue;
1967 }
1968
1969 /* Copy thread name */
1970 bsd_copythreadname(new_thread->uthread, thread_array[i]->uthread);
1971 thread_copy_resource_info(new_thread, thread_array[i]);
1972 }
1973
1974 task_resume_internal(task);
1975
1976 for (i = 0; i < array_count; i++) {
1977 thread_deallocate(thread_array[i]);
1978 }
1979 kfree(thread_array, sizeof(thread_t) * active_thread_count);
1980
1981 if (kr == KERN_SUCCESS) {
1982 *thread_ret = thread_return;
1983 *udata_buffer = buffer;
1984 *size = buf_size;
1985 *num_udata = num_knotes;
1986 } else {
1987 if (thread_return != THREAD_NULL) {
1988 thread_deallocate(thread_return);
1989 }
1990 if (buffer != NULL) {
1991 kfree(buffer, buf_size);
1992 }
1993 }
1994
1995 return kr;
1996}
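/*
 * (Editor's note: as the comment inside the routine says, threads are
 * referenced under the task lock and duplicated only after the lock is
 * dropped, because thread_create_with_continuation() cannot be called
 * with the task lock held; the reference array keeps each source
 * thread alive across that unlocked window.)
 */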
1997
1998#if CONFIG_SECLUDED_MEMORY
1999extern void task_set_can_use_secluded_mem_locked(
2000 task_t task,
2001 boolean_t can_use_secluded_mem);
2002#endif /* CONFIG_SECLUDED_MEMORY */
2003
2004kern_return_t
2005task_terminate_internal(
91447636 2006 task_t task)
1c79356b 2007{
2008 thread_t thread, self;
2009 task_t self_task;
2010 boolean_t interrupt_save;
fe8ab488 2011 int pid = 0;
2012
2013 assert(task != kernel_task);
2014
2015 self = current_thread();
2016 self_task = self->task;
2017
2018 /*
2019 * Get the task locked and make sure that we are not racing
2020 * with someone else trying to terminate us.
2021 */
91447636 2022 if (task == self_task)
1c79356b 2023 task_lock(task);
2024 else
2025 if (task < self_task) {
1c79356b 2026 task_lock(task);
2027 task_lock(self_task);
2028 }
2029 else {
2030 task_lock(self_task);
2031 task_lock(task);
2032 }
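/*
 * (Editor's note: the address comparison above is the standard
 * deadlock-avoidance trick; any two task locks are always taken in
 * ascending address order, so two threads cross-terminating each
 * other's tasks can never acquire them in opposite orders.)
 */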
2033
2034#if CONFIG_SECLUDED_MEMORY
2035 if (task->task_can_use_secluded_mem) {
2036 task_set_can_use_secluded_mem_locked(task, FALSE);
2037 }
2038 task->task_could_use_secluded_mem = FALSE;
2039 task->task_could_also_use_secluded_mem = FALSE;
2040#endif /* CONFIG_SECLUDED_MEMORY */
2041
6d2010ae 2042 if (!task->active) {
1c79356b 2043 /*
6d2010ae 2044 * Task is already being terminated.
2045 * Just return an error. If we are dying, this will
2046 * just get us to our AST special handler and that
2047 * will get us to finalize the termination of ourselves.
2048 */
2049 task_unlock(task);
2050 if (self_task != task)
2051 task_unlock(self_task);
2052
2053 return (KERN_FAILURE);
1c79356b 2054 }
91447636 2055
2056 if (task_corpse_pending_report(task)) {
2057 /*
2058 * Task is marked for reporting as corpse.
2059 * Just return an error. This will
2060 * just get us to our AST special handler and that
2061 * will get us to finish the path to death
2062 */
2063 task_unlock(task);
2064 if (self_task != task)
2065 task_unlock(self_task);
2066
2067 return (KERN_FAILURE);
2068 }
2069
2070 if (self_task != task)
2071 task_unlock(self_task);
1c79356b 2072
2073 /*
2074 * Make sure the current thread does not get aborted out of
2075 * the waits inside these operations.
2076 */
9bccf70c 2077 interrupt_save = thread_interrupt_level(THREAD_UNINT);
e7c99d92 2078
2079 /*
2080 * Indicate that we want all the threads to stop executing
2081 * at user space by holding the task (we would have held
2082 * each thread independently in thread_terminate_internal -
2083 * but this way we may be more likely to already find it
2084 * held there). Mark the task inactive, and prevent
2085 * further task operations via the task port.
2086 */
2087 task_hold_locked(task);
2088 task->active = FALSE;
2089 ipc_task_disable(task);
2090
2091#if CONFIG_TELEMETRY
2092 /*
2093 * Notify telemetry that this task is going away.
2094 */
2095 telemetry_task_ctl_locked(task, TF_TELEMETRY, 0);
2096#endif
2097
1c79356b 2098 /*
2099 * Terminate each thread in the task.
2100 */
2101 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2102 thread_terminate_internal(thread);
1c79356b 2103 }
e7c99d92 2104
fe8ab488 2105#ifdef MACH_BSD
743345f9 2106 if (task->bsd_info != NULL && !task_is_exec_copy(task)) {
2107 pid = proc_pid(task->bsd_info);
2108 }
2109#endif /* MACH_BSD */
2110
2111 task_unlock(task);
2112
39037602 2113 proc_set_task_policy(task, TASK_POLICY_ATTRIBUTE,
3e170ce0 2114 TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
a1c7dba1 2115
2116 /* Early object reap phase */
2117
2118// PR-17045188: Revisit implementation
2119// task_partial_reap(task, pid);
2120
2121
2122 /*
2123 * Destroy all synchronizers owned by the task.
2124 */
2125 task_synchronizer_destroy_all(task);
2126
2127 /*
2128 * Destroy the IPC space, leaving just a reference for it.
2129 */
316670eb 2130 ipc_space_terminate(task->itk_space);
1c79356b 2131
2132#if 00
2133 /* if some ledgers go negative on tear-down again... */
2134 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2135 task_ledgers.phys_footprint);
2136 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2137 task_ledgers.internal);
2138 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2139 task_ledgers.internal_compressed);
2140 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2141 task_ledgers.iokit_mapped);
2142 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2143 task_ledgers.alternate_accounting);
2144 ledger_disable_panic_on_negative(task->map->pmap->ledger,
2145 task_ledgers.alternate_accounting_compressed);
fe8ab488 2146#endif
91447636 2147
2148 /*
2149 * If the current thread is a member of the task
2150 * being terminated, then the last reference to
2151 * the task will not be dropped until the thread
2152 * is finally reaped. To avoid incurring the
2153 * expense of removing the address space regions
2154 * at reap time, we do it explicitly here.
2155 */
2156
2157 vm_map_lock(task->map);
2158 vm_map_disable_hole_optimization(task->map);
2159 vm_map_unlock(task->map);
2160
2161 vm_map_remove(task->map,
2162 task->map->min_offset,
2163 task->map->max_offset,
2164 /* no unnesting on final cleanup: */
2165 VM_MAP_REMOVE_NO_UNNESTING);
1c79356b 2166
2167 /* release our shared region */
2168 vm_shared_region_set(task, NULL);
9bccf70c 2169
3e170ce0 2170
2171#if MACH_ASSERT
2172 /*
2173 * Identify the pmap's process, in case the pmap ledgers drift
2174 * and we have to report it.
2175 */
2176 char procname[17];
743345f9 2177 if (task->bsd_info && !task_is_exec_copy(task)) {
2178 pid = proc_pid(task->bsd_info);
2179 proc_name_kdp(task, procname, sizeof (procname));
2180 } else {
2181 pid = 0;
2182 strlcpy(procname, "<unknown>", sizeof (procname));
2183 }
2184 pmap_set_process(task->map->pmap, pid, procname);
2185#endif /* MACH_ASSERT */
2186
b0d623f7 2187 lck_mtx_lock(&tasks_threads_lock);
2d21ac55 2188 queue_remove(&tasks, task, task_t, tasks);
6d2010ae 2189 queue_enter(&terminated_tasks, task, task_t, tasks);
2d21ac55 2190 tasks_count--;
39236c6e 2191 terminated_tasks_count++;
b0d623f7 2192 lck_mtx_unlock(&tasks_threads_lock);
9bccf70c 2193
1c79356b 2194 /*
2195 * We no longer need to guard against being aborted, so restore
2196 * the previous interruptible state.
2197 */
9bccf70c 2198 thread_interrupt_level(interrupt_save);
e7c99d92 2199
2200#if KPERF
2201 /* force the task to release all ctrs */
2202 if (task->t_chud & TASK_KPC_FORCED_ALL_CTRS)
2203 kpc_force_all_ctrs(task, 0);
2204#endif
2205
2206#if CONFIG_COALITIONS
2207 /*
3e170ce0 2208 * Leave our coalitions. (drop activation but not reference)
fe8ab488 2209 */
3e170ce0 2210 coalitions_remove_task(task);
2211#endif
2212
2213 /*
2214 * Get rid of the task active reference on itself.
1c79356b 2215 */
2216 task_deallocate(task);
2217
91447636 2218 return (KERN_SUCCESS);
2219}
2220
2221void
2222tasks_system_suspend(boolean_t suspend)
2223{
2224 task_t task;
2225
2226 lck_mtx_lock(&tasks_threads_lock);
2227 assert(tasks_suspend_state != suspend);
2228 tasks_suspend_state = suspend;
2229 queue_iterate(&tasks, task, task_t, tasks) {
2230 if (task == kernel_task) {
2231 continue;
2232 }
2233 suspend ? task_suspend_internal(task) : task_resume_internal(task);
2234 }
2235 lck_mtx_unlock(&tasks_threads_lock);
2236}
2237
1c79356b 2238/*
b0d623f7 2239 * task_start_halt:
2240 *
2241 * Shut the current task down (except for the current thread) in
2242 * preparation for dramatic changes to the task (probably exec).
2243 * We hold the task and mark all other threads in the task for
2244 * termination.
2245 */
2246kern_return_t
2247task_start_halt(task_t task)
2248{
2249 kern_return_t kr = KERN_SUCCESS;
2250 task_lock(task);
2251 kr = task_start_halt_locked(task, FALSE);
2252 task_unlock(task);
2253 return kr;
2254}
2255
2256static kern_return_t
2257task_start_halt_locked(task_t task, boolean_t should_mark_corpse)
1c79356b 2258{
2259 thread_t thread, self;
2260 uint64_t dispatchqueue_offset;
2261
2262 assert(task != kernel_task);
2263
91447636 2264 self = current_thread();
1c79356b 2265
39037602 2266 if (task != self->task && !task_is_a_corpse_fork(task))
91447636 2267 return (KERN_INVALID_ARGUMENT);
1c79356b 2268
b0d623f7 2269 if (task->halting || !task->active || !self->active) {
1c79356b 2270 /*
2271 * Task or current thread is already being terminated.
2272 * Hurry up and return out of the current kernel context
2273 * so that we run our AST special handler to terminate
2274 * ourselves.
1c79356b 2275 */
91447636 2276 return (KERN_FAILURE);
2277 }
2278
2279 task->halting = TRUE;
2280
2281 /*
2282 * Mark all the threads to keep them from starting any more
2283 * user-level execution. The thread_terminate_internal code
2284 * would do this on a thread by thread basis anyway, but this
2285 * gives us a better chance of not having to wait there.
2286 */
2287 task_hold_locked(task);
2288 dispatchqueue_offset = get_dispatchqueue_offset_from_proc(task->bsd_info);
1c79356b 2289
2290 /*
2291 * Terminate all the other threads in the task.
2292 */
2293 queue_iterate(&task->threads, thread, thread_t, task_threads)
2294 {
2295 if (should_mark_corpse) {
2296 thread_mtx_lock(thread);
2297 thread->inspection = TRUE;
2298 thread_mtx_unlock(thread);
1c79356b 2299 }
2300 if (thread != self)
2301 thread_terminate_internal(thread);
1c79356b 2302 }
2303 task->dispatchqueue_offset = dispatchqueue_offset;
2304
2305 task_release_locked(task);
2306
2307 return KERN_SUCCESS;
2308}
2309
2310
2311/*
2312 * task_complete_halt:
2313 *
2314 * Complete task halt by waiting for threads to terminate, then clean
2315 * up task resources (VM, port namespace, etc...) and then let the
2316 * current thread go in the (practically empty) task context.
2317 *
2318 * Note: task->halting flag is not cleared in order to avoid creation
2319 * of new threads in the old exec'ed task.
2320 */
2321void
2322task_complete_halt(task_t task)
2323{
2324 task_lock(task);
2325 assert(task->halting);
2326 assert(task == current_task());
e7c99d92 2327
2328 /*
2329 * Wait for the other threads to get shut down.
2330 * When the last other thread is reaped, we'll be
316670eb 2331 * woken up.
2332 */
2333 if (task->thread_count > 1) {
2334 assert_wait((event_t)&task->halting, THREAD_UNINT);
2335 task_unlock(task);
2336 thread_block(THREAD_CONTINUE_NULL);
2337 } else {
2338 task_unlock(task);
2339 }
1c79356b 2340
2341 /*
2342 * Give the machine dependent code a chance
2343 * to perform cleanup of task-level resources
2344 * associated with the current thread before
2345 * ripping apart the task.
2346 */
2347 machine_task_terminate(task);
2348
2349 /*
2350 * Destroy all synchronizers owned by the task.
2351 */
2352 task_synchronizer_destroy_all(task);
2353
2354 /*
2355 * Destroy the contents of the IPC space, leaving just
2356 * a reference for it.
e7c99d92 2357 */
55e303ae 2358 ipc_space_clean(task->itk_space);
2359
2360 /*
2361 * Clean out the address space, as we are going to be
2362 * getting a new one.
2363 */
91447636 2364 vm_map_remove(task->map, task->map->min_offset,
2365 task->map->max_offset,
2366 /* no unnesting on final cleanup: */
2367 VM_MAP_REMOVE_NO_UNNESTING);
1c79356b 2368
2369 /*
2370 * Kick out any IOKitUser handles to the task. At best they're stale,
2371 * at worst someone is racing a SUID exec.
2372 */
2373 iokit_task_terminate(task);
2374}
2375
2376/*
2377 * task_hold_locked:
2378 *
2379 * Suspend execution of the specified task.
2380 * This is a recursive-style suspension of the task, a count of
2381 * suspends is maintained.
2382 *
2383 * CONDITIONS: the task is locked and active.
2384 */
2385void
2386task_hold_locked(
39037602 2387 task_t task)
1c79356b 2388{
39037602 2389 thread_t thread;
2390
2391 assert(task->active);
2392
2393 if (task->suspend_count++ > 0)
2394 return;
2395
2396 /*
91447636 2397 * Iterate through all the threads and hold them.
1c79356b 2398 */
2399 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2400 thread_mtx_lock(thread);
2401 thread_hold(thread);
2402 thread_mtx_unlock(thread);
2403 }
2404}
2405
2406/*
2407 * task_hold:
2408 *
2409 * Same as the internal routine above, except that it must lock
2410 * and verify that the task is active. This differs from task_suspend
2411 * in that it places a kernel hold on the task rather than just a
2412 * user-level hold. This keeps users from over-resuming and setting
2413 * it running out from under the kernel.
2414 *
2415 * CONDITIONS: the caller holds a reference on the task
2416 */
2417kern_return_t
91447636 2418task_hold(
39037602 2419 task_t task)
1c79356b 2420{
2421 if (task == TASK_NULL)
2422 return (KERN_INVALID_ARGUMENT);
91447636 2423
1c79356b 2424 task_lock(task);
91447636 2425
2426 if (!task->active) {
2427 task_unlock(task);
91447636 2428
2429 return (KERN_FAILURE);
2430 }
1c79356b 2431
2432 task_hold_locked(task);
2433 task_unlock(task);
2434
2435 return (KERN_SUCCESS);
2436}
2437
2438kern_return_t
2439task_wait(
2440 task_t task,
2441 boolean_t until_not_runnable)
2442{
2443 if (task == TASK_NULL)
2444 return (KERN_INVALID_ARGUMENT);
2445
2446 task_lock(task);
2447
2448 if (!task->active) {
2449 task_unlock(task);
2450
2451 return (KERN_FAILURE);
2452 }
2453
2454 task_wait_locked(task, until_not_runnable);
2455 task_unlock(task);
2456
2457 return (KERN_SUCCESS);
2458}
2459
1c79356b 2460/*
2461 * task_wait_locked:
2462 *
2463 * Wait for all threads in task to stop.
2464 *
2465 * Conditions:
2466 * Called with task locked, active, and held.
2467 */
2468void
2469task_wait_locked(
39037602 2470 task_t task,
316670eb 2471 boolean_t until_not_runnable)
1c79356b 2472{
39037602 2473 thread_t thread, self;
2474
2475 assert(task->active);
2476 assert(task->suspend_count > 0);
2477
2478 self = current_thread();
2479
1c79356b 2480 /*
91447636 2481 * Iterate through all the threads and wait for them to
2482 * stop. Do not wait for the current thread if it is within
2483 * the task.
2484 */
2485 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2486 if (thread != self)
316670eb 2487 thread_wait(thread, until_not_runnable);
2488 }
2489}
2490
2491/*
2492 * task_release_locked:
2493 *
2494 * Release a kernel hold on a task.
2495 *
2496 * CONDITIONS: the task is locked and active
2497 */
2498void
2499task_release_locked(
39037602 2500 task_t task)
1c79356b 2501{
39037602 2502 thread_t thread;
2503
2504 assert(task->active);
9bccf70c 2505 assert(task->suspend_count > 0);
1c79356b 2506
2507 if (--task->suspend_count > 0)
2508 return;
1c79356b 2509
2510 queue_iterate(&task->threads, thread, thread_t, task_threads) {
2511 thread_mtx_lock(thread);
2512 thread_release(thread);
2513 thread_mtx_unlock(thread);
2514 }
2515}
2516
2517/*
2518 * task_release:
2519 *
2520 * Same as the internal routine above, except that it must lock
2521 * and verify that the task is active.
2522 *
2523 * CONDITIONS: The caller holds a reference to the task
2524 */
2525kern_return_t
2526task_release(
2527 task_t task)
1c79356b 2528{
2529 if (task == TASK_NULL)
2530 return (KERN_INVALID_ARGUMENT);
91447636 2531
1c79356b 2532 task_lock(task);
91447636 2533
2534 if (!task->active) {
2535 task_unlock(task);
91447636 2536
2537 return (KERN_FAILURE);
2538 }
1c79356b 2539
2540 task_release_locked(task);
2541 task_unlock(task);
2542
2543 return (KERN_SUCCESS);
2544}
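/*
 * Editor's sketch (illustrative only): a kernel caller holding a task
 * reference would pair these primitives like so:
 *
 *	if (task_hold(task) == KERN_SUCCESS) {
 *		task_wait(task, FALSE);		// wait for threads to stop
 *		... examine or manipulate the stopped task ...
 *		task_release(task);
 *	}
 */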
2545
2546kern_return_t
2547task_threads(
2548 task_t task,
2549 thread_act_array_t *threads_out,
2550 mach_msg_type_number_t *count)
2551{
91447636 2552 mach_msg_type_number_t actual;
2d21ac55 2553 thread_t *thread_list;
2554 thread_t thread;
2555 vm_size_t size, size_needed;
2556 void *addr;
2557 unsigned int i, j;
2558
2559 if (task == TASK_NULL)
91447636 2560 return (KERN_INVALID_ARGUMENT);
1c79356b 2561
2d21ac55 2562 size = 0; addr = NULL;
2563
2564 for (;;) {
2565 task_lock(task);
2566 if (!task->active) {
2567 task_unlock(task);
91447636 2568
2569 if (size != 0)
2570 kfree(addr, size);
2571
2572 return (KERN_FAILURE);
2573 }
2574
55e303ae 2575 actual = task->thread_count;
2576
2577 /* do we have the memory we need? */
91447636 2578 size_needed = actual * sizeof (mach_port_t);
2579 if (size_needed <= size)
2580 break;
2581
2582 /* unlock the task and allocate more memory */
2583 task_unlock(task);
2584
2585 if (size != 0)
2586 kfree(addr, size);
2587
2588 assert(size_needed > 0);
2589 size = size_needed;
2590
2591 addr = kalloc(size);
2592 if (addr == 0)
91447636 2593 return (KERN_RESOURCE_SHORTAGE);
2594 }
2595
2596 /* OK, have memory and the task is locked & active */
2d21ac55 2597 thread_list = (thread_t *)addr;
2598
2599 i = j = 0;
2600
2601 for (thread = (thread_t)queue_first(&task->threads); i < actual;
2602 ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
2603 thread_reference_internal(thread);
2d21ac55 2604 thread_list[j++] = thread;
1c79356b 2605 }
2606
2607 assert(queue_end(&task->threads, (queue_entry_t)thread));
2608
2609 actual = j;
91447636 2610 size_needed = actual * sizeof (mach_port_t);
1c79356b 2611
91447636 2612 /* can unlock task now that we've got the thread refs */
2613 task_unlock(task);
2614
2615 if (actual == 0) {
91447636 2616 /* no threads, so return null pointer and deallocate memory */
1c79356b 2617
2d21ac55 2618 *threads_out = NULL;
2619 *count = 0;
2620
2621 if (size != 0)
2622 kfree(addr, size);
2623 }
2624 else {
2625 /* if we allocated too much, must copy */
2626
2627 if (size_needed < size) {
91447636 2628 void *newaddr;
2629
2630 newaddr = kalloc(size_needed);
2631 if (newaddr == 0) {
91447636 2632 for (i = 0; i < actual; ++i)
2d21ac55 2633 thread_deallocate(thread_list[i]);
1c79356b 2634 kfree(addr, size);
91447636 2635 return (KERN_RESOURCE_SHORTAGE);
2636 }
2637
91447636 2638 bcopy(addr, newaddr, size_needed);
1c79356b 2639 kfree(addr, size);
2d21ac55 2640 thread_list = (thread_t *)newaddr;
2641 }
2642
2d21ac55 2643 *threads_out = thread_list;
2644 *count = actual;
2645
2646 /* do the conversion that Mig should handle */
2647
91447636 2648 for (i = 0; i < actual; ++i)
2d21ac55 2649 ((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
2650 }
2651
91447636 2652 return (KERN_SUCCESS);
2653}
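/*
 * Editor's sketch of a user-space caller (not from this file): the
 * reply is an out-of-line array of thread ports that the caller owns
 * and must release:
 *
 *	thread_act_array_t acts;
 *	mach_msg_type_number_t count;
 *	if (task_threads(task, &acts, &count) == KERN_SUCCESS) {
 *		for (mach_msg_type_number_t i = 0; i < count; i++)
 *			mach_port_deallocate(mach_task_self(), acts[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t)acts,
 *		    count * sizeof(acts[0]));
 *	}
 */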
2654
2655#define TASK_HOLD_NORMAL 0
2656#define TASK_HOLD_PIDSUSPEND 1
2657#define TASK_HOLD_LEGACY 2
2658#define TASK_HOLD_LEGACY_ALL 3
2659
2660static kern_return_t
2661place_task_hold (
39037602 2662 task_t task,
39236c6e 2663 int mode)
316670eb 2664{
39037602 2665 if (!task->active && !task_is_a_corpse(task)) {
2666 return (KERN_FAILURE);
2667 }
91447636 2668
2669 /* Return success for corpse task */
2670 if (task_is_a_corpse(task)) {
2671 return KERN_SUCCESS;
2672 }
2673
2674 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2675 MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_SUSPEND) | DBG_FUNC_NONE,
3e170ce0 2676 task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
2677 task->user_stop_count, task->user_stop_count + 1, 0);
2678
2679#if MACH_ASSERT
2680 current_task()->suspends_outstanding++;
2681#endif
2682
2683 if (mode == TASK_HOLD_LEGACY)
2684 task->legacy_stop_count++;
2685
91447636 2686 if (task->user_stop_count++ > 0) {
2687 /*
2688 * If the stop count was positive, the task is
2689 * already stopped and we can exit.
2690 */
2691 return (KERN_SUCCESS);
2692 }
2693
2694 /*
2695 * Put a kernel-level hold on the threads in the task (all
2696 * user-level task suspensions added together represent a
2697 * single kernel-level hold). We then wait for the threads
2698 * to stop executing user code.
2699 */
2700 task_hold_locked(task);
39236c6e 2701 task_wait_locked(task, FALSE);
2702
2703 return (KERN_SUCCESS);
2704}
2705
2706static kern_return_t
2707release_task_hold (
39037602 2708 task_t task,
39236c6e 2709 int mode)
316670eb 2710{
39037602 2711 boolean_t release = FALSE;
316670eb 2712
39037602 2713 if (!task->active && !task_is_a_corpse(task)) {
2714 return (KERN_FAILURE);
2715 }
2716
2717 /* Return success for corpse task */
2718 if (task_is_a_corpse(task)) {
2719 return KERN_SUCCESS;
2720 }
316670eb 2721
39236c6e 2722 if (mode == TASK_HOLD_PIDSUSPEND) {
316670eb 2723 if (task->pidsuspended == FALSE) {
39236c6e 2724 return (KERN_FAILURE);
2725 }
2726 task->pidsuspended = FALSE;
2727 }
2728
2729 if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
2730
2731 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
2732 MACHDBG_CODE(DBG_MACH_IPC,MACH_TASK_RESUME) | DBG_FUNC_NONE,
3e170ce0 2733 task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
2734 task->user_stop_count, mode, task->legacy_stop_count);
2735
2736#if MACH_ASSERT
2737 /*
2738 * This is obviously not robust; if we suspend one task and then resume a different one,
2739 * we'll fly under the radar. This is only meant to catch the common case of a crashed
2740 * or buggy suspender.
2741 */
2742 current_task()->suspends_outstanding--;
2743#endif
2744
2745 if (mode == TASK_HOLD_LEGACY_ALL) {
2746 if (task->legacy_stop_count >= task->user_stop_count) {
2747 task->user_stop_count = 0;
2748 release = TRUE;
2749 } else {
2750 task->user_stop_count -= task->legacy_stop_count;
2751 }
2752 task->legacy_stop_count = 0;
2753 } else {
2754 if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0)
2755 task->legacy_stop_count--;
2756 if (--task->user_stop_count == 0)
2757 release = TRUE;
2758 }
2759 }
2760 else {
2761 return (KERN_FAILURE);
2762 }
2763
2764 /*
2765 * Release the task if necessary.
2766 */
2767 if (release)
2768 task_release_locked(task);
2769
2770 return (KERN_SUCCESS);
2771}
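/*
 * Editor's worked example of the stop-count bookkeeping above
 * (illustrative values):
 *
 *	place_task_hold(task, TASK_HOLD_LEGACY);	// user 0->1, legacy 0->1
 *	place_task_hold(task, TASK_HOLD_NORMAL);	// user 1->2
 *	release_task_hold(task, TASK_HOLD_LEGACY_ALL);
 *		// legacy (1) < user (2): user -= legacy -> 1,
 *		// legacy resets to 0, task stays held
 *	release_task_hold(task, TASK_HOLD_NORMAL);	// user 1->0, task released
 */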
2772
39236c6e 2773
2774/*
2775 * task_suspend:
2776 *
2777 * Implement an (old-fashioned) user-level suspension on a task.
2778 *
2779 * Because the user isn't expecting to have to manage a suspension
2780 * token, we'll track it for the caller in the kernel in the form of a naked
2781 * send right to the task's resume port. All such send rights
2782 * account for a single suspension against the task (unlike task_suspend2()
2783 * where each caller gets a unique suspension count represented by a
2784 * unique send-once right).
2785 *
2786 * Conditions:
2787 * The caller holds a reference to the task
2788 */
2789kern_return_t
2790task_suspend(
39037602 2791 task_t task)
316670eb 2792{
2793 kern_return_t kr;
2794 mach_port_t port, send, old_notify;
2795 mach_port_name_t name;
2796
2797 if (task == TASK_NULL || task == kernel_task)
2798 return (KERN_INVALID_ARGUMENT);
2799
2800 task_lock(task);
2801
2802 /*
2803 * Claim a send right on the task resume port, and request a no-senders
2804 * notification on that port (if none outstanding).
2805 */
2806 if (task->itk_resume == IP_NULL) {
2807 task->itk_resume = ipc_port_alloc_kernel();
2808 if (!IP_VALID(task->itk_resume))
2809 panic("failed to create resume port");
2810 ipc_kobject_set(task->itk_resume, (ipc_kobject_t)task, IKOT_TASK_RESUME);
2811 }
2812
2813 port = task->itk_resume;
2814 ip_lock(port);
2815 assert(ip_active(port));
2816
2817 send = ipc_port_make_send_locked(port);
2818 assert(IP_VALID(send));
2819
2820 if (port->ip_nsrequest == IP_NULL) {
2821 ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
2822 assert(old_notify == IP_NULL);
2823 /* port unlocked */
2824 } else {
2825 ip_unlock(port);
2826 }
2827
2828 /*
2829 * place a legacy hold on the task.
2830 */
2831 kr = place_task_hold(task, TASK_HOLD_LEGACY);
2832 if (kr != KERN_SUCCESS) {
2833 task_unlock(task);
2834 ipc_port_release_send(send);
2835 return kr;
2836 }
91447636 2837
1c79356b 2838 task_unlock(task);
91447636 2839
2840 /*
2841 * Copyout the send right into the calling task's IPC space. It won't know it is there,
2842 * but we'll look it up when calling a traditional resume. Any IPC operations that
2843 * deallocate the send right will auto-release the suspension.
2844 */
2845 if ((kr = ipc_kmsg_copyout_object(current_task()->itk_space, (ipc_object_t)send,
2846 MACH_MSG_TYPE_MOVE_SEND, &name)) != KERN_SUCCESS) {
2847 printf("warning: %s(%d) failed to copyout suspension token for pid %d with error: %d\n",
2848 proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
2849 task_pid(task), kr);
2850 return (kr);
2851 }
2852
316670eb 2853 return (kr);
2854}
2855
2856/*
91447636 2857 * task_resume:
39236c6e 2858 * Release a user hold on a task.
2859 *
2860 * Conditions:
2861 * The caller holds a reference to the task
2862 */
2863kern_return_t
91447636 2864task_resume(
39037602 2865 task_t task)
1c79356b 2866{
316670eb 2867 kern_return_t kr;
2868 mach_port_name_t resume_port_name;
2869 ipc_entry_t resume_port_entry;
2870 ipc_space_t space = current_task()->itk_space;
2871
2872 if (task == TASK_NULL || task == kernel_task )
2873 return (KERN_INVALID_ARGUMENT);
2874
2875 /* release a legacy task hold */
2876 task_lock(task);
2877 kr = release_task_hold(task, TASK_HOLD_LEGACY);
2878 task_unlock(task);
2879
2880 is_write_lock(space);
2881 if (is_active(space) && IP_VALID(task->itk_resume) &&
2882 ipc_hash_lookup(space, (ipc_object_t)task->itk_resume, &resume_port_name, &resume_port_entry) == TRUE) {
2883 /*
2884 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
2885 * we are holding one less legacy hold on the task from this caller. If the release failed,
2886 * go ahead and drop all the rights, as someone either already released our holds or the task
2887 * is gone.
2888 */
2889 if (kr == KERN_SUCCESS)
2890 ipc_right_dealloc(space, resume_port_name, resume_port_entry);
2891 else
2892 ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
2893 /* space unlocked */
2894 } else {
2895 is_write_unlock(space);
2896 if (kr == KERN_SUCCESS)
3e170ce0 2897 printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
39236c6e 2898 proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
3e170ce0 2899 task_pid(task));
2900 }
2901
2902 return kr;
2903}
1c79356b 2904
2905/*
2906 * Suspend the target task.
2907 * Making/holding a token/reference/port is the caller's responsibility.
2908 */
2909kern_return_t
2910task_suspend_internal(task_t task)
2911{
2912 kern_return_t kr;
2913
2914 if (task == TASK_NULL || task == kernel_task)
2915 return (KERN_INVALID_ARGUMENT);
1c79356b 2916
1c79356b 2917 task_lock(task);
2918 kr = place_task_hold(task, TASK_HOLD_NORMAL);
2919 task_unlock(task);
2920 return (kr);
2921}
2922
2923/*
2924 * Suspend the target task, and return a suspension token. The token
2925 * represents a reference on the suspended task.
2926 */
2927kern_return_t
2928task_suspend2(
39037602 2929 task_t task,
2930 task_suspension_token_t *suspend_token)
2931{
2932 kern_return_t kr;
2933
2934 kr = task_suspend_internal(task);
2935 if (kr != KERN_SUCCESS) {
2936 *suspend_token = TASK_NULL;
2937 return (kr);
2938 }
2939
2940 /*
2941 * Take a reference on the target task and return that to the caller
2942 * as a "suspension token," which can be converted into an SO right to
2943 * the now-suspended task's resume port.
2944 */
2945 task_reference_internal(task);
2946 *suspend_token = task;
2947
2948 return (KERN_SUCCESS);
2949}
2950
2951/*
2952 * Resume the task
2953 * (reference/token/port management is caller's responsibility).
2954 */
2955kern_return_t
2956task_resume_internal(
39037602 2957 task_suspension_token_t task)
2958{
2959 kern_return_t kr;
91447636 2960
2961 if (task == TASK_NULL || task == kernel_task)
2962 return (KERN_INVALID_ARGUMENT);
91447636 2963
2964 task_lock(task);
2965 kr = release_task_hold(task, TASK_HOLD_NORMAL);
316670eb 2966 task_unlock(task);
2967 return (kr);
2968}
2969
2970/*
2971 * Resume the task using a suspension token. Consumes the token's ref.
2972 */
2973kern_return_t
2974task_resume2(
39037602 2975 task_suspension_token_t task)
2976{
2977 kern_return_t kr;
2978
2979 kr = task_resume_internal(task);
2980 task_suspension_token_deallocate(task);
91447636 2981
2982 return (kr);
2983}
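/*
 * Editor's sketch (illustrative, from a client's point of view) of how
 * the two suspension APIs pair up; `target` is a hypothetical task port
 * the caller already holds:
 *
 *	// Old style: the kernel hides the resume right in our IPC space.
 *	task_suspend(target);
 *	task_resume(target);
 *
 *	// Token style: each suspension is an explicit token, which is
 *	// also a reference on the suspended task.
 *	task_suspension_token_t token;
 *	task_suspend2(target, &token);
 *	task_resume2(token);	// consumes the token's reference
 */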
2984
2985boolean_t
2986task_suspension_notify(mach_msg_header_t *request_header)
2987{
2988 ipc_port_t port = (ipc_port_t) request_header->msgh_remote_port;
2989 task_t task = convert_port_to_task_suspension_token(port);
2990 mach_msg_type_number_t not_count;
2991
2992 if (task == TASK_NULL || task == kernel_task)
2993 return TRUE; /* nothing to do */
2994
2995 switch (request_header->msgh_id) {
2996
2997 case MACH_NOTIFY_SEND_ONCE:
2998 /* release the hold held by this specific send-once right */
2999 task_lock(task);
3000 release_task_hold(task, TASK_HOLD_NORMAL);
3001 task_unlock(task);
3002 break;
3003
3004 case MACH_NOTIFY_NO_SENDERS:
3005 not_count = ((mach_no_senders_notification_t *)request_header)->not_count;
3006
3007 task_lock(task);
3008 ip_lock(port);
3009 if (port->ip_mscount == not_count) {
3010
3011 /* release all the [remaining] outstanding legacy holds */
3012 assert(port->ip_nsrequest == IP_NULL);
3013 ip_unlock(port);
3014 release_task_hold(task, TASK_HOLD_LEGACY_ALL);
3015 task_unlock(task);
3016
3017 } else if (port->ip_nsrequest == IP_NULL) {
3018 ipc_port_t old_notify;
3019
3020 task_unlock(task);
3021 /* new send rights, re-arm notification at current make-send count */
3022 ipc_port_nsrequest(port, port->ip_mscount, ipc_port_make_sonce_locked(port), &old_notify);
3023 assert(old_notify == IP_NULL);
3024 /* port unlocked */
3025 } else {
3026 ip_unlock(port);
3027 task_unlock(task);
3028 }
3029 break;
3030
3031 default:
3032 break;
3033 }
3034
3035 task_suspension_token_deallocate(task); /* drop token reference */
3036 return TRUE;
3037}
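/*
 * (Editor's note: MACH_NOTIFY_SEND_ONCE arrives when a task_suspend2()
 * send-once right dies unused and drops that single hold;
 * MACH_NOTIFY_NO_SENDERS arrives when every legacy task_suspend() send
 * right is gone and drops all remaining legacy holds at once.)
 */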
3038
3039kern_return_t
3040task_pidsuspend_locked(task_t task)
3041{
3042 kern_return_t kr;
3043
3044 if (task->pidsuspended) {
3045 kr = KERN_FAILURE;
3046 goto out;
1c79356b 3047 }
91447636 3048
3049 task->pidsuspended = TRUE;
3050
39236c6e 3051 kr = place_task_hold(task, TASK_HOLD_PIDSUSPEND);
3052 if (kr != KERN_SUCCESS) {
3053 task->pidsuspended = FALSE;
1c79356b 3054 }
3055out:
3056 return(kr);
3057}
1c79356b 3058
3059
3060/*
3061 * task_pidsuspend:
3062 *
3063 * Suspends a task by placing a hold on its threads.
3064 *
3065 * Conditions:
3066 * The caller holds a reference to the task
3067 */
3068kern_return_t
3069task_pidsuspend(
39037602 3070 task_t task)
3071{
3072 kern_return_t kr;
3073
3074 if (task == TASK_NULL || task == kernel_task)
3075 return (KERN_INVALID_ARGUMENT);
3076
3077 task_lock(task);
3078
3079 kr = task_pidsuspend_locked(task);
3080
3081 task_unlock(task);
91447636 3082
3083 return (kr);
3084}
3085
3086/*
3087 * task_pidresume:
3088 * Resumes a previously suspended task.
3089 *
3090 * Conditions:
3091 * The caller holds a reference to the task
3092 */
3093kern_return_t
3094task_pidresume(
39037602 3095 task_t task)
3096{
3097 kern_return_t kr;
3098
3099 if (task == TASK_NULL || task == kernel_task)
3100 return (KERN_INVALID_ARGUMENT);
3101
3102 task_lock(task);
3103
39037602 3104#if CONFIG_FREEZE
316670eb 3105
39236c6e 3106 while (task->changing_freeze_state) {
316670eb 3107
3108 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3109 task_unlock(task);
3110 thread_block(THREAD_CONTINUE_NULL);
316670eb 3111
39236c6e 3112 task_lock(task);
316670eb 3113 }
3114 task->changing_freeze_state = TRUE;
3115#endif
3116
3117 kr = release_task_hold(task, TASK_HOLD_PIDSUSPEND);
3118
3119 task_unlock(task);
3120
39037602 3121#if CONFIG_FREEZE
39236c6e 3122
3123 task_lock(task);
3124
3125 if (kr == KERN_SUCCESS)
3126 task->frozen = FALSE;
3127 task->changing_freeze_state = FALSE;
3128 thread_wakeup(&task->changing_freeze_state);
3129
3130 task_unlock(task);
3131#endif
3132
3133 return (kr);
3134}
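/*
 * Editor's sketch (illustrative only): pidsuspend/pidresume place and
 * drop a single TASK_HOLD_PIDSUSPEND hold:
 *
 *	if (task_pidsuspend(task) == KERN_SUCCESS) {
 *		... the task's threads are held at the user boundary ...
 *		task_pidresume(task);
 *	}
 */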
3135
3136
3137#if DEVELOPMENT || DEBUG
3138
3139extern void IOSleep(int);
3140
3141kern_return_t
3142task_disconnect_page_mappings(task_t task)
3143{
3144 int n;
3145
3146 if (task == TASK_NULL || task == kernel_task)
3147 return (KERN_INVALID_ARGUMENT);
3148
3149 /*
3150 * this function is used to strip all of the mappings from
3151 * the pmap for the specified task to force the task to
3152 * re-fault all of the pages it is actively using... this
3153 * allows us to approximate the true working set of the
3154 * specified task. We only engage if at least 1 of the
3155 * threads in the task is runnable, but we want to continuously
3156 * sweep (at least for a while - I've arbitrarily set the limit at
3157 * 100 sweeps to be re-looked at as we gain experience) to get a better
3158 * view into what areas within a page are being visited (as opposed to only
3159 * seeing the first fault of a page after the task becomes
3160 * runnable)... in the future I may
3161 * try to block until awakened by a thread in this task
3162 * being made runnable, but for now we'll periodically poll from the
3163 * user level debug tool driving the sysctl
3164 */
3165 for (n = 0; n < 100; n++) {
3166 thread_t thread;
3167 boolean_t runnable;
3168 boolean_t do_unnest;
3169 int page_count;
3170
3171 runnable = FALSE;
3172 do_unnest = FALSE;
3173
3174 task_lock(task);
3175
3176 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3177
3178 if (thread->state & TH_RUN) {
3179 runnable = TRUE;
3180 break;
3181 }
3182 }
3183 if (n == 0)
3184 task->task_disconnected_count++;
3185
3186 if (task->task_unnested == FALSE) {
3187 if (runnable == TRUE) {
3188 task->task_unnested = TRUE;
3189 do_unnest = TRUE;
3190 }
3191 }
3192 task_unlock(task);
3193
3194 if (runnable == FALSE)
3195 break;
3196
3197 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_START,
3198 task, do_unnest, task->task_disconnected_count, 0, 0);
3199
3200 page_count = vm_map_disconnect_page_mappings(task->map, do_unnest);
3201
3202 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_TASK_PAGE_MAPPINGS)) | DBG_FUNC_END,
3203 task, page_count, 0, 0, 0);
3204
3205 if ((n % 5) == 4)
3206 IOSleep(1);
3207 }
3208 return (KERN_SUCCESS);
3209}
3210
3211#endif
3212
3213
3214#if CONFIG_FREEZE
3215
3216/*
3217 * task_freeze:
3218 *
316670eb 3219 * Freeze a task.
3220 *
3221 * Conditions:
3222 * The caller holds a reference to the task
3223 */
3224extern void vm_wake_compactor_swapper();
3225extern queue_head_t c_swapout_list_head;
3226
3227kern_return_t
3228task_freeze(
39037602 3229 task_t task,
3230 uint32_t *purgeable_count,
3231 uint32_t *wired_count,
3232 uint32_t *clean_count,
3233 uint32_t *dirty_count,
316670eb 3234 uint32_t dirty_budget,
3235 boolean_t *shared,
3236 boolean_t walk_only)
3237{
39037602 3238 kern_return_t kr = KERN_SUCCESS;
316670eb 3239
3240 if (task == TASK_NULL || task == kernel_task)
3241 return (KERN_INVALID_ARGUMENT);
3242
3243 task_lock(task);
3244
3245 while (task->changing_freeze_state) {
3246
3247 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3248 task_unlock(task);
3249 thread_block(THREAD_CONTINUE_NULL);
3250
3251 task_lock(task);
3252 }
316670eb 3253 if (task->frozen) {
3254 task_unlock(task);
3255 return (KERN_FAILURE);
316670eb 3256 }
39236c6e 3257 task->changing_freeze_state = TRUE;
3258
3259 task_unlock(task);
3260
6d2010ae 3261 if (walk_only) {
39037602 3262 panic("task_freeze - walk_only == TRUE");
6d2010ae 3263 } else {
316670eb 3264 kr = vm_map_freeze(task->map, purgeable_count, wired_count, clean_count, dirty_count, dirty_budget, shared);
3265 }
3266
3267 task_lock(task);
3268
3269 if (walk_only == FALSE && kr == KERN_SUCCESS)
3270 task->frozen = TRUE;
3271 task->changing_freeze_state = FALSE;
3272 thread_wakeup(&task->changing_freeze_state);
3273
3274 task_unlock(task);
3275
39037602 3276 if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
3277 vm_wake_compactor_swapper();
3278 /*
3279 * We do an explicit wakeup of the swapout thread here
3280 * because the compact_and_swap routines don't have
3281 * knowledge about these kind of "per-task packed c_segs"
3282 * and so will not be evaluating whether we need to do
3283 * a wakeup there.
3284 */
3285 thread_wakeup((event_t)&c_swapout_list_head);
3286 }
3287
316670eb 3288 return (kr);
3289}
3290
3291/*
3292 * task_thaw:
3293 *
3294 * Thaw a currently frozen task.
3295 *
3296 * Conditions:
3297 * The caller holds a reference to the task
3298 */
3299kern_return_t
3300task_thaw(
39037602 3301 task_t task)
3302{
3303 if (task == TASK_NULL || task == kernel_task)
3304 return (KERN_INVALID_ARGUMENT);
3305
3306 task_lock(task);
3307
3308 while (task->changing_freeze_state) {
3309
3310 assert_wait((event_t)&task->changing_freeze_state, THREAD_UNINT);
3311 task_unlock(task);
3312 thread_block(THREAD_CONTINUE_NULL);
3313
3314 task_lock(task);
3315 }
316670eb 3316 if (!task->frozen) {
3317 task_unlock(task);
3318 return (KERN_FAILURE);
316670eb 3319 }
39037602 3320 task->frozen = FALSE;
39236c6e 3321
3322 task_unlock(task);
3323
39037602 3324 return (KERN_SUCCESS);
3325}
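/*
 * Editor's sketch (illustrative only) of the freeze/thaw pairing;
 * callers serialize on task->changing_freeze_state, so a typical
 * sequence is simply:
 *
 *	uint32_t purgeable, wired, clean, dirty;
 *	boolean_t shared;
 *	kr = task_freeze(task, &purgeable, &wired, &clean, &dirty,
 *	    dirty_budget, &shared, FALSE);	// walk_only == FALSE
 *	... later ...
 *	kr = task_thaw(task);
 */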
3326
3327#endif /* CONFIG_FREEZE */
3328
3329kern_return_t
3330host_security_set_task_token(
3331 host_security_t host_security,
3332 task_t task,
3333 security_token_t sec_token,
55e303ae 3334 audit_token_t audit_token,
3335 host_priv_t host_priv)
3336{
55e303ae 3337 ipc_port_t host_port;
3338 kern_return_t kr;
3339
3340 if (task == TASK_NULL)
3341 return(KERN_INVALID_ARGUMENT);
3342
3343 if (host_security == HOST_NULL)
3344 return(KERN_INVALID_SECURITY);
3345
3346 task_lock(task);
3347 task->sec_token = sec_token;
55e303ae 3348 task->audit_token = audit_token;
3349
3350 task_unlock(task);
3351
3352 if (host_priv != HOST_PRIV_NULL) {
55e303ae 3353 kr = host_get_host_priv_port(host_priv, &host_port);
1c79356b 3354 } else {
55e303ae 3355 kr = host_get_host_port(host_priv_self(), &host_port);
1c79356b 3356 }
3357 assert(kr == KERN_SUCCESS);
3358 kr = task_set_special_port(task, TASK_HOST_PORT, host_port);
3359 return(kr);
3360}
3361
3362kern_return_t
3363task_send_trace_memory(
3364 task_t target_task,
3365 __unused uint32_t pid,
3366 __unused uint64_t uniqueid)
3367{
3368 kern_return_t kr = KERN_INVALID_ARGUMENT;
3369 if (target_task == TASK_NULL)
3370 return (KERN_INVALID_ARGUMENT);
3371
3372#if CONFIG_ATM
3373 kr = atm_send_proc_inspect_notification(target_task,
3374 pid,
3375 uniqueid);
3376
3377#endif
3378 return (kr);
3379}
3380/*
3381 * This routine was added, pretty much exclusively, for registering the
3382 * RPC glue vector for in-kernel short circuited tasks. Rather than
3383 * removing it completely, I have only disabled that feature (which was
3384 * the only feature at the time). It just appears that we are going to
3385 * want to add some user data to tasks in the future (i.e. bsd info,
3386 * task names, etc...), so I left it in the formal task interface.
3387 */
3388kern_return_t
3389task_set_info(
3390 task_t task,
3391 task_flavor_t flavor,
3392 __unused task_info_t task_info_in, /* pointer to IN array */
3393 __unused mach_msg_type_number_t task_info_count)
1c79356b 3394{
3395 if (task == TASK_NULL)
3396 return(KERN_INVALID_ARGUMENT);
3397
3398 switch (flavor) {
3399
3400#if CONFIG_ATM
3401 case TASK_TRACE_MEMORY_INFO:
3402 {
3403 if (task_info_count != TASK_TRACE_MEMORY_INFO_COUNT)
3404 return (KERN_INVALID_ARGUMENT);
3405
3406 assert(task_info_in != NULL);
3407 task_trace_memory_info_t mem_info;
3408 mem_info = (task_trace_memory_info_t) task_info_in;
3409 kern_return_t kr = atm_register_trace_memory(task,
3410 mem_info->user_memory_address,
3e170ce0 3411 mem_info->buffer_size);
fe8ab488 3412 return kr;
3413 }
3414
3415#endif
3416 default:
3417 return (KERN_INVALID_ARGUMENT);
3418 }
3419 return (KERN_SUCCESS);
3420}
3421
3e170ce0 3422int radar_20146450 = 1;
3423kern_return_t
3424task_info(
3425 task_t task,
3426 task_flavor_t flavor,
3427 task_info_t task_info_out,
3428 mach_msg_type_number_t *task_info_count)
3429{
b0d623f7 3430 kern_return_t error = KERN_SUCCESS;
39037602 3431 mach_msg_type_number_t original_task_info_count;
b0d623f7 3432
1c79356b 3433 if (task == TASK_NULL)
91447636 3434 return (KERN_INVALID_ARGUMENT);
1c79356b 3435
39037602 3436 original_task_info_count = *task_info_count;
3437 task_lock(task);
3438
3439 if ((task != current_task()) && (!task->active)) {
3440 task_unlock(task);
3441 return (KERN_INVALID_ARGUMENT);
3442 }
3443
3444 switch (flavor) {
3445
91447636 3446 case TASK_BASIC_INFO_32:
2d21ac55 3447 case TASK_BASIC2_INFO_32:
91447636
A
3448 {
3449 task_basic_info_32_t basic_info;
b0d623f7
A
3450 vm_map_t map;
3451 clock_sec_t secs;
3452 clock_usec_t usecs;
1c79356b 3453
b0d623f7
A
3454 if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
3455 error = KERN_INVALID_ARGUMENT;
3456 break;
3457 }
1c79356b 3458
91447636 3459 basic_info = (task_basic_info_32_t)task_info_out;
1c79356b 3460
91447636 3461 map = (task == kernel_task)? kernel_map: task->map;
b0d623f7 3462 basic_info->virtual_size = (typeof(basic_info->virtual_size))map->size;
2d21ac55
A
3463 if (flavor == TASK_BASIC2_INFO_32) {
3464 /*
3465 * The "BASIC2" flavor gets the maximum resident
3466 * size instead of the current resident size...
3467 */
3468 basic_info->resident_size = pmap_resident_max(map->pmap);
3469 } else {
3470 basic_info->resident_size = pmap_resident_count(map->pmap);
3471 }
3472 basic_info->resident_size *= PAGE_SIZE;
1c79356b 3473
0b4e3aa0
A
3474 basic_info->policy = ((task != kernel_task)?
3475 POLICY_TIMESHARE: POLICY_RR);
1c79356b 3476 basic_info->suspend_count = task->user_stop_count;
91447636 3477
b0d623f7
A
3478 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3479 basic_info->user_time.seconds =
3480 (typeof(basic_info->user_time.seconds))secs;
3481 basic_info->user_time.microseconds = usecs;
3482
3483 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3484 basic_info->system_time.seconds =
3485 (typeof(basic_info->system_time.seconds))secs;
3486 basic_info->system_time.microseconds = usecs;
1c79356b 3487
91447636 3488 *task_info_count = TASK_BASIC_INFO_32_COUNT;
1c79356b 3489 break;
91447636 3490 }
1c79356b 3491
91447636
A
3492 case TASK_BASIC_INFO_64:
3493 {
3494 task_basic_info_64_t basic_info;
b0d623f7
A
3495 vm_map_t map;
3496 clock_sec_t secs;
3497 clock_usec_t usecs;
1c79356b 3498
b0d623f7
A
3499 if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
3500 error = KERN_INVALID_ARGUMENT;
3501 break;
3502 }
91447636
A
3503
3504 basic_info = (task_basic_info_64_t)task_info_out;
3505
3506 map = (task == kernel_task)? kernel_map: task->map;
3507 basic_info->virtual_size = map->size;
2d21ac55
A
3508 basic_info->resident_size =
3509 (mach_vm_size_t)(pmap_resident_count(map->pmap))
3510 * PAGE_SIZE_64;
91447636 3511
91447636
A
3512 basic_info->policy = ((task != kernel_task)?
3513 POLICY_TIMESHARE: POLICY_RR);
3514 basic_info->suspend_count = task->user_stop_count;
3515
b0d623f7
A
3516 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3517 basic_info->user_time.seconds =
3518 (typeof(basic_info->user_time.seconds))secs;
3519 basic_info->user_time.microseconds = usecs;
3520
3521 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3522 basic_info->system_time.seconds =
3523 (typeof(basic_info->system_time.seconds))secs;
3524 basic_info->system_time.microseconds = usecs;
91447636
A
3525
3526 *task_info_count = TASK_BASIC_INFO_64_COUNT;
3527 break;
3528 }
3529
316670eb
A
3530 case MACH_TASK_BASIC_INFO:
3531 {
3532 mach_task_basic_info_t basic_info;
3533 vm_map_t map;
3534 clock_sec_t secs;
3535 clock_usec_t usecs;
3536
3537 if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
3538 error = KERN_INVALID_ARGUMENT;
3539 break;
3540 }
3541
3542 basic_info = (mach_task_basic_info_t)task_info_out;
3543
3544 map = (task == kernel_task) ? kernel_map : task->map;
3545
3546 basic_info->virtual_size = map->size;
3547
3548 basic_info->resident_size =
3549 (mach_vm_size_t)(pmap_resident_count(map->pmap));
3550 basic_info->resident_size *= PAGE_SIZE_64;
3551
3552 basic_info->resident_size_max =
3553 (mach_vm_size_t)(pmap_resident_max(map->pmap));
3554 basic_info->resident_size_max *= PAGE_SIZE_64;
3555
3556 basic_info->policy = ((task != kernel_task) ?
3557 POLICY_TIMESHARE : POLICY_RR);
3558
3559 basic_info->suspend_count = task->user_stop_count;
3560
3561 absolutetime_to_microtime(task->total_user_time, &secs, &usecs);
3562 basic_info->user_time.seconds =
3563 (typeof(basic_info->user_time.seconds))secs;
3564 basic_info->user_time.microseconds = usecs;
3565
3566 absolutetime_to_microtime(task->total_system_time, &secs, &usecs);
3567 basic_info->system_time.seconds =
3568 (typeof(basic_info->system_time.seconds))secs;
3569 basic_info->system_time.microseconds = usecs;
3570
3571 *task_info_count = MACH_TASK_BASIC_INFO_COUNT;
3572 break;
3573 }
3574
91447636
A
3575 case TASK_THREAD_TIMES_INFO:
3576 {
39037602
A
3577 task_thread_times_info_t times_info;
3578 thread_t thread;
91447636 3579
b0d623f7
A
3580 if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
3581 error = KERN_INVALID_ARGUMENT;
3582 break;
3583 }
1c79356b
A
3584
3585 times_info = (task_thread_times_info_t) task_info_out;
3586 times_info->user_time.seconds = 0;
3587 times_info->user_time.microseconds = 0;
3588 times_info->system_time.seconds = 0;
3589 times_info->system_time.microseconds = 0;
3590
1c79356b 3591
91447636 3592 queue_iterate(&task->threads, thread, thread_t, task_threads) {
39236c6e 3593 time_value_t user_time, system_time;
1c79356b 3594
39236c6e
A
3595 if (thread->options & TH_OPT_IDLE_THREAD)
3596 continue;
1c79356b 3597
39236c6e 3598 thread_read_times(thread, &user_time, &system_time);
91447636 3599
39236c6e
A
3600 time_value_add(&times_info->user_time, &user_time);
3601 time_value_add(&times_info->system_time, &system_time);
3602 }
1c79356b
A
3603
3604 *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
3605 break;
91447636
A
3606 }
3607
3608 case TASK_ABSOLUTETIME_INFO:
3609 {
3610 task_absolutetime_info_t info;
39037602 3611 thread_t thread;
91447636 3612
b0d623f7
A
3613 if (*task_info_count < TASK_ABSOLUTETIME_INFO_COUNT) {
3614 error = KERN_INVALID_ARGUMENT;
3615 break;
3616 }
91447636
A
3617
3618 info = (task_absolutetime_info_t)task_info_out;
3619 info->threads_user = info->threads_system = 0;
3620
91447636
A
3621
3622 info->total_user = task->total_user_time;
3623 info->total_system = task->total_system_time;
3624
3625 queue_iterate(&task->threads, thread, thread_t, task_threads) {
3626 uint64_t tval;
316670eb
A
3627 spl_t x;
3628
39236c6e
A
3629 if (thread->options & TH_OPT_IDLE_THREAD)
3630 continue;
3631
316670eb
A
3632 x = splsched();
3633 thread_lock(thread);
91447636
A
3634
3635 tval = timer_grab(&thread->user_timer);
3636 info->threads_user += tval;
3637 info->total_user += tval;
3638
3639 tval = timer_grab(&thread->system_timer);
316670eb
A
3640 if (thread->precise_user_kernel_time) {
3641 info->threads_system += tval;
3642 info->total_system += tval;
3643 } else {
3644 /* system_timer may represent either sys or user */
3645 info->threads_user += tval;
3646 info->total_user += tval;
3647 }
3648
3649 thread_unlock(thread);
3650 splx(x);
91447636
A
3651 }
3652
91447636
A
3653
3654 *task_info_count = TASK_ABSOLUTETIME_INFO_COUNT;
3655 break;
3656 }
1c79356b 3657
b0d623f7
A
3658 case TASK_DYLD_INFO:
3659 {
3660 task_dyld_info_t info;
3661
6d2010ae
A
3662 /*
3663 * We added the format field to TASK_DYLD_INFO output. For
3664 * temporary backward compatibility, accept the fact that
3665 * clients may ask for the old version - distinquished by the
3666 * size of the expected result structure.
3667 */
3668#define TASK_LEGACY_DYLD_INFO_COUNT \
3669 offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
3670
3671 if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
b0d623f7
A
3672 error = KERN_INVALID_ARGUMENT;
3673 break;
3674 }
6d2010ae 3675
b0d623f7
A
3676 info = (task_dyld_info_t)task_info_out;
3677 info->all_image_info_addr = task->all_image_info_addr;
3678 info->all_image_info_size = task->all_image_info_size;
6d2010ae
A
3679
3680 /* only set format on output for those expecting it */
3681 if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
3682 info->all_image_info_format = task_has_64BitAddr(task) ?
3683 TASK_DYLD_ALL_IMAGE_INFO_64 :
3684 TASK_DYLD_ALL_IMAGE_INFO_32 ;
3685 *task_info_count = TASK_DYLD_INFO_COUNT;
3686 } else {
3687 *task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
3688 }
b0d623f7
A
3689 break;
3690 }
3691
6d2010ae
A
3692 case TASK_EXTMOD_INFO:
3693 {
3694 task_extmod_info_t info;
3695 void *p;
3696
3697 if (*task_info_count < TASK_EXTMOD_INFO_COUNT) {
3698 error = KERN_INVALID_ARGUMENT;
3699 break;
3700 }
3701
3702 info = (task_extmod_info_t)task_info_out;
3703
3704 p = get_bsdtask_info(task);
3705 if (p) {
3706 proc_getexecutableuuid(p, info->task_uuid, sizeof(info->task_uuid));
3707 } else {
3708 bzero(info->task_uuid, sizeof(info->task_uuid));
3709 }
3710 info->extmod_statistics = task->extmod_statistics;
3711 *task_info_count = TASK_EXTMOD_INFO_COUNT;
3712
3713 break;
3714 }
3715
3716 case TASK_KERNELMEMORY_INFO:
3717 {
3718 task_kernelmemory_info_t tkm_info;
316670eb 3719 ledger_amount_t credit, debit;
6d2010ae
A
3720
3721 if (*task_info_count < TASK_KERNELMEMORY_INFO_COUNT) {
3722 error = KERN_INVALID_ARGUMENT;
3723 break;
3724 }
3725
3726 tkm_info = (task_kernelmemory_info_t) task_info_out;
316670eb
A
3727 tkm_info->total_palloc = 0;
3728 tkm_info->total_pfree = 0;
3729 tkm_info->total_salloc = 0;
3730 tkm_info->total_sfree = 0;
6d2010ae
A
3731
3732 if (task == kernel_task) {
3733 /*
3734 * All shared allocs/frees from other tasks count against
3735 * the kernel private memory usage. If we are looking up
3736 * info for the kernel task, gather from everywhere.
3737 */
3738 task_unlock(task);
3739
3740 /* start by accounting for all the terminated tasks against the kernel */
3741 tkm_info->total_palloc = tasks_tkm_private.alloc + tasks_tkm_shared.alloc;
3742 tkm_info->total_pfree = tasks_tkm_private.free + tasks_tkm_shared.free;
6d2010ae
A
3743
3744 /* count all other task/thread shared alloc/free against the kernel */
3745 lck_mtx_lock(&tasks_threads_lock);
316670eb
A
3746
3747 /* XXX this really shouldn't be using the function parameter 'task' as a local var! */
6d2010ae
A
3748 queue_iterate(&tasks, task, task_t, tasks) {
3749 if (task == kernel_task) {
316670eb
A
3750 if (ledger_get_entries(task->ledger,
3751 task_ledgers.tkm_private, &credit,
3752 &debit) == KERN_SUCCESS) {
3753 tkm_info->total_palloc += credit;
3754 tkm_info->total_pfree += debit;
3755 }
6d2010ae 3756 }
316670eb
A
3757 if (!ledger_get_entries(task->ledger,
3758 task_ledgers.tkm_shared, &credit, &debit)) {
3759 tkm_info->total_palloc += credit;
3760 tkm_info->total_pfree += debit;
6d2010ae 3761 }
6d2010ae
A
3762 }
3763 lck_mtx_unlock(&tasks_threads_lock);
3764 } else {
316670eb
A
3765 if (!ledger_get_entries(task->ledger,
3766 task_ledgers.tkm_private, &credit, &debit)) {
3767 tkm_info->total_palloc = credit;
3768 tkm_info->total_pfree = debit;
3769 }
3770 if (!ledger_get_entries(task->ledger,
3771 task_ledgers.tkm_shared, &credit, &debit)) {
3772 tkm_info->total_salloc = credit;
3773 tkm_info->total_sfree = debit;
6d2010ae
A
3774 }
3775 task_unlock(task);
3776 }
3777
3778 *task_info_count = TASK_KERNELMEMORY_INFO_COUNT;
3779 return KERN_SUCCESS;
3780 }
3781
91447636
A
3782 /* OBSOLETE */
3783 case TASK_SCHED_FIFO_INFO:
3784 {
1c79356b 3785
b0d623f7
A
3786 if (*task_info_count < POLICY_FIFO_BASE_COUNT) {
3787 error = KERN_INVALID_ARGUMENT;
3788 break;
3789 }
1c79356b 3790
b0d623f7 3791 error = KERN_INVALID_POLICY;
6d2010ae 3792 break;
91447636 3793 }
1c79356b 3794
91447636
A
3795 /* OBSOLETE */
3796 case TASK_SCHED_RR_INFO:
3797 {
39037602 3798 policy_rr_base_t rr_base;
6d2010ae
A
3799 uint32_t quantum_time;
3800 uint64_t quantum_ns;
1c79356b 3801
b0d623f7
A
3802 if (*task_info_count < POLICY_RR_BASE_COUNT) {
3803 error = KERN_INVALID_ARGUMENT;
3804 break;
3805 }
1c79356b
A
3806
3807 rr_base = (policy_rr_base_t) task_info_out;
3808
0b4e3aa0 3809 if (task != kernel_task) {
b0d623f7
A
3810 error = KERN_INVALID_POLICY;
3811 break;
1c79356b
A
3812 }
3813
3814 rr_base->base_priority = task->priority;
1c79356b 3815
6d2010ae
A
3816 quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
3817 absolutetime_to_nanoseconds(quantum_time, &quantum_ns);
3818
3819 rr_base->quantum = (uint32_t)(quantum_ns / 1000 / 1000);
1c79356b
A
3820
3821 *task_info_count = POLICY_RR_BASE_COUNT;
3822 break;
91447636 3823 }
1c79356b 3824
91447636
A
3825 /* OBSOLETE */
3826 case TASK_SCHED_TIMESHARE_INFO:
3827 {
39037602 3828 policy_timeshare_base_t ts_base;
1c79356b 3829
b0d623f7
A
3830 if (*task_info_count < POLICY_TIMESHARE_BASE_COUNT) {
3831 error = KERN_INVALID_ARGUMENT;
3832 break;
3833 }
1c79356b
A
3834
3835 ts_base = (policy_timeshare_base_t) task_info_out;
3836
0b4e3aa0 3837 if (task == kernel_task) {
b0d623f7
A
3838 error = KERN_INVALID_POLICY;
3839 break;
1c79356b
A
3840 }
3841
3842 ts_base->base_priority = task->priority;
1c79356b
A
3843
3844 *task_info_count = POLICY_TIMESHARE_BASE_COUNT;
3845 break;
91447636 3846 }
1c79356b 3847
91447636
A
3848 case TASK_SECURITY_TOKEN:
3849 {
39037602 3850 security_token_t *sec_token_p;
1c79356b 3851
b0d623f7
A
3852 if (*task_info_count < TASK_SECURITY_TOKEN_COUNT) {
3853 error = KERN_INVALID_ARGUMENT;
3854 break;
3855 }
1c79356b
A
3856
3857 sec_token_p = (security_token_t *) task_info_out;
3858
1c79356b 3859 *sec_token_p = task->sec_token;
1c79356b
A
3860
3861 *task_info_count = TASK_SECURITY_TOKEN_COUNT;
91447636
A
3862 break;
3863 }
1c79356b 3864
91447636
A
3865 case TASK_AUDIT_TOKEN:
3866 {
39037602 3867 audit_token_t *audit_token_p;
55e303ae 3868
b0d623f7
A
3869 if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
3870 error = KERN_INVALID_ARGUMENT;
3871 break;
3872 }
55e303ae
A
3873
3874 audit_token_p = (audit_token_t *) task_info_out;
3875
55e303ae 3876 *audit_token_p = task->audit_token;
55e303ae
A
3877
3878 *task_info_count = TASK_AUDIT_TOKEN_COUNT;
91447636
A
3879 break;
3880 }
55e303ae 3881
91447636 3882 case TASK_SCHED_INFO:
b0d623f7 3883 error = KERN_INVALID_ARGUMENT;
6d2010ae 3884 break;
1c79356b 3885
91447636
A
3886 case TASK_EVENTS_INFO:
3887 {
39037602
A
3888 task_events_info_t events_info;
3889 thread_t thread;
1c79356b 3890
b0d623f7
A
3891 if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
3892 error = KERN_INVALID_ARGUMENT;
3893 break;
3894 }
1c79356b
A
3895
3896 events_info = (task_events_info_t) task_info_out;
3897
2d21ac55 3898
1c79356b
A
3899 events_info->faults = task->faults;
3900 events_info->pageins = task->pageins;
3901 events_info->cow_faults = task->cow_faults;
3902 events_info->messages_sent = task->messages_sent;
3903 events_info->messages_received = task->messages_received;
3904 events_info->syscalls_mach = task->syscalls_mach;
3905 events_info->syscalls_unix = task->syscalls_unix;
2d21ac55
A
3906
3907 events_info->csw = task->c_switch;
3908
3909 queue_iterate(&task->threads, thread, thread_t, task_threads) {
6d2010ae
A
3910 events_info->csw += thread->c_switch;
3911 events_info->syscalls_mach += thread->syscalls_mach;
3912 events_info->syscalls_unix += thread->syscalls_unix;
2d21ac55
A
3913 }
3914
1c79356b
A
3915
3916 *task_info_count = TASK_EVENTS_INFO_COUNT;
3917 break;
91447636 3918 }
2d21ac55
A
3919 case TASK_AFFINITY_TAG_INFO:
3920 {
b0d623f7
A
3921 if (*task_info_count < TASK_AFFINITY_TAG_INFO_COUNT) {
3922 error = KERN_INVALID_ARGUMENT;
3923 break;
3924 }
2d21ac55 3925
b0d623f7 3926 error = task_affinity_info(task, task_info_out, task_info_count);
6d2010ae 3927 break;
2d21ac55 3928 }
4b17d6b6
A
3929 case TASK_POWER_INFO:
3930 {
4b17d6b6
A
3931 if (*task_info_count < TASK_POWER_INFO_COUNT) {
3932 error = KERN_INVALID_ARGUMENT;
3933 break;
3934 }
3935
39037602 3936 task_power_info_locked(task, (task_power_info_t)task_info_out, NULL, NULL);
fe8ab488
A
3937 break;
3938 }
3939
3940 case TASK_POWER_INFO_V2:
3941 {
3942 if (*task_info_count < TASK_POWER_INFO_V2_COUNT) {
3943 error = KERN_INVALID_ARGUMENT;
3944 break;
3945 }
3946 task_power_info_v2_t tpiv2 = (task_power_info_v2_t) task_info_out;
39037602
A
3947
3948 uint64_t *task_energy = NULL;
3949 task_power_info_locked(task, &tpiv2->cpu_energy, &tpiv2->gpu_energy, task_energy);
39236c6e
A
3950 break;
3951 }
4b17d6b6 3952
39236c6e
A
3953 case TASK_VM_INFO:
3954 case TASK_VM_INFO_PURGEABLE:
3955 {
3956 task_vm_info_t vm_info;
3957 vm_map_t map;
4b17d6b6 3958
3e170ce0 3959 if (*task_info_count < TASK_VM_INFO_REV0_COUNT) {
39236c6e
A
3960 error = KERN_INVALID_ARGUMENT;
3961 break;
3962 }
4b17d6b6 3963
39236c6e 3964 vm_info = (task_vm_info_t)task_info_out;
4b17d6b6 3965
39236c6e
A
3966 if (task == kernel_task) {
3967 map = kernel_map;
3968 /* no lock */
3969 } else {
3970 map = task->map;
3971 vm_map_lock_read(map);
3972 }
4b17d6b6 3973
39236c6e
A
3974 vm_info->virtual_size = (typeof(vm_info->virtual_size))map->size;
3975 vm_info->region_count = map->hdr.nentries;
3976 vm_info->page_size = vm_map_page_size(map);
3977
3978 vm_info->resident_size = pmap_resident_count(map->pmap);
3979 vm_info->resident_size *= PAGE_SIZE;
3980 vm_info->resident_size_peak = pmap_resident_max(map->pmap);
3981 vm_info->resident_size_peak *= PAGE_SIZE;
3982
3983#define _VM_INFO(_name) \
3984 vm_info->_name = ((mach_vm_size_t) map->pmap->stats._name) * PAGE_SIZE
3985
3986 _VM_INFO(device);
3987 _VM_INFO(device_peak);
3988 _VM_INFO(external);
3989 _VM_INFO(external_peak);
3990 _VM_INFO(internal);
3991 _VM_INFO(internal_peak);
3992 _VM_INFO(reusable);
3993 _VM_INFO(reusable_peak);
3994 _VM_INFO(compressed);
3995 _VM_INFO(compressed_peak);
3996 _VM_INFO(compressed_lifetime);
3997
3998 vm_info->purgeable_volatile_pmap = 0;
3999 vm_info->purgeable_volatile_resident = 0;
4000 vm_info->purgeable_volatile_virtual = 0;
4001 if (task == kernel_task) {
4002 /*
4003 * We do not maintain the detailed stats for the
4004 * kernel_pmap, so just count everything as
4005 * "internal"...
4006 */
4007 vm_info->internal = vm_info->resident_size;
4008 /*
4009 * ... but since the memory held by the VM compressor
4010 * in the kernel address space ought to be attributed
4011 * to user-space tasks, we subtract it from "internal"
4012 * to give memory reporting tools a more accurate idea
4013 * of what the kernel itself is actually using, instead
4014 * of making it look like the kernel is leaking memory
4015 * when the system is under memory pressure.
4016 */
4017 vm_info->internal -= (VM_PAGE_COMPRESSOR_COUNT *
4018 PAGE_SIZE);
4019 } else {
4020 mach_vm_size_t volatile_virtual_size;
4021 mach_vm_size_t volatile_resident_size;
3e170ce0 4022 mach_vm_size_t volatile_compressed_size;
39236c6e 4023 mach_vm_size_t volatile_pmap_size;
3e170ce0 4024 mach_vm_size_t volatile_compressed_pmap_size;
39236c6e
A
4025 kern_return_t kr;
4026
4027 if (flavor == TASK_VM_INFO_PURGEABLE) {
4028 kr = vm_map_query_volatile(
4029 map,
4030 &volatile_virtual_size,
4031 &volatile_resident_size,
3e170ce0
A
4032 &volatile_compressed_size,
4033 &volatile_pmap_size,
4034 &volatile_compressed_pmap_size);
39236c6e
A
4035 if (kr == KERN_SUCCESS) {
4036 vm_info->purgeable_volatile_pmap =
4037 volatile_pmap_size;
3e170ce0
A
4038 if (radar_20146450) {
4039 vm_info->compressed -=
4040 volatile_compressed_pmap_size;
4041 }
39236c6e
A
4042 vm_info->purgeable_volatile_resident =
4043 volatile_resident_size;
4044 vm_info->purgeable_volatile_virtual =
4045 volatile_virtual_size;
4046 }
4b17d6b6 4047 }
4b17d6b6 4048 }
39037602 4049 *task_info_count = TASK_VM_INFO_REV0_COUNT;
39236c6e 4050
39037602
A
4051 if (original_task_info_count >= TASK_VM_INFO_REV1_COUNT) {
4052 vm_info->phys_footprint =
4053 (mach_vm_size_t) get_task_phys_footprint(task);
4054 *task_info_count = TASK_VM_INFO_REV1_COUNT;
4055 }
4056 if (original_task_info_count >= TASK_VM_INFO_REV2_COUNT) {
4057 vm_info->min_address = map->min_offset;
4058 vm_info->max_address = map->max_offset;
4059 *task_info_count = TASK_VM_INFO_REV2_COUNT;
4060 }
4061
4062 if (task != kernel_task) {
4063 vm_map_unlock_read(map);
3e170ce0
A
4064 }
4065
4b17d6b6
A
4066 break;
4067 }
4068
fe8ab488
A
4069 case TASK_WAIT_STATE_INFO:
4070 {
4071 /*
4072 * Deprecated flavor. Currently allowing some results until all users
4073 * stop calling it. The results may not be accurate.
4074 */
4075 task_wait_state_info_t wait_state_info;
4076 uint64_t total_sfi_ledger_val = 0;
4077
4078 if (*task_info_count < TASK_WAIT_STATE_INFO_COUNT) {
4079 error = KERN_INVALID_ARGUMENT;
4080 break;
4081 }
4082
4083 wait_state_info = (task_wait_state_info_t) task_info_out;
4084
4085 wait_state_info->total_wait_state_time = 0;
4086 bzero(wait_state_info->_reserved, sizeof(wait_state_info->_reserved));
4087
3e170ce0 4088#if CONFIG_SCHED_SFI
fe8ab488
A
4089 int i, prev_lentry = -1;
4090 int64_t val_credit, val_debit;
4091
4092 for (i = 0; i < MAX_SFI_CLASS_ID; i++){
4093 val_credit =0;
4094 /*
4095 * checking with prev_lentry != entry ensures adjacent classes
4096 * which share the same ledger do not add wait times twice.
4097 * Note: Use ledger() call to get data for each individual sfi class.
4098 */
4099 if (prev_lentry != task_ledgers.sfi_wait_times[i] &&
4100 KERN_SUCCESS == ledger_get_entries(task->ledger,
4101 task_ledgers.sfi_wait_times[i], &val_credit, &val_debit)) {
4102 total_sfi_ledger_val += val_credit;
4103 }
4104 prev_lentry = task_ledgers.sfi_wait_times[i];
4105 }
4106
3e170ce0 4107#endif /* CONFIG_SCHED_SFI */
fe8ab488
A
4108 wait_state_info->total_wait_sfi_state_time = total_sfi_ledger_val;
4109 *task_info_count = TASK_WAIT_STATE_INFO_COUNT;
4110
4111 break;
4112 }
3e170ce0
A
4113 case TASK_VM_INFO_PURGEABLE_ACCOUNT:
4114 {
4115#if DEVELOPMENT || DEBUG
4116 pvm_account_info_t acnt_info;
4117
4118 if (*task_info_count < PVM_ACCOUNT_INFO_COUNT) {
4119 error = KERN_INVALID_ARGUMENT;
4120 break;
4121 }
fe8ab488 4122
3e170ce0
A
4123 if (task_info_out == NULL) {
4124 error = KERN_INVALID_ARGUMENT;
4125 break;
4126 }
4127
4128 acnt_info = (pvm_account_info_t) task_info_out;
4129
4130 error = vm_purgeable_account(task, acnt_info);
4131
4132 *task_info_count = PVM_ACCOUNT_INFO_COUNT;
4133
4134 break;
4135#else /* DEVELOPMENT || DEBUG */
4136 error = KERN_NOT_SUPPORTED;
4137 break;
4138#endif /* DEVELOPMENT || DEBUG */
4139 }
4140 case TASK_FLAGS_INFO:
4141 {
4142 task_flags_info_t flags_info;
4143
4144 if (*task_info_count < TASK_FLAGS_INFO_COUNT) {
4145 error = KERN_INVALID_ARGUMENT;
4146 break;
4147 }
4148
4149 flags_info = (task_flags_info_t)task_info_out;
4150
4151 /* only publish the 64-bit flag of the task */
4152 flags_info->flags = task->t_flags & TF_64B_ADDR;
4153
4154 *task_info_count = TASK_FLAGS_INFO_COUNT;
4155 break;
4156 }
4157
4158 case TASK_DEBUG_INFO_INTERNAL:
4159 {
4160#if DEVELOPMENT || DEBUG
4161 task_debug_info_internal_t dbg_info;
4162 if (*task_info_count < TASK_DEBUG_INFO_INTERNAL_COUNT) {
4163 error = KERN_NOT_SUPPORTED;
4164 break;
4165 }
4166
4167 if (task_info_out == NULL) {
4168 error = KERN_INVALID_ARGUMENT;
4169 break;
4170 }
4171 dbg_info = (task_debug_info_internal_t) task_info_out;
4172 dbg_info->ipc_space_size = 0;
4173 if (task->itk_space){
4174 dbg_info->ipc_space_size = task->itk_space->is_table_size;
4175 }
4176
4177 error = KERN_SUCCESS;
4178 *task_info_count = TASK_DEBUG_INFO_INTERNAL_COUNT;
4179 break;
4180#else /* DEVELOPMENT || DEBUG */
4181 error = KERN_NOT_SUPPORTED;
4182 break;
4183#endif /* DEVELOPMENT || DEBUG */
4184 }
91447636 4185 default:
b0d623f7 4186 error = KERN_INVALID_ARGUMENT;
1c79356b
A
4187 }
4188
b0d623f7
A
4189 task_unlock(task);
4190 return (error);
1c79356b
A
4191}
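
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a minimal in-kernel caller of task_info() above, querying the
 * MACH_TASK_BASIC_INFO flavor for the current task.  The wrapper name
 * is hypothetical; the flavor, the count constant, and the in/out
 * count convention are the real ones handled by task_info().
 */
static inline kern_return_t __unused
example_query_mach_task_basic_info(mach_task_basic_info_data_t *info_out)
{
    mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;

    /* task_info() trims count to the number of integers actually filled in */
    return task_info(current_task(), MACH_TASK_BASIC_INFO,
        (task_info_t)info_out, &count);
}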

/*
 * task_power_info_locked
 *
 * Returns power stats for the task.
 * Note: Called with task locked.
 */
void
task_power_info_locked(
    task_t            task,
    task_power_info_t info,
    gpu_energy_data_t ginfo,
    uint64_t          *task_energy)
{
    thread_t        thread;
    ledger_amount_t tmp;

    task_lock_assert_owned(task);

    ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
        (ledger_amount_t *)&info->task_interrupt_wakeups, &tmp);
    ledger_get_entries(task->ledger, task_ledgers.platform_idle_wakeups,
        (ledger_amount_t *)&info->task_platform_idle_wakeups, &tmp);

    info->task_timer_wakeups_bin_1 = task->task_timer_wakeups_bin_1;
    info->task_timer_wakeups_bin_2 = task->task_timer_wakeups_bin_2;

    info->total_user = task->total_user_time;
    info->total_system = task->total_system_time;

    if (task_energy) {
        *task_energy = task->task_energy;
    }

    if (ginfo) {
        ginfo->task_gpu_utilisation = task->task_gpu_ns;
    }

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        uint64_t tval;
        spl_t    x;

        if (thread->options & TH_OPT_IDLE_THREAD)
            continue;

        x = splsched();
        thread_lock(thread);

        info->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
        info->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;

        if (task_energy) {
            *task_energy += ml_energy_stat(thread);
        }

        tval = timer_grab(&thread->user_timer);
        info->total_user += tval;

        tval = timer_grab(&thread->system_timer);
        if (thread->precise_user_kernel_time) {
            info->total_system += tval;
        } else {
            /* system_timer may represent either sys or user */
            info->total_user += tval;
        }

        if (ginfo) {
            ginfo->task_gpu_utilisation += ml_gpu_stat(thread);
        }
        thread_unlock(thread);
        splx(x);
    }
}
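
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * task_power_info_locked() asserts that the task lock is held, so a
 * hypothetical caller outside task_info() would bracket it like this.
 */
static inline void __unused
example_read_power_info(task_t task, task_power_info_data_t *info)
{
    task_lock(task);
    /* NULL ginfo/task_energy: skip the GPU and energy out-parameters */
    task_power_info_locked(task, info, NULL, NULL);
    task_unlock(task);
}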

/*
 * task_gpu_utilisation
 *
 * Returns the total gpu time used by all the threads of the task
 * (both dead and alive)
 */
uint64_t
task_gpu_utilisation(
    task_t task)
{
    uint64_t gpu_time = 0;
    thread_t thread;

    task_lock(task);
    gpu_time += task->task_gpu_ns;

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        spl_t x;
        x = splsched();
        thread_lock(thread);
        gpu_time += ml_gpu_stat(thread);
        thread_unlock(thread);
        splx(x);
    }

    task_unlock(task);
    return gpu_time;
}

/*
 * task_energy
 *
 * Returns the total energy used by all the threads of the task
 * (both dead and alive)
 */
uint64_t
task_energy(
    task_t task)
{
    uint64_t energy = 0;
    thread_t thread;

    task_lock(task);
    energy += task->task_energy;

    queue_iterate(&task->threads, thread, thread_t, task_threads) {
        spl_t x;
        x = splsched();
        thread_lock(thread);
        energy += ml_energy_stat(thread);
        thread_unlock(thread);
        splx(x);
    }

    task_unlock(task);
    return energy;
}

kern_return_t
task_purgable_info(
    task_t               task,
    task_purgable_info_t *stats)
{
    if (task == TASK_NULL || stats == NULL)
        return KERN_INVALID_ARGUMENT;
    /* Take task reference */
    task_reference(task);
    vm_purgeable_stats((vm_purgeable_info_t)stats, task);
    /* Drop task reference */
    task_deallocate(task);
    return KERN_SUCCESS;
}

void
task_vtimer_set(
    task_t    task,
    integer_t which)
{
    thread_t thread;
    spl_t    x;

    task_lock(task);

    task->vtimers |= which;

    switch (which) {

    case TASK_VTIMER_USER:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            if (thread->precise_user_kernel_time)
                thread->vtimer_user_save = timer_grab(&thread->user_timer);
            else
                thread->vtimer_user_save = timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;

    case TASK_VTIMER_PROF:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            thread->vtimer_prof_save = timer_grab(&thread->user_timer);
            thread->vtimer_prof_save += timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;

    case TASK_VTIMER_RLIM:
        queue_iterate(&task->threads, thread, thread_t, task_threads) {
            x = splsched();
            thread_lock(thread);
            thread->vtimer_rlim_save = timer_grab(&thread->user_timer);
            thread->vtimer_rlim_save += timer_grab(&thread->system_timer);
            thread_unlock(thread);
            splx(x);
        }
        break;
    }

    task_unlock(task);
}

void
task_vtimer_clear(
    task_t    task,
    integer_t which)
{
    assert(task == current_task());

    task_lock(task);

    task->vtimers &= ~which;

    task_unlock(task);
}

void
task_vtimer_update(
__unused
    task_t    task,
    integer_t which,
    uint32_t  *microsecs)
{
    thread_t    thread = current_thread();
    uint32_t    tdelt = 0;
    clock_sec_t secs = 0;
    uint64_t    tsum;

    assert(task == current_task());

    spl_t s = splsched();
    thread_lock(thread);

    if ((task->vtimers & which) != (uint32_t)which) {
        thread_unlock(thread);
        splx(s);
        return;
    }

    switch (which) {

    case TASK_VTIMER_USER:
        if (thread->precise_user_kernel_time) {
            tdelt = (uint32_t)timer_delta(&thread->user_timer,
                &thread->vtimer_user_save);
        } else {
            tdelt = (uint32_t)timer_delta(&thread->system_timer,
                &thread->vtimer_user_save);
        }
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        break;

    case TASK_VTIMER_PROF:
        tsum = timer_grab(&thread->user_timer);
        tsum += timer_grab(&thread->system_timer);
        tdelt = (uint32_t)(tsum - thread->vtimer_prof_save);
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        /* if the time delta is smaller than a usec, ignore */
        if (*microsecs != 0)
            thread->vtimer_prof_save = tsum;
        break;

    case TASK_VTIMER_RLIM:
        tsum = timer_grab(&thread->user_timer);
        tsum += timer_grab(&thread->system_timer);
        tdelt = (uint32_t)(tsum - thread->vtimer_rlim_save);
        thread->vtimer_rlim_save = tsum;
        absolutetime_to_microtime(tdelt, &secs, microsecs);
        break;
    }

    thread_unlock(thread);
    splx(s);
}
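
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the vtimer lifecycle as a hypothetical caller on the current task
 * would use it: arm with task_vtimer_set() (which snapshots each
 * thread's timers), sample the current thread's delta in microseconds
 * with task_vtimer_update(), then disarm with task_vtimer_clear().
 */
static inline uint32_t __unused
example_sample_user_vtimer(void)
{
    task_t task = current_task();
    uint32_t usecs = 0;

    task_vtimer_set(task, TASK_VTIMER_USER);
    /* ... the interval being measured elapses here ... */
    task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
    task_vtimer_clear(task, TASK_VTIMER_USER);

    return usecs;
}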

/*
 * task_assign:
 *
 * Change the assigned processor set for the task
 */
kern_return_t
task_assign(
    __unused task_t          task,
    __unused processor_set_t new_pset,
    __unused boolean_t       assign_threads)
{
    return (KERN_FAILURE);
}

/*
 * task_assign_default:
 *
 * Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
    task_t    task,
    boolean_t assign_threads)
{
    return (task_assign(task, &pset0, assign_threads));
}

/*
 * task_get_assignment
 *
 * Return name of processor set that task is assigned to.
 */
kern_return_t
task_get_assignment(
    task_t          task,
    processor_set_t *pset)
{
    if (!task || !task->active)
        return KERN_FAILURE;

    *pset = &pset0;

    return KERN_SUCCESS;
}

uint64_t
get_task_dispatchqueue_offset(
    task_t task)
{
    return task->dispatchqueue_offset;
}

/*
 * task_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy must be a policy which is enabled for the
 * processor set.  Change contained threads if requested.
 */
kern_return_t
task_policy(
    __unused task_t                 task,
    __unused policy_t               policy_id,
    __unused policy_base_t          base,
    __unused mach_msg_type_number_t count,
    __unused boolean_t              set_limit,
    __unused boolean_t              change)
{
    return (KERN_FAILURE);
}

/*
 * task_set_policy
 *
 * Set scheduling policy and parameters, both base and limit, for
 * the given task.  Policy can be any policy implemented by the
 * processor set, whether enabled or not.  Change contained threads
 * if requested.
 */
kern_return_t
task_set_policy(
    __unused task_t                 task,
    __unused processor_set_t        pset,
    __unused policy_t               policy_id,
    __unused policy_base_t          base,
    __unused mach_msg_type_number_t base_count,
    __unused policy_limit_t         limit,
    __unused mach_msg_type_number_t limit_count,
    __unused boolean_t              change)
{
    return (KERN_FAILURE);
}

kern_return_t
task_set_ras_pc(
    __unused task_t      task,
    __unused vm_offset_t pc,
    __unused vm_offset_t endpc)
{
    return KERN_FAILURE;
}

void
task_synchronizer_destroy_all(task_t task)
{
    /*
     * Destroy owned semaphores
     */
    semaphore_destroy_all(task);
}

/*
 * Install default (machine-dependent) initial thread state
 * on the task.  Subsequent thread creation will have this initial
 * state set on the thread by machine_thread_inherit_taskwide().
 * Flavors and structures are exactly the same as those passed to
 * thread_set_state().
 */
kern_return_t
task_set_state(
    task_t                 task,
    int                    flavor,
    thread_state_t         state,
    mach_msg_type_number_t state_count)
{
    kern_return_t ret;

    if (task == TASK_NULL) {
        return (KERN_INVALID_ARGUMENT);
    }

    task_lock(task);

    if (!task->active) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    ret = machine_task_set_state(task, flavor, state, state_count);

    task_unlock(task);
    return ret;
}

/*
 * Examine the default (machine-dependent) initial thread state
 * on the task, as set by task_set_state().  Flavors and structures
 * are exactly the same as those passed to thread_get_state().
 */
kern_return_t
task_get_state(
    task_t                 task,
    int                    flavor,
    thread_state_t         state,
    mach_msg_type_number_t *state_count)
{
    kern_return_t ret;

    if (task == TASK_NULL) {
        return (KERN_INVALID_ARGUMENT);
    }

    task_lock(task);

    if (!task->active) {
        task_unlock(task);
        return (KERN_FAILURE);
    }

    ret = machine_task_get_state(task, flavor, state, state_count);

    task_unlock(task);
    return ret;
}
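
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a hypothetical round trip through task_set_state()/task_get_state().
 * `flavor`, `state`, and `count` are machine-dependent and supplied by
 * the caller, exactly as with thread_set_state()/thread_get_state().
 */
static inline kern_return_t __unused
example_roundtrip_default_state(task_t task, int flavor,
    thread_state_t state, mach_msg_type_number_t count)
{
    kern_return_t kr;

    kr = task_set_state(task, flavor, state, count);
    if (kr != KERN_SUCCESS)
        return kr;

    /* the count is in/out on the get side */
    return task_get_state(task, flavor, state, &count);
}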

#if CONFIG_MEMORYSTATUS

boolean_t
task_get_memlimit_is_active(task_t task)
{
    assert(task != NULL);

    return (task->memlimit_is_active ? TRUE : FALSE);
}

void
task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active)
{
    assert(task != NULL);

    memlimit_is_active ? (task->memlimit_is_active = 1) : (task->memlimit_is_active = 0);
}

boolean_t
task_get_memlimit_is_fatal(task_t task)
{
    assert(task != NULL);

    return (task->memlimit_is_fatal ? TRUE : FALSE);
}

void
task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal)
{
    assert(task != NULL);

    memlimit_is_fatal ? (task->memlimit_is_fatal = 1) : (task->memlimit_is_fatal = 0);
}

boolean_t
task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
{
    boolean_t triggered = FALSE;

    assert(task == current_task());

    /*
     * Returns true if the task has already triggered an exc_resource exception.
     */

    if (memlimit_is_active) {
        triggered = (task->memlimit_active_exc_resource ? TRUE : FALSE);
    } else {
        triggered = (task->memlimit_inactive_exc_resource ? TRUE : FALSE);
    }

    return (triggered);
}

void
task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active)
{
    assert(task == current_task());

    /*
     * We allow one exc_resource per process per active/inactive limit.
     * The limit's fatal attribute does not come into play.
     */

    if (memlimit_is_active) {
        task->memlimit_active_exc_resource = 1;
    } else {
        task->memlimit_inactive_exc_resource = 1;
    }
}

#define HWM_USERCORE_MINSPACE 250 // free space (in MB) required *after* core file creation

void __attribute__((noinline))
PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND(int max_footprint_mb, boolean_t is_fatal)
{
    task_t     task = current_task();
    int        pid = 0;
    const char *procname = "unknown";
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];

#ifdef MACH_BSD
    pid = proc_selfpid();

    if (pid == 1) {
        /*
         * Cannot have ReportCrash analyzing
         * a suspended initproc.
         */
        return;
    }

    if (task->bsd_info != NULL)
        procname = proc_name_address(current_task()->bsd_info);
#endif
#if CONFIG_COREDUMP
    if (hwm_user_cores) {
        int         error;
        uint64_t    starttime, end;
        clock_sec_t secs = 0;
        uint32_t    microsecs = 0;

        starttime = mach_absolute_time();
        /*
         * Trigger a coredump of this process.  Don't proceed unless we know we won't
         * be filling up the disk; and ignore the core size resource limit for this
         * core file.
         */
        if ((error = coredump(current_task()->bsd_info, HWM_USERCORE_MINSPACE, COREDUMP_IGNORE_ULIMIT)) != 0) {
            printf("couldn't take coredump of %s[%d]: %d\n", procname, pid, error);
        }
        /*
         * coredump() leaves the task suspended.
         */
        task_resume_internal(current_task());

        end = mach_absolute_time();
        absolutetime_to_microtime(end - starttime, &secs, &microsecs);
        printf("coredump of %s[%d] taken in %d secs %d microsecs\n",
               proc_name_address(current_task()->bsd_info), pid, (int)secs, microsecs);
    }
#endif /* CONFIG_COREDUMP */

    if (disable_exc_resource) {
        printf("process %s[%d] crossed memory high watermark (%d MB); EXC_RESOURCE "
               "suppressed by a boot-arg.\n", procname, pid, max_footprint_mb);
        return;
    }

    /*
     * A task that has triggered an EXC_RESOURCE should not be
     * jetsammed when the device is under memory pressure.  Here
     * we set the P_MEMSTAT_TERMINATED flag so that the process
     * will be skipped if the memorystatus_thread wakes up.
     */
    proc_memstat_terminated(current_task()->bsd_info, TRUE);

    printf("process %s[%d] crossed memory high watermark (%d MB); sending "
           "EXC_RESOURCE.\n", procname, pid, max_footprint_mb);

    code[0] = code[1] = 0;
    EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_MEMORY);
    EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_HIGH_WATERMARK);
    EXC_RESOURCE_HWM_ENCODE_LIMIT(code[0], max_footprint_mb);

    /* Do not generate a corpse fork if the violation is a fatal one */
    if (is_fatal || exc_via_corpse_forking == 0) {
        /* Do not send an EXC_RESOURCE if corpse_for_fatal_memkill is set */
        if (corpse_for_fatal_memkill == 0) {
            /*
             * Use the _internal_ variant so that no user-space
             * process can resume our task from under us.
             */
            task_suspend_internal(task);
            exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
            task_resume_internal(task);
        }
    } else {
        task_enqueue_exception_with_corpse(task, code, EXCEPTION_CODE_MAX);
    }

    /*
     * After the EXC_RESOURCE has been handled, we must clear the
     * P_MEMSTAT_TERMINATED flag so that the process can again be
     * considered for jetsam if the memorystatus_thread wakes up.
     */
    proc_memstat_terminated(current_task()->bsd_info, FALSE);  /* clear the flag */
}

/*
 * Callback invoked when a task exceeds its physical footprint limit.
 */
void
task_footprint_exceeded(int warning, __unused const void *param0, __unused const void *param1)
{
    ledger_amount_t max_footprint, max_footprint_mb;
    task_t          task;
    boolean_t       is_warning;
    boolean_t       memlimit_is_active;
    boolean_t       memlimit_is_fatal;

    if (warning == LEDGER_WARNING_DIPPED_BELOW) {
        /*
         * Task memory limits only provide a warning on the way up.
         */
        return;
    } else if (warning == LEDGER_WARNING_ROSE_ABOVE) {
        /*
         * This task is in danger of violating a memory limit;
         * it has exceeded a percentage level of the limit.
         */
        is_warning = TRUE;
    } else {
        /*
         * The task has exceeded the physical footprint limit.
         * This is not a warning but a true limit violation.
         */
        is_warning = FALSE;
    }

    task = current_task();

    ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &max_footprint);
    max_footprint_mb = max_footprint >> 20;

    memlimit_is_active = task_get_memlimit_is_active(task);
    memlimit_is_fatal = task_get_memlimit_is_fatal(task);

    /*
     * If this is an actual violation (not a warning), then generate an EXC_RESOURCE exception.
     * We only generate the exception once per process per memlimit (active/inactive limit).
     * To enforce this, we monitor state based on the memlimit's active/inactive attribute
     * and we disable it by marking that memlimit as exception triggered.
     */
    if ((is_warning == FALSE) && (!task_has_triggered_exc_resource(task, memlimit_is_active))) {
        PROC_CROSSED_HIGH_WATERMARK__SEND_EXC_RESOURCE_AND_SUSPEND((int)max_footprint_mb, memlimit_is_fatal);
        memorystatus_log_exception((int)max_footprint_mb, memlimit_is_active, memlimit_is_fatal);
        task_mark_has_triggered_exc_resource(task, memlimit_is_active);
    }

    memorystatus_on_ledger_footprint_exceeded(is_warning, memlimit_is_active, memlimit_is_fatal);
}

extern int proc_check_footprint_priv(void);

kern_return_t
task_set_phys_footprint_limit(
    task_t task,
    int    new_limit_mb,
    int    *old_limit_mb)
{
    kern_return_t error;

    boolean_t memlimit_is_active;
    boolean_t memlimit_is_fatal;

    if ((error = proc_check_footprint_priv())) {
        return (KERN_NO_ACCESS);
    }

    /*
     * This call should probably be obsoleted.
     * But for now, we default to current state.
     */
    memlimit_is_active = task_get_memlimit_is_active(task);
    memlimit_is_fatal = task_get_memlimit_is_fatal(task);

    return task_set_phys_footprint_limit_internal(task, new_limit_mb, old_limit_mb, memlimit_is_active, memlimit_is_fatal);
}

kern_return_t
task_convert_phys_footprint_limit(
    int limit_mb,
    int *converted_limit_mb)
{
    if (limit_mb == -1) {
        /*
         * No limit
         */
        if (max_task_footprint != 0) {
            *converted_limit_mb = (int)(max_task_footprint / 1024 / 1024);  /* bytes to MB */
        } else {
            *converted_limit_mb = (int)(LEDGER_LIMIT_INFINITY >> 20);
        }
    } else {
        /* nothing to convert */
        *converted_limit_mb = limit_mb;
    }
    return (KERN_SUCCESS);
}
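
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the MB<->bytes convention used throughout this block.  A 512 MB limit
 * is stored on the phys_footprint ledger as 512 << 20 == 536870912
 * bytes and recovered with >> 20; a -1 MB limit means "no limit" and
 * maps to LEDGER_LIMIT_INFINITY.  The wrapper below is hypothetical.
 */
static inline kern_return_t __unused
example_set_512mb_footprint_limit(task_t task)
{
    int old_limit_mb = 0;

    /* installs (ledger_amount_t)512 << 20 bytes on the ledger */
    return task_set_phys_footprint_limit(task, 512, &old_limit_mb);
}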

kern_return_t
task_set_phys_footprint_limit_internal(
    task_t    task,
    int       new_limit_mb,
    int       *old_limit_mb,
    boolean_t memlimit_is_active,
    boolean_t memlimit_is_fatal)
{
    ledger_amount_t old;

    ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &old);

    if (old_limit_mb) {
        /*
         * Check that limit >> 20 will not give an "unexpected" 32-bit
         * result.  There are, however, implicit assumptions that -1 mb limit
         * equates to LEDGER_LIMIT_INFINITY.
         */
        assert(((old & 0xFFF0000000000000LL) == 0) || (old == LEDGER_LIMIT_INFINITY));
        *old_limit_mb = (int)(old >> 20);
    }

    if (new_limit_mb == -1) {
        /*
         * Caller wishes to remove the limit.
         */
        ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
                         max_task_footprint ? max_task_footprint : LEDGER_LIMIT_INFINITY,
                         max_task_footprint ? max_task_footprint_warning_level : 0);

        task_set_memlimit_is_active(task, memlimit_is_active);
        task_set_memlimit_is_fatal(task, memlimit_is_fatal);

        return (KERN_SUCCESS);
    }

#ifdef CONFIG_NOMONITORS
    return (KERN_SUCCESS);
#endif /* CONFIG_NOMONITORS */

    task_lock(task);

    task_set_memlimit_is_active(task, memlimit_is_active);
    task_set_memlimit_is_fatal(task, memlimit_is_fatal);

    ledger_set_limit(task->ledger, task_ledgers.phys_footprint,
        (ledger_amount_t)new_limit_mb << 20, PHYS_FOOTPRINT_WARNING_LEVEL);

    if (task == current_task()) {
        ledger_check_new_balance(task->ledger, task_ledgers.phys_footprint);
    }

    task_unlock(task);

    return (KERN_SUCCESS);
}

kern_return_t
task_get_phys_footprint_limit(
    task_t task,
    int    *limit_mb)
{
    ledger_amount_t limit;

    ledger_get_limit(task->ledger, task_ledgers.phys_footprint, &limit);
    /*
     * Check that limit >> 20 will not give an "unexpected" signed, 32-bit
     * result.  There are, however, implicit assumptions that -1 mb limit
     * equates to LEDGER_LIMIT_INFINITY.
     */
    assert(((limit & 0xFFF0000000000000LL) == 0) || (limit == LEDGER_LIMIT_INFINITY));
    *limit_mb = (int)(limit >> 20);

    return (KERN_SUCCESS);
}
#else /* CONFIG_MEMORYSTATUS */
kern_return_t
task_set_phys_footprint_limit(
    __unused task_t task,
    __unused int    new_limit_mb,
    __unused int    *old_limit_mb)
{
    return (KERN_FAILURE);
}

kern_return_t
task_get_phys_footprint_limit(
    __unused task_t task,
    __unused int    *limit_mb)
{
    return (KERN_FAILURE);
}
#endif /* CONFIG_MEMORYSTATUS */

/*
 * We need to export some functions to other components that
 * are currently implemented in macros within the osfmk
 * component.  Just export them as functions of the same name.
 */
boolean_t is_kerneltask(task_t t)
{
    if (t == kernel_task)
        return (TRUE);

    return (FALSE);
}

boolean_t is_corpsetask(task_t t)
{
    return (task_is_a_corpse(t));
}

#undef current_task
task_t current_task(void);
task_t current_task(void)
{
    return (current_task_fast());
}

#undef task_reference
void task_reference(task_t task);
void
task_reference(
    task_t task)
{
    if (task != TASK_NULL)
        task_reference_internal(task);
}

/* defined in bsd/kern/kern_prot.c */
extern int get_audit_token_pid(audit_token_t *audit_token);

int task_pid(task_t task)
{
    if (task)
        return get_audit_token_pid(&task->audit_token);
    return -1;
}

/*
 * This routine finds a thread in a task by its unique id
 * Returns a referenced thread or THREAD_NULL if the thread was not found
 *
 * TODO: This is super inefficient - it's an O(threads in task) list walk!
 *       We should make a tid hash, or transition all tid clients to thread ports
 *
 * Precondition: No locks held (will take task lock)
 */
thread_t
task_findtid(task_t task, uint64_t tid)
{
    thread_t self         = current_thread();
    thread_t found_thread = THREAD_NULL;
    thread_t iter_thread  = THREAD_NULL;

    /* Short-circuit the lookup if we're looking up ourselves */
    if (tid == self->thread_id || tid == TID_NULL) {
        assert(self->task == task);

        thread_reference(self);

        return self;
    }

    task_lock(task);

    queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
        if (iter_thread->thread_id == tid) {
            found_thread = iter_thread;
            thread_reference(found_thread);
            break;
        }
    }

    task_unlock(task);

    return (found_thread);
}
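
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a hypothetical caller of task_findtid() balancing the thread
 * reference the lookup takes.  The caller is assumed to already hold
 * a reference on the task.
 */
static inline boolean_t __unused
example_task_has_tid(task_t task, uint64_t tid)
{
    thread_t thread = task_findtid(task, tid);

    if (thread == THREAD_NULL)
        return FALSE;

    /* ... inspect the thread here ... */
    thread_deallocate(thread);  /* drop the reference from task_findtid() */
    return TRUE;
}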

int pid_from_task(task_t task)
{
    int pid = -1;

    if (task->bsd_info) {
        pid = proc_pid(task->bsd_info);
    } else {
        pid = task_pid(task);
    }

    return pid;
}

/*
 * Control the CPU usage monitor for a task.
 */
kern_return_t
task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags)
{
    int error = KERN_SUCCESS;

    if (*flags & CPUMON_MAKE_FATAL) {
        task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_CPUMON;
    } else {
        error = KERN_INVALID_ARGUMENT;
    }

    return error;
}

/*
 * Control the wakeups monitor for a task.
 */
kern_return_t
task_wakeups_monitor_ctl(task_t task, uint32_t *flags, int32_t *rate_hz)
{
    ledger_t ledger = task->ledger;

    task_lock(task);
    if (*flags & WAKEMON_GET_PARAMS) {
        ledger_amount_t limit;
        uint64_t        period;

        ledger_get_limit(ledger, task_ledgers.interrupt_wakeups, &limit);
        ledger_get_period(ledger, task_ledgers.interrupt_wakeups, &period);

        if (limit != LEDGER_LIMIT_INFINITY) {
            /*
             * An active limit means the wakeups monitor is enabled.
             */
            *rate_hz = (int32_t)(limit / (int64_t)(period / NSEC_PER_SEC));
            *flags = WAKEMON_ENABLE;
            if (task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON) {
                *flags |= WAKEMON_MAKE_FATAL;
            }
        } else {
            *flags = WAKEMON_DISABLE;
            *rate_hz = -1;
        }

        /*
         * If WAKEMON_GET_PARAMS is present in flags, all other flags are ignored.
         */
        task_unlock(task);
        return KERN_SUCCESS;
    }

    if (*flags & WAKEMON_ENABLE) {
        if (*flags & WAKEMON_SET_DEFAULTS) {
            *rate_hz = task_wakeups_monitor_rate;
        }

#ifndef CONFIG_NOMONITORS
        if (*flags & WAKEMON_MAKE_FATAL) {
            task->rusage_cpu_flags |= TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
        }
#endif /* CONFIG_NOMONITORS */

        if (*rate_hz <= 0) {
            task_unlock(task);
            return KERN_INVALID_ARGUMENT;
        }

#ifndef CONFIG_NOMONITORS
        ledger_set_limit(ledger, task_ledgers.interrupt_wakeups, *rate_hz * task_wakeups_monitor_interval,
            task_wakeups_monitor_ustackshots_trigger_pct);
        ledger_set_period(ledger, task_ledgers.interrupt_wakeups, task_wakeups_monitor_interval * NSEC_PER_SEC);
        ledger_enable_callback(ledger, task_ledgers.interrupt_wakeups);
#endif /* CONFIG_NOMONITORS */
    } else if (*flags & WAKEMON_DISABLE) {
        /*
         * Caller wishes to disable wakeups monitor on the task.
         *
         * Disable telemetry if it was triggered by the wakeups monitor, and
         * remove the limit & callback on the wakeups ledger entry.
         */
#if CONFIG_TELEMETRY
        telemetry_task_ctl_locked(task, TF_WAKEMON_WARNING, 0);
#endif
        ledger_disable_refill(ledger, task_ledgers.interrupt_wakeups);
        ledger_disable_callback(ledger, task_ledgers.interrupt_wakeups);
    }

    task_unlock(task);
    return KERN_SUCCESS;
}
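
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * enabling the wakeups monitor at a hypothetical 150 wakes/second.
 * Per the code above, the installed ledger limit is
 * rate_hz * task_wakeups_monitor_interval wakes per refill period, so
 * a later WAKEMON_GET_PARAMS recovers rate_hz as
 * limit / (period / NSEC_PER_SEC).
 */
static inline kern_return_t __unused
example_enable_wakeups_monitor(task_t task)
{
    uint32_t flags = WAKEMON_ENABLE;
    int32_t rate_hz = 150;

    return task_wakeups_monitor_ctl(task, &flags, &rate_hz);
}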

void
task_wakeups_rate_exceeded(int warning, __unused const void *param0, __unused const void *param1)
{
    if (warning == LEDGER_WARNING_ROSE_ABOVE) {
#if CONFIG_TELEMETRY
        /*
         * This task is in danger of violating the wakeups monitor.  Enable telemetry
         * on this task so there are micro-stackshots available if and when
         * EXC_RESOURCE is triggered.
         */
        telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 1);
#endif
        return;
    }

#if CONFIG_TELEMETRY
    /*
     * If the balance has dipped below the warning level (LEDGER_WARNING_DIPPED_BELOW) or
     * exceeded the limit, turn telemetry off for the task.
     */
    telemetry_task_ctl(current_task(), TF_WAKEMON_WARNING, 0);
#endif

    if (warning == 0) {
        SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS();
    }
}

void __attribute__((noinline))
SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MANY_WAKEUPS(void)
{
    task_t        task = current_task();
    int           pid = 0;
    const char    *procname = "unknown";
    boolean_t     fatal;
    kern_return_t kr;
#ifdef EXC_RESOURCE_MONITORS
    mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
    struct ledger_entry_info lei;

#ifdef MACH_BSD
    pid = proc_selfpid();
    if (task->bsd_info != NULL)
        procname = proc_name_address(current_task()->bsd_info);
#endif

    ledger_get_entry_info(task->ledger, task_ledgers.interrupt_wakeups, &lei);

    /*
     * Disable the exception notification so we don't overwhelm
     * the listener with an endless stream of redundant exceptions.
     * TODO: detect whether another thread is already reporting the violation.
     */
    uint32_t flags = WAKEMON_DISABLE;
    task_wakeups_monitor_ctl(task, &flags, NULL);

    fatal = task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_FATAL_WAKEUPSMON;
    trace_resource_violation(RMON_CPUWAKES_VIOLATED, &lei);
    printf("process %s[%d] caught waking the CPU %llu times "
           "over ~%llu seconds, averaging %llu wakes / second and "
           "violating a %slimit of %llu wakes over %llu seconds.\n",
           procname, pid,
           lei.lei_balance, lei.lei_last_refill / NSEC_PER_SEC,
           lei.lei_last_refill == 0 ? 0 :
               (NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill),
           fatal ? "FATAL " : "",
           lei.lei_limit, lei.lei_refill_period / NSEC_PER_SEC);

    kr = send_resource_violation(send_cpu_wakes_violation, task, &lei,
                                 fatal ? kRNFatalLimitFlag : 0);
    if (kr) {
        printf("send_resource_violation(CPU wakes, ...): error %#x\n", kr);
    }

#ifdef EXC_RESOURCE_MONITORS
    if (disable_exc_resource) {
        printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
               "suppressed by a boot-arg\n", procname, pid);
        return;
    }
    if (audio_active) {
        printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
               "suppressed due to audio playback\n", procname, pid);
        return;
    }
    if (lei.lei_last_refill == 0) {
        printf("process %s[%d] caught causing excessive wakeups. EXC_RESOURCE "
               "suppressed due to lei.lei_last_refill = 0 \n", procname, pid);
    }

    code[0] = code[1] = 0;
    EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_WAKEUPS);
    EXC_RESOURCE_ENCODE_FLAVOR(code[0], FLAVOR_WAKEUPS_MONITOR);
    EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_PERMITTED(code[0],
        NSEC_PER_SEC * lei.lei_limit / lei.lei_refill_period);
    EXC_RESOURCE_CPUMONITOR_ENCODE_OBSERVATION_INTERVAL(code[0],
        lei.lei_last_refill);
    EXC_RESOURCE_CPUMONITOR_ENCODE_WAKEUPS_OBSERVED(code[1],
        NSEC_PER_SEC * lei.lei_balance / lei.lei_last_refill);
    exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
#endif /* EXC_RESOURCE_MONITORS */

    if (fatal) {
        task_terminate_internal(task);
    }
}

static boolean_t
global_update_logical_writes(int64_t io_delta)
{
    int64_t   old_count, new_count;
    boolean_t needs_telemetry;

    do {
        new_count = old_count = global_logical_writes_count;
        new_count += io_delta;
        if (new_count >= io_telemetry_limit) {
            new_count = 0;
            needs_telemetry = TRUE;
        } else {
            needs_telemetry = FALSE;
        }
    } while (!OSCompareAndSwap64(old_count, new_count, &global_logical_writes_count));
    return needs_telemetry;
}

void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp)
{
    int64_t   io_delta = 0;
    boolean_t needs_telemetry = FALSE;

    if ((!task) || (!io_size) || (!vp))
        return;

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_DATA_WRITE)) | DBG_FUNC_NONE,
        task_pid(task), io_size, flags, (uintptr_t)VM_KERNEL_ADDRPERM(vp), 0);
    DTRACE_IO4(logical_writes, struct task *, task, uint32_t, io_size, int, flags, vnode *, vp);
    switch (flags) {
    case TASK_WRITE_IMMEDIATE:
        OSAddAtomic64(io_size, (SInt64 *)&(task->task_immediate_writes));
        ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
        break;
    case TASK_WRITE_DEFERRED:
        OSAddAtomic64(io_size, (SInt64 *)&(task->task_deferred_writes));
        ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
        break;
    case TASK_WRITE_INVALIDATED:
        OSAddAtomic64(io_size, (SInt64 *)&(task->task_invalidated_writes));
        ledger_debit(task->ledger, task_ledgers.logical_writes, io_size);
        break;
    case TASK_WRITE_METADATA:
        OSAddAtomic64(io_size, (SInt64 *)&(task->task_metadata_writes));
        ledger_credit(task->ledger, task_ledgers.logical_writes, io_size);
        break;
    }

    io_delta = (flags == TASK_WRITE_INVALIDATED) ? ((int64_t)io_size * -1ll) : ((int64_t)io_size);
    if (io_telemetry_limit != 0) {
        /* If io_telemetry_limit is 0, disable global updates and I/O telemetry */
        needs_telemetry = global_update_logical_writes(io_delta);
        if (needs_telemetry) {
            act_set_io_telemetry_ast(current_thread());
        }
    }
}

/*
 * Control the I/O monitor for a task.
 */
kern_return_t
task_io_monitor_ctl(task_t task, uint32_t *flags)
{
	ledger_t ledger = task->ledger;

	task_lock(task);
	if (*flags & IOMON_ENABLE) {
		/* Configure the physical I/O ledger */
		ledger_set_limit(ledger, task_ledgers.physical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
		ledger_set_period(ledger, task_ledgers.physical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));

		/* Configure the logical I/O ledger */
		ledger_set_limit(ledger, task_ledgers.logical_writes, (task_iomon_limit_mb * 1024 * 1024), 0);
		ledger_set_period(ledger, task_ledgers.logical_writes, (task_iomon_interval_secs * NSEC_PER_SEC));
	} else if (*flags & IOMON_DISABLE) {
		/*
		 * Caller wishes to disable I/O monitor on the task.
		 */
		ledger_disable_refill(ledger, task_ledgers.physical_writes);
		ledger_disable_callback(ledger, task_ledgers.physical_writes);
		ledger_disable_refill(ledger, task_ledgers.logical_writes);
		ledger_disable_callback(ledger, task_ledgers.logical_writes);
	}

	task_unlock(task);
	return KERN_SUCCESS;
}
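
/*
 * A minimal usage sketch (illustrative only, not a new API): callers toggle
 * the monitor by passing the control flags by reference, e.g.
 *
 *	uint32_t flags = IOMON_ENABLE;
 *	kern_return_t kr = task_io_monitor_ctl(task, &flags);
 *
 * Note that the limit and refill period come from the task_iomon_limit_mb
 * and task_iomon_interval_secs globals, not from the caller.
 */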
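
/*
 * Ledger callback for the I/O monitor. The EXC_RESOURCE flavor is smuggled
 * through param0 as an integer, and only an invocation with warning == 0
 * (an actual limit hit, not a warning-level crossing) triggers the
 * notification path.
 */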
void
task_io_rate_exceeded(int warning, const void *param0, __unused const void *param1)
{
	if (warning == 0) {
		SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO((int)param0);
	}
}
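
/*
 * The shouting function name below appears deliberate: the function is
 * marked noinline, presumably so this self-describing symbol shows up in
 * backtraces taken while the notification is being generated.
 */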
void __attribute__((noinline)) SENDING_NOTIFICATION__THIS_PROCESS_IS_CAUSING_TOO_MUCH_IO(int flavor)
{
	int pid = 0;
	task_t task = current_task();
#ifdef EXC_RESOURCE_MONITORS
	mach_exception_data_type_t code[EXCEPTION_CODE_MAX];
#endif /* EXC_RESOURCE_MONITORS */
	struct ledger_entry_info lei;
	kern_return_t kr;

#ifdef MACH_BSD
	pid = proc_selfpid();
#endif
	/*
	 * Get the ledger entry info. We need to do this before disabling the exception
	 * to get correct values for all fields.
	 */
	switch (flavor) {
	case FLAVOR_IO_PHYSICAL_WRITES:
		ledger_get_entry_info(task->ledger, task_ledgers.physical_writes, &lei);
		break;
	case FLAVOR_IO_LOGICAL_WRITES:
		ledger_get_entry_info(task->ledger, task_ledgers.logical_writes, &lei);
		break;
	}

	/*
	 * Disable the exception notification so we don't overwhelm
	 * the listener with an endless stream of redundant exceptions.
	 * TODO: detect whether another thread is already reporting the violation.
	 */
	uint32_t flags = IOMON_DISABLE;
	task_io_monitor_ctl(task, &flags);

	if (flavor == FLAVOR_IO_LOGICAL_WRITES) {
		trace_resource_violation(RMON_LOGWRITES_VIOLATED, &lei);
	}
	printf("process [%d] caught causing excessive I/O (flavor: %d). Task I/O: %lld MB. [Limit: %lld MB per %lld secs]\n",
	    pid, flavor, (lei.lei_balance / (1024 * 1024)), (lei.lei_limit / (1024 * 1024)), (lei.lei_refill_period / NSEC_PER_SEC));

	kr = send_resource_violation(send_disk_writes_violation, task, &lei, kRNFlagsNone);
	if (kr) {
		printf("send_resource_violation(disk_writes, ...): error %#x\n", kr);
	}

#ifdef EXC_RESOURCE_MONITORS
	code[0] = code[1] = 0;
	EXC_RESOURCE_ENCODE_TYPE(code[0], RESOURCE_TYPE_IO);
	EXC_RESOURCE_ENCODE_FLAVOR(code[0], flavor);
	EXC_RESOURCE_IO_ENCODE_INTERVAL(code[0], (lei.lei_refill_period / NSEC_PER_SEC));
	EXC_RESOURCE_IO_ENCODE_LIMIT(code[0], (lei.lei_limit / (1024 * 1024)));
	EXC_RESOURCE_IO_ENCODE_OBSERVED(code[1], (lei.lei_balance / (1024 * 1024)));
	exception_triage(EXC_RESOURCE, code, EXCEPTION_CODE_MAX);
#endif /* EXC_RESOURCE_MONITORS */
}

/* Placeholders for the task set/get voucher interfaces */
kern_return_t
task_get_mach_voucher(
	task_t task,
	mach_voucher_selector_t __unused which,
	ipc_voucher_t *voucher)
{
	if (TASK_NULL == task)
		return KERN_INVALID_TASK;

	*voucher = NULL;
	return KERN_SUCCESS;
}

kern_return_t
task_set_mach_voucher(
	task_t task,
	ipc_voucher_t __unused voucher)
{
	if (TASK_NULL == task)
		return KERN_INVALID_TASK;

	return KERN_SUCCESS;
}

kern_return_t
task_swap_mach_voucher(
	task_t task,
	ipc_voucher_t new_voucher,
	ipc_voucher_t *in_out_old_voucher)
{
	if (TASK_NULL == task)
		return KERN_INVALID_TASK;

	*in_out_old_voucher = new_voucher;
	return KERN_SUCCESS;
}
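
/*
 * Of the placeholders above, only task_swap_mach_voucher has observable
 * behavior: it echoes new_voucher back through in_out_old_voucher, so the
 * caller sees its own voucher returned and no reference is retained here.
 */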

void task_set_gpu_denied(task_t task, boolean_t denied)
{
	task_lock(task);

	if (denied) {
		task->t_flags |= TF_GPU_DENIED;
	} else {
		task->t_flags &= ~TF_GPU_DENIED;
	}

	task_unlock(task);
}

boolean_t task_is_gpu_denied(task_t task)
{
	/* We don't need the lock to read this flag */
	return (task->t_flags & TF_GPU_DENIED) ? TRUE : FALSE;
}
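
/*
 * The locking asymmetry above is safe: the writer takes the task lock so
 * its read-modify-write of t_flags cannot clobber concurrent updates to
 * other flag bits, while readers of a single bit tolerate a momentarily
 * stale answer.
 */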

uint64_t get_task_memory_region_count(task_t task)
{
	vm_map_t map;
	map = (task == kernel_task) ? kernel_map : task->map;
	return ((uint64_t)get_map_nentries(map));
}
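
/*
 * Emit the kdebug events describing one dyld image: UUID, load address,
 * and the fsid/fsobjid pair identifying the backing file. LP64 packs this
 * into two events of 64-bit arguments; 32-bit needs three events, which is
 * why base_code is offset differently in each branch.
 */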
static void
kdebug_trace_dyld_internal(uint32_t base_code,
    struct dyld_kernel_image_info *info)
{
	static_assert(sizeof(info->uuid) >= 16);

#if defined(__LP64__)
	uint64_t *uuid = (uint64_t *)&(info->uuid);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code), uuid[0],
	    uuid[1], info->load_addr,
	    (uint64_t)info->fsid.val[0] | ((uint64_t)info->fsid.val[1] << 32),
	    0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 1),
	    (uint64_t)info->fsobjid.fid_objno |
	    ((uint64_t)info->fsobjid.fid_generation << 32),
	    0, 0, 0, 0);
#else /* defined(__LP64__) */
	uint32_t *uuid = (uint32_t *)&(info->uuid);

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 2), uuid[0],
	    uuid[1], uuid[2], uuid[3], 0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 3),
	    (uint32_t)info->load_addr, info->fsid.val[0], info->fsid.val[1],
	    info->fsobjid.fid_objno, 0);
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
	    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, base_code + 4),
	    info->fsobjid.fid_generation, 0, 0, 0, 0);
#endif /* !defined(__LP64__) */
}
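
/*
 * Copy the caller-supplied image-info array out of the IPC copy object,
 * trace each entry, then tear the mapping down again. When kdebug is off
 * (or the dyld debug IDs are filtered out) the copy object is discarded
 * up front so nothing is mapped needlessly.
 */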
static kern_return_t
kdebug_trace_dyld(task_t task, uint32_t base_code,
    vm_map_copy_t infos_copy, mach_msg_type_number_t infos_len)
{
	kern_return_t kr;
	dyld_kernel_image_info_array_t infos;
	vm_map_offset_t map_data;
	vm_offset_t data;

	if (!kdebug_enable ||
	    !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0)))
	{
		vm_map_copy_discard(infos_copy);
		return KERN_SUCCESS;
	}

	assert(infos_copy != NULL);

	if (task == NULL || task != current_task()) {
		return KERN_INVALID_TASK;
	}

	kr = vm_map_copyout(ipc_kernel_map, &map_data, (vm_map_copy_t)infos_copy);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	infos = CAST_DOWN(dyld_kernel_image_info_array_t, map_data);

	for (mach_msg_type_number_t i = 0; i < infos_len; i++) {
		kdebug_trace_dyld_internal(base_code, &(infos[i]));
	}

	data = CAST_DOWN(vm_offset_t, map_data);
	mach_vm_deallocate(ipc_kernel_map, data, infos_len * sizeof(infos[0]));
	return KERN_SUCCESS;
}

kern_return_t
task_register_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
	return kdebug_trace_dyld(task, DBG_DYLD_UUID_MAP_A,
	    (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_unregister_dyld_image_infos(task_t task,
    dyld_kernel_image_info_array_t infos_copy,
    mach_msg_type_number_t infos_len)
{
	return kdebug_trace_dyld(task, DBG_DYLD_UUID_UNMAP_A,
	    (vm_map_copy_t)infos_copy, infos_len);
}

kern_return_t
task_get_dyld_image_infos(__unused task_t task,
    __unused dyld_kernel_image_info_array_t *dyld_images,
    __unused mach_msg_type_number_t *dyld_imagesCnt)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_shared_cache_image_info(task_t task,
    dyld_kernel_image_info_t cache_img,
    __unused boolean_t no_cache,
    __unused boolean_t private_cache)
{
	if (task == NULL || task != current_task()) {
		return KERN_INVALID_TASK;
	}

	kdebug_trace_dyld_internal(DBG_DYLD_UUID_SHARED_CACHE_A, &cache_img);
	return KERN_SUCCESS;
}

kern_return_t
task_register_dyld_set_dyld_state(__unused task_t task,
    __unused uint8_t dyld_state)
{
	return KERN_NOT_SUPPORTED;
}

kern_return_t
task_register_dyld_get_process_state(__unused task_t task,
    __unused dyld_kernel_process_info_t *dyld_process_state)
{
	return KERN_NOT_SUPPORTED;
}

#if CONFIG_SECLUDED_MEMORY
int num_tasks_can_use_secluded_mem = 0;
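
/*
 * How the secluded-memory flags relate, as the code below uses them:
 * task_could_use_secluded_mem is the eligibility bit and gates any change;
 * task_can_use_secluded_mem means the task is currently opted in and is
 * counted in num_tasks_can_use_secluded_mem; task_could_also_use_secluded_mem
 * lets a task piggyback on secluded memory whenever that count is non-zero.
 */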

void
task_set_can_use_secluded_mem(
	task_t task,
	boolean_t can_use_secluded_mem)
{
	if (!task->task_could_use_secluded_mem) {
		return;
	}
	task_lock(task);
	task_set_can_use_secluded_mem_locked(task, can_use_secluded_mem);
	task_unlock(task);
}

void
task_set_can_use_secluded_mem_locked(
	task_t task,
	boolean_t can_use_secluded_mem)
{
	assert(task->task_could_use_secluded_mem);
	if (can_use_secluded_mem &&
	    secluded_for_apps && /* global boot-arg */
	    !task->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem >= 0);
		OSAddAtomic(+1,
		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
		task->task_can_use_secluded_mem = TRUE;
	} else if (!can_use_secluded_mem &&
	    task->task_can_use_secluded_mem) {
		assert(num_tasks_can_use_secluded_mem > 0);
		OSAddAtomic(-1,
		    (volatile SInt32 *)&num_tasks_can_use_secluded_mem);
		task->task_can_use_secluded_mem = FALSE;
	}
}

void
task_set_could_use_secluded_mem(
	task_t task,
	boolean_t could_use_secluded_mem)
{
	task->task_could_use_secluded_mem = could_use_secluded_mem;
}

void
task_set_could_also_use_secluded_mem(
	task_t task,
	boolean_t could_also_use_secluded_mem)
{
	task->task_could_also_use_secluded_mem = could_also_use_secluded_mem;
}

boolean_t
task_can_use_secluded_mem(
	task_t task)
{
	if (task->task_can_use_secluded_mem) {
		assert(task->task_could_use_secluded_mem);
		assert(num_tasks_can_use_secluded_mem > 0);
		return TRUE;
	}
	if (task->task_could_also_use_secluded_mem &&
	    num_tasks_can_use_secluded_mem > 0) {
		assert(num_tasks_can_use_secluded_mem > 0);
		return TRUE;
	}
	return FALSE;
}

boolean_t
task_could_use_secluded_mem(
	task_t task)
{
	return task->task_could_use_secluded_mem;
}
#endif /* CONFIG_SECLUDED_MEMORY */

queue_head_t *
task_io_user_clients(task_t task)
{
	return (&task->io_user_clients);
}