/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 *
 * This file contains the structure definitions for tasks.
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Copyright (c) 2005 SPARTA, Inc.
 */
#ifndef _KERN_TASK_H_
#define _KERN_TASK_H_

#include <kern/kern_types.h>
#include <mach/mach_types.h>
#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/kern_cdata.h>
#include <mach/sfi_class.h>
#include <kern/queue.h>
#endif /* XNU_KERNEL_PRIVATE */
#ifdef MACH_KERNEL_PRIVATE

#include <mach/boolean.h>
#include <mach/port.h>
#include <mach/time_value.h>
#include <mach/message.h>
#include <mach/mach_param.h>
#include <mach/task_info.h>
#include <mach/exception_types.h>
#include <mach/vm_statistics.h>
#include <machine/task.h>

#if MONOTONIC
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <kern/cpu_data.h>
#include <kern/queue.h>
#include <kern/exception.h>
#include <kern/locks.h>
#include <security/_label.h>
#include <ipc/ipc_port.h>

#include <kern/thread.h>
#include <mach/coalition.h>
#include <stdatomic.h>
#include <os/refcnt.h>

#include <atm/atm_internal.h>
struct _cpu_time_qos_stats {
        uint64_t cpu_time_qos_default;
        uint64_t cpu_time_qos_maintenance;
        uint64_t cpu_time_qos_background;
        uint64_t cpu_time_qos_utility;
        uint64_t cpu_time_qos_legacy;
        uint64_t cpu_time_qos_user_initiated;
        uint64_t cpu_time_qos_user_interactive;
};

#include <bank/bank_internal.h>

struct task {
        /* Synchronization/destruction information */
        decl_lck_mtx_data(,lock)                /* Task's lock */
        os_refcnt_t     ref_count;              /* Number of references to me */
        boolean_t       active;                 /* Task has not been terminated */
        boolean_t       halting;                /* Task is being halted */

        vm_map_t        map;                    /* Address space description */
        queue_chain_t   tasks;                  /* global list of tasks */

#if defined(CONFIG_SCHED_MULTIQ)
        sched_group_t   sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

        /* Threads in this task */
        queue_head_t    threads;

        processor_set_t pset_hint;
        struct affinity_space   *affinity_space;

        uint32_t        active_thread_count;
        int             suspend_count;          /* Internal scheduling only */

        /* User-visible scheduling information */
        integer_t       user_stop_count;        /* outstanding stops */
        integer_t       legacy_stop_count;      /* outstanding legacy stops */

        integer_t       priority;               /* base priority for threads */
        integer_t       max_priority;           /* maximum priority for threads */

        integer_t       importance;             /* priority offset (BSD 'nice' value) */

        /* Task security and audit tokens */
        security_token_t sec_token;
        audit_token_t    audit_token;

        uint64_t        total_user_time;        /* terminated threads only */
        uint64_t        total_system_time;
        uint64_t        total_ptime;
        uint64_t        total_runnable_time;
        decl_lck_mtx_data(,itk_lock_data)
        struct ipc_port *itk_self;              /* not a right, doesn't hold ref */
        struct ipc_port *itk_nself;             /* not a right, doesn't hold ref */
        struct ipc_port *itk_sself;             /* a send right */
        struct exception_action exc_actions[EXC_TYPES_COUNT];
                                                /* a send right each valid element */
        struct ipc_port *itk_host;              /* a send right */
        struct ipc_port *itk_bootstrap;         /* a send right */
        struct ipc_port *itk_seatbelt;          /* a send right */
        struct ipc_port *itk_gssd;              /* yet another send right */
        struct ipc_port *itk_debug_control;     /* send right for debugmode communications */
        struct ipc_port *itk_task_access;       /* and another send right */
        struct ipc_port *itk_resume;            /* a receive right to resume this task */
        struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX];
                                                /* all send rights */

        struct ipc_space *itk_space;
        /* Synchronizer ownership information */
        queue_head_t    semaphore_list;         /* list of owned semaphores */
        int             semaphores_owned;       /* number of semaphores owned */

        unsigned int    priv_flags;             /* privilege resource flags */
#define VM_BACKING_STORE_PRIV   0x1

        integer_t       faults;                 /* faults counter */
        integer_t       pageins;                /* pageins counter */
        integer_t       cow_faults;             /* copy on write fault counter */
        integer_t       messages_sent;          /* messages sent counter */
        integer_t       messages_received;      /* messages received counter */
        integer_t       syscalls_mach;          /* mach system call counter */
        integer_t       syscalls_unix;          /* unix system call counter */
        uint32_t        c_switch;               /* total context switches */
        uint32_t        p_switch;               /* total processor switches */
        uint32_t        ps_switch;              /* total pset switches */

        kcdata_descriptor_t     corpse_info;
        uint64_t                crashed_thread_id;
        queue_chain_t           corpse_tasks;

        struct label            *crash_label;

        struct vm_shared_region *shared_region;
        volatile uint32_t t_flags;              /* general-purpose task flags protected by task_lock (TL) */
#define TF_64B_ADDR             0x00000001      /* task has 64-bit addressing */
#define TF_64B_DATA             0x00000002      /* task has 64-bit data registers */
#define TF_CPUMON_WARNING       0x00000004      /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING      0x00000008      /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY            (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED           0x00000010      /* task is not allowed to access the GPU */
#define TF_CORPSE               0x00000020      /* task is a corpse */
#define TF_PENDING_CORPSE       0x00000040      /* task corpse has not been reported yet */
#define TF_CORPSE_FORK          0x00000080      /* task is a forked corpse */
#define TF_LRETURNWAIT          0x00000100      /* task is waiting for fork/posix_spawn/exec to complete */
#define TF_LRETURNWAITER        0x00000200      /* task is waiting for TF_LRETURNWAIT to get cleared */
#define TF_PLATFORM             0x00000400      /* task is a platform binary */
#define TF_CA_CLIENT_WI         0x00000800      /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE        0x00001000      /* task is in darkwake mode */
        /*
         * Task is running within a 64-bit address space.
         */
#define task_has_64Bit_addr(task)       \
        (((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task)       \
        ((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task)     \
        ((task)->t_flags &= ~TF_64B_ADDR)

        /*
         * Task is using 64-bit machine state.
         */
#define task_has_64Bit_data(task)       \
        (((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task)       \
        ((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task)     \
        ((task)->t_flags &= ~TF_64B_DATA)
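/*
 * Illustrative sketch (not part of the original header): how a caller might
 * propagate the addressing/data-width flags from one task to another, for
 * example across exec. "old_task" and "new_task" are hypothetical locals;
 * since t_flags is documented above as protected by the task lock, the
 * writes would normally be done with the destination task locked.
 *
 *      if (task_has_64Bit_addr(old_task)) {
 *              task_set_64Bit_addr(new_task);
 *      }
 *      if (task_has_64Bit_data(old_task)) {
 *              task_set_64Bit_data(new_task);
 *      }
 */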
#define task_is_a_corpse(task)  \
        (((task)->t_flags & TF_CORPSE) != 0)

#define task_set_corpse(task)   \
        ((task)->t_flags |= TF_CORPSE)

#define task_corpse_pending_report(task)        \
        (((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task)    \
        ((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task)  \
        ((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task)     \
        (((task)->t_flags & TF_CORPSE_FORK) != 0)
        uint32_t t_procflags;                   /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_DID_EXEC            0x00000001      /* task has been execed to a new task */
#define TPF_EXEC_COPY           0x00000002      /* task is the new copy of an exec */
#ifdef CONFIG_32BIT_TELEMETRY
#define TPF_LOG_32BIT_TELEMETRY 0x00000004      /* task should log identifying information */
#endif /* CONFIG_32BIT_TELEMETRY */

#define task_did_exec_internal(task)    \
        (((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task)        \
        (((task)->t_procflags & TPF_EXEC_COPY) != 0)
        mach_vm_address_t       all_image_info_addr;    /* dyld __all_image_info */
        mach_vm_size_t          all_image_info_size;    /* section location and size */

#define TASK_KPC_FORCED_ALL_CTRS        0x2     /* Bit in "t_kpc" signifying this task forced all counters */
        uint32_t t_kpc;                         /* kpc flags */

        boolean_t pidsuspended;                 /* pid_suspend called; no threads can execute */
        boolean_t frozen;                       /* frozen; private resident pages committed to swap */
        boolean_t changing_freeze_state;        /* in the process of freezing or thawing */
        uint16_t policy_ru_cpu          :4,
                 policy_ru_cpu_ext      :4,
                 applied_ru_cpu_ext     :4;
        uint8_t  rusage_cpu_flags;
        uint8_t  rusage_cpu_percentage;         /* Task-wide CPU limit percentage */
        uint8_t  rusage_cpu_perthr_percentage;  /* Per-thread CPU limit percentage */

        int8_t   suspends_outstanding;          /* suspends this task performed in excess of resumes */

        uint64_t rusage_cpu_interval;           /* Task-wide CPU limit interval */
        uint64_t rusage_cpu_perthr_interval;    /* Per-thread CPU limit interval */
        uint64_t rusage_cpu_deadline;
        thread_call_t rusage_cpu_callt;
#if CONFIG_EMBEDDED
        queue_head_t task_watchers;             /* app state watcher threads */
        int          num_taskwatchers;
#endif /* CONFIG_EMBEDDED */
        struct atm_task_descriptor      *atm_context;   /* pointer to per task atm descriptor */
        struct bank_task                *bank_context;  /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
        struct ipc_importance_task      *task_imp_base; /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

        vm_extmod_statistics_data_t     extmod_statistics;

        struct task_requested_policy    requested_policy;
        struct task_effective_policy    effective_policy;
        /*
         * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
         */
        uint32_t        low_mem_notified_warn           :1,     /* warning low memory notification is sent to the task */
                        low_mem_notified_critical       :1,     /* critical low memory notification is sent to the task */
                        purged_memory_warn              :1,     /* purgeable memory of the task is purged for warning level pressure */
                        purged_memory_critical          :1,     /* purgeable memory of the task is purged for critical level pressure */
                        low_mem_privileged_listener     :1,     /* if set, task would like to know about pressure changes before other tasks on the system */
                        mem_notify_reserved             :27;    /* reserved for future use */

        uint32_t        memlimit_is_active              :1,     /* if set, use active attributes, otherwise use inactive attributes */
                        memlimit_is_fatal               :1,     /* if set, exceeding current memlimit will prove fatal to the task */
                        memlimit_active_exc_resource    :1,     /* if set, suppress exc_resource exception when task exceeds active memory limit */
                        memlimit_inactive_exc_resource  :1,     /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
                        memlimit_attrs_reserved         :28;    /* reserved for future use */
        io_stat_info_t  task_io_stats;
        uint64_t        task_immediate_writes __attribute__((aligned(8)));
        uint64_t        task_deferred_writes __attribute__((aligned(8)));
        uint64_t        task_invalidated_writes __attribute__((aligned(8)));
        uint64_t        task_metadata_writes __attribute__((aligned(8)));

        /*
         * The cpu_time_qos_stats fields are protected by the task lock
         */
        struct _cpu_time_qos_stats     cpu_time_eqos_stats;
        struct _cpu_time_qos_stats     cpu_time_rqos_stats;

        /* Statistics accumulated for terminated threads from this task */
        uint32_t        task_timer_wakeups_bin_1;
        uint32_t        task_timer_wakeups_bin_2;
        uint64_t        task_gpu_ns;
        uint64_t        task_energy;
#if MONOTONIC
        /* Read and written under task_lock */
        struct mt_task task_monotonic;
#endif /* MONOTONIC */

        /* # of purgeable volatile VM objects owned by this task: */
        int             task_volatile_objects;
        /* # of purgeable but not volatile VM objects owned by this task: */
        int             task_nonvolatile_objects;
        boolean_t       task_purgeable_disowning;
        boolean_t       task_purgeable_disowned;
        queue_head_t    task_objq;
        decl_lck_mtx_data(,task_objq_lock)      /* protects "task_objq" */
        unsigned int    task_thread_limit:16;
#if defined(__arm64__)
        unsigned int    task_legacy_footprint:1;
#endif /* __arm64__ */
        unsigned int    task_region_footprint:1;
        unsigned int    task_has_crossed_thread_limit:1;
        /*
         * A task's coalition set is "adopted" in task_create_internal
         * and unset in task_deallocate_internal, so each array member
         * can be referenced without the task lock.
         * Note: these fields are protected by coalition->lock,
         * not the task lock.
         */
        coalition_t     coalition[COALITION_NUM_TYPES];
        queue_chain_t   task_coalition[COALITION_NUM_TYPES];
        uint64_t        dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
        boolean_t       task_unnested;
        int             task_disconnected_count;
#endif /* DEVELOPMENT || DEBUG */

#if HYPERVISOR
        void            *hv_task_target;        /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */
#if CONFIG_SECLUDED_MEMORY
        uint8_t task_can_use_secluded_mem;
        uint8_t task_could_use_secluded_mem;
        uint8_t task_could_also_use_secluded_mem;
        uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

        uint32_t task_exc_guard;

        queue_head_t    io_user_clients;
};
#define TASK_EXC_GUARD_VM_DELIVER       0x01    /* Deliver virtual memory EXC_GUARD exceptions */
#define TASK_EXC_GUARD_VM_ONCE          0x02    /* Deliver them only once */
#define TASK_EXC_GUARD_VM_CORPSE        0x04    /* Deliver them via a forked corpse */
#define TASK_EXC_GUARD_VM_FATAL         0x08    /* Virtual Memory EXC_GUARD delivery is fatal */
#define TASK_EXC_GUARD_VM_ALL           0x0f

#define TASK_EXC_GUARD_MP_DELIVER       0x10    /* Deliver mach port EXC_GUARD exceptions */
#define TASK_EXC_GUARD_MP_ONCE          0x20    /* Deliver them only once */
#define TASK_EXC_GUARD_MP_CORPSE        0x40    /* Deliver them via a forked corpse */
#define TASK_EXC_GUARD_MP_FATAL         0x80    /* mach port EXC_GUARD delivery is fatal */

extern uint32_t task_exc_guard_default;
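/*
 * Illustrative sketch (not part of the original header): task_exc_guard and
 * task_exc_guard_default are bitmasks built from the TASK_EXC_GUARD_* bits
 * above. For example, a hypothetical policy that delivers VM guard
 * exceptions once via a forked corpse and treats mach port guard violations
 * as fatal would be expressed as:
 *
 *      uint32_t exc_guard_behavior = TASK_EXC_GUARD_VM_DELIVER |
 *          TASK_EXC_GUARD_VM_ONCE | TASK_EXC_GUARD_VM_CORPSE |
 *          TASK_EXC_GUARD_MP_DELIVER | TASK_EXC_GUARD_MP_FATAL;
 */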
extern kern_return_t
task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);
#define task_lock(task)                 lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task)    LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task)             lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task)               lck_mtx_unlock(&(task)->lock)
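/*
 * Illustrative sketch (not part of the original header): fields of struct
 * task that are documented as protected by the task lock are read and
 * written between task_lock() and task_unlock(). A minimal sketch of the
 * idiom, not a real code path:
 *
 *      task_lock(task);
 *      if (task->active) {
 *              task->suspend_count++;
 *      }
 *      task_unlock(task);
 */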
#define task_objq_lock_init(task)       lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock(task)            lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task)       LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task)        lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task)          lck_mtx_unlock(&(task)->task_objq_lock)

#define itk_lock_init(task)     lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task)  lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task)          lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task)        lck_mtx_unlock(&(task)->itk_lock_data)
#define TASK_REFERENCE_LEAK_DEBUG 0

#if TASK_REFERENCE_LEAK_DEBUG
extern void task_reference_internal(task_t task);
extern os_ref_count_t task_deallocate_internal(task_t task);
#else
#define task_reference_internal(task)           os_ref_retain(&(task)->ref_count)
#define task_deallocate_internal(task)          os_ref_release(&(task)->ref_count)
#endif
#define task_reference(task)                    \
MACRO_BEGIN                                     \
        if ((task) != TASK_NULL)                \
                task_reference_internal(task);  \
MACRO_END
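/*
 * Illustrative sketch (not part of the original header): each reference
 * taken with task_reference()/task_reference_internal() must be balanced by
 * a matching task_deallocate(), which is declared later in this header:
 *
 *      task_reference(task);
 *      ... use the task while the reference is held ...
 *      task_deallocate(task);
 */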
extern kern_return_t    kernel_task_create(
                                vm_offset_t     map_base,

/* Initialize task module */
extern void             task_init(void);

/* coalition_init() calls this to initialize ledgers before task_init() */
extern void             init_task_ledgers(void);

#define current_task_fast()     (current_thread()->task)
#define current_task()          current_task_fast()

extern lck_attr_t       task_lck_attr;
extern lck_grp_t        task_lck_grp;
#else /* MACH_KERNEL_PRIVATE */

extern task_t   current_task(void);

extern void     task_reference(task_t task);

#define TF_LRETURNWAIT          0x00000100      /* task is waiting for fork/posix_spawn/exec to complete */
#define TF_LRETURNWAITER        0x00000200      /* task is waiting for TF_LRETURNWAIT to get cleared */

#define TPF_EXEC_COPY           0x00000002      /* task is the new copy of an exec */

#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE

/* Hold all threads in a task */
extern kern_return_t    task_hold(

/* Wait for task to stop running, either just to get off CPU or to cease being runnable */
extern kern_return_t    task_wait(
                                boolean_t       until_not_runnable);

/* Release hold on all threads in a task */
extern kern_return_t    task_release(
/* Suspend/resume a task where the kernel owns the suspend count */
extern kern_return_t    task_suspend_internal(task_t task);
extern kern_return_t    task_resume_internal(task_t task);
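/*
 * Illustrative sketch (not part of the original header): kernel-owned
 * suspensions are counted, so a caller that suspends a task in order to
 * inspect it balances the operation with a resume, e.g. (hypothetical
 * caller):
 *
 *      if (task_suspend_internal(task) == KERN_SUCCESS) {
 *              ... examine the stopped task ...
 *              task_resume_internal(task);
 *      }
 */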
/* Suspends a task by placing a hold on its threads */
extern kern_return_t    task_pidsuspend(

extern kern_return_t    task_pidsuspend_locked(

/* Resumes a previously paused task */
extern kern_return_t    task_pidresume(

extern kern_return_t    task_send_trace_memory(
#if DEVELOPMENT || DEBUG

extern kern_return_t    task_disconnect_page_mappings(
#endif /* DEVELOPMENT || DEBUG */

extern void             tasks_system_suspend(boolean_t suspend);
#if CONFIG_FREEZE

/* Freeze a task's resident pages */
extern kern_return_t    task_freeze(
                                uint32_t        *purgeable_count,
                                uint32_t        *wired_count,
                                uint32_t        *clean_count,
                                uint32_t        *dirty_count,
                                uint32_t        dirty_budget,
                                uint32_t        *shared_count,
                                int             *freezer_error_code,
                                boolean_t       eval_only);

/* Thaw a currently frozen task */
extern kern_return_t    task_thaw(

#endif /* CONFIG_FREEZE */
/* Halt all other threads in the current task */
extern kern_return_t    task_start_halt(

/* Wait for other threads to halt and free halting task resources */
extern void             task_complete_halt(

extern kern_return_t    task_terminate_internal(

extern kern_return_t    task_create_internal(
                                coalition_t     *parent_coalitions,
                                boolean_t       inherit_memory,
                                boolean_t       is_64bit_data,
                                task_t          *child_task);   /* OUT */

extern kern_return_t    task_info(
                                task_flavor_t           flavor,
                                task_info_t             task_info_out,
                                mach_msg_type_number_t  *task_info_count);

extern void             task_power_info_locked(
                                task_power_info_t       info,
                                gpu_energy_data_t       gpu_energy,
                                task_power_info_v2_t    infov2);
extern uint64_t         task_gpu_utilisation(

extern uint64_t         task_energy(

extern uint64_t         task_cpu_ptime(

extern void             task_update_cpu_time_qos_stats(
                                uint64_t        *eqos_stats,
                                uint64_t        *rqos_stats);

extern void             task_vtimer_set(

extern void             task_vtimer_clear(

extern void             task_vtimer_update(
                                uint32_t        *microsecs);

#define TASK_VTIMER_USER        0x01
#define TASK_VTIMER_PROF        0x02
#define TASK_VTIMER_RLIM        0x04
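/*
 * Illustrative sketch (not part of the original header): the TASK_VTIMER_*
 * bits select which per-task virtual timer task_vtimer_set() and
 * task_vtimer_clear() act on. Assuming the elided parameter of those
 * functions is the timer selector, a hypothetical caller arming and later
 * disarming the user-time timer would look like:
 *
 *      task_vtimer_set(task, TASK_VTIMER_USER);
 *      ...
 *      task_vtimer_clear(task, TASK_VTIMER_USER);
 */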
extern void             task_set_64bit(
                                boolean_t       is_64bit_data);

extern boolean_t        task_get_64bit_data(

extern void             task_set_platform_binary(
                                boolean_t       is_platform);

extern bool             task_set_ca_client_wi(
                                boolean_t       ca_client_wi);

extern void             task_set_dyld_info(
                                mach_vm_address_t       addr,
                                mach_vm_size_t          size);

/* Get number of activations in a task */
extern int              get_task_numacts(

extern int              get_task_numactivethreads(task_t task);
extern kern_return_t    task_collect_crash_info(
                                struct label    *crash_label,

void task_port_notify(mach_msg_header_t *msg);
void task_wait_till_threads_terminate_locked(task_t task);
/* JMM - should just be temporary (implementation in bsd_kern still) */
extern void     set_bsdtask_info(task_t, void *);
extern vm_map_t get_task_map_reference(task_t);
extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
extern pmap_t   get_task_pmap(task_t);
extern uint64_t get_task_resident_size(task_t);
extern uint64_t get_task_compressed(task_t);
extern uint64_t get_task_resident_max(task_t);
extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
extern uint64_t get_task_phys_footprint_limit(task_t);
extern uint64_t get_task_purgeable_size(task_t);
extern uint64_t get_task_cpu_time(task_t);
extern uint64_t get_task_dispatchqueue_offset(task_t);
extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
extern uint64_t get_task_uniqueid(task_t task);
extern int      get_task_version(task_t task);

extern uint64_t get_task_internal(task_t);
extern uint64_t get_task_internal_compressed(task_t);
extern uint64_t get_task_purgeable_nonvolatile(task_t);
extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
extern uint64_t get_task_iokit_mapped(task_t);
extern uint64_t get_task_alternate_accounting(task_t);
extern uint64_t get_task_alternate_accounting_compressed(task_t);
extern uint64_t get_task_memory_region_count(task_t);
extern uint64_t get_task_page_table(task_t);
extern uint64_t get_task_network_nonvolatile(task_t);
extern uint64_t get_task_network_nonvolatile_compressed(task_t);
extern uint64_t get_task_wired_mem(task_t);
extern kern_return_t    task_convert_phys_footprint_limit(int, int *);
extern kern_return_t    task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
extern kern_return_t    task_get_phys_footprint_limit(task_t task, int *limit_mb);

/* Jetsam memlimit attributes */
extern boolean_t task_get_memlimit_is_active(task_t task);
extern boolean_t task_get_memlimit_is_fatal(task_t task);
extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);

extern void task_set_thread_limit(task_t task, uint16_t thread_limit);
extern boolean_t        is_kerneltask(task_t task);
extern boolean_t        is_corpsetask(task_t task);

extern kern_return_t    check_actforsig(task_t task, thread_t thread, int setast);

extern kern_return_t    machine_task_get_state(
                                thread_state_t          state,
                                mach_msg_type_number_t  *state_count);

extern kern_return_t    machine_task_set_state(
                                thread_state_t          state,
                                mach_msg_type_number_t  state_count);

extern void             machine_task_terminate(task_t task);
struct _task_ledger_indices {
        int alternate_accounting;
        int alternate_accounting_compressed;
        int internal_compressed;
        int purgeable_volatile;
        int purgeable_nonvolatile;
        int purgeable_volatile_compressed;
        int purgeable_nonvolatile_compressed;
        int network_volatile;
        int network_nonvolatile;
        int network_volatile_compressed;
        int network_nonvolatile_compressed;
        int platform_idle_wakeups;
        int interrupt_wakeups;
#if CONFIG_SCHED_SFI
        int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
        int cpu_time_billed_to_me;
        int cpu_time_billed_to_others;
        int energy_billed_to_me;
        int energy_billed_to_others;
};
extern struct _task_ledger_indices task_ledgers;
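/*
 * Illustrative sketch (not part of the original header): the fields of
 * task_ledgers are indices into a task's ledger. Assuming the task's ledger
 * handle is the (elided here) "ledger" field of struct task, a hypothetical
 * read of the interrupt-wakeup entry would look like:
 *
 *      ledger_amount_t credit, debit;
 *      ledger_get_entries(task->ledger, task_ledgers.interrupt_wakeups,
 *          &credit, &debit);
 */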
/* requires task to be unlocked, returns a referenced thread */
thread_t task_findtid(task_t task, uint64_t tid);
int pid_from_task(task_t task);

extern kern_return_t    task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
extern kern_return_t    task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
extern void             task_rollup_accounting_info(task_t new_task, task_t parent_task);
extern kern_return_t    task_io_monitor_ctl(task_t task, uint32_t *flags);
extern void             task_set_did_exec_flag(task_t task);
extern void             task_clear_exec_copy_flag(task_t task);
extern boolean_t        task_is_exec_copy(task_t);
extern boolean_t        task_did_exec(task_t task);
#ifdef CONFIG_32BIT_TELEMETRY
extern boolean_t        task_consume_32bit_log_flag(task_t task);
extern void             task_set_32bit_log_flag(task_t task);
#endif /* CONFIG_32BIT_TELEMETRY */
extern boolean_t        task_is_active(task_t task);
extern boolean_t        task_is_halting(task_t task);
extern void             task_clear_return_wait(task_t task);
extern void             task_wait_to_return(void) __attribute__((noreturn));
extern event_t          task_get_return_wait_event(task_t task);
extern void             task_atm_reset(task_t task);
extern void             task_bank_reset(task_t task);
extern void             task_bank_init(task_t task);

extern int              task_pid(task_t task);
extern boolean_t        task_has_assertions(task_t task);
/* End task_policy */

extern void             task_set_gpu_denied(task_t task, boolean_t denied);
extern boolean_t        task_is_gpu_denied(task_t task);

extern queue_head_t     *task_io_user_clients(task_t task);

extern void             task_copy_fields_for_exec(task_t dst_task, task_t src_task);
#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

extern void     *get_bsdtask_info(task_t);
extern void     *get_bsdthreadtask_info(thread_t);
extern void     task_bsdtask_kill(task_t);
extern vm_map_t get_task_map(task_t);
extern ledger_t get_task_ledger(task_t);

extern boolean_t get_task_pidsuspended(task_t);
extern boolean_t get_task_frozen(task_t);
/* Convert from a task to a port */
extern ipc_port_t convert_task_to_port(task_t);
extern ipc_port_t convert_task_name_to_port(task_name_t);
extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);

/* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);

extern boolean_t task_suspension_notify(mach_msg_header_t *);
#define TASK_WRITE_IMMEDIATE    0x1
#define TASK_WRITE_DEFERRED     0x2
#define TASK_WRITE_INVALIDATED  0x4
#define TASK_WRITE_METADATA     0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
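/*
 * Illustrative sketch (not part of the original header): the TASK_WRITE_*
 * flags tag the kind of logical I/O being charged to a task. A hypothetical
 * caller accounting a 4 KB deferred write against a vnode "vp" would do:
 *
 *      task_update_logical_writes(current_task(), 0x1000, TASK_WRITE_DEFERRED, vp);
 */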
#if CONFIG_SECLUDED_MEMORY
extern void task_set_can_use_secluded_mem(
        boolean_t can_use_secluded_mem);
extern void task_set_could_use_secluded_mem(
        boolean_t could_use_secluded_mem);
extern void task_set_could_also_use_secluded_mem(
        boolean_t could_also_use_secluded_mem);
extern boolean_t task_can_use_secluded_mem(
        boolean_t is_allocate);
extern boolean_t task_could_use_secluded_mem(task_t task);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void task_set_darkwake_mode(task_t, boolean_t);
extern boolean_t task_get_darkwake_mode(task_t);
#if defined(__arm64__)
extern void task_set_legacy_footprint(task_t task, boolean_t new_val);
#endif /* __arm64__ */

#if CONFIG_MACF
extern struct label *get_task_crash_label(task_t task);
#endif /* CONFIG_MACF */
#endif /* KERNEL_PRIVATE */

extern task_t   kernel_task;

extern void     task_deallocate(

extern void     task_name_deallocate(
                        task_name_t             task_name);

extern void     task_inspect_deallocate(
                        task_inspect_t          task_inspect);

extern void     task_suspension_token_deallocate(
                        task_suspension_token_t token);

extern boolean_t task_self_region_footprint(void);
extern void task_self_region_footprint_set(boolean_t newval);

#endif /* _KERN_TASK_H_ */