/*
 * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File: task.h
 * Author: Avadis Tevanian, Jr.
 *
 * This file contains the structure definitions for tasks.
 *
 */
/*
 * Copyright (c) 1993 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */

#ifndef _KERN_TASK_H_
#define _KERN_TASK_H_

#include <kern/kern_types.h>
#include <mach/mach_types.h>
#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/kern_cdata.h>
#include <mach/sfi_class.h>
#include <kern/queue.h>
#endif /* XNU_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

#include <mach/boolean.h>
#include <mach/port.h>
#include <mach/time_value.h>
#include <mach/message.h>
#include <mach/mach_param.h>
#include <mach/task_info.h>
#include <mach/exception_types.h>
#include <mach/vm_statistics.h>
#include <machine/task.h>

#if MONOTONIC
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <kern/cpu_data.h>
#include <kern/queue.h>
#include <kern/exception.h>
#include <kern/locks.h>
#include <security/_label.h>
#include <ipc/ipc_port.h>

#include <kern/thread.h>
#include <mach/coalition.h>
#include <stdatomic.h>
#include <os/refcnt.h>

#ifdef CONFIG_ATM
#include <atm/atm_internal.h>
#endif

struct _cpu_time_qos_stats {
	uint64_t cpu_time_qos_default;
	uint64_t cpu_time_qos_maintenance;
	uint64_t cpu_time_qos_background;
	uint64_t cpu_time_qos_utility;
	uint64_t cpu_time_qos_legacy;
	uint64_t cpu_time_qos_user_initiated;
	uint64_t cpu_time_qos_user_interactive;
};

#include <bank/bank_internal.h>

struct task {
	/* Synchronization/destruction information */
	decl_lck_mtx_data(, lock) /* Task's lock */
	os_refcnt_t ref_count; /* Number of references to me */
	boolean_t active; /* Task has not been terminated */
	boolean_t halting; /* Task is being halted */
	/* Virtual timers */
	uint32_t vtimers;

	/* Miscellaneous */
	vm_map_t map; /* Address space description */
	queue_chain_t tasks; /* global list of tasks */

#if defined(CONFIG_SCHED_MULTIQ)
	sched_group_t sched_group;
#endif /* defined(CONFIG_SCHED_MULTIQ) */

	/* Threads in this task */
	queue_head_t threads;

	processor_set_t pset_hint;
	struct affinity_space *affinity_space;

	int thread_count;
	uint32_t active_thread_count;
	int suspend_count; /* Internal scheduling only */

	/* User-visible scheduling information */
	integer_t user_stop_count; /* outstanding stops */
	integer_t legacy_stop_count; /* outstanding legacy stops */

	integer_t priority; /* base priority for threads */
	integer_t max_priority; /* maximum priority for threads */

	integer_t importance; /* priority offset (BSD 'nice' value) */

	/* Task security and audit tokens */
	security_token_t sec_token;
	audit_token_t audit_token;

	/* Statistics */
	uint64_t total_user_time; /* terminated threads only */
	uint64_t total_system_time;
	uint64_t total_ptime;
	uint64_t total_runnable_time;

	/* IPC structures */
	decl_lck_mtx_data(, itk_lock_data)
	struct ipc_port *itk_self; /* not a right, doesn't hold ref */
	struct ipc_port *itk_nself; /* not a right, doesn't hold ref */
	struct ipc_port *itk_sself; /* a send right */
	struct exception_action exc_actions[EXC_TYPES_COUNT];
	/* a send right for each valid element */
	struct ipc_port *itk_host; /* a send right */
	struct ipc_port *itk_bootstrap; /* a send right */
	struct ipc_port *itk_seatbelt; /* a send right */
	struct ipc_port *itk_gssd; /* yet another send right */
	struct ipc_port *itk_debug_control; /* send right for debugmode communications */
	struct ipc_port *itk_task_access; /* and another send right */
	struct ipc_port *itk_resume; /* a receive right to resume this task */
	struct ipc_port *itk_registered[TASK_PORT_REGISTER_MAX];
	/* all send rights */

	struct ipc_space *itk_space;

	ledger_t ledger;
	/* Synchronizer ownership information */
	queue_head_t semaphore_list; /* list of owned semaphores */
	int semaphores_owned; /* number of semaphores owned */

	unsigned int priv_flags; /* privilege resource flags */
#define VM_BACKING_STORE_PRIV 0x1

	MACHINE_TASK

	integer_t faults; /* faults counter */
	integer_t pageins; /* pageins counter */
	integer_t cow_faults; /* copy on write fault counter */
	integer_t messages_sent; /* messages sent counter */
	integer_t messages_received; /* messages received counter */
	integer_t syscalls_mach; /* mach system call counter */
	integer_t syscalls_unix; /* unix system call counter */
	uint32_t c_switch; /* total context switches */
	uint32_t p_switch; /* total processor switches */
	uint32_t ps_switch; /* total pset switches */

#ifdef MACH_BSD
	void *bsd_info;
#endif
	kcdata_descriptor_t corpse_info;
	uint64_t crashed_thread_id;
	queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
	struct label *crash_label;
#endif
	struct vm_shared_region *shared_region;
	volatile uint32_t t_flags; /* general-purpose task flags protected by task_lock (TL) */
#define TF_NONE 0
#define TF_64B_ADDR 0x00000001 /* task has 64-bit addressing */
#define TF_64B_DATA 0x00000002 /* task has 64-bit data registers */
#define TF_CPUMON_WARNING 0x00000004 /* task has at least one thread in CPU usage warning zone */
#define TF_WAKEMON_WARNING 0x00000008 /* task is in wakeups monitor warning zone */
#define TF_TELEMETRY (TF_CPUMON_WARNING | TF_WAKEMON_WARNING) /* task is a telemetry participant */
#define TF_GPU_DENIED 0x00000010 /* task is not allowed to access the GPU */
#define TF_CORPSE 0x00000020 /* task is a corpse */
#define TF_PENDING_CORPSE 0x00000040 /* task corpse has not been reported yet */
#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */
#define TF_LRETURNWAIT 0x00000100 /* task is waiting for fork/posix_spawn/exec to complete */
#define TF_LRETURNWAITER 0x00000200 /* task is waiting for TF_LRETURNWAIT to get cleared */
#define TF_PLATFORM 0x00000400 /* task is a platform binary */
#define TF_CA_CLIENT_WI 0x00000800 /* task has CA_CLIENT work interval */
#define TF_DARKWAKE_MODE 0x00001000 /* task is in darkwake mode */
#define TF_NO_SMT 0x00002000 /* task threads must not be paired with SMT threads */

/*
 * Task is running within a 64-bit address space.
 */
#define task_has_64Bit_addr(task) \
	(((task)->t_flags & TF_64B_ADDR) != 0)
#define task_set_64Bit_addr(task) \
	((task)->t_flags |= TF_64B_ADDR)
#define task_clear_64Bit_addr(task) \
	((task)->t_flags &= ~TF_64B_ADDR)

/*
 * Task is using 64-bit machine state.
 */
#define task_has_64Bit_data(task) \
	(((task)->t_flags & TF_64B_DATA) != 0)
#define task_set_64Bit_data(task) \
	((task)->t_flags |= TF_64B_DATA)
#define task_clear_64Bit_data(task) \
	((task)->t_flags &= ~TF_64B_DATA)

#define task_is_a_corpse(task) \
	(((task)->t_flags & TF_CORPSE) != 0)

#define task_set_corpse(task) \
	((task)->t_flags |= TF_CORPSE)

#define task_corpse_pending_report(task) \
	(((task)->t_flags & TF_PENDING_CORPSE) != 0)

#define task_set_corpse_pending_report(task) \
	((task)->t_flags |= TF_PENDING_CORPSE)

#define task_clear_corpse_pending_report(task) \
	((task)->t_flags &= ~TF_PENDING_CORPSE)

#define task_is_a_corpse_fork(task) \
	(((task)->t_flags & TF_CORPSE_FORK) != 0)
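
/*
 * Illustrative sketch (not part of the original header): t_flags is
 * documented above as protected by task_lock (TL), so the flag accessors
 * are expected to be used with the task lock held.  A hypothetical caller
 * using the lock macros defined later in this file might look like:
 *
 *	task_lock(task);
 *	if (!task_has_64Bit_addr(task)) {
 *		task_set_64Bit_addr(task);
 *	}
 *	task_unlock(task);
 */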

	uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */
#define TPF_NONE 0
#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
#ifdef CONFIG_32BIT_TELEMETRY
#define TPF_LOG_32BIT_TELEMETRY 0x00000004 /* task should log identifying information */
#endif

#define task_did_exec_internal(task) \
	(((task)->t_procflags & TPF_DID_EXEC) != 0)

#define task_is_exec_copy_internal(task) \
	(((task)->t_procflags & TPF_EXEC_COPY) != 0)

	mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */
	mach_vm_size_t all_image_info_size; /* section location and size */

#if KPC
#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_kpc" signifying this task forced all counters */
	uint32_t t_kpc; /* kpc flags */
#endif /* KPC */

	boolean_t pidsuspended; /* pid_suspend called; no threads can execute */
	boolean_t frozen; /* frozen; private resident pages committed to swap */
	boolean_t changing_freeze_state; /* in the process of freezing or thawing */
	uint16_t policy_ru_cpu :4,
	    policy_ru_cpu_ext :4,
	    applied_ru_cpu :4,
	    applied_ru_cpu_ext :4;
	uint8_t rusage_cpu_flags;
	uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */
	uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */
#if MACH_ASSERT
	int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */
#endif
	uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */
	uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
	uint64_t rusage_cpu_deadline;
	thread_call_t rusage_cpu_callt;
#if CONFIG_EMBEDDED
	queue_head_t task_watchers; /* app state watcher threads */
	int num_taskwatchers;
	int watchapplying;
#endif /* CONFIG_EMBEDDED */

#if CONFIG_ATM
	struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */
#endif
	struct bank_task *bank_context; /* pointer to per task bank structure */

#if IMPORTANCE_INHERITANCE
	struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
#endif /* IMPORTANCE_INHERITANCE */

	vm_extmod_statistics_data_t extmod_statistics;

	struct task_requested_policy requested_policy;
	struct task_effective_policy effective_policy;

	/*
	 * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
	 */
	uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */
	    low_mem_notified_critical :1, /* critical low memory notification is sent to the task */
	    purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */
	    purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */
	    low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */
	    mem_notify_reserved :27; /* reserved for future use */

	uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */
	    memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */
	    memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */
	    memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
	    memlimit_attrs_reserved :28; /* reserved for future use */

	io_stat_info_t task_io_stats;
	uint64_t task_immediate_writes __attribute__((aligned(8)));
	uint64_t task_deferred_writes __attribute__((aligned(8)));
	uint64_t task_invalidated_writes __attribute__((aligned(8)));
	uint64_t task_metadata_writes __attribute__((aligned(8)));

	/*
	 * The cpu_time_qos_stats fields are protected by the task lock
	 */
	struct _cpu_time_qos_stats cpu_time_eqos_stats;
	struct _cpu_time_qos_stats cpu_time_rqos_stats;

	/* Statistics accumulated for terminated threads from this task */
	uint32_t task_timer_wakeups_bin_1;
	uint32_t task_timer_wakeups_bin_2;
	uint64_t task_gpu_ns;
	uint64_t task_energy;

#if MONOTONIC
	/* Read and written under task_lock */
	struct mt_task task_monotonic;
#endif /* MONOTONIC */

	/* # of purgeable volatile VM objects owned by this task: */
	int task_volatile_objects;
	/* # of purgeable but not volatile VM objects owned by this task: */
	int task_nonvolatile_objects;
	boolean_t task_purgeable_disowning;
	boolean_t task_purgeable_disowned;
	queue_head_t task_objq;
	decl_lck_mtx_data(, task_objq_lock) /* protects "task_objq" */

	unsigned int task_thread_limit:16;
#if __arm64__
	unsigned int task_legacy_footprint:1;
#endif /* __arm64__ */
	unsigned int task_region_footprint:1;
	unsigned int task_has_crossed_thread_limit:1;
	uint32_t exec_token;
	/*
	 * A task's coalition set is "adopted" in task_create_internal
	 * and unset in task_deallocate_internal, so each array member
	 * can be referenced without the task lock.
	 * Note: these fields are protected by coalition->lock,
	 * not the task lock.
	 */
	coalition_t coalition[COALITION_NUM_TYPES];
	queue_chain_t task_coalition[COALITION_NUM_TYPES];
	uint64_t dispatchqueue_offset;

#if DEVELOPMENT || DEBUG
	boolean_t task_unnested;
	int task_disconnected_count;
#endif

#if HYPERVISOR
	void *hv_task_target; /* hypervisor virtual machine object associated with this task */
#endif /* HYPERVISOR */

#if CONFIG_SECLUDED_MEMORY
	uint8_t task_can_use_secluded_mem;
	uint8_t task_could_use_secluded_mem;
	uint8_t task_could_also_use_secluded_mem;
	uint8_t task_suppressed_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

	uint32_t task_exc_guard;

	queue_head_t io_user_clients;
};

#define TASK_EXC_GUARD_VM_DELIVER 0x01 /* Deliver virtual memory EXC_GUARD exceptions */
#define TASK_EXC_GUARD_VM_ONCE 0x02 /* Deliver them only once */
#define TASK_EXC_GUARD_VM_CORPSE 0x04 /* Deliver them via a forked corpse */
#define TASK_EXC_GUARD_VM_FATAL 0x08 /* Virtual Memory EXC_GUARD delivery is fatal */
#define TASK_EXC_GUARD_VM_ALL 0x0f

#define TASK_EXC_GUARD_MP_DELIVER 0x10 /* Deliver mach port EXC_GUARD exceptions */
#define TASK_EXC_GUARD_MP_ONCE 0x20 /* Deliver them only once */
#define TASK_EXC_GUARD_MP_CORPSE 0x40 /* Deliver them via a forked corpse */
#define TASK_EXC_GUARD_MP_FATAL 0x80 /* mach port EXC_GUARD delivery is fatal */
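
/*
 * Illustrative sketch (not part of the original header): task_exc_guard_default
 * below is presumably a bitmask built from the TASK_EXC_GUARD_* flags above.
 * For example, a policy that delivers both virtual memory and mach port
 * EXC_GUARD exceptions, each only once, could be written as:
 *
 *	uint32_t exc_guard_policy = TASK_EXC_GUARD_VM_DELIVER |
 *	    TASK_EXC_GUARD_VM_ONCE |
 *	    TASK_EXC_GUARD_MP_DELIVER |
 *	    TASK_EXC_GUARD_MP_ONCE;
 */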

extern uint32_t task_exc_guard_default;

extern kern_return_t
task_violated_guard(mach_exception_code_t, mach_exception_subcode_t, void *);

#define task_lock(task) lck_mtx_lock(&(task)->lock)
#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task) lck_mtx_unlock(&(task)->lock)
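
/*
 * Illustrative sketch (not part of the original header): typical usage of the
 * task lock macros above.  task_lock_try() expands to lck_mtx_try_lock(), so
 * callers that must not block handle the failure path themselves:
 *
 *	if (task_lock_try(task)) {
 *		task_lock_assert_owned(task);
 *		// ... inspect or update lock-protected task state ...
 *		task_unlock(task);
 *	}
 */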

#define task_objq_lock_init(task) lck_mtx_init(&(task)->task_objq_lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define task_objq_lock(task) lck_mtx_lock(&(task)->task_objq_lock)
#define task_objq_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->task_objq_lock, LCK_MTX_ASSERT_OWNED)
#define task_objq_lock_try(task) lck_mtx_try_lock(&(task)->task_objq_lock)
#define task_objq_unlock(task) lck_mtx_unlock(&(task)->task_objq_lock)

#define itk_lock_init(task) lck_mtx_init(&(task)->itk_lock_data, &ipc_lck_grp, &ipc_lck_attr)
#define itk_lock_destroy(task) lck_mtx_destroy(&(task)->itk_lock_data, &ipc_lck_grp)
#define itk_lock(task) lck_mtx_lock(&(task)->itk_lock_data)
#define itk_unlock(task) lck_mtx_unlock(&(task)->itk_lock_data)

#define TASK_REFERENCE_LEAK_DEBUG 0

#if TASK_REFERENCE_LEAK_DEBUG
extern void task_reference_internal(task_t task);
extern os_ref_count_t task_deallocate_internal(task_t task);
#else
#define task_reference_internal(task) os_ref_retain(&(task)->ref_count)
#define task_deallocate_internal(task) os_ref_release(&(task)->ref_count)
#endif

#define task_reference(task) \
MACRO_BEGIN \
	if ((task) != TASK_NULL) \
		task_reference_internal(task); \
MACRO_END
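
/*
 * Illustrative sketch (not part of the original header): a reference taken
 * with task_reference() is expected to be balanced by task_deallocate(),
 * which is declared for all kernel clients near the end of this file:
 *
 *	task_reference(task);	// safe even for TASK_NULL; the macro checks
 *	// ... use the task ...
 *	task_deallocate(task);
 */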

extern kern_return_t kernel_task_create(
	task_t task,
	vm_offset_t map_base,
	vm_size_t map_size,
	task_t *child);

/* Initialize task module */
extern void task_init(void);

/* coalition_init() calls this to initialize ledgers before task_init() */
extern void init_task_ledgers(void);

#define current_task_fast() (current_thread()->task)
#define current_task() current_task_fast()

extern lck_attr_t task_lck_attr;
extern lck_grp_t task_lck_grp;

#else /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

extern task_t current_task(void);

extern void task_reference(task_t task);

#define TF_NONE 0
#define TF_LRETURNWAIT 0x00000100 /* task is waiting for fork/posix_spawn/exec to complete */
#define TF_LRETURNWAITER 0x00000200 /* task is waiting for TF_LRETURNWAIT to get cleared */

#define TPF_NONE 0
#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */


__END_DECLS

#endif /* MACH_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef XNU_KERNEL_PRIVATE

/* Hold all threads in a task */
extern kern_return_t task_hold(
	task_t task);

/* Wait for task to stop running, either just to get off CPU or to cease being runnable */
extern kern_return_t task_wait(
	task_t task,
	boolean_t until_not_runnable);

/* Release hold on all threads in a task */
extern kern_return_t task_release(
	task_t task);
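
/*
 * Illustrative sketch (not part of the original header): task_hold(),
 * task_wait() and task_release() are intended to be used together by a
 * caller that needs every thread in the task quiesced; the exact locking
 * requirements are defined by their implementations in task.c:
 *
 *	if (task_hold(task) == KERN_SUCCESS) {
 *		task_wait(task, TRUE);	// TRUE: wait until threads cease being runnable
 *		// ... operate on the stopped task ...
 *		task_release(task);
 *	}
 */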

/* Suspend/resume a task where the kernel owns the suspend count */
extern kern_return_t task_suspend_internal(task_t task);
extern kern_return_t task_resume_internal(task_t task);

/* Suspends a task by placing a hold on its threads */
extern kern_return_t task_pidsuspend(
	task_t task);
extern kern_return_t task_pidsuspend_locked(
	task_t task);

/* Resumes a previously paused task */
extern kern_return_t task_pidresume(
	task_t task);
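
/*
 * Illustrative sketch (not part of the original header): task_pidsuspend()
 * and task_pidresume() are used as a matched pair, e.g. by a hypothetical
 * helper that briefly stops a task:
 *
 *	kern_return_t kr = task_pidsuspend(task);
 *	if (kr == KERN_SUCCESS) {
 *		// ... the task's threads are held ...
 *		kr = task_pidresume(task);
 *	}
 */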

extern kern_return_t task_send_trace_memory(
	task_t task,
	uint32_t pid,
	uint64_t uniqueid);

#if DEVELOPMENT || DEBUG

extern kern_return_t task_disconnect_page_mappings(
	task_t task);
#endif

extern void tasks_system_suspend(boolean_t suspend);

#if CONFIG_FREEZE

/* Freeze a task's resident pages */
extern kern_return_t task_freeze(
	task_t task,
	uint32_t *purgeable_count,
	uint32_t *wired_count,
	uint32_t *clean_count,
	uint32_t *dirty_count,
	uint32_t dirty_budget,
	uint32_t *shared_count,
	int *freezer_error_code,
	boolean_t eval_only);
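
/*
 * Illustrative sketch (not part of the original header): task_freeze()
 * reports its page accounting through the out parameters.  A hypothetical
 * caller, where max_dirty_pages is a caller-chosen dirty-page budget:
 *
 *	uint32_t purgeable, wired, clean, dirty, shared;
 *	int freezer_error;
 *	kern_return_t kr = task_freeze(task, &purgeable, &wired, &clean,
 *	    &dirty, max_dirty_pages, &shared, &freezer_error,
 *	    FALSE);	// eval_only FALSE: actually freeze, not just evaluate
 */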

/* Thaw a currently frozen task */
extern kern_return_t task_thaw(
	task_t task);

#endif /* CONFIG_FREEZE */

/* Halt all other threads in the current task */
extern kern_return_t task_start_halt(
	task_t task);

/* Wait for other threads to halt and free halting task resources */
extern void task_complete_halt(
	task_t task);

extern kern_return_t task_terminate_internal(
	task_t task);

extern kern_return_t task_create_internal(
	task_t parent_task,
	coalition_t *parent_coalitions,
	boolean_t inherit_memory,
	boolean_t is_64bit,
	boolean_t is_64bit_data,
	uint32_t flags,
	uint32_t procflags,
	task_t *child_task); /* OUT */

extern kern_return_t task_info(
	task_t task,
	task_flavor_t flavor,
	task_info_t task_info_out,
	mach_msg_type_number_t *task_info_count);

extern void task_power_info_locked(
	task_t task,
	task_power_info_t info,
	gpu_energy_data_t gpu_energy,
	task_power_info_v2_t infov2);

extern uint64_t task_gpu_utilisation(
	task_t task);

extern uint64_t task_energy(
	task_t task);

extern uint64_t task_cpu_ptime(
	task_t task);
extern void task_update_cpu_time_qos_stats(
	task_t task,
	uint64_t *eqos_stats,
	uint64_t *rqos_stats);

extern void task_vtimer_set(
	task_t task,
	integer_t which);

extern void task_vtimer_clear(
	task_t task,
	integer_t which);

extern void task_vtimer_update(
	task_t task,
	integer_t which,
	uint32_t *microsecs);

#define TASK_VTIMER_USER 0x01
#define TASK_VTIMER_PROF 0x02
#define TASK_VTIMER_RLIM 0x04
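
/*
 * Illustrative sketch (not part of the original header): the TASK_VTIMER_*
 * bits select which virtual timer a call operates on.  A hypothetical
 * user-time virtual timer lifecycle:
 *
 *	uint32_t usecs = 0;
 *	task_vtimer_set(task, TASK_VTIMER_USER);
 *	// ... later, from the timer expiration path ...
 *	task_vtimer_update(task, TASK_VTIMER_USER, &usecs);
 *	// ... when the timer is disarmed ...
 *	task_vtimer_clear(task, TASK_VTIMER_USER);
 */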

extern void task_set_64bit(
	task_t task,
	boolean_t is_64bit,
	boolean_t is_64bit_data);

extern boolean_t task_get_64bit_data(
	task_t task);

extern void task_set_platform_binary(
	task_t task,
	boolean_t is_platform);
extern bool task_set_ca_client_wi(
	task_t task,
	boolean_t ca_client_wi);

extern void task_set_dyld_info(
	task_t task,
	mach_vm_address_t addr,
	mach_vm_size_t size);

/* Get number of activations in a task */
extern int get_task_numacts(
	task_t task);

extern int get_task_numactivethreads(task_t task);

struct label;
extern kern_return_t task_collect_crash_info(
	task_t task,
#if CONFIG_MACF
	struct label *crash_label,
#endif
	int is_corpse_fork);
void task_port_notify(mach_msg_header_t *msg);
void task_wait_till_threads_terminate_locked(task_t task);

/* JMM - should just be temporary (implementation in bsd_kern still) */
extern void set_bsdtask_info(task_t, void *);
extern vm_map_t get_task_map_reference(task_t);
extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
extern pmap_t get_task_pmap(task_t);
extern uint64_t get_task_resident_size(task_t);
extern uint64_t get_task_compressed(task_t);
extern uint64_t get_task_resident_max(task_t);
extern uint64_t get_task_phys_footprint(task_t);
#if CONFIG_LEDGER_INTERVAL_MAX
extern uint64_t get_task_phys_footprint_interval_max(task_t, int reset);
#endif /* CONFIG_LEDGER_INTERVAL_MAX */
extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
extern uint64_t get_task_phys_footprint_limit(task_t);
extern uint64_t get_task_purgeable_size(task_t);
extern uint64_t get_task_cpu_time(task_t);
extern uint64_t get_task_dispatchqueue_offset(task_t);
extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
extern uint64_t get_task_uniqueid(task_t task);
extern int get_task_version(task_t task);

extern uint64_t get_task_internal(task_t);
extern uint64_t get_task_internal_compressed(task_t);
extern uint64_t get_task_purgeable_nonvolatile(task_t);
extern uint64_t get_task_purgeable_nonvolatile_compressed(task_t);
extern uint64_t get_task_iokit_mapped(task_t);
extern uint64_t get_task_alternate_accounting(task_t);
extern uint64_t get_task_alternate_accounting_compressed(task_t);
extern uint64_t get_task_memory_region_count(task_t);
extern uint64_t get_task_page_table(task_t);
extern uint64_t get_task_network_nonvolatile(task_t);
extern uint64_t get_task_network_nonvolatile_compressed(task_t);
extern uint64_t get_task_wired_mem(task_t);

extern kern_return_t task_convert_phys_footprint_limit(int, int *);
extern kern_return_t task_set_phys_footprint_limit_internal(task_t, int, int *, boolean_t, boolean_t);
extern kern_return_t task_get_phys_footprint_limit(task_t task, int *limit_mb);

/* Jetsam memlimit attributes */
extern boolean_t task_get_memlimit_is_active(task_t task);
extern boolean_t task_get_memlimit_is_fatal(task_t task);
extern void task_set_memlimit_is_active(task_t task, boolean_t memlimit_is_active);
extern void task_set_memlimit_is_fatal(task_t task, boolean_t memlimit_is_fatal);
extern boolean_t task_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
extern void task_mark_has_triggered_exc_resource(task_t task, boolean_t memlimit_is_active);
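
/*
 * Illustrative sketch (not part of the original header): the jetsam memlimit
 * accessors above mirror the memlimit_is_active/memlimit_is_fatal bits in
 * struct task.  A hypothetical one-shot EXC_RESOURCE policy check:
 *
 *	boolean_t active = task_get_memlimit_is_active(task);
 *	if (!task_has_triggered_exc_resource(task, active)) {
 *		task_mark_has_triggered_exc_resource(task, active);
 *		// ... raise EXC_RESOURCE once for this limit ...
 *	}
 */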

extern void task_set_thread_limit(task_t task, uint16_t thread_limit);


extern boolean_t is_kerneltask(task_t task);
extern boolean_t is_corpsetask(task_t task);

extern kern_return_t check_actforsig(task_t task, thread_t thread, int setast);

extern kern_return_t machine_task_get_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t *state_count);

extern kern_return_t machine_task_set_state(
	task_t task,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count);

extern void machine_task_terminate(task_t task);

struct _task_ledger_indices {
	int cpu_time;
	int tkm_private;
	int tkm_shared;
	int phys_mem;
	int wired_mem;
	int internal;
	int iokit_mapped;
	int alternate_accounting;
	int alternate_accounting_compressed;
	int page_table;
	int phys_footprint;
	int internal_compressed;
	int purgeable_volatile;
	int purgeable_nonvolatile;
	int purgeable_volatile_compressed;
	int purgeable_nonvolatile_compressed;
	int network_volatile;
	int network_nonvolatile;
	int network_volatile_compressed;
	int network_nonvolatile_compressed;
	int platform_idle_wakeups;
	int interrupt_wakeups;
#if CONFIG_SCHED_SFI
	int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
	int cpu_time_billed_to_me;
	int cpu_time_billed_to_others;
	int physical_writes;
	int logical_writes;
	int energy_billed_to_me;
	int energy_billed_to_others;
	int pages_grabbed;
	int pages_grabbed_kern;
	int pages_grabbed_iopl;
	int pages_grabbed_upl;
};
extern struct _task_ledger_indices task_ledgers;
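
/*
 * Illustrative sketch (not part of the original header): task_ledgers holds
 * the entry indices for the per-task ledger template.  Assuming the
 * ledger_get_balance() interface from <kern/ledger.h>, a hypothetical
 * footprint query might look like:
 *
 *	ledger_amount_t footprint = 0;
 *	ledger_get_balance(task->ledger, task_ledgers.phys_footprint, &footprint);
 */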

/* requires task to be unlocked, returns a referenced thread */
thread_t task_findtid(task_t task, uint64_t tid);
int pid_from_task(task_t task);

extern kern_return_t task_wakeups_monitor_ctl(task_t task, uint32_t *rate_hz, int32_t *flags);
extern kern_return_t task_cpu_usage_monitor_ctl(task_t task, uint32_t *flags);
extern void task_rollup_accounting_info(task_t new_task, task_t parent_task);
extern kern_return_t task_io_monitor_ctl(task_t task, uint32_t *flags);
extern void task_set_did_exec_flag(task_t task);
extern void task_clear_exec_copy_flag(task_t task);
extern boolean_t task_is_exec_copy(task_t);
extern boolean_t task_did_exec(task_t task);
#ifdef CONFIG_32BIT_TELEMETRY
extern boolean_t task_consume_32bit_log_flag(task_t task);
extern void task_set_32bit_log_flag(task_t task);
#endif /* CONFIG_32BIT_TELEMETRY */
extern boolean_t task_is_active(task_t task);
extern boolean_t task_is_halting(task_t task);
extern void task_clear_return_wait(task_t task);
extern void task_wait_to_return(void) __attribute__((noreturn));
extern event_t task_get_return_wait_event(task_t task);

extern void task_atm_reset(task_t task);
extern void task_bank_reset(task_t task);
extern void task_bank_init(task_t task);

extern int task_pid(task_t task);
extern boolean_t task_has_assertions(task_t task);
/* End task_policy */

extern void task_set_gpu_denied(task_t task, boolean_t denied);
extern boolean_t task_is_gpu_denied(task_t task);

extern queue_head_t * task_io_user_clients(task_t task);

extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);

#endif /* XNU_KERNEL_PRIVATE */

#ifdef KERNEL_PRIVATE

extern void *get_bsdtask_info(task_t);
extern void *get_bsdthreadtask_info(thread_t);
extern void task_bsdtask_kill(task_t);
extern vm_map_t get_task_map(task_t);
extern ledger_t get_task_ledger(task_t);

extern boolean_t get_task_pidsuspended(task_t);
extern boolean_t get_task_frozen(task_t);

/* Convert from a task to a port */
extern ipc_port_t convert_task_to_port(task_t);
extern ipc_port_t convert_task_name_to_port(task_name_t);
extern ipc_port_t convert_task_inspect_to_port(task_inspect_t);
extern ipc_port_t convert_task_suspension_token_to_port(task_suspension_token_t task);

/* Convert from a port (in this case, an SO right to a task's resume port) to a task. */
extern task_suspension_token_t convert_port_to_task_suspension_token(ipc_port_t port);

extern boolean_t task_suspension_notify(mach_msg_header_t *);

#define TASK_WRITE_IMMEDIATE 0x1
#define TASK_WRITE_DEFERRED 0x2
#define TASK_WRITE_INVALIDATED 0x4
#define TASK_WRITE_METADATA 0x8
extern void task_update_logical_writes(task_t task, uint32_t io_size, int flags, void *vp);
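
/*
 * Illustrative sketch (not part of the original header):
 * task_update_logical_writes() takes one of the TASK_WRITE_* flags above
 * plus the vnode the I/O targets.  A hypothetical accounting call for an
 * immediately issued write, where io_size and vp come from the issuing
 * I/O path:
 *
 *	task_update_logical_writes(current_task(), (uint32_t)io_size,
 *	    TASK_WRITE_IMMEDIATE, vp);
 */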

#if CONFIG_SECLUDED_MEMORY
extern void task_set_can_use_secluded_mem(
	task_t task,
	boolean_t can_use_secluded_mem);
extern void task_set_could_use_secluded_mem(
	task_t task,
	boolean_t could_use_secluded_mem);
extern void task_set_could_also_use_secluded_mem(
	task_t task,
	boolean_t could_also_use_secluded_mem);
extern boolean_t task_can_use_secluded_mem(
	task_t task,
	boolean_t is_allocate);
extern boolean_t task_could_use_secluded_mem(task_t task);
#endif /* CONFIG_SECLUDED_MEMORY */

extern void task_set_darkwake_mode(task_t, boolean_t);
extern boolean_t task_get_darkwake_mode(task_t);

#if __arm64__
extern void task_set_legacy_footprint(task_t task, boolean_t new_val);
#endif /* __arm64__ */

#if CONFIG_MACF
extern struct label *get_task_crash_label(task_t task);
#endif /* CONFIG_MACF */

#endif /* KERNEL_PRIVATE */

extern task_t kernel_task;

extern void task_deallocate(
	task_t task);

extern void task_name_deallocate(
	task_name_t task_name);

extern void task_inspect_deallocate(
	task_inspect_t task_inspect);

extern void task_suspension_token_deallocate(
	task_suspension_token_t token);

extern boolean_t task_self_region_footprint(void);
extern void task_self_region_footprint_set(boolean_t newval);

__END_DECLS

#endif /* _KERN_TASK_H_ */