+ ((task)->t_flags &= ~TF_64B_ADDR)
+#define task_has_64BitData(task) \
+ (((task)->t_flags & TF_64B_DATA) != 0) /* nonzero when the task's TF_64B_DATA flag is set (64-bit data model) */
+
+#define task_is_a_corpse(task) \
+ (((task)->t_flags & TF_CORPSE) != 0) /* nonzero once TF_CORPSE has been set on the task */
+
+#define task_set_corpse(task) \
+ ((task)->t_flags |= TF_CORPSE) /* mark the task as a corpse by setting TF_CORPSE */
+
+#define task_corpse_pending_report(task) \
+ (((task)->t_flags & TF_PENDING_CORPSE) != 0) /* nonzero while a corpse report is still outstanding */
+
+#define task_set_corpse_pending_report(task) \
+ ((task)->t_flags |= TF_PENDING_CORPSE) /* flag that a corpse report is pending for this task */
+
+#define task_clear_corpse_pending_report(task) \
+ ((task)->t_flags &= ~TF_PENDING_CORPSE) /* clear the pending-report state (report handled) */
+
+#define task_is_a_corpse_fork(task) \
+ (((task)->t_flags & TF_CORPSE_FORK) != 0) /* nonzero if TF_CORPSE_FORK is set (task created as a corpse fork) */
+
+ uint32_t t_procflags; /* general-purpose task flags protected by proc_lock (PL) */
+#define TPF_NONE 0 /* no proc-lock-protected flags set */
+#define TPF_DID_EXEC 0x00000001 /* task has been execed to a new task */
+#define TPF_EXEC_COPY 0x00000002 /* task is the new copy of an exec */
+
+#define task_did_exec_internal(task) \
+ (((task)->t_procflags & TPF_DID_EXEC) != 0) /* nonzero once TPF_DID_EXEC is set (task already execed) */
+
+#define task_is_exec_copy_internal(task) \
+ (((task)->t_procflags & TPF_EXEC_COPY) != 0) /* nonzero for the new exec-copy task (TPF_EXEC_COPY set) */
+
+ mach_vm_address_t all_image_info_addr; /* dyld __all_image_info */
+ mach_vm_size_t all_image_info_size; /* section location and size */
+
+#if KPERF
+#define TASK_PMC_FLAG 0x1 /* Bit in "t_chud" signifying PMC interest */
+#define TASK_KPC_FORCED_ALL_CTRS 0x2 /* Bit in "t_chud" signifying KPC forced all counters */
+
+ uint32_t t_chud; /* CHUD flags, used for Shark */
+#endif
+
+ boolean_t pidsuspended; /* pid_suspend called; no threads can execute */
+ boolean_t frozen; /* frozen; private resident pages committed to swap */
+ boolean_t changing_freeze_state; /* in the process of freezing or thawing */
+ uint16_t policy_ru_cpu :4, /* NOTE(review): requested vs applied CPU resource-usage policy nibbles — confirm encoding */
+ policy_ru_cpu_ext :4,
+ applied_ru_cpu :4,
+ applied_ru_cpu_ext :4;
+ uint8_t rusage_cpu_flags; /* NOTE(review): flag bits controlling the CPU usage limit/monitor — confirm bit meanings */
+ uint8_t rusage_cpu_percentage; /* Task-wide CPU limit percentage */
+ uint64_t rusage_cpu_interval; /* Task-wide CPU limit interval */
+ uint8_t rusage_cpu_perthr_percentage; /* Per-thread CPU limit percentage */
+ uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
+ uint64_t rusage_cpu_deadline; /* NOTE(review): presumably the deadline for the CPU limit — confirm time base/units */
+ thread_call_t rusage_cpu_callt; /* NOTE(review): thread_call presumably used to act on CPU limit expiry — confirm */
+#if CONFIG_EMBEDDED
+ queue_head_t task_watchers; /* app state watcher threads */
+ int num_taskwatchers;
+ int watchapplying;
+#endif /* CONFIG_EMBEDDED */
+
+#if CONFIG_ATM
+ struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */
+#endif
+ struct bank_task *bank_context; /* pointer to per task bank structure */
+
+#if IMPORTANCE_INHERITANCE
+ struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
+#endif /* IMPORTANCE_INHERITANCE */
+
+ vm_extmod_statistics_data_t extmod_statistics;
+
+#if MACH_ASSERT
+ int8_t suspends_outstanding; /* suspends this task performed in excess of resumes */
+#endif
+
+ struct task_requested_policy requested_policy;
+ struct task_effective_policy effective_policy;
+
+ /*
+ * Can be merged with imp_donor bits, once the IMPORTANCE_INHERITANCE macro goes away.
+ */
+ uint32_t low_mem_notified_warn :1, /* warning low memory notification is sent to the task */
+ low_mem_notified_critical :1, /* critical low memory notification is sent to the task */
+ purged_memory_warn :1, /* purgeable memory of the task is purged for warning level pressure */
+ purged_memory_critical :1, /* purgeable memory of the task is purged for critical level pressure */
+ low_mem_privileged_listener :1, /* if set, task would like to know about pressure changes before other tasks on the system */
+ mem_notify_reserved :27; /* reserved for future use */
+
+ uint32_t memlimit_is_active :1, /* if set, use active attributes, otherwise use inactive attributes */
+ memlimit_is_fatal :1, /* if set, exceeding current memlimit will prove fatal to the task */
+ memlimit_active_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds active memory limit */
+ memlimit_inactive_exc_resource :1, /* if set, suppress exc_resource exception when task exceeds inactive memory limit */
+ memlimit_attrs_reserved :28; /* reserved for future use */
+
+ io_stat_info_t task_io_stats;
+ uint64_t task_immediate_writes __attribute__((aligned(8)));
+ uint64_t task_deferred_writes __attribute__((aligned(8)));
+ uint64_t task_invalidated_writes __attribute__((aligned(8)));
+ uint64_t task_metadata_writes __attribute__((aligned(8)));
+
+ /*
+ * The cpu_time_qos_stats fields are protected by the task lock
+ */
+ struct _cpu_time_qos_stats cpu_time_qos_stats;
+
+ /* Statistics accumulated for terminated threads from this task */
+ uint32_t task_timer_wakeups_bin_1; /* timer wakeups, bucket 1 — NOTE(review): bucket thresholds defined elsewhere */
+ uint32_t task_timer_wakeups_bin_2; /* timer wakeups, bucket 2 */
+ uint64_t task_gpu_ns; /* GPU time; name suggests nanoseconds — confirm */
+ uint64_t task_energy; /* NOTE(review): accumulated energy — units not visible in this header, confirm */
+
+#if MONOTONIC
+ /* Read and written under task_lock */
+ struct mt_task task_monotonic;
+#endif /* MONOTONIC */
+
+ /* # of purgeable volatile VM objects owned by this task: */
+ int task_volatile_objects;
+ /* # of purgeable but not volatile VM objects owned by this task: */
+ int task_nonvolatile_objects;
+ boolean_t task_purgeable_disowning;
+ boolean_t task_purgeable_disowned;
+
+ /*
+ * A task's coalition set is "adopted" in task_create_internal
+ * and unset in task_deallocate_internal, so each array member
+ * can be referenced without the task lock.
+ * Note: these fields are protected by coalition->lock,
+ * not the task lock.
+ */
+ coalition_t coalition[COALITION_NUM_TYPES]; /* one coalition reference per coalition type */
+ queue_chain_t task_coalition[COALITION_NUM_TYPES]; /* NOTE(review): presumably this task's linkage on each coalition's member queue — confirm */
+ uint64_t dispatchqueue_offset; /* NOTE(review): presumably userspace offset used to locate a thread's dispatch queue — confirm against setter */
+
+#if DEVELOPMENT || DEBUG
+ boolean_t task_unnested; /* NOTE(review): debug-only; meaning of "unnested" (VM map nesting?) not visible here — confirm */
+ int task_disconnected_count; /* NOTE(review): debug-only counter; see task_disconnected_count users — confirm semantics */
+#endif
+
+#if HYPERVISOR
+ void *hv_task_target; /* hypervisor virtual machine object associated with this task */
+#endif /* HYPERVISOR */
+
+#if CONFIG_SECLUDED_MEMORY
+ boolean_t task_can_use_secluded_mem;
+ boolean_t task_could_use_secluded_mem;
+ boolean_t task_could_also_use_secluded_mem;
+#endif /* CONFIG_SECLUDED_MEMORY */
+
+ queue_head_t io_user_clients; /* NOTE(review): presumably the list head of IOKit user clients owned by this task — confirm */
+ uint32_t exec_token; /* NOTE(review): presumably changed across exec to distinguish task incarnations — confirm against exec path */