#include <mach/vm_statistics.h>
#include <machine/task.h>
+#if MONOTONIC
+#include <machine/monotonic.h>
+#endif /* MONOTONIC */
+
#include <kern/cpu_data.h>
#include <kern/queue.h>
#include <kern/exception.h>
#include <kern/thread.h>
#include <mach/coalition.h>
+#include <stdatomic.h>
#ifdef CONFIG_ATM
#include <atm/atm_internal.h>
uint64_t cpu_time_qos_user_interactive;
};
-#ifdef CONFIG_BANK
#include <bank/bank_internal.h>
-#endif
struct task {
/* Synchronization/destruction information */
decl_lck_mtx_data(,lock) /* Task's lock */
- uint32_t ref_count; /* Number of references to me */
+ _Atomic uint32_t ref_count; /* Number of references to me */
boolean_t active; /* Task has not been terminated */
boolean_t halting; /* Task is being halted */
/* Statistics */
uint64_t total_user_time; /* terminated threads only */
uint64_t total_system_time;
+	uint64_t	total_ptime;	/* CPU time accumulated on performance cores (see task_cpu_ptime) */
/* Virtual timers */
uint32_t vtimers;
void *bsd_info;
#endif
kcdata_descriptor_t corpse_info;
- void * corpse_info_kernel;
+	uint64_t	crashed_thread_id;	/* thread_tid() of the crashing thread, recorded for corpse reporting */
queue_chain_t corpse_tasks;
#ifdef CONFIG_MACF
struct label * crash_label;
#define TF_CORPSE_FORK 0x00000080 /* task is a forked corpse */
#define TF_LRETURNWAIT 0x00000100 /* task is waiting for fork/posix_spawn/exec to complete */
#define TF_LRETURNWAITER 0x00000200 /* task is waiting for TF_LRETURNWAIT to get cleared */
-
+#define TF_PLATFORM 0x00000400 /* task is a platform binary */
#define task_has_64BitAddr(task) \
(((task)->t_flags & TF_64B_ADDR) != 0)
uint64_t rusage_cpu_perthr_interval; /* Per-thread CPU limit interval */
uint64_t rusage_cpu_deadline;
thread_call_t rusage_cpu_callt;
+#if CONFIG_EMBEDDED
+ queue_head_t task_watchers; /* app state watcher threads */
+ int num_taskwatchers;
+ int watchapplying;
+#endif /* CONFIG_EMBEDDED */
#if CONFIG_ATM
struct atm_task_descriptor *atm_context; /* pointer to per task atm descriptor */
#endif
-#if CONFIG_BANK
struct bank_task *bank_context; /* pointer to per task bank structure */
-#endif
#if IMPORTANCE_INHERITANCE
struct ipc_importance_task *task_imp_base; /* Base of IPC importance chain */
uint64_t task_gpu_ns;
uint64_t task_energy;
+#if MONOTONIC
+ /* Read and written under task_lock */
+ struct mt_task task_monotonic;
+#endif /* MONOTONIC */
+
/* # of purgeable volatile VM objects owned by this task: */
int task_volatile_objects;
/* # of purgeable but not volatile VM objects owned by this task: */
};
#define task_lock(task) lck_mtx_lock(&(task)->lock)
-#define task_lock_assert_owned(task) lck_mtx_assert(&(task)->lock, LCK_MTX_ASSERT_OWNED)
+#define task_lock_assert_owned(task) LCK_MTX_ASSERT(&(task)->lock, LCK_MTX_ASSERT_OWNED)
#define task_lock_try(task) lck_mtx_try_lock(&(task)->lock)
#define task_unlock(task) lck_mtx_unlock(&(task)->lock)
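/*
 * Illustrative sketch, not part of the header: typical use of the task lock
 * macros. LCK_MTX_ASSERT (unlike the old direct lck_mtx_assert() call) is the
 * conditionally compiled form, active on DEVELOPMENT/DEBUG kernels, so the
 * ownership check is free on release builds. Function names are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_update_locked(task_t task, uint64_t delta)
{
	/* caller must hold the task lock */
	task_lock_assert_owned(task);
	task->total_ptime += delta;
}

static void
example_update(task_t task, uint64_t delta)
{
	task_lock(task);
	example_update_locked(task, delta);
	task_unlock(task);
}
#endif	/* example only */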
extern uint32_t task_deallocate_internal(task_t task);
#else
#define task_reference_internal(task) \
- (void)hw_atomic_add(&(task)->ref_count, 1)
+ (void)atomic_fetch_add_explicit(&(task)->ref_count, 1, memory_order_relaxed)
#define task_deallocate_internal(task) \
- hw_atomic_sub(&(task)->ref_count, 1)
+		(atomic_fetch_sub_explicit(&(task)->ref_count, 1, memory_order_release) - 1)
#endif
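/*
 * Illustrative sketch, not part of the header: how the C11 atomics that
 * replace hw_atomic_add()/hw_atomic_sub() are typically consumed. Taking a
 * reference only needs relaxed ordering; the release ordering on the
 * decrement is paired with an acquire fence by whoever drops the last
 * reference, so earlier writes are visible before teardown. The function and
 * the teardown helper below are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_task_release(task_t task)
{
	/* task_deallocate_internal() returns the post-decrement count */
	if (task_deallocate_internal(task) > 0) {
		return;
	}

	/* last reference: synchronize with earlier releasers */
	atomic_thread_fence(memory_order_acquire);
	example_task_free(task);	/* hypothetical teardown path */
}
#endif	/* example only */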
#define task_reference(task) \
uint32_t procflags,
task_t *child_task); /* OUT */
+extern kern_return_t task_info(
+ task_t task,
+ task_flavor_t flavor,
+ task_info_t task_info_out,
+ mach_msg_type_number_t *task_info_count);
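/*
 * Illustrative sketch, not part of the header: the in-kernel task_info()
 * prototype added here follows the same calling convention as the MIG
 * routine, e.g. for MACH_TASK_BASIC_INFO. Example code only.
 */
#if 0	/* example only, not compiled */
static kern_return_t
example_basic_info(task_t task, mach_task_basic_info_data_t *info)
{
	mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;

	return task_info(task, MACH_TASK_BASIC_INFO,
	    (task_info_t)info, &count);
}
#endif	/* example only */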
extern void task_power_info_locked(
task_t task,
task_power_info_t info,
- gpu_energy_data_t gpu_energy,
- uint64_t *task_power);
+ gpu_energy_data_t gpu_energy,
+ task_power_info_v2_t infov2);
extern uint64_t task_gpu_utilisation(
task_t task);
extern uint64_t task_energy(
task_t task);
+extern uint64_t task_cpu_ptime(
+ task_t task);
+
extern void task_vtimer_set(
task_t task,
integer_t which);
task_t task,
boolean_t is64bit);
+extern void task_set_platform_binary(
+ task_t task,
+ boolean_t is_platform);
+
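/*
 * Presumably pairs with the new TF_PLATFORM flag above: a sketch of an
 * assumed call site in the exec path, marking a freshly created task as a
 * platform binary. Variable names are hypothetical.
 */
#if 0	/* example only, not compiled */
	task_set_platform_binary(new_task, is_platform_binary);
#endif	/* example only */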
extern void task_backing_store_privileged(
task_t task);
task_t task);
extern int get_task_numactivethreads(task_t task);
-extern kern_return_t task_collect_crash_info(task_t task, struct proc *p, int is_corpse_fork);
+
+struct label;
+extern kern_return_t task_collect_crash_info(
+ task_t task,
+#if CONFIG_MACF
+ struct label *crash_label,
+#endif
+ int is_corpse_fork);
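/*
 * Illustrative sketch, not part of the header: because the crash_label
 * parameter only exists when CONFIG_MACF is configured, call sites have to
 * mirror the conditional. Variable names are hypothetical.
 */
#if 0	/* example only, not compiled */
	kern_return_t kr;

#if CONFIG_MACF
	kr = task_collect_crash_info(task, crash_label, is_corpse_fork);
#else
	kr = task_collect_crash_info(task, is_corpse_fork);
#endif	/* CONFIG_MACF */
#endif	/* example only */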
void task_port_notify(mach_msg_header_t *msg);
void task_wait_till_threads_terminate_locked(task_t task);
extern uint64_t get_task_compressed(task_t);
extern uint64_t get_task_resident_max(task_t);
extern uint64_t get_task_phys_footprint(task_t);
-extern uint64_t get_task_phys_footprint_max(task_t);
+extern uint64_t get_task_phys_footprint_recent_max(task_t);
+extern uint64_t get_task_phys_footprint_lifetime_max(task_t);
extern uint64_t get_task_phys_footprint_limit(task_t);
extern uint64_t get_task_purgeable_size(task_t);
extern uint64_t get_task_cpu_time(task_t);
extern uint64_t get_task_dispatchqueue_offset(task_t);
extern uint64_t get_task_dispatchqueue_serialno_offset(task_t);
-extern uint64_t get_task_uniqueid(task_t);
+extern uint64_t get_task_uniqueid(task_t task);
+extern int get_task_version(task_t task);
extern uint64_t get_task_internal(task_t);
extern uint64_t get_task_internal_compressed(task_t);
#if CONFIG_SCHED_SFI
int sfi_wait_times[MAX_SFI_CLASS_ID];
#endif /* CONFIG_SCHED_SFI */
-#ifdef CONFIG_BANK
int cpu_time_billed_to_me;
int cpu_time_billed_to_others;
-#endif
int physical_writes;
int logical_writes;
+ int energy_billed_to_me;
+ int energy_billed_to_others;
};
extern struct _task_ledger_indices task_ledgers;
extern boolean_t task_is_exec_copy(task_t);
extern boolean_t task_did_exec(task_t task);
extern boolean_t task_is_active(task_t task);
+extern boolean_t task_is_halting(task_t task);
extern void task_clear_return_wait(task_t task);
extern void task_wait_to_return(void);
extern event_t task_get_return_wait_event(task_t task);
extern queue_head_t * task_io_user_clients(task_t task);
+extern void task_copy_fields_for_exec(task_t dst_task, task_t src_task);
+
#endif /* XNU_KERNEL_PRIVATE */
#ifdef KERNEL_PRIVATE
#if CONFIG_MACF
extern struct label *get_task_crash_label(task_t task);
-extern void set_task_crash_label(task_t task, struct label *label);
#endif /* CONFIG_MACF */
#endif /* KERNEL_PRIVATE */