/* dynamically generated at build time based on syscalls.master */
extern const char *syscallnames[];
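+/*
+ * code_is_kdebug_trace() matches both the 32- and 64-bit kdebug trace
+ * syscalls; they are excluded from the DBG_BSD_EXCP_SC entry/exit
+ * tracepoints emitted below.
+ */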
+#define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || ((code) == SYS_kdebug_trace64))
+
/*
* Function: unix_syscall
*
/* NOTREACHED */
}
- if (__probable(code != 180)) {
- int *ip = (int *)vt;
+ if (__probable(!code_is_kdebug_trace(code))) {
+ int *ip = (int *)vt;
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
*/
throttle_lowpri_io(1);
}
- if (__probable(code != 180))
+ if (__probable(!code_is_kdebug_trace(code)))
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
memcpy(vt, args_start_at_rdi ? &regs->rdi : &regs->rsi, args_in_regs * sizeof(syscall_arg_t));
- if (code != 180) {
+ if (!code_is_kdebug_trace(code)) {
uint64_t *ip = (uint64_t *)vt;
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
*/
throttle_lowpri_io(1);
}
- if (__probable(code != 180))
+ if (__probable(!code_is_kdebug_trace(code)))
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
*/
throttle_lowpri_io(1);
}
- if (code != 180)
+ if (!code_is_kdebug_trace(code))
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
out_args[0] = in_args[0];
}
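+/*
+ * munge_wllll: expand one 32-bit word followed by four 64-bit longs from
+ * the packed 32-bit user argument layout into 64-bit argument slots.
+ * Copying proceeds from the last argument backward so the in-place
+ * expansion does not overwrite input words before they are read.
+ */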
+void
+munge_wllll(void *args)
+{
+ volatile uint64_t *out_args = (volatile uint64_t*)args;
+ volatile uint32_t *in_args = (volatile uint32_t*)args;
+
+ out_args[4] = *(uint64_t*)&in_args[7];
+ out_args[3] = *(uint64_t*)&in_args[5];
+ out_args[2] = *(uint64_t*)&in_args[3];
+ out_args[1] = *(uint64_t*)&in_args[1];
+ out_args[0] = in_args[0];
+}
+
void
munge_wllww(void *args)
{
/* XXX should have prototypes, but Mach does not provide one */
void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
int cpu_number(void); /* XXX <machine/...> include path broken */
+void commpage_update_kdebug_enable(void); /* XXX sign */
/* XXX should probably be static, but it's debugging code... */
int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
kdebug_enable |= trace_type;
kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
kd_ctrl_page.enabled = 1;
+ commpage_update_kdebug_enable();
} else {
kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_enable();
}
lck_spin_unlock(kds_spin_lock);
ml_set_interrupts_enabled(s);
if (kdbp_vict == NULL) {
kdebug_enable = 0;
kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_enable();
retval = FALSE;
goto out;
}
-void
-kernel_debug_internal(
- uint32_t debugid,
- uintptr_t arg1,
- uintptr_t arg2,
- uintptr_t arg3,
- uintptr_t arg4,
- uintptr_t arg5);
-
-__attribute__((always_inline)) void
+static void
kernel_debug_internal(
uint32_t debugid,
uintptr_t arg1,
}
/*
- * Support syscall SYS_kdebug_trace
+ * Support syscall SYS_kdebug_trace by forwarding to kdebug_trace64(); U64->K32 args may get truncated there.
*/
int
-kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
+kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
+{
+ struct kdebug_trace64_args uap64;
+
+ uap64.code = uap->code;
+ uap64.arg1 = uap->arg1;
+ uap64.arg2 = uap->arg2;
+ uap64.arg3 = uap->arg3;
+ uap64.arg4 = uap->arg4;
+
+ return kdebug_trace64(p, &uap64, retval);
+}
+
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on a K32 kernel will be truncated to fit the 32-bit record format.
+ */
+int
+kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
{
+ uint8_t code_class;
+
+ /*
+ * Not all classes are supported for injection from userspace, especially the ones used by the core
+ * kernel tracing infrastructure.
+ */
+ code_class = EXTRACT_CLASS(uap->code);
+
+ switch (code_class) {
+ case DBG_TRACE:
+ return EPERM;
+ }
+
if ( __probable(kdebug_enable == 0) )
- return(0);
-
- kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()));
+ return(0);
+
+ kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
return(0);
}
-
static void
kdbg_lock_init(void)
{
*/
kdebug_enable = 0;
kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_enable();
return;
}
}
kdebug_enable = 0;
kd_ctrl_page.enabled = 0;
+ commpage_update_kdebug_enable();
ctx = vfs_context_kernel();
}
/*
- * Beyond this point, we must be the resolver process.
+ * Beyond this point, we must be the resolver process. We verify this
+ * by confirming the resolver credential and pid.
*/
- if (current_proc()->p_pid != kauth_resolver_identity) {
+ if ((kauth_cred_getuid(kauth_cred_get()) != 0) || (current_proc()->p_pid != kauth_resolver_identity)) {
KAUTH_DEBUG("RESOLVER - call from bogus resolver %d\n", current_proc()->p_pid);
return(EPERM);
}
struct kauth_identity_extlookup extl;
struct kauth_resolver_work *workp;
struct kauth_resolver_work *killp;
- int error, result;
+ int error, result, request_flags;
/*
* Copy in the message, including the extension field, since we are
TAILQ_FOREACH(workp, &kauth_resolver_submitted, kr_link) {
/* found it? */
if (workp->kr_seqno == extl.el_seqno) {
+ /*
+ * Take a snapshot of the original request flags.
+ */
+ request_flags = workp->kr_work.el_flags;
/*
* Get the request of the submitted queue so
* issue and is easily detectable by comparing
* time to live on last response vs. time of
* next request in the resolver logs.
+ *
+ * A malicious/faulty resolver could overwrite
+ * part of a user's address space if they return
+ * flags that mismatch the original request's flags.
*/
- if (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
+ if ((extl.el_flags & request_flags) & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
size_t actual; /* notused */
KAUTH_RESOLVER_UNLOCK();
error = copyinstr(extl.el_extend, CAST_DOWN(void *, workp->kr_extend), MAXPATHLEN, &actual);
KAUTH_RESOLVER_LOCK();
+ } else if (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
+ error = EFAULT;
+ KAUTH_DEBUG("RESOLVER - resolver returned mismatching extension flags (%d), request contained (%d)",
+ extl.el_flags, request_flags);
}
/*
* Parameters: uid
*
* Returns: NULL Insufficient memory to satisfy
- * the request
+ * the request or bad parameters
* !NULL A pointer to the allocated
* structure, filled in
*
kip->ki_valid = KI_VALID_UID;
}
if (supgrpcnt) {
+ /*
+ * A malicious/faulty resolver could return bad values
+ */
+ assert(supgrpcnt >= 0);
assert(supgrpcnt <= NGROUPS);
assert(supgrps != NULL);
+
+ if ((supgrpcnt < 0) || (supgrpcnt > NGROUPS) || (supgrps == NULL)) {
+ return NULL;
+ }
if (kip->ki_valid & KI_VALID_GID)
panic("can't allocate kauth identity with both gid and supplementary groups");
kip->ki_supgrpcnt = supgrpcnt;
SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI2, 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, rtm, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", "");
SYSCTL_PROC(_hw_optional, OID_AUTO, hle, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, adx, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", "");
#else
#error Unsupported arch
#endif /* !__i386__ && !__x86_64 && !__arm__ && ! __arm64__ */
CS_KILL | CS_EXEC_SET_KILL |
CS_RESTRICT |
CS_REQUIRE_LV |
- CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
+ CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
+ CS_ENTITLEMENTS_VALIDATED;
proc_lock(pt);
if (pt->p_csflags & CS_VALID)
task_t task = current_task();
thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
- return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource);
+ return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource, USER_ADDR_NULL, THREAD_QOS_OVERRIDE_TYPE_UNKNOWN);
}
static boolean_t proc_usynch_thread_qos_remove_override(struct uthread *uth, uint64_t tid)
task_t task = current_task();
thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
- return proc_thread_qos_remove_override(task, thread, tid);
+ return proc_thread_qos_remove_override(task, thread, tid, USER_ADDR_NULL, THREAD_QOS_OVERRIDE_TYPE_UNKNOWN);
+}
+
+static boolean_t proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
+{
+ thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+
+ return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource, resource, resource_type);
+}
+
+static boolean_t proc_usynch_thread_qos_remove_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
+{
+ thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+
+ return proc_thread_qos_remove_override(task, thread, tid, resource, resource_type);
+}
+
+static boolean_t proc_usynch_thread_qos_reset_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
+{
+ thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+
+ return proc_thread_qos_reset_override(task, thread, tid, resource, resource_type);
}
/* kernel (core) to kext shims */
.proc_usynch_thread_qos_remove_override = proc_usynch_thread_qos_remove_override,
.qos_main_thread_active = qos_main_thread_active,
+
+ .proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
+ .proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,
+ .proc_usynch_thread_qos_reset_override_for_resource = proc_usynch_thread_qos_reset_override_for_resource,
};
pthread_callbacks_t pthread_kern = &pthread_callbacks;
176 AUE_NULL ALL { int nosys(void); } { old add_profil }
177 AUE_NULL ALL { int nosys(void); }
178 AUE_NULL ALL { int nosys(void); }
-179 AUE_NULL ALL { int nosys(void); }
-180 AUE_KDEBUGTRACE ALL { int kdebug_trace(int code, int arg1, int arg2, int arg3, int arg4, int arg5) NO_SYSCALL_STUB; }
+179 AUE_KDEBUGTRACE ALL { int kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) NO_SYSCALL_STUB; }
+180 AUE_KDEBUGTRACE ALL { int kdebug_trace(uint32_t code, u_long arg1, u_long arg2, u_long arg3, u_long arg4) NO_SYSCALL_STUB; }
181 AUE_SETGID ALL { int setgid(gid_t gid); }
182 AUE_SETEGID ALL { int setegid(gid_t egid); }
183 AUE_SETEUID ALL { int seteuid(uid_t euid); }
0x40c02c0 BSC_add_profil
0x40c02c4 BSC_#177
0x40c02c8 BSC_#178
-0x40c02cc BSC_#179
+0x40c02cc BSC_kdebug_trace64
0x40c02d0 BSC_kdebug_trace
0x40c02d4 BSC_setgid
0x40c02d8 BSC_setegid
0x40c0780 BSC_recvmsg_x
0x40c0784 BSC_sendmsg_x
0x40c0788 BSC_thread_selfusage
-0x40c07a4 BSC_mremap_extended
+0x40c07a4 BSC_mremap_encrypted
0x40e0104 BSC_msync_extended_info
0x40e0264 BSC_pread_extended_info
0x40e0268 BSC_pwrite_extended_info
0x5310274 CPUPM_PST_QOS_SWITCH2
0x5310278 CPUPM_PST_UIB
0x531027C CPUPM_PST_PLIMIT_UIB
+0x5310280 CPUPM_IO
0x5330000 HIBERNATE
0x5330004 HIBERNATE_WRITE_IMAGE
0x5330008 HIBERNATE_MACHINE_INIT
#define GID_BIN 7
#define GID_GAMES 13
#define GID_DIALER 68
+#define GID_WINDOWSERVER 88
#endif /* __APPLE_API_PRIVATE */
#endif /* !_MISCFS_DEVFS_DEVFS_H_ */
return (0);
}
+
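+/* Size of the on-stack snapshot array; larger protocol counts fall back to MALLOC */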
+#define TMP_IF_PROTO_ARR_SIZE 10
static int
dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
{
- struct ifnet_filter *filter;
+ struct ifnet_filter *filter = NULL;
+ struct if_proto *proto = NULL;
+ int if_proto_count = 0;
+ struct if_proto **tmp_ifproto_arr = NULL;
+ struct if_proto *tmp_ifproto_stack_arr[TMP_IF_PROTO_ARR_SIZE] = {NULL};
+ int tmp_ifproto_arr_idx = 0;
+ bool tmp_malloc = false;
/* Get an io ref count if the interface is attached */
if (!ifnet_is_attached(ifp, 1))
if_flt_monitor_unbusy(ifp);
lck_mtx_unlock(&ifp->if_flt_lock);
+ /*
+ * An embedded tmp_list_entry in if_proto could still be
+ * overwritten by another thread after the ifnet lock is
+ * dropped, so we avoid embedded list pointers here and
+ * snapshot the protocols into a temporary array instead.
+ */
ifnet_lock_shared(ifp);
- if (ifp->if_proto_hash != NULL) {
+ if_proto_count = dlil_ifp_proto_count(ifp);
+ if (if_proto_count) {
int i;
+ VERIFY(ifp->if_proto_hash != NULL);
+ if (if_proto_count <= TMP_IF_PROTO_ARR_SIZE) {
+ tmp_ifproto_arr = tmp_ifproto_stack_arr;
+ } else {
+ MALLOC(tmp_ifproto_arr, struct if_proto **,
+ sizeof (*tmp_ifproto_arr) * if_proto_count,
+ M_TEMP, M_ZERO);
+ if (tmp_ifproto_arr == NULL) {
+ ifnet_lock_done(ifp);
+ goto cleanup;
+ }
+ tmp_malloc = true;
+ }
for (i = 0; i < PROTO_HASH_SLOTS; i++) {
- struct if_proto *proto;
-
SLIST_FOREACH(proto, &ifp->if_proto_hash[i],
next_hash) {
- proto_media_event eventp =
- (proto->proto_kpi == kProtoKPI_v1 ?
- proto->kpi.v1.event :
- proto->kpi.v2.event);
-
- if (eventp != NULL) {
- if_proto_ref(proto);
- ifnet_lock_done(ifp);
-
- eventp(ifp, proto->protocol_family,
- event);
-
- ifnet_lock_shared(ifp);
- if_proto_free(proto);
- }
+ if_proto_ref(proto);
+ tmp_ifproto_arr[tmp_ifproto_arr_idx] = proto;
+ tmp_ifproto_arr_idx++;
}
}
+ VERIFY(if_proto_count == tmp_ifproto_arr_idx);
}
ifnet_lock_done(ifp);
+ for (tmp_ifproto_arr_idx = 0; tmp_ifproto_arr_idx < if_proto_count;
+ tmp_ifproto_arr_idx++) {
+ proto = tmp_ifproto_arr[tmp_ifproto_arr_idx];
+ VERIFY(proto != NULL);
+ proto_media_event eventp =
+ (proto->proto_kpi == kProtoKPI_v1 ?
+ proto->kpi.v1.event :
+ proto->kpi.v2.event);
+
+ if (eventp != NULL) {
+ eventp(ifp, proto->protocol_family,
+ event);
+ }
+ if_proto_free(proto);
+ }
+
+cleanup:
+ if (tmp_malloc) {
+ FREE(tmp_ifproto_arr, M_TEMP);
+ }
+
/* Pass the event to the interface */
if (ifp->if_event != NULL)
ifp->if_event(ifp, event);
/* Release the io ref count */
ifnet_decr_iorefcnt(ifp);
-
done:
return (kev_post_msg(event));
}
uint32_t mpte_thread_active; /* thread is running */
uint32_t mpte_thread_reqs; /* # of requests for thread */
struct mptsub *mpte_active_sub; /* ptr to last active subf */
- uint8_t mpte_flags; /* per mptcp session flags */
- uint8_t mpte_lost_aid; /* storing lost address id */
- uint8_t mpte_addrid_last; /* storing address id parm */
+ uint8_t mpte_flags; /* per mptcp session flags */
+ uint8_t mpte_lost_aid; /* storing lost address id */
+ uint8_t mpte_addrid_last; /* storing address id parm */
};
/*
#define CS_RESTRICT 0x0000800 /* tell dyld to treat restricted */
#define CS_ENFORCEMENT 0x0001000 /* require enforcement */
#define CS_REQUIRE_LV 0x0002000 /* require library validation */
+#define CS_ENTITLEMENTS_VALIDATED 0x0004000
#define CS_ALLOWED_MACHO 0x00ffffe
#include <mach/clock_types.h>
#include <stdint.h>
-#if defined(KERNEL_BUILD)
-#include <kdebug.h>
-#endif /* KERNEL_BUILD */
+
+#ifndef KERNEL
+#include <Availability.h>
+#endif
#ifdef XNU_KERNEL_PRIVATE
#include <stdint.h>
#include <mach/branch_predicates.h>
#endif
+#ifdef KERNEL_PRIVATE
+
typedef enum
{
KD_CALLBACK_KDEBUG_ENABLED, // Trace is now enabled. No arguments
uintptr_t threadid
);
-
-/*
- * state bits for hfs_update event
- */
-#define DBG_HFS_UPDATE_ACCTIME 0x01
-#define DBG_HFS_UPDATE_MODTIME 0x02
-#define DBG_HFS_UPDATE_CHGTIME 0x04
-#define DBG_HFS_UPDATE_MODIFIED 0x08
-#define DBG_HFS_UPDATE_FORCE 0x10
-#define DBG_HFS_UPDATE_DATEADDED 0x20
-
-
-/*
- * types of faults that vm_fault handles
- * and creates trace entries for
- */
-#define DBG_ZERO_FILL_FAULT 1
-#define DBG_PAGEIN_FAULT 2
-#define DBG_COW_FAULT 3
-#define DBG_CACHE_HIT_FAULT 4
-#define DBG_NZF_PAGE_FAULT 5
-#define DBG_GUARD_FAULT 6
-#define DBG_PAGEINV_FAULT 7
-#define DBG_PAGEIND_FAULT 8
-#define DBG_COMPRESSOR_FAULT 9
-#define DBG_COMPRESSOR_SWAPIN_FAULT 10
-
+#endif /* KERNEL_PRIVATE */
/* The debug code consists of the following
*
#define DBG_BANK 40
#define DBG_XPC 41
#define DBG_ATM 42
+#define DBG_ARIADNE 43
+
#define DBG_MIG 255
+#ifdef PRIVATE
+/*
+ * OS components can use the full precision of the "code" field
+ * (Class, SubClass, Code) to inject events using kdebug_trace() by
+ * using:
+ *
+ * kdebug_trace(KDBG_CODE(DBG_XPC, 15, 1) | DBG_FUNC_NONE, 1, 2, 3, 4);
+ *
+ * These trace points can be included in production code, since they
+ * use reserved, non-overlapping ranges. The performance impact when
+ * kernel tracing is not enabled is minimal. Classes can be reserved
+ * by filing a Radar in xnu|all.
+ *
+ * 64-bit arguments may be truncated if the system is using a 32-bit
+ * kernel.
+ *
+ * On error, -1 will be returned and errno will indicate the error.
+ */
+#ifndef KERNEL
+extern int kdebug_trace(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) __OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA);
+#endif
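+/*
+ * Illustrative (hypothetical) usage, pairing DBG_FUNC_START/DBG_FUNC_END
+ * around a traced region and checking the -1/errno error convention
+ * described above:
+ *
+ * if (kdebug_trace(KDBG_CODE(DBG_XPC, 15, 1) | DBG_FUNC_START, 1, 2, 3, 4) == -1)
+ *         perror("kdebug_trace");
+ * ... traced work ...
+ * kdebug_trace(KDBG_CODE(DBG_XPC, 15, 1) | DBG_FUNC_END, 0, 0, 0, 0);
+ */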
+#endif /* PRIVATE */
+
/* **** The Kernel Debug Sub Classes for Mach (DBG_MACH) **** */
#define DBG_MACH_EXCP_KTRAP_x86 0x02 /* Kernel Traps on x86 */
#define DBG_MACH_EXCP_DFLT 0x03 /* Data Translation Fault */
#define MACH_MULTIQ_GROUP 2
#define MACH_MULTIQ_GLOBAL 3
+/* Arguments for vm_fault (DBG_MACH_VM) */
+#define DBG_ZERO_FILL_FAULT 1
+#define DBG_PAGEIN_FAULT 2
+#define DBG_COW_FAULT 3
+#define DBG_CACHE_HIT_FAULT 4
+#define DBG_NZF_PAGE_FAULT 5
+#define DBG_GUARD_FAULT 6
+#define DBG_PAGEINV_FAULT 7
+#define DBG_PAGEIND_FAULT 8
+#define DBG_COMPRESSOR_FAULT 9
+#define DBG_COMPRESSOR_SWAPIN_FAULT 10
+
/* Codes for IPC (DBG_MACH_IPC) */
#define MACH_TASK_SUSPEND 0x0 /* Suspended a task */
#define MACH_TASK_RESUME 0x1 /* Resumed a task */
#define DBG_THROTTLE 0x11 /* I/O Throttling events */
#define DBG_CONTENT_PROT 0xCF /* Content Protection Events: see bsd/sys/cprotect.h */
+/*
+ * For Kernel Debug Sub Class DBG_HFS, state bits for hfs_update event
+ */
+#define DBG_HFS_UPDATE_ACCTIME 0x01
+#define DBG_HFS_UPDATE_MODTIME 0x02
+#define DBG_HFS_UPDATE_CHGTIME 0x04
+#define DBG_HFS_UPDATE_MODIFIED 0x08
+#define DBG_HFS_UPDATE_FORCE 0x10
+#define DBG_HFS_UPDATE_DATEADDED 0x20
+
/* The Kernel Debug Sub Classes for BSD */
#define DBG_BSD_PROC 0x01 /* process/signals related */
#define DBG_BSD_MEMSTAT 0x02 /* memorystatus / jetsam operations */
#define DYLDDBG_CODE(SubClass,code) KDBG_CODE(DBG_DYLD, SubClass, code)
#define QTDBG_CODE(SubClass,code) KDBG_CODE(DBG_QT, SubClass, code)
#define APPSDBG_CODE(SubClass,code) KDBG_CODE(DBG_APPS, SubClass, code)
+#define ARIADNEDBG_CODE(SubClass, code) KDBG_CODE(DBG_ARIADNE, SubClass, code)
#define CPUPM_CODE(code) IOKDBG_CODE(DBG_IOCPUPM, code)
#define KMEM_ALLOC_CODE MACHDBG_CODE(DBG_MACH_LEAKS, 0)
done
for ver in $(${SDKROOT}/usr/local/libexec/availability.pl --macosx) ; do
- ver_major=${ver%.*}
- ver_minor=${ver#*.}
- value=$(printf "%d%d0" ${ver_major} ${ver_minor})
- str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+ set -- $(echo "$ver" | tr '.' ' ')
+ ver_major=$1
+ ver_minor=$2
+ ver_rel=$3
+ if [ -z "$ver_rel" ]; then
+ ver_rel=0
+ fi
+ if [ "$ver_major" -lt 10 -o \( "$ver_major" -eq 10 -a "$ver_minor" -lt 10 \) ]; then
+ value=$(printf "%d%d0" ${ver_major} ${ver_minor})
+ str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+ else
+ value=$(printf "%d%02d%02d" ${ver_major} ${ver_minor} ${ver_rel})
+ if [ "$ver_rel" -gt 0 ]; then
+ str=$(printf "__MAC_%d_%d_%d" ${ver_major} ${ver_minor} ${ver_rel})
+ else
+ str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+ fi
+ fi
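+ # e.g. ver=10.9    -> value=1090,   str=__MAC_10_9
+ #      ver=10.10.2 -> value=101002, str=__MAC_10_10_2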
echo "#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= ${value}"
echo "#define __DARWIN_ALIAS_STARTING_MAC_${str}(x) x"
echo "#else"
void munge_wll(void *args);
void munge_wllww(void *args);
void munge_wlll(void *args);
+void munge_wllll(void *args);
void munge_wllwwll(void *args);
void munge_wwwlw(void *args);
void munge_wwwlww(void *args);
kern_return_t (*thread_set_voucher_name)(mach_port_name_t voucher_name);
+ boolean_t (*proc_usynch_thread_qos_add_override_for_resource)(task_t task, struct uthread *, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type);
+ boolean_t (*proc_usynch_thread_qos_remove_override_for_resource)(task_t task, struct uthread *, uint64_t tid, user_addr_t resource, int resource_type);
+ boolean_t (*proc_usynch_thread_qos_reset_override_for_resource)(task_t task, struct uthread *, uint64_t tid, user_addr_t resource, int resource_type);
+
/* padding for future */
- void* _pad[87];
+ void* _pad[84];
} *pthread_callbacks_t;
-14.0.0
+14.1.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
#define kIOPMThermalLevelWarningKey "Thermal_Level_Warning"
/* Thermal Warning Level values
- * kIOPMThermalWarningLevelNormal - under normal operating conditions
- * kIOPMThermalWarningLevelDanger - thermal pressure may cause system slowdown
- * kIOPMThermalWarningLevelCrisis - thermal conditions may cause imminent shutdown
+ * kIOPMThermalLevelNormal - under normal operating conditions
+ * kIOPMThermalLevelDanger - thermal pressure may cause system slowdown
+ * kIOPMThermalLevelCritical - thermal conditions may cause imminent shutdown
*
* The platform may define additional thermal levels if necessary.
+ * Platform-specific values are defined at 100 and above
*/
enum {
- kIOPMThermalWarningLevelNormal = 0,
- kIOPMThermalWarningLevelDanger = 5,
- kIOPMThermalWarningLevelCrisis = 10
+ kIOPMThermalLevelNormal = 0,
+ kIOPMThermalLevelDanger = 5,
+ kIOPMThermalLevelCritical = 10,
+
+ kIOPMThermalLevelWarning = 100,
+ kIOPMThermalLevelTrap = 110,
+
+ kIOPMThermalLevelUnknown = 255,
};
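+/* Compatibility aliases for the former kIOPMThermalWarningLevel* names */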
+#define kIOPMThermalWarningLevelNormal kIOPMThermalLevelNormal
+#define kIOPMThermalWarningLevelDanger kIOPMThermalLevelWarning
+#define kIOPMThermalWarningLevelCrisis kIOPMThermalLevelCritical
// PM Settings Controller setting types
// Settings types used primarily with:
kIOPMSleepFactorExternalDisplay = 0x00080000ULL,
kIOPMSleepFactorNetworkKeepAliveActive = 0x00100000ULL,
kIOPMSleepFactorLocalUserActivity = 0x00200000ULL,
- kIOPMSleepFactorHibernateFailed = 0x00400000ULL
+ kIOPMSleepFactorHibernateFailed = 0x00400000ULL,
+ kIOPMSleepFactorThermalWarning = 0x00800000ULL,
+ kIOPMSleepFactorDisplayCaptured = 0x01000000ULL
};
// System Sleep Types
#define kUserWkCntChID IOREPORT_MAKEID('D','r','k','W','k','C','n','t')
+/* Sleep Options/settings */
+#define kSleepOptionDisplayCapturedModeKey "DisplayCapturedMode"
+
#if defined(KERNEL) && defined(__cplusplus)
#endif /* KERNEL */
+/*****************************************************************************
+ *
+ * Performance Warning
+ *
+ *****************************************************************************/
+
+/* Performance Warning Key
+ * Key for performance warning event published using IOPMrootDomain::
+ * systemPowerEventOccurred()
+ */
+#define kIOPMPerformanceWarningKey "Performance_Warning"
+
+/* Performance warning values */
+enum {
+ kIOPMPerformanceNormal = 0,
+ kIOPMPerformanceWarning = 100
+};
+
#endif /* ! _IOKIT_IOPMPRIVATE_H */
unsigned int displayIdleForDemandSleep :1;
unsigned int darkWakeHibernateError :1;
+ unsigned int thermalWarningState:1;
uint32_t hibernateMode;
AbsoluteTime userActivityTime;
void acceptSystemWakeEvents( bool accept );
void systemDidNotSleep( void );
void preventTransitionToUserActive( bool prevent );
+ void setThermalState(OSObject *value);
#endif /* XNU_KERNEL_PRIVATE */
};
if (dataQueue == 0) {
return false;
}
+ bzero(dataQueue, allocSize);
dataQueue->queueSize = size;
- dataQueue->head = 0;
- dataQueue->tail = 0;
+// dataQueue->head = 0;
+// dataQueue->tail = 0;
if (!notifyMsg) {
notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
if (kIODefaultCache != cacheMode)
{
- // VM system requires write access to change cache mode
- prot |= VM_PROT_WRITE;
- // update named entries cache mode
- memEntryCacheMode = (MAP_MEM_ONLY | prot | vmProtForCacheMode(cacheMode));
+ // VM system requires write access to update named entry cache mode
+ memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
}
if (_task)
#else
LOG("System Sleep\n");
#endif
-
+ if (thermalWarningState) {
+ const OSSymbol *event = OSSymbol::withCString(kIOPMThermalLevelWarningKey);
+ if (event) {
+ systemPowerEventOccurred(event, kIOPMThermalLevelUnknown);
+ event->release();
+ }
+ }
((IOService *)this)->stop_watchdog_timer(); //14456299
getPlatform()->sleepKernel();
userWasActive = false;
fullWakeReason = kFullWakeReasonNone;
+
OSString * wakeType = OSDynamicCast(
OSString, getProperty(kIOPMRootDomainWakeTypeKey));
OSString * wakeReason = OSDynamicCast(
currentFactors |= kIOPMSleepFactorLocalUserActivity;
if (darkWakeHibernateError && !CAP_HIGHEST(kIOPMSystemCapabilityGraphics))
currentFactors |= kIOPMSleepFactorHibernateFailed;
+ if (thermalWarningState)
+ currentFactors |= kIOPMSleepFactorThermalWarning;
DLOG("sleep factors 0x%llx\n", currentFactors);
{
obj = copyProperty(key);
}
- if (obj && (num = OSDynamicCast(OSNumber, obj)))
+ if (obj)
{
- *option = num->unsigned32BitValue();
- ok = true;
+ if ((num = OSDynamicCast(OSNumber, obj)))
+ {
+ *option = num->unsigned32BitValue();
+ ok = true;
+ }
+ else if (OSDynamicCast(OSBoolean, obj))
+ {
+ *option = (obj == kOSBooleanTrue) ? 1 : 0;
+ ok = true;
+ }
}
if (obj)
break;
#endif
- if (lowBatteryCondition)
+ if (lowBatteryCondition || thermalWarningState)
{
- break; // always sleep on low battery
+ break; // always sleep on low battery or when in thermal warning state
}
if (sleepReason == kIOPMSleepReasonDarkWakeThermalEmergency)
bool IOPMrootDomain::checkSystemCanSustainFullWake( void )
{
#if !NO_KERNEL_HID
- if (lowBatteryCondition)
+ if (lowBatteryCondition || thermalWarningState)
{
// Low battery wake, or received a low battery notification
// while system is awake. This condition will persist until
return attempt;
}
+void IOPMrootDomain::setThermalState(OSObject *value)
+{
+ OSNumber * num;
+
+ if (gIOPMWorkLoop->inGate() == false) {
+ gIOPMWorkLoop->runAction(
+ OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setThermalState),
+ (OSObject *)this,
+ (void *)value);
+
+ return;
+ }
+ if (value && (num = OSDynamicCast(OSNumber, value))) {
+ thermalWarningState = ((num->unsigned32BitValue() == kIOPMThermalLevelWarning) ||
+ (num->unsigned32BitValue() == kIOPMThermalLevelTrap)) ? 1 : 0;
+ }
+}
+
IOReturn IOPMrootDomain::systemPowerEventOccurred(
const OSSymbol *event,
OSObject *value)
// UNLOCK
if (featuresDictLock) IOLockUnlock(featuresDictLock);
- if (shouldUpdate)
+ if (shouldUpdate) {
+ if (event &&
+ event->isEqualTo(kIOPMThermalLevelWarningKey)) {
+ setThermalState(value);
+ }
messageClients (kIOPMMessageSystemPowerEventOccurred, (void *)NULL);
+ }
return kIOReturnSuccess;
}
if (dataQueue == 0) {
return false;
}
+ bzero(dataQueue, allocSize);
dataQueue->queueSize = size;
- dataQueue->head = 0;
- dataQueue->tail = 0;
+// dataQueue->head = 0;
+// dataQueue->tail = 0;
if (!setQueueSize(size)) {
return false;
appendix = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
appendix->version = 0;
- notifyMsg = &(appendix->msgh);
+
+ if (!notifyMsg) {
+ notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
+ if (!notifyMsg)
+ return false;
+ }
+ bzero(notifyMsg, sizeof(mach_msg_header_t));
+
setNotificationPort(MACH_PORT_NULL);
return true;
if (dataQueue) {
IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
dataQueue = NULL;
+ if (notifyMsg) {
+ IOFree(notifyMsg, sizeof(mach_msg_header_t));
+ notifyMsg = NULL;
+ }
}
if (_reserved) {
return false;
}
// Check for underflow of (getQueueSize() - tail)
- if (getQueueSize() < tail) {
+ if (getQueueSize() < tail || getQueueSize() < head) {
return false;
}
kernel_mach_header_t *kext_mach_hdr = (kernel_mach_header_t *)
linkedExecutable->getBytesNoCopy();
+#if !SECURE_KERNEL
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) {
kernel_mach_header_t * temp_kext_mach_hdr;
struct load_command * lcp;
__FUNCTION__, segp->segname, segp->vmaddr,
VM_KERNEL_UNSLIDE(segp->vmaddr),
segp->vmsize, segp->nsects);
+ if ( (VM_KERNEL_IS_SLID(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) &&
+ (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false) ) {
+ OSKextLog(/* kext */ NULL,
+ kOSKextLogErrorLevel |
+ kOSKextLogGeneralFlag,
+ "%s: not in kext range - vmaddr 0x%llX vm_kext_base 0x%lX vm_kext_top 0x%lX",
+ __FUNCTION__, segp->vmaddr, vm_kext_base, vm_kext_top);
+ }
#endif
segp->vmaddr = VM_KERNEL_UNSLIDE(segp->vmaddr);
}
result->setObject(kOSBundleMachOHeadersKey, headerData);
}
+#endif // SECURE_KERNEL
if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) {
cpuTypeNumber = OSNumber::withNumber(
9002401118FC9A7F00D73BFA /* rename_ext.c in Sources */ = {isa = PBXBuildFile; fileRef = 906AA2D018F74CD1001C681A /* rename_ext.c */; };
A59CB95616669EFB00B064B3 /* stack_logging_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = A59CB95516669DB700B064B3 /* stack_logging_internal.h */; };
A59CB9581666A1A200B064B3 /* munmap.c in Sources */ = {isa = PBXBuildFile; fileRef = A59CB9571666A1A200B064B3 /* munmap.c */; };
+ BA0D9FB1199031AD007E8A73 /* kdebug_trace.c in Sources */ = {isa = PBXBuildFile; fileRef = BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */; };
BA4414AA18336A5F00AAE813 /* mach in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A51833697C00AAE813 /* mach */; };
BA4414AB18336A6400AAE813 /* servers in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A6183369A100AAE813 /* servers */; };
BA4414AD18336A9300AAE813 /* mach in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A7183369C100AAE813 /* mach */; };
906AA2D018F74CD1001C681A /* rename_ext.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rename_ext.c; sourceTree = "<group>"; };
A59CB95516669DB700B064B3 /* stack_logging_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stack_logging_internal.h; sourceTree = "<group>"; };
A59CB9571666A1A200B064B3 /* munmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = munmap.c; sourceTree = "<group>"; };
+ BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = kdebug_trace.c; sourceTree = "<group>"; };
BA4414A51833697C00AAE813 /* mach */ = {isa = PBXFileReference; lastKnownFileType = text; name = mach; path = mig_hdr/include/mach; sourceTree = BUILT_PRODUCTS_DIR; };
BA4414A6183369A100AAE813 /* servers */ = {isa = PBXFileReference; lastKnownFileType = text; name = servers; path = mig_hdr/include/servers; sourceTree = BUILT_PRODUCTS_DIR; };
BA4414A7183369C100AAE813 /* mach */ = {isa = PBXFileReference; lastKnownFileType = text; name = mach; path = mig_hdr/local/include/mach; sourceTree = BUILT_PRODUCTS_DIR; };
248AA962122C7B2A0085F5B1 /* unlink.c */,
29A59AE5183B110C00E8B896 /* unlinkat.c */,
374A36E214748EE400AAF39D /* varargs_wrappers.s */,
+ BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */,
);
path = wrappers;
sourceTree = "<group>";
248BA08F121DC545008C073F /* open.c in Sources */,
248BA093121DE369008C073F /* select.c in Sources */,
248BA095121DE565008C073F /* select-pre1050.c in Sources */,
+ BA0D9FB1199031AD007E8A73 /* kdebug_trace.c in Sources */,
4BDD5F1E1891AB2F004BF300 /* mach_approximate_time.s in Sources */,
248BA0B3121DE760008C073F /* select-cancel.c in Sources */,
248BA0BE121DE902008C073F /* select.c in Sources */,
+++ /dev/null
-*.pbxuser
-*.perspectivev3
-build/
--- /dev/null
+/*
+ * Copyright (c) 2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdint.h>
+#include <machine/cpu_capabilities.h>
+#include <sys/kdebug.h>
+#include <sys/errno.h>
+
+#define CLASS_MASK 0xff000000
+#define CLASS_OFFSET 24
+#define SUBCLASS_MASK 0x00ff0000
+#define SUBCLASS_OFFSET 16
+
+#define EXTRACT_CLASS(debugid) ((uint8_t)(((debugid) & CLASS_MASK) >> CLASS_OFFSET))
+#define EXTRACT_SUBCLASS(debugid) ((uint8_t)(((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET))
+
+extern int __kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4);
+
+int
+kdebug_trace(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
+{
+ uint8_t code_class;
+ volatile uint32_t *kdebug_enable_address = (volatile uint32_t *)(uintptr_t)(_COMM_PAGE_KDEBUG_ENABLE);
+
+ /*
+ * This filtering is also done in the kernel, but we repeat it here so that errors
+ * are returned in all cases, not just when the system call is actually performed.
+ */
+ code_class = EXTRACT_CLASS(code);
+ switch (code_class) {
+ case DBG_TRACE:
+ errno = EPERM;
+ return -1;
+ }
+
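+ /*
+ * _COMM_PAGE_KDEBUG_ENABLE mirrors the kernel's kdebug_enable global
+ * (kept up to date by the kernel via commpage_update_kdebug_enable()),
+ * so when tracing is disabled we can return early without making the
+ * system call.
+ */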
+ if (*kdebug_enable_address == 0) {
+ return 0;
+ }
+
+ return __kdebug_trace64(code, arg1, arg2, arg3, arg4);
+}
uint64_t cpu_insns;
uint64_t cpu_ucc;
uint64_t cpu_urc;
+#if DIAG_ALL_PMCS
+ uint64_t gpmcs[4];
+#endif /* DIAG_ALL_PMCS */
} core_energy_stat_t;
typedef struct {
cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns;
cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc;
cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc;
+#if DIAG_ALL_PMCS
+ bcopy(&cpu_data_ptr[i]->cpu_gpmcs[0], &cest.gpmcs[0], sizeof(cest.gpmcs));
+#endif /* DIAG_ALL_PMCS */
(void) ml_set_interrupts_enabled(TRUE);
copyout(&cest, curpos, sizeof(cest));
uint64_t insns = read_pmc(FIXED_PMC0);
uint64_t ucc = read_pmc(FIXED_PMC1);
uint64_t urc = read_pmc(FIXED_PMC2);
+#if DIAG_ALL_PMCS
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ cdp->cpu_gpmcs[i] = read_pmc(i);
+ }
+#endif /* DIAG_ALL_PMCS */
cdp->cpu_cur_insns = insns;
cdp->cpu_cur_ucc = ucc;
cdp->cpu_cur_urc = urc;
#define FIXED_PMC0 (FIXED_PMC)
#define FIXED_PMC1 (FIXED_PMC | 1)
#define FIXED_PMC2 (FIXED_PMC | 2)
-
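+/* General-purpose performance counter indices, cf. read_pmc() below */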
+#define GPMC0 (0)
+#define GPMC1 (1)
+#define GPMC2 (2)
+#define GPMC3 (3)
+
static inline uint64_t read_pmc(uint32_t counter)
{
uint32_t lo = 0, hi = 0;
#include <kern/page_decrypt.h>
#include <kern/processor.h>
+#include <sys/kdebug.h>
+
/* the lists of commpage routines are in commpage_asm.s */
extern commpage_descriptor* commpage_32_routines[];
extern commpage_descriptor* commpage_64_routines[];
CPUID_LEAF7_FEATURE_HLE);
setif(bits, kHasAVX2_0, cpuid_leaf7_features() &
CPUID_LEAF7_FEATURE_AVX2);
+ setif(bits, kHasRDSEED, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_RDSEED);
+ setif(bits, kHasADX, cpuid_leaf7_features() &
+ CPUID_LEAF7_FEATURE_ADX);
uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
simple_lock_init(&commpage_active_cpus_lock, 0);
commpage_update_active_cpus();
- commpage_mach_approximate_time_init();
+ commpage_mach_approximate_time_init();
rtc_nanotime_init_commpage();
+ commpage_update_kdebug_enable();
}
/* Fill in the common routines during kernel initialization.
simple_unlock(&commpage_active_cpus_lock);
}
+/*
+ * Update the commpage data with the value of the "kdebug_enable"
+ * global so that userspace can avoid trapping into the kernel
+ * for kdebug_trace() calls. Serialization is handled
+ * by the caller in bsd/kern/kdebug.c.
+ */
+void
+commpage_update_kdebug_enable(void)
+{
+ volatile uint32_t *saved_data_ptr;
+ char *cp;
+
+ cp = commPagePtr32;
+ if (cp) {
+ cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = kdebug_enable;
+ }
+
+ cp = commPagePtr64;
+ if ( cp ) {
+ cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
+ saved_data_ptr = (volatile uint32_t *)cp;
+ *saved_data_ptr = kdebug_enable;
+ }
+}
+
+
/*
* update the commpage data for last known value of mach_absolute_time()
*/
{
#ifdef CONFIG_MACH_APPROXIMATE_TIME
uint64_t saved_data;
- char *cp;
-
- cp = commPagePtr32;
+ char *cp;
+
+ cp = commPagePtr32;
if ( cp ) {
- cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
+ cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
saved_data = *(uint64_t *)cp;
if (saved_data < abstime) {
/* ignoring the success/fail return value assuming that
OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
}
}
- cp = commPagePtr64;
+ cp = commPagePtr64;
if ( cp ) {
- cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
+ cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
saved_data = *(uint64_t *)cp;
if (saved_data < abstime) {
/* ignoring the success/fail return value assuming that
extern void commpage_sched_gen_inc(void);
extern void commpage_update_active_cpus(void);
extern void commpage_update_mach_approximate_time(uint64_t abstime);
+extern void commpage_update_kdebug_enable(void);
extern uint32_t commpage_is_in_pfz32(uint32_t);
extern uint32_t commpage_is_in_pfz64(addr64_t);
/* Extending into 64-bits from here: */
#define kHasRTM 0x0000000100000000ULL
#define kHasHLE 0x0000000200000000ULL
+#define kHasRDSEED 0x0000000800000000ULL
+#define kHasADX 0x0000000400000000ULL
#ifndef __ASSEMBLER__
#define _COMM_PAGE_MEMORY_SIZE (_COMM_PAGE_START_ADDRESS+0x038) /* uint64_t max memory size */
#define _COMM_PAGE_CPUFAMILY (_COMM_PAGE_START_ADDRESS+0x040) /* uint32_t hw.cpufamily, x86*/
-#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x044) /* [0x44,0x50) unused */
+#define _COMM_PAGE_KDEBUG_ENABLE (_COMM_PAGE_START_ADDRESS+0x044) /* uint32_t export "kdebug_enable" to userspace */
+#define _COMM_PAGE_UNUSED2 (_COMM_PAGE_START_ADDRESS+0x048) /* [0x48,0x50) unused */
#define _COMM_PAGE_TIME_DATA_START (_COMM_PAGE_START_ADDRESS+0x050) /* base of offsets below (_NT_SCALE etc) */
#define _COMM_PAGE_NT_TSC_BASE (_COMM_PAGE_START_ADDRESS+0x050) /* used by nanotime() */
uint64_t cpu_cur_insns;
uint64_t cpu_cur_ucc;
uint64_t cpu_cur_urc;
+ uint64_t cpu_gpmcs[4];
uint64_t cpu_max_observed_int_latency;
int cpu_max_observed_int_latency_vector;
volatile boolean_t cpu_NMI_acknowledged;
* Leaf7 Features:
*/
cpuid_fn(0x7, reg);
- info_p->cpuid_leaf7_features = reg[ebx];
+ info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
DBG(" Feature Leaf7:\n");
DBG(" EBX : 0x%x\n", reg[ebx]);
+ DBG(" ECX : 0x%x\n", reg[ecx]);
}
return;
cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
break;
case CPUID_MODEL_HASWELL:
+ case CPUID_MODEL_HASWELL_EP:
case CPUID_MODEL_HASWELL_ULT:
case CPUID_MODEL_CRYSTALWELL:
cpufamily = CPUFAMILY_INTEL_HASWELL;
break;
+#if !defined(XNU_HIDE_SEED)
+ case CPUID_MODEL_BROADWELL:
+ case CPUID_MODEL_BRYSTALWELL:
+ cpufamily = CPUFAMILY_INTEL_BROADWELL;
+ break;
+#endif /* not XNU_HIDE_SEED */
}
break;
}
* (which determines whether SMT/Hyperthreading is active).
*/
switch (info_p->cpuid_cpufamily) {
+ case CPUFAMILY_INTEL_MEROM:
+ case CPUFAMILY_INTEL_PENRYN:
+ info_p->core_count = info_p->cpuid_cores_per_package;
+ info_p->thread_count = info_p->cpuid_logical_per_package;
+ break;
case CPUFAMILY_INTEL_WESTMERE: {
uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
info_p->core_count = bitfield32((uint32_t)msr, 19, 16);
info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
break;
}
- case CPUFAMILY_INTEL_HASWELL:
- case CPUFAMILY_INTEL_IVYBRIDGE:
- case CPUFAMILY_INTEL_SANDYBRIDGE:
- case CPUFAMILY_INTEL_NEHALEM: {
+ default: {
uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
info_p->core_count = bitfield32((uint32_t)msr, 31, 16);
info_p->thread_count = bitfield32((uint32_t)msr, 15, 0);
{CPUID_LEAF7_FEATURE_BMI2, "BMI2"},
{CPUID_LEAF7_FEATURE_INVPCID, "INVPCID"},
{CPUID_LEAF7_FEATURE_RTM, "RTM"},
+ {CPUID_LEAF7_FEATURE_RDSEED, "RDSEED"},
+ {CPUID_LEAF7_FEATURE_ADX, "ADX"},
+#if !defined(XNU_HIDE_SEED)
+ {CPUID_LEAF7_FEATURE_SMAP, "SMAP"},
+#endif /* not XNU_HIDE_SEED */
{0, 0}
};
/*
* Leaf 7, subleaf 0 additional features.
- * Bits returned in %ebx to a CPUID request with {%eax,%ecx} of (0x7,0x0}:
+ * Bits returned in %ebx and %ecx to a CPUID request with {%eax,%ecx} of {0x7,0x0}:
*/
#define CPUID_LEAF7_FEATURE_RDWRFSGS _Bit(0) /* FS/GS base read/write */
#define CPUID_LEAF7_FEATURE_TSCOFF _Bit(1) /* TSC thread offset */
#define CPUID_LEAF7_FEATURE_BMI2 _Bit(8) /* Bit Manipulation Instrs, set 2 */
#define CPUID_LEAF7_FEATURE_ERMS _Bit(9) /* Enhanced Rep Movsb/Stosb */
#define CPUID_LEAF7_FEATURE_INVPCID _Bit(10) /* INVPCID intruction, TDB */
-#define CPUID_LEAF7_FEATURE_RTM _Bit(11) /* TBD */
+#define CPUID_LEAF7_FEATURE_RTM _Bit(11) /* RTM */
+#define CPUID_LEAF7_FEATURE_RDSEED _Bit(18) /* RDSEED Instruction */
+#define CPUID_LEAF7_FEATURE_ADX _Bit(19) /* ADX Instructions */
+#if !defined(XNU_HIDE_SEED)
+#define CPUID_LEAF7_FEATURE_SMAP _Bit(20) /* Supervisor Mode Access Protect */
+#endif /* not XNU_HIDE_SEED */
/*
* The CPUID_EXTFEATURE_XXX values define 64-bit values
#define CPUID_MODEL_IVYBRIDGE_EP 0x3E
#define CPUID_MODEL_CRYSTALWELL 0x46
#define CPUID_MODEL_HASWELL 0x3C
-#define CPUID_MODEL_HASWELL_SVR 0x3F
+#define CPUID_MODEL_HASWELL_EP 0x3F
#define CPUID_MODEL_HASWELL_ULT 0x45
+#if !defined(XNU_HIDE_SEED)
+#define CPUID_MODEL_BROADWELL 0x3D
+#define CPUID_MODEL_BROADWELL_ULX 0x3D
+#define CPUID_MODEL_BROADWELL_ULT 0x3D
+#define CPUID_MODEL_BRYSTALWELL 0x47
+#endif /* not XNU_HIDE_SEED */
#define CPUID_VMM_FAMILY_UNKNOWN 0x0
#define CPUID_VMM_FAMILY_VMWARE 0x1
cpuid_thermal_leaf_t *cpuid_thermal_leafp;
cpuid_arch_perf_leaf_t *cpuid_arch_perf_leafp;
cpuid_xsave_leaf_t *cpuid_xsave_leafp;
- uint32_t cpuid_leaf7_features;
+ uint64_t cpuid_leaf7_features;
} i386_cpu_info_t;
#ifdef MACH_KERNEL_PRIVATE
if (xsp->extended_state[0] & (uint32_t)XFEM_YMM) {
assert(xsp->extended_state[0] & (uint32_t) XFEM_SSE);
/* XSAVE container size for all features */
- assert(xsp->extended_state[2] == sizeof(struct x86_avx_thread_state));
+ if (xsp->extended_state[2] != sizeof(struct x86_avx_thread_state))
+ kprintf("sizeof(struct x86_avx_thread_state)=%lu != xsp->extended_state[2]=%u\n",
+ sizeof(struct x86_avx_thread_state), xsp->extended_state[2]);
fp_register_state_size = sizeof(struct x86_avx_thread_state);
fpu_YMM_present = TRUE;
set_cr4(get_cr4() | CR4_OSXSAVE);
vm_offset_t vm_kernel_stext;
vm_offset_t vm_kernel_etext;
vm_offset_t vm_kernel_slide;
-vm_offset_t vm_hib_base;
+vm_offset_t vm_hib_base;
vm_offset_t vm_kext_base = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
vm_offset_t vm_kext_top = VM_MIN_KERNEL_ADDRESS;
+vm_offset_t vm_prelink_stext;
+vm_offset_t vm_prelink_etext;
+vm_offset_t vm_prelink_sinfo;
+vm_offset_t vm_prelink_einfo;
+vm_offset_t vm_slinkedit;
+vm_offset_t vm_elinkedit;
+
#define MAXLORESERVE (32 * 1024 * 1024)
ppnum_t max_ppnum = 0;
vm_offset_t segDATAB; unsigned long segSizeDATA;
vm_offset_t segLINKB; unsigned long segSizeLINK;
vm_offset_t segPRELINKB; unsigned long segSizePRELINK;
+vm_offset_t segPRELINKINFOB; unsigned long segSizePRELINKINFO;
vm_offset_t segHIBB; unsigned long segSizeHIB;
vm_offset_t sectCONSTB; unsigned long sectSizeConst;
"__HIB", &segSizeHIB);
segPRELINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
"__PRELINK_TEXT", &segSizePRELINK);
+ segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
+ "__PRELINK_INFO", &segSizePRELINKINFO);
segTEXT = getsegbynamefromheader(&_mh_execute_header,
"__TEXT");
segDATA = getsegbynamefromheader(&_mh_execute_header,
DBG("segLINKB = %p\n", (void *) segLINKB);
DBG("segHIBB = %p\n", (void *) segHIBB);
DBG("segPRELINKB = %p\n", (void *) segPRELINKB);
+ DBG("segPRELINKINFOB = %p\n", (void *) segPRELINKINFOB);
DBG("sHIB = %p\n", (void *) sHIB);
DBG("eHIB = %p\n", (void *) eHIB);
DBG("stext = %p\n", (void *) stext);
vm_kernel_top = (vm_offset_t) &last_kernel_symbol;
vm_kernel_stext = stext;
vm_kernel_etext = etext;
+ vm_prelink_stext = segPRELINKB;
+ vm_prelink_etext = segPRELINKB + segSizePRELINK;
+ vm_prelink_sinfo = segPRELINKINFOB;
+ vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
+ vm_slinkedit = segLINKB;
+ vm_elinkedit = segLINKB + segSizeLINK;
vm_set_page_size();
/*
* CR4
*/
+#define CR4_SMAP 0x00200000 /* Supervisor-Mode Access Protect */
#define CR4_SMEP 0x00100000 /* Supervisor-Mode Execute Protect */
#define CR4_OSXSAVE 0x00040000 /* OS supports XSAVE */
#define CR4_PCIDE 0x00020000 /* PCID Enable */
/*
* XCR0 - XFEATURE_ENABLED_MASK (a.k.a. XFEM) register
*/
-#define XCR0_YMM 0x0000000000000004ULL /* YMM state available */
-#define XFEM_YMM XCR0_YMM
-#define XCR0_SSE 0x0000000000000002ULL /* SSE supported by XSAVE/XRESTORE */
-#define XCR0_X87 0x0000000000000001ULL /* x87, FPU/MMX (always set) */
-#define XFEM_SSE XCR0_SSE
-#define XFEM_X87 XCR0_X87
+#define XCR0_X87 (1ULL << 0) /* x87, FPU/MMX (always set) */
+#define XCR0_SSE (1ULL << 1) /* SSE supported by XSAVE/XRESTORE */
+#define XCR0_YMM (1ULL << 2) /* YMM state available */
+#define XFEM_X87 XCR0_X87
+#define XFEM_SSE XCR0_SSE
+#define XFEM_YMM XCR0_YMM
#define XCR0 (0)
#define PMAP_PCID_PRESERVE (1ULL << 63)
#define MSR_IA32_PERFCTR0 0xc1
#define MSR_IA32_PERFCTR1 0xc2
+#define MSR_IA32_PERFCTR3 0xc3
+#define MSR_IA32_PERFCTR4 0xc4
#define MSR_PLATFORM_INFO 0xce
#define MSR_IA32_EVNTSEL0 0x186
#define MSR_IA32_EVNTSEL1 0x187
+#define MSR_IA32_EVNTSEL2 0x188
+#define MSR_IA32_EVNTSEL3 0x189
#define MSR_FLEX_RATIO 0x194
#define MSR_IA32_PERF_STS 0x198
#include <kern/mach_param.h> /* for TASK_CHUNK */
#include <kern/task.h>
#include <kern/zalloc.h>
+#include <kern/sfi.h>
#include <libkern/OSAtomic.h>
unsigned int terminated : 1; /* coalition became empty and spawns are now forbidden */
unsigned int reaped : 1; /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
unsigned int notified : 1; /* no-more-processes notification was sent via special port */
+
+ uint32_t focal_tasks_count; /* count of TASK_FOREGROUND_APPLICATION tasks in the coalition */
+ uint32_t non_focal_tasks_count; /* count of TASK_BACKGROUND_APPLICATION tasks in the coalition */
};
#define coalition_lock(c) do{ lck_mtx_lock(&c->lock); }while(0)
assert(coal->terminated);
assert(coal->active_count == 0);
assert(coal->reaped);
+ assert(coal->focal_tasks_count == 0);
+ assert(coal->non_focal_tasks_count == 0);
ledger_dereference(coal->ledger);
lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);
/* "Leak" our reference to the global object */
}
+/* coalition focal tasks */
+uint32_t coalition_adjust_focal_task_count(coalition_t coal, int count)
+{
+ return hw_atomic_add(&coal->focal_tasks_count, count);
+}
+
+uint32_t coalition_focal_task_count(coalition_t coal)
+{
+ return coal->focal_tasks_count;
+}
+
+uint32_t coalition_adjust_non_focal_task_count(coalition_t coal, int count)
+{
+ return hw_atomic_add(&coal->non_focal_tasks_count, count);
+}
+
+uint32_t coalition_non_focal_task_count(coalition_t coal)
+{
+ return coal->non_focal_tasks_count;
+}
+
+/* Call sfi_reevaluate() for every thread in the coalition */
+void coalition_sfi_reevaluate(coalition_t coal, task_t updated_task)
+{
+ task_t task;
+ thread_t thread;
+
+ coalition_lock(coal);
+
+ queue_iterate(&coal->tasks, task, task_t, coalition_tasks) {
+
+ /* Skip the task we're doing this on behalf of - it's already updated */
+ if (task == updated_task)
+ continue;
+
+ task_lock(task);
+
+ queue_iterate(&task->threads, thread, thread_t, task_threads) {
+ sfi_reevaluate(thread);
+ }
+ task_unlock(task);
+ }
+ coalition_unlock(coal);
+}
+
ledger_t coalition_get_ledger(coalition_t coal);
+uint32_t coalition_adjust_focal_task_count(coalition_t coal, int count);
+uint32_t coalition_focal_task_count(coalition_t coal);
+uint32_t coalition_adjust_non_focal_task_count(coalition_t coal, int count);
+uint32_t coalition_non_focal_task_count(coalition_t coal);
+
+void coalition_sfi_reevaluate(coalition_t coal, task_t updated_task);
+
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _KERN_COALITION_H */
#include <kern/timer_call.h>
#include <kern/wait_queue.h>
#include <kern/ledger.h>
+#include <kern/coalition.h>
+
#include <pexpert/pexpert.h>
#include <libkern/kernel_mach_header.h>
int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);
int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);
int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
+ boolean_t focal = FALSE;
/* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
if (is_kernel_thread) {
}
/*
- * Threads with unspecified or legacy QOS class can be individually managed
+ * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
*/
- if (managed_task &&
- (thread_qos == THREAD_QOS_UNSPECIFIED || thread_qos == THREAD_QOS_LEGACY)) {
- if (task_role == TASK_FOREGROUND_APPLICATION || task_role == TASK_CONTROL_APPLICATION)
- return SFI_CLASS_MANAGED_FOCAL;
- else
- return SFI_CLASS_MANAGED_NONFOCAL;
+
+ switch (task_role) {
+ case TASK_CONTROL_APPLICATION:
+ case TASK_FOREGROUND_APPLICATION:
+ focal = TRUE;
+ break;
+
+ case TASK_BACKGROUND_APPLICATION:
+ case TASK_DEFAULT_APPLICATION:
+ case TASK_UNSPECIFIED:
+ /* Focal if in coalition with foreground app */
+ if (coalition_focal_task_count(thread->task->coalition) > 0)
+ focal = TRUE;
+ break;
+
+ default:
+ break;
+ }
+
+ if (managed_task) {
+ switch (thread_qos) {
+ case THREAD_QOS_UNSPECIFIED:
+ case THREAD_QOS_LEGACY:
+ case THREAD_QOS_USER_INITIATED:
+ if (focal)
+ return SFI_CLASS_MANAGED_FOCAL;
+ else
+ return SFI_CLASS_MANAGED_NONFOCAL;
+ default:
+ break;
+ }
}
if (thread_qos == THREAD_QOS_UTILITY)
return SFI_CLASS_UTILITY;
- if (task_role == TASK_FOREGROUND_APPLICATION || task_role == TASK_CONTROL_APPLICATION) {
+ /*
+ * Classify threads in non-managed tasks
+ */
+ if (focal) {
switch (thread_qos) {
case THREAD_QOS_USER_INTERACTIVE:
return SFI_CLASS_USER_INTERACTIVE_FOCAL;
}
}
-/* Thread must be unlocked */
+/*
+ * Thread must be unlocked
+ * May be called with coalition, task, or thread mutex held
+ */
void sfi_reevaluate(thread_t thread)
{
kern_return_t kret;
extern int kpc_force_all_ctrs(task_t, int);
#endif
+uint32_t qos_override_mode;
+
task_t kernel_task;
zone_t task_zone;
lck_attr_t task_lck_attr;
hwm_user_cores = 0;
}
+ if (PE_parse_boot_argn("qos_override_mode", &qos_override_mode, sizeof(qos_override_mode))) {
+ printf("QOS override mode: 0x%08x\n", qos_override_mode);
+ } else {
+ qos_override_mode = QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE;
+ }
+
proc_init_cpumon_params();
if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof (task_wakeups_monitor_rate))) {
task_unlock(task);
+ proc_set_task_policy(task, THREAD_NULL, TASK_POLICY_ATTRIBUTE,
+ TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
+
/* Early object reap phase */
// PR-17045188: Revisit implementation
extern lck_attr_t task_lck_attr;
extern lck_grp_t task_lck_grp;
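+/*
+ * Policies for accounting userspace QoS overrides, selected at boot
+ * via the "qos_override_mode" boot-arg.
+ */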
+#define QOS_OVERRIDE_MODE_OVERHANG_PEAK 0
+#define QOS_OVERRIDE_MODE_IGNORE_OVERRIDE 1
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE 2
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH 3
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE 4
+
+extern uint32_t qos_override_mode;
+
#else /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS
uint32_t tpt_update_sockets :1,
tpt_update_timers :1,
tpt_update_watchers :1,
- tpt_update_live_donor :1;
+ tpt_update_live_donor :1,
+ tpt_update_coal_sfi :1;
} *task_pend_token_t;
extern void task_policy_update_complete_unlocked(task_t task, thread_t thread, task_pend_token_t pend_token);
thread_t task_findtid(task_t, uint64_t);
void set_thread_iotier_override(thread_t, int policy);
-boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource);
-boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid);
+boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type);
+boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type);
+boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type);
+void proc_thread_qos_deallocate(thread_t thread);
#define TASK_RUSECPU_FLAGS_PROC_LIMIT 0x01
#define TASK_RUSECPU_FLAGS_PERTHR_LIMIT 0x02
#include <kern/ledger.h>
#include <kern/thread_call.h>
#include <kern/sfi.h>
+#include <kern/coalition.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
static void task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_create, task_pend_token_t pend_token);
static void task_policy_update_task_locked(task_t task, boolean_t update_throttle, boolean_t update_bg_throttle, boolean_t update_sfi);
static void task_policy_update_thread_locked(thread_t thread, int update_cpu, boolean_t update_throttle, boolean_t update_sfi, boolean_t update_qos);
+static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role);
static int proc_get_effective_policy(task_t task, thread_t thread, int policy);
extern void proc_apply_task_networkbg(void * bsd_info, thread_t thread);
#endif /* MACH_BSD */
+extern zone_t thread_qos_override_zone;
+static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset);
+
/* Importance Inheritance related helper functions */
next.t_qos_ceiling = THREAD_QOS_UNSPECIFIED;
break;
+ case TASK_DEFAULT_APPLICATION:
+ /* This is 'may render UI but we don't know if it's focal/nonfocal' */
+ next.t_qos_ceiling = THREAD_QOS_UNSPECIFIED;
+ break;
+
case TASK_NONUI_APPLICATION:
/* i.e. 'off-screen' */
next.t_qos_ceiling = THREAD_QOS_LEGACY;
(prev.t_sfi_managed != next.t_sfi_managed))
update_sfi = TRUE;
+/* TODO: if CONFIG_SFI */
+ if (prev.t_role != next.t_role && task_policy_update_coalition_focal_tasks(task, prev.t_role, next.t_role)) {
+ update_sfi = TRUE;
+ pend_token->tpt_update_coal_sfi = 1;
+ }
+
task_policy_update_task_locked(task, update_throttle, update_threads, update_sfi);
} else {
int update_cpu = 0;
}
}
+/*
+ * Yet another layering violation. We reach out and bang on the coalition directly.
+ */
+static boolean_t
+task_policy_update_coalition_focal_tasks(task_t task,
+ int prev_role,
+ int next_role)
+{
+ boolean_t sfi_transition = FALSE;
+
+ if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
+ if (coalition_adjust_focal_task_count(task->coalition, 1) == 1)
+ sfi_transition = TRUE;
+ } else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
+ if (coalition_adjust_focal_task_count(task->coalition, -1) == 0)
+ sfi_transition = TRUE;
+ }
+
+ if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
+ if (coalition_adjust_non_focal_task_count(task->coalition, 1) == 1)
+ sfi_transition = TRUE;
+ } else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
+ if (coalition_adjust_non_focal_task_count(task->coalition, -1) == 0)
+ sfi_transition = TRUE;
+ }
+
+ return sfi_transition;
+}
+
/* Despite the name, the thread's task is locked, the thread is not */
void
task_policy_update_thread_locked(thread_t thread,
if (pend_token->tpt_update_live_donor)
task_importance_update_live_donor(task);
+
+ if (pend_token->tpt_update_coal_sfi)
+ coalition_sfi_reevaluate(task->coalition, task);
}
}
* as the subsystem informs us of the relationships between the threads. The userspace
* synchronization subsystem should maintain the information of owner->resource and
* resource->waiters itself.
- *
- * The add/remove routines can return failure if the target of the override cannot be
- * found, perhaps because the resource subsystem doesn't have an accurate view of the
- * resource owner in the face of race conditions.
*/
-boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource)
+/*
+ * This helper canonicalizes the resource/resource_type given the current qos_override_mode
+ * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
+ * to be handled specially in the future, but for now it's fine to slam
+ * *resource to USER_ADDR_NULL even if it was previously a wildcard.
+ */
+static void _canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
+ if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
+ /* Map all input resource/type to a single one */
+ *resource = USER_ADDR_NULL;
+ *resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
+ } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
+ /* no transform */
+ } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH) {
+ /* Map all dispatch overrides to a single one, to avoid memory overhead */
+ if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
+ *resource = USER_ADDR_NULL;
+ }
+ } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
+ /* Map all mutex overrides to a single one, to avoid memory overhead */
+ if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
+ *resource = USER_ADDR_NULL;
+ }
+ }
+}
+
+/* This helper routine finds an existing override if known. Locking should be done by caller */
+static struct thread_qos_override *_find_qos_override(thread_t thread, user_addr_t resource, int resource_type) {
+ struct thread_qos_override *override;
+
+ override = thread->overrides;
+ while (override) {
+ if (override->override_resource == resource &&
+ override->override_resource_type == resource_type) {
+ return override;
+ }
+
+ override = override->override_next;
+ }
+
+ return NULL;
+}
+
+static void _find_and_decrement_qos_override(thread_t thread, user_addr_t resource, int resource_type, boolean_t reset, struct thread_qos_override **free_override_list) {
+ struct thread_qos_override *override, *override_prev;
+
+ override_prev = NULL;
+ override = thread->overrides;
+ while (override) {
+ struct thread_qos_override *override_next = override->override_next;
+
+ if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
+ override->override_resource_type == resource_type) {
+ if (reset) {
+ override->override_contended_resource_count = 0;
+ } else {
+ override->override_contended_resource_count--;
+ }
+
+ if (override->override_contended_resource_count == 0) {
+ if (override_prev == NULL) {
+ thread->overrides = override_next;
+ } else {
+ override_prev->override_next = override_next;
+ }
+
+ /* Add to out-param for later zfree */
+ override->override_next = *free_override_list;
+ *free_override_list = override;
+ } else {
+ override_prev = override;
+ }
+
+ if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
+ return;
+ }
+ } else {
+ override_prev = override;
+ }
+
+ override = override_next;
+ }
+}
+
+/* This helper recalculates the current requested override using the policy selected at boot */
+static int _calculate_requested_qos_override(thread_t thread)
+{
+ if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
+ return THREAD_QOS_UNSPECIFIED;
+ }
+
+ /* iterate over all overrides and calculate MAX */
+ struct thread_qos_override *override;
+ int qos_override = THREAD_QOS_UNSPECIFIED;
+
+ override = thread->overrides;
+ while (override) {
+ if (qos_override_mode != QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH ||
+ override->override_resource_type != THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
+ qos_override = MAX(qos_override, override->override_qos);
+ }
+
+ override = override->override_next;
+ }
+
+ return qos_override;
+}
+
+boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
{
thread_t self = current_thread();
- int resource_count;
struct task_pend_token pend_token = {};
/* XXX move to thread mutex when thread policy does */
DTRACE_BOOST5(qos_add_override_pre, uint64_t, tid, uint64_t, thread->requested_policy.thrp_qos,
uint64_t, thread->effective_policy.thep_qos, int, override_qos, boolean_t, first_override_for_resource);
+ struct task_requested_policy requested = thread->requested_policy;
+ struct thread_qos_override *override;
+ struct thread_qos_override *deferred_free_override = NULL;
+ int new_qos_override, prev_qos_override;
+ int new_effective_qos;
+ boolean_t has_thread_reference = FALSE;
+
+ _canonicalize_resource_and_type(&resource, &resource_type);
+
if (first_override_for_resource) {
- resource_count = ++thread->usynch_override_contended_resource_count;
+ override = _find_qos_override(thread, resource, resource_type);
+ if (override) {
+ override->override_contended_resource_count++;
+ } else {
+ struct thread_qos_override *override_new;
+
+ /* We need to allocate a new object. Drop the task lock and recheck afterwards in case someone else added the override */
+ thread_reference(thread);
+ has_thread_reference = TRUE;
+ task_unlock(task);
+ override_new = zalloc(thread_qos_override_zone);
+ task_lock(task);
+
+ override = _find_qos_override(thread, resource, resource_type);
+ if (override) {
+ /* Someone else already allocated while the task lock was dropped */
+ deferred_free_override = override_new;
+ override->override_contended_resource_count++;
+ } else {
+ override = override_new;
+ override->override_next = thread->overrides;
+ override->override_contended_resource_count = 1 /* since first_override_for_resource was TRUE */;
+ override->override_resource = resource;
+ override->override_resource_type = resource_type;
+ override->override_qos = THREAD_QOS_UNSPECIFIED;
+ thread->overrides = override;
+ }
+ }
} else {
- resource_count = thread->usynch_override_contended_resource_count;
+ override = _find_qos_override(thread, resource, resource_type);
}
- struct task_requested_policy requested = thread->requested_policy;
+ if (override) {
+ if (override->override_qos == THREAD_QOS_UNSPECIFIED)
+ override->override_qos = override_qos;
+ else
+ override->override_qos = MAX(override->override_qos, override_qos);
+ }
- if (requested.thrp_qos_override == THREAD_QOS_UNSPECIFIED)
- requested.thrp_qos_override = override_qos;
- else
- requested.thrp_qos_override = MAX(requested.thrp_qos_override, override_qos);
+ /* Determine how to combine the various overrides into a single current requested override */
+ prev_qos_override = requested.thrp_qos_override;
+ new_qos_override = _calculate_requested_qos_override(thread);
- thread->requested_policy = requested;
+ if (new_qos_override != prev_qos_override) {
+ requested.thrp_qos_override = new_qos_override;
- task_policy_update_locked(task, thread, &pend_token);
+ thread->requested_policy = requested;
+
+ task_policy_update_locked(task, thread, &pend_token);
+
+ if (!has_thread_reference) {
+ thread_reference(thread);
+ }
+
+ task_unlock(task);
+
+ task_policy_update_complete_unlocked(task, thread, &pend_token);
- thread_reference(thread);
+ new_effective_qos = thread->effective_policy.thep_qos;
+
+ thread_deallocate(thread);
+ } else {
+ new_effective_qos = thread->effective_policy.thep_qos;
- task_unlock(task);
+ task_unlock(task);
- task_policy_update_complete_unlocked(task, thread, &pend_token);
+ if (has_thread_reference) {
+ thread_deallocate(thread);
+ }
+ }
- DTRACE_BOOST3(qos_add_override_post, uint64_t, requested.thrp_qos_override,
- uint64_t, thread->effective_policy.thep_qos, int, resource_count);
+ if (deferred_free_override) {
+ zfree(thread_qos_override_zone, deferred_free_override);
+ }
- thread_deallocate(thread);
+ DTRACE_BOOST3(qos_add_override_post, int, prev_qos_override, int, new_qos_override,
+ int, new_effective_qos);
KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
- requested.thrp_qos_override, resource_count, 0, 0, 0);
+ new_qos_override, resource, resource_type, 0, 0);
return TRUE;
}
-boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid)
+
+static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset)
{
thread_t self = current_thread();
- int resource_count;
struct task_pend_token pend_token = {};
/* XXX move to thread mutex when thread policy does */
}
}
- resource_count = --thread->usynch_override_contended_resource_count;
+ struct task_requested_policy requested = thread->requested_policy;
+ struct thread_qos_override *deferred_free_override_list = NULL;
+ int new_qos_override, prev_qos_override;
+
+ _canonicalize_resource_and_type(&resource, &resource_type);
+
+ _find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);
KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
- thread_tid(thread), resource_count, 0, 0, 0);
+ thread_tid(thread), resource, reset, 0, 0);
+
+ /* Determine how to combine the various overrides into a single current requested override */
+ prev_qos_override = requested.thrp_qos_override;
+ new_qos_override = _calculate_requested_qos_override(thread);
+
+ if (new_qos_override != prev_qos_override) {
+ requested.thrp_qos_override = new_qos_override;
- if (0 == resource_count) {
- thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
+ thread->requested_policy = requested;
task_policy_update_locked(task, thread, &pend_token);
thread_reference(thread);
-
+
task_unlock(task);
task_policy_update_complete_unlocked(task, thread, &pend_token);
-
+
thread_deallocate(thread);
- } else if (0 > resource_count) {
- // panic("usynch_override_contended_resource_count underflow for thread %p", thread);
- task_unlock(task);
} else {
task_unlock(task);
}
+ while (deferred_free_override_list) {
+ struct thread_qos_override *override_next = deferred_free_override_list->override_next;
+
+ zfree(thread_qos_override_zone, deferred_free_override_list);
+ deferred_free_override_list = override_next;
+ }
+
KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
0, 0, 0, 0, 0);
return TRUE;
}
+boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
+{
+ return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, FALSE);
+}
+
+boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
+{
+ return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, TRUE);
+}
+
+/* Deallocate before thread termination */
+void proc_thread_qos_deallocate(thread_t thread)
+{
+ task_t task = thread->task;
+ struct thread_qos_override *override;
+
+ /* XXX move to thread mutex when thread policy does */
+ task_lock(task);
+ override = thread->overrides;
+ thread->overrides = NULL; /* task policy re-evaluation needed? */
+ thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
+ task_unlock(task);
+
+ while (override) {
+ struct thread_qos_override *override_next = override->override_next;
+
+ zfree(thread_qos_override_zone, override);
+ override = override_next;
+ }
+}
+
/* TODO: remove this variable when interactive daemon audit period is over */
extern boolean_t ipc_importance_interactive_receiver;
lck_attr_t thread_lck_attr;
lck_grp_t thread_lck_grp;
+struct zone *thread_qos_override_zone;
+
decl_simple_lock_data(static,thread_stack_lock)
static queue_head_t thread_stack_queue;
thread_template.effective_policy = default_task_effective_policy;
thread_template.pended_policy = default_task_pended_policy;
- thread_template.usynch_override_contended_resource_count = 0;
+ bzero(&thread_template.overrides, sizeof(thread_template.overrides));
thread_template.iotier_override = THROTTLE_LEVEL_NONE;
thread_template.thread_io_stats = NULL;
THREAD_CHUNK * sizeof(struct thread),
"threads");
+ thread_qos_override_zone = zinit(
+ sizeof(struct thread_qos_override),
+ 4 * thread_max * sizeof(struct thread_qos_override),
+ PAGE_SIZE,
+ "thread qos override");
+ zone_change(thread_qos_override_zone, Z_EXPAND, TRUE);
+ zone_change(thread_qos_override_zone, Z_COLLECT, TRUE);
+ zone_change(thread_qos_override_zone, Z_CALLERACCT, FALSE);
+ zone_change(thread_qos_override_zone, Z_NOENCRYPT, TRUE);
+
lck_grp_attr_setdefault(&thread_lck_grp_attr);
lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
lck_attr_setdefault(&thread_lck_attr);
ipc_thread_terminate(thread);
+ proc_thread_qos_deallocate(thread);
+
task = thread->task;
#ifdef MACH_BSD
struct task_pended_policy pended_policy;
/* usynch override is protected by the task lock, eventually will be thread mutex */
- int usynch_override_contended_resource_count;
+ struct thread_qos_override {
+ struct thread_qos_override *override_next;
+ uint32_t override_contended_resource_count;
+ int16_t override_qos;
+ int16_t override_resource_type;
+ user_addr_t override_resource;
+ } *overrides;
int iotier_override; /* atomic operations to set, cleared on ret to user */
io_stat_info_t thread_io_stats; /* per-thread I/O statistics */
#define CPUFAMILY_INTEL_SANDYBRIDGE 0x5490b78c
#define CPUFAMILY_INTEL_IVYBRIDGE 0x1f65e835
#define CPUFAMILY_INTEL_HASWELL 0x10b282dc
+#if !defined(XNU_HIDE_SEED)
+#define CPUFAMILY_INTEL_BROADWELL 0x582ed09c
+#endif /* not XNU_HIDE_SEED */
#define CPUFAMILY_ARM_9 0xe73283ae
#define CPUFAMILY_ARM_11 0x8ff620d8
#define CPUFAMILY_ARM_XSCALE 0x53b005f5
#define THREAD_QOS_MIN_TIER_IMPORTANCE (-15)
+/*
+ * Overrides are inputs to the task/thread policy engine that
+ * temporarily elevate the effective QoS of a thread without changing
+ * its steady-state (and round-trip-able) requested QoS. The
+ * interfaces into the kernel allow the caller to associate a resource
+ * and type that describe the reason/lifecycle of the override. For
+ * instance, a contended pthread_mutex_t held by a UTILITY thread
+ * might get an override to USER_INTERACTIVE, with the resource
+ * being the userspace address of the pthread_mutex_t. When the
+ * owning thread releases that resource, it can call into the
+ * task policy subsystem to drop the override because of that resource,
+ * although if more contended locks are held by the thread, the
+ * effective QoS may remain overridden for longer.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX is used for contended
+ * pthread_mutex_t's via the pthread kext. The holder gets an override
+ * with resource=&mutex and a count of 1 by the initial contender.
+ * Subsequent contenders raise the QoS value, until the holder
+ * decrements the count to 0 and the override is released.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK is unimplemented and has no
+ * specified semantics.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE are explicitly
+ * paired start/end overrides on a target thread. The resource can
+ * either be a memory allocation in userspace, or the pthread_t of the
+ * overrider if no allocation was used.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE are used to
+ * override the QoS of a thread currently draining a serial dispatch
+ * queue, so that it can get to a block of higher QoS than its
+ * predecessors. The override is applied by a thread enqueueing work
+ * with resource=&queue, and reset by the thread that was overridden
+ * once it has drained the queue. Since the ++ and reset are
+ * asynchronous, there is the possibility of a ++ after the target
+ * thread has issued a reset, in which case the workqueue thread may
+ * issue a reset-all in its outermost scope before deciding whether it
+ * should return to dequeueing work from the global concurrent queues,
+ * or return to the kernel.
+ */
+
+#define THREAD_QOS_OVERRIDE_TYPE_UNKNOWN (0)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX (1)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK (2)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE (3)
+#define THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE (4)
+
+/* A special resource value to indicate a resource wildcard */
+#define THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD (~((user_addr_t)0))
+
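
As a rough illustration of the lifecycle described in the comment above, the sketch below shows how a hypothetical in-kernel caller might pair an add and a remove for a contended pthread mutex using the resource-aware entry points introduced earlier in this change. It is not part of the diff: the example_* helper names and the waiter_qos parameter are invented for illustration, and any thread lookup or locking the real entry points expect is elided.

/* Hypothetical sketch (not in this diff): boost the owner of a contended
 * pthread mutex for as long as the contention lasts. Thread lookup and
 * locking performed by the real callers are elided here. */
static void
example_boost_mutex_owner(task_t owner_task, thread_t owner_thread,
                          user_addr_t mutex_uaddr, int waiter_qos,
                          boolean_t first_waiter_for_this_mutex)
{
	/* Raise the owner at least to the waiter's QoS for this resource;
	 * repeated calls only bump the per-resource count and QoS ceiling. */
	proc_thread_qos_add_override(owner_task, owner_thread,
	    thread_tid(owner_thread), waiter_qos,
	    first_waiter_for_this_mutex, mutex_uaddr,
	    THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}

static void
example_unboost_mutex_owner(task_t owner_task, thread_t owner_thread,
                            user_addr_t mutex_uaddr)
{
	/* Drop one contention reference; the override object is freed and the
	 * thread's requested override recomputed once the count reaches zero. */
	proc_thread_qos_remove_override(owner_task, owner_thread,
	    thread_tid(owner_thread), mutex_uaddr,
	    THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX);
}
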
struct thread_qos_policy {
integer_t qos_tier;
integer_t tier_importance;
extern vm_offset_t vm_kext_base;
extern vm_offset_t vm_kext_top;
+extern vm_offset_t vm_prelink_stext;
+extern vm_offset_t vm_prelink_etext;
+extern vm_offset_t vm_prelink_sinfo;
+extern vm_offset_t vm_prelink_einfo;
+extern vm_offset_t vm_slinkedit;
+extern vm_offset_t vm_elinkedit;
#define VM_KERNEL_IS_SLID(_o) \
(((vm_offset_t)(_o) >= vm_kernel_base) && \
(((vm_offset_t)(_o) >= vm_kext_base) && \
((vm_offset_t)(_o) < vm_kext_top))
+#define VM_KERNEL_IS_PRELINKTEXT(_o) \
+ (((vm_offset_t)(_o) >= vm_prelink_stext) && \
+ ((vm_offset_t)(_o) < vm_prelink_etext))
+
+#define VM_KERNEL_IS_PRELINKINFO(_o) \
+ (((vm_offset_t)(_o) >= vm_prelink_sinfo) && \
+ ((vm_offset_t)(_o) < vm_prelink_einfo))
+
+#define VM_KERNEL_IS_KEXT_LINKEDIT(_o) \
+ (((vm_offset_t)(_o) >= vm_slinkedit) && \
+ ((vm_offset_t)(_o) < vm_elinkedit))
+
#define VM_KERNEL_SLIDE(_u) \
((vm_offset_t)(_u) + vm_kernel_slide)
*/
#define VM_KERNEL_UNSLIDE(_v) \
((VM_KERNEL_IS_SLID(_v) || \
- VM_KERNEL_IS_KEXT(_v)) ? \
+ VM_KERNEL_IS_KEXT(_v) || \
+ VM_KERNEL_IS_PRELINKTEXT(_v) || \
+ VM_KERNEL_IS_PRELINKINFO(_v) || \
+ VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ? \
(vm_offset_t)(_v) - vm_kernel_slide : \
(vm_offset_t)(_v))
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) \
((VM_KERNEL_IS_SLID(_v) || \
- VM_KERNEL_IS_KEXT(_v)) ? \
+ VM_KERNEL_IS_KEXT(_v) || \
+ VM_KERNEL_IS_PRELINKTEXT(_v) || \
+ VM_KERNEL_IS_PRELINKINFO(_v) || \
+ VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ? \
(vm_offset_t)(_v) - vm_kernel_slide : \
VM_KERNEL_ADDRPERM(_v))
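
With this change the unslide macros also recognize pointers into the prelinked kext __TEXT, prelink info, and kext __LINKEDIT ranges, so such pointers get the kernel slide subtracted rather than being address-permuted. A minimal, hypothetical use (not from this diff) is stripping the slide from a code pointer before logging it, so the value lines up with unslid symbol information:

#include <mach/vm_param.h>	/* kernel-private home of VM_KERNEL_UNSLIDE (assumed) */

/* Hypothetical sketch: report a caller address without leaking the KASLR slide. */
static void
example_report_caller(void)
{
	uintptr_t caller = (uintptr_t)__builtin_return_address(0);

	printf("called from %p (unslid %p)\n",
	    (void *)caller, (void *)VM_KERNEL_UNSLIDE(caller));
}
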
/* First step is just to get the size of the upl to find out how big the reprio info is */
- upl_lock(upl);
+ if(!upl_try_lock(upl))
+ return;
+
if (upl->decmp_io_upl == NULL) {
/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
upl_unlock(upl);
return;
/* Now again take the lock, recheck the state and grab out the required info */
- upl_lock(upl);
+ if(!upl_try_lock(upl))
+ goto out;
+
if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
upl_unlock(upl);
#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object) lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object) lck_mtx_unlock(&(object)->Lock)
+#define upl_try_lock(object) lck_mtx_try_lock(&(object)->Lock)
#define MAX_VECTOR_UPL_ELEMENTS 8
#define COPYINPHYS 3 /* from user virtual to kernel physical */
#define COPYOUTPHYS 4 /* from kernel physical to user virtual */
+
static int
copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
@lldb_type_summary(['coalition_t', 'coalition'])
-@header("type coalition summary (header tbw)")
+@header("{:>18s} {:>10s} {:>8s} {:>8s} {:>8s} {:>8s}".format("coalition", "id", "refcount", "active", "focal", "nonfocal"))
def GetCoalitionSummary(coal):
out_string = ""
- format_string = '{0: <#020x} {1: <d} {2: <d} {3: <d}'
+ format_string = '{:>#018x} {:>10d} {:>8d} {:>8d} {:>8d} {:>8d}'
+
flags_string = ''
if (coal.terminated):
flags_string += ' terminated'
if (coal.reaped):
flags_string += ' reaped'
- out_string += format_string.format(coal, coal.id, coal.ref_count, coal.active_count, )
+ out_string += format_string.format(coal, coal.id, coal.ref_count, coal.active_count, coal.focal_tasks_count, coal.non_focal_tasks_count)
+
return out_string
@lldb_type_summary(['proc', 'proc *'])
""" Routine to print a summary listing of all the coalitions
"""
global kern
+
+ role_strs = {
+ 0 : "TASK_UNSPECIFIED",
+ 1 : "TASK_FOREGROUND_APPLICATION",
+ 2 : "TASK_BACKGROUND_APPLICATION",
+ 3 : "TASK_CONTROL_APPLICATION",
+ 4 : "TASK_GRAPHICS_SERVER",
+ 5 : "TASK_THROTTLE_APPLICATION",
+ 6 : "TASK_NONUI_APPLICATION",
+ 7 : "TASK_DEFAULT_APPLICATION",
+ }
+
+ sfi_strs = {
+ 0x0 : "SFI_CLASS_UNSPECIFIED",
+ 0x1 : "SFI_CLASS_DARWIN_BG",
+ 0x2 : "SFI_CLASS_APP_NAP",
+ 0x3 : "SFI_CLASS_MANAGED_FOCAL",
+ 0x4 : "SFI_CLASS_MANAGED_NONFOCAL",
+ 0x5 : "SFI_CLASS_DEFAULT_FOCAL",
+ 0x6 : "SFI_CLASS_DEFAULT_NONFOCAL",
+ 0x7 : "SFI_CLASS_KERNEL",
+ 0x8 : "SFI_CLASS_OPTED_OUT",
+ 0x9 : "SFI_CLASS_UTILITY",
+ 0xA : "SFI_CLASS_LEGACY_FOCAL",
+ 0xB : "SFI_CLASS_LEGACY_NONFOCAL",
+ 0xC : "SFI_CLASS_USER_INITIATED_FOCAL",
+ 0xD : "SFI_CLASS_USER_INITIATED_NONFOCAL",
+ 0xE : "SFI_CLASS_USER_INTERACTIVE_FOCAL",
+ 0xF : "SFI_CLASS_USER_INTERACTIVE_NONFOCAL",
+ 0x10 : "SFI_CLASS_MAINTENANCE",
+ }
+
+
print GetCoalitionSummary.header
for c in kern.coalitions:
print GetCoalitionSummary(c)
+ for task in IterateQueue(c.tasks, "task_t", "coalition_tasks"):
+ print "\t" + hex(task) + " " + GetProcNameForTask(task) + " " + role_strs[int(task.effective_policy.t_role)]
+ for thread in IterateQueue(task.threads, "thread_t", "task_threads"):
+ print "\t\t" + hex(thread) + " " + sfi_strs[int(thread.sfi_class)]
+
@lldb_command('showalltasks')
def ShowAllTasks(cmd_args=None):
DSTROOT?=$(shell /bin/pwd)
-CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os $(ISYSROOT)
+CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
all: $(DSTROOT)/jitter
#include <stdio.h>
#include <math.h>
#include <sys/wait.h>
-#include <sys/syscall.h>
+#include <sys/kdebug.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <semaphore.h>
/* Too much: cut a tracepoint for a debugger */
if (jitter_arr[i] >= too_much) {
- syscall(SYS_kdebug_trace, 0xeeeeeeee, 0, 0, 0, 0);
+ kdebug_trace(0xeeeee0 | DBG_FUNC_NONE, 0, 0, 0, 0);
}
if (wakeup_second_thread) {
/* Too much: cut a tracepoint for a debugger */
if (secargs->wakeup_second_jitter_arr[i] >= secargs->too_much) {
- syscall(SYS_kdebug_trace, 0xeeeeeeef, 0, 0, 0, 0);
+ kdebug_trace(0xeeeee4 | DBG_FUNC_NONE, 0, 0, 0, 0);
}
kret = semaphore_signal(secargs->return_semaphore);
#include <math.h>
#include <sys/wait.h>
#include <sys/param.h>
-#include <sys/syscall.h>
+#include <sys/kdebug.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <semaphore.h>
printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
}
- _tmp = syscall(SYS_kdebug_trace, 0xEEEEEEEE, 0, 0, 0, 0);
+ _tmp = kdebug_trace(0xeeeee0 | DBG_FUNC_NONE,
+ worst_latencies_from_first_ns[i] >> 32,
+ worst_latencies_from_first_ns[i] & 0xFFFFFFFF,
+ traceworthy_latency_ns >> 32,
+ traceworthy_latency_ns & 0xFFFFFFFF);
}
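
The jitter tests above emit raw debugid values; a userspace tracepoint is more commonly composed from a class, subclass, and code using the helpers in <sys/kdebug.h>. The sketch below is a hypothetical illustration (not part of this change), assuming the kdebug_trace() wrapper used by the tests; EXAMPLE_TRACE_CODE, example_traced_work, and the 0x01/0x02 subclass/code values are arbitrary.

#include <sys/kdebug.h>
#include <unistd.h>

/* Hypothetical sketch: compose a debugid with KDBG_CODE() and bracket a
 * region of interest with a START/END pair for later inspection in a trace tool. */
#define EXAMPLE_TRACE_CODE KDBG_CODE(DBG_APPS, 0x01, 0x02)

static void
example_traced_work(void)
{
	kdebug_trace(EXAMPLE_TRACE_CODE | DBG_FUNC_START, 0, 0, 0, 0);
	usleep(1000);	/* the interval being measured */
	kdebug_trace(EXAMPLE_TRACE_CODE | DBG_FUNC_END, 0, 0, 0, 0);
}
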
/* Let worker threads get back to sleep... */
char *new_argv[argc + 1 + 1 /* NULL */];
int i;
char prog[PATH_MAX];
- int32_t prog_size = PATH_MAX;
+ uint32_t prog_size = PATH_MAX;
ret = _NSGetExecutablePath(prog, &prog_size);
if (ret != 0) err(1, "_NSGetExecutablePath");