]> git.saurik.com Git - apple/xnu.git/commitdiff
xnu-2782.10.72.tar.gz os-x-10102 v2782.10.72
authorApple <opensource@apple.com>
Fri, 5 Jun 2015 01:16:16 +0000 (01:16 +0000)
committerApple <opensource@apple.com>
Fri, 5 Jun 2015 01:16:16 +0000 (01:16 +0000)
59 files changed:
bsd/dev/i386/systemcalls.c
bsd/dev/munge.c
bsd/dev/x86_64/munge.s [deleted file]
bsd/kern/kdebug.c
bsd/kern/kern_credential.c
bsd/kern/kern_mib.c
bsd/kern/kern_proc.c
bsd/kern/pthread_shims.c
bsd/kern/syscalls.master
bsd/kern/trace.codes
bsd/miscfs/devfs/devfs.h
bsd/net/dlil.c
bsd/netinet/mptcp_var.h
bsd/sys/codesign.h
bsd/sys/kdebug.h
bsd/sys/make_symbol_aliasing.sh
bsd/sys/munge.h
bsd/sys/pthread_shims.h
config/MasterVersion
iokit/IOKit/pwr_mgt/IOPM.h
iokit/IOKit/pwr_mgt/IOPMPrivate.h
iokit/IOKit/pwr_mgt/RootDomain.h
iokit/Kernel/IODataQueue.cpp
iokit/Kernel/IOMemoryDescriptor.cpp
iokit/Kernel/IOPMrootDomain.cpp
iokit/Kernel/IOSharedDataQueue.cpp
libkern/c++/OSKext.cpp
libsyscall/Libsyscall.xcodeproj/project.pbxproj
libsyscall/mach/.gitignore [deleted file]
libsyscall/wrappers/kdebug_trace.c [new file with mode: 0644]
osfmk/i386/Diagnostics.c
osfmk/i386/Diagnostics.h
osfmk/i386/commpage/commpage.c
osfmk/i386/commpage/commpage.h
osfmk/i386/cpu_capabilities.h
osfmk/i386/cpu_data.h
osfmk/i386/cpuid.c
osfmk/i386/cpuid.h
osfmk/i386/fpu.c
osfmk/i386/i386_vm_init.c
osfmk/i386/proc_reg.h
osfmk/kern/coalition.c
osfmk/kern/coalition.h
osfmk/kern/sfi.c
osfmk/kern/task.c
osfmk/kern/task.h
osfmk/kern/task_policy.c
osfmk/kern/thread.c
osfmk/kern/thread.h
osfmk/mach/machine.h
osfmk/mach/thread_policy.h
osfmk/mach/vm_param.h
osfmk/vm/vm_object.c
osfmk/vm/vm_pageout.h
osfmk/x86_64/copyio.c
tools/lldbmacros/process.py
tools/tests/jitter/Makefile
tools/tests/jitter/timer_jitter.c
tools/tests/zero-to-n/zero-to-n.c

index 8b2c8ab98f2ba24b17cdcb9999aee9d491cf4c25..9f57943df18a8df3c96dadd70861d44e7fd60e6c 100644 (file)
@@ -69,6 +69,8 @@ extern void *find_user_regs(thread_t);
 /* dynamically generated at build time based on syscalls.master */
 extern const char *syscallnames[];
 
+#define code_is_kdebug_trace(code) (((code) == SYS_kdebug_trace) || ((code) == SYS_kdebug_trace64))
+
 /*
  * Function:   unix_syscall
  *
@@ -151,8 +153,8 @@ unix_syscall(x86_saved_state_t *state)
                        /* NOTREACHED */
                }
 
-               if (__probable(code != 180)) {
-                       int *ip = (int *)vt;
+               if (__probable(!code_is_kdebug_trace(code))) {
+                       int *ip = (int *)vt;
 
                        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                                BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
@@ -239,7 +241,7 @@ unix_syscall(x86_saved_state_t *state)
                 */
                throttle_lowpri_io(1);
        }
-       if (__probable(code != 180))
+       if (__probable(!code_is_kdebug_trace(code)))
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                        BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                        error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
@@ -320,7 +322,7 @@ unix_syscall64(x86_saved_state_t *state)
                memcpy(vt, args_start_at_rdi ? &regs->rdi : &regs->rsi, args_in_regs * sizeof(syscall_arg_t));
 
 
-               if (code != 180) {
+               if (!code_is_kdebug_trace(code)) {
                        uint64_t *ip = (uint64_t *)vt;
 
                        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
@@ -432,7 +434,7 @@ unix_syscall64(x86_saved_state_t *state)
                 */
                throttle_lowpri_io(1);
        }
-       if (__probable(code != 180))
+       if (__probable(!code_is_kdebug_trace(code)))
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                        BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                        error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
@@ -559,7 +561,7 @@ unix_syscall_return(int error)
                 */
                throttle_lowpri_io(1);
        }
-       if (code != 180)
+       if (!code_is_kdebug_trace(code))
                KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
                        BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
                        error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);
index 36da48eb41c42776649a5b03a8f2b06a78ca3df7..adaba8e1ae43bf60c9b6960ba84caf6662317a84 100644 (file)
@@ -220,6 +220,19 @@ munge_wlll(void *args)
        out_args[0] = in_args[0];
 }
 
+void 
+munge_wllll(void *args)
+{
+       volatile uint64_t *out_args = (volatile uint64_t*)args;
+       volatile uint32_t *in_args = (volatile uint32_t*)args;
+
+       out_args[4] = *(uint64_t*)&in_args[7];
+       out_args[3] = *(uint64_t*)&in_args[5];
+       out_args[2] = *(uint64_t*)&in_args[3];
+       out_args[1] = *(uint64_t*)&in_args[1];
+       out_args[0] = in_args[0];
+}
+
 void
 munge_wllww(void *args)
 {
diff --git a/bsd/dev/x86_64/munge.s b/bsd/dev/x86_64/munge.s
deleted file mode 100644 (file)
index e69de29..0000000
index a9dad0b4e5cd2715b7e6be5ad7424e21d262fd27..1b89e2d4aa3ad573c6e23a0436161446c60c355f 100644 (file)
@@ -113,6 +113,7 @@ static kd_iop_t* kd_iops = NULL;
 /* XXX should have prototypes, but Mach does not provide one */
 void task_act_iterate_wth_args(task_t, void(*)(thread_t, void *), void *);
 int cpu_number(void);  /* XXX <machine/...> include path broken */
+void commpage_update_kdebug_enable(void); /* XXX sign */
 
 /* XXX should probably be static, but it's debugging code... */
 int kdbg_read(user_addr_t, size_t *, vnode_t, vfs_context_t);
@@ -396,10 +397,12 @@ kdbg_set_tracing_enabled(boolean_t enabled, uint32_t trace_type)
                kdebug_enable |= trace_type;
                kd_ctrl_page.kdebug_slowcheck &= ~SLOW_NOLOG;
                kd_ctrl_page.enabled = 1;
+               commpage_update_kdebug_enable();
        } else {
                kdebug_enable &= ~(KDEBUG_ENABLE_TRACE|KDEBUG_ENABLE_PPT);
                kd_ctrl_page.kdebug_slowcheck |= SLOW_NOLOG;
                kd_ctrl_page.enabled = 0;
+               commpage_update_kdebug_enable();
        }
        lck_spin_unlock(kds_spin_lock);
        ml_set_interrupts_enabled(s);
@@ -734,6 +737,7 @@ allocate_storage_unit(int cpu)
                if (kdbp_vict == NULL) {
                        kdebug_enable = 0;
                        kd_ctrl_page.enabled = 0;
+                       commpage_update_kdebug_enable();
                        retval = FALSE;
                        goto out;
                }
@@ -952,16 +956,7 @@ out1:
 
 
 
-void
-kernel_debug_internal(
-       uint32_t        debugid,
-       uintptr_t       arg1,
-       uintptr_t       arg2,
-       uintptr_t       arg3,
-       uintptr_t       arg4,
-       uintptr_t       arg5);
-
-__attribute__((always_inline)) void
+static void
 kernel_debug_internal(
        uint32_t        debugid,
        uintptr_t       arg1,
@@ -1255,20 +1250,48 @@ kernel_debug_early_end(void)
 }
 
 /*
- * Support syscall SYS_kdebug_trace
+ * Support syscall SYS_kdebug_trace. U64->K32 args may get truncated in kdebug_trace64
  */
 int
-kdebug_trace(__unused struct proc *p, struct kdebug_trace_args *uap, __unused int32_t *retval)
+kdebug_trace(struct proc *p, struct kdebug_trace_args *uap, int32_t *retval)
+{
+       struct kdebug_trace64_args uap64;
+
+       uap64.code = uap->code;
+       uap64.arg1 = uap->arg1;
+       uap64.arg2 = uap->arg2;
+       uap64.arg3 = uap->arg3;
+       uap64.arg4 = uap->arg4;
+
+       return kdebug_trace64(p, &uap64, retval);
+}
+
+/*
+ * Support syscall SYS_kdebug_trace64. 64-bit args on K32 will get truncated to fit in 32-bit record format.
+ */
+int kdebug_trace64(__unused struct proc *p, struct kdebug_trace64_args *uap, __unused int32_t *retval)
 {
+       uint8_t code_class;
+
+       /*
+        * Not all classes are supported for injection from userspace, especially ones used by the core
+        * kernel tracing infrastructure.
+        */
+       code_class = EXTRACT_CLASS(uap->code);
+
+       switch (code_class) {
+               case DBG_TRACE:
+                       return EPERM;
+       }
+
        if ( __probable(kdebug_enable == 0) )
-               return(0);
-       kernel_debug_internal(uap->code, uap->arg1, uap->arg2, uap->arg3, uap->arg4, (uintptr_t)thread_tid(current_thread()));
+               return(0); 
+
+       kernel_debug_internal(uap->code, (uintptr_t)uap->arg1, (uintptr_t)uap->arg2, (uintptr_t)uap->arg3, (uintptr_t)uap->arg4, (uintptr_t)thread_tid(current_thread()));
 
        return(0);
 }
 
-
 static void
 kdbg_lock_init(void)
 {
@@ -3140,6 +3163,7 @@ kdbg_dump_trace_to_file(const char *filename)
                         */
                        kdebug_enable = 0;
                        kd_ctrl_page.enabled = 0;
+                       commpage_update_kdebug_enable();
                        return;
                }
        }
@@ -3147,6 +3171,7 @@ kdbg_dump_trace_to_file(const char *filename)
 
        kdebug_enable = 0;
        kd_ctrl_page.enabled = 0;
+       commpage_update_kdebug_enable();
 
        ctx = vfs_context_kernel();
 
index 911f0e710105de53bc5a0179f0700146c6781e2c..b3c0d357afa8c42c01071e9257352ec9f76fb44d 100644 (file)
@@ -619,9 +619,10 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3
        }
 
        /*
-        * Beyond this point, we must be the resolver process.
+        * Beyond this point, we must be the resolver process. We verify this
+        * by confirming the resolver credential and pid.
         */
-       if (current_proc()->p_pid != kauth_resolver_identity) {
+       if ((kauth_cred_getuid(kauth_cred_get()) != 0) || (current_proc()->p_pid != kauth_resolver_identity)) {
                KAUTH_DEBUG("RESOLVER - call from bogus resolver %d\n", current_proc()->p_pid);
                return(EPERM);
        }
@@ -923,7 +924,7 @@ kauth_resolver_complete(user_addr_t message)
        struct kauth_identity_extlookup extl;
        struct kauth_resolver_work *workp;
        struct kauth_resolver_work *killp;
-       int error, result;
+       int error, result, request_flags;
 
        /*
         * Copy in the mesage, including the extension field, since we are
@@ -1004,6 +1005,10 @@ kauth_resolver_complete(user_addr_t message)
                TAILQ_FOREACH(workp, &kauth_resolver_submitted, kr_link) {
                        /* found it? */
                        if (workp->kr_seqno == extl.el_seqno) {
+                               /*
+                                * Take a snapshot of the original request flags.
+                                */
+                               request_flags = workp->kr_work.el_flags;
 
                                /*
                                 * Get the request of the submitted queue so
@@ -1041,13 +1046,21 @@ kauth_resolver_complete(user_addr_t message)
                                 * issue and is easily detectable by comparing
                                 * time to live on last response vs. time of
                                 * next request in the resolver logs.
+                                *
+                                * A malicious/faulty resolver could overwrite
+                                * part of a user's address space if it returns
+                                * flags that do not match the original request's flags.
                                 */
-                               if (extl.el_flags & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
+                               if ((extl.el_flags & request_flags) & (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
                                        size_t actual;  /* notused */
 
                                        KAUTH_RESOLVER_UNLOCK();
                                        error = copyinstr(extl.el_extend, CAST_DOWN(void *, workp->kr_extend), MAXPATHLEN, &actual);
                                        KAUTH_RESOLVER_LOCK();
+                               } else if (extl.el_flags &  (KAUTH_EXTLOOKUP_VALID_PWNAM|KAUTH_EXTLOOKUP_VALID_GRNAM)) {
+                                       error = EFAULT;
+                                       KAUTH_DEBUG("RESOLVER - resolver returned mismatching extension flags (%d), request contained (%d)",
+                                                       extl.el_flags, request_flags);
                                }
 
                                /*
@@ -1117,7 +1130,7 @@ kauth_identity_init(void)
  * Parameters: uid
  *
  * Returns:    NULL                            Insufficient memory to satisfy
- *                                             the request
+ *                                             the request or bad parameters
  *             !NULL                           A pointer to the allocated
  *                                             structure, filled in
  *
@@ -1146,8 +1159,16 @@ kauth_identity_alloc(uid_t uid, gid_t gid, guid_t *guidp, time_t guid_expiry,
                        kip->ki_valid = KI_VALID_UID;
                }
                if (supgrpcnt) {
+                       /*
+                        * A malicious/faulty resolver could return bad values
+                        */
+                       assert(supgrpcnt >= 0);
                        assert(supgrpcnt <= NGROUPS);
                        assert(supgrps != NULL);
+
+                       if ((supgrpcnt < 0) || (supgrpcnt > NGROUPS) || (supgrps == NULL)) {
+                               return NULL;
+                       }
                        if (kip->ki_valid & KI_VALID_GID)
                                panic("can't allocate kauth identity with both gid and supplementary groups");
                        kip->ki_supgrpcnt = supgrpcnt;
index 0783d51d4f922b896b3a07f8108502b858db98d4..b3bbdd9d6703f2a77896c3c6ec1742d20b5df2be 100644 (file)
@@ -447,6 +447,7 @@ SYSCTL_PROC(_hw_optional, OID_AUTO, bmi1,   CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KER
 SYSCTL_PROC(_hw_optional, OID_AUTO, bmi2,      CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasBMI2, 0, sysctl_cpu_capability, "I", "");
 SYSCTL_PROC(_hw_optional, OID_AUTO, rtm,       CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasRTM, 0, sysctl_cpu_capability, "I", "");
 SYSCTL_PROC(_hw_optional, OID_AUTO, hle,       CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasHLE, 0, sysctl_cpu_capability, "I", "");
+SYSCTL_PROC(_hw_optional, OID_AUTO, adx,       CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (void *) kHasADX, 0, sysctl_cpu_capability, "I", "");
 #else
 #error Unsupported arch
 #endif /* !__i386__ && !__x86_64 && !__arm__ && ! __arm64__ */
index ce7a65a8e89800216ef7441563aa1385a743be16..7b8aace099e8fdca24ac9ff72e10b4e2e7d3a1e3 100644 (file)
@@ -2080,7 +2080,8 @@ csops_internal(pid_t pid, int ops, user_addr_t uaddr, user_size_t usersize, user
                            CS_KILL | CS_EXEC_SET_KILL |
                            CS_RESTRICT |
                            CS_REQUIRE_LV |
-                           CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT;
+                           CS_ENFORCEMENT | CS_EXEC_SET_ENFORCEMENT |
+                           CS_ENTITLEMENTS_VALIDATED;
 
                        proc_lock(pt);
                        if (pt->p_csflags & CS_VALID)
index 4e41a19e4a62a4c172e7ba83daeebd0994843769..2d5b931e370f5750063d7bd9b4bdec4c460d0d4f 100644 (file)
@@ -172,7 +172,7 @@ static boolean_t proc_usynch_thread_qos_add_override(struct uthread *uth, uint64
        task_t task = current_task();
        thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
        
-       return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource);
+       return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource, USER_ADDR_NULL, THREAD_QOS_OVERRIDE_TYPE_UNKNOWN);
 }
 
 static boolean_t proc_usynch_thread_qos_remove_override(struct uthread *uth, uint64_t tid)
@@ -180,7 +180,28 @@ static boolean_t proc_usynch_thread_qos_remove_override(struct uthread *uth, uin
        task_t task = current_task();
        thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
 
-       return proc_thread_qos_remove_override(task, thread, tid);
+       return proc_thread_qos_remove_override(task, thread, tid, USER_ADDR_NULL, THREAD_QOS_OVERRIDE_TYPE_UNKNOWN);
+}
+
+static boolean_t proc_usynch_thread_qos_add_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
+{
+       thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+       
+       return proc_thread_qos_add_override(task, thread, tid, override_qos, first_override_for_resource, resource, resource_type);
+}
+
+static boolean_t proc_usynch_thread_qos_remove_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
+{
+       thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+
+       return proc_thread_qos_remove_override(task, thread, tid, resource, resource_type);
+}
+
+static boolean_t proc_usynch_thread_qos_reset_override_for_resource(task_t task, struct uthread *uth, uint64_t tid, user_addr_t resource, int resource_type)
+{
+       thread_t thread = uth ? uth->uu_thread : THREAD_NULL;
+
+       return proc_thread_qos_reset_override(task, thread, tid, resource, resource_type);
 }
 
 /* kernel (core) to kext shims */
@@ -493,6 +514,10 @@ static struct pthread_callbacks_s pthread_callbacks = {
        .proc_usynch_thread_qos_remove_override = proc_usynch_thread_qos_remove_override,
 
        .qos_main_thread_active = qos_main_thread_active,
+
+       .proc_usynch_thread_qos_add_override_for_resource = proc_usynch_thread_qos_add_override_for_resource,
+       .proc_usynch_thread_qos_remove_override_for_resource = proc_usynch_thread_qos_remove_override_for_resource,
+       .proc_usynch_thread_qos_reset_override_for_resource = proc_usynch_thread_qos_reset_override_for_resource,
 };
 
 pthread_callbacks_t pthread_kern = &pthread_callbacks;
index d19267e8467007ee6628ebcee4fc4b6e168d4e73..624fd33dcb3d136df2e66bab4a6c3536d5a5a4d2 100644 (file)
 176    AUE_NULL        ALL     { int nosys(void); }   { old add_profil }
 177    AUE_NULL        ALL     { int nosys(void); } 
 178    AUE_NULL        ALL     { int nosys(void); } 
-179    AUE_NULL        ALL     { int nosys(void); } 
-180    AUE_KDEBUGTRACE ALL     { int kdebug_trace(int code, int arg1, int arg2, int arg3, int arg4, int arg5) NO_SYSCALL_STUB; } 
+179    AUE_KDEBUGTRACE ALL     { int kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) NO_SYSCALL_STUB; } 
+180    AUE_KDEBUGTRACE ALL     { int kdebug_trace(uint32_t code, u_long arg1, u_long arg2, u_long arg3, u_long arg4) NO_SYSCALL_STUB; } 
 181    AUE_SETGID      ALL     { int setgid(gid_t gid); } 
 182    AUE_SETEGID     ALL     { int setegid(gid_t egid); } 
 183    AUE_SETEUID     ALL     { int seteuid(uid_t euid); } 
index 904d2311f916f48a1a64d0236d5bf76c3946fdea..e08f59ff53766cc25fdd7e24330b80778ccd7923 100644 (file)
 0x40c02c0      BSC_add_profil
 0x40c02c4      BSC_#177
 0x40c02c8      BSC_#178
-0x40c02cc      BSC_#179
+0x40c02cc      BSC_kdebug_trace64
 0x40c02d0      BSC_kdebug_trace
 0x40c02d4      BSC_setgid
 0x40c02d8      BSC_setegid
 0x40c0780      BSC_recvmsg_x
 0x40c0784      BSC_sendmsg_x
 0x40c0788      BSC_thread_selfusage
-0x40c07a4      BSC_mremap_extended
+0x40c07a4      BSC_mremap_encrypted
 0x40e0104      BSC_msync_extended_info
 0x40e0264      BSC_pread_extended_info
 0x40e0268      BSC_pwrite_extended_info
 0x5310274      CPUPM_PST_QOS_SWITCH2
 0x5310278      CPUPM_PST_UIB
 0x531027C      CPUPM_PST_PLIMIT_UIB
+0x5310280      CPUPM_IO
 0x5330000      HIBERNATE
 0x5330004      HIBERNATE_WRITE_IMAGE
 0x5330008      HIBERNATE_MACHINE_INIT
index 88e63b4f5bc9cde1908820e4e799b2f6897e26ce..6615b97b5214b45596bc0d11812cf2eea7d73572 100644 (file)
@@ -148,6 +148,7 @@ __END_DECLS
 #define        GID_BIN         7
 #define        GID_GAMES       13
 #define        GID_DIALER      68
+#define GID_WINDOWSERVER 88
 #endif /* __APPLE_API_PRIVATE */
 
 #endif /* !_MISCFS_DEVFS_DEVFS_H_ */
index 28d307d133e9b6fb1c1c24f3bede3f1e92d8d7e3..0df5c6ea43a78fd65e51610faabc63b4fa603723 100644 (file)
@@ -3473,10 +3473,18 @@ if_mcasts_update(struct ifnet *ifp)
        return (0);
 }
 
+       
+#define TMP_IF_PROTO_ARR_SIZE  10
 static int
 dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
 {
-       struct ifnet_filter *filter;
+       struct ifnet_filter *filter = NULL;
+       struct if_proto *proto = NULL;
+       int if_proto_count = 0;
+       struct if_proto **tmp_ifproto_arr = NULL;
+       struct if_proto *tmp_ifproto_stack_arr[TMP_IF_PROTO_ARR_SIZE] = {NULL};
+       int tmp_ifproto_arr_idx = 0;
+       bool tmp_malloc = false;
 
        /* Get an io ref count if the interface is attached */
        if (!ifnet_is_attached(ifp, 1))
@@ -3502,42 +3510,68 @@ dlil_event_internal(struct ifnet *ifp, struct kev_msg *event)
        if_flt_monitor_unbusy(ifp);
        lck_mtx_unlock(&ifp->if_flt_lock);
 
+       /*
+        * An embedded tmp_list_entry in if_proto may still get
+        * over-written by another thread after giving up ifnet lock,
+        * therefore we are avoiding embedded pointers here.
+        */
        ifnet_lock_shared(ifp);
-       if (ifp->if_proto_hash != NULL) {
+       if_proto_count = dlil_ifp_proto_count(ifp);
+       if (if_proto_count) {
                int i;
+               VERIFY(ifp->if_proto_hash != NULL);
+               if (if_proto_count <= TMP_IF_PROTO_ARR_SIZE) {
+                       tmp_ifproto_arr = tmp_ifproto_stack_arr;
+               } else {
+                       MALLOC(tmp_ifproto_arr, struct if_proto **,
+                           sizeof (*tmp_ifproto_arr) * if_proto_count,
+                           M_TEMP, M_ZERO);
+                       if (tmp_ifproto_arr == NULL) {
+                               ifnet_lock_done(ifp);
+                               goto cleanup;
+                       }
+                       tmp_malloc = true;
+               }
 
                for (i = 0; i < PROTO_HASH_SLOTS; i++) {
-                       struct if_proto *proto;
-
                        SLIST_FOREACH(proto, &ifp->if_proto_hash[i],
                            next_hash) {
-                               proto_media_event eventp =
-                                   (proto->proto_kpi == kProtoKPI_v1 ?
-                                   proto->kpi.v1.event :
-                                   proto->kpi.v2.event);
-
-                               if (eventp != NULL) {
-                                       if_proto_ref(proto);
-                                       ifnet_lock_done(ifp);
-
-                                       eventp(ifp, proto->protocol_family,
-                                           event);
-
-                                       ifnet_lock_shared(ifp);
-                                       if_proto_free(proto);
-                               }
+                               if_proto_ref(proto);
+                               tmp_ifproto_arr[tmp_ifproto_arr_idx] = proto;
+                               tmp_ifproto_arr_idx++;
                        }
                }
+               VERIFY(if_proto_count == tmp_ifproto_arr_idx);
        }
        ifnet_lock_done(ifp);
 
+       for (tmp_ifproto_arr_idx = 0; tmp_ifproto_arr_idx < if_proto_count;
+           tmp_ifproto_arr_idx++) {
+               proto = tmp_ifproto_arr[tmp_ifproto_arr_idx];
+               VERIFY(proto != NULL);
+               proto_media_event eventp =
+                   (proto->proto_kpi == kProtoKPI_v1 ?
+                   proto->kpi.v1.event :
+                   proto->kpi.v2.event);
+
+               if (eventp != NULL) {
+                       eventp(ifp, proto->protocol_family,
+                           event);
+               }
+               if_proto_free(proto);
+       }
+
+cleanup:       
+       if (tmp_malloc) {
+               FREE(tmp_ifproto_arr, M_TEMP);
+       }
+
        /* Pass the event to the interface */
        if (ifp->if_event != NULL)
                ifp->if_event(ifp, event);
 
        /* Release the io ref count */
        ifnet_decr_iorefcnt(ifp);
-
 done:
        return (kev_post_msg(event));
 }
index 474477b2670bd425e7680c83aed541a78a2faa8c..9faaba48ff06ecb236562cd3a11191f463c0db04 100644 (file)
@@ -65,9 +65,9 @@ struct mptses {
        uint32_t        mpte_thread_active;     /* thread is running */
        uint32_t        mpte_thread_reqs;       /* # of requests for thread */
        struct mptsub   *mpte_active_sub;       /* ptr to last active subf */
-       uint8_t mpte_flags;             /* per mptcp session flags */
-       uint8_t mpte_lost_aid;          /* storing lost address id */
-       uint8_t mpte_addrid_last;       /* storing address id parm */
+       uint8_t mpte_flags;                     /* per mptcp session flags */
+       uint8_t mpte_lost_aid;                  /* storing lost address id */
+       uint8_t mpte_addrid_last;               /* storing address id parm */
 };
 
 /*
index 6461895d5bfe8c1703fe367f86243ecc3f25a058..bf908f8f875df7a5b86ebc6a805774291501c06d 100644 (file)
@@ -41,6 +41,7 @@
 #define CS_RESTRICT            0x0000800       /* tell dyld to treat restricted */
 #define CS_ENFORCEMENT         0x0001000       /* require enforcement */
 #define CS_REQUIRE_LV          0x0002000       /* require library validation */
+#define CS_ENTITLEMENTS_VALIDATED      0x0004000
 
 #define        CS_ALLOWED_MACHO        0x00ffffe
 
index 80a04ea1f3eaacd7b598a8f2408d6baaaaf01c71..186ace8f3818a4f23332dd92e690949828f71c45 100644 (file)
@@ -43,15 +43,18 @@ __BEGIN_DECLS
 
 #include <mach/clock_types.h>
 #include <stdint.h>
-#if    defined(KERNEL_BUILD)
-#include <kdebug.h>
-#endif /* KERNEL_BUILD */
+
+#ifndef KERNEL
+#include <Availability.h>
+#endif
 
 #ifdef XNU_KERNEL_PRIVATE
 #include <stdint.h>
 #include <mach/branch_predicates.h>
 #endif
 
+#ifdef KERNEL_PRIVATE
+
 typedef enum
 {
        KD_CALLBACK_KDEBUG_ENABLED,             // Trace is now enabled. No arguments
@@ -100,33 +103,7 @@ extern void kernel_debug_enter(
        uintptr_t       threadid
        );
 
-
-/*
- * state bits for hfs_update event
- */
-#define DBG_HFS_UPDATE_ACCTIME   0x01
-#define DBG_HFS_UPDATE_MODTIME  0x02
-#define DBG_HFS_UPDATE_CHGTIME  0x04
-#define DBG_HFS_UPDATE_MODIFIED         0x08
-#define DBG_HFS_UPDATE_FORCE    0x10
-#define DBG_HFS_UPDATE_DATEADDED 0x20
-
-
-/*
- * types of faults that vm_fault handles
- * and creates trace entries for
- */
-#define DBG_ZERO_FILL_FAULT   1
-#define DBG_PAGEIN_FAULT      2
-#define DBG_COW_FAULT         3
-#define DBG_CACHE_HIT_FAULT   4
-#define DBG_NZF_PAGE_FAULT    5
-#define DBG_GUARD_FAULT              6 
-#define DBG_PAGEINV_FAULT     7
-#define DBG_PAGEIND_FAULT     8
-#define DBG_COMPRESSOR_FAULT  9
-#define DBG_COMPRESSOR_SWAPIN_FAULT  10
-
+#endif /* KERNEL_PRIVATE */
 
 /* The debug code consists of the following 
 *
@@ -166,9 +143,34 @@ extern void kernel_debug_enter(
 #define DBG_BANK                40
 #define DBG_XPC                 41
 #define DBG_ATM                 42
+#define DBG_ARIADNE             43
+
 
 #define DBG_MIG                        255
 
+#ifdef PRIVATE
+/*
+ * OS components can use the full precision of the "code" field
+ * (Class, SubClass, Code) to inject events using kdebug_trace() by
+ * using:
+ *
+ * kdebug_trace(KDBG_CODE(DBG_XPC, 15, 1) | DBG_FUNC_NONE, 1, 2, 3, 4);
+ *
+ * These trace points can be included in production code, since they
+ * use reserved, non-overlapping ranges. The performance impact when
+ * kernel tracing is not enabled is minimal. Classes can be reserved
+ * by filing a Radar in xnu|all.
+ *
+ * 64-bit arguments may be truncated if the system is using a 32-bit
+ * kernel.
+ *
+ * On error, -1 will be returned and errno will indicate the error.
+ */
+#ifndef KERNEL
+extern int kdebug_trace(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4) __OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA);
+#endif
+#endif /* PRIVATE */
+
 /* **** The Kernel Debug Sub Classes for Mach (DBG_MACH) **** */
 #define        DBG_MACH_EXCP_KTRAP_x86 0x02    /* Kernel Traps on x86 */
 #define        DBG_MACH_EXCP_DFLT      0x03    /* Data Translation Fault */
@@ -240,6 +242,18 @@ extern void kernel_debug_enter(
 #define MACH_MULTIQ_GROUP     2
 #define MACH_MULTIQ_GLOBAL    3
 
+/* Arguments for vm_fault (DBG_MACH_VM) */
+#define DBG_ZERO_FILL_FAULT   1
+#define DBG_PAGEIN_FAULT      2
+#define DBG_COW_FAULT         3
+#define DBG_CACHE_HIT_FAULT   4
+#define DBG_NZF_PAGE_FAULT    5
+#define DBG_GUARD_FAULT              6 
+#define DBG_PAGEINV_FAULT     7
+#define DBG_PAGEIND_FAULT     8
+#define DBG_COMPRESSOR_FAULT  9
+#define DBG_COMPRESSOR_SWAPIN_FAULT  10
+
 /* Codes for IPC (DBG_MACH_IPC) */
 #define        MACH_TASK_SUSPEND                       0x0     /* Suspended a task */
 #define        MACH_TASK_RESUME                        0x1     /* Resumed a task */
@@ -403,6 +417,16 @@ extern void kernel_debug_enter(
 #define DBG_THROTTLE  0x11    /* I/O Throttling events */      
 #define DBG_CONTENT_PROT 0xCF /* Content Protection Events: see bsd/sys/cprotect.h */
 
+/*
+ * For Kernel Debug Sub Class DBG_HFS, state bits for hfs_update event
+ */
+#define DBG_HFS_UPDATE_ACCTIME   0x01
+#define DBG_HFS_UPDATE_MODTIME  0x02
+#define DBG_HFS_UPDATE_CHGTIME  0x04
+#define DBG_HFS_UPDATE_MODIFIED         0x08
+#define DBG_HFS_UPDATE_FORCE    0x10
+#define DBG_HFS_UPDATE_DATEADDED 0x20
+
 /* The Kernel Debug Sub Classes for BSD */
 #define DBG_BSD_PROC           0x01    /* process/signals related */
 #define DBG_BSD_MEMSTAT                0x02    /* memorystatus / jetsam operations */
@@ -563,6 +587,7 @@ extern void kernel_debug_enter(
 #define DYLDDBG_CODE(SubClass,code) KDBG_CODE(DBG_DYLD, SubClass, code)
 #define QTDBG_CODE(SubClass,code) KDBG_CODE(DBG_QT, SubClass, code)
 #define APPSDBG_CODE(SubClass,code) KDBG_CODE(DBG_APPS, SubClass, code)
+#define ARIADNEDBG_CODE(SubClass, code) KDBG_CODE(DBG_ARIADNE, SubClass, code)
 #define CPUPM_CODE(code) IOKDBG_CODE(DBG_IOCPUPM, code)
 
 #define KMEM_ALLOC_CODE MACHDBG_CODE(DBG_MACH_LEAKS, 0)
index a4a9b881f2cfe52af1c0a7c5f15cecaeb7302193..ef47e37c87e0c812739d345242429661efbc02d6 100755 (executable)
@@ -88,10 +88,24 @@ for ver in $(${SDKROOT}/usr/local/libexec/availability.pl --ios) ; do
 done
 
 for ver in $(${SDKROOT}/usr/local/libexec/availability.pl --macosx) ; do
-    ver_major=${ver%.*}
-    ver_minor=${ver#*.}
-    value=$(printf "%d%d0" ${ver_major} ${ver_minor})
-    str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+    set -- $(echo "$ver" | tr '.' ' ')
+    ver_major=$1
+    ver_minor=$2
+    ver_rel=$3
+    if [ -z "$ver_rel" ]; then
+       ver_rel=0
+    fi
+    if [ "$ver_major" -lt 10 -o \( "$ver_major" -eq 10 -a "$ver_minor" -lt 10 \) ]; then
+       value=$(printf "%d%d0" ${ver_major} ${ver_minor})
+       str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+    else
+       value=$(printf "%d%02d%02d" ${ver_major} ${ver_minor} ${ver_rel})
+       if [ "$ver_rel" -gt 0 ]; then
+           str=$(printf "__MAC_%d_%d_%d" ${ver_major} ${ver_minor} ${ver_rel})
+       else
+           str=$(printf "__MAC_%d_%d" ${ver_major} ${ver_minor})
+       fi
+    fi
     echo "#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= ${value}"
     echo "#define __DARWIN_ALIAS_STARTING_MAC_${str}(x) x"
     echo "#else"
index 89b71a2b7c836f41190709f84ef5b8b1db0ebe97..122e3c527efada23a0be4dff5cc35541cd6a5b9c 100644 (file)
@@ -91,6 +91,7 @@ void munge_wlwwlwlw(void *args);
 void munge_wll(void *args);
 void munge_wllww(void *args);
 void munge_wlll(void *args);
+void munge_wllll(void *args);
 void munge_wllwwll(void *args);
 void munge_wwwlw(void *args);
 void munge_wwwlww(void *args);
index 5c403a81bc35dffd63d95d45f08707b45a5141b9..d0050e4163b006aae20d782c5737598da75c63ea 100644 (file)
@@ -230,8 +230,12 @@ typedef struct pthread_callbacks_s {
 
        kern_return_t (*thread_set_voucher_name)(mach_port_name_t voucher_name);
 
+       boolean_t (*proc_usynch_thread_qos_add_override_for_resource)(task_t task, struct uthread *, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type);
+       boolean_t (*proc_usynch_thread_qos_remove_override_for_resource)(task_t task, struct uthread *, uint64_t tid, user_addr_t resource, int resource_type);
+       boolean_t (*proc_usynch_thread_qos_reset_override_for_resource)(task_t task, struct uthread *, uint64_t tid, user_addr_t resource, int resource_type);
+
        /* padding for future */
-       void* _pad[87];
+       void* _pad[84];
 
 } *pthread_callbacks_t;
 
index 5445aaa606ae6f7e44ddef6a08a87ad37c8243de..1e84dc0e92931851c7a13c1e503fd2b532ff8664 100644 (file)
@@ -1,4 +1,4 @@
-14.0.0
+14.1.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
index f2ba018b234b0190dca5fce2a64aa8073f9837d5..9623838aa6f62576e3c55a1e2a3b6475a3131c61 100644 (file)
@@ -683,18 +683,27 @@ enum {
 #define kIOPMThermalLevelWarningKey                     "Thermal_Level_Warning"
 
 /* Thermal Warning Level values
- *      kIOPMThermalWarningLevelNormal - under normal operating conditions
- *      kIOPMThermalWarningLevelDanger - thermal pressure may cause system slowdown
- *      kIOPMThermalWarningLevelCrisis - thermal conditions may cause imminent shutdown
+ *      kIOPMThermalLevelNormal   - under normal operating conditions
+ *      kIOPMThermalLevelDanger   - thermal pressure may cause system slowdown
+ *      kIOPMThermalLevelCritical - thermal conditions may cause imminent shutdown
  *
  * The platform may define additional thermal levels if necessary.
+ * Platform specific values are defined from 100 and above
  */
 enum {
-  kIOPMThermalWarningLevelNormal    = 0,
-  kIOPMThermalWarningLevelDanger    = 5,
-  kIOPMThermalWarningLevelCrisis    = 10
+  kIOPMThermalLevelNormal    = 0,
+  kIOPMThermalLevelDanger    = 5,
+  kIOPMThermalLevelCritical  = 10,
+
+  kIOPMThermalLevelWarning = 100,
+  kIOPMThermalLevelTrap    = 110,
+
+  kIOPMThermalLevelUnknown = 255,
 };
 
+#define kIOPMThermalWarningLevelNormal kIOPMThermalLevelNormal
+#define kIOPMThermalWarningLevelDanger kIOPMThermalLevelWarning
+#define kIOPMThermalWarningLevelCrisis kIOPMThermalLevelCritical
 
 // PM Settings Controller setting types
 // Settings types used primarily with:
index 4aecff1462dc49e28ad01d2d0c1aefb875fdd4df..0163072dd6d49124e475791ec3796950048b90d3 100644 (file)
@@ -700,7 +700,9 @@ enum {
     kIOPMSleepFactorExternalDisplay         = 0x00080000ULL,
     kIOPMSleepFactorNetworkKeepAliveActive  = 0x00100000ULL,
     kIOPMSleepFactorLocalUserActivity       = 0x00200000ULL,
-    kIOPMSleepFactorHibernateFailed         = 0x00400000ULL
+    kIOPMSleepFactorHibernateFailed         = 0x00400000ULL,
+    kIOPMSleepFactorThermalWarning          = 0x00800000ULL,
+    kIOPMSleepFactorDisplayCaptured         = 0x01000000ULL
 };
 
 // System Sleep Types
@@ -832,6 +834,9 @@ inline char const* getDumpLogFilename(swd_hdr *hdr)
 #define kUserWkCntChID IOREPORT_MAKEID('D','r','k','W','k','C','n','t')
 
 
+/* Sleep Options/settings */
+#define kSleepOptionDisplayCapturedModeKey         "DisplayCapturedMode"
+
 
 #if defined(KERNEL) && defined(__cplusplus)
 
@@ -853,4 +858,22 @@ typedef IOReturn (*IOPMSystemSleepPolicyHandler)(
 
 #endif /* KERNEL */
 
+/*****************************************************************************
+ *
+ * Performance Warning
+ *
+ *****************************************************************************/
+
+/* Performance Warning Key
+ * Key for performance warning event published using IOPMrootDomain::
+ * systemPowerEventOccurred()
+ */
+#define kIOPMPerformanceWarningKey          "Performance_Warning"
+
+/* Performance warning values */
+enum {
+  kIOPMPerformanceNormal    = 0,
+  kIOPMPerformanceWarning   = 100
+};
+
 #endif /* ! _IOKIT_IOPMPRIVATE_H */
index 0de68bbc870f48f6baecd2177e7fa3b6dd86b4c4..071575abfa6170f9b7ef8013a693e5c90f939e25 100644 (file)
@@ -693,6 +693,7 @@ private:
 
     unsigned int            displayIdleForDemandSleep :1;
     unsigned int            darkWakeHibernateError  :1;
+    unsigned int            thermalWarningState:1;
 
     uint32_t                hibernateMode;
     AbsoluteTime            userActivityTime;
@@ -846,6 +847,7 @@ private:
     void        acceptSystemWakeEvents( bool accept );
     void        systemDidNotSleep( void );
     void        preventTransitionToUserActive( bool prevent );
+    void        setThermalState(OSObject *value);
 #endif /* XNU_KERNEL_PRIVATE */
 };
 
index e489cd9a81ae86c0c1430373f02c18d3a074bc45..79c97e1af1145073b5acdbfe01cdeb5eb94aed2c 100644 (file)
@@ -99,10 +99,11 @@ Boolean IODataQueue::initWithCapacity(UInt32 size)
     if (dataQueue == 0) {
         return false;
     }
+    bzero(dataQueue, allocSize);
 
     dataQueue->queueSize    = size;
-    dataQueue->head         = 0;
-    dataQueue->tail         = 0;
+//  dataQueue->head         = 0;
+//  dataQueue->tail         = 0;
 
     if (!notifyMsg) {
         notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
index f6ab8e93b8f599b6612ee3ab53c85c4a69970f96..0c77443861c05e2b02c8ac46dd5ffafc0181b486 100644 (file)
@@ -743,10 +743,8 @@ IOGeneralMemoryDescriptor::memoryReferenceMap(
     cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
     if (kIODefaultCache != cacheMode)
     {
-       // VM system requires write access to change cache mode
-        prot |= VM_PROT_WRITE;
-        // update named entries cache mode
-       memEntryCacheMode = (MAP_MEM_ONLY | prot | vmProtForCacheMode(cacheMode));
+       // VM system requires write access to update named entry cache mode
+       memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
     }
 
     if (_task)
index ddce488072142902f1eb28fe8608eabd1fd64edb..73738c14ba9eaa6915d2230c0617caec22f3288a 100644 (file)
@@ -2040,7 +2040,13 @@ void IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
 #else
             LOG("System Sleep\n");
 #endif
-
+            if (thermalWarningState) {
+                const OSSymbol *event = OSSymbol::withCString(kIOPMThermalLevelWarningKey);
+                if (event) {
+                    systemPowerEventOccurred(event, kIOPMThermalLevelUnknown);
+                    event->release();
+                }
+            }
             ((IOService *)this)->stop_watchdog_timer(); //14456299
             getPlatform()->sleepKernel();
 
@@ -2091,6 +2097,7 @@ void IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
             userWasActive           = false;
             fullWakeReason = kFullWakeReasonNone;
 
+
             OSString * wakeType = OSDynamicCast(
                 OSString, getProperty(kIOPMRootDomainWakeTypeKey));
             OSString * wakeReason = OSDynamicCast(
@@ -3710,6 +3717,8 @@ bool IOPMrootDomain::evaluateSystemSleepPolicy(
         currentFactors |= kIOPMSleepFactorLocalUserActivity;
     if (darkWakeHibernateError && !CAP_HIGHEST(kIOPMSystemCapabilityGraphics))
         currentFactors |= kIOPMSleepFactorHibernateFailed;
+    if (thermalWarningState)
+        currentFactors |= kIOPMSleepFactorThermalWarning;
 
     DLOG("sleep factors 0x%llx\n", currentFactors);
 
@@ -4006,10 +4015,18 @@ bool IOPMrootDomain::getSleepOption( const char * key, uint32_t * option )
     {
         obj = copyProperty(key);
     }
-    if (obj && (num = OSDynamicCast(OSNumber, obj)))
+    if (obj)
     {
-        *option = num->unsigned32BitValue();
-        ok = true;
+        if ((num = OSDynamicCast(OSNumber, obj)))
+        {
+            *option = num->unsigned32BitValue();
+            ok = true;
+        }
+        else if (OSDynamicCast(OSBoolean, obj))
+        {
+            *option = (obj == kOSBooleanTrue) ? 1 : 0;
+            ok = true;
+        }
     }
 
     if (obj)
@@ -5622,9 +5639,9 @@ bool IOPMrootDomain::checkSystemSleepAllowed( IOOptionBits options,
         break;
 #endif
 
-        if (lowBatteryCondition)
+        if (lowBatteryCondition || thermalWarningState)
         {
-            break;          // always sleep on low battery
+            break;          // always sleep on low battery or when in thermal warning state
         }
 
         if (sleepReason == kIOPMSleepReasonDarkWakeThermalEmergency)
@@ -5698,7 +5715,7 @@ bool IOPMrootDomain::checkSystemCanSleep( uint32_t sleepReason )
 bool IOPMrootDomain::checkSystemCanSustainFullWake( void )
 {
 #if !NO_KERNEL_HID
-    if (lowBatteryCondition)
+    if (lowBatteryCondition || thermalWarningState)
     {
         // Low battery wake, or received a low battery notification
         // while system is awake. This condition will persist until
@@ -5962,6 +5979,24 @@ IOReturn IOPMrootDomain::systemPowerEventOccurred(
     return attempt;
 }
 
+void IOPMrootDomain::setThermalState(OSObject *value)
+{
+   OSNumber * num;
+
+   if (gIOPMWorkLoop->inGate() == false) {
+       gIOPMWorkLoop->runAction(
+               OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::setThermalState),
+               (OSObject *)this,
+               (void *)value);
+
+       return;
+    }
+    if (value && (num = OSDynamicCast(OSNumber, value))) {
+        thermalWarningState = ((num->unsigned32BitValue() == kIOPMThermalLevelWarning) || 
+                               (num->unsigned32BitValue() == kIOPMThermalLevelTrap)) ? 1 : 0; 
+    }
+}
+
 IOReturn IOPMrootDomain::systemPowerEventOccurred(
     const OSSymbol *event,
     OSObject *value)
@@ -6001,8 +6036,13 @@ exit:
     // UNLOCK
     if (featuresDictLock) IOLockUnlock(featuresDictLock);
 
-    if (shouldUpdate)
+    if (shouldUpdate) {
+        if (event && 
+             event->isEqualTo(kIOPMThermalLevelWarningKey)) {
+             setThermalState(value);
+        }
         messageClients (kIOPMMessageSystemPowerEventOccurred, (void *)NULL);
+    }
 
     return kIOReturnSuccess;
 }
index 0ad0f3cdec1a96a7d0c7bee8ec71e1d605fb3341..71daaa6b449ae17a5ee5c881ae8c96b706a7a240 100644 (file)
@@ -99,10 +99,11 @@ Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
     if (dataQueue == 0) {
         return false;
     }
+    bzero(dataQueue, allocSize);
 
     dataQueue->queueSize    = size;
-    dataQueue->head         = 0;
-    dataQueue->tail         = 0;
+//  dataQueue->head         = 0;
+//  dataQueue->tail         = 0;
 
     if (!setQueueSize(size)) {
         return false;
@@ -110,7 +111,14 @@ Boolean IOSharedDataQueue::initWithCapacity(UInt32 size)
     
     appendix            = (IODataQueueAppendix *)((UInt8 *)dataQueue + size + DATA_QUEUE_MEMORY_HEADER_SIZE);
     appendix->version   = 0;
-    notifyMsg           = &(appendix->msgh);
+
+    if (!notifyMsg) {
+        notifyMsg = IOMalloc(sizeof(mach_msg_header_t));
+        if (!notifyMsg)
+            return false;
+    }
+    bzero(notifyMsg, sizeof(mach_msg_header_t));
+
     setNotificationPort(MACH_PORT_NULL);
 
     return true;
@@ -121,6 +129,10 @@ void IOSharedDataQueue::free()
     if (dataQueue) {
         IOFreeAligned(dataQueue, round_page(getQueueSize() + DATA_QUEUE_MEMORY_HEADER_SIZE + DATA_QUEUE_MEMORY_APPENDIX_SIZE));
         dataQueue = NULL;
+        if (notifyMsg) {
+            IOFree(notifyMsg, sizeof(mach_msg_header_t));
+            notifyMsg = NULL;
+        }
     }
 
     if (_reserved) {
@@ -192,7 +204,7 @@ Boolean IOSharedDataQueue::enqueue(void * data, UInt32 dataSize)
         return false;
     }
     // Check for underflow of (getQueueSize() - tail)
-    if (getQueueSize() < tail) {
+    if (getQueueSize() < tail || getQueueSize() < head) {
         return false;
     }
     
index 05cd9653b22c3185252e463519602522985b5777..f1e6133442987a3616833655409f2cbc333ae9ce 100644 (file)
@@ -7803,6 +7803,7 @@ OSKext::copyInfo(OSArray * infoKeys)
             kernel_mach_header_t *kext_mach_hdr = (kernel_mach_header_t *)
                 linkedExecutable->getBytesNoCopy();
 
+#if !SECURE_KERNEL
             if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleMachOHeadersKey)) {
                 kernel_mach_header_t *  temp_kext_mach_hdr;
                 struct load_command *   lcp;
@@ -7843,6 +7844,17 @@ OSKext::copyInfo(OSArray * infoKeys)
                                   __FUNCTION__, segp->segname, segp->vmaddr,
                                   VM_KERNEL_UNSLIDE(segp->vmaddr),
                                   segp->vmsize, segp->nsects);
+                        if ( (VM_KERNEL_IS_SLID(segp->vmaddr) == false) &&
+                             (VM_KERNEL_IS_KEXT(segp->vmaddr) == false) &&
+                             (VM_KERNEL_IS_PRELINKTEXT(segp->vmaddr) == false) &&
+                             (VM_KERNEL_IS_PRELINKINFO(segp->vmaddr) == false) &&
+                             (VM_KERNEL_IS_KEXT_LINKEDIT(segp->vmaddr) == false) ) {
+                            OSKextLog(/* kext */ NULL,
+                                      kOSKextLogErrorLevel |
+                                      kOSKextLogGeneralFlag,
+                                      "%s: not in kext range - vmaddr 0x%llX vm_kext_base 0x%lX vm_kext_top 0x%lX",
+                                      __FUNCTION__, segp->vmaddr, vm_kext_base, vm_kext_top);
+                        }
 #endif
                         segp->vmaddr = VM_KERNEL_UNSLIDE(segp->vmaddr);
                         
@@ -7854,6 +7866,7 @@ OSKext::copyInfo(OSArray * infoKeys)
                 }
                 result->setObject(kOSBundleMachOHeadersKey, headerData);
             }
+#endif // SECURE_KERNEL
 
             if (!infoKeys || _OSArrayContainsCString(infoKeys, kOSBundleCPUTypeKey)) {
                 cpuTypeNumber = OSNumber::withNumber(
index 1bb73464bfb92c00752cbacf2ae38a6098cb108d..ccd84ed5420081ff803fb92dfc365ba4492bfa67 100644 (file)
                9002401118FC9A7F00D73BFA /* rename_ext.c in Sources */ = {isa = PBXBuildFile; fileRef = 906AA2D018F74CD1001C681A /* rename_ext.c */; };
                A59CB95616669EFB00B064B3 /* stack_logging_internal.h in Headers */ = {isa = PBXBuildFile; fileRef = A59CB95516669DB700B064B3 /* stack_logging_internal.h */; };
                A59CB9581666A1A200B064B3 /* munmap.c in Sources */ = {isa = PBXBuildFile; fileRef = A59CB9571666A1A200B064B3 /* munmap.c */; };
+               BA0D9FB1199031AD007E8A73 /* kdebug_trace.c in Sources */ = {isa = PBXBuildFile; fileRef = BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */; };
                BA4414AA18336A5F00AAE813 /* mach in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A51833697C00AAE813 /* mach */; };
                BA4414AB18336A6400AAE813 /* servers in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A6183369A100AAE813 /* servers */; };
                BA4414AD18336A9300AAE813 /* mach in CopyFiles */ = {isa = PBXBuildFile; fileRef = BA4414A7183369C100AAE813 /* mach */; };
                906AA2D018F74CD1001C681A /* rename_ext.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = rename_ext.c; sourceTree = "<group>"; };
                A59CB95516669DB700B064B3 /* stack_logging_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stack_logging_internal.h; sourceTree = "<group>"; };
                A59CB9571666A1A200B064B3 /* munmap.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = munmap.c; sourceTree = "<group>"; };
+               BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = kdebug_trace.c; sourceTree = "<group>"; };
                BA4414A51833697C00AAE813 /* mach */ = {isa = PBXFileReference; lastKnownFileType = text; name = mach; path = mig_hdr/include/mach; sourceTree = BUILT_PRODUCTS_DIR; };
                BA4414A6183369A100AAE813 /* servers */ = {isa = PBXFileReference; lastKnownFileType = text; name = servers; path = mig_hdr/include/servers; sourceTree = BUILT_PRODUCTS_DIR; };
                BA4414A7183369C100AAE813 /* mach */ = {isa = PBXFileReference; lastKnownFileType = text; name = mach; path = mig_hdr/local/include/mach; sourceTree = BUILT_PRODUCTS_DIR; };
                                248AA962122C7B2A0085F5B1 /* unlink.c */,
                                29A59AE5183B110C00E8B896 /* unlinkat.c */,
                                374A36E214748EE400AAF39D /* varargs_wrappers.s */,
+                               BA0D9FB0199031AD007E8A73 /* kdebug_trace.c */,
                        );
                        path = wrappers;
                        sourceTree = "<group>";
                                248BA08F121DC545008C073F /* open.c in Sources */,
                                248BA093121DE369008C073F /* select.c in Sources */,
                                248BA095121DE565008C073F /* select-pre1050.c in Sources */,
+                               BA0D9FB1199031AD007E8A73 /* kdebug_trace.c in Sources */,
                                4BDD5F1E1891AB2F004BF300 /* mach_approximate_time.s in Sources */,
                                248BA0B3121DE760008C073F /* select-cancel.c in Sources */,
                                248BA0BE121DE902008C073F /* select.c in Sources */,
diff --git a/libsyscall/mach/.gitignore b/libsyscall/mach/.gitignore
deleted file mode 100644 (file)
index f718d68..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-*.pbxuser
-*.perspectivev3
-build/
diff --git a/libsyscall/wrappers/kdebug_trace.c b/libsyscall/wrappers/kdebug_trace.c
new file mode 100644 (file)
index 0000000..4867f9b
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#include <stdint.h>
+#include <machine/cpu_capabilities.h>
+#include <sys/kdebug.h>
+#include <sys/errno.h>
+
+#define CLASS_MASK      0xff000000
+#define CLASS_OFFSET    24
+#define SUBCLASS_MASK   0x00ff0000
+#define SUBCLASS_OFFSET 16
+
+#define EXTRACT_CLASS(debugid)          ((uint8_t)(((debugid) & CLASS_MASK) >> CLASS_OFFSET))
+#define EXTRACT_SUBCLASS(debugid)       ( (uint8_t) ( ((debugid) & SUBCLASS_MASK) >> SUBCLASS_OFFSET ) )
+
+extern int __kdebug_trace64(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4);
+
+int
+kdebug_trace(uint32_t code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
+{
+       uint8_t code_class;
+       volatile uint32_t *kdebug_enable_address = (volatile uint32_t *)(uintptr_t)(_COMM_PAGE_KDEBUG_ENABLE);
+
+       /*
+        * This filtering is also done in the kernel, but we also do it here so that errors
+        * are returned in all cases, not just when the system call is actually performed.
+        */
+       code_class = EXTRACT_CLASS(code);
+       switch (code_class) {
+               case DBG_TRACE:
+                       errno = EPERM;
+                       return -1;
+       }
+
+       if (*kdebug_enable_address == 0) {
+               return 0;
+       }
+       
+       return __kdebug_trace64(code, arg1, arg2, arg3, arg4);
+}
index 88a86c31bb816aca08e0a58f5495719b4a561960..067018965af6cb9737638bb222dadb3c45c35a96 100644 (file)
@@ -94,6 +94,9 @@ typedef struct {
        uint64_t cpu_insns;
        uint64_t cpu_ucc;
        uint64_t cpu_urc;
+#if    DIAG_ALL_PMCS
+       uint64_t gpmcs[4];
+#endif /* DIAG_ALL_PMCS */
 } core_energy_stat_t;
 
 typedef struct {
@@ -262,6 +265,9 @@ diagCall64(x86_saved_state_t * state)
                        cest.cpu_insns = cpu_data_ptr[i]->cpu_cur_insns;
                        cest.cpu_ucc = cpu_data_ptr[i]->cpu_cur_ucc;
                        cest.cpu_urc = cpu_data_ptr[i]->cpu_cur_urc;
+#if DIAG_ALL_PMCS
+                       bcopy(&cpu_data_ptr[i]->cpu_gpmcs[0], &cest.gpmcs[0], sizeof(cest.gpmcs));
+#endif /* DIAG_ALL_PMCS */                     
                        (void) ml_set_interrupts_enabled(TRUE);
 
                        copyout(&cest, curpos, sizeof(cest));
@@ -344,6 +350,13 @@ void cpu_powerstats(__unused void *arg) {
                uint64_t insns = read_pmc(FIXED_PMC0);
                uint64_t ucc = read_pmc(FIXED_PMC1);
                uint64_t urc = read_pmc(FIXED_PMC2);
+#if DIAG_ALL_PMCS
+               int i;
+
+               for (i = 0; i < 4; i++) {
+                       cdp->cpu_gpmcs[i] = read_pmc(i);
+               }
+#endif /* DIAG_ALL_PMCS */
                cdp->cpu_cur_insns = insns;
                cdp->cpu_cur_ucc = ucc;
                cdp->cpu_cur_urc = urc;
index 4e37eea2bb6c451cee45050a5563fb2d105e5b11..133ddc463a7ba0610f019c7fd8e9aef24ff16b2f 100644 (file)
@@ -104,7 +104,11 @@ extern diagWork dgWork;
 #define FIXED_PMC0 (FIXED_PMC)
 #define FIXED_PMC1 (FIXED_PMC | 1)
 #define FIXED_PMC2 (FIXED_PMC | 2)
+#define GPMC0 (0)
+#define GPMC1 (1)
+#define GPMC2 (2)
+#define GPMC3 (3)
+
 static inline uint64_t read_pmc(uint32_t counter)
 {
        uint32_t lo = 0, hi = 0;
index 6bea2bef756a00a4d2321fd4b291a3b2fd62e856..eea96ab64532140928cde2de32f09d0d8c0478ad 100644 (file)
@@ -68,6 +68,8 @@
 #include <kern/page_decrypt.h>
 #include <kern/processor.h>
 
+#include <sys/kdebug.h>
+
 /* the lists of commpage routines are in commpage_asm.s  */
 extern commpage_descriptor*    commpage_32_routines[];
 extern commpage_descriptor*    commpage_64_routines[];
@@ -281,6 +283,10 @@ commpage_init_cpu_capabilities( void )
                                        CPUID_LEAF7_FEATURE_HLE);
        setif(bits, kHasAVX2_0,  cpuid_leaf7_features() &
                                        CPUID_LEAF7_FEATURE_AVX2);
+       setif(bits, kHasRDSEED,  cpuid_leaf7_features() &
+                                       CPUID_LEAF7_FEATURE_RDSEED);
+       setif(bits, kHasADX,     cpuid_leaf7_features() &
+                                       CPUID_LEAF7_FEATURE_ADX);
        
        uint64_t misc_enable = rdmsr64(MSR_IA32_MISC_ENABLE);
        setif(bits, kHasENFSTRG, (misc_enable & 1ULL) &&
@@ -457,8 +463,9 @@ commpage_populate( void )
        simple_lock_init(&commpage_active_cpus_lock, 0);
 
        commpage_update_active_cpus();
-        commpage_mach_approximate_time_init();
+       commpage_mach_approximate_time_init();
        rtc_nanotime_init_commpage();
+       commpage_update_kdebug_enable();
 }
 
 /* Fill in the common routines during kernel initialization. 
@@ -690,6 +697,34 @@ commpage_update_active_cpus(void)
        simple_unlock(&commpage_active_cpus_lock);
 }
 
+/*
+ * Update the commpage data with the value of the "kdebug_enable"
+ * global so that userspace can avoid trapping into the kernel
+ * for kdebug_trace() calls. Serialization is handled
+ * by the caller in bsd/kern/kdebug.c.
+ */
+void
+commpage_update_kdebug_enable(void)
+{
+       volatile uint32_t *saved_data_ptr;
+       char *cp;
+
+       cp = commPagePtr32;
+       if (cp) {
+               cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_BASE_ADDRESS);
+               saved_data_ptr = (volatile uint32_t *)cp;
+               *saved_data_ptr = kdebug_enable;
+       }
+
+       cp = commPagePtr64;
+       if ( cp ) {
+               cp += (_COMM_PAGE_KDEBUG_ENABLE - _COMM_PAGE32_START_ADDRESS);
+               saved_data_ptr = (volatile uint32_t *)cp;
+               *saved_data_ptr = kdebug_enable;
+       }
+}
+
+
 /*
  * update the commpage data for last known value of mach_absolute_time()
  */
@@ -699,11 +734,11 @@ commpage_update_mach_approximate_time(uint64_t abstime)
 {
 #ifdef CONFIG_MACH_APPROXIMATE_TIME
        uint64_t saved_data;
-        char *cp;
-
-        cp = commPagePtr32;
+       char *cp;
+       
+       cp = commPagePtr32;
        if ( cp ) {
-               cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
+               cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_BASE_ADDRESS);
                saved_data = *(uint64_t *)cp;
                if (saved_data < abstime) {
                        /* ignoring the success/fail return value assuming that
@@ -713,9 +748,9 @@ commpage_update_mach_approximate_time(uint64_t abstime)
                        OSCompareAndSwap64(saved_data, abstime, (uint64_t *)cp);
                }
        }
-        cp = commPagePtr64;
+       cp = commPagePtr64;
        if ( cp ) {
-               cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
+               cp += (_COMM_PAGE_APPROX_TIME - _COMM_PAGE32_START_ADDRESS);
                saved_data = *(uint64_t *)cp;
                if (saved_data < abstime) {
                        /* ignoring the success/fail return value assuming that
index a4ad20f0f66b16610a421ccc1081ac2ff69bbfda..1abe641132bf907cfb6739a55e8fcb0740a6d7cf 100644 (file)
@@ -146,6 +146,7 @@ extern      void    commpage_set_spin_count(unsigned int  count);
 extern void    commpage_sched_gen_inc(void);
 extern void    commpage_update_active_cpus(void);
 extern void    commpage_update_mach_approximate_time(uint64_t abstime);
+extern void    commpage_update_kdebug_enable(void);
 
 extern uint32_t        commpage_is_in_pfz32(uint32_t);
 extern uint32_t        commpage_is_in_pfz64(addr64_t);
index 922faa5da2c67cf3864eadd02217b66e81f24be7..46f08196ecb4bd0222c4d51ddec75502590724c3 100644 (file)
@@ -69,6 +69,8 @@
 /* Extending into 64-bits from here: */ 
 #define        kHasRTM                 0x0000000100000000ULL
 #define        kHasHLE                 0x0000000200000000ULL
+#define        kHasRDSEED              0x0000000800000000ULL
+#define        kHasADX                 0x0000000400000000ULL
 
 
 #ifndef        __ASSEMBLER__
@@ -182,7 +184,8 @@ int _NumCPUs( void )
 #define _COMM_PAGE_MEMORY_SIZE         (_COMM_PAGE_START_ADDRESS+0x038)        /* uint64_t max memory size */
 
 #define _COMM_PAGE_CPUFAMILY           (_COMM_PAGE_START_ADDRESS+0x040)        /* uint32_t hw.cpufamily, x86*/
-#define _COMM_PAGE_UNUSED2             (_COMM_PAGE_START_ADDRESS+0x044)        /* [0x44,0x50) unused */
+#define _COMM_PAGE_KDEBUG_ENABLE       (_COMM_PAGE_START_ADDRESS+0x044)        /* uint32_t export "kdebug_enable" to userspace */
+#define _COMM_PAGE_UNUSED2             (_COMM_PAGE_START_ADDRESS+0x048)        /* [0x48,0x50) unused */
 
 #define        _COMM_PAGE_TIME_DATA_START      (_COMM_PAGE_START_ADDRESS+0x050)        /* base of offsets below (_NT_SCALE etc) */
 #define _COMM_PAGE_NT_TSC_BASE         (_COMM_PAGE_START_ADDRESS+0x050)        /* used by nanotime() */
index f3b201e275a1979c25848637a1f9d0e551422c58..b4b4bd4b90cab7965a0c738aeed2e235c05dae0b 100644 (file)
@@ -220,6 +220,7 @@ typedef struct cpu_data
        uint64_t                cpu_cur_insns;
        uint64_t                cpu_cur_ucc;
        uint64_t                cpu_cur_urc;
+       uint64_t                cpu_gpmcs[4];
        uint64_t                cpu_max_observed_int_latency;
        int                     cpu_max_observed_int_latency_vector;
        volatile boolean_t      cpu_NMI_acknowledged;
index 2302c833b57e5875ac519d2e5387e6f4f671133f..532c49ee344ae9ec083e8d0eb736c432df924eea 100644 (file)
@@ -713,10 +713,11 @@ cpuid_set_generic_info(i386_cpu_info_t *info_p)
                 * Leaf7 Features:
                 */
                cpuid_fn(0x7, reg);
-               info_p->cpuid_leaf7_features = reg[ebx];
+               info_p->cpuid_leaf7_features = quad(reg[ecx], reg[ebx]);
 
                DBG(" Feature Leaf7:\n");
                DBG("  EBX           : 0x%x\n", reg[ebx]);
+               DBG("  ECX           : 0x%x\n", reg[ecx]);
        }
 
        return;
@@ -756,10 +757,17 @@ cpuid_set_cpufamily(i386_cpu_info_t *info_p)
                        cpufamily = CPUFAMILY_INTEL_IVYBRIDGE;
                        break;
                case CPUID_MODEL_HASWELL:
+               case CPUID_MODEL_HASWELL_EP:
                case CPUID_MODEL_HASWELL_ULT:
                case CPUID_MODEL_CRYSTALWELL:
                        cpufamily = CPUFAMILY_INTEL_HASWELL;
                        break;
+#if !defined(XNU_HIDE_SEED)
+               case CPUID_MODEL_BROADWELL:
+               case CPUID_MODEL_BRYSTALWELL:
+                       cpufamily = CPUFAMILY_INTEL_BROADWELL;
+                       break;
+#endif /* not XNU_HIDE_SEED */
                }
                break;
        }
@@ -814,16 +822,18 @@ cpuid_set_info(void)
         * (which determines whether SMT/Hyperthreading is active).
         */
        switch (info_p->cpuid_cpufamily) {
+       case CPUFAMILY_INTEL_MEROM:
+       case CPUFAMILY_INTEL_PENRYN:
+               info_p->core_count   = info_p->cpuid_cores_per_package;
+               info_p->thread_count = info_p->cpuid_logical_per_package;
+               break;
        case CPUFAMILY_INTEL_WESTMERE: {
                uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
                info_p->core_count   = bitfield32((uint32_t)msr, 19, 16);
                info_p->thread_count = bitfield32((uint32_t)msr, 15,  0);
                break;
                }
-       case CPUFAMILY_INTEL_HASWELL:
-       case CPUFAMILY_INTEL_IVYBRIDGE:
-       case CPUFAMILY_INTEL_SANDYBRIDGE:
-       case CPUFAMILY_INTEL_NEHALEM: {
+       default: {
                uint64_t msr = rdmsr64(MSR_CORE_THREAD_COUNT);
                info_p->core_count   = bitfield32((uint32_t)msr, 31, 16);
                info_p->thread_count = bitfield32((uint32_t)msr, 15,  0);
@@ -932,6 +942,11 @@ leaf7_feature_map[] = {
        {CPUID_LEAF7_FEATURE_BMI2,     "BMI2"},
        {CPUID_LEAF7_FEATURE_INVPCID,  "INVPCID"},
        {CPUID_LEAF7_FEATURE_RTM,      "RTM"},
+       {CPUID_LEAF7_FEATURE_RDSEED,   "RDSEED"},
+       {CPUID_LEAF7_FEATURE_ADX,      "ADX"},
+#if !defined(XNU_HIDE_SEED)
+       {CPUID_LEAF7_FEATURE_SMAP,     "SMAP"},
+#endif /* not XNU_HIDE_SEED */
        {0, 0}
 };
 
index cd37a55a2612ed83bfa57daa76bcf4a599189f6a..1f58d5250ab404f60951e99e8ed7b78ab497e33b 100644 (file)
 
 /*
  * Leaf 7, subleaf 0 additional features.
- * Bits returned in %ebx to a CPUID request with {%eax,%ecx} of (0x7,0x0}:
+ * Bits returned in %ebx:%ecx to a CPUID request with {%eax,%ecx} of (0x7,0x0}:
  */
 #define CPUID_LEAF7_FEATURE_RDWRFSGS _Bit(0)   /* FS/GS base read/write */
 #define CPUID_LEAF7_FEATURE_TSCOFF   _Bit(1)   /* TSC thread offset */
 #define CPUID_LEAF7_FEATURE_BMI2     _Bit(8)   /* Bit Manipulation Instrs, set 2 */
 #define CPUID_LEAF7_FEATURE_ERMS     _Bit(9)   /* Enhanced Rep Movsb/Stosb */
 #define CPUID_LEAF7_FEATURE_INVPCID  _Bit(10)  /* INVPCID intruction, TDB */
-#define CPUID_LEAF7_FEATURE_RTM      _Bit(11)  /* TBD */
+#define CPUID_LEAF7_FEATURE_RTM      _Bit(11)  /* RTM */
+#define CPUID_LEAF7_FEATURE_RDSEED   _Bit(18)  /* RDSEED Instruction */
+#define CPUID_LEAF7_FEATURE_ADX      _Bit(19)  /* ADX Instructions */
+#if !defined(XNU_HIDE_SEED)
+#define CPUID_LEAF7_FEATURE_SMAP     _Bit(20)  /* Supervisor Mode Access Protect */
+#endif /* not XNU_HIDE_SEED */
 
 /*
  * The CPUID_EXTFEATURE_XXX values define 64-bit values
 #define CPUID_MODEL_IVYBRIDGE_EP       0x3E
 #define CPUID_MODEL_CRYSTALWELL                0x46
 #define CPUID_MODEL_HASWELL            0x3C
-#define CPUID_MODEL_HASWELL_SVR                0x3F
+#define CPUID_MODEL_HASWELL_EP         0x3F
 #define CPUID_MODEL_HASWELL_ULT                0x45
+#if !defined(XNU_HIDE_SEED)
+#define CPUID_MODEL_BROADWELL          0x3D
+#define CPUID_MODEL_BROADWELL_ULX      0x3D
+#define CPUID_MODEL_BROADWELL_ULT      0x3D
+#define CPUID_MODEL_BRYSTALWELL                0x47
+#endif /* not XNU_HIDE_SEED */
 
 #define CPUID_VMM_FAMILY_UNKNOWN       0x0
 #define CPUID_VMM_FAMILY_VMWARE                0x1
@@ -375,7 +386,7 @@ typedef struct {
        cpuid_thermal_leaf_t    *cpuid_thermal_leafp;
        cpuid_arch_perf_leaf_t  *cpuid_arch_perf_leafp;
        cpuid_xsave_leaf_t      *cpuid_xsave_leafp;
-       uint32_t                cpuid_leaf7_features;
+       uint64_t                cpuid_leaf7_features;
 } i386_cpu_info_t;
 
 #ifdef MACH_KERNEL_PRIVATE
index fb0eca6f97b28c8e2a9847a951f56f6b653aabad..5f4ef89341a60a02a4ec4b7bc2bcd03a01a29056 100644 (file)
@@ -245,7 +245,9 @@ init_fpu(void)
                if (xsp->extended_state[0] & (uint32_t)XFEM_YMM) {
                        assert(xsp->extended_state[0] & (uint32_t) XFEM_SSE);
                        /* XSAVE container size for all features */
-                       assert(xsp->extended_state[2] == sizeof(struct x86_avx_thread_state));
+                       if (xsp->extended_state[2] != sizeof(struct x86_avx_thread_state))
+                               kprintf("sizeof(struct x86_avx_thread_state)=%lu != xsp->extended_state[2]=%u\n",
+                                       sizeof(struct x86_avx_thread_state), xsp->extended_state[2]);
                        fp_register_state_size = sizeof(struct x86_avx_thread_state);
                        fpu_YMM_present = TRUE;
                        set_cr4(get_cr4() | CR4_OSXSAVE);
index 4ec9f5cbe5d70f0107d0017f4f89e39523ed0d9c..8a1d753b588ff8fbf8d5474f1c4d5a806f680133 100644 (file)
@@ -100,10 +100,17 @@ vm_offset_t       vm_kernel_top;
 vm_offset_t    vm_kernel_stext;
 vm_offset_t    vm_kernel_etext;
 vm_offset_t    vm_kernel_slide;
-vm_offset_t     vm_hib_base;
+vm_offset_t vm_hib_base;
 vm_offset_t    vm_kext_base = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
 vm_offset_t    vm_kext_top = VM_MIN_KERNEL_ADDRESS;
 
+vm_offset_t vm_prelink_stext;
+vm_offset_t vm_prelink_etext;
+vm_offset_t vm_prelink_sinfo;
+vm_offset_t vm_prelink_einfo;
+vm_offset_t vm_slinkedit;
+vm_offset_t vm_elinkedit;
+
 #define MAXLORESERVE   (32 * 1024 * 1024)
 
 ppnum_t                max_ppnum = 0;
@@ -133,6 +140,7 @@ vm_offset_t segTEXTB; unsigned long segSizeTEXT;
 vm_offset_t segDATAB; unsigned long segSizeDATA;
 vm_offset_t segLINKB; unsigned long segSizeLINK;
 vm_offset_t segPRELINKB; unsigned long segSizePRELINK;
+vm_offset_t segPRELINKINFOB; unsigned long segSizePRELINKINFO;
 vm_offset_t segHIBB; unsigned long segSizeHIB;
 vm_offset_t sectCONSTB; unsigned long sectSizeConst;
 
@@ -245,6 +253,8 @@ i386_vm_init(uint64_t       maxmem,
                                        "__HIB", &segSizeHIB);
        segPRELINKB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
                                        "__PRELINK_TEXT", &segSizePRELINK);
+    segPRELINKINFOB = (vm_offset_t) getsegdatafromheader(&_mh_execute_header,
+                    "__PRELINK_INFO", &segSizePRELINKINFO);
        segTEXT = getsegbynamefromheader(&_mh_execute_header,
                                        "__TEXT");
        segDATA = getsegbynamefromheader(&_mh_execute_header,
@@ -285,6 +295,7 @@ i386_vm_init(uint64_t       maxmem,
        DBG("segLINKB    = %p\n", (void *) segLINKB);
        DBG("segHIBB     = %p\n", (void *) segHIBB);
        DBG("segPRELINKB = %p\n", (void *) segPRELINKB);
+    DBG("segPRELINKINFOB = %p\n", (void *) segPRELINKINFOB);
        DBG("sHIB        = %p\n", (void *) sHIB);
        DBG("eHIB        = %p\n", (void *) eHIB);
        DBG("stext       = %p\n", (void *) stext);
@@ -299,6 +310,12 @@ i386_vm_init(uint64_t      maxmem,
        vm_kernel_top   = (vm_offset_t) &last_kernel_symbol;
        vm_kernel_stext = stext;
        vm_kernel_etext = etext;
+    vm_prelink_stext = segPRELINKB;
+    vm_prelink_etext = segPRELINKB + segSizePRELINK;
+    vm_prelink_sinfo = segPRELINKINFOB;
+    vm_prelink_einfo = segPRELINKINFOB + segSizePRELINKINFO;
+    vm_slinkedit = segLINKB;
+    vm_elinkedit = segLINKB + segSizePRELINK;
 
        vm_set_page_size();
 
index 22ce119992252fb50a0886ca46e84c70c123e012..2229ab8a82ce1c2618d9419ce7a3e1d41bcba76d 100644 (file)
 /*
  * CR4
  */
+#define CR4_SMAP       0x00200000      /* Supervisor-Mode Access Protect */
 #define CR4_SMEP       0x00100000      /* Supervisor-Mode Execute Protect */
 #define CR4_OSXSAVE    0x00040000      /* OS supports XSAVE */
 #define CR4_PCIDE      0x00020000      /* PCID Enable */
 /*
  * XCR0 - XFEATURE_ENABLED_MASK (a.k.a. XFEM) register
  */
-#define        XCR0_YMM 0x0000000000000004ULL /* YMM state available */
-#define        XFEM_YMM XCR0_YMM
-#define XCR0_SSE 0x0000000000000002ULL /* SSE supported by XSAVE/XRESTORE */
-#define XCR0_X87 0x0000000000000001ULL /* x87, FPU/MMX (always set) */
-#define XFEM_SSE XCR0_SSE
-#define XFEM_X87 XCR0_X87
+#define XCR0_X87       (1ULL << 0)     /* x87, FPU/MMX (always set) */
+#define XCR0_SSE       (1ULL << 1)     /* SSE supported by XSAVE/XRESTORE */
+#define        XCR0_YMM        (1ULL << 2)     /* YMM state available */
+#define XFEM_X87       XCR0_X87
+#define XFEM_SSE       XCR0_SSE
+#define        XFEM_YMM        XCR0_YMM
 #define XCR0 (0)
 
 #define        PMAP_PCID_PRESERVE (1ULL << 63)
@@ -473,6 +474,8 @@ __END_DECLS
 
 #define MSR_IA32_PERFCTR0                      0xc1
 #define MSR_IA32_PERFCTR1                      0xc2
+#define MSR_IA32_PERFCTR3                      0xc3
+#define MSR_IA32_PERFCTR4                      0xc4
 
 #define MSR_PLATFORM_INFO                      0xce
 
@@ -491,6 +494,8 @@ __END_DECLS
 
 #define MSR_IA32_EVNTSEL0                      0x186
 #define MSR_IA32_EVNTSEL1                      0x187
+#define MSR_IA32_EVNTSEL2                      0x188
+#define MSR_IA32_EVNTSEL3                      0x189
 
 #define MSR_FLEX_RATIO                         0x194
 #define MSR_IA32_PERF_STS                      0x198
index df8126bb6fbb4190a1e0870d28e887d4411d4829..95babd63280f2211f645ba2ac4c114e4e5889126 100644 (file)
@@ -37,6 +37,7 @@
 #include <kern/mach_param.h> /* for TASK_CHUNK */
 #include <kern/task.h>
 #include <kern/zalloc.h>
+#include <kern/sfi.h>
 
 #include <libkern/OSAtomic.h>
 
@@ -109,6 +110,9 @@ struct coalition {
        unsigned int terminated : 1;            /* coalition became empty and spawns are now forbidden */
        unsigned int reaped : 1;                /* reaped, invisible to userspace, but waiting for ref_count to go to zero */
        unsigned int notified : 1;              /* no-more-processes notification was sent via special port */
+
+       uint32_t focal_tasks_count;     /* count of TASK_FOREGROUND_APPLICATION tasks in the coalition */
+       uint32_t non_focal_tasks_count; /* count of TASK_BACKGROUND_APPLICATION tasks in the coalition */
 };
 
 #define coalition_lock(c) do{ lck_mtx_lock(&c->lock); }while(0)
@@ -304,6 +308,8 @@ coalition_release(coalition_t coal)
                assert(coal->terminated);
                assert(coal->active_count == 0);
                assert(coal->reaped);
+               assert(coal->focal_tasks_count == 0);
+               assert(coal->non_focal_tasks_count == 0);
 
                ledger_dereference(coal->ledger);
                lck_mtx_destroy(&coal->lock, &coalitions_lck_grp);
@@ -741,3 +747,47 @@ coalition_init(void)
        /* "Leak" our reference to the global object */
 }
 
+/* coalition focal tasks */
+uint32_t coalition_adjust_focal_task_count(coalition_t coal, int count)
+{
+       return hw_atomic_add(&coal->focal_tasks_count, count);
+}
+
+uint32_t coalition_focal_task_count(coalition_t coal)
+{
+       return coal->focal_tasks_count;
+}
+
+uint32_t coalition_adjust_non_focal_task_count(coalition_t coal, int count)
+{
+       return hw_atomic_add(&coal->non_focal_tasks_count, count);
+}
+
+uint32_t coalition_non_focal_task_count(coalition_t coal)
+{
+       return coal->non_focal_tasks_count;
+}
+
+/* Call sfi_reevaluate() for every thread in the coalition */
+void coalition_sfi_reevaluate(coalition_t coal, task_t updated_task) {
+       task_t task;
+       thread_t thread;
+
+       coalition_lock(coal);
+
+       queue_iterate(&coal->tasks, task, task_t, coalition_tasks) {
+
+               /* Skip the task we're doing this on behalf of - it's already updated */
+               if (task == updated_task)
+                       continue;
+
+               task_lock(task);
+
+               queue_iterate(&task->threads, thread, thread_t, task_threads) {
+                               sfi_reevaluate(thread);
+               }
+               task_unlock(task);
+       }
+       coalition_unlock(coal);
+}
+
index 38bdefd58f449c225a969eeec55c5147dea8c4e3..1c996968849d5e00993771317919eaf39c6af238 100644 (file)
@@ -116,6 +116,13 @@ kern_return_t coalition_resource_usage_internal(coalition_t coal, struct coaliti
 
 ledger_t coalition_get_ledger(coalition_t coal);
 
+uint32_t coalition_adjust_focal_task_count(coalition_t coal, int count);
+uint32_t coalition_focal_task_count(coalition_t coal);
+uint32_t coalition_adjust_non_focal_task_count(coalition_t coal, int count);
+uint32_t coalition_non_focal_task_count(coalition_t coal);
+
+void coalition_sfi_reevaluate(coalition_t coal, task_t updated_task);
+
 #endif /* XNU_KERNEL_PRIVATE */
 
 #endif /* _KERN_COALITION_H */
index 85055d0272edeb4e1bbd65543fdd6ed68605d7d8..725164c77c311d009a3af2106aaab9e63121e3fb 100644 (file)
@@ -41,6 +41,8 @@
 #include <kern/timer_call.h>
 #include <kern/wait_queue.h>
 #include <kern/ledger.h>
+#include <kern/coalition.h>
+
 #include <pexpert/pexpert.h>
 
 #include <libkern/kernel_mach_header.h>
@@ -687,6 +689,7 @@ sfi_class_id_t sfi_thread_classify(thread_t thread)
        int thread_bg = proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG);
        int managed_task = proc_get_effective_task_policy(task, TASK_POLICY_SFI_MANAGED);
        int thread_qos = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS);
+       boolean_t focal = FALSE;
 
        /* kernel threads never reach the user AST boundary, and are in a separate world for SFI */
        if (is_kernel_thread) {
@@ -717,20 +720,48 @@ sfi_class_id_t sfi_thread_classify(thread_t thread)
        }
 
        /*
-        * Threads with unspecified or legacy QOS class can be individually managed
+        * Threads with unspecified, legacy, or user-initiated QOS class can be individually managed.
         */
-       if (managed_task &&
-           (thread_qos == THREAD_QOS_UNSPECIFIED || thread_qos == THREAD_QOS_LEGACY)) {
-               if (task_role == TASK_FOREGROUND_APPLICATION || task_role == TASK_CONTROL_APPLICATION)
-                       return SFI_CLASS_MANAGED_FOCAL;
-               else
-                       return SFI_CLASS_MANAGED_NONFOCAL;
+
+       switch (task_role) {
+               case TASK_CONTROL_APPLICATION:
+               case TASK_FOREGROUND_APPLICATION:
+                       focal = TRUE;
+                       break;
+
+               case TASK_BACKGROUND_APPLICATION:
+               case TASK_DEFAULT_APPLICATION:
+               case TASK_UNSPECIFIED:
+                       /* Focal if in coalition with foreground app */
+                       if (coalition_focal_task_count(thread->task->coalition) > 0)
+                               focal = TRUE;
+                       break;
+
+               default:
+                       break;
+       }
+
+       if (managed_task) {
+               switch (thread_qos) {
+               case THREAD_QOS_UNSPECIFIED:
+               case THREAD_QOS_LEGACY:
+               case THREAD_QOS_USER_INITIATED:
+                       if (focal)
+                               return SFI_CLASS_MANAGED_FOCAL;
+                       else
+                               return SFI_CLASS_MANAGED_NONFOCAL;
+               default:
+                       break;
+               }
        }
 
        if (thread_qos == THREAD_QOS_UTILITY)
                return SFI_CLASS_UTILITY;
 
-       if (task_role == TASK_FOREGROUND_APPLICATION || task_role == TASK_CONTROL_APPLICATION) {
+       /*
+        * Classify threads in non-managed tasks
+        */
+       if (focal) {
                switch (thread_qos) {
                case THREAD_QOS_USER_INTERACTIVE:
                        return SFI_CLASS_USER_INTERACTIVE_FOCAL;
@@ -920,7 +951,10 @@ void sfi_ast(thread_t thread)
        }
 }
 
-/* Thread must be unlocked */
+/*
+ * Thread must be unlocked
+ * May be called with coalition, task, or thread mutex held
+ */
 void sfi_reevaluate(thread_t thread)
 {
        kern_return_t kret;
index 049701dff80729271d554b1db33f16a57127979b..999dfefcf89c0df40050ec72ca85afee189a52a9 100644 (file)
 extern int kpc_force_all_ctrs(task_t, int);
 #endif
 
+uint32_t qos_override_mode;
+
 task_t                 kernel_task;
 zone_t                 task_zone;
 lck_attr_t      task_lck_attr;
@@ -440,6 +442,12 @@ task_init(void)
                hwm_user_cores = 0;
        }
 
+       if (PE_parse_boot_argn("qos_override_mode", &qos_override_mode, sizeof(qos_override_mode))) {
+               printf("QOS override mode: 0x%08x\n", qos_override_mode);
+       } else {
+               qos_override_mode = QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE;
+       }
+
        proc_init_cpumon_params();
 
        if (!PE_parse_boot_argn("task_wakeups_monitor_rate", &task_wakeups_monitor_rate, sizeof (task_wakeups_monitor_rate))) {
@@ -1289,6 +1297,9 @@ task_terminate_internal(
 
        task_unlock(task);
 
+       proc_set_task_policy(task, THREAD_NULL, TASK_POLICY_ATTRIBUTE,
+                            TASK_POLICY_TERMINATED, TASK_POLICY_ENABLE);
+
         /* Early object reap phase */
 
 // PR-17045188: Revisit implementation
index c2a8d86a90720198f6ed5f7f685fafa2f81bce5d..b82c53bb41cdb26d48389f4628860b1af59b8abe 100644 (file)
@@ -407,6 +407,14 @@ extern void                init_task_ledgers(void);
 extern lck_attr_t      task_lck_attr;
 extern lck_grp_t       task_lck_grp;
 
+#define QOS_OVERRIDE_MODE_OVERHANG_PEAK 0
+#define QOS_OVERRIDE_MODE_IGNORE_OVERRIDE 1
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE 2
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH 3
+#define QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE 4
+
+extern uint32_t qos_override_mode;
+
 #else  /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -702,7 +710,8 @@ typedef struct task_pend_token {
        uint32_t        tpt_update_sockets      :1,
                        tpt_update_timers       :1,
                        tpt_update_watchers     :1,
-                       tpt_update_live_donor   :1;
+                       tpt_update_live_donor   :1,
+                       tpt_update_coal_sfi     :1;
 } *task_pend_token_t;
 
 extern void task_policy_update_complete_unlocked(task_t task, thread_t thread, task_pend_token_t pend_token);
@@ -726,8 +735,10 @@ int proc_clear_task_ruse_cpu(task_t task, int cpumon_entitled);
 thread_t task_findtid(task_t, uint64_t);
 void set_thread_iotier_override(thread_t, int policy);
 
-boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource);
-boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid);
+boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type);
+boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type);
+boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type);
+void proc_thread_qos_deallocate(thread_t thread);
 
 #define TASK_RUSECPU_FLAGS_PROC_LIMIT                  0x01
 #define TASK_RUSECPU_FLAGS_PERTHR_LIMIT                        0x02
index f826ea7c142243a6cdc58cd9127461396778ab95..e7df6a708ecf7067390eb54c9595deff8c7ac709 100644 (file)
@@ -38,6 +38,7 @@
 #include <kern/ledger.h>
 #include <kern/thread_call.h>
 #include <kern/sfi.h>
+#include <kern/coalition.h>
 #if CONFIG_TELEMETRY
 #include <kern/telemetry.h>
 #endif
@@ -130,6 +131,7 @@ static void task_policy_update_locked(task_t task, thread_t thread, task_pend_to
 static void task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_create, task_pend_token_t pend_token);
 static void task_policy_update_task_locked(task_t task, boolean_t update_throttle, boolean_t update_bg_throttle, boolean_t update_sfi);
 static void task_policy_update_thread_locked(thread_t thread, int update_cpu, boolean_t update_throttle, boolean_t update_sfi, boolean_t update_qos);
+static boolean_t task_policy_update_coalition_focal_tasks(task_t task, int prev_role, int next_role);
 
 static int proc_get_effective_policy(task_t task, thread_t thread, int policy);
 
@@ -163,6 +165,9 @@ extern void     rethrottle_thread(void * uthread);
 extern void     proc_apply_task_networkbg(void * bsd_info, thread_t thread);
 #endif /* MACH_BSD */
 
+extern zone_t thread_qos_override_zone;
+static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset);
+
 
 /* Importance Inheritance related helper functions */
 
@@ -763,6 +768,11 @@ task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_cr
                                        next.t_qos_ceiling = THREAD_QOS_UNSPECIFIED;
                                        break;
 
+                               case TASK_DEFAULT_APPLICATION:
+                                       /* This is 'may render UI but we don't know if it's focal/nonfocal' */
+                                       next.t_qos_ceiling = THREAD_QOS_UNSPECIFIED;
+                                       break;                                  
+
                                case TASK_NONUI_APPLICATION:
                                        /* i.e. 'off-screen' */
                                        next.t_qos_ceiling = THREAD_QOS_LEGACY;
@@ -1174,6 +1184,12 @@ task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_cr
                        (prev.t_sfi_managed != next.t_sfi_managed))
                        update_sfi = TRUE;
 
+/* TODO: if CONFIG_SFI */
+               if (prev.t_role != next.t_role && task_policy_update_coalition_focal_tasks(task, prev.t_role, next.t_role)) {
+                       update_sfi = TRUE;
+                       pend_token->tpt_update_coal_sfi = 1;
+               }
+
                task_policy_update_task_locked(task, update_throttle, update_threads, update_sfi);
        } else {
                int update_cpu = 0;
@@ -1197,6 +1213,35 @@ task_policy_update_internal_locked(task_t task, thread_t thread, boolean_t in_cr
        }
 }
 
+/*
+ * Yet another layering violation. We reach out and bang on the coalition directly.
+ */
+static boolean_t
+task_policy_update_coalition_focal_tasks(task_t     task,
+                                         int        prev_role,
+                                         int        next_role)
+{
+       boolean_t sfi_transition = FALSE;
+
+       if (prev_role != TASK_FOREGROUND_APPLICATION && next_role == TASK_FOREGROUND_APPLICATION) {
+               if (coalition_adjust_focal_task_count(task->coalition, 1) == 1)
+                       sfi_transition = TRUE;
+       } else if (prev_role == TASK_FOREGROUND_APPLICATION && next_role != TASK_FOREGROUND_APPLICATION) {
+               if (coalition_adjust_focal_task_count(task->coalition, -1) == 0)
+                       sfi_transition = TRUE;
+       }
+
+       if (prev_role != TASK_BACKGROUND_APPLICATION && next_role == TASK_BACKGROUND_APPLICATION) {
+               if (coalition_adjust_non_focal_task_count(task->coalition, 1) == 1)
+                       sfi_transition = TRUE;
+       } else if (prev_role == TASK_BACKGROUND_APPLICATION && next_role != TASK_BACKGROUND_APPLICATION) {
+               if (coalition_adjust_non_focal_task_count(task->coalition, -1) == 0)
+                       sfi_transition = TRUE;
+       }
+
+       return sfi_transition;
+}
+
 /* Despite the name, the thread's task is locked, the thread is not */
 void
 task_policy_update_thread_locked(thread_t thread,
@@ -1355,6 +1400,9 @@ task_policy_update_complete_unlocked(task_t task, thread_t thread, task_pend_tok
 
                if (pend_token->tpt_update_live_donor)
                        task_importance_update_live_donor(task);
+
+               if (pend_token->tpt_update_coal_sfi)
+                       coalition_sfi_reevaluate(task->coalition, task);
        }
 }
 
@@ -2095,16 +2143,119 @@ void set_thread_iotier_override(thread_t thread, int policy)
  * as the subsystem informs us of the relationships between the threads. The userspace
  * synchronization subsystem should maintain the information of owner->resource and
  * resource->waiters itself.
- *
- * The add/remove routines can return failure if the target of the override cannot be
- * found, perhaps because the resource subsystem doesn't have an accurate view of the
- * resource owner in the face of race conditions.
  */
 
-boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource)
+/*
+ * This helper canonicalizes the resource/resource_type given the current qos_override_mode
+ * in effect. Note that wildcards (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD) may need
+ * to be handled specially in the future, but for now it's fine to slam
+ * *resource to USER_ADDR_NULL even if it was previously a wildcard.
+ */
+static void _canonicalize_resource_and_type(user_addr_t *resource, int *resource_type) {
+       if (qos_override_mode == QOS_OVERRIDE_MODE_OVERHANG_PEAK || qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
+               /* Map all input resource/type to a single one */
+               *resource = USER_ADDR_NULL;
+               *resource_type = THREAD_QOS_OVERRIDE_TYPE_UNKNOWN;
+       } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE) {
+               /* no transform */
+       } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH) {
+               /* Map all dispatch overrides to a single one, to avoid memory overhead */
+               if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
+                       *resource = USER_ADDR_NULL;
+               }
+       } else if (qos_override_mode == QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_SINGLE_MUTEX_OVERRIDE) {
+               /* Map all mutex overrides to a single one, to avoid memory overhead */
+               if (*resource_type == THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX) {
+                       *resource = USER_ADDR_NULL;
+               }
+       }
+}
+
+/* This helper routine finds an existing override if known. Locking should be done by caller */
+static struct thread_qos_override *_find_qos_override(thread_t thread, user_addr_t resource, int resource_type) {
+       struct thread_qos_override *override;
+
+       override = thread->overrides;
+       while (override) {
+               if (override->override_resource == resource &&
+                       override->override_resource_type == resource_type) {
+                       return override;
+               }
+               
+               override = override->override_next;
+       }
+
+       return NULL;
+}
+
+static void _find_and_decrement_qos_override(thread_t thread, user_addr_t resource, int resource_type, boolean_t reset, struct thread_qos_override **free_override_list) {
+       struct thread_qos_override *override, *override_prev;
+
+       override_prev = NULL;
+       override = thread->overrides;
+       while (override) {
+               struct thread_qos_override *override_next = override->override_next;
+
+               if ((THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD == resource || override->override_resource == resource) &&
+                       override->override_resource_type == resource_type) {
+                       if (reset) {
+                               override->override_contended_resource_count = 0;
+                       } else {
+                               override->override_contended_resource_count--;
+                       }
+
+                       if (override->override_contended_resource_count == 0) {
+                               if (override_prev == NULL) {
+                                       thread->overrides = override_next;
+                               } else {
+                                       override_prev->override_next = override_next;
+                               }
+                               
+                               /* Add to out-param for later zfree */
+                               override->override_next = *free_override_list;
+                               *free_override_list = override;
+                       } else {
+                               override_prev = override;
+                       }
+
+                       if (THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD != resource) {
+                               return;
+                       }
+               } else {
+                       override_prev = override;
+               }
+               
+               override = override_next;
+       }
+}
+
+/* This helper recalculates the current requested override using the policy selected at boot */
+static int _calculate_requested_qos_override(thread_t thread)
+{
+       if (qos_override_mode == QOS_OVERRIDE_MODE_IGNORE_OVERRIDE) {
+               return THREAD_QOS_UNSPECIFIED;
+       }
+
+       /* iterate over all overrides and calculate MAX */
+       struct thread_qos_override *override;
+       int qos_override = THREAD_QOS_UNSPECIFIED;
+
+       override = thread->overrides;
+       while (override) {
+               if (qos_override_mode != QOS_OVERRIDE_MODE_FINE_GRAINED_OVERRIDE_BUT_IGNORE_DISPATCH ||
+                       override->override_resource_type != THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE) {
+                       qos_override = MAX(qos_override, override->override_qos);
+               }
+               
+               override = override->override_next;
+       }
+
+       return qos_override;
+}
+
+boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t tid, int override_qos, boolean_t first_override_for_resource, user_addr_t resource, int resource_type)
 {
        thread_t        self = current_thread();
-       int                     resource_count;
        struct task_pend_token pend_token = {};
 
        /* XXX move to thread mutex when thread policy does */
@@ -2138,44 +2289,104 @@ boolean_t proc_thread_qos_add_override(task_t task, thread_t thread, uint64_t ti
        DTRACE_BOOST5(qos_add_override_pre, uint64_t, tid, uint64_t, thread->requested_policy.thrp_qos,
                uint64_t, thread->effective_policy.thep_qos, int, override_qos, boolean_t, first_override_for_resource);
 
+       struct task_requested_policy requested = thread->requested_policy;
+       struct thread_qos_override *override;
+       struct thread_qos_override *deferred_free_override = NULL;
+       int new_qos_override, prev_qos_override;
+       int new_effective_qos;
+       boolean_t has_thread_reference = FALSE;
+
+       _canonicalize_resource_and_type(&resource, &resource_type);
+
        if (first_override_for_resource) {
-               resource_count = ++thread->usynch_override_contended_resource_count;
+               override = _find_qos_override(thread, resource, resource_type);
+               if (override) {
+                       override->override_contended_resource_count++;
+               } else {
+                       struct thread_qos_override *override_new;
+
+                       /* We need to allocate a new object. Drop the task lock and recheck afterwards in case someone else added the override */
+                       thread_reference(thread);
+                       has_thread_reference = TRUE;
+                       task_unlock(task);
+                       override_new = zalloc(thread_qos_override_zone);
+                       task_lock(task);
+
+                       override = _find_qos_override(thread, resource, resource_type);
+                       if (override) {
+                               /* Someone else already allocated while the task lock was dropped */
+                               deferred_free_override = override_new;
+                               override->override_contended_resource_count++;
+                       } else {
+                               override = override_new;
+                               override->override_next = thread->overrides;
+                               override->override_contended_resource_count = 1 /* since first_override_for_resource was TRUE */;
+                               override->override_resource = resource;
+                               override->override_resource_type = resource_type;
+                               override->override_qos = THREAD_QOS_UNSPECIFIED;
+                               thread->overrides = override;
+                       }
+               }
        } else {
-               resource_count = thread->usynch_override_contended_resource_count;
+               override = _find_qos_override(thread, resource, resource_type);
        }
 
-       struct task_requested_policy requested = thread->requested_policy;
+       if (override) {
+               if (override->override_qos == THREAD_QOS_UNSPECIFIED)
+                       override->override_qos = override_qos;
+               else
+                       override->override_qos = MAX(override->override_qos, override_qos);
+       }
 
-       if (requested.thrp_qos_override == THREAD_QOS_UNSPECIFIED)
-               requested.thrp_qos_override = override_qos;
-       else
-               requested.thrp_qos_override = MAX(requested.thrp_qos_override, override_qos);
+       /* Determine how to combine the various overrides into a single current requested override */
+       prev_qos_override = requested.thrp_qos_override;
+       new_qos_override = _calculate_requested_qos_override(thread);
 
-       thread->requested_policy = requested;
+       if (new_qos_override != prev_qos_override) {
+               requested.thrp_qos_override = new_qos_override;
 
-       task_policy_update_locked(task, thread, &pend_token);
+               thread->requested_policy = requested;
+
+               task_policy_update_locked(task, thread, &pend_token);
+               
+               if (!has_thread_reference) {
+                       thread_reference(thread);
+               }
+               
+               task_unlock(task);
+               
+               task_policy_update_complete_unlocked(task, thread, &pend_token);
 
-       thread_reference(thread);
+               new_effective_qos = thread->effective_policy.thep_qos;
+               
+               thread_deallocate(thread);
+       } else {
+               new_effective_qos = thread->effective_policy.thep_qos;
 
-       task_unlock(task);
+               task_unlock(task);
 
-       task_policy_update_complete_unlocked(task, thread, &pend_token);
+               if (has_thread_reference) {
+                       thread_deallocate(thread);
+               }
+       }
 
-       DTRACE_BOOST3(qos_add_override_post, uint64_t, requested.thrp_qos_override,
-               uint64_t, thread->effective_policy.thep_qos, int, resource_count);
+       if (deferred_free_override) {
+               zfree(thread_qos_override_zone, deferred_free_override);
+       }
 
-       thread_deallocate(thread);
+       DTRACE_BOOST3(qos_add_override_post, int, prev_qos_override, int, new_qos_override,
+                                 int, new_effective_qos);
 
        KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_ADD_OVERRIDE)) | DBG_FUNC_END,
-                                                 requested.thrp_qos_override, resource_count, 0, 0, 0);
+                                                 new_qos_override, resource, resource_type, 0, 0);
 
        return TRUE;
 }
 
-boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid)
+
+static boolean_t _proc_thread_qos_remove_override_internal(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type, boolean_t reset)
 {
        thread_t        self = current_thread();
-       int                     resource_count;
        struct task_pend_token pend_token = {};
 
        /* XXX move to thread mutex when thread policy does */
@@ -2202,36 +2413,84 @@ boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t
                }
        }
 
-       resource_count = --thread->usynch_override_contended_resource_count;
+       struct task_requested_policy requested = thread->requested_policy;
+       struct thread_qos_override *deferred_free_override_list = NULL;
+       int new_qos_override, prev_qos_override;
+
+       _canonicalize_resource_and_type(&resource, &resource_type);
+
+       _find_and_decrement_qos_override(thread, resource, resource_type, reset, &deferred_free_override_list);
 
        KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_START,
-                                                 thread_tid(thread), resource_count, 0, 0, 0);
+                                                 thread_tid(thread), resource, reset, 0, 0);
+
+       /* Determine how to combine the various overrides into a single current requested override */
+       prev_qos_override = requested.thrp_qos_override;
+       new_qos_override = _calculate_requested_qos_override(thread);
+
+       if (new_qos_override != prev_qos_override) {
+               requested.thrp_qos_override = new_qos_override;
 
-       if (0 == resource_count) {
-               thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
+               thread->requested_policy = requested;
 
                task_policy_update_locked(task, thread, &pend_token);
                
                thread_reference(thread);
-
+                       
                task_unlock(task);
                
                task_policy_update_complete_unlocked(task, thread, &pend_token);
-
+               
                thread_deallocate(thread);
-       } else if (0 > resource_count) {
-               // panic("usynch_override_contended_resource_count underflow for thread %p", thread);
-               task_unlock(task);
        } else {
                task_unlock(task);
        }
 
+       while (deferred_free_override_list) {
+               struct thread_qos_override *override_next = deferred_free_override_list->override_next;
+               
+               zfree(thread_qos_override_zone, deferred_free_override_list);
+               deferred_free_override_list = override_next;
+       }
+
        KERNEL_DEBUG_CONSTANT((IMPORTANCE_CODE(IMP_USYNCH_QOS_OVERRIDE, IMP_USYNCH_REMOVE_OVERRIDE)) | DBG_FUNC_END,
                                                  0, 0, 0, 0, 0);
 
        return TRUE;
 }
 
+boolean_t proc_thread_qos_remove_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
+{
+       return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, FALSE);
+
+}
+
+boolean_t proc_thread_qos_reset_override(task_t task, thread_t thread, uint64_t tid, user_addr_t resource, int resource_type)
+{
+       return _proc_thread_qos_remove_override_internal(task, thread, tid, resource, resource_type, TRUE);
+}
+
+/* Deallocate before thread termination */
+void proc_thread_qos_deallocate(thread_t thread)
+{
+       task_t task = thread->task;
+       struct thread_qos_override *override;
+
+       /* XXX move to thread mutex when thread policy does */
+       task_lock(task);
+       override = thread->overrides;
+       thread->overrides = NULL;               /* task policy re-evaluation needed? */
+       thread->requested_policy.thrp_qos_override = THREAD_QOS_UNSPECIFIED;
+       task_unlock(task);
+
+       while (override) {
+               struct thread_qos_override *override_next = override->override_next;
+               
+               zfree(thread_qos_override_zone, override);
+               override = override_next;
+       }
+}
+
 /* TODO: remove this variable when interactive daemon audit period is over */
 extern boolean_t ipc_importance_interactive_receiver;
 
index f8556bbab77097b705b49af756192ed72ba3abd3..b9a7ae0ebe5a749c2d0e7902d507a0389069eadd 100644 (file)
@@ -146,6 +146,8 @@ static lck_grp_attr_t               thread_lck_grp_attr;
 lck_attr_t                                     thread_lck_attr;
 lck_grp_t                                      thread_lck_grp;
 
+struct zone                                    *thread_qos_override_zone;
+
 decl_simple_lock_data(static,thread_stack_lock)
 static queue_head_t            thread_stack_queue;
 
@@ -326,7 +328,7 @@ thread_bootstrap(void)
        thread_template.effective_policy = default_task_effective_policy;
        thread_template.pended_policy    = default_task_pended_policy;
 
-       thread_template.usynch_override_contended_resource_count = 0;
+       bzero(&thread_template.overrides, sizeof(thread_template.overrides));
 
        thread_template.iotier_override = THROTTLE_LEVEL_NONE;
        thread_template.thread_io_stats = NULL;
@@ -355,6 +357,16 @@ thread_init(void)
                        THREAD_CHUNK * sizeof(struct thread),
                        "threads");
 
+       thread_qos_override_zone = zinit(
+               sizeof(struct thread_qos_override),
+               4 * thread_max * sizeof(struct thread_qos_override),
+               PAGE_SIZE,
+               "thread qos override");
+       zone_change(thread_qos_override_zone, Z_EXPAND, TRUE);
+       zone_change(thread_qos_override_zone, Z_COLLECT, TRUE);
+       zone_change(thread_qos_override_zone, Z_CALLERACCT, FALSE);
+       zone_change(thread_qos_override_zone, Z_NOENCRYPT, TRUE);
+
        lck_grp_attr_setdefault(&thread_lck_grp_attr);
        lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
        lck_attr_setdefault(&thread_lck_attr);
@@ -527,6 +539,8 @@ thread_deallocate(
 
        ipc_thread_terminate(thread);
 
+       proc_thread_qos_deallocate(thread);
+
        task = thread->task;
 
 #ifdef MACH_BSD 
index a4074c30188fe59b68de803ff1b48d85642ef3ea..a81bc6c8ddf44c2e6b13edb17aa739798edae589 100644 (file)
@@ -463,7 +463,13 @@ struct thread {
        struct task_pended_policy        pended_policy;
 
        /* usynch override is protected by the task lock, eventually will be thread mutex */
-       int             usynch_override_contended_resource_count;
+       struct thread_qos_override {
+               struct thread_qos_override      *override_next;
+               uint32_t        override_contended_resource_count;
+               int16_t         override_qos;
+               int16_t         override_resource_type;
+               user_addr_t     override_resource;
+       } *overrides;
 
        int     iotier_override; /* atomic operations to set, cleared on ret to user */
        io_stat_info_t                  thread_io_stats; /* per-thread I/O statistics */
index 03e4f646a5628d8cd45f774a81509a110b173224..7d3a98630b2b3186e37fa34c58c87979dd36b128 100644 (file)
@@ -393,6 +393,9 @@ __END_DECLS
 #define CPUFAMILY_INTEL_SANDYBRIDGE    0x5490b78c
 #define CPUFAMILY_INTEL_IVYBRIDGE      0x1f65e835
 #define CPUFAMILY_INTEL_HASWELL                0x10b282dc
+#if !defined(XNU_HIDE_SEED)
+#define CPUFAMILY_INTEL_BROADWELL      0x582ed09c
+#endif /* not XNU_HIDE_SEED */
 #define CPUFAMILY_ARM_9                        0xe73283ae
 #define CPUFAMILY_ARM_11               0x8ff620d8
 #define CPUFAMILY_ARM_XSCALE           0x53b005f5
index 8ab95290c6d4f34a030e4f0433d04c48696af3d4..2527143897d57c623a84d6c936d6746f2f9841e6 100644 (file)
@@ -300,6 +300,56 @@ typedef struct thread_policy_state         *thread_policy_state_t;
 
 #define THREAD_QOS_MIN_TIER_IMPORTANCE (-15)
 
+/*
+ * Overrides are inputs to the task/thread policy engine that
+ * temporarily elevate the effective QoS of a thread without changing
+ * its steady-state (and round-trip-able) requested QoS. The
+ * interfaces into the kernel allow the caller to associate a resource
+ * and type that describe the reason/lifecycle of the override. For
+ * instance, a contended pthread_mutex_t held by a UTILITY thread
+ * might get an override to USER_INTERACTIVE, with the resource
+ * being the userspace address of the pthread_mutex_t. When the
+ * owning thread releases that resource, it can call into the
+ * task policy subsystem to drop the override because of that resource,
+ * although if more contended locks are held by the thread, the
+ * effective QoS may remain overridden for longer.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX is used for contended
+ * pthread_mutex_t's via the pthread kext. The holder gets an override
+ * with resource=&mutex and a count of 1 by the initial contender.
+ * Subsequent contenders raise the QoS value, until the holder
+ * decrements the count to 0 and the override is released.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK is unimplemented and has no
+ * specified semantics.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE are explicitly
+ * paired start/end overrides on a target thread. The resource can
+ * either be a memory allocation in userspace, or the pthread_t of the
+ * overrider if no allocation was used.
+ *
+ * THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE are used to
+ * override the QoS of a thread currently draining a serial dispatch
+ * queue, so that it can get to a block of higher QoS than its
+ * predecessors. The override is applied by a thread enqueueing work
+ * with resource=&queue, and reset by the thread that was overriden
+ * once it has drained the queue. Since the ++ and reset are
+ * asynchronous, there is the possibility of a ++ after the target
+ * thread has issued a reset, in which case the workqueue thread may
+ * issue a reset-all in its outermost scope before deciding whether it
+ * should return to dequeueing work from the global concurrent queues,
+ * or return to the kernel.
+ */
+
+#define THREAD_QOS_OVERRIDE_TYPE_UNKNOWN                                       (0)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_MUTEX                         (1)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_RWLOCK                                (2)
+#define THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE     (3)
+#define THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE        (4)
+
+/* A special resource value to indicate a resource wildcard */
+#define THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD (~((user_addr_t)0))
+
 struct thread_qos_policy {
        integer_t qos_tier;
        integer_t tier_importance;
index 04726c55c2b737fc5ce416e3a0974e06c808b2a9..499fb9cf5d7929bd860c8149c320612e31650c3c 100644 (file)
@@ -250,6 +250,12 @@ extern vm_offset_t         vm_kernel_addrperm;
 
 extern vm_offset_t             vm_kext_base;
 extern vm_offset_t             vm_kext_top;
+extern vm_offset_t      vm_prelink_stext;
+extern vm_offset_t      vm_prelink_etext;
+extern vm_offset_t      vm_prelink_sinfo;
+extern vm_offset_t      vm_prelink_einfo;
+extern vm_offset_t      vm_slinkedit;
+extern vm_offset_t      vm_elinkedit;
 
 #define VM_KERNEL_IS_SLID(_o)                                                 \
                (((vm_offset_t)(_o) >= vm_kernel_base) &&                      \
@@ -258,6 +264,18 @@ extern vm_offset_t         vm_kext_top;
                 (((vm_offset_t)(_o) >= vm_kext_base) &&                        \
                  ((vm_offset_t)(_o) <  vm_kext_top))
 
+#define VM_KERNEL_IS_PRELINKTEXT(_o)        \
+        (((vm_offset_t)(_o) >= vm_prelink_stext) &&     \
+         ((vm_offset_t)(_o) <  vm_prelink_etext))
+
+#define VM_KERNEL_IS_PRELINKINFO(_o)        \
+        (((vm_offset_t)(_o) >= vm_prelink_sinfo) &&     \
+         ((vm_offset_t)(_o) <  vm_prelink_einfo))
+
+#define VM_KERNEL_IS_KEXT_LINKEDIT(_o)        \
+        (((vm_offset_t)(_o) >= vm_slinkedit) &&     \
+         ((vm_offset_t)(_o) <  vm_elinkedit))
+
 #define VM_KERNEL_SLIDE(_u)                                                   \
                ((vm_offset_t)(_u) + vm_kernel_slide)
 
@@ -296,7 +314,10 @@ extern vm_offset_t         vm_kext_top;
  */
 #define VM_KERNEL_UNSLIDE(_v)                                                 \
                ((VM_KERNEL_IS_SLID(_v) ||                                     \
-                 VM_KERNEL_IS_KEXT(_v)) ?                                     \
+          VM_KERNEL_IS_KEXT(_v) ||      \
+          VM_KERNEL_IS_PRELINKTEXT(_v) ||   \
+          VM_KERNEL_IS_PRELINKINFO(_v) ||   \
+          VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ?     \
                        (vm_offset_t)(_v) - vm_kernel_slide :                  \
                        (vm_offset_t)(_v))
 
@@ -307,7 +328,10 @@ extern vm_offset_t         vm_kext_top;
 
 #define VM_KERNEL_UNSLIDE_OR_PERM(_v)                                         \
                ((VM_KERNEL_IS_SLID(_v) ||                                     \
-                 VM_KERNEL_IS_KEXT(_v)) ?                                     \
+          VM_KERNEL_IS_KEXT(_v) ||      \
+          VM_KERNEL_IS_PRELINKTEXT(_v) ||   \
+          VM_KERNEL_IS_PRELINKINFO(_v) ||   \
+          VM_KERNEL_IS_KEXT_LINKEDIT(_v)) ?     \
                        (vm_offset_t)(_v) - vm_kernel_slide :                  \
                        VM_KERNEL_ADDRPERM(_v))
        
index 3658a00a2928248fce2aa3a43fcca212524ad553..e65ac5c15f9fbaf1dabd88de63d09c0fcf478603 100644 (file)
@@ -8704,7 +8704,9 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
 
 
        /* First step is just to get the size of the upl to find out how big the reprio info is */
-       upl_lock(upl);
+       if(!upl_try_lock(upl))
+               return;
+
        if (upl->decmp_io_upl == NULL) {
                /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
                upl_unlock(upl);
@@ -8722,7 +8724,9 @@ vm_decmp_upl_reprioritize(upl_t upl, int prio)
                return;
 
        /* Now again take the lock, recheck the state and grab out the required info */
-       upl_lock(upl);
+       if(!upl_try_lock(upl))
+               goto out;
+
        if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
                /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
                upl_unlock(upl);
index 6d358ed3cf135c59aa7480110741d0078039ff71..cb148c9df7962519e82031ae0bb2a0c93021fa59 100644 (file)
@@ -241,6 +241,7 @@ extern void         vm_pageclean_setup(
 #define upl_lock_destroy(object)       lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
 #define upl_lock(object)       lck_mtx_lock(&(object)->Lock)
 #define upl_unlock(object)     lck_mtx_unlock(&(object)->Lock)
+#define upl_try_lock(object)   lck_mtx_try_lock(&(object)->Lock)
 
 #define MAX_VECTOR_UPL_ELEMENTS        8
 
index b9d7910dbb6bc501d65e8b4cb592fbb8c511e3a3..6411418061d88e562eae15aeee01d98e3d2bc7a5 100644 (file)
@@ -74,6 +74,7 @@ extern int _bcopystr(const void *, void *, vm_size_t, vm_size_t *);
 #define COPYINPHYS     3       /* from user virtual to kernel physical */
 #define COPYOUTPHYS    4       /* from kernel physical to user virtual */
 
+
 static int
 copyio(int copy_type, user_addr_t user_addr, char *kernel_addr,
        vm_size_t nbytes, vm_size_t *lencopied, int use_kernel_map)
index 965049dd671c9686b458e990bad72b37c809766f..40f8cfedbd195ab93bce0942825d3740f169d90c 100644 (file)
@@ -327,16 +327,18 @@ def GetThreadSummary(thread):
     
 
 @lldb_type_summary(['coalition_t', 'coalition'])
-@header("type coalition summary (header tbw)")
+@header("{:>18s} {:>10s} {:>8s} {:>8s} {:>8s} {:>8s}".format("coalition", "id", "refcount", "active", "focal", "nonfocal"))
 def GetCoalitionSummary(coal):
     out_string = ""
-    format_string = '{0: <#020x} {1: <d} {2: <d} {3: <d}'
+    format_string = '{:>#018x} {:>10d} {:>8d} {:>8d} {:>8d} {:>8d}'
+
     flags_string = ''
     if (coal.terminated):
         flags_string += ' terminated'
     if (coal.reaped):
         flags_string += ' reaped'
-    out_string += format_string.format(coal, coal.id, coal.ref_count, coal.active_count, )
+    out_string += format_string.format(coal, coal.id, coal.ref_count, coal.active_count, coal.focal_tasks_count, coal.non_focal_tasks_count)
+
     return out_string
 
 @lldb_type_summary(['proc', 'proc *'])
@@ -724,9 +726,47 @@ def ShowAllCoalitions(cmd_args=None):
     """  Routine to print a summary listing of all the coalitions
     """
     global kern
+    
+    role_strs = {
+                 0 : "TASK_UNSPECIFIED",
+                 1 : "TASK_FOREGROUND_APPLICATION",
+                 2 : "TASK_BACKGROUND_APPLICATION",
+                 3 : "TASK_CONTROL_APPLICATION",
+                 4 : "TASK_GRAPHICS_SERVER",
+                 5 : "TASK_THROTTLE_APPLICATION",
+                 6 : "TASK_NONUI_APPLICATION",
+                 7 : "TASK_DEFAULT_APPLICATION",
+                }
+    
+    sfi_strs = {
+                 0x0  : "SFI_CLASS_UNSPECIFIED",
+                 0x1  : "SFI_CLASS_DARWIN_BG",
+                 0x2  : "SFI_CLASS_APP_NAP",
+                 0x3  : "SFI_CLASS_MANAGED_FOCAL",
+                 0x4  : "SFI_CLASS_MANAGED_NONFOCAL",
+                 0x5  : "SFI_CLASS_DEFAULT_FOCAL",
+                 0x6  : "SFI_CLASS_DEFAULT_NONFOCAL",
+                 0x7  : "SFI_CLASS_KERNEL",
+                 0x8  : "SFI_CLASS_OPTED_OUT",
+                 0x9  : "SFI_CLASS_UTILITY",
+                 0xA  : "SFI_CLASS_LEGACY_FOCAL",
+                 0xB  : "SFI_CLASS_LEGACY_NONFOCAL",
+                 0xC  : "SFI_CLASS_USER_INITIATED_FOCAL",
+                 0xD  : "SFI_CLASS_USER_INITIATED_NONFOCAL",
+                 0xE  : "SFI_CLASS_USER_INTERACTIVE_FOCAL",
+                 0xF  : "SFI_CLASS_USER_INTERACTIVE_NONFOCAL",
+                 0x10 : "SFI_CLASS_MAINTENANCE",
+                }
+    
+
     print GetCoalitionSummary.header
     for c in kern.coalitions:
         print GetCoalitionSummary(c)
+        for task in IterateQueue(c.tasks, "task_t", "coalition_tasks"):
+            print "\t" + hex(task) + " " + GetProcNameForTask(task) + " " + role_strs[int(task.effective_policy.t_role)]
+            for thread in IterateQueue(task.threads, "thread_t", "task_threads"):
+                print "\t\t" + hex(thread) + " " + sfi_strs[int(thread.sfi_class)]
+
 
 @lldb_command('showalltasks') 
 def ShowAllTasks(cmd_args=None):
index 6500ebb8e7d87b4f7ad0e87552451d92313180c5..43d2dc2edd7f548f663b0ae2c79167a5618b72c6 100644 (file)
@@ -23,7 +23,7 @@ endif
 
 DSTROOT?=$(shell /bin/pwd)
 
-CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os $(ISYSROOT)
+CFLAGS:=$(patsubst %, -arch %,$(ARCHS)) -g -Wall -Os -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
 
 all: $(DSTROOT)/jitter
 
index f81ab3396dbedba90d9bafa250fbbdea306b2046..abcfe87b8492f06bd65affe2b43fabba3300d171 100644 (file)
@@ -29,7 +29,7 @@
 #include <stdio.h>
 #include <math.h>
 #include <sys/wait.h>
-#include <sys/syscall.h>
+#include <sys/kdebug.h>
 #include <sys/types.h>
 #include <sys/ptrace.h>
 #include <semaphore.h>
@@ -371,7 +371,7 @@ main(int argc, char **argv)
                
                /* Too much: cut a tracepoint for a debugger */
                if (jitter_arr[i] >= too_much) {
-                       syscall(SYS_kdebug_trace, 0xeeeeeeee, 0, 0, 0, 0);
+                       kdebug_trace(0xeeeee0 | DBG_FUNC_NONE, 0, 0, 0, 0);
                }
 
                if (wakeup_second_thread) {
@@ -466,7 +466,7 @@ second_thread(void *args)
                
                /* Too much: cut a tracepoint for a debugger */
                if (secargs->wakeup_second_jitter_arr[i] >= secargs->too_much) {
-                       syscall(SYS_kdebug_trace, 0xeeeeeeef, 0, 0, 0, 0);
+                       kdebug_trace(0xeeeee4 | DBG_FUNC_NONE, 0, 0, 0, 0);
                }
 
                kret = semaphore_signal(secargs->return_semaphore);
index 97910a06f64d0a7da7e780a360287b665115eefc..af0aedf2d846eca2117451493505c11f4f378f91 100644 (file)
@@ -30,7 +30,7 @@
 #include <math.h>
 #include <sys/wait.h>
 #include <sys/param.h>
-#include <sys/syscall.h>
+#include <sys/kdebug.h>
 #include <sys/types.h>
 #include <sys/ptrace.h>
 #include <semaphore.h>
@@ -588,7 +588,11 @@ main(int argc, char **argv)
                                printf("Worst on this round was %.2f us.\n", ((float)worst_latencies_from_first_ns[i]) / 1000.0);
                        }
 
-                       _tmp = syscall(SYS_kdebug_trace, 0xEEEEEEEE, 0, 0, 0, 0);
+                       _tmp = kdebug_trace(0xeeeee0 | DBG_FUNC_NONE,
+                                                                  worst_latencies_from_first_ns[i] >> 32,
+                                                                  worst_latencies_from_first_ns[i] & 0xFFFFFFFF,
+                                                                  traceworthy_latency_ns >> 32,
+                                                                  traceworthy_latency_ns & 0xFFFFFFFF);
                }
 
                /* Let worker threads get back to sleep... */
@@ -643,7 +647,7 @@ selfexec_with_apptype(int argc, char *argv[])
        char *new_argv[argc + 1 + 1 /* NULL */];
        int i;
        char prog[PATH_MAX];
-       int32_t prog_size = PATH_MAX;
+       uint32_t prog_size = PATH_MAX;
 
        ret = _NSGetExecutablePath(prog, &prog_size);
        if (ret != 0) err(1, "_NSGetExecutablePath");