/*
- * Copyright (c) 2003-2011 Apple Inc. All rights reserved.
+ * Copyright (c) 2003-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
SYSCTL_INT(_machdep, OID_AUTO, pltrace,
CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
&plctrace_enabled, 0, "");
+
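+/*
+ * When non-zero, hash a thread's FP/SIMD register state across page faults
+ * to catch save-area corruption (see thread_fpsimd_hash() in osfmk/i386/fpu.c).
+ */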
+extern int fpsimd_fault_popc;
+SYSCTL_INT(_machdep, OID_AUTO, fpsimd_fault_popc,
+ CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &fpsimd_fault_popc, 0, "");
+
#endif /* DEVELOPMENT || DEBUG */
static int sd_closelog(vfs_context_t);
static void sd_log(vfs_context_t, const char *, ...);
static void proc_shutdown(void);
-static void kernel_hwm_panic_info(void);
+static void zprint_panic_info(void);
extern void halt_log_enter(const char * what, const void * pc, uint64_t time);
#if DEVELOPMENT || DEBUG
extern vm_size_t panic_kext_memory_size;
static void
-kernel_hwm_panic_info(void)
+zprint_panic_info(void)
{
unsigned int num_sites;
kern_return_t kr;
return (system_inshutdown);
}
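+/*
+ * Common handler for userspace-requested panics: optionally log zone
+ * allocation info (RB_PANIC_ZPRINT) before panicking with the message.
+ */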
+static void
+panic_kernel(int howto, char *message)
+{
+ if ((howto & RB_PANIC_ZPRINT) == RB_PANIC_ZPRINT) {
+ zprint_panic_info();
+ }
+ panic("userspace panic: %s", message);
+}
+
int
reboot_kernel(int howto, char *message)
{
int hostboot_option=0;
uint64_t startTime;
+ if ((howto & (RB_PANIC | RB_QUICK)) == (RB_PANIC | RB_QUICK)) {
+ panic_kernel(howto, message);
+ }
+
if (!OSCompareAndSwap(0, 1, &system_inshutdown)) {
if ( (howto&RB_QUICK) == RB_QUICK)
goto force_reboot;
force_reboot:
if (howto & RB_PANIC) {
- if (strncmp(message, "Kernel memory has exceeded limits", 33) == 0) {
- kernel_hwm_panic_info();
- }
- panic ("userspace panic: %s", message);
+ panic_kernel(howto, message);
}
if (howto & RB_POWERDOWN)
/* Allocate the spin lock */
tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
}
+
527 AUE_NULL ALL { int ntp_adjtime(struct timex *tp); }
528 AUE_NULL ALL { int ntp_gettime(struct ntptimeval *ntvp); }
529 AUE_NULL ALL { int os_fault_with_payload(uint32_t reason_namespace, uint64_t reason_code, void *payload, uint32_t payload_size, const char *reason_string, uint64_t reason_flags); }
+530 AUE_NULL ALL { int enosys(void); }
+531 AUE_NULL ALL { int enosys(void); }
0x1800008 MACH_CLOCK_BRIDGE_REMOTE_TIME
0x180000c MACH_CLOCK_BRIDGE_RESET_TS
0x1800010 MACH_CLOCK_BRIDGE_TS_PARAMS
+0x1800014 MACH_CLOCK_BRIDGE_SKIP_TS
+0x1800018 MACH_CLOCK_BRIDGE_TS_MISMATCH
+0x180001c MACH_CLOCK_BRIDGE_OBSV_RATE
0x1900000 MP_TLB_FLUSH
0x1900004 MP_CPUS_CALL
0x1900008 MP_CPUS_CALL_LOCAL
0x5310290 CPUPM_URGENCY
0x5310294 CPUPM_IDLE_EXIT1
0x5310298 CPUPM_PST_QOS_CONT
0x531029c CPUPM_MID
0x5330000 HIBERNATE
0x5330004 HIBERNATE_WRITE_IMAGE
0x5330008 HIBERNATE_MACHINE_INIT
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
+#include <sys/coalition.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
static bool necp_remove_string_to_id_mapping(struct necp_string_id_mapping_list *list, char *domain);
static struct necp_string_id_mapping *necp_lookup_string_with_id_locked(struct necp_string_id_mapping_list *list, u_int32_t local_id);
+static struct necp_kernel_socket_policy *necp_kernel_socket_policy_find(necp_kernel_policy_id policy_id);
+static struct necp_kernel_ip_output_policy *necp_kernel_ip_output_policy_find(necp_kernel_policy_id policy_id);
+
static LIST_HEAD(_necp_kernel_service_list, necp_service_registration) necp_registered_service_list;
static char *necp_create_trimmed_domain(char *string, size_t length);
static bool necp_remove_route_rule(struct necp_route_rule_list *list, u_int32_t route_rule_id);
static bool necp_route_is_allowed(struct rtentry *route, ifnet_t interface, u_int32_t route_rule_id, u_int32_t *interface_type_denied);
static struct necp_route_rule *necp_lookup_route_rule_locked(struct necp_route_rule_list *list, u_int32_t route_rule_id);
+static inline void necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info);
#define MAX_AGGREGATE_ROUTE_RULES 16
struct necp_aggregate_route_rule {
LCK_RW_ASSERT(&necp_kernel_policy_lock, LCK_RW_ASSERT_EXCLUSIVE);
if (socket_level) {
- necp_last_kernel_socket_policy_id++;
- if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
- necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
- necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
- }
- newid = necp_last_kernel_socket_policy_id;
+ bool wrapped = FALSE;
+ do {
+ necp_last_kernel_socket_policy_id++;
+ if (necp_last_kernel_socket_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET ||
+ necp_last_kernel_socket_policy_id >= NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
+ if (wrapped) {
+ // Already wrapped, give up
+ NECPLOG0(LOG_ERR, "Failed to find a free socket kernel policy ID.\n");
+ return (NECP_KERNEL_POLICY_ID_NONE);
+ }
+ necp_last_kernel_socket_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_SOCKET;
+ wrapped = TRUE;
+ }
+ newid = necp_last_kernel_socket_policy_id;
+ } while (necp_kernel_socket_policy_find(newid) != NULL); // If already used, keep trying
} else {
- necp_last_kernel_ip_policy_id++;
- if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
- necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
- }
- newid = necp_last_kernel_ip_policy_id;
+ bool wrapped = FALSE;
+ do {
+ necp_last_kernel_ip_policy_id++;
+ if (necp_last_kernel_ip_policy_id < NECP_KERNEL_POLICY_ID_FIRST_VALID_IP) {
+ if (wrapped) {
+ // Already wrapped, give up
+ NECPLOG0(LOG_ERR, "Failed to find a free IP kernel policy ID.\n");
+ return (NECP_KERNEL_POLICY_ID_NONE);
+ }
+ necp_last_kernel_ip_policy_id = NECP_KERNEL_POLICY_ID_FIRST_VALID_IP;
+ wrapped = TRUE;
+ }
+ newid = necp_last_kernel_ip_policy_id;
+ } while (necp_kernel_ip_output_policy_find(newid) != NULL); // If already used, keep trying
}
if (newid == NECP_KERNEL_POLICY_ID_NONE) {
- NECPLOG0(LOG_DEBUG, "Allocate kernel policy id failed.\n");
- return (0);
+ NECPLOG0(LOG_ERR, "Allocate kernel policy id failed.\n");
+ return (NECP_KERNEL_POLICY_ID_NONE);
}
return (newid);
return (copied_string);
}
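+/*
+ * A process without the NECP match entitlement may still match via its
+ * jetsam coalition: check the coalition leader's credential instead.
+ */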
+static inline void
+necp_get_parent_cred_result(proc_t proc, struct necp_socket_info *info)
+{
+ task_t task = proc_task(proc ? proc : current_proc());
+ coalition_t coal = COALITION_NULL;
+ Boolean is_leader = coalition_is_leader(task, COALITION_TYPE_JETSAM, &coal);
+
+ if (is_leader == TRUE) {
+ // No parent, nothing to do
+ return;
+ }
+
+ if (coal != NULL) {
+ task_t lead_task = coalition_get_leader(coal);
+ if (lead_task != NULL) {
+ proc_t lead_proc = get_bsdtask_info(lead_task);
+ if (lead_proc != NULL) {
+ kauth_cred_t lead_cred = kauth_cred_proc_ref(lead_proc);
+ if (lead_cred != NULL) {
+ errno_t cred_result = priv_check_cred(lead_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
+ kauth_cred_unref(&lead_cred);
+ info->cred_result = cred_result;
+ }
+ }
+ task_deallocate(lead_task);
+ }
+ }
+}
+
#define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX)
static void
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, proc_t proc, struct necp_socket_info *info)
if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT && proc != NULL) {
info->cred_result = priv_check_cred(proc_ucred(proc), PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
+ if (info->cred_result != 0) {
+ // Process does not have entitlement, check the parent process
+ necp_get_parent_cred_result(proc, info);
+ }
}
if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_ENTITLEMENT) {
info->cred_result = priv_check_cred(so->so_cred, PRIV_NET_PRIVILEGED_NECP_MATCH, 0);
+ if (info->cred_result != 0) {
+ // Process does not have entitlement, check the parent process
+ necp_get_parent_cred_result(NULL, info);
+ }
}
}
mptcp_sopt_insert(struct mptses *mpte, struct mptopt *mpo)
{
mpte_lock_assert_held(mpte); /* same as MP socket lock */
- VERIFY(!(mpo->mpo_flags & MPOF_ATTACHED));
mpo->mpo_flags |= MPOF_ATTACHED;
TAILQ_INSERT_TAIL(&mpte->mpte_sopts, mpo, mpo_entry);
}
mpo->mpo_name == sopt->sopt_name)
break;
}
- VERIFY(mpo == NULL || sopt->sopt_valsize == sizeof (int));
-
return (mpo);
}
static int mptcp_usr_sosend(struct socket *, struct sockaddr *, struct uio *,
struct mbuf *, struct mbuf *, int);
static int mptcp_usr_socheckopt(struct socket *, struct sockopt *);
-static int mptcp_setopt(struct mptses *, struct sockopt *);
-static int mptcp_getopt(struct mptses *, struct sockopt *);
static int mptcp_default_tcp_optval(struct mptses *, struct sockopt *, int *);
static int mptcp_usr_preconnect(struct socket *so);
level = sopt->sopt_level;
optname = sopt->sopt_name;
- VERIFY(sopt->sopt_dir == SOPT_SET);
- VERIFY(level == SOL_SOCKET || level == IPPROTO_TCP);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
mp_so = mptetoso(mpte);
/*
mpo->mpo_name = optname;
mptcp_sopt_insert(mpte, mpo);
}
- VERIFY(mpo->mpo_flags & MPOF_ATTACHED);
/* this can be issued on the subflow socket */
mpo->mpo_flags |= MPOF_SUBFLOW_OK;
}
mpo->mpo_name = optname;
mpo->mpo_intval = optval;
}
- VERIFY(mpo == NULL || error == 0);
/* issue this socket option on existing subflows */
if (error == 0) {
{
int error = 0, optval = 0;
- VERIFY(sopt->sopt_dir == SOPT_GET);
- mpte_lock_assert_held(mpte); /* same as MP socket lock */
-
/*
* We only handle SOPT_GET for TCP level socket options; we should
* not get here for socket level options since they are already
}
if (NFS_BITMAP_ISSET(bitmap, NFS_FATTR_FILEHANDLE)) {
nfsm_chain_get_32(error, nmc, val);
+ if (error == 0 && val > NFS_MAX_FH_SIZE)
+ error = EBADRPC;
+ nfsmout_if(error);
if (fhp) {
fhp->fh_len = val;
nfsm_chain_get_opaque(error, nmc, nfsm_rndup(val), fhp->fh_data);
nfsmout_if(error || !fhp || !nvap);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
nfsm_chain_get_32(error, &nmrep, fhp->fh_len);
+ if (error == 0 && fhp->fh_len > sizeof(fhp->fh_data))
+ error = EBADRPC;
+ nfsmout_if(error);
nfsm_chain_get_opaque(error, &nmrep, fhp->fh_len, fhp->fh_data);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
if ((error == NFSERR_MOVED) || (error == NFSERR_INVAL)) {
* the context is complete.
*/
if (!(cp->gss_clnt_flags & GSS_CTX_COMPLETE)) {
+ if (verflen > KRB5_MAX_MIC_SIZE)
+ return (EBADRPC);
MALLOC(cp->gss_clnt_verf, u_char *, verflen, M_TEMP, M_WAITOK|M_ZERO);
if (cp->gss_clnt_verf == NULL)
return (ENOMEM);
nmc_tmp = *nmc;
nfsm_chain_adv(error, &nmc_tmp, reslen); // skip over the results
nfsm_chain_get_32(error, &nmc_tmp, cksum.length);
+ if (cksum.length > KRB5_MAX_MIC_SIZE) {
+ error = EBADRPC;
+ goto nfsmout;
+ }
MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
nfsm_chain_get_opaque(error, &nmc_tmp, cksum.length, cksum.value);
// XXX chop off the cksum?
goto nfsmout;
if (flavor != RPCSEC_GSS || cksum.length > KRB5_MAX_MIC_SIZE)
error = NFSERR_AUTHERR | AUTH_BADVERF;
- MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
- nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
+ else {
+ MALLOC(cksum.value, void *, cksum.length, M_TEMP, M_WAITOK);
+ nfsm_chain_get_opaque(error, nmc, cksum.length, cksum.value);
+ }
if (error)
goto nfsmout;
struct sockaddr *saddr = (struct sockaddr*)&ss;
struct nfsm_chain nmreq, nmrep;
mbuf_t mreq;
- int error = 0, ip, pmprog, pmvers, pmproc, ualen = 0;
+ int error = 0, ip, pmprog, pmvers, pmproc;
+ uint32_t ualen = 0;
uint32_t port;
uint64_t xid = 0;
char uaddr[MAX_IPv6_STR_LEN+16];
/* get uaddr string and convert to sockaddr */
nfsm_chain_get_32(error, &nmrep, ualen);
if (!error) {
- if (ualen > ((int)sizeof(uaddr)-1))
+ if (ualen > (sizeof(uaddr)-1))
error = EIO;
if (ualen < 1) {
/* program is not available, just return a zero port */
nfsmout_if(error);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETFH);
nfsm_chain_get_32(error, &nmrep, fh.fh_len);
+ if (fh.fh_len > sizeof(fh.fh_data))
+ error = EBADRPC;
+ nfsmout_if(error);
nfsm_chain_get_opaque(error, &nmrep, fh.fh_len, fh.fh_data);
nfsm_chain_op_check(error, &nmrep, NFS_OP_GETATTR);
if (!error) {
error = ENOMEM;
xb_get_32(error, &xb, nmp->nm_fh->fh_len);
nfsmerr_if(error);
- if (nmp->nm_fh->fh_len < 0 ||
- (size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
+ if ((size_t)nmp->nm_fh->fh_len > sizeof(nmp->nm_fh->fh_data))
error = EINVAL;
else
error = xb_get_bytes(&xb, (char*)&nmp->nm_fh->fh_data[0], nmp->nm_fh->fh_len, 0);
struct nfs_dir_buf_header *ndbhp;
struct nfs_vattr *nvattrp;
daddr64_t nextlbn = 0;
- int i, error = ESRCH, fhlen;
+ int i, error = ESRCH;
+ uint32_t fhlen;
/* scan the buffer for the name */
ndbhp = (struct nfs_dir_buf_header*)bp->nb_data;
/* zero the last 4 bytes for a range of opaque */
/* data to make sure any pad bytes will be zero. */
#define nfsm_chain_zero_opaque_pad(BUF, LEN) \
- *(((uint32_t*)(BUF))+((nfsm_rndup(LEN)>>2)-1)) = 0
+ do { \
+ if ((LEN) > 0) \
+ *(((uint32_t*)(BUF))+((nfsm_rndup(LEN)>>2)-1)) = 0; \
+ } while (0)
/* add buffer of opaque data to an mbuf chain */
#define nfsm_chain_add_opaque(E, NMC, BUF, LEN) \
uint32_t rndlen; \
if (E) break; \
rndlen = nfsm_rndup(LEN); \
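+ /* rndlen < LEN means nfsm_rndup() overflowed uint32_t; reject */ \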
+ if (rndlen < (LEN)) { \
+ (E) = EBADRPC; \
+ break; \
+ } \
if ((NMC)->nmc_left >= rndlen) { \
(PTR) = (void*)(NMC)->nmc_ptr; \
(NMC)->nmc_left -= rndlen; \
uint32_t rndlen; \
if (E) break; \
rndlen = nfsm_rndup(LEN); \
+ if (rndlen < (LEN)) { \
+ (E) = EBADRPC; \
+ break; \
+ } \
if ((NMC)->nmc_left >= rndlen) { \
u_char *__tmpptr = (u_char*)(NMC)->nmc_ptr; \
(NMC)->nmc_left -= rndlen; \
/* get the size of and a pointer to a file handle in an mbuf chain */
#define nfsm_chain_get_fh_ptr(E, NMC, VERS, FHP, FHSIZE) \
do { \
- if ((VERS) != NFS_VER2) \
+ if ((VERS) != NFS_VER2) { \
nfsm_chain_get_32((E), (NMC), (FHSIZE)); \
- else \
+ if (E) break; \
+ if ((FHSIZE) > NFS_MAX_FH_SIZE) \
+ (E) = EBADRPC; \
+ } else \
(FHSIZE) = NFSX_V2FH;\
- nfsm_chain_get_opaque_pointer((E), (NMC), (FHSIZE), (FHP));\
+ if ((E) == 0) \
+ nfsm_chain_get_opaque_pointer((E), (NMC), (FHSIZE), (FHP));\
} while (0)
/* get the size of and data for a file handle in an mbuf chain */
#define nfsm_chain_get_fh(E, NMC, VERS, FHP) \
do { \
- if ((VERS) != NFS_VER2) \
+ if ((VERS) != NFS_VER2) { \
nfsm_chain_get_32((E), (NMC), (FHP)->fh_len); \
- else \
+ if ((FHP)->fh_len > sizeof((FHP)->fh_data)) \
+ (E) = EBADRPC; \
+ } else \
(FHP)->fh_len = NFSX_V2FH;\
- nfsm_chain_get_opaque((E), (NMC), (uint32_t)(FHP)->fh_len, (FHP)->fh_data);\
- if (E) \
+ if ((E) == 0) \
+ nfsm_chain_get_opaque((E), (NMC), (uint32_t)(FHP)->fh_len, (FHP)->fh_data);\
+ else \
(FHP)->fh_len = 0;\
} while (0)
#define NFSV3_MAX_FH_SIZE 64
#define NFSV2_MAX_FH_SIZE 32
struct fhandle {
- int fh_len; /* length of file handle */
+ unsigned int fh_len; /* length of file handle */
unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */
};
typedef struct fhandle fhandle_t;
#define NFSV3_MAX_FH_SIZE 64
#define NFSV2_MAX_FH_SIZE 32
struct fhandle {
- int fh_len; /* length of file handle */
+ unsigned int fh_len; /* length of file handle */
unsigned char fh_data[NFS_MAX_FH_SIZE]; /* file handle value */
};
typedef struct fhandle fhandle_t;
#define RB_UPSDELAY 0x200 /* Delays restart by 5 minutes */
#define RB_QUICK 0x400 /* quick and ungraceful reboot with file system caches flushed*/
#define RB_PANIC 0x800 /* panic the kernel */
+#define RB_PANIC_ZPRINT 0x1000 /* add zprint info to panic string */
#ifndef KERNEL
__BEGIN_DECLS
-17.6.0
+17.7.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
void evaluateAssertions(IOPMDriverAssertionType newAssertions,
IOPMDriverAssertionType oldAssertions);
- void evaluateWranglerAssertions();
void deregisterPMSettingObject( PMSettingObject * pmso );
}
// Force a single read of head and tail
- head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
+ // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
+ head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);
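+ // The acquire on 'head' keeps the entry writes below from being reordered
+ // ahead of this free-space check.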
// Check for underflow of (dataQueue->queueSize - tail)
queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
// Send notification (via mach message) that data is available.
if ( ( head == tail ) /* queue was empty prior to enqueue() */
- || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) ) /* queue was emptied during enqueue() */
+ || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE) ) ) /* queue was emptied during enqueue() */
{
sendDataAvailableNotification();
}
digits++;
}
}
+#if DEBUG || DEVELOPMENT
+ if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> rtc:",
+ &rtcVars, sizeof(rtcVars), &kprintf);
+#endif /* DEBUG || DEVELOPMENT */
+
data = OSData::withBytes(&rtcVars, sizeof(rtcVars));
if (data)
{
uint16_t bits = 0x0082;
gIOHibernateBootNextData = OSData::withBytes(&bits, sizeof(bits));
}
+
+#if DEBUG || DEVELOPMENT
+ if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> bootnext:",
+ gIOHibernateBoot0082Data->getBytesNoCopy(), gIOHibernateBoot0082Data->getLength(), &kprintf);
+#endif /* DEBUG || DEVELOPMENT */
if (gIOHibernateBoot0082Key && gIOHibernateBoot0082Data && gIOHibernateBootNextKey && gIOHibernateBootNextData)
{
gIOHibernateBootNextSave = gIOOptionsEntry->copyProperty(gIOHibernateBootNextKey);
gIOHibernateVars.fileVars = &gFileVars;
gIOHibernateCurrentHeader->signature = kIOHibernateHeaderSignature;
gIOHibernateState = kIOHibernateStateHibernating;
+
+#if DEBUG || DEVELOPMENT
+ if (kIOLogHibernate & gIOKitDebug)
+ {
+ OSData * data = OSDynamicCast(OSData, IOService::getPMRootDomain()->getProperty(kIOHibernateSMCVariablesKey));
+ if (data)
+ {
+ uintptr_t * smcVars = (typeof(smcVars)) data->getBytesNoCopy();
+ IOKitKernelLogBuffer("H> smc:",
+ (const void *)smcVars[1], smcVars[0], &kprintf);
+ }
+ }
+#endif /* DEBUG || DEVELOPMENT */
}
else
{
}
if (gIOOptionsEntry && gIOHibernateBootImageKey)
{
- if (data) gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data);
+ if (data)
+ {
+ gIOOptionsEntry->setProperty(gIOHibernateBootImageKey, data);
+#if DEBUG || DEVELOPMENT
+ if (kIOLogHibernate & gIOKitDebug) IOKitKernelLogBuffer("H> boot-image:",
+ data->getBytesNoCopy(), data->getLength(), &kprintf);
+#endif /* DEBUG || DEVELOPMENT */
+ }
else
{
gIOOptionsEntry->removeProperty(gIOHibernateBootImageKey);
{
err = IOPolledFilePollersSetEncryptionKey(vars->fileVars,
&vars->volumeCryptKey[0], vars->volumeCryptKeySize);
- HIBLOG("IOPolledFilePollersSetEncryptionKey(%x)\n", err);
+ HIBLOG("IOPolledFilePollersSetEncryptionKey(%x) %ld\n", err, vars->volumeCryptKeySize);
if (kIOReturnSuccess != err) panic("IOPolledFilePollersSetEncryptionKey(0x%x)", err);
cryptvars = 0;
}
extern "C" OSString * IOCopyLogNameForPID(int pid);
+extern "C" void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
+ void (*output)(const char *format, ...));
+
#if defined(__i386__) || defined(__x86_64__)
#ifndef __cplusplus
#error xx
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
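+/*
+ * Hex-dump up to 4 KB of 'buffer' through 'output' (e.g. kprintf),
+ * 16 bytes per row with a printable-ASCII gutter on the right.
+ */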
+void IOKitKernelLogBuffer(const char * title, const void * buffer, size_t size,
+ void (*output)(const char *format, ...))
+{
+ uint8_t c, chars[17];
+ size_t idx;
+
+ output("%s(0x%x):\n", title, size);
+ if (size > 4096) size = 4096;
+ chars[16] = idx = 0;
+ while (true) {
+ if (!(idx & 15)) {
+ if (idx) output(" |%s|\n", chars);
+ if (idx >= size) break;
+ output("%04x: ", idx);
+ }
+ else if (!(idx & 7)) output(" ");
+
+ c = ((char *)buffer)[idx];
+ output("%02x ", c);
+ chars[idx & 15] = ((c >= 0x20) && (c <= 0x7f)) ? c : ' ';
+
+ idx++;
+ if ((idx == size) && (idx & 15)) {
+ chars[idx & 15] = 0;
+ while (idx & 15) {
+ idx++;
+ output(" ");
+ }
+ }
+ }
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
/*
* Convert a integer constant (typically a #define or enum) to a string.
*/
static OSArray * gPMHaltArray = 0;
static const OSSymbol * gPMHaltClientAcknowledgeKey = 0;
static bool gPMQuiesced;
+static uint32_t gIOPMPCIHostBridgeWakeDelay;
// Constants used as arguments to IOPMrootDomain::informCPUStateChange
#define kCPUUnknownIndex 9999999
PE_parse_boot_argn("noidle", &gNoIdleFlag, sizeof(gNoIdleFlag));
PE_parse_boot_argn("haltmspanic", &gHaltTimeMaxPanic, sizeof(gHaltTimeMaxPanic));
PE_parse_boot_argn("haltmslog", &gHaltTimeMaxLog, sizeof(gHaltTimeMaxLog));
+ PE_parse_boot_argn("pcihostbridge_wake_delay", &gIOPMPCIHostBridgeWakeDelay, sizeof(gIOPMPCIHostBridgeWakeDelay));
queue_init(&aggressivesQueue);
aggressivesThreadCall = thread_call_allocate(handleAggressivesFunction, this);
// MARK: -
// MARK: System Capability
+SYSCTL_UINT(_kern, OID_AUTO, pcihostbridge_wake_delay, CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED, (uint32_t *)&gIOPMPCIHostBridgeWakeDelay, 0, "");
+
//******************************************************************************
// tagPowerPlaneService
//
if (isDisplayWrangler)
{
wrangler = service;
+ // found the display wrangler, check for any display assertions already created
+ if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) {
+ DLOG("wrangler setIgnoreIdleTimer\(1) due to pre-existing assertion\n");
+ wrangler->setIgnoreIdleTimer( true );
+ }
}
#else
isDisplayWrangler = false;
while (child != this)
{
- if ((parent == pciHostBridgeDriver) ||
+ if ((gIOPMPCIHostBridgeWakeDelay ? (parent == pciHostBridgeDriver) : (parent->metaCast("IOPCIDevice") != NULL)) ||
(parent == this))
{
if (OSDynamicCast(IOPowerConnection, child))
{
IOPowerConnection * conn = (IOPowerConnection *) child;
conn->delayChildNotification = true;
+ DLOG("delayChildNotification for 0x%llx\n", conn->getRegistryEntryID());
}
break;
}
IONotifier * notifier __unused)
{
#if !NO_KERNEL_HID
- // found the display wrangler, check for any display assertions already created
- gRootDomain->evaluateWranglerAssertions();
// install a handler
if( !newService->registerInterest( gIOGeneralInterest,
&displayWranglerNotification, target, 0) )
}
}
-void IOPMrootDomain::evaluateWranglerAssertions()
-{
- if (gIOPMWorkLoop->inGate() == false) {
- gIOPMWorkLoop->runAction(
- OSMemberFunctionCast(IOWorkLoop::Action, this, &IOPMrootDomain::evaluateWranglerAssertions),
- (OSObject *)this);
-
- return;
- }
-
- if (pmAssertions->getActivatedAssertions() & kIOPMDriverAssertionPreventDisplaySleepBit) {
- DLOG("wrangler setIgnoreIdleTimer\(1) on matching\n");
- wrangler->setIgnoreIdleTimer( true );
- }
-}
-
// MARK: -
// MARK: Statistics
return sCPULatencyHolderName[kCpuDelayBusStall];
}
+const char *getCpuInterruptDelayHolderName(void);
+const char *getCpuInterruptDelayHolderName(void) {
+ return sCPULatencyHolderName[kCpuDelayInterrupt];
+}
+
}
#endif
notifiers = victim->copyNotifiers(gIOWillTerminateNotification, 0, 0xffffffff);
victim->invokeNotifiers(¬ifiers);
- if( 0 == victim->getClient()) {
-
- // no clients - will go to finalize
- victim->scheduleFinalize(false);
-
- } else {
- _workLoopAction( (IOWorkLoop::Action) &actionWillTerminate,
+ _workLoopAction( (IOWorkLoop::Action) &actionWillTerminate,
victim, (void *)(uintptr_t) options, (void *)(uintptr_t) doPhase2List );
- }
+
didPhase2List->headQ( victim );
}
victim->release();
}
while( (victim = (IOService *) didPhase2List->getObject(0)) ) {
-
- if( victim->lockForArbitration( true )) {
+ bool scheduleFinalize = false;
+ if( victim->lockForArbitration( true )) {
victim->__state[1] |= kIOServiceTermPhase3State;
+ scheduleFinalize = (0 == victim->getClient());
victim->unlockForArbitration();
}
_workLoopAction( (IOWorkLoop::Action) &actionDidTerminate,
_workLoopAction( (IOWorkLoop::Action) &actionDidStop,
victim, (void *)(uintptr_t) options, NULL );
}
+ // no clients - will go to finalize
+ if (scheduleFinalize) victim->scheduleFinalize(false);
didPhase2List->removeObject(0);
}
IOLockLock( gJobsLock );
doPhase3 = false;
// finalize leaves
while( (victim = (IOService *) gIOFinalizeList->getObject(0))) {
-
+ bool sendFinal = false;
IOLockUnlock( gJobsLock );
- _workLoopAction( (IOWorkLoop::Action) &actionFinalize,
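+ // deliver finalize at most once, gated by kIOServiceFinalized under
+ // the arbitration lock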
+ if (victim->lockForArbitration(true)) {
+ sendFinal = (0 == (victim->__state[1] & kIOServiceFinalized));
+ if (sendFinal) victim->__state[1] |= kIOServiceFinalized;
+ victim->unlockForArbitration();
+ }
+ if (sendFinal) {
+ _workLoopAction( (IOWorkLoop::Action) &actionFinalize,
victim, (void *)(uintptr_t) options );
+ }
IOLockLock( gJobsLock );
// hold off free
freeList->setObject( victim );
} else {
// a terminated client is not ready for stop if it has clients, skip it
- if( (kIOServiceInactiveState & client->__state[0]) && client->getClient()) {
- TLOG("%s[0x%qx]::defer stop(%s[0x%qx])\n",
- client->getName(), regID2,
- client->getClient()->getName(), client->getClient()->getRegistryEntryID());
- IOServiceTrace(
- IOSERVICE_TERMINATE_STOP_DEFER,
- (uintptr_t) regID1,
- (uintptr_t) (regID1 >> 32),
- (uintptr_t) regID2,
- (uintptr_t) (regID2 >> 32));
-
- idx++;
- continue;
- }
-
+ bool deferStop = (0 != (kIOServiceInactiveState & client->__state[0]));
IOLockUnlock( gJobsLock );
+ if (deferStop && client->lockForArbitration(true)) {
+ deferStop = (0 == (client->__state[1] & kIOServiceFinalized));
+ //deferStop = (!deferStop && (0 != client->getClient()));
+ //deferStop = (0 != client->getClient());
+ client->unlockForArbitration();
+ if (deferStop) {
+ TLOG("%s[0x%qx]::defer stop()\n", client->getName(), regID2);
+ IOServiceTrace(IOSERVICE_TERMINATE_STOP_DEFER,
+ (uintptr_t) regID1,
+ (uintptr_t) (regID1 >> 32),
+ (uintptr_t) regID2,
+ (uintptr_t) (regID2 >> 32));
+
+ idx++;
+ IOLockLock( gJobsLock );
+ continue;
+ }
+ }
_workLoopAction( (IOWorkLoop::Action) &actionStop,
provider, (void *) client );
IOLockLock( gJobsLock );
kIOServiceNeedWillTerminate = 0x00080000,
kIOServiceWaitDetachState = 0x00040000,
kIOServiceConfigRunning = 0x00020000,
+ kIOServiceFinalized = 0x00010000,
};
// notify state
}
// Read head and tail with acquire barrier
+ // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);
IODataQueueEntry * entry;
// Force a single read of head and tail
- head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
+ // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
+ head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);
// Check for overflow of entrySize
if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
// Send notification (via mach message) that data is available.
if ( ( tail == head ) /* queue was empty prior to enqueue() */
- || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED) ) ) /* queue was emptied during enqueue() */
+ || ( tail == __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE) ) ) /* queue was emptied during enqueue() */
{
sendDataAvailableNotification();
}
}
// Read head and tail with acquire barrier
- tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
- headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);
+ // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
+ headOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
+ tailOffset = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_ACQUIRE);
if (headOffset != tailOffset) {
IODataQueueEntry * head = 0;
AbsoluteTime abstime, AbsoluteTime leeway)
{
AbsoluteTime end;
- clock_continuoustime_interval_to_deadline(abstime, &end);
- return wakeAtTime(options, end, leeway);
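+ // Only use the continuous clock when kIOTimeOptionsContinuous is set;
+ // otherwise interpret 'abstime' against mach_absolute_time().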
+ if (options & kIOTimeOptionsContinuous)
+ clock_continuoustime_interval_to_deadline(abstime, &end);
+ else
+ clock_absolutetime_interval_to_deadline(abstime, &end);
+ return wakeAtTime(options, end, leeway);
}
IOReturn IOTimerEventSource::wakeAtTimeTicks(UInt32 ticks)
467DAFD4157E8AF200CE68F0 /* guarded_open_np.c in Sources */ = {isa = PBXBuildFile; fileRef = 467DAFD3157E8AF200CE68F0 /* guarded_open_np.c */; };
4BDD5F1D1891AB2F004BF300 /* mach_approximate_time.c in Sources */ = {isa = PBXBuildFile; fileRef = 4BDD5F1B1891AB2F004BF300 /* mach_approximate_time.c */; };
4BDD5F1E1891AB2F004BF300 /* mach_approximate_time.s in Sources */ = {isa = PBXBuildFile; fileRef = 4BDD5F1C1891AB2F004BF300 /* mach_approximate_time.s */; };
+ 726D915520ACD7FC0039A2FE /* mach_bridge_remote_time.c in Sources */ = {isa = PBXBuildFile; fileRef = 726D915420ACD7FC0039A2FE /* mach_bridge_remote_time.c */; };
729B7D0A15C8938C000E2501 /* carbon_delete.c in Sources */ = {isa = PBXBuildFile; fileRef = FB50F1B315AB7DE700F814BA /* carbon_delete.c */; };
72B1E6ED190723DB00FB3FA2 /* guarded_open_dprotected_np.c in Sources */ = {isa = PBXBuildFile; fileRef = 72B1E6EC190723DB00FB3FA2 /* guarded_open_dprotected_np.c */; };
72E09E941B444B19006F11A4 /* mach_continuous_time.c in Sources */ = {isa = PBXBuildFile; fileRef = 72FB18801B437F7A00181A5B /* mach_continuous_time.c */; };
467DAFD3157E8AF200CE68F0 /* guarded_open_np.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = guarded_open_np.c; sourceTree = "<group>"; };
4BDD5F1B1891AB2F004BF300 /* mach_approximate_time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach_approximate_time.c; sourceTree = "<group>"; };
4BDD5F1C1891AB2F004BF300 /* mach_approximate_time.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = mach_approximate_time.s; sourceTree = "<group>"; };
+ 726D915420ACD7FC0039A2FE /* mach_bridge_remote_time.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; name = mach_bridge_remote_time.c; path = wrappers/mach_bridge_remote_time.c; sourceTree = "<group>"; };
72B1E6EC190723DB00FB3FA2 /* guarded_open_dprotected_np.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = guarded_open_dprotected_np.c; sourceTree = "<group>"; };
72FB18801B437F7A00181A5B /* mach_continuous_time.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = mach_continuous_time.c; sourceTree = "<group>"; };
7466C923170CB99B004557CC /* vm_page_size.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = vm_page_size.h; sourceTree = "<group>"; };
08FB7794FE84155DC02AAC07 /* mach */ = {
isa = PBXGroup;
children = (
+ 726D915420ACD7FC0039A2FE /* mach_bridge_remote_time.c */,
C9D9BE0F114FFADC0000D8B9 /* Libsyscall.xcconfig */,
24D1158911E672270063D54D /* Platforms */,
24D1156511E671B20063D54D /* custom */,
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
+ 726D915520ACD7FC0039A2FE /* mach_bridge_remote_time.c in Sources */,
403C7CEE1E1F4E4400D6FEEF /* os_packet.c in Sources */,
E214BDC81C2E358300CEE8A3 /* clonefile.c in Sources */,
C9D9BD19114B00600000D8B9 /* clock_priv.defs in Sources */,
do {
TimeStamp_tick = *gtod_TimeStamp_tick_p;
- TimeStamp_sec = *gtod_TimeStamp_sec_p;
- TimeStamp_frac = *gtod_TimeStamp_frac_p;
- Tick_scale = *gtod_Ticks_scale_p;
- Ticks_per_sec = *gtod_Ticks_per_sec_p;
-
/*
* This call contains an instruction barrier which ensures that the read
* of the abs time, and the reads of the timestamp values below, aren't
* speculated above the read of gtod_TimeStamp_tick_p
*/
now = mach_absolute_time();
+ TimeStamp_sec = *gtod_TimeStamp_sec_p;
+ TimeStamp_frac = *gtod_TimeStamp_frac_p;
+ Tick_scale = *gtod_Ticks_scale_p;
+ Ticks_per_sec = *gtod_Ticks_per_sec_p;
+ /*
+ * This barrier prevents the reordering of the second read of gtod_TimeStamp_tick_p
+ * w.r.t the values read just after mach_absolute_time is invoked.
+ */
+#if (__ARM_ARCH__ >= 7)
+ __asm__ volatile("dmb ishld" ::: "memory");
+#endif
} while (TimeStamp_tick != *gtod_TimeStamp_tick_p);
if (TimeStamp_tick == 0)
--- /dev/null
+/*
+ * Copyright (c) 2018 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+#include <sys/types.h>
+#include <machine/cpu_capabilities.h>
+#include <kern/remote_time.h>
+#include <mach/mach_time.h>
+#include "strings.h"
+#include <TargetConditionals.h>
+
+#define BT_RESET_SENTINEL_TS (~3ULL) /* from machine/machine_remote_time.h */
+
+extern uint64_t __mach_bridge_remote_time(uint64_t local_time);
+
+#if TARGET_OS_BRIDGE && defined(__arm64__)
+static uint64_t
+absolutetime_to_nanoseconds(uint64_t abs_time)
+{
+ mach_timebase_info_data_t info;
+ mach_timebase_info(&info);
+ uint64_t time_in_ns = (uint64_t)(((double)info.numer / (double)info.denom) * abs_time);
+
+ return time_in_ns;
+}
+
+uint64_t
+mach_bridge_remote_time(__unused uint64_t local_time)
+{
+ uint64_t remote_time = 0;
+ uint64_t local_time_ns = 0;
+ uint64_t now = 0;
+ struct bt_params params = {};
+
+ volatile struct bt_params *commpage_bt_params_p = (struct bt_params *)_COMM_PAGE_REMOTETIME_PARAMS;
+ volatile uint64_t *base_local_ts_p = &commpage_bt_params_p->base_local_ts;
+ volatile uint64_t *base_remote_ts_p = &commpage_bt_params_p->base_remote_ts;
+ volatile double *rate_p = &commpage_bt_params_p->rate;
+
+ do {
+ params.base_local_ts = *base_local_ts_p;
+ if (*base_local_ts_p == BT_RESET_SENTINEL_TS) {
+ return 0;
+ }
+ /*
+ * This call contains an instruction barrier that ensures the second read of
+ * base_local_ts is not speculated above the first read of base_local_ts.
+ */
+ now = mach_absolute_time();
+ params.base_remote_ts = *base_remote_ts_p;
+ params.rate = *rate_p;
+ /*
+ * This barrier prevents the second read of base_local_ts from being reordered
+ * w.r.t the reads of other values in bt_params.
+ */
+ __asm__ volatile("dmb ishld" ::: "memory");
+ } while (params.base_local_ts && (params.base_local_ts != commpage_bt_params_p->base_local_ts));
+
+ if (!local_time) {
+ local_time = now;
+ }
+ local_time_ns = absolutetime_to_nanoseconds(local_time);
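+ /*
+ * Timestamps that predate the current interpolation window can't be
+ * derived from the commpage params; fall back to the syscall for those.
+ */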
+ if (local_time_ns < params.base_local_ts) {
+ remote_time = __mach_bridge_remote_time(local_time);
+ } else {
+ remote_time = mach_bridge_compute_timestamp(local_time_ns, ¶ms);
+ }
+ return remote_time;
+}
+#endif /* TARGET_OS_BRIDGE && defined(__arm64__) */
#include <arm/rtclock.h>
#include <libkern/OSAtomic.h>
#include <stdatomic.h>
+#include <kern/remote_time.h>
+#include <machine/machine_remote_time.h>
#include <sys/kdebug.h>
#endif /* __arm64__ */
}
}
+
+/*
+ * set the commpage's remote time params for
+ * userspace call to mach_bridge_remote_time()
+ */
+void
+commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts)
+{
+ if (commPagePtr) {
+#ifdef __arm64__
+ struct bt_params *paramsp = (struct bt_params *)(_COMM_PAGE_REMOTETIME_PARAMS + _COMM_PAGE_RW_OFFSET);
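+ /*
+ * Writer-side seqlock: invalidate base_local_ts first, publish the new
+ * rate and base_remote_ts, then store base_local_ts last so readers
+ * retry until they observe a stable generation (see
+ * mach_bridge_remote_time()).
+ */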
+ paramsp->base_local_ts = 0;
+ __asm__ volatile("dmb ish" ::: "memory");
+ paramsp->rate = rate;
+ paramsp->base_remote_ts = base_remote_ts;
+ __asm__ volatile("dmb ish" ::: "memory");
+ paramsp->base_local_ts = base_local_ts; //This will act as a generation count
+#else
+ (void)rate;
+ (void)base_local_ts;
+ (void)base_remote_ts;
+#endif /* __arm64__ */
+ }
+}
extern void commpage_update_mach_continuous_time(uint64_t sleeptime);
extern void commpage_update_multiuser_config(uint32_t);
extern void commpage_update_boottime(uint64_t boottime_usec);
+extern void commpage_set_remotetime_params(double rate, uint64_t base_local_ts, uint64_t base_remote_ts);
#endif /* _ARM_COMMPAGE_H */
#define _COMM_PAGE_NEWTIMEOFDAY_DATA (_COMM_PAGE_START_ADDRESS+0x120) // used by gettimeofday(). Currently, sizeof(new_commpage_timeofday_data_t) = 40.
+
#define _COMM_PAGE_END (_COMM_PAGE_START_ADDRESS+0x1000) // end of common page
#endif /* _ARM_CPU_CAPABILITIES_H */
osfmk/i386/cpu_topology.c standard
osfmk/i386/i386_timer.c standard
osfmk/i386/fpu.c standard
+osfmk/i386/fp_simd.s standard
osfmk/i386/i386_lock.s standard
osfmk/i386/i386_init.c standard
osfmk/i386/i386_vm_init.c standard
if (kVCDarkBackground & vc_progress_options.options) vc_progress_white = TRUE;
else if (kVCLightBackground & vc_progress_options.options) vc_progress_white = FALSE;
+#if !defined(XNU_TARGET_OS_BRIDGE)
vc_progress_set( graphics_now, delay );
+#endif /* !defined(XNU_TARGET_OS_BRIDGE) */
gc_enable( !graphics_now );
gc_acquired = TRUE;
gc_desire_text = FALSE;
ml_set_interrupts_enabled(FALSE);
}
+ if (current_cpu_datap()->cpu_hibernate) {
+ /* Call hibernate_write_image() to put disk to low power state */
+ hibernate_write_image();
+ cpu_datap(0)->cpu_hibernate = 0;
+ }
+
/*
* Call back to caller to indicate that interrupts will remain
* disabled while we deep idle, wake and return.
cpu_uber_t cpu_uber;
/* Double-mapped per-CPU exception stack address */
uintptr_t cd_estack;
+ int cpu_xstate;
/* Address of shadowed, partially mirrored CPU data structures located
* in the double mapped PML4
*/
--- /dev/null
+/*
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ * Created 2018 Derek Kumar
+ */
+#include <i386/asm.h>
+
+.macro VPX
+ vpxord $0, $0, $0
+.endmacro
+
+.macro PX
+ pxor $0, $0
+.endmacro
+
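+/*
+ * Zero all SIMD register state: vzeroall clears ymm0-ymm15 on AVX-capable
+ * CPUs; xmmzeroall pxor-clears xmm0-xmm15 on SSE-only CPUs.
+ */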
+Entry(vzeroall)
+ vzeroall
+ ret
+
+Entry(xmmzeroall)
+ PX %xmm0
+ PX %xmm1
+ PX %xmm2
+ PX %xmm3
+
+ PX %xmm4
+ PX %xmm5
+ PX %xmm6
+ PX %xmm7
+
+ PX %xmm8
+ PX %xmm9
+ PX %xmm10
+ PX %xmm11
+
+ PX %xmm12
+ PX %xmm13
+ PX %xmm14
+ PX %xmm15
+
+ ret
/*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
-/*
- */
-
#include <mach/exception_types.h>
#include <mach/i386/thread_status.h>
static xstate_t thread_xstate(thread_t);
x86_ext_thread_state_t initial_fp_state __attribute((aligned(64)));
-
+x86_ext_thread_state_t default_avx512_state __attribute((aligned(64)));
+x86_ext_thread_state_t default_avx_state __attribute((aligned(64)));
+x86_ext_thread_state_t default_fx_state __attribute((aligned(64)));
/* Global MXCSR capability bitmask */
static unsigned int mxcsr_capability_mask;
static void fpu_store_registers(void *, boolean_t);
static void fpu_load_registers(void *);
-#define FP_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE))
-#define AVX_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM))
#if !defined(RC_HIDE_XNU_J137)
-#define AVX512_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM | XFEM_ZMM))
static const uint32_t xstate_xmask[] = {
[FP] = FP_XMASK,
[AVX] = AVX_XMASK,
};
#endif
-static inline void xsetbv(uint32_t mask_hi, uint32_t mask_lo) {
- __asm__ __volatile__("xsetbv" :: "a"(mask_lo), "d"(mask_hi), "c" (XCR0));
-}
-
static inline void xsave(struct x86_fx_thread_state *a, uint32_t rfbm) {
__asm__ __volatile__("xsave %0" :"=m" (*a) : "a"(rfbm), "d"(0));
}
}
#if !defined(RC_HIDE_XNU_J137)
-static inline void vzeroupper(void) {
+__unused static inline void vzeroupper(void) {
__asm__ __volatile__("vzeroupper" ::);
}
-#if DEVELOPMENT || DEBUG
-static inline uint64_t xgetbv(uint32_t c) {
- uint32_t mask_hi, mask_lo;
- __asm__ __volatile__("xgetbv" : "=a"(mask_lo), "=d"(mask_hi) : "c" (c));
- return ((uint64_t) mask_hi<<32) + (uint64_t) mask_lo;
-}
-#endif
static boolean_t fpu_thread_promote_avx512(thread_t); /* Forward */
fps->fx.fp_save_layout = fpu_YMM_capable ? XSAVE32: FXSAVE32;
fpu_load_registers(fps);
+ if (fpu_ZMM_capable) {
+ xsave64((struct x86_fx_thread_state *)&default_avx512_state, xstate_xmask[AVX512]);
+ }
+ if (fpu_YMM_capable) {
+ xsave64((struct x86_fx_thread_state *)&default_avx_state, xstate_xmask[AVX]);
+ } else {
+ fxsave64((struct x86_fx_thread_state *)&default_fx_state);
+ }
+
/* Poison values to trap unsafe usage */
fps->fx.fp_valid = 0xFFFFFFFF;
fps->fx.fp_save_layout = FP_UNUSED;
set_ts();
}
+int fpsimd_fault_popc = 0;
/*
* Look for FPU and initialize it.
* Called on each CPU.
fpu_capability = fpu_default = FP;
+ PE_parse_boot_argn("fpsimd_fault_popc", &fpsimd_fault_popc, sizeof(fpsimd_fault_popc));
+
#if !defined(RC_HIDE_XNU_J137)
static boolean_t is_avx512_enabled = TRUE;
if (cpu_number() == master_cpu) {
fp_state_size[fpu_capability]);
fpinit();
+ current_cpu_datap()->cpu_xstate = fpu_default;
/*
* Trap wait instructions. Turn off FPU for now.
* Always save old thread`s FPU context but don't load new .. allow that to fault-in.
* Switch to the new task's xstate.
*/
+
void
fpu_switch_context(thread_t old, thread_t new)
{
struct x86_fx_thread_state *ifps;
- boolean_t is_ts_cleared = FALSE;
+ cpu_data_t *cdp = current_cpu_datap();
+ xstate_t new_xstate = new ? thread_xstate(new) : fpu_default;
assert(ml_get_interrupts_enabled() == FALSE);
ifps = (old)->machine.ifps;
* (such as sendsig & sigreturn) manipulate TS directly.
*/
clear_ts();
- is_ts_cleared = TRUE;
/* registers are in FPU - save to memory */
fpu_store_registers(ifps, (thread_is_64bit(old) && is_saved_state64(old->machine.iss)));
ifps->fp_valid = TRUE;
+
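+ /*
+ * Reload a default state matching the CPU's current xstate so stale
+ * user FP/SIMD contents don't linger in the registers (replaces the
+ * old vzeroupper power-saving path).
+ */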
+ if (fpu_ZMM_capable && (cdp->cpu_xstate == AVX512)) {
+ xrstor64((struct x86_fx_thread_state *)&default_avx512_state, xstate_xmask[AVX512]);
+ } else if (fpu_YMM_capable) {
+ xrstor64((struct x86_fx_thread_state *) &default_avx_state, xstate_xmask[AVX]);
+ } else {
+ fxrstor64((struct x86_fx_thread_state *)&default_fx_state);
+ }
}
-#if !defined(RC_HIDE_XNU_J137)
- xstate_t old_xstate = thread_xstate(old);
- xstate_t new_xstate = new ? thread_xstate(new) : fpu_default;
- if (old_xstate == AVX512 && ifps != 0) {
- DBG_AVX512_STATE((struct x86_avx512_thread_state *) ifps);
- /*
- * Clear upper bits for potential power-saving
- * but first ensure the TS bit is clear.
- */
- if (!is_ts_cleared)
- clear_ts();
- vzeroupper();
- }
- if (new_xstate != old_xstate) {
+
+ assertf(fpu_YMM_capable ? (xgetbv(XCR0) == xstate_xmask[cdp->cpu_xstate]) : TRUE, "XCR0 mismatch: 0x%llx 0x%x 0x%x", xgetbv(XCR0), cdp->cpu_xstate, xstate_xmask[cdp->cpu_xstate]);
+ if (new_xstate != cdp->cpu_xstate) {
DBG("fpu_switch_context(%p,%p) new xstate: %s\n",
old, new, xstate_name[new_xstate]);
xsetbv(0, xstate_xmask[new_xstate]);
+ cdp->cpu_xstate = new_xstate;
}
-#else
-#pragma unused(new)
-#endif
set_ts();
}
fp_state_free(new_ifps, xstate);
}
-
/*
* Initialize FPU.
- *
+ * FNINIT programs the x87 control word to 0x37f, which matches
+ * the desired default for macOS.
*/
void
-fpinit(void)
-{
- unsigned short control;
-
+fpinit(void)
+{
+ boolean_t istate = ml_set_interrupts_enabled(FALSE);
clear_ts();
fninit();
+#if DEBUG
+ /* We skip this power-on-default verification sequence on
+ * non-DEBUG, as dirtying the x87 control word may slow down
+ * xsave/xrstor and affect energy use.
+ */
+ unsigned short control, control2;
fnstcw(&control);
+ control2 = control;
control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
control |= (FPC_PC_64 | /* Set precision */
FPC_RC_RN | /* round-to-nearest */
FPC_IE | /* Allow NaNQs and +-INF */
FPC_DE | /* Allow denorms as operands */
FPC_PE); /* No trap for precision loss */
+ assert(control == control2);
fldcw(control);
-
+#endif
/* Initialize SSE/SSE2 */
__builtin_ia32_ldmxcsr(0x1f80);
+ if (fpu_YMM_capable) {
+ vzeroall();
+ } else {
+ xmmzeroall();
+ }
+ ml_set_interrupts_enabled(istate);
}
/*
clear_fpu();
xsetbv(0, AVX512_XMASK);
-
+ current_cpu_datap()->cpu_xstate = AVX512;
(void)ml_set_interrupts_enabled(intr);
}
assert(ifps->fp.fp_valid);
fpu_switch_addrmode(thread_t thread, boolean_t is_64bit)
{
struct x86_fx_thread_state *ifps = thread->machine.ifps;
+ mp_disable_preemption();
if (ifps && ifps->fp_valid) {
if (thread_xstate(thread) == FP) {
ifps->fp_save_layout = is_64bit ? XSAVE64 : XSAVE32;
}
}
+ mp_enable_preemption();
+}
+
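+/*
+ * Population count over a buffer: a cheap content hash for the FP/SIMD
+ * save area, used to detect corruption across page faults.
+ */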
+static inline uint32_t fpsimd_pop(uintptr_t ins, int sz) {
+ uint32_t rv = 0;
+
+ while (sz >= 16) {
+ uint32_t rv1, rv2;
+ uint64_t *ins64 = (uint64_t *) ins;
+ uint64_t *ins642 = (uint64_t *) (ins + 8);
+ rv1 = __builtin_popcountll(*ins64);
+ rv2 = __builtin_popcountll(*ins642);
+ rv += rv1 + rv2;
+ sz -= 16;
+ ins += 16;
+ }
+
+ while (sz >= 4) {
+ uint32_t *ins32 = (uint32_t *) ins;
+ rv += __builtin_popcount(*ins32);
+ sz -= 4;
+ ins += 4;
+ }
+
+ while (sz > 0) {
+ uint8_t *ins8 = (uint8_t *)ins; /* unsigned so sign-extension can't inflate the count */
+ rv += __builtin_popcount(*ins8);
+ sz--;
+ ins++;
+ }
+ return rv;
+}
+
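+/* Hash the thread's saved XMM state, forcing a save first if it is still live in the FPU. */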
+uint32_t thread_fpsimd_hash(thread_t ft) {
+ if (fpsimd_fault_popc == 0)
+ return 0;
+
+ uint32_t prv = 0;
+ boolean_t istate = ml_set_interrupts_enabled(FALSE);
+ struct x86_fx_thread_state *pifps = THREAD_TO_PCB(ft)->ifps;
+
+ if (pifps) {
+ if (pifps->fp_valid) {
+ prv = fpsimd_pop((uintptr_t) &pifps->fx_XMM_reg[0][0],
+ sizeof(pifps->fx_XMM_reg));
+ } else {
+ uintptr_t cr0 = get_cr0();
+ clear_ts();
+ fp_save(ft);
+ prv = fpsimd_pop((uintptr_t) &pifps->fx_XMM_reg[0][0],
+ sizeof(pifps->fx_XMM_reg));
+ pifps->fp_valid = FALSE;
+ if (cr0 & CR0_TS) {
+ set_cr0(cr0);
+ }
+ }
+ }
+ ml_set_interrupts_enabled(istate);
+ return prv;
}
#include <mach/i386/thread_status.h>
#include <i386/proc_reg.h>
+#define FP_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE))
+#define AVX_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM))
+#define AVX512_XMASK ((uint32_t) (XFEM_X87 | XFEM_SSE | XFEM_YMM | XFEM_ZMM))
+
typedef enum {
FXSAVE32 = 1,
FXSAVE64 = 2,
UNDEFINED,
FP,
AVX,
-#if !defined(RC_HIDE_XNU_J137)
AVX512
-#endif
} xstate_t;
+static inline uint64_t xgetbv(uint32_t c) {
+ uint32_t mask_hi, mask_lo;
+ __asm__ __volatile__("xgetbv" : "=a"(mask_lo), "=d"(mask_hi) : "c" (c));
+ return ((uint64_t) mask_hi<<32) + (uint64_t) mask_lo;
+}
+
+static inline void xsetbv(uint32_t mask_hi, uint32_t mask_lo) {
+ __asm__ __volatile__("xsetbv" :: "a"(mask_lo), "d"(mask_hi), "c" (XCR0));
+}
+
extern void init_fpu(void);
extern void fpu_module_init(void);
extern void fpu_free(
boolean_t is_64bit);
extern xstate_t fpu_default;
+extern xstate_t fpu_capability;
extern xstate_t current_xstate(void);
extern void fpUDflt(user_addr_t rip);
-
+#ifdef MACH_KERNEL_PRIVATE
+extern uint32_t thread_fpsimd_hash(thread_t);
+extern void vzeroall(void);
+extern void xmmzeroall(void);
+#endif /* MACH_KERNEL_PRIVATE */
#endif /* _I386_FPU_H_ */
/*
- * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2018 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
return;
case T_SSE_FLOAT_ERROR:
- fpSSEexterrflt();
+ fpSSEexterrflt();
return;
- case T_DEBUG:
+
+ case T_INVALID_OPCODE:
+ fpUDflt(kern_ip);
+ goto debugger_entry;
+
+ case T_DEBUG:
if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS)
{
/* We've somehow encountered a debug
return;
}
goto debugger_entry;
-#ifdef __x86_64__
case T_INT3:
goto debugger_entry;
-#endif
case T_PAGE_FAULT:
#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif
+#if DEBUG
+uint32_t fsigs[2];
+uint32_t fsigns, fsigcs;
+#endif
+
/*
* Trap from user mode.
*/
prot |= VM_PROT_WRITE;
if (__improbable(err & T_PF_EXECUTE))
prot |= VM_PROT_EXECUTE;
+#if DEVELOPMENT || DEBUG
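+ /* Hash the FP/SIMD state around the fault (fpsimd_fault_popc) to detect save-area corruption. */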
+ uint32_t fsig = 0;
+ fsig = thread_fpsimd_hash(thread);
+#if DEBUG
+ fsigs[0] = fsig;
+#endif
+#endif
kret = vm_fault(thread->map,
vaddr,
prot, FALSE, VM_KERN_MEMORY_NONE,
THREAD_ABORTSAFE, NULL, 0);
-
+#if DEVELOPMENT || DEBUG
+ if (fsig) {
+ uint32_t fsig2 = thread_fpsimd_hash(thread);
+#if DEBUG
+ fsigcs++;
+ fsigs[1] = fsig2;
+#endif
+ if (fsig != fsig2) {
+ panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
+ }
+ } else {
+#if DEBUG
+ fsigns++;
+#endif
+ }
+#endif
if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
thread_exception_return();
/*NOTREACHED*/
#include <i386/cpuid.h>
#include <vm/vm_kern.h>
#include <i386/mp.h> // mp_broadcast
+#include <i386/fpu.h>
#include <machine/cpu_number.h> // cpu_number
#include <pexpert/pexpert.h> // boot-args
lck_spin_unlock(ucode_slock);
}
+static void
+ucode_cpuid_set_info(void)
+{
+ uint64_t saved_xcr0, dest_xcr0;
+ int need_xcr0_restore = 0;
+ boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE);
+
+ /*
+ * Before we cache the CPUID information, we must configure XCR0 with the maximal set of
+ * features to ensure the save area returned in the xsave leaf is correctly-sized.
+ *
+ * Since we are guaranteed that init_fpu() has already happened, we can use state
+ * variables set there that were already predicated on the presence of explicit
+ * boot-args enables/disables.
+ */
+
+ if (fpu_capability == AVX512 || fpu_capability == AVX) {
+ saved_xcr0 = xgetbv(XCR0);
+ dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK;
+ assert((get_cr4() & CR4_OSXSAVE) != 0);
+ if (saved_xcr0 != dest_xcr0) {
+ need_xcr0_restore = 1;
+ xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL);
+ }
+ }
+
+ cpuid_set_info();
+
+ if (need_xcr0_restore) {
+ xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL);
+ }
+
+ ml_set_interrupts_enabled(intrs_enabled);
+}
+
/* Farm an update out to all CPUs */
static void
xcpu_update(void)
mp_broadcast(cpu_update, NULL);
/* Update the cpuid info */
- cpuid_set_info();
-
+ ucode_cpuid_set_info();
}
/*
no32exec_35914211_helper: INVALID_ARCHS = x86_64
no32exec_35914211: INVALID_ARCHS = i386
+ifneq ($(PLATFORM),BridgeOS)
+EXCLUDED_SOURCES += remote_time.c
+else
+remote_time: INVALID_ARCHS = armv7 armv7s arm64_32
+endif
+
include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.targets
--- /dev/null
+#include <darwintest.h>
+#include <System/kern/remote_time.h>
+#include <mach/mach_time.h>
+#include <stdint.h>
+#include <sys/sysctl.h>
+#include <TargetConditionals.h>
+extern uint64_t __mach_bridge_remote_time(uint64_t);
+
+T_DECL(remote_time_syscall, "test mach_bridge_remote_time syscall",
+ T_META_CHECK_LEAKS(false))
+{
+#if TARGET_OS_BRIDGE
+ uint64_t local_time = mach_absolute_time();
+ uint64_t remote_time1 = mach_bridge_remote_time(local_time);
+ uint64_t remote_time2 = __mach_bridge_remote_time(local_time);
+ T_LOG("local_time = %llu, remote_time1 = %llu, remote_time2 = %llu",
+ local_time, remote_time1, remote_time2);
+ T_ASSERT_EQ(remote_time1, remote_time2, "syscall works");
+#else
+ T_SKIP("Skipping test");
+#endif /* TARGET_OS_BRIDGE */
+}
reason = "ResetTrue"
elseif buf[3] == 3 then
reason = "RateZero"
+ elseif buf[3] == 4 then
+ reason = "TSMismatch"
end
printf("%s %-15s ( %-10s %-10s ) ----------------------------------------\n",
prefix, reason, format_timestamp_arm(buf[1]), format_timestamp_intel(buf[2]))
printf("%s ( %-10s Reset )\n",
prefix, format_timestamp_arm(buf[1]), format_timestamp_intel(buf[2]))
else
- printf("%s ( %-10s %-10s )\n",
- prefix, format_timestamp_arm(buf[1]), format_timestamp_intel(buf[2]))
+ local skip = ""
+ if buf[3] == 1 then
+ skip = "Int handler"
+ end
+ printf("%s ( %-10s %-10s ) %s\n",
+ prefix, format_timestamp_arm(buf[1]), format_timestamp_intel(buf[2]), skip)
end
end)
+trace_codename("MACH_CLOCK_BRIDGE_SKIP_TS", function(buf)
+ local prefix = get_prefix(buf, "*")
+
+ if buf[4] > 0 then
+ printf("%s SKIP_RESET:%3d (Cur: %-10s Prev:%-10s) %-10s\n",
+ prefix, buf[4], format_timestamp_arm(buf[1]), format_timestamp_arm(buf[3]),
+ format_timestamp_intel(buf[2]))
+ else
+ printf("%s SKIP_DISTANCE: (Cur: %-10s Prev: %-10s) %-10s\n",
+ prefix, format_timestamp_arm(buf[1]), format_timestamp_arm(buf[3]),
+ format_timestamp_intel(buf[2]))
+ end
+
+end)
+
+trace_codename("MACH_CLOCK_BRIDGE_TS_MISMATCH", function(buf)
+ local prefix = get_prefix(buf, "?")
+
+ local diff = (math.abs(buf[2] - buf[3]))/1000000
+
+ printf("%s ( Cur: %-10s Pred: %-10s Diff: %5.6f ms ) @ %-20s\n",
+ prefix, format_timestamp_intel(buf[2]), format_timestamp_intel(buf[3]),
+ diff, format_timestamp_arm(buf[1]))
+
+end)
+
+trace_codename("MACH_CLOCK_BRIDGE_OBSV_RATE", function(buf)
+ local prefix = get_prefix(buf, "=")
+
+ local rate
+ if darwin.uint64_to_double then
+ rate = darwin.uint64_to_double(buf[1])
+ else
+ rate = 0/0 -- NaN; stock Lua has no math.nan
+ end
+
+ printf("%s obsv_rate = %f exceeded limits(0.8, 1.2)\n", prefix, rate)
+
+end)