export MakeInc_rule=${VERSDIR}/makedefs/MakeInc.rule
export MakeInc_dir=${VERSDIR}/makedefs/MakeInc.dir
+
#
# Dispatch non-xnu build aliases to their own build
# systems. All xnu variants start with MakeInc_top.
install install_desktop install_embedded \
install_release_embedded install_development_embedded \
install_kernels \
- cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \
+ cscope tags TAGS \
help
DEFAULT_TARGET = all
$ make cscope # this will build cscope database
-Code Style
-==========
-
-Source files can be reformatted to comply with the xnu code style using the "restyle" make target invoked from the
-top-level project directory.
-
- $ make restyle # re-format all source files to be xnu code style conformant.
-
-Compliance can be checked using the "checkstyle" make target.
-
- $ make checkstyle # Check all relevant source files for xnu code style conformance.
-
How to install a new header file from XNU
=========================================
*/
#define CONFIGDEP 0x01 /* obsolete? */
#define OPTIONSDEF 0x02 /* options definition entry */
+#define LIBRARYDEP 0x04 /* include file in library build */
struct device {
int d_type; /* CONTROLLER, DEVICE, bus adaptor */
#include "config.h"
void read_files(void);
-void do_objs(FILE *fp, const char *msg, int ext);
+void do_objs(FILE *fp, const char *msg, int ext, int flags);
void do_files(FILE *fp, const char *msg, char ext);
void do_machdep(FILE *ofp);
void do_rules(FILE *f);
continue;
percent:
if (eq(line, "%OBJS\n")) {
- do_objs(ofp, "OBJS=", -1);
+ do_objs(ofp, "OBJS=", -1, 0);
+ } else if (eq(line, "%LIBOBJS\n")) {
+ do_objs(ofp, "LIBOBJS=", -1, LIBRARYDEP);
} else if (eq(line, "%CFILES\n")) {
do_files(ofp, "CFILES=", 'c');
- do_objs(ofp, "COBJS=", 'c');
+ do_objs(ofp, "COBJS=", 'c', 0);
} else if (eq(line, "%CXXFILES\n")) {
do_files(ofp, "CXXFILES=", 'p');
- do_objs(ofp, "CXXOBJS=", 'p');
+ do_objs(ofp, "CXXOBJS=", 'p', 0);
} else if (eq(line, "%SFILES\n")) {
do_files(ofp, "SFILES=", 's');
- do_objs(ofp, "SOBJS=", 's');
+ do_objs(ofp, "SOBJS=", 's', 0);
} else if (eq(line, "%MACHDEP\n")) {
do_machdep(ofp);
} else if (eq(line, "%RULES\n")) {
const char *devorprof;
int options;
int not_option;
+ int for_xnu_lib;
char pname[BUFSIZ];
char fname[1024];
char *rest = (char *) 0;
nreqs = 0;
devorprof = "";
needs = 0;
+ for_xnu_lib = 0;
if (eq(wd, "standard")) {
goto checkdev;
}
next_word(fp, wd);
goto save;
}
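+	/*
+	 * "xnu-library" tags this file for inclusion in the static xnu
+	 * library build (LIBOBJS) in addition to the normal kernel build.
+	 */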
+ if (eq(wd, "xnu-library")) {
+ for_xnu_lib = 1;
+ goto nextopt;
+ }
nreqs++;
if (needs == 0 && nreqs == 1) {
needs = ns(wd);
goto getrest;
}
next_word(fp, wd);
+ if (wd && eq(wd, "xnu-library")) {
+ for_xnu_lib = 1;
+ next_word(fp, wd);
+ }
if (wd) {
devorprof = wd;
next_word(fp, wd);
if (pf && pf->f_type == INVISIBLE) {
pf->f_flags = 1; /* mark as duplicate */
}
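+	/* Carry the xnu-library tag onto the file entry so do_objs() can filter on it. */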
+ if (for_xnu_lib) {
+ tp->f_flags |= LIBRARYDEP;
+ }
goto next;
}
}
void
-do_objs(FILE *fp, const char *msg, int ext)
+do_objs(FILE *fp, const char *msg, int ext, int flags)
{
struct file_list *tp;
int lpos, len;
continue;
}
+	/*
+	 * If flags were requested, skip entries that don't have all of them set.
+	 */
+ if (flags && ((tp->f_flags & flags) != flags)) {
+ continue;
+ }
+
/*
* Check for '.o' file in list
*/
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
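+# Collect the objects tagged for the static library build into a separate
+# file list, consumed when archiving lib$(COMPONENT).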
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
MAKESYSCALLS = $(SRCROOT)/bsd/kern/makesyscalls.sh
init_sysent.c: $(TARGET)/bsd.syscalls.master
@$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$(<F)$(Color0)"
$(_v)$(MAKESYSCALLS) $< systrace > /dev/null
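+# For the xnu_libraries project, drive the build from the library file list
+# instead of the kernel file list.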
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
bsd/kern/posix_sem.c standard
bsd/kern/posix_shm.c standard
# XXXdbg - I need this in the journaling and block cache code
-bsd/kern/qsort.c standard
+bsd/kern/qsort.c standard xnu-library
bsd/kern/kpi_socket.c optional sockets
bsd/kern/kpi_socketfilter.c optional sockets
bsd/kern/proc_info.c standard
#include <kern/task.h>
#include <vm/vm_map.h>
+#include <vm/pmap.h>
#include <vm/vm_kern.h>
if (p->p_csflags & CS_VALID) {
p->p_csflags |= CS_DEBUGGED;
}
+#if PMAP_CS
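+	/*
+	 * With pmap-enforced code signing, also ask the pmap layer to allow
+	 * writable+executable mappings in this (now debugged) process's map.
+	 */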
+ task_t procTask = proc_task(p);
+ if (procTask) {
+ vm_map_t proc_map = get_task_map_reference(procTask);
+ if (proc_map) {
+ if (vm_map_cs_wx_enable(proc_map) != KERN_SUCCESS) {
+ printf("CODE SIGNING: cs_allow_invalid() not allowed by pmap: pid %d\n", p->p_pid);
+ }
+ vm_map_deallocate(proc_map);
+ }
+ }
+#endif // PMAP_CS
proc_unlock(p);
/* allow a debugged process to hide some (debug-only!) memory */
proc_fdunlock(p);
return error;
}
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
(void) fp_drop(p, old, fp, 1);
proc_fdunlock(p);
proc_fdunlock(p);
return error;
}
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
(void) fp_drop(p, old, fp, 1);
proc_fdunlock(p);
}
if ((nfp = fdp->fd_ofiles[new]) != NULL) {
- if (FP_ISGUARDED(nfp, GUARD_CLOSE)) {
+ if (fp_isguarded(nfp, GUARD_CLOSE)) {
fp_drop(p, old, fp, 1);
error = fp_guard_exception(p,
new, nfp, kGUARD_EXC_CLOSE);
switch (uap->cmd) {
case F_DUPFD:
case F_DUPFD_CLOEXEC:
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
goto out;
}
if (uap->arg & FD_CLOEXEC) {
*pop |= UF_EXCLOSE;
} else {
- if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ if (fp_isguarded(fp, 0)) {
error = fp_guard_exception(p,
fd, fp, kGUARD_EXC_NOCLOEXEC);
goto out;
return EBADF;
}
- if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ if (fp_isguarded(fp, GUARD_CLOSE)) {
int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
proc_fdunlock(p);
return error;
goto out_unlock;
}
- if (FP_ISGUARDED(fp, GUARD_FILEPORT)) {
+ if (fp_isguarded(fp, GUARD_FILEPORT)) {
err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
goto out_unlock;
}
*/
switch (error) {
case ENODEV:
- if (FP_ISGUARDED(wfp, GUARD_DUP)) {
+ if (fp_isguarded(wfp, GUARD_DUP)) {
proc_fdunlock(p);
return EPERM;
}
proc_fdlock(p);
if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) {
error = EBADF;
- } else if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ } else if (fp_isguarded(fp, 0)) {
error = fp_guard_exception(p, psfa->psfaa_filedes,
fp, kGUARD_EXC_NOCLOEXEC);
} else {
#include <sys/reason.h>
#endif
-
#define f_flag fp_glob->fg_flag
extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
user_addr_t bufp, user_size_t nbyte, off_t offset,
struct guarded_fileproc {
struct fileproc gf_fileproc;
- u_int gf_magic;
u_int gf_attrs;
guardid_t gf_guard;
};
-const size_t sizeof_guarded_fileproc = sizeof(struct guarded_fileproc);
+ZONE_DECLARE(gfp_zone, "guarded_fileproc",
+ sizeof(struct guarded_fileproc),
+ ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
-#define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp))
-#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
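+/*
+ * Guarded fileprocs are now allocated from their own zone; the zone_require()
+ * check in FP_TO_GFP() replaces the old gf_magic integrity check.
+ */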
+static inline struct guarded_fileproc *
+FP_TO_GFP(struct fileproc *fp)
+{
+ struct guarded_fileproc *gfp =
+ __container_of(fp, struct guarded_fileproc, gf_fileproc);
+
+ zone_require(gfp_zone, gfp);
+ return gfp;
+}
-#define GUARDED_FILEPROC_MAGIC 0x29083
+#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
struct gfp_crarg {
guardid_t gca_guard;
struct gfp_crarg *aarg = crarg;
struct guarded_fileproc *gfp;
- if ((gfp = kalloc(sizeof(*gfp))) == NULL) {
- return NULL;
- }
-
- bzero(gfp, sizeof(*gfp));
+ gfp = zalloc_flags(gfp_zone, Z_WAITOK | Z_ZERO);
struct fileproc *fp = &gfp->gf_fileproc;
os_ref_init(&fp->fp_iocount, &f_refgrp);
fp->fp_flags = FTYPE_GUARDED;
- gfp->gf_magic = GUARDED_FILEPROC_MAGIC;
gfp->gf_guard = aarg->gca_guard;
gfp->gf_attrs = aarg->gca_attrs;
guarded_fileproc_free(struct fileproc *fp)
{
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
- if (FILEPROC_TYPE(fp) != FTYPE_GUARDED ||
- GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt fp %p flags %x", __func__, fp, fp->fp_flags);
- }
-
- kfree(gfp, sizeof(*gfp));
+ zfree(gfp_zone, gfp);
}
static int
}
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt fp %p", __func__, fp);
- }
-
if (guard != gfp->gf_guard) {
(void) fp_drop(p, fd, fp, locked);
return EPERM; /* *not* a mismatch exception */
/*
* Expected use pattern:
*
- * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ * if (fp_isguarded(fp, GUARD_CLOSE)) {
* error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
* proc_fdunlock(p);
* return error;
* }
+ *
+ * Passing `0` to `attrs` returns whether the fp is guarded at all.
*/
int
fp_isguarded(struct fileproc *fp, u_int attrs)
{
if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
- struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
- return (attrs & gfp->gf_attrs) == attrs;
+ return (attrs & FP_TO_GFP(fp)->gf_attrs) == attrs;
}
return 0;
}
*/
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
-
if (oldg == gfp->gf_guard &&
uap->guardflags == gfp->gf_attrs) {
/*
goto dropout;
}
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
-
if (oldg != gfp->gf_guard ||
uap->guardflags != gfp->gf_attrs) {
error = EPERM;
{
struct lockf *block;
struct lockf **head = lock->lf_head;
- struct lockf **prev, *overlap, *ltmp;
+ struct lockf **prev, *overlap;
static const char lockstr[] = "lockf";
int priority, needtolink, error;
struct vnode *vp = lock->lf_vnode;
lf_wakelock(overlap, TRUE);
}
overlap->lf_type = lock->lf_type;
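+			/*
+			 * Hand any locks blocked on the new lock over to the
+			 * overlapping lock that replaces it before freeing it.
+			 */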
+ lf_move_blocked(overlap, lock);
FREE(lock, M_LOCKF);
lock = overlap; /* for lf_coalesce_adjacent() */
break;
* Check for common starting point and different types.
*/
if (overlap->lf_type == lock->lf_type) {
+ lf_move_blocked(overlap, lock);
FREE(lock, M_LOCKF);
lock = overlap; /* for lf_coalesce_adjacent() */
break;
overlap->lf_type == F_WRLCK) {
lf_wakelock(overlap, TRUE);
} else {
- while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
- ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
- TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
- lf_block);
- TAILQ_INSERT_TAIL(&lock->lf_blkhd,
- ltmp, lf_block);
- ltmp->lf_next = lock;
- }
+ lf_move_blocked(lock, overlap);
}
/*
* Add the new lock if necessary and delete the overlap.
fproc->fi_status |= PROC_FP_CLFORK;
}
}
- if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ if (fp_isguarded(fp, 0)) {
fproc->fi_status |= PROC_FP_GUARDED;
fproc->fi_guardflags = 0;
if (fp_isguarded(fp, GUARD_CLOSE)) {
}
if ((fp->f_flag & FWRITE) == 0) {
error = EBADF;
- } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ } else if (fp_isguarded(fp, GUARD_WRITE)) {
proc_fdlock(p);
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
proc_fdunlock(p);
if ((fp->f_flag & FWRITE) == 0) {
error = EBADF;
- } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ } else if (fp_isguarded(fp, GUARD_WRITE)) {
proc_fdlock(p);
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
proc_fdunlock(p);
error = EBADF;
goto ExitThisRoutine;
}
- if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ if (fp_isguarded(fp, GUARD_WRITE)) {
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
goto ExitThisRoutine;
}
} else if (!fg_sendable(tmpfp->fp_glob)) {
proc_fdunlock(p);
return EINVAL;
- } else if (FP_ISGUARDED(tmpfp, GUARD_SOCKET_IPC)) {
+ } else if (fp_isguarded(tmpfp, GUARD_SOCKET_IPC)) {
error = fp_guard_exception(p,
fds[i], tmpfp, kGUARD_EXC_SOCKET_IPC);
proc_fdunlock(p);
cfc->cf_flags |= CFF_FLOW_CONTROLLED;
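+		/* Downgrade to a shared hold rather than dropping the lock entirely. */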
- cfil_rw_unlock_exclusive(&cfil_lck_rw);
+ cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw);
} else if (error != 0) {
OSIncrementAtomic(&cfil_stats.cfs_stats_event_fail);
}
rtinfo->rti_info[i] = &sa_zero;
return 0; /* should be EINVAL but for compat */
}
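+		/*
+		 * Reject a sockaddr too short to contain even its fixed header;
+		 * anything shorter cannot be a valid address.
+		 */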
+ if (sa->sa_len < offsetof(struct sockaddr, sa_data)) {
+ return EINVAL;
+ }
/* accept it */
rtinfo->rti_info[i] = sa;
ADVANCE32(cp, sa);
errno_t
flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p)
{
+#if CONTENT_FILTER
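+	/*
+	 * Attach the content filter to an outbound stream socket before the
+	 * connection is handed to flow divert, so it stays subject to filtering.
+	 */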
+ if (SOCK_TYPE(so) == SOCK_STREAM && !(so->so_flags & SOF_CONTENT_FILTER)) {
+ int error = cfil_sock_attach(so, NULL, to, CFS_CONNECTION_DIR_OUT);
+ if (error != 0) {
+ struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
+ FDLOG(LOG_ERR, fd_cb, "Failed to attach cfil: %d", error);
+ return error;
+ }
+ }
+#endif /* CONTENT_FILTER */
+
return flow_divert_connect_out_internal(so, to, p, false);
}
inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
/* reset the incomp processing flag */
if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
tcp_set_max_rwinscale(tp, so);
}
#endif
eip6 = (struct ip6_hdr *)(icmp6 + 1);
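+	/*
+	 * Zero icmp6dst up front; the routing-header parsing below may fill in
+	 * its final-destination address before the generic path runs.
+	 */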
+ bzero(&icmp6dst, sizeof(icmp6dst));
/* Detect the upper level protocol */
{
int eoff = off + sizeof(struct icmp6_hdr) +
sizeof(struct ip6_hdr);
struct ip6ctlparam ip6cp;
- struct in6_addr *finaldst = NULL;
int icmp6type = icmp6->icmp6_type;
struct ip6_frag *fh;
struct ip6_rthdr *rth;
/* just ignore a bogus header */
if ((rth0->ip6r0_len % 2) == 0 &&
(hops = rth0->ip6r0_len / 2)) {
- finaldst = (struct in6_addr *)(void *)(rth0 + 1) + (hops - 1);
+ icmp6dst.sin6_addr = *((struct in6_addr *)(void *)(rth0 + 1) + (hops - 1));
}
}
eoff += rthlen;
*/
eip6 = (struct ip6_hdr *)(icmp6 + 1);
- bzero(&icmp6dst, sizeof(icmp6dst));
icmp6dst.sin6_len = sizeof(struct sockaddr_in6);
icmp6dst.sin6_family = AF_INET6;
- if (finaldst == NULL) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&icmp6dst.sin6_addr)) {
icmp6dst.sin6_addr = eip6->ip6_dst;
- } else {
- icmp6dst.sin6_addr = *finaldst;
}
if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL)) {
goto freeit;
icmp6src.sin6_flowinfo =
(eip6->ip6_flow & IPV6_FLOWLABEL_MASK);
- if (finaldst == NULL) {
- finaldst = &eip6->ip6_dst;
- }
ip6cp.ip6c_m = m;
ip6cp.ip6c_icmp6 = icmp6;
ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1);
ip6cp.ip6c_off = eoff;
- ip6cp.ip6c_finaldst = finaldst;
+ ip6cp.ip6c_finaldst = &icmp6dst.sin6_addr;
ip6cp.ip6c_src = &icmp6src;
ip6cp.ip6c_nxt = nxt;
.ru_reseed = 0
};
-static struct randomtab randomtab_20 = {
- .ru_bits = 20, /* resulting bits */
- .ru_out = 180, /* Time after wich will be reseeded */
- .ru_max = 200000, /* Uniq cycle, avoid blackjack prediction */
- .ru_gen = 2, /* Starting generator */
- .ru_n = 524269, /* RU_N-1 = 2^2*3^2*14563 */
- .ru_agen = 7, /* determine ru_a as RU_AGEN^(2*rand) */
- .ru_m = 279936, /* RU_M = 2^7*3^7 - don't change */
- .pfacts = { 2, 3, 14563, 0 }, /* factors of ru_n */
- .ru_counter = 0,
- .ru_msb = 0,
- .ru_x = 0,
- .ru_seed = 0,
- .ru_seed2 = 0,
- .ru_a = 0,
- .ru_b = 0,
- .ru_g = 0,
- .ru_reseed = 0
-};
-
static u_int32_t pmod(u_int32_t, u_int32_t, u_int32_t);
static void initid(struct randomtab *);
static u_int32_t randomid(struct randomtab *);
u_int32_t
ip6_randomflowlabel(void)
{
- return randomid(&randomtab_20) & 0xfffff;
+ return RandomULong() & IPV6_FLOWLABEL_MASK;
}
/* construct new IPv4 header. see RFC 2401 5.1.2.1 */
/* ECN consideration. */
- ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
+ ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
if (plen + sizeof(struct ip) < IP_MAXPACKET) {
ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
/* ECN consideration. */
- ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
+ ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
ip6->ip6_plen = htons((u_int16_t)plen);
} else {
if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
in6p->inp_flow |=
- (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
M_PREPEND(m, sizeof(*ip6), M_WAIT, 1);
if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
in6p->inp_flow |=
- (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
if (af == AF_INET) {
inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
}
return error;
#define FILEPROC_TYPE(fp) ((fp)->fp_flags & FP_TYPEMASK)
-#define FP_ISGUARDED(fp, attribs) \
- ((FILEPROC_TYPE(fp) == FTYPE_GUARDED) ? fp_isguarded(fp, attribs) : 0)
-
typedef enum {
FTYPE_SIMPLE = 0,
FTYPE_GUARDED = (1 << _FP_TYPESHIFT)
struct nameidata nd;
char smallname[64];
char *filename = NULL;
- size_t len;
+ size_t alloc_len;
+ size_t copy_len;
if ((dvp == NULLVP) ||
(basename == NULL) || (basename[0] == '\0') ||
return;
}
filename = &smallname[0];
- len = snprintf(filename, sizeof(smallname), "._%s", basename);
- if (len >= sizeof(smallname)) {
- len++; /* snprintf result doesn't include '\0' */
- filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
- len = snprintf(filename, len, "._%s", basename);
+ alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
+ if (alloc_len >= sizeof(smallname)) {
+ alloc_len++; /* snprintf result doesn't include '\0' */
+ filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+ copy_len = snprintf(filename, alloc_len, "._%s", basename);
}
NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
CAST_USER_ADDR_T(filename), ctx);
vnode_put(xvp);
out2:
if (filename && filename != &smallname[0]) {
- kheap_free(KHEAP_TEMP, filename, len);
+ kheap_free(KHEAP_TEMP, filename, alloc_len);
}
}
#endif /* CONFIG_APPLEDOUBLE */
_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
}
DTRACE_FSINFO(advlock, vnode_t, vp);
- if (op == F_UNLCK && flags == F_FLOCK) {
+ if (op == F_UNLCK &&
+ (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
post_event_if_success(vp, _err, NOTE_FUNLOCK);
}
}
char smallname[64];
char *filename = NULL;
const char *basename = NULL;
- size_t len;
+ size_t alloc_len;
+ size_t copy_len;
errno_t error;
int opened = 0;
int referenced = 0;
goto out;
}
filename = &smallname[0];
- len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
- if (len >= sizeof(smallname)) {
- len++; /* snprintf result doesn't include '\0' */
- filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
- len = snprintf(filename, len, "%s%s", ATTR_FILE_PREFIX, basename);
+ alloc_len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
+ if (alloc_len >= sizeof(smallname)) {
+ alloc_len++; /* snprintf result doesn't include '\0' */
+ filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+ copy_len = snprintf(filename, alloc_len, "%s%s", ATTR_FILE_PREFIX, basename);
}
/*
* Note that the lookup here does not authorize. Since we are looking
vnode_putname(basename);
}
if (filename && filename != &smallname[0]) {
- kheap_free(KHEAP_TEMP, filename, len);
+ kheap_free(KHEAP_TEMP, filename, alloc_len);
}
*xvpp = xvp; /* return a referenced vnode */
$(_v)$(SOURCE)/generate_combined_symbolsets_plist.sh $@ $^ $(_vstdout)
$(_v)$(PLUTIL) -convert binary1 -s $@
+ifneq ($(RC_ProjectName),xnu_libraries)
$(OBJPATH)/allsymbols: $(OBJPATH)/$(KERNEL_FILE_NAME)
$(_v)$(NM) -gj $< | sort -u > $@
$(OBJPATH)/$(MI_SUPPORTED_KPI_FILENAME)
do_config_all:: build_symbol_sets
+else
+# We are building XNU as a static library - avoid creating symbol sets
+endif
# There's no simple static pattern rule for these paths, so hardcode dependencies in the command list
$(SYMROOT_INSTALL_KEXT_MACHO_FILES): ALWAYS
$(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
ifneq ($(INSTALL_KASAN_ONLY),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
do_config_install:: $(SYMROOT_INSTALL_KEXT_MACHO_FILES) \
$(SYMROOT_INSTALL_KEXT_PLISTS) \
$(DSTROOT_INSTALL_KEXT_MACHO_FILES) \
$(DSTROOT)/$(KRESDIR)/$(MD_SUPPORTED_KPI_FILENAME) \
$(DSTROOT)/$(KRESDIR)/$(MI_SUPPORTED_KPI_FILENAME)
endif
+endif
$(OBJPATH)/all-kpi.exp: $(EXPORTS_FILES)
$(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(Kasan_EXPORTS)
-20.2.0
+20.3.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
+++ /dev/null
-ARMv8.3 Pointer Authentication in xnu
-=====================================
-
-Introduction
-------------
-
-This document describes xnu's use of the ARMv8.3-PAuth extension. Specifically,
-xnu uses ARMv8.3-PAuth to protect against Return-Oriented-Programming (ROP)
-and Jump-Oriented-Programming (JOP) attacks, which attempt to gain control flow
-over a victim program by overwriting return addresses or function pointers
-stored in memory.
-
-It is assumed the reader is already familar with the basic concepts behind
-ARMv8.3-PAuth and what its instructions do. The "ARMv8.3-A Pointer
-Authentication" section of Google Project Zero's ["Examining Pointer
-Authentication on the iPhone
-XS"](https://googleprojectzero.blogspot.com/2019/02/examining-pointer-authentication-on.html)
-provides a good introduction to ARMv8.3-PAuth. The reader may find more
-comprehensive background material in:
-
-* The "Pointer authentication in AArch64 state" section of the [ARMv8
- ARM](https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile)
- describes the new instructions and registers associated with ARMv8.3-PAuth.
-
-* [LLVM's Pointer Authentication
- documentation](https://github.com/apple/llvm-project/blob/apple/master/clang/docs/PointerAuthentication.rst)
- outlines how clang uses ARMv8.3-PAuth instructions to harden key C, C++,
- Swift, and Objective-C language constructs.
-
-### Threat model
-
-Pointer authentication's threat model assumes that an attacker has found a gadget
-to read and write arbitrary memory belonging to a victim process, which may
-include the kernel. The attacker does *not* have the ability to execute
-arbitrary code in that process's context. Pointer authentication aims to
-prevent the attacker from gaining control flow over the victim process by
-overwriting sensitive pointers in its address space (e.g., return addresses
-stored on the stack).
-
-Following this threat model, xnu takes a two-pronged approach to prevent the
-attacker from gaining control flow over the victim process:
-
-1. Both xnu and first-party binaries are built with LLVM's `-arch arm64e` flag,
- which generates pointer-signing and authentication instructions to protect
- addresses stored in memory (including ones pushed to the stack). This
- process is generally transparent to xnu, with exceptions discussed below.
-
-2. On exception entry, xnu hashes critical register state before it is spilled
- to memory. On exception return, the reloaded state is validated against this
- hash.
-
-The ["xnu PAC infrastructure"](#xnu-pac-infrastructure) section discusses how
-these hardening techniques are implemented in xnu in more detail.
-
-
-Key generation on Apple CPUs
-----------------------------
-
-ARMv8.3-PAuth implementations may use an <span style="font-variant:
-small-caps">implementation defined</span> cipher. Apple CPUs implement an
-optional custom cipher with two key-generation changes relevant to xnu.
-
-
-### Per-boot diversifier
-
-Apple's optional cipher adds a per-boot diversifier. In effect, even if xnu
-initializes the "ARM key" registers (`APIAKey`, `APGAKey`, etc.) with constants,
-signing a given value will still produce different signatures from boot to boot.
-
-
-### Kernel/userspace diversifier
-
-Apple CPUs also contain a second diversifier known as `KERNKey`. `KERNKey` is
-automatically mixed into the final signing key (or not) based on the CPU's
-exception level. When xnu needs to sign or authenticate userspace-signed
-pointers, it uses the `ml_enable_user_jop_key` and `ml_disable_user_jop_key`
-routines to manually enable or disable `KERNKey`. `KERNKey` allows the CPU to
-effectively use different signing keys for userspace and kernel, without needing
-to explicitly reprogram the generic ARM keys on every kernel entry and exit.
-
-
-xnu PAC infrastructure
-----------------------
-
-For historical reasons, the xnu codebase collectively refers to xnu + iOS's
-pointer authentication infrastructure as Pointer Authentication Codes (PAC). The
-remainder of this document will follow this terminology for consistency with
-xnu.
-
-### arm64e binary "slice"
-
-Binaries with PAC instructions are not fully backwards-compatible with non-PAC
-CPUs. Hence LLVM/iOS treat PAC-enabled binaries as a distinct ABI "slice" named
-arm64e. xnu enforces this distinction by disabling the PAC keys when returning
-to non-arm64e userspace, effectively turning ARMv8.3-PAuth auth and sign
-instructions into no-ops (see the ["SCTLR_EL1"](#sctlr-el1) heading below for
-more details).
-
-### Kernel pointer signing
-
-xnu is built with `-arch arm64e`, which causes LLVM to automatically sign and
-authenticate function pointers and return addresses spilled onto the stack. This
-process is largely transparent to software, with some exceptions:
-
-- During early boot, xnu rebases and signs the pointers stored in its own
- `__thread_starts` section (see `rebase_threaded_starts` in
- `osfmk/arm/arm_init.c`).
-
-- As parts of the userspace shared region are paged in, the page-in handler must
- also slide and re-sign any signed pointers stored in it. The ["Signed
- pointers in shared regions"](#signed-pointers-in-shared-regions) section
- discusses this in further detail.
-
-- Assembly routines must manually sign the return address with `pacibsp` before
- pushing it onto the stack, and use an authenticating `retab` instruction in
- place of `ret`. xnu provides assembly macros `ARM64_STACK_PROLOG` and
- `ARM64_STACK_EPILOG` which emit the appropriate instructions for both arm64
- and arm64e targets.
-
- Likewise, branches in assembly to signed C function pointers must use the
- authenticating `blraa` instruction in place of `blr`.
-
-- Signed pointers must be stripped with `ptrauth_strip` before they can be
- compared against compile-time constants like `VM_MIN_KERNEL_ADDRESS`.
-
-### Testing data pointer signing
-
-xnu contains tests for each manually qualified data pointer that should be
-updated as new pointers are qualified. The tests allocate a structure
-containing a __ptrauth qualified member, and write a pointer to that member.
-We can then compare the stored value, which should be signed, with a manually
-constructed signature. See `ALLOC_VALIDATE_DATA_PTR`.
-
-Tests are triggered by setting the `kern.run_ptrauth_data_tests` sysctl. The
-sysctl is implemented, and BSD structures are tested, in `bsd/tests/ptrauth_data_tests_sysctl.c`.
-Mach structures are tested in `osfmk/tests/ptrauth_data_tests.c`.
-
-### Managing PAC register state
-
-xnu generally tries to avoid reprogramming the CPU's PAC-related registers on
-kernel entry and exit, since this could add significant overhead to a hot
-codepath. Instead, xnu uses the following strategies to manage the PAC register
-state.
-
-#### A keys
-
-Userspace processes' A keys (`AP{IA,DA,GA}Key`) are derived from the field
-`jop_pid` inside `struct task`. For implementation reasons, an exact duplicate
-of this field is cached in the corresponding `struct machine_thread`.
-
-
-A keys are randomly generated at shared region initialization time (see ["Signed
-pointers in shared regions"](#signed-pointers-in-shared-regions) below) and
-copied into `jop_pid` during process activation. This shared region, and hence
-associated A keys, may be shared among arm64e processes under specific
-circumstances:
-
-1. "System processes" (i.e., processes launched from first-party signed binaries
- on the iOS system image) generally use a common shared region with a default
- `jop_pid` value, separate from non-system processes.
-
- If a system process wishes to isolate its A keys even from other system
- processes, it may opt into a custom shared region using an entitlement in
- the form `com.apple.pac.shared_region_id=[...]`. That is, two processes with
- the entitlement `com.apple.pac.shared_region_id=foo` would share A keys and
- shared regions with each other, but not with other system processes.
-
-2. Other arm64e processes automatically use the same shared region/A keys if
- their respective binaries are signed with the same team-identifier strings.
-
-3. `posix_spawnattr_set_ptrauth_task_port_np()` allows explicit "inheriting" of
- A keys during `posix_spawn()`, using a supplied mach task port. This API is
- intended to support debugging tools that may need to auth or sign pointers
- using the target process's keys.
-
-#### B keys
-
-Each process is assigned a random set of "B keys" (`AP{IB,DB}Key`) on process
-creation. As a special exception, processes which inherit their parents' memory
-address space (e.g., during `fork`) will also inherit their parents' B keys.
-These keys are stored as the field `rop_pid` inside `struct task`, with an exact
-duplicate in `struct machine_thread` for implementation reasons.
-
-xnu reprograms the ARM B-key registers during context switch, via the macro
-`set_process_dependent_keys_and_sync_context` in `cswitch.s`.
-
-xnu uses the B keys internally to sign pointers pushed onto the kernel stack,
-such as stashed LR values. Note that xnu does *not* need to explicitly switch
-to a dedicated set of "kernel B keys" to do this:
-
-1. The `KERNKey` diversifier already ensures that the actual signing keys are
- different between xnu and userspace.
-
-2. Although reprogramming the ARM B-key registers will affect xnu's signing keys
- as well, pointers pushed onto the stack are inherently short-lived.
- Specifically, there will never be a situation where a stack pointer value is
- signed with one `current_task()`, but needs to be authed under a different
- active `current_task()`.
-
-#### SCTLR_EL1
-
-As discussed above, xnu disables the ARM keys when returning to non-arm64e
-userspace processes. This is implemented by manipulating the `EnIA`, `EnIB`,
-and `EnDA`, and `EnDB` bits in the ARM `SCTLR_EL1` system register. When
-these bits are cleared, auth or sign instruction using the respective keys
-will simply pass through their inputs unmodified.
-
-Initially, xnu cleared these bits during every `exception_return` to a
-non-arm64e process. Since xnu itself uses these keys, the exception vector
-needs to restore the same bits on every exception entry (implemented in the
-`EL0_64_VECTOR` macro).
-
-Apple A13 CPUs now have controls that allow xnu to keep the PAC keys enabled at
-EL1, independent of `SCTLR_EL1` settings. On these CPUs, xnu only needs to
-reconfigure `SCTLR_EL1` when context-switching from a "vanilla" arm64 process to
-an arm64e process, or vice-versa (`pmap_switch_user_ttb_internal`).
-
-### Signed pointers in shared regions
-
-Each userspace process has a *shared region* mapped into its address space,
-consisting of code and data shared across all processes of the same processor
-type, bitness, root directory, and (for arm64e processes) team ID. Comments at
-the top of `osfmk/vm/vm_shared_region.c` discuss this region, and the process of
-populating it, in more detail.
-
-As the VM layer pages in parts of the shared region, any embedded pointers must
-be rebased. Although this process is not new, PAC adds a new step: these
-embedded pointers may be signed, and must be re-signed after they are rebased.
-This process is implemented as `vm_shared_region_slide_page_v3` in
-`osfmk/vm/vm_shared_region.c`.
-
-xnu signs these embedded pointers using a shared-region-specific A key
-(`sr_jop_key`), which is randomly generated when the shared region is created.
-Since these pointers will be consumed by userspace processes, xnu temporarily
-switches to the userspace A keys when re-signing them.
-
-### Signing spilled register state
-
-xnu saves register state into kernel memory when taking exceptions, and reloads
-this state on exception return. If an attacker has write access to kernel
-memory, it can modify this saved state and effectively get control over a
-victim thread's control flow.
-
-xnu hardens against this attack by calling `ml_sign_thread_state` on exception
-entry to hash certain registers before they're saved to memory. On exception
-return, it calls the complementary `ml_check_signed_state` function to ensure
-that the reloaded values still match this hash. `ml_sign_thread_state` hashes a
-handful of particularly sensitive registers:
-
-* `pc, lr`: directly affect control-flow
-* `cpsr`: controls process's exception level
-* `x16, x17`: used by LLVM to temporarily store unauthenticated addresses
-
-`ml_sign_thread_state` also uses the address of the thread's `arm_saved_state_t`
-as a diversifier. This step keeps attackers from using `ml_sign_thread_state`
-as a signing oracle. An attacker may attempt to create a sacrificial thread,
-set this thread to some desired state, and use kernel memory access gadgets to
-transplant the xnu-signed state onto a victim thread. Because the victim
-process has a different `arm_saved_state_t` address as a diversifier,
-`ml_check_signed_state` will detect a hash mismatch in the victim thread.
-
-Apart from exception entry and return, xnu calls `ml_check_signed_state` and
-`ml_sign_thread_state` whenever it needs to mutate one of these sensitive
-registers (e.g., advancing the PC to the next instruction). This process looks
-like:
-
-1. Disable interrupts
-2. Load `pc, lr, cpsr, x16, x17` values and hash from thread's
- `arm_saved_state_t` into registers
-3. Call `ml_check_signed_state` to ensure values have not been tampered with
-4. Mutate one or more of these values using *only* register-to-register
- instructions
-5. Call `ml_sign_thread_state` to re-hash the mutated thread state
-6. Store the mutated values and new hash back into thread's `arm_saved_state_t`.
-7. Restore old interrupt state
-
-Critically, none of the sensitive register values can be spilled to memory
-between steps 1 and 7. Otherwise an attacker with kernel memory access could
-modify one of these values and use step 5 as a signing oracle. xnu implements
-these routines entirely in assembly to ensure full control over register use,
-using a macro `MANIPULATE_SIGNED_THREAD_STATE()` to generate boilerplate
-instructions.
-
-Interrupts must be disabled whenever `ml_check_signed_state` or
-`ml_sign_thread_state` are called, starting *before* their inputs (`x0`--`x5`)
-are populated. To understand why, consider what would happen if the CPU could
-be interrupted just before step 5 above. xnu's exception handler would spill
-the entire register state to memory. If an attacker has kernel memory access,
-they could attempt to replace the spilled `x0`--`x5` values. These modified
-values would then be reloaded into the CPU during exception return; and
-`ml_sign_thread_state` would be called with new, attacker-controlled inputs.
-
-### thread_set_state
-
-The `thread_set_state` call lets userspace modify the register state of a target
-thread. Signed userspace state adds a wrinkle to this process, since the
-incoming FP, LR, SP, and PC values are signed using the *userspace process's*
-key.
-
-xnu handles this in two steps. First, `machine_thread_state_convert_from_user`
-converts the userspace thread state representation into an in-kernel
-representation. Signed values are authenticated using `pmap_auth_user_ptr`,
-which involves temporarily switching to the userspace keys.
-
-Second, `thread_state64_to_saved_state` applies this converted state to the
-target thread. Whenever `thread_state64_to_saved_state` modifies a register
-that makes up part of the thread state hash, it uses
-`MANIPULATE_SIGNED_THREAD_STATE()` as described above to update this hash.
-
-
-### Signing arbitrary data blobs
-
-xnu provides `ptrauth_utils_sign_blob_generic` and `ptrauth_utils_auth_blob_generic`
-to sign and authenticate arbitrary blobs of data. Callers are responsible for
-storing the pointer-sized signature returned. The signature is a rolling MAC
-of the data, using the `pacga` instruction, mixed with a provided salt and optionally
-further diversified by storage address.
-
-Use of these functions is inherently racy. The data must be read from memory
-before each pointer-sized block can be added to the signature. In normal operation,
-standard thread-safety semantics protect from corruption, however in the malicious
-case, it may be possible to time overwriting the buffer before signing or after
-authentication.
-
-Callers of these functions must take care to minimise these race windows by
-using them immediately preceeding/following a write/read of the blob's data.
static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent)
uint32_t gIOHibernateFreeTime = 0 * 1000; // max time to spend freeing pages (ms)
-static uint64_t gIOHibernateCompression = 0x80; // default compression 50%
+
+enum {
+ HIB_COMPR_RATIO_ARM64 = (0xa5), // compression ~65%. Since we don't support retries we start higher.
+ HIB_COMPR_RATIO_INTEL = (0x80) // compression 50%
+};
+
+#if defined(__arm64__)
+static uint64_t gIOHibernateCompression = HIB_COMPR_RATIO_ARM64;
+#else
+static uint64_t gIOHibernateCompression = HIB_COMPR_RATIO_INTEL;
+#endif /* __arm64__ */
boolean_t gIOHibernateStandbyDisabled;
static IODTNVRAM * gIOOptionsEntry;
header->sleepTime = gIOLastSleepTime.tv_sec;
header->compression = ((uint32_t)((compressedSize << 8) / uncompressedSize));
+#if defined(__arm64__)
+ /*
+	 * We don't support retrying a failed hibernation, so we don't want to
+	 * lower this value just because we happened to compress better this
+	 * time around. We do, however, let it go higher.
+ */
+ if (header->compression < HIB_COMPR_RATIO_ARM64) {
+ header->compression = HIB_COMPR_RATIO_ARM64;
+ }
+#endif /* __arm64__ */
+
gIOHibernateCompression = header->compression;
count = vars->fileVars->fileExtents->getLength();
isRTCAlarmWake = true;
fullWakeReason = kFullWakeReasonLocalUser;
requestUserActive(this, "RTC debug alarm");
+ } else {
+#if HIBERNATION
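+		/*
+		 * If the hibernation options did not request a dark wake, treat
+		 * this wake from hibernation as a full user wake.
+		 */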
+ OSSharedPtr<OSObject> hibOptionsProp = copyProperty(kIOHibernateOptionsKey);
+ OSNumber * hibOptions = OSDynamicCast(OSNumber, hibOptionsProp.get());
+ if (hibOptions && !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake)) {
+ fullWakeReason = kFullWakeReasonLocalUser;
+ requestUserActive(this, "hibernate user wake");
+ }
+#endif
}
// stay awake for at least 30 seconds
// Clean up and reboot!
do_reboot:
- if (nvram != NULL) {
- nvram->release();
- }
-
if (boot_command_recover != NULL) {
boot_command_recover->release();
}
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
ifeq ($(LIBTOOL),)
export LIBTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find libtool)
endif
+ifeq ($(OTOOL),)
+ export OTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find otool)
+endif
ifeq ($(NM),)
export NM := $(shell $(XCRUN) -sdk $(SDKROOT) -find nm)
endif
MACHINE_FLAGS_ARM64_T8011 = -DARM64_BOARD_CONFIG_T8011 -mcpu=hurricane
MACHINE_FLAGS_ARM64_BCM2837 = -DARM64_BOARD_CONFIG_BCM2837
MACHINE_FLAGS_ARM64_T8020 = -DARM64_BOARD_CONFIG_T8020 -mcpu=vortex
-MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -mcpu=firestorm
-MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -mcpu=firestorm
+MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -D__ARM_ARCH_8_5__=1
+MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -D__ARM_ARCH_8_5__=1
#
ARCH_FLAGS_X86_64 = -arch x86_64
ARCH_FLAGS_X86_64H = -arch x86_64h
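+# The xnu_libraries project builds xnu as a static library for static
+# kernel-cache linking instead of producing a standalone kernel.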
+ifeq ($(RC_ProjectName),xnu_libraries)
+WILL_BUILD_STATIC_KC := 1
+BUILD_STATIC_LINK := 1
+BUILD_XNU_LIBRARY := 1
+RC_NONARCH_CFLAGS += -D__BUILDING_XNU_LIBRARY__=1
+endif
+
ifneq ($(filter ARM ARM64,$(CURRENT_ARCH_CONFIG)),)
ifneq ($(findstring _Sim,$(RC_ProjectName)),)
#
LD_KERNEL_LIBS = -lcc_kext
LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel
-
# Link opensource binary library
-ifneq ($(filter T8020 T8020 T8101 T8101,$(CURRENT_MACHINE_CONFIG)),)
-LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
+ifneq ($(filter T8020 T8101 T8020 T8101,$(CURRENT_MACHINE_CONFIG)),)
+ LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
endif
#
# Rules for the highly parallel "build" phase, where each build configuration
# writes into their own $(TARGET) independent of other build configs
#
-# There are 4 primary build outputs:
+# There are 5 primary build outputs:
# 1) $(KERNEL_FILE_NAME).unstripped (raw linked kernel, unstripped)
# 2) $(KERNEL_FILE_NAME) (stripped kernel, with optional CTF data)
# 3) $(KERNEL_FILE_NAME).dSYM (dSYM)
# 4) $(KERNEL_FILE_NAME).link (bits for static linking)
+# 5) lib$(KERNEL_FILE_NAME).a (static archive for testing)
ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+KERNEL_STATIC_LINK_TARGETS = \
+ $(TARGET)/lib$(KERNEL_FILE_NAME).a
+KERNEL_STATIC_LINK_DST = \
+ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a
+
+else
KERNEL_STATIC_LINK_TARGETS = \
$(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a
$(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/$(KERNEL_LLDBBOOTSTRAP_NAME)
endif
+endif
do_build_all:: do_build_kernel
.PHONY: do_build_kernel
+ifeq ($(BUILD_XNU_LIBRARY),1)
+do_build_kernel: $(KERNEL_STATIC_LINK_TARGETS)
+
+else
+
do_build_kernel: $(TARGET)/$(KERNEL_FILE_NAME) $(TARGET)/$(KERNEL_FILE_NAME).unstripped $(KERNEL_STATIC_LINK_TARGETS)
@:
do_build_kernel_dSYM: $(TARGET)/$(KERNEL_FILE_NAME).dSYM
@:
+endif
+
.LDFLAGS: ALWAYS
$(_v)$(REPLACECONTENTS) $@ $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) $(LD_KERNEL_LIBS)
.CFLAGS: ALWAYS
$(_v)$(MV) $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME).unstripped $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
$(_v)$(TOUCH) $@
+ifeq ($(BUILD_XNU_LIBRARY),1)
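+# Archive the per-component library file lists into lib$(KERNEL_FILE_NAME).a
+# instead of linking a kernel image.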
+$(TARGET)/lib$(KERNEL_FILE_NAME).a: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).libfilelist)) nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
+ $(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
+ @$(LOG_LIBTOOL) "$(@F)"
+ $(_v)$(CAT) $(filter %.libfilelist,$+) < /dev/null > link.filelist
+ $(_v)$(LIBTOOL) -static -csD -filelist link.filelist -o $@
+ $(_v)$(LN) $(call function_convert_build_config_to_objdir,$(CURRENT_BUILD_CONFIG))/lib$(KERNEL_FILE_NAME).a $(OBJROOT)/lib$(KERNEL_FILE_NAME).a
+endif
+
$(TARGET)/$(KERNEL_FILE_NAME).unstripped: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).filelist)) lastkerneldataconst.o lastkernelconstructor.o nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
$(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
ifeq ($(PRE_LTO),1)
endif
endif
+ifneq ($(BUILD_XNU_LIBRARY),1)
ifeq ($(INSTALL_XNU_DEBUG_FILES),1)
do_build_install_primary:: do_install_xnu_debug_files
endif
do_install_xnu_debug_files: $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/README.DEBUG-kernel.txt
@:
+endif
#
# If the timestamp indicates the DSTROOT kernel is out of
exit $$cmdstatus
ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
+$(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a: $(TARGET)/lib$(KERNEL_FILE_NAME).a ALWAYS
+ $(_v)$(MKDIR) $(dir $@)
+ @$(LOG_INSTALL) "$(@F)"
+ $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+
+else
$(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a ALWAYS
$(_v)$(MKDIR) $(dir $@)
@$(LOG_INSTALL) "$(@F)"
$(_v)$(MKDIR) $(dir $@)
@$(LOG_INSTALL) "$(@F)"
$(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+endif
# BUILD_STATIC_LINK
endif
exit $$cmdstatus
.PHONY: do_install_machine_specific_kernel do_install_machine_specific_kernel_dSYM
+.PHONY: do_install_machine_specific_KDK_dSYM
+
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+do_install_machine_specific_kernel: $(KERNEL_STATIC_LINK_DST)
+ @:
+do_install_machine_specific_kernel_dSYM:
+ @:
+
+else
do_install_machine_specific_kernel: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME) \
$(SYMROOT)/$(KERNEL_FILE_NAME) \
$(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
@:
-.PHONY: do_install_machine_specific_KDK_dSYM
-
do_install_machine_specific_KDK_dSYM: \
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist \
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros \
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
@:
+endif
+
# The $(RM) is needed so that the $(LN) doesn't dereference an existing
# symlink during incremental builds and create a new symlink inside
# the target of the existing symlink
install: installhdrs_desktop
else
-install: installhdrs install_textfiles install_config install_kernels install_aliases checkstyle
+install: installhdrs install_textfiles install_config install_kernels install_aliases
endif
.PHONY: install_embedded install_release_embedded install_development_embedded install_desktop
@-cat cscope.files | etags -l auto -S - 2> /dev/null
@rm -f cscope.files 2> /dev/null
-#
-# Check or reformat source code for official xnu code style
-#
-.PHONY: checkstyle restyle check_uncrustify uncrustify
-
-# User-friendly aliases for those who prefer to remember the name of the tool.
-check_uncrustify: checkstyle
-uncrustify: restyle
-
-checkstyle:
- ${_V}$(SRCROOT)/tools/uncrustify.sh
-
-restyle:
- ${_V}$(SRCROOT)/tools/uncrustify.sh -f
.PHONY: help
cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
/* bootstrap cpu process dependent key for kernel has been loaded by start.s */
- BootCpuData.rop_key = KERNEL_ROP_ID;
+ BootCpuData.rop_key = ml_default_rop_pid();
BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */
#define UNSIGN_PTR(p) \
SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)
+uint64_t ml_default_rop_pid(void);
uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
*/
#include <pexpert/pexpert.h>
+#if __arm64__
+#include <pexpert/arm64/board_config.h>
+#endif /* __arm64__ */
#include <arm/cpuid_internal.h>
#include <arm/pmap.h>
}
#endif /* __arm64__ */
+
+#if HAS_APPLE_PAC
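+/*
+ * Default ROP/JOP key diversifier values for this configuration.
+ */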
+uint64_t
+ml_default_rop_pid(void)
+{
+ return 0;
+}
+
+uint64_t
+ml_default_jop_pid(void)
+{
+ return 0;
+}
+#endif /* HAS_APPLE_PAC */
extern unsigned long segSizePPLDATA;
extern vm_offset_t segPPLTEXTB;
extern unsigned long segSizePPLTEXT;
-#if __APRR_SUPPORTED__
-extern vm_offset_t segPPLTRAMPB;
-extern unsigned long segSizePPLTRAMP;
-extern void ppl_trampoline_start;
-extern void ppl_trampoline_end;
-#endif
extern vm_offset_t segPPLDATACONSTB;
extern unsigned long segSizePPLDATACONST;
static void pmap_trim_self(pmap_t pmap);
static void pmap_trim_subord(pmap_t subord);
-#if __APRR_SUPPORTED__
-static uint64_t pte_to_xprr_perm(pt_entry_t pte);
-static pt_entry_t xprr_perm_to_pte(uint64_t perm);
-#endif /* __APRR_SUPPORTED__*/
/*
* Temporary prototypes, while we wait for pmap_enter to move to taking an
#endif
-#if __APRR_SUPPORTED__
-/*
- * Indicates whether the given PTE has special restrictions due to the current
- * APRR settings.
- */
-static boolean_t
-is_pte_aprr_protected(pt_entry_t pte)
-{
- uint64_t aprr_el0_value;
- uint64_t aprr_el1_value;
- uint64_t aprr_index;
-
- MRS(aprr_el0_value, APRR_EL0);
- MRS(aprr_el1_value, APRR_EL1);
- aprr_index = PTE_TO_APRR_INDEX(pte);
-
- /* Check to see if this mapping had APRR restrictions. */
- if ((APRR_EXTRACT_IDX_ATTR(aprr_el0_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL0_RESET, aprr_index)) ||
- (APRR_EXTRACT_IDX_ATTR(aprr_el1_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL1_RESET, aprr_index))
- ) {
- return TRUE;
- }
-
- return FALSE;
-}
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-static boolean_t
-is_pte_xprr_protected(pmap_t pmap __unused, pt_entry_t pte)
-{
-#if __APRR_SUPPORTED__
- return is_pte_aprr_protected(pte);
-#else /* __APRR_SUPPORTED__ */
-#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
-}
-#endif /* __APRR_SUPPORTED__*/
-#if __APRR_SUPPORTED__
-static uint64_t
-__unused pte_to_xprr_perm(pt_entry_t pte)
-{
-#if __APRR_SUPPORTED__
- switch (PTE_TO_APRR_INDEX(pte)) {
- case APRR_FIRM_RX_INDEX: return XPRR_FIRM_RX_PERM;
- case APRR_FIRM_RO_INDEX: return XPRR_FIRM_RO_PERM;
- case APRR_PPL_RW_INDEX: return XPRR_PPL_RW_PERM;
- case APRR_KERN_RW_INDEX: return XPRR_KERN_RW_PERM;
- case APRR_FIRM_RW_INDEX: return XPRR_FIRM_RW_PERM;
- case APRR_KERN0_RW_INDEX: return XPRR_KERN0_RW_PERM;
- case APRR_USER_JIT_INDEX: return XPRR_USER_JIT_PERM;
- case APRR_USER_RW_INDEX: return XPRR_USER_RW_PERM;
- case APRR_PPL_RX_INDEX: return XPRR_PPL_RX_PERM;
- case APRR_KERN_RX_INDEX: return XPRR_KERN_RX_PERM;
- case APRR_USER_XO_INDEX: return XPRR_USER_XO_PERM;
- case APRR_KERN_RO_INDEX: return XPRR_KERN_RO_PERM;
- case APRR_KERN0_RX_INDEX: return XPRR_KERN0_RO_PERM;
- case APRR_KERN0_RO_INDEX: return XPRR_KERN0_RO_PERM;
- case APRR_USER_RX_INDEX: return XPRR_USER_RX_PERM;
- case APRR_USER_RO_INDEX: return XPRR_USER_RO_PERM;
- default: return XPRR_MAX_PERM;
- }
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
-#if __APRR_SUPPORTED__
-static uint64_t
-xprr_perm_to_aprr_index(uint64_t perm)
-{
- switch (perm) {
- case XPRR_FIRM_RX_PERM: return APRR_FIRM_RX_INDEX;
- case XPRR_FIRM_RO_PERM: return APRR_FIRM_RO_INDEX;
- case XPRR_PPL_RW_PERM: return APRR_PPL_RW_INDEX;
- case XPRR_KERN_RW_PERM: return APRR_KERN_RW_INDEX;
- case XPRR_FIRM_RW_PERM: return APRR_FIRM_RW_INDEX;
- case XPRR_KERN0_RW_PERM: return APRR_KERN0_RW_INDEX;
- case XPRR_USER_JIT_PERM: return APRR_USER_JIT_INDEX;
- case XPRR_USER_RW_PERM: return APRR_USER_RW_INDEX;
- case XPRR_PPL_RX_PERM: return APRR_PPL_RX_INDEX;
- case XPRR_KERN_RX_PERM: return APRR_KERN_RX_INDEX;
- case XPRR_USER_XO_PERM: return APRR_USER_XO_INDEX;
- case XPRR_KERN_RO_PERM: return APRR_KERN_RO_INDEX;
- case XPRR_KERN0_RX_PERM: return APRR_KERN0_RO_INDEX;
- case XPRR_KERN0_RO_PERM: return APRR_KERN0_RO_INDEX;
- case XPRR_USER_RX_PERM: return APRR_USER_RX_INDEX;
- case XPRR_USER_RO_PERM: return APRR_USER_RO_INDEX;
- default: return APRR_MAX_INDEX;
- }
-}
-#endif /* __APRR_SUPPORTED__ */
-
-static pt_entry_t
-__unused xprr_perm_to_pte(uint64_t perm)
-{
-#if __APRR_SUPPORTED__
- return APRR_INDEX_TO_PTE(xprr_perm_to_aprr_index(perm));
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
-#endif /* __APRR_SUPPORTED__*/
/*
}
#endif /* CONFIG_CSR_FROM_DT */
-#if __APRR_SUPPORTED__
- if (((uintptr_t)(&ppl_trampoline_start)) % PAGE_SIZE) {
- panic("%s: ppl_trampoline_start is not page aligned, "
- "vstart=%#lx",
- __FUNCTION__,
- vstart);
- }
-
- if (((uintptr_t)(&ppl_trampoline_end)) % PAGE_SIZE) {
- panic("%s: ppl_trampoline_end is not page aligned, "
- "vstart=%#lx",
- __FUNCTION__,
- vstart);
- }
-#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
#if DEVELOPMENT || DEBUG
/* PPL text is RX for the PPL, RO for the kernel. */
pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
-#if __APRR_SUPPORTED__
- monitor_start_pa = kvtophys(segPPLTRAMPB);
- monitor_end_pa = monitor_start_pa + segSizePPLTRAMP;
-
- /*
- * The PPLTRAMP pages will be a mix of PPL RX/kernel RO and
- * PPL RX/kernel RX. However, all of these pages belong to the PPL.
- */
- pa_set_range_monitor(monitor_start_pa, monitor_end_pa);
-#endif
/*
* In order to support DTrace, the save areas for the PPL must be
pmap_set_range_xprr_perm(monitor_start_va, monitor_end_va, XPRR_PPL_RW_PERM, XPRR_KERN_RW_PERM);
}
-#if __APRR_SUPPORTED__
- /* The trampoline must also be specially protected. */
- pmap_set_range_xprr_perm((vm_offset_t)&ppl_trampoline_start, (vm_offset_t)&ppl_trampoline_end, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
-#endif
if (segSizePPLDATACONST > 0) {
monitor_start_pa = kvtophys(segPPLDATACONSTB);
{
/* Mark the PPL as being locked down. */
-#if __APRR_SUPPORTED__
- pmap_ppl_locked_down = TRUE;
- /* Force a trap into to the PPL to update APRR_EL1. */
- pmap_return(FALSE, FALSE);
-#else
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
}
#endif /* XNU_MONITOR */
pt_entry_t spte;
boolean_t managed = FALSE;
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
#if CONFIG_PGTRACE
if (pgtrace_enabled) {
if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(cpte)->refcnt)) <= 0) {
panic("pmap_remove_range_options: over-release of ptdp %p for pte %p", ptep_get_ptd(cpte), cpte);
}
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
}
/*
* It may be possible for the pte to transition from managed
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
tmplate |= pt_attr_leaf_xn(pt_attr);
}
-#if __APRR_SUPPORTED__
- /**
- * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
- *
- * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
- * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
- * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX. This is similar for
- * user execute-only mappings.
- */
- if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
- && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
- panic("%s: modifying an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
- __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
- }
-
- /**
- * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
- * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
- */
- if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
- panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
- __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
- }
-#endif /* __APRR_SUPPORTED__*/
if (*pte_p != ARM_PTE_TYPE_FAULT &&
!ARM_PTE_IS_COMPRESSED(*pte_p, pte_p) &&
if (pmap == kernel_pmap) {
panic("%s: called with kernel_pmap\n", __func__);
}
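+	/* Validate the caller-supplied pmap before operating on it. */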
+ VALIDATE_PMAP(pmap);
pmap->disable_jop = true;
}
boolean_t force_write = FALSE;
#endif
- spte = *pte_p;
+ spte = *((volatile pt_entry_t*)pte_p);
if ((spte == ARM_PTE_TYPE_FAULT) ||
ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *pte_p;
+ spte = *((volatile pt_entry_t*)pte_p);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
/* We do not expect to write fast fault the entry. */
pte_set_was_writeable(tmplate, false);
-#if __APRR_SUPPORTED__
- /**
- * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
- *
- * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
- * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
- * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX/XO. This is similar
- * for user execute-only mappings.
- */
- if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
- && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
- panic("%s: modifying a PPL mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
- __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
- }
-
- /**
- * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
- * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
- */
- if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
- panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
- __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
- }
-#endif /* __APRR_SUPPORTED__*/
WRITE_PTE_FAST(pte_p, tmplate);
if (managed) {
const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap);
pte_p = pmap_pte(pmap, v);
- assert(pte_p != PT_ENTRY_NULL);
- pa = pte_to_pa(*pte_p);
+ if (pte_p == PT_ENTRY_NULL) {
+ if (!wired) {
+ /*
+ * The PTE may have already been cleared by a disconnect/remove operation, and the L3 table
+ * may have been freed by a remove operation.
+ */
+ goto pmap_change_wiring_return;
+ } else {
+ panic("%s: Attempt to wire nonexistent PTE for pmap %p", __func__, pmap);
+ }
+ }
+ /*
+ * Use volatile loads to prevent the compiler from collapsing references to 'pa' back to loads of pte_p
+ * until we've grabbed the final PVH lock; PTE contents may change during this time.
+ */
+ pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
while (pa_valid(pa)) {
pmap_paddr_t new_pa;
LOCK_PVH((int)pa_index(pa));
- new_pa = pte_to_pa(*pte_p);
+ new_pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
if (pa == new_pa) {
break;
pa = new_pa;
}
+ /* PTE checks must be performed after acquiring the PVH lock (if applicable for the PA) */
+ if ((*pte_p == ARM_PTE_EMPTY) || (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p))) {
+ if (!wired) {
+ /* PTE cleared by prior remove/disconnect operation */
+ goto pmap_change_wiring_cleanup;
+ } else {
+ panic("%s: Attempt to wire empty/compressed PTE %p (=0x%llx) for pmap %p",
+ __func__, pte_p, (uint64_t)*pte_p, pmap);
+ }
+ }
+
+ assertf((*pte_p & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", pte_p, (uint64_t)*pte_p);
if (wired != pte_is_wired(*pte_p)) {
pte_set_wired(pmap, pte_p, wired);
if (pmap != kernel_pmap) {
}
}
+pmap_change_wiring_cleanup:
if (pa_valid(pa)) {
UNLOCK_PVH((int)pa_index(pa));
}
+pmap_change_wiring_return:
pmap_unlock(pmap);
}
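
The hunk above replaces plain PTE reads with volatile loads and re-checks the entry after taking the per-physical-page (PVH) lock, so a mapping torn down between the first read and the lock acquisition is noticed. Below is a minimal, self-contained sketch of that snapshot/lock/re-check idiom; the lock_page/unlock_page/entry_to_page helpers are illustrative placeholders, not xnu APIs.

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint64_t pte_t;

    /* Illustrative stand-ins for the per-physical-page (PVH) lock and the
     * pte -> physical page translation; these are not xnu interfaces. */
    static void lock_page(uint64_t page)   { (void)page; }
    static void unlock_page(uint64_t page) { (void)page; }
    static uint64_t entry_to_page(pte_t e) { return e >> 14; }

    /*
     * Snapshot the PTE, lock the page it maps, then re-read and verify the
     * mapping still points at that page.  The volatile loads keep the
     * compiler from reusing a pre-lock copy of *ptep.  Returns with the
     * page lock held.
     */
    static bool lock_mapping(pte_t *ptep, uint64_t *out_page)
    {
        for (;;) {
            pte_t snap = *(volatile pte_t *)ptep;      /* pre-lock snapshot */
            uint64_t page = entry_to_page(snap);

            lock_page(page);
            if (entry_to_page(*(volatile pte_t *)ptep) == page) {
                *out_page = page;                      /* mapping is stable */
                return true;
            }
            unlock_page(page);                         /* raced; retry */
        }
    }

    int main(void)
    {
        pte_t pte = 0x812340003ULL;                    /* fake leaf entry */
        uint64_t page;
        return lock_mapping(&pte, &page) ? 0 : 1;
    }
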
if (pa) {
return pa;
}
- pa = ((pmap_paddr_t)pmap_vtophys(kernel_pmap, va)) << PAGE_SHIFT;
- if (pa) {
- pa |= (va & PAGE_MASK);
- }
-
- return (pmap_paddr_t)pa;
+ return pmap_vtophys(kernel_pmap, va);
}
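
The kvtophys change above drops the shift-and-mask at the call site, which implies the helper now returns a full byte address rather than a page frame number. A tiny illustration of the arithmetic that used to live at the call site (placeholder constants; 16KB pages assumed):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 14u                      /* 16KB pages */
    #define PAGE_MASK  ((1ull << PAGE_SHIFT) - 1)

    /* Old call-site style: the translation returned a page frame number and
     * the caller re-attached the byte offset within the page. */
    static uint64_t phys_from_ppn(uint64_t ppn, uint64_t va)
    {
        return (ppn << PAGE_SHIFT) | (va & PAGE_MASK);
    }

    int main(void)
    {
        uint64_t va  = 0xfffffe0012345678ULL;   /* some kernel VA */
        uint64_t ppn = 0x4321;                  /* pretend translation result */

        /* The reworked code expects its helper to hand back this full byte
         * address directly, so the shift-and-mask disappears from the
         * call site. */
        printf("0x%llx\n", (unsigned long long)phys_from_ppn(ppn, va));
        return 0;
    }
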
pmap_paddr_t
end_pte_p = start_pte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr));
assert(end_pte_p >= start_pte_p);
for (curr_pte_p = start_pte_p; curr_pte_p < end_pte_p; curr_pte_p++) {
- pmap_paddr_t pa = pte_to_pa(*curr_pte_p);
+ pmap_paddr_t pa = pte_to_pa(*((volatile pt_entry_t*)curr_pte_p));
if (pa_valid(pa)) {
ppnum_t pn = (ppnum_t) atop(pa);
phys_attribute_clear_with_flush_range(pn, bits, options, NULL, flush_range);
pmap_clear_user_ttb_internal();
}
-#if defined(HAS_APPLE_PAC) && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- if (!arm_user_jop_disabled()) {
- uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");
- bool jop_enabled = sctlr & SCTLR_JOP_KEYS_ENABLED;
- if (!jop_enabled && !pmap->disable_jop) {
- // turn on JOP
- sctlr |= SCTLR_JOP_KEYS_ENABLED;
- __builtin_arm_wsr64("SCTLR_EL1", sctlr);
- arm_context_switch_requires_sync();
- } else if (jop_enabled && pmap->disable_jop) {
- // turn off JOP
- sctlr &= ~SCTLR_JOP_KEYS_ENABLED;
- __builtin_arm_wsr64("SCTLR_EL1", sctlr);
- arm_context_switch_requires_sync();
- }
- }
-#endif /* HAS_APPLE_PAC && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
#endif /* (__ARM_VMSA__ == 7) */
}
ptep = pmap_pte(pmap, va);
if (ptep != PT_ENTRY_NULL) {
while (true) {
- spte = *ptep;
+ spte = *((volatile pt_entry_t*)ptep);
pa = pte_to_pa(spte);
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
-#if __APRR_SUPPORTED__
- if (*ptep == spte) {
- /*
- * Double-check the spte value, as we care
- * about the AF bit.
- */
- break;
- }
- UNLOCK_PVH(pai);
-#else /* !(__APRR_SUPPORTED__*/
break;
-#endif /* !(__APRR_SUPPORTED__*/
}
} else {
pmap_unlock(pmap);
return result;
}
-#if __APRR_SUPPORTED__
- /* Check to see if this mapping had APRR restrictions. */
- if (is_pte_xprr_protected(pmap, spte)) {
- /*
- * We have faulted on an XPRR managed mapping; decide if the access should be
- * reattempted or if it should cause an exception. Now that all JIT entitled
- * task threads always have MPRR enabled we're only here because of
- * an AF fault or an actual permission fault. AF faults will have result
- * changed to KERN_SUCCESS below upon arm_clear_fast_fault return.
- */
- if (was_af_fault && (spte & ARM_PTE_AF)) {
- result = KERN_SUCCESS;
- goto out;
- } else {
- result = KERN_PROTECTION_FAILURE;
- }
- }
-#endif /* __APRR_SUPPORTED__*/
if ((IS_REFFAULT_PAGE(pai)) ||
((fault_type & VM_PROT_WRITE) && IS_MODFAULT_PAGE(pai))) {
}
}
-#if __APRR_SUPPORTED__
-out:
-#endif /* __APRR_SUPPORTED__*/
UNLOCK_PVH(pai);
pmap_unlock(pmap);
return result;
if ((*cpte != ARM_PTE_TYPE_FAULT)
&& (!ARM_PTE_IS_COMPRESSED(*cpte, cpte))) {
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
while (!managed) {
pa = pte_to_pa(spte);
if (!pa_valid(pa)) {
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
goto done;
}
- pa = pte_to_pa(*pte);
+ pa = pte_to_pa(*((volatile pt_entry_t*)pte));
if (pa == 0) {
if (ARM_PTE_IS_COMPRESSED(*pte, pte)) {
disp |= PMAP_QUERY_PAGE_COMPRESSED;
T_LOG("Make the first mapping XO.");
pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
-#if __APRR_SUPPORTED__
- T_LOG("Validate that reads to our mapping fault.");
- pmap_test_read(pmap, va_base, true);
-#else
T_LOG("Validate that reads to our mapping do not fault.");
pmap_test_read(pmap, va_base, false);
-#endif
T_LOG("Validate that writes to our mapping fault.");
pmap_test_write(pmap, va_base, true);
#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72
-#define PMAP_COUNT 73
+
+#define PMAP_COUNT 74
#define PMAP_INVALID_CPU_NUM (~0U)
void
trust_cache_init(void)
{
- size_t const len = segSizeEXTRADATA;
+ size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
+ size_t const len = segSizeEXTRADATA - locked_down_dt_size;
if (len == 0) {
-#if XNU_TARGET_OS_OSX
+ // We allow no trust cache at all.
printf("No external trust cache found (region len is 0).");
-#else
- panic("No external trust cache found (region len is 0).");
-#endif
return;
}
- size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
-
pmap_serialized_trust_caches = (struct serialized_trust_caches*)(segEXTRADATA +
locked_down_dt_size);
// Engineering Trust Caches.
if (pmap_serialized_trust_caches->num_caches > engineering_trust_cache_index) {
-#if DEVELOPMENT || DEBUG
for (uint32_t i = engineering_trust_cache_index; i < pmap_serialized_trust_caches->num_caches; i++) {
struct trust_cache_module1 const *module =
(struct trust_cache_module1 const *)(
(TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
}
}
-#else
- panic("Number of trust caches: %d. How could we let this happen?",
- pmap_serialized_trust_caches->num_caches);
-#endif
}
}
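
trust_cache_init() now carves the locked-down device tree copy off the front of the EXTRADATA region before treating the remainder as serialized trust caches, and it tolerates an empty remainder instead of panicking. A hedged sketch of that carve-out, with made-up structure and helper names standing in for segEXTRADATA/segSizeEXTRADATA and SecureDTIsLockedDown():

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder description of the EXTRADATA region; in xnu the real
     * inputs are segEXTRADATA/segSizeEXTRADATA, SecureDTIsLockedDown()
     * and PE_state. */
    struct extra_data {
        uint8_t *base;
        size_t   size;
        bool     dt_locked_down;  /* device tree copy at the head of the region? */
        size_t   dt_size;         /* size of that copy when present */
    };

    /* Return the serialized-trust-cache portion that follows the (optional)
     * locked-down device tree; NULL with *out_len == 0 when the region is
     * empty, which the new code tolerates instead of panicking. */
    static uint8_t *
    trust_cache_region(const struct extra_data *xd, size_t *out_len)
    {
        size_t skip = xd->dt_locked_down ? xd->dt_size : 0;
        size_t len  = xd->size - skip;    /* assumes dt_size <= size */

        *out_len = len;
        return (len == 0) ? NULL : xd->base + skip;
    }

    int main(void)
    {
        uint8_t blob[64] = { 0 };
        struct extra_data xd = { blob, sizeof(blob), true, 16 };
        size_t len;
        uint8_t *tc = trust_cache_region(&xd, &len);

        printf("trust caches start at offset %td, %zu bytes\n",
               tc ? tc - blob : (ptrdiff_t)0, len);
        return 0;
    }
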
static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
#endif /* __ARM_KERNEL_PROTECT__ */
-#if __APRR_SUPPORTED__ && XNU_MONITOR
-#define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
-#else
#define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
-#endif
#if KASAN
extern vm_offset_t shadow_pbase;
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
-#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include "assym.s"
#endif
-#if defined(HAS_APPLE_PAC)
- ldr \new_key, [\thread, TH_ROP_PID]
- ldr \tmp_key, [\cpudatap, CPU_ROP_KEY]
- cmp \new_key, \tmp_key
- b.eq 1f
- str \new_key, [\cpudatap, CPU_ROP_KEY]
- msr APIBKeyLo_EL1, \new_key
- add \new_key, \new_key, #1
- msr APIBKeyHi_EL1, \new_key
- add \new_key, \new_key, #1
- msr APDBKeyLo_EL1, \new_key
- add \new_key, \new_key, #1
- msr APDBKeyHi_EL1, \new_key
- mov \wsync, #1
-1:
-
-#if HAS_PAC_FAST_A_KEY_SWITCHING
- IF_PAC_SLOW_A_KEY_SWITCHING Lskip_jop_keys_\@, \new_key
- ldr \new_key, [\thread, TH_JOP_PID]
- REPROGRAM_JOP_KEYS Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
- mov \wsync, #1
-Lskip_jop_keys_\@:
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
-
-#endif /* defined(HAS_APPLE_PAC) */
cbz \wsync, 1f
isb sy
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
-#include <arm64/pac_asm.h>
#include "dwarf_unwind.h"
#if __ARM_KERNEL_PROTECT__
/* Return to the PPL. */
mov x15, #0
mov w10, #PPL_STATE_EXCEPTION
-#if __APRR_SUPPORTED__
- b Ldisable_aif_and_enter_ppl
-#else
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
1:
.endmacro
-#if __APRR_SUPPORTED__
-/*
- * EL1_SP0_VECTOR_PPL_CHECK
- *
- * Check to see if the exception was taken by the kernel or the PPL. Falls
- * through if kernel, hands off to the given label if PPL. Expects to run on
- * SP1.
- * arg0 - Label to go to if this was a PPL exception.
- */
-.macro EL1_SP0_VECTOR_PPL_CHECK
- sub sp, sp, ARM_CONTEXT_SIZE
- stp x0, x1, [sp, SS64_X0]
- mrs x0, APRR_EL1
- MOV64 x1, APRR_EL1_DEFAULT
- cmp x0, x1
- b.ne $0
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-.endmacro
-
-#define STAY_ON_SP1 0
-#define SWITCH_TO_SP0 1
-
-#define INVOKE_PREFLIGHT 0
-#define NO_INVOKE_PREFLIGHT 1
-
-/*
- * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- *
- * Verify whether an exception came from the PPL or from the kernel. If it came
- * from the PPL, save off the PPL state and transition out of the PPL.
- * arg0 - Label to go to if this was a kernel exception
- * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
- * arg2 - Indicates if this should switch back to SP0
- * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
- */
-.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- /* Spill some more registers. */
- stp x2, x3, [sp, SS64_X2]
-
- /*
- * Check if the PPL is locked down; if not, we can treat this as a
- * kernel execption.
- */
- adrp x1, EXT(pmap_ppl_locked_down)@page
- ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
- cbz x1, 2f
-
- /* Ensure that APRR_EL1 is actually in PPL mode. */
- MOV64 x1, APRR_EL1_PPL
- cmp x0, x1
- b.ne .
-
- /*
- * Check if the CPU is in the PPL; if not we can treat this as a
- * kernel exception.
- */
- GET_PMAP_CPU_DATA x3, x1, x2
- ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
- cmp x1, #PPL_STATE_KERNEL
- b.eq 2f
-
- /* Ensure that the CPU is in the expected PPL state. */
- cmp x1, #PPL_STATE_DISPATCH
- b.ne .
-
- /* Mark the CPU as dealing with an exception. */
- mov x1, #PPL_STATE_EXCEPTION
- str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
-
- /* Load the bounds of the PPL trampoline. */
- adrp x0, EXT(ppl_no_exception_start)@page
- add x0, x0, EXT(ppl_no_exception_start)@pageoff
- adrp x1, EXT(ppl_no_exception_end)@page
- add x1, x1, EXT(ppl_no_exception_end)@pageoff
-
- /*
- * Ensure that the exception did not occur in the trampoline. If it
- * did, we are either being attacked or our state machine is
- * horrifically broken.
- */
- mrs x2, ELR_EL1
- cmp x2, x0
- b.lo 1f
- cmp x2, x1
- b.hi 1f
-
- /* We might be under attack; spin. */
- b .
-
-1:
- /* Get the PPL save area. */
- mov x1, x3
- ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
-
- /* Save our x0, x1 state. */
- ldp x2, x3, [sp, SS64_X0]
- stp x2, x3, [x0, SS64_X0]
-
- /* Restore SP1 to its original state. */
- mov x3, sp
- add sp, sp, ARM_CONTEXT_SIZE
-
- .if $2 == SWITCH_TO_SP0
- /* Switch back to SP0. */
- msr SPSel, #0
- mov x2, sp
- .else
- /* Load the SP0 value. */
- mrs x2, SP_EL0
- .endif
-
- /* Save off the stack pointer. */
- str x2, [x0, SS64_SP]
-
- INIT_SAVED_STATE_FLAVORS x0, w1, w2
-
- /* Save the context that was interrupted. */
- ldp x2, x3, [x3, SS64_X2]
- SPILL_REGISTERS PPL_MODE
-
- /*
- * Stash the function we wish to be invoked to deal with the exception;
- * usually this is some preflight function for the fleh_* handler.
- */
- adrp x25, $1@page
- add x25, x25, $1@pageoff
-
- /*
- * Indicate that this is a PPL exception, and that we should return to
- * the PPL.
- */
- mov x26, #1
-
- /* Transition back to kernel mode. */
- mov x15, #PPL_EXIT_EXCEPTION
- b ppl_return_to_kernel_mode
-2:
- /* Restore SP1 state. */
- ldp x2, x3, [sp, SS64_X2]
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-
- /* Go to the specified label (usually the original exception vector). */
- b $0
-.endmacro
-#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
.endmacro
el1_sp0_synchronous_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- /*
- * We do not have enough space for new instructions in this vector, so
- * jump to outside code to check if this exception was taken in the PPL.
- */
- b el1_sp0_synchronous_vector_ppl_check
-Lel1_sp0_synchronous_vector_kernel:
-#endif
stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
mrs x1, ESR_EL1 // Get the exception syndrome
/* If the stack pointer is corrupt, it will manifest either as a data abort
b fleh_dispatch64
el1_sp0_irq_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
-Lel1_sp0_irq_vector_kernel:
-#endif
EL1_SP0_VECTOR
SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_irq)@page // Load address for fleh
el1_sp0_fiq_vector_long:
// ARM64_TODO write optimized decrementer
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
-Lel1_sp0_fiq_vector_kernel:
-#endif
EL1_SP0_VECTOR
SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_fiq)@page // Load address for fleh
b fleh_dispatch64
el1_sp0_serror_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
-Lel1_sp0_serror_vector_kernel:
-#endif
EL1_SP0_VECTOR
adrp x1, EXT(fleh_serror)@page // Load address for fleh
add x1, x1, EXT(fleh_serror)@pageoff
add x1, x1, fleh_serror_sp1@pageoff
b fleh_dispatch64
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-/**
- * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
- * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
- */
-#define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
-#define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
-#endif
.macro EL0_64_VECTOR
stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
mov x18, #0 // Zero x18 to avoid leaking data to user SS
#endif
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- // enable JOP for kernel
- mrs x0, SCTLR_EL1
- tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
- // if (!jop_running) {
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- orr x0, x0, x1
- msr SCTLR_EL1, x0
- isb sy
- MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
- cmp x0, x1
- bne .
- // }
-1:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
mrs x0, TPIDR_EL1 // Load the thread register
mrs x1, SP_EL0 // Load the user stack pointer
add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
add x1, x1, EXT(fleh_serror)@pageoff
b fleh_dispatch64
-#if XNU_MONITOR && __APRR_SUPPORTED__
-el1_sp0_synchronous_vector_ppl_check:
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
-
- /* Jump back to the primary exception vector if we fell through. */
- b Lel1_sp0_synchronous_vector_kernel
-#endif
/*
* check_exception_stack
CMSR FPCR, x5, x4, 1
1:
-#if defined(HAS_APPLE_PAC)
- // if (eret to userspace) {
- and x2, x2, #(PSR64_MODE_EL_MASK)
- cmp x2, #(PSR64_MODE_EL0)
- bne Ldone_reconfigure_jop
- // thread_t thread = current_thread();
- // bool disable_jop;
- // if (arm_user_jop_disabled()) {
- // /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */
- // disable_jop = true;
- // } else {
- // disable_jop = thread->machine.disable_user_jop;
- // }
- mrs x2, TPIDR_EL1
- ldrb w1, [x2, TH_DISABLE_USER_JOP]
- cbz w1, Lenable_jop
- // if (disable_jop) {
- // if (cpu does not have discrete JOP-at-EL1 bit) {
- // disable_sctlr_jop_keys();
- // }
- // } else {
- // if (cpu does not have fast A-key switching) {
- // reprogram_jop_keys(thread->machine.jop_pid);
- // }
- // }
- // }
-Ldisable_jop:
-#if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- mrs x4, SCTLR_EL1
- bic x4, x4, x1
- msr SCTLR_EL1, x4
- MOV64 x1, SCTLR_EL1_EXPECTED
- cmp x4, x1
- bne .
-#endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
- b Ldone_reconfigure_jop
-Lenable_jop:
-#if HAS_PAC_SLOW_A_KEY_SWITCHING
- IF_PAC_FAST_A_KEY_SWITCHING Ldone_reconfigure_jop, x1
- ldr x1, [x2, TH_JOP_PID]
- ldr x2, [x2, ACT_CPUDATAP]
- REPROGRAM_JOP_KEYS Ldone_reconfigure_jop, x1, x2, x3
-#if defined(__ARM_ARCH_8_5__)
- /**
- * The new keys will be used after eret to userspace, so explicit sync is
- * required iff eret is non-synchronizing.
- */
- isb sy
-#endif /* defined(__ARM_ARCH_8_5__) */
-#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
-Ldone_reconfigure_jop:
-#endif /* defined(HAS_APPLE_PAC) */
/* Restore arm_neon_saved_state64 */
ldp q0, q1, [x0, NS64_Q0]
#endif /* __ARM_KERNEL_PROTECT__ */
#if XNU_MONITOR
-#if __APRR_SUPPORTED__
- .text
- .align 2
-el1_sp0_synchronous_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
-
- .text
- .align 2
-el1_sp0_fiq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_irq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_serror_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
-#endif /* __APRR_SUPPORTED__ */
/*
* Functions to preflight the fleh handlers when the PPL has taken an exception;
b EXT(fleh_serror)
-#if XNU_MONITOR && __APRR_SUPPORTED__
-/*
- * aprr_ppl_enter
- *
- * Invokes the PPL
- * x15 - The index of the requested PPL function.
- */
- .text
- .align 2
- .globl EXT(aprr_ppl_enter)
-LEXT(aprr_ppl_enter)
- /* Push a frame. */
- ARM64_STACK_PROLOG
- stp x20, x21, [sp, #-0x20]!
- stp x29, x30, [sp, #0x10]
- add x29, sp, #0x10
-
- /* Increase the preemption count. */
- mrs x10, TPIDR_EL1
- ldr w12, [x10, ACT_PREEMPT_CNT]
- add w12, w12, #1
- str w12, [x10, ACT_PREEMPT_CNT]
-
- /* Is the PPL currently locked down? */
- adrp x13, EXT(pmap_ppl_locked_down)@page
- add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
- ldr w14, [x13]
- cmp w14, wzr
-
- /* If not, just perform the call in the current context. */
- b.eq EXT(ppl_bootstrap_dispatch)
-
- mov w10, #PPL_STATE_KERNEL
- b Ldisable_aif_and_enter_ppl
-
- /* We align this to land the next few instructions on their own page. */
- .section __PPLTRAMP,__text,regular,pure_instructions
- .align 14
- .space (16*1024)-(4*8) // 8 insns
-
- /*
- * This label is used by exception handlers that are trying to return
- * to the PPL.
- */
-Ldisable_aif_and_enter_ppl:
- /* We must trampoline to the PPL context; disable AIF. */
- mrs x20, DAIF
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-
- .globl EXT(ppl_no_exception_start)
-LEXT(ppl_no_exception_start)
- /* Switch APRR_EL1 to PPL mode. */
- MOV64 x14, APRR_EL1_PPL
- msr APRR_EL1, x14
-
- /* This ISB should be the last instruction on a page. */
- // TODO: can we static assert this?
- isb
-#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
// x15: ppl call number
.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)
-#if __APRR_SUPPORTED__
- /* Squash AIF AGAIN, because someone may have attacked us. */
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-#endif /* __APRR_SUPPORTED__ */
-#if __APRR_SUPPORTED__
- /* Verify the state of APRR_EL1. */
- MOV64 x14, APRR_EL1_PPL
- mrs x21, APRR_EL1
-#else /* __APRR_SUPPORTED__ */
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
cmp x14, x21
b.ne Lppl_fail_dispatch
/* Find the save area, and return to the saved PPL context. */
ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
mov sp, x0
-#if __APRR_SUPPORTED__
- b Lexception_return_restore_registers
-#else
b EXT(return_to_ppl)
-#endif /* __APRR_SUPPORTED__ */
Lppl_mark_cpu_as_dispatching:
cmp w10, #PPL_STATE_KERNEL
/* Return to the kernel. */
b ppl_return_to_kernel_mode
-#if __APRR_SUPPORTED__
- /* We align this to land the next few instructions on their own page. */
- .align 14
- .space (16*1024)-(4*5) // 5 insns
-
-ppl_return_to_kernel_mode:
- /* Switch APRR_EL1 back to the kernel mode. */
- // must be 5 instructions
- MOV64 x14, APRR_EL1_DEFAULT
- msr APRR_EL1, x14
-
- .globl EXT(ppl_trampoline_end)
-LEXT(ppl_trampoline_end)
-
- /* This should be the first instruction on a page. */
- isb
-
- .globl EXT(ppl_no_exception_end)
-LEXT(ppl_no_exception_end)
- b ppl_exit
-#endif /* __APRR_SUPPORTED__ */
.text
}
#if defined(HAS_APPLE_PAC)
-static inline bool
-cpu_supports_userkeyen()
-{
-#if defined(APPLEFIRESTORM)
- return __builtin_arm_rsr64(ARM64_REG_APCTL_EL1) & APCTL_EL1_UserKeyEn;
-#elif HAS_APCTL_EL1_USERKEYEN
- return true;
-#else
- return false;
-#endif
-}
-
-/**
- * Returns the default JOP key. Depending on how the CPU diversifies userspace
- * JOP keys, this value may reflect either KERNKeyLo or APIAKeyLo.
- */
-uint64_t
-ml_default_jop_pid(void)
-{
- if (cpu_supports_userkeyen()) {
- return KERNEL_KERNKEY_ID;
- } else {
- return KERNEL_JOP_ID;
- }
-}
-
void
ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop)
{
#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/machine_machdep.h>
-#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include "assym.s"
-#if defined(HAS_APPLE_PAC)
-
-.macro SET_KERN_KEY dst, apctl_el1
- orr \dst, \apctl_el1, #APCTL_EL1_KernKeyEn
-.endmacro
-
-.macro CLEAR_KERN_KEY dst, apctl_el1
- and \dst, \apctl_el1, #~APCTL_EL1_KernKeyEn
-.endmacro
-
-/*
- * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
- */
- .align 2
- .globl EXT(ml_enable_user_jop_key)
-LEXT(ml_enable_user_jop_key)
- mov x1, x0
- mrs x2, TPIDR_EL1
- ldr x2, [x2, ACT_CPUDATAP]
- ldr x0, [x2, CPU_JOP_KEY]
-
- cmp x0, x1
- b.eq Lskip_program_el0_jop_key
- /*
- * We can safely write to the JOP key registers without updating
- * current_cpu_datap()->jop_key. The complementary
- * ml_disable_user_jop_key() call will put back the old value. Interrupts
- * are also disabled, so nothing else will read this field in the meantime.
- */
- SET_JOP_KEY_REGISTERS x1, x2
-Lskip_program_el0_jop_key:
-
- /*
- * if (cpu has APCTL_EL1.UserKeyEn) {
- * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL0 keys
- * } else {
- * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL0 keys
- * }
- */
- mrs x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
- SET_KERN_KEY x2, x1
- CLEAR_KERN_KEY x3, x1
- tst x1, #(APCTL_EL1_UserKeyEn)
- csel x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
- SET_KERN_KEY x1, x1
-#else
- CLEAR_KERN_KEY x1, x1
-#endif
- msr ARM64_REG_APCTL_EL1, x1
- isb
- ret
-
-/*
- * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
- */
- .align 2
- .globl EXT(ml_disable_user_jop_key)
-LEXT(ml_disable_user_jop_key)
- cmp x0, x1
- b.eq Lskip_program_prev_jop_key
- SET_JOP_KEY_REGISTERS x1, x2
-Lskip_program_prev_jop_key:
-
- /*
- * if (cpu has APCTL_EL1.UserKeyEn) {
- * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL1 keys
- * } else {
- * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL1 keys
- * }
- */
- mrs x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
- CLEAR_KERN_KEY x2, x1
- SET_KERN_KEY x3, x1
- tst x1, #(APCTL_EL1_UserKeyEn)
- csel x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
- CLEAR_KERN_KEY x1, x1
-#else
- SET_KERN_KEY x1, x1
-#endif
- msr ARM64_REG_APCTL_EL1, x1
- isb
- ret
-
-#endif /* defined(HAS_APPLE_PAC) */
#if HAS_BP_RET
+++ /dev/null
-/*
- * Copyright (c) 2019 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-#ifndef _ARM64_PAC_ASM_H_
-#define _ARM64_PAC_ASM_H_
-
-#ifndef __ASSEMBLER__
-#error "This header should only be used in .s files"
-#endif
-
-#include <pexpert/arm64/board_config.h>
-#include <arm64/proc_reg.h>
-#include "assym.s"
-
-#if defined(HAS_APPLE_PAC)
-
-#if defined(APPLEFIRESTORM)
-/* H13 may use either fast or slow A-key switching, depending on CPU model and revision */
-#define HAS_PAC_FAST_A_KEY_SWITCHING 1
-#define HAS_PAC_SLOW_A_KEY_SWITCHING 1
-
-/* BEGIN IGNORE CODESTYLE */
-
-/**
- * IF_PAC_FAST_A_KEY_SWITCHING
- *
- * Branch to a specified label if this H13 model + revision supports fast A-key switching.
- *
- * label - label to branch to
- * tmp - scratch register
- */
-.macro IF_PAC_FAST_A_KEY_SWITCHING label, tmp
- /**
- * start.s attempts to set APCTL_EL1.UserKeyEn. If this H13 CPU doesn't
- * actually support this bit, it will be RaZ.
- */
- mrs \tmp, APCTL_EL1
- tbnz \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
-.endmacro
-
-/**
- * IF_PAC_SLOW_A_KEY_SWITCHING
- *
- * Branch to a specified label if this H13 model + revision doesn't support fast A-key switching.
- *
- * label - label to branch to
- * tmp - scratch register
- */
-.macro IF_PAC_SLOW_A_KEY_SWITCHING label, tmp
- mrs \tmp, APCTL_EL1
- tbz \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
-.endmacro
-
-/* END IGNORE CODESTYLE */
-
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
-#define HAS_PAC_FAST_A_KEY_SWITCHING 1
-#define HAS_PAC_SLOW_A_KEY_SWITCHING 0
-
-.macro IF_PAC_FAST_A_KEY_SWITCHING label, tmp
-.error "This macro should never need to be used on this CPU family."
-.endmacro
-
-/* We know at compile time that this CPU family definitely doesn't need slow A-key switching */
-.macro IF_PAC_SLOW_A_KEY_SWITCHING label, tmp
-.endmacro
-
-#else /* !defined(APPLEFIRESTORM) && !defined(HAS_APCTL_EL1_USERKEYEN) */
-#define HAS_PAC_FAST_A_KEY_SWITCHING 0
-#define HAS_PAC_SLOW_A_KEY_SWITCHING 1
-
-/* We know at compile time that this CPU family definitely doesn't support fast A-key switching */
-.macro IF_PAC_FAST_A_KEY_SWITCHING label, tmp
-.endmacro
-
-.macro IF_PAC_SLOW_A_KEY_SWITCHING label, tmp
-.error "This macro should never need to be used on this CPU family."
-.endmacro
-
-#endif /* defined(APPLEFIRESTORM) */
-
-/* BEGIN IGNORE CODESTYLE */
-
-/**
- * REPROGRAM_JOP_KEYS
- *
- * Reprograms the A-key registers if needed, and updates current_cpu_datap()->jop_key.
- *
- * On CPUs where fast A-key switching is implemented, this macro reprograms KERNKey_EL1.
- * On other CPUs, it reprograms AP{D,I}AKey_EL1.
- *
- * skip_label - branch to this label if new_jop_key is already loaded into CPU
- * new_jop_key - new APIAKeyLo value
- * cpudatap - current cpu_data_t *
- * tmp - scratch register
- */
-.macro REPROGRAM_JOP_KEYS skip_label, new_jop_key, cpudatap, tmp
- ldr \tmp, [\cpudatap, CPU_JOP_KEY]
- cmp \new_jop_key, \tmp
- b.eq \skip_label
- SET_JOP_KEY_REGISTERS \new_jop_key, \tmp
- str \new_jop_key, [\cpudatap, CPU_JOP_KEY]
-.endmacro
-
-/**
- * SET_JOP_KEY_REGISTERS
- *
- * Unconditionally reprograms the A-key registers. The caller is responsible for
- * updating current_cpu_datap()->jop_key as needed.
- *
- * new_jop_key - new APIAKeyLo value
- * tmp - scratch register
- */
-.macro SET_JOP_KEY_REGISTERS new_jop_key, tmp
-#if HAS_PAC_FAST_A_KEY_SWITCHING
- IF_PAC_SLOW_A_KEY_SWITCHING Lslow_reprogram_jop_keys_\@, \tmp
- msr KERNKeyLo_EL1, \new_jop_key
- add \tmp, \new_jop_key, #1
- msr KERNKeyHi_EL1, \tmp
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
-#if HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING
- b Lset_jop_key_registers_done_\@
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING */
-
-#if HAS_PAC_SLOW_A_KEY_SWITCHING
-Lslow_reprogram_jop_keys_\@:
- msr APIAKeyLo_EL1, \new_jop_key
- add \tmp, \new_jop_key, #1
- msr APIAKeyHi_EL1, \tmp
- add \tmp, \tmp, #1
- msr APDAKeyLo_EL1, \tmp
- add \tmp, \tmp, #1
- msr APDAKeyHi_EL1, \tmp
-#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
-
-Lset_jop_key_registers_done_\@:
-.endmacro
-
-/* END IGNORE CODESTYLE */
-
-#endif /* defined(HAS_APPLE_PAC) */
-
-#endif /* _ARM64_PAC_ASM_H_ */
-
-/* vim: set ts=4 ft=asm: */
check_instruction x2, x3, __pinst_spsel_1, 0xd65f03c0d50041bf
b __pinst_spsel_1
-#if __APRR_SUPPORTED__
-
-/*
- * APRR registers aren't covered by VMSA lockdown, so we'll keep these
- * gadgets in pinst for protection against undesired execution.
- */
-
- .text
- .section __LAST,__pinst
- .align 2
-
-__pinst_set_aprr_el0:
- msr APRR_EL0, x0
- ret
-
-__pinst_set_aprr_el1:
- msr APRR_EL1, x0
- ret
-
-__pinst_set_aprr_shadow_mask_en_el1:
- msr APRR_SHADOW_MASK_EN_EL1, x0
-
- ret
-
- .text
- .section __TEXT_EXEC,__text
- .align 2
-
- .globl _pinst_set_aprr_el0
-_pinst_set_aprr_el0:
- check_instruction x2, x3, __pinst_set_aprr_el0, 0xd65f03c0d51cf200
- b __pinst_set_aprr_el0
-
- .globl _pinst_set_aprr_el1
-_pinst_set_aprr_el1:
- check_instruction x2, x3, __pinst_set_aprr_el1, 0xd65f03c0d51cf220
- b __pinst_set_aprr_el1
-
- .globl _pinst_set_aprr_shadow_mask_en_el1
-_pinst_set_aprr_shadow_mask_en_el1:
- check_instruction x2, x3, __pinst_set_aprr_shadow_mask_en_el1, 0xd65f03c0d51cf2c0
- b __pinst_set_aprr_shadow_mask_en_el1
-#endif /* __APRR_SUPPORTED__ */
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
#if defined(HAS_APPLE_PAC)
-/*
- *
- * arm64_ropjop_test - basic xnu ROP/JOP test plan
- *
- * - assert ROP/JOP configured and running status match
- * - assert all AppleMode ROP/JOP features enabled
- * - ensure ROP/JOP keys are set and diversified
- * - sign a KVA (the address of this function),assert it was signed (changed)
- * - authenticate the newly signed KVA
- * - assert the authed KVA is the original KVA
- * - corrupt a signed ptr, auth it, ensure auth failed
- * - assert the failed authIB of corrupted pointer is tagged
- *
- */
kern_return_t
arm64_ropjop_test()
boolean_t config_jop_enabled = TRUE;
- /* assert all AppleMode ROP/JOP features enabled */
- uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
-#if __APSTS_SUPPORTED__
- uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
- T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL);
-#else
- T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL);
-#endif /* __APSTS_SUPPORTED__ */
- T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL);
-
- bool kernkeyen = apctl & APCTL_EL1_KernKeyEn;
-#if HAS_APCTL_EL1_USERKEYEN
- bool userkeyen = apctl & APCTL_EL1_UserKeyEn;
-#else
- bool userkeyen = false;
-#endif
- /* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */
- T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled");
- T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space");
-
- /* ROP/JOP keys enabled current status */
- bool status_jop_enabled, status_rop_enabled;
-#if __APSTS_SUPPORTED__ /* H13+ */
- status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
-#elif __APCFG_SUPPORTED__ /* H12 */
- uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
- status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
-#else /* !__APCFG_SUPPORTED__ H11 */
- uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
- status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
- status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
-#endif /* __APSTS_SUPPORTED__ */
-
- /* assert configured and running status match */
- T_EXPECT(config_rop_enabled == status_rop_enabled, NULL);
- T_EXPECT(config_jop_enabled == status_jop_enabled, NULL);
-
-
if (config_jop_enabled) {
/* jop key */
uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
- /* ensure JOP key is set and diversified */
- T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
}
uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
- /* ensure ROP key is set and diversified */
- T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
/* sign a KVA (the address of this function) */
#define CORESIGHT_REGIONS 4
#define CORESIGHT_SIZE 0x1000
-#if __APRR_SUPPORTED__
-/*
- * APRR_EL0/APRR_EL1
- *
- * 63 0
- * +--------------------+
- * | Attr[15:0]RWX[3:0] |
- * +--------------------+
- *
- * These registers consist of 16 4-bit fields.
- *
- * The attribute index consists of the access protection
- * and execution protections on a mapping. The index
- * for a given mapping type is constructed as follows.
- *
- * Attribute Index
- *
- * 3 2 1 0
- * +-------+-------+-----+----+
- * | AP[1] | AP[0] | PXN | XN |
- * +-------+-------+-----+----+
- *
- * The attribute for a given index determines what
- * protections are disabled for that mappings type
- * (protections beyond the scope of the standard ARM
- * protections for a mapping cannot be granted via
- * APRR).
- *
- * Attribute
- *
- * 3 2 1 0
- * +----------+---+---+---+
- * | Reserved | R | W | X |
- * +----------+---+---+---+
- *
- * Where:
- * R: Read is allowed.
- * W: Write is allowed.
- * X: Execute is allowed.
- */
-
-#define APRR_IDX_XN (1ULL)
-#define APRR_IDX_PXN (2ULL)
-
-
-#define APRR_IDX_XN_SHIFT (0ULL)
-#define APRR_IDX_PXN_SHIFT (1ULL)
-#define APRR_IDX_APSHIFT (2ULL)
-
-#endif /* __APRR_SUPPORTED__ */
-
-
-#if __APRR_SUPPORTED__
-
-#define APRR_ATTR_X (1ULL)
-#define APRR_ATTR_W (2ULL)
-#define APRR_ATTR_R (4ULL)
-
-#define APRR_ATTR_WX (APRR_ATTR_W | APRR_ATTR_X)
-#define APRR_ATTR_RX (APRR_ATTR_R | APRR_ATTR_X)
-#define APRR_ATTR_RWX (APRR_ATTR_R | APRR_ATTR_W | APRR_ATTR_X)
-
-#define APRR_ATTR_NONE (0ULL)
-#define APRR_ATTR_MASK (APRR_ATTR_RWX)
-
-#define APRR_RESERVED_MASK (0x8888888888888888ULL)
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-#define XPRR_FIRM_RX_PERM (0ULL)
-#define XPRR_PPL_RW_PERM (1ULL)
-#define XPRR_FIRM_RO_PERM (2ULL)
-#define XPRR_KERN_RW_PERM (3ULL)
-#define XPRR_FIRM_RW_PERM (4ULL)
-#define XPRR_USER_JIT_PERM (5ULL)
-#define XPRR_KERN0_RW_PERM (6ULL)
-#define XPRR_USER_RW_PERM (7ULL)
-#define XPRR_PPL_RX_PERM (8ULL)
-#define XPRR_USER_XO_PERM (9ULL)
-#define XPRR_KERN_RX_PERM (10ULL)
-#define XPRR_KERN_RO_PERM (11ULL)
-#define XPRR_KERN0_RX_PERM (12ULL)
-#define XPRR_USER_RX_PERM (13ULL)
-#define XPRR_KERN0_RO_PERM (14ULL)
-#define XPRR_USER_RO_PERM (15ULL)
-#define XPRR_MAX_PERM (15ULL)
-
-#define XPRR_VERSION_NONE (0ULL)
-#define XPRR_VERSION_APRR (1ULL)
-
-
-#endif /* __APRR_SUPPORTED__*/
-
-#if __APRR_SUPPORTED__
-/* Indices for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_INDEX (0ULL) /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_INDEX (1ULL) /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_INDEX (2ULL) /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_INDEX (3ULL) /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_INDEX (4ULL) /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_INDEX (5ULL) /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_INDEX (6ULL) /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_INDEX (7ULL) /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_INDEX (8ULL) /* AP_RONA, PX, X */
-#define APRR_KERN_RX_INDEX (9ULL) /* AP_RONA, PX, XN */
-#define APRR_USER_XO_INDEX (10ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_INDEX (11ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_INDEX (12ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_INDEX (13ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_INDEX (14ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_INDEX (15ULL) /* AP_RORO, PXN, XN */
-#define APRR_MAX_INDEX (15ULL) /* For sanity checking index values */
-#endif /* __APRR_SUPPORTED */
-
-
-#if __APRR_SUPPORTED__
-#define APRR_SHIFT_FOR_IDX(x) \
- ((x) << 2ULL)
-
-/* Shifts for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_SHIFT (0ULL) /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_SHIFT (4ULL) /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_SHIFT (8ULL) /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_SHIFT (12ULL) /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_SHIFT (16ULL) /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_SHIFT (20ULL) /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_SHIFT (24ULL) /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_SHIFT (28ULL) /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_SHIFT (32ULL) /* AP_RONA, PX, X */
-#define APRR_KERN_RX_SHIFT (36ULL) /* AP_RONA, PX, XN */
-#define APRR_USER_XO_SHIFT (40ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_SHIFT (44ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_SHIFT (48ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_SHIFT (52ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_SHIFT (56ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_SHIFT (60ULL) /* AP_RORO, PXN, XN */
-
-#define ARM_PTE_APRR_MASK \
- (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)
-
-#define ARM_PTE_XPRR_MASK ARM_PTE_APRR_MASK
-
-#define APRR_INDEX_TO_PTE(x) \
- ((pt_entry_t) \
- (((x) & 0x8) ? ARM_PTE_AP(0x2) : 0) | \
- (((x) & 0x4) ? ARM_PTE_AP(0x1) : 0) | \
- (((x) & 0x2) ? ARM_PTE_PNX : 0) | \
- (((x) & 0x1) ? ARM_PTE_NX : 0))
-
-#define PTE_TO_APRR_INDEX(x) \
- ((ARM_PTE_EXTRACT_AP(x) << APRR_IDX_APSHIFT) | \
- (((x) & ARM_PTE_PNXMASK) ? APRR_IDX_PXN : 0) | \
- (((x) & ARM_PTE_NXMASK) ? APRR_IDX_XN : 0))
-
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-
-#define APRR_EXTRACT_IDX_ATTR(_aprr_value, _idx) \
- (((_aprr_value) >> APRR_SHIFT_FOR_IDX(_idx)) & APRR_ATTR_MASK)
-
-#define APRR_REMOVE(x) (~(x))
-
-#define APRR_EL1_UNRESTRICTED (0x4455445566666677ULL)
-
-#define APRR_EL1_RESET \
- APRR_EL1_UNRESTRICTED
-
-/*
- * XO mappings bypass PAN protection (rdar://58360875)
- * Revoke ALL kernel access permissions for XO mappings.
- */
-#define APRR_EL1_BASE \
- (APRR_EL1_UNRESTRICTED & \
- APRR_REMOVE(APRR_ATTR_R << APRR_USER_XO_SHIFT))
-
-#if XNU_MONITOR
-#define APRR_EL1_DEFAULT \
- (APRR_EL1_BASE & \
- (APRR_REMOVE((APRR_ATTR_WX << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
- (APRR_ATTR_WX << APRR_PPL_RX_SHIFT))))
-
-#define APRR_EL1_PPL \
- (APRR_EL1_BASE & \
- (APRR_REMOVE((APRR_ATTR_X << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
- (APRR_ATTR_W << APRR_PPL_RX_SHIFT))))
-#else
-#define APRR_EL1_DEFAULT \
- APRR_EL1_BASE
-#endif
-#define APRR_EL0_UNRESTRICTED (0x4545010167670101ULL)
-#define APRR_EL0_RESET \
- APRR_EL0_UNRESTRICTED
-#if XNU_MONITOR
-#define APRR_EL0_BASE \
- (APRR_EL0_UNRESTRICTED & \
- (APRR_REMOVE((APRR_ATTR_RWX << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_RWX << APRR_PPL_RX_SHIFT) | \
- (APRR_ATTR_RWX << APRR_USER_XO_SHIFT))))
-#else
-#define APRR_EL0_BASE \
- APRR_EL0_UNRESTRICTED
-#endif
-#define APRR_EL0_JIT_RW \
- (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_X << APRR_USER_JIT_SHIFT))
-#define APRR_EL0_JIT_RX \
- (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_W << APRR_USER_JIT_SHIFT))
-#define APRR_EL0_JIT_RWX \
- APRR_EL0_BASE
-#define APRR_EL0_DEFAULT \
- APRR_EL0_BASE
-
-#endif /* __APRR_SUPPORTED__ */
/*
#define ID_AA64ISAR0_EL1_AES_PMULL_EN (2ull << ID_AA64ISAR0_EL1_AES_OFFSET)
-#if __APCFG_SUPPORTED__
-/*
- * APCFG_EL1
- *
- * 63 2 1 0
- * +----------+-+-+
- * | reserved |K|R|
- * +----------+-+-+
- *
- * where:
- * R: Reserved
- * K: ElXEnKey - Enable ARMV8.3 defined {IA,IB,DA,DB} keys when CPU is
- * operating in EL1 (or higher) and when under Apple-Mode
- */
-
-#define APCFG_EL1_ELXENKEY_OFFSET 1
-#define APCFG_EL1_ELXENKEY_MASK (0x1ULL << APCFG_EL1_ELXENKEY_OFFSET)
-#define APCFG_EL1_ELXENKEY APCFG_EL1_ELXENKEY_MASK
-#endif /* __APCFG_SUPPORTED__ */
#define APSTATE_G_SHIFT (0)
#define APSTATE_P_SHIFT (1)
#define APSTATE_A_SHIFT (2)
#define APSTATE_AP_MASK ((1ULL << APSTATE_A_SHIFT) | (1ULL << APSTATE_P_SHIFT))
-#ifdef __APSTS_SUPPORTED__
-#define APCTL_EL1_AppleMode (1ULL << 0)
-#define APCTL_EL1_KernKeyEn (1ULL << 1)
-#define APCTL_EL1_EnAPKey0 (1ULL << 2)
-#define APCTL_EL1_EnAPKey1 (1ULL << 3)
-#ifdef HAS_APCTL_EL1_USERKEYEN
-#define APCTL_EL1_UserKeyEn_OFFSET 4
-#define APCTL_EL1_UserKeyEn (1ULL << APCTL_EL1_UserKeyEn_OFFSET)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
-#define APSTS_EL1_MKEYVld (1ULL << 0)
-#else
-#define APCTL_EL1_AppleMode (1ULL << 0)
-#define APCTL_EL1_MKEYVld (1ULL << 1)
-#define APCTL_EL1_KernKeyEn (1ULL << 2)
-#endif
#define ACTLR_EL1_EnTSO (1ULL << 1)
#define ACTLR_EL1_EnAPFLG (1ULL << 4)
#endif /* __ARM_KERNEL_PROTECT__ */
-#if __APRR_SUPPORTED__
-
-.macro MSR_APRR_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_el1)
-#else
- msr APRR_EL1, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_EL0_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_el0)
-#else
- msr APRR_EL0, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_shadow_mask_en_el1)
-#else
- msr APRR_SHADOW_MASK_EN_EL1, x0
-#endif
-.endmacro
-
-#endif /* __APRR_SUPPORTED__ */
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
msr VBAR_EL1, x0
#endif
-#if __APRR_SUPPORTED__
- MOV64 x0, APRR_EL1_DEFAULT
-#if XNU_MONITOR
- adrp x4, EXT(pmap_ppl_locked_down)@page
- ldrb w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
- cmp w5, #0
- b.ne 1f
-
- // If the PPL is not locked down, we start in PPL mode.
- MOV64 x0, APRR_EL1_PPL
-1:
-#endif /* XNU_MONITOR */
-
- MSR_APRR_EL1_X0
-
- // Load up the default APRR_EL0 value.
- MOV64 x0, APRR_EL0_DEFAULT
- MSR_APRR_EL0_X0
-#endif /* __APRR_SUPPORTED__ */
#if defined(KERNEL_INTEGRITY_KTRR)
/*
add x0, x0, EXT(LowExceptionVectorBase)@pageoff
MSR_VBAR_EL1_X0
-#if __APRR_SUPPORTED__
- // Save the LR
- mov x1, lr
-
-#if XNU_MONITOR
- // If the PPL is supported, we start out in PPL mode.
- MOV64 x0, APRR_EL1_PPL
-#else
- // Otherwise, we start out in default mode.
- MOV64 x0, APRR_EL1_DEFAULT
-#endif
-
- // Set the APRR state for EL1.
- MSR_APRR_EL1_X0
-
- // Set the APRR state for EL0.
- MOV64 x0, APRR_EL0_DEFAULT
- MSR_APRR_EL0_X0
-
-
- // Restore the LR.
- mov lr, x1
-#endif /* __APRR_SUPPORTED__ */
// Get the kernel memory parameters from the boot args
ldr x22, [x20, BA_VIRT_BASE] // Get the kernel virt base
1:
#ifdef HAS_APPLE_PAC
-#ifdef __APSTS_SUPPORTED__
- mrs x0, ARM64_REG_APSTS_EL1
- and x1, x0, #(APSTS_EL1_MKEYVld)
- cbz x1, 1b // Poll APSTS_EL1.MKEYVld
- mrs x0, ARM64_REG_APCTL_EL1
- orr x0, x0, #(APCTL_EL1_AppleMode)
-#ifdef HAS_APCTL_EL1_USERKEYEN
- orr x0, x0, #(APCTL_EL1_UserKeyEn)
- and x0, x0, #~(APCTL_EL1_KernKeyEn)
-#else /* !HAS_APCTL_EL1_USERKEYEN */
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
- and x0, x0, #~(APCTL_EL1_EnAPKey0)
- msr ARM64_REG_APCTL_EL1, x0
-
-#if defined(APPLEFIRESTORM)
- IF_PAC_FAST_A_KEY_SWITCHING 1f, x0
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
- msr ARM64_REG_APCTL_EL1, x0
-1:
-#endif /* APPLEFIRESTORM */
-
-#else
- mrs x0, ARM64_REG_APCTL_EL1
- and x1, x0, #(APCTL_EL1_MKEYVld)
- cbz x1, 1b // Poll APCTL_EL1.MKEYVld
- orr x0, x0, #(APCTL_EL1_AppleMode)
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
- msr ARM64_REG_APCTL_EL1, x0
-#endif /* APSTS_SUPPORTED */
-
- /* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
- isb sy
- /* Load static kernel key diversification values */
- ldr x0, =KERNEL_ROP_ID
- /* set ROP key. must write at least once to pickup mkey per boot diversification */
- msr APIBKeyLo_EL1, x0
- add x0, x0, #1
- msr APIBKeyHi_EL1, x0
- add x0, x0, #1
- msr APDBKeyLo_EL1, x0
- add x0, x0, #1
- msr APDBKeyHi_EL1, x0
- add x0, x0, #1
- msr ARM64_REG_KERNELKEYLO_EL1, x0
- add x0, x0, #1
- msr ARM64_REG_KERNELKEYHI_EL1, x0
- /* set JOP key. must write at least once to pickup mkey per boot diversification */
- add x0, x0, #1
- msr APIAKeyLo_EL1, x0
- add x0, x0, #1
- msr APIAKeyHi_EL1, x0
- add x0, x0, #1
- msr APDAKeyLo_EL1, x0
- add x0, x0, #1
- msr APDAKeyHi_EL1, x0
- /* set G key */
- add x0, x0, #1
- msr APGAKeyLo_EL1, x0
- add x0, x0, #1
- msr APGAKeyHi_EL1, x0
// Enable caches, MMU, ROP and JOP
MOV64 x0, SCTLR_EL1_DEFAULT
orr x0, x0, #(SCTLR_PACIB_ENABLED) /* IB is ROP */
-#if __APCFG_SUPPORTED__
- // for APCFG systems, JOP keys are always on for EL1.
- // JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
-#else /* __APCFG_SUPPORTED__ */
MOV64 x1, SCTLR_JOP_KEYS_ENABLED
orr x0, x0, x1
-#endif /* !__APCFG_SUPPORTED__ */
#else /* HAS_APPLE_PAC */
// Enable caches and MMU
MOV64 x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
orr x1, x1, #(SCTLR_PACIB_ENABLED)
-#if !__APCFG_SUPPORTED__
MOV64 x2, SCTLR_JOP_KEYS_ENABLED
orr x1, x1, x2
-#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
cmp x0, x1
bne .
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
osfmk/arm64/sleh.c standard
osfmk/arm64/start.s optional nos_arm_asm
osfmk/arm64/pinst.s optional nos_arm_asm
-osfmk/arm64/cswitch.s standard
+osfmk/arm64/cswitch.s optional nos_arm_asm
osfmk/arm/machine_cpuid.c standard
osfmk/arm/machine_routines_common.c standard
osfmk/arm64/machine_routines.c standard
/* check for a backup port */
pdrequest = port->ip_pdrequest;
+ /*
+ * Panic if a special reply has ip_pdrequest or ip_tempowner
+ * set, as this causes a type confusion while accessing the
+ * kdata union.
+ */
+ if (special_reply && (pdrequest || port->ip_tempowner)) {
+ panic("ipc_port_destroy: invalid state");
+ }
+
#if IMPORTANCE_INHERITANCE
/* determine how many assertions to drop and from whom */
if (port->ip_tempowner != 0) {
}
/*
- * Disallow moving receive-right kobjects, e.g. mk_timer ports
+ * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
* The ipc_port structure uses the kdata union of kobject and
* imp_task exclusively. Thus, general use of a kobject port as
* a receive right can cause type confusion in the importance
* code.
*/
- if (io_kotype(entry->ie_object) != IKOT_NONE) {
+ if (io_is_kobject(entry->ie_object) ||
+ io_is_kolabeled(entry->ie_object)) {
/*
* Distinguish an invalid right, e.g., trying to move
* a send right as a receive right, from this
assert(port->ip_receiver_name == name);
assert(port->ip_receiver == space);
- if (port->ip_immovable_receive) {
+ if (port->ip_immovable_receive || port->ip_specialreply) {
assert(port->ip_receiver != ipc_space_kernel);
ip_unlock(port);
assert(current_task() != kernel_task);
assert(port->ip_mscount == 0);
assert(port->ip_receiver_name == MACH_PORT_NULL);
+ /*
+ * Don't copyout kobjects or kolabels as receive right
+ */
+ if (io_is_kobject(entry->ie_object) ||
+ io_is_kolabeled(entry->ie_object)) {
+ panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
+ }
+
imq_lock(&port->ip_messages);
dest = port->ip_destination;
/* redeem of previous values is the value */
if (0 < prev_value_count) {
elem = (user_data_element_t)prev_values[0];
+
+ user_data_lock();
assert(0 < elem->e_made);
elem->e_made++;
- *out_value = prev_values[0];
+ user_data_unlock();
+
+ *out_value = (mach_voucher_attr_value_handle_t)elem;
return KERN_SUCCESS;
}
}
/* port is locked and active */
- /* you cannot register for port death notifications on a kobject */
- if (ip_kotype(port) != IKOT_NONE) {
+ /*
+ * you cannot register for port death notifications on a kobject,
+ * kolabel or special reply port
+ */
+ if (ip_is_kobject(port) || ip_is_kolabeled(port) ||
+ port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_RIGHT;
}
timer_start(&processor->system_state, processor->last_dispatch);
processor->current_state = &processor->system_state;
+#if __AMP__
+ if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
+ timer_start(&thread->ptime, processor->last_dispatch);
+ }
+#endif
cpu_quiescent_counter_join(processor->last_dispatch);
{ panic("task_init\n");}
#if defined(HAS_APPLE_PAC)
- kernel_task->rop_pid = KERNEL_ROP_ID;
+ kernel_task->rop_pid = ml_default_rop_pid();
kernel_task->jop_pid = ml_default_jop_pid();
// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
return KERN_INVALID_ARGUMENT;
}
+ bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
+
+ thread_mtx_lock(thread);
+ /*
+ * Once the thread is started, we will look at `ith_voucher` without
+ * holding any lock.
+ *
+ * Setting the voucher hence can only be done by current_thread() or
+ * before it started. "started" flips under the thread mutex and must be
+ * tested under it too.
+ */
if (thread != current_thread() && thread->started) {
+ thread_mtx_unlock(thread);
return KERN_INVALID_ARGUMENT;
}
ipc_voucher_reference(voucher);
- bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
-
- thread_mtx_lock(thread);
old_voucher = thread->ith_voucher;
thread->ith_voucher = voucher;
thread->ith_voucher_name = MACH_PORT_NULL;
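
The reordering above takes the thread mutex before testing `started`, because once a thread is running its ith_voucher is read without locks; a new voucher may therefore only be published by current_thread() or before the target has started. A minimal sketch of that check-and-publish-under-the-mutex shape, using pthread primitives and placeholder names rather than the Mach thread structures:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative thread object; the field and helper names are
     * placeholders, not the Mach thread_t layout. */
    struct demo_thread {
        pthread_mutex_t mtx;      /* stands in for thread_mtx_lock() */
        bool            started;  /* flips under mtx in the real code */
        void           *voucher;  /* read without locks once the thread runs */
    };

    /* Publish a new voucher only while it is still safe to do so: either the
     * caller is the thread itself, or the thread has not started yet.  Both
     * the test and the store happen under the same mutex. */
    static int
    set_voucher(struct demo_thread *t, void *v, bool is_self)
    {
        pthread_mutex_lock(&t->mtx);
        if (!is_self && t->started) {
            pthread_mutex_unlock(&t->mtx);
            return -1;            /* too late: lock-free readers may exist */
        }
        t->voucher = v;
        pthread_mutex_unlock(&t->mtx);
        return 0;
    }

    int main(void)
    {
        struct demo_thread t = { PTHREAD_MUTEX_INITIALIZER, false, NULL };
        int token;

        printf("before start: %d\n", set_voucher(&t, &token, false));  /* 0  */
        t.started = true;   /* single-threaded demo, so no lock needed here */
        printf("after start:  %d\n", set_voucher(&t, &token, false));  /* -1 */
        return 0;
    }
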
__kpi_deprecated_arm64_macos_unavailable
extern char *strncat(char *, const char *, size_t);
-/* strcmp() is deprecated. Please use strncmp() instead. */
-__kpi_deprecated_arm64_macos_unavailable
extern int strcmp(const char *, const char *);
+extern int strncmp(const char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
extern size_t strlcat(char *, const char *, size_t);
-extern int strncmp(const char *, const char *, size_t);
extern int strcasecmp(const char *s1, const char *s2);
extern int strncasecmp(const char *s1, const char *s2, size_t n);
kern_return_t (*memory_object_data_reclaim)(
memory_object_t mem_obj,
boolean_t reclaim_backing_store);
+ boolean_t (*memory_object_backing_object)(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
const char *memory_object_pager_name;
} * memory_object_pager_ops_t;
__BEGIN_DECLS
extern void memory_object_reference(memory_object_t object);
extern void memory_object_deallocate(memory_object_t object);
+extern boolean_t memory_object_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
extern void memory_object_default_reference(memory_object_default_t);
extern void memory_object_default_deallocate(memory_object_default_t);
.memory_object_map = vnode_pager_map,
.memory_object_last_unmap = vnode_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "vnode pager"
};
.memory_object_map = device_pager_map,
.memory_object_last_unmap = device_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "device pager"
};
reclaim_backing_store);
}
+boolean_t
+memory_object_backing_object
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ if (memory_object->mo_pager_ops->memory_object_backing_object == NULL) {
+ return FALSE;
+ }
+ return (memory_object->mo_pager_ops->memory_object_backing_object)(
+ memory_object,
+ offset,
+ backing_object,
+ backing_offset);
+}
+
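
The new memory_object_backing_object() entry point dispatches through the pager ops vector and treats a NULL hook as "this pager has no backing object", matching the `.memory_object_backing_object = NULL` entries added to the other pagers. A small stand-alone sketch of that optional-hook dispatch pattern (illustrative types, not the Mach EMM interfaces):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative ops vector with one optional hook, mirroring the
     * convention that a NULL entry means "operation not supported". */
    struct pager;

    struct pager_ops {
        bool (*backing_object)(struct pager *p, size_t offset,
                               void **backing, size_t *backing_offset);
        const char *name;
    };

    struct pager {
        const struct pager_ops *ops;
        void   *backing;          /* what the hook reports when implemented */
        size_t  backing_offset;
    };

    /* Front-end dispatcher: refuse gracefully when the hook is absent. */
    static bool
    pager_backing_object(struct pager *p, size_t offset,
                         void **backing, size_t *backing_offset)
    {
        if (p->ops->backing_object == NULL) {
            return false;
        }
        return p->ops->backing_object(p, offset, backing, backing_offset);
    }

    /* A pager that implements the hook, like the apple_protect pager above. */
    static bool
    demo_backing_object(struct pager *p, size_t offset,
                        void **backing, size_t *backing_offset)
    {
        *backing = p->backing;
        *backing_offset = p->backing_offset + offset;
        return true;
    }

    static const struct pager_ops demo_ops = { demo_backing_object, "demo" };
    static const struct pager_ops bare_ops = { NULL, "no-hook" };

    int main(void)
    {
        int obj;
        struct pager with_hook = { &demo_ops, &obj, 0x1000 };
        struct pager no_hook   = { &bare_ops, NULL, 0 };
        void *b; size_t off;

        printf("with hook: %d\n", pager_backing_object(&with_hook, 0x20, &b, &off));
        printf("no hook:   %d\n", pager_backing_object(&no_hook, 0x20, &b, &off));
        return 0;
    }
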
upl_t
convert_port_to_upl(
ipc_port_t port)
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);
+extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);
+
#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
+boolean_t apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
.memory_object_map = apple_protect_pager_map,
.memory_object_last_unmap = apple_protect_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = apple_protect_pager_backing_object,
.memory_object_pager_name = "apple_protect"
};
return KERN_SUCCESS;
}
+boolean_t
+apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ apple_protect_pager_t pager;
+
+ PAGER_DEBUG(PAGER_ALL,
+ ("apple_protect_pager_backing_object: %p\n", mem_obj));
+
+ pager = apple_protect_pager_lookup(mem_obj);
+
+ *backing_object = pager->backing_object;
+ *backing_offset = pager->backing_offset + offset;
+
+ return TRUE;
+}
/*
*
.memory_object_map = compressor_memory_object_map,
.memory_object_last_unmap = compressor_memory_object_last_unmap,
.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "compressor pager"
};
.memory_object_map = fourk_pager_map,
.memory_object_last_unmap = fourk_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "fourk_pager"
};
do {
new_entry = vm_map_entry_insert(map,
entry, tmp_start, tmp_end,
- object, offset, needs_copy,
- FALSE, FALSE,
+ object, offset, vmk_flags,
+ needs_copy, FALSE, FALSE,
cur_protection, max_protection,
VM_BEHAVIOR_DEFAULT,
(entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ?
VM_MAP_PAGE_MASK(map)),
copy_object,
0, /* offset */
+ vmk_flags,
FALSE, /* needs_copy */
FALSE,
FALSE,
return KERN_PROTECTION_FAILURE;
}
+ if (current->used_for_jit &&
+ pmap_has_prot_policy(map->pmap, current->translated_allow_execute, current->protection)) {
+ vm_map_unlock(map);
+ return KERN_PROTECTION_FAILURE;
+ }
+
if ((new_prot & VM_PROT_WRITE) &&
(new_prot & VM_PROT_EXECUTE) &&
#if XNU_TARGET_OS_OSX
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
+ vm_map_kernel_flags_t vmk_flags,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
* Insert the new entry into the list.
*/
- vm_map_store_entry_link(map, insp_entry, new_entry,
- VM_MAP_KERNEL_FLAGS_NONE);
+ vm_map_store_entry_link(map, insp_entry, new_entry, vmk_flags);
map->size += end - start;
/*
if (!copy) {
if (src_entry->used_for_jit == TRUE) {
if (same_map) {
-#if __APRR_SUPPORTED__
- /*
- * Disallow re-mapping of any JIT regions on APRR devices.
- */
- result = KERN_PROTECTION_FAILURE;
- break;
-#endif /* __APRR_SUPPORTED__*/
} else if (!VM_MAP_POLICY_ALLOW_JIT_SHARING(map)) {
/*
* Cannot allow an entry describing a JIT
return map->cs_enforcement;
}
+kern_return_t
+vm_map_cs_wx_enable(
+ vm_map_t map)
+{
+ return pmap_cs_allow_invalid(vm_map_pmap(map));
+}
+
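The new vm_map_cs_wx_enable wrapper simply forwards to the pmap layer; on architectures without pmap-level code-signing support, the pmap_cs_allow_invalid stub added further down returns KERN_SUCCESS unconditionally, so the call is a harmless no-op there. A minimal caller sketch follows, assuming the standard current_map() accessor for the calling task's map; the surrounding function is hypothetical.

    /*
     * Illustrative sketch only: opt the calling task's address space into
     * the relaxed code-signing mode exposed by the new wrapper.
     */
    static kern_return_t
    example_enable_wx_for_current_task(void)
    {
        return vm_map_cs_wx_enable(current_map());
    }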
void
vm_map_cs_enforcement_set(
vm_map_t map,
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
+ vm_map_kernel_flags_t vmk_flags,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
vm_map_t map,
boolean_t val);
+extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
+
/* wire down a region */
#ifdef XNU_KERNEL_PRIVATE
if (upl->flags & UPL_SHADOWED) {
offset = 0;
} else {
- offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) + upl->map_object->paging_offset;
+ offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
}
size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
+boolean_t shared_region_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
/*
* Vector of VM operations for this EMM.
.memory_object_map = shared_region_pager_map,
.memory_object_last_unmap = shared_region_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = shared_region_pager_backing_object,
.memory_object_pager_name = "shared_region"
};
return KERN_SUCCESS;
}
+boolean_t
+shared_region_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ shared_region_pager_t pager;
+
+ PAGER_DEBUG(PAGER_ALL,
+ ("shared_region_pager_backing_object: %p\n", mem_obj));
+
+ pager = shared_region_pager_lookup(mem_obj);
+
+ *backing_object = pager->srp_backing_object;
+ *backing_offset = pager->srp_backing_offset + offset;
+
+ return TRUE;
+}
+
/*
*
.memory_object_map = swapfile_pager_map,
.memory_object_last_unmap = swapfile_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "swapfile pager"
};
required_protection = protections;
}
cur_prot = VM_PROT_ALL;
- vmk_flags.vmkf_copy_pageable = TRUE;
+ if (target_map->pmap == kernel_pmap) {
+ /*
+ * Get "reserved" map entries to avoid deadlocking
+ * on the kernel map or a kernel submap if we
+ * run out of VM map entries and need to refill that
+ * zone.
+ */
+ vmk_flags.vmkf_copy_pageable = FALSE;
+ } else {
+ vmk_flags.vmkf_copy_pageable = TRUE;
+ }
vmk_flags.vmkf_copy_same_map = FALSE;
assert(map_size != 0);
kr = vm_map_copy_extract(target_map,
// Unsupported on this architecture.
}
+kern_return_t
+pmap_cs_allow_invalid(__unused pmap_t pmap)
+{
+ // Unsupported on this architecture.
+ return KERN_SUCCESS;
+}
+
void *
pmap_claim_reserved_ppl_page(void)
{
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
/* Optional CPU features -- an SoC may #undef these */
#define ARM_PARAMETERIZED_PMAP 1
#define __ARM_MIXED_PAGE_SIZE__ 1
-#define HAS_APCTL_EL1_USERKEYEN 1 /* Supports use of KernKey in EL0 */
-
-/*
- * APSTS_SUPPORTED: Pointer authentication status registers, MKEYVld flag moved here from APCTL on APPLELIGHTNING (H12)
- */
-#define __APSTS_SUPPORTED__ 1
#define __ARM_RANGE_TLBI__ 1
#define __ARM_E2H__ 1
#if defined(CPU_HAS_APPLE_PAC) && defined(__arm64e__)
#define HAS_APPLE_PAC 1 /* Has Apple ARMv8.3a pointer authentication */
-#define KERNEL_ROP_ID 0xfeedfacefeedfacf /* placeholder static kernel ROP diversifier */
-#define KERNEL_KERNKEY_ID (KERNEL_ROP_ID + 4)
-#define KERNEL_JOP_ID (KERNEL_KERNKEY_ID + 2)
#endif
#include <pexpert/arm64/apple_arm64_regs.h>
#if defined(HAS_APPLE_PAC)
-#ifdef ASSEMBLER
-#define ARM64_REG_APCTL_EL1 S3_4_c15_c0_4
-#define ARM64_REG_APSTS_EL1 S3_6_c15_c12_4
-#else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1 "S3_4_c15_c0_4"
-#define ARM64_REG_APSTS_EL1 "S3_6_c15_c12_4"
-#endif /* ASSEMBLER */
#if ASSEMBLER
-#define ARM64_REG_KERNELKEYLO_EL1 S3_4_c15_c1_0
-#define ARM64_REG_KERNELKEYHI_EL1 S3_4_c15_c1_1
-
#define ARM64_REG_APIAKEYLO_EL1 S3_0_c2_c1_0
#define ARM64_REG_APIAKEYHI_EL1 S3_0_c2_c1_1
#define ARM64_REG_APIBKEYLO_EL1 S3_0_c2_c1_2
#define ARM64_REG_APGAKEYLO_EL1 S3_0_c2_c3_0
#define ARM64_REG_APGAKEYHI_EL1 S3_0_c2_c3_1
#else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1 "S3_4_c15_c0_4"
-
-#define ARM64_REG_KERNELKEYLO_EL1 "S3_4_c15_c1_0"
-#define ARM64_REG_KERNELKEYHI_EL1 "S3_4_c15_c1_1"
-
#define ARM64_REG_APIAKEYLO_EL1 "S3_0_c2_c1_0"
#define ARM64_REG_APIAKEYHI_EL1 "S3_0_c2_c1_1"
#define ARM64_REG_APIBKEYLO_EL1 "S3_0_c2_c1_2"
___asan_version_mismatch_check_apple_1000
___asan_version_mismatch_check_apple_1001
___asan_version_mismatch_check_apple_clang_1100
+___asan_version_mismatch_check_apple_clang_1200
___asan_init
___asan_memcpy
___asan_memmove
SYMBOL_SET_BUILD += $(OBJPATH)/Kasan_kasan.symbolset
endif
+ifneq ($(RC_ProjectName),xnu_libraries)
# Our external dependency on allsymbols is fine because this runs in a later phase (config_install vs. config_all)
$(OBJPATH)/%.symbolset: $(SOURCE)/%.exports
@$(LOG_SYMBOLSET) "$*$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))"
exit $$cmdstatus
do_config_install:: $(SYMROOT_KEXT) $(DSTROOT_KEXT)
-
+else
+# We are building XNU as a static library - no need for the symbol kexts
+endif
# Install helper scripts
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
$(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h: $(SRCROOT)/$(COMPONENT)/kasan-blacklist-dynamic
@$(LOG_GENERATE) "$(notdir $@)"
@$(SRCROOT)/$(COMPONENT)/tools/generate_dynamic_blacklist.py "$<" > "$@"
$(SRCROOT)/$(COMPONENT)/kasan_dynamic_blacklist.c: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1200, void);
void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
LLDBMACROS_DEST:=$(LLDBMACROS_BOOTSTRAP_DEST)/lldbmacros/
LLDBMACROS_USERDEBUG_FILES=
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
KERNEL_STATIC_DSYM_LLDBMACROS := $(OBJPATH)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros/
endif
+endif
LLDBMACROS_USERDEBUG_FILES:= \
usertaskdebugging/__init__.py \
$(eval $(call INSTALLPYTHON_RULE_template,$(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbpydir,$(DATA_UNIFDEF),$(LLDBMACROS_BOOTSTRAP_DEST)/))
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES=$(addprefix $(KERNEL_STATIC_DSYM_LLDBMACROS), $(LLDBMACROS_PYTHON_FILES))
$(eval $(call INSTALLPYTHON_RULE_template,$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES),$(LLDBMACROS_SOURCE)%,sdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)))
$(eval $(call INSTALLPYTHON_RULE_template,$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbsdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)/../))
endif
+endif
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS := \
$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES) \
$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME)
endif
+endif
lldbmacros_install: $(INSTALL_LLDBMACROS_PYTHON_FILES) $(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME) $(STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS)
$(_v)$(MKDIR) $(LLDBMACROS_DEST)/builtinkexts
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
$(_v)$(MKDIR) $(KERNEL_STATIC_DSYM_LLDBMACROS)/builtinkexts
endif
+endif