git.saurik.com Git - apple/xnu.git/commitdiff
xnu-7195.81.3.tar.gz  macos-112  v7195.81.3
author    Apple <opensource@apple.com>
          Tue, 26 Jan 2021 20:33:42 +0000 (20:33 +0000)
committer Apple <opensource@apple.com>
          Tue, 26 Jan 2021 20:33:42 +0000 (20:33 +0000)
92 files changed:
Makefile
README.md
SETUP/config/config.h
SETUP/config/mkmakefile.c
bsd/conf/Makefile.template
bsd/conf/files
bsd/kern/kern_cs.c
bsd/kern/kern_descrip.c
bsd/kern/kern_exec.c
bsd/kern/kern_guarded.c
bsd/kern/kern_lockf.c
bsd/kern/proc_info.c
bsd/kern/sys_generic.c
bsd/kern/uipc_usrreq.c
bsd/net/content_filter.c
bsd/net/rtsock.c
bsd/netinet/flow_divert.c
bsd/netinet/tcp_input.c
bsd/netinet/tcp_usrreq.c
bsd/netinet6/icmp6.c
bsd/netinet6/ip6_id.c
bsd/netinet6/ipsec.c
bsd/netinet6/raw_ip6.c
bsd/netinet6/udp6_output.c
bsd/netinet6/udp6_usrreq.c
bsd/sys/file_internal.h
bsd/vfs/kpi_vfs.c
bsd/vfs/vfs_xattr.c
config/Makefile
config/MasterVersion
doc/pac.md [deleted file]
iokit/Kernel/IOHibernateIO.cpp
iokit/Kernel/IOPMrootDomain.cpp
iokit/bsddev/IOKitBSDInit.cpp
iokit/conf/Makefile.template
libkern/conf/Makefile.template
libsa/conf/Makefile.template
makedefs/MakeInc.cmd
makedefs/MakeInc.def
makedefs/MakeInc.kernel
makedefs/MakeInc.top
osfmk/arm/arm_init.c
osfmk/arm/machine_routines.h
osfmk/arm/machine_routines_apple.c
osfmk/arm/pmap.c
osfmk/arm/pmap.h
osfmk/arm/trustcache.c
osfmk/arm64/arm_vm_init.c
osfmk/arm64/cswitch.s
osfmk/arm64/locore.s
osfmk/arm64/machine_routines.c
osfmk/arm64/machine_routines_asm.s
osfmk/arm64/pac_asm.h [deleted file]
osfmk/arm64/pinst.s
osfmk/arm64/platform_tests.c
osfmk/arm64/proc_reg.h
osfmk/arm64/start.s
osfmk/conf/Makefile.template
osfmk/conf/files.arm64
osfmk/ipc/ipc_port.c
osfmk/ipc/ipc_right.c
osfmk/ipc/ipc_voucher.c
osfmk/ipc/mach_port.c
osfmk/kern/startup.c
osfmk/kern/task.c
osfmk/kern/thread.c
osfmk/libsa/string.h
osfmk/mach/memory_object_types.h
osfmk/vm/bsd_vm.c
osfmk/vm/device_vm.c
osfmk/vm/memory_object.c
osfmk/vm/pmap.h
osfmk/vm/vm_apple_protect.c
osfmk/vm/vm_compressor_pager.c
osfmk/vm/vm_fourk_pager.c
osfmk/vm/vm_map.c
osfmk/vm/vm_map.h
osfmk/vm/vm_pageout.c
osfmk/vm/vm_shared_region_pager.c
osfmk/vm/vm_swapfile_pager.c
osfmk/vm/vm_user.c
osfmk/x86_64/pmap.c
pexpert/conf/Makefile.template
pexpert/pexpert/arm64/H13.h
pexpert/pexpert/arm64/apple_arm64_common.h
pexpert/pexpert/arm64/apple_arm64_regs.h
san/Kasan_kasan.exports
san/Makefile
san/conf/Makefile.template
san/kasan.c
security/conf/Makefile.template
tools/lldbmacros/Makefile

index a3afec9d09dd54b2dccc3b2d85d1e964cb0eecec..8b1e30f654286e49c82164e05d236cf4554f3ceb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -31,6 +31,7 @@ export MakeInc_def=${VERSDIR}/makedefs/MakeInc.def
 export MakeInc_rule=${VERSDIR}/makedefs/MakeInc.rule
 export MakeInc_dir=${VERSDIR}/makedefs/MakeInc.dir
 
+
 #
 # Dispatch non-xnu build aliases to their own build
 # systems. All xnu variants start with MakeInc_top.
@@ -186,7 +187,7 @@ TOP_TARGETS = \
        install install_desktop install_embedded \
        install_release_embedded install_development_embedded \
        install_kernels \
-       cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \
+       cscope tags TAGS \
        help
 
 DEFAULT_TARGET = all
index be335d459af2879d41f5086df01b989482fd9f19..da6bb7bc15a8eec1644a6223709f21f3be93dbae 100644 (file)
--- a/README.md
+++ b/README.md
@@ -169,18 +169,6 @@ Set up your build environment and from the top directory, run:
     $ make cscope   # this will build cscope database
 
 
-Code Style
-==========
-
-Source files can be reformatted to comply with the xnu code style using the "restyle" make target invoked from the
-top-level project directory.
-
-   $ make restyle      # re-format all source files to be xnu code style conformant.
-
-Compliance can be checked using the "checkstyle" make target.
-
-   $ make checkstyle   # Check all relevant source files for xnu code style conformance.
-
 How to install a new header file from XNU
 =========================================
 
index 54c44128999b0a5613067614167efcc906e02987..a5f04b9dbcc6c21bc7a41a1408d42adc13bde0ac 100644 (file)
@@ -83,6 +83,7 @@ struct file_list {
  */
 #define CONFIGDEP       0x01    /* obsolete? */
 #define OPTIONSDEF      0x02    /* options definition entry */
+#define LIBRARYDEP      0x04    /* include file in library build */
 
 struct device {
        int     d_type;                 /* CONTROLLER, DEVICE, bus adaptor */
index 9614ba1952fd586e474dbece9b39f74656f41570..a17fef6097305a4a381164fc5945923013582022 100644 (file)
@@ -65,7 +65,7 @@ static char sccsid[] __attribute__((used)) = "@(#)mkmakefile.c        5.21 (Berkeley) 6
 #include "config.h"
 
 void    read_files(void);
-void    do_objs(FILE *fp, const char *msg, int ext);
+void    do_objs(FILE *fp, const char *msg, int ext, int flags);
 void    do_files(FILE *fp, const char *msg, char ext);
 void    do_machdep(FILE *ofp);
 void    do_rules(FILE *f);
@@ -243,16 +243,18 @@ makefile(void)
                continue;
 percent:
                if (eq(line, "%OBJS\n")) {
-                       do_objs(ofp, "OBJS=", -1);
+                       do_objs(ofp, "OBJS=", -1, 0);
+               } else if (eq(line, "%LIBOBJS\n")) {
+                       do_objs(ofp, "LIBOBJS=", -1, LIBRARYDEP);
                } else if (eq(line, "%CFILES\n")) {
                        do_files(ofp, "CFILES=", 'c');
-                       do_objs(ofp, "COBJS=", 'c');
+                       do_objs(ofp, "COBJS=", 'c', 0);
                } else if (eq(line, "%CXXFILES\n")) {
                        do_files(ofp, "CXXFILES=", 'p');
-                       do_objs(ofp, "CXXOBJS=", 'p');
+                       do_objs(ofp, "CXXOBJS=", 'p', 0);
                } else if (eq(line, "%SFILES\n")) {
                        do_files(ofp, "SFILES=", 's');
-                       do_objs(ofp, "SOBJS=", 's');
+                       do_objs(ofp, "SOBJS=", 's', 0);
                } else if (eq(line, "%MACHDEP\n")) {
                        do_machdep(ofp);
                } else if (eq(line, "%RULES\n")) {
@@ -287,6 +289,7 @@ read_files(void)
        const char *devorprof;
        int options;
        int not_option;
+       int for_xnu_lib;
        char pname[BUFSIZ];
        char fname[1024];
        char *rest = (char *) 0;
@@ -346,6 +349,7 @@ next:
        nreqs = 0;
        devorprof = "";
        needs = 0;
+       for_xnu_lib = 0;
        if (eq(wd, "standard")) {
                goto checkdev;
        }
@@ -371,6 +375,10 @@ nextopt:
                next_word(fp, wd);
                goto save;
        }
+       if (eq(wd, "xnu-library")) {
+               for_xnu_lib = 1;
+               goto nextopt;
+       }
        nreqs++;
        if (needs == 0 && nreqs == 1) {
                needs = ns(wd);
@@ -469,6 +477,10 @@ checkdev:
                        goto getrest;
                }
                next_word(fp, wd);
+               if (wd && eq(wd, "xnu-library")) {
+                       for_xnu_lib = 1;
+                       next_word(fp, wd);
+               }
                if (wd) {
                        devorprof = wd;
                        next_word(fp, wd);
@@ -508,6 +520,9 @@ getrest:
        if (pf && pf->f_type == INVISIBLE) {
                pf->f_flags = 1;                /* mark as duplicate */
        }
+       if (for_xnu_lib) {
+               tp->f_flags |= LIBRARYDEP;
+       }
        goto next;
 }
 
@@ -541,7 +556,7 @@ put_source_file_name(FILE *fp, struct file_list *tp)
 }
 
 void
-do_objs(FILE *fp, const char *msg, int ext)
+do_objs(FILE *fp, const char *msg, int ext, int flags)
 {
        struct file_list *tp;
        int lpos, len;
@@ -556,6 +571,13 @@ do_objs(FILE *fp, const char *msg, int ext)
                        continue;
                }
 
+               /*
+                * Check flags (if any)
+                */
+               if (flags && ((tp->f_flags & flags) != flags)) {
+                       continue;
+               }
+
                /*
                 *      Check for '.o' file in list
                 */
index 56588cf827a1376489a06be5536fcef0072c8aa2..56b97280f06dc398c9ee2d61ac6ac7ac521b3449 100644 (file)
@@ -65,6 +65,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -606,6 +608,12 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
 MAKESYSCALLS = $(SRCROOT)/bsd/kern/makesyscalls.sh
 
 init_sysent.c: $(TARGET)/bsd.syscalls.master
@@ -624,7 +632,11 @@ systrace_args.c: $(TARGET)/bsd.syscalls.master
        @$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$(<F)$(Color0)"
        $(_v)$(MAKESYSCALLS) $< systrace > /dev/null
 
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 6a99242b6c379eb047ee644384792441aba9112e..f151c7312ab1a5d179dc273f36f8d0d4c93c2d02 100644 (file)
@@ -494,7 +494,7 @@ bsd/kern/mach_loader.c                      standard
 bsd/kern/posix_sem.c                   standard
 bsd/kern/posix_shm.c                   standard
 # XXXdbg - I need this in the journaling and block cache code
-bsd/kern/qsort.c                               standard
+bsd/kern/qsort.c                               standard xnu-library
 bsd/kern/kpi_socket.c                  optional sockets
 bsd/kern/kpi_socketfilter.c            optional sockets
 bsd/kern/proc_info.c                   standard
index ad1eb76efdbd85e70da8de32270a256fbd90165b..c3cc2afa82f30f57d9e2b79b71a5e10d00fb454a 100644 (file)
@@ -63,6 +63,7 @@
 #include <kern/task.h>
 
 #include <vm/vm_map.h>
+#include <vm/pmap.h>
 #include <vm/vm_kern.h>
 
 
@@ -231,6 +232,18 @@ cs_allow_invalid(struct proc *p)
        if (p->p_csflags & CS_VALID) {
                p->p_csflags |= CS_DEBUGGED;
        }
+#if PMAP_CS
+       task_t procTask = proc_task(p);
+       if (procTask) {
+               vm_map_t proc_map = get_task_map_reference(procTask);
+               if (proc_map) {
+                       if (vm_map_cs_wx_enable(proc_map) != KERN_SUCCESS) {
+                               printf("CODE SIGNING: cs_allow_invalid() not allowed by pmap: pid %d\n", p->p_pid);
+                       }
+                       vm_map_deallocate(proc_map);
+               }
+       }
+#endif // PMAP_CS
        proc_unlock(p);
 
        /* allow a debugged process to hide some (debug-only!) memory */
index cb5705e8e9826f371714633ff0c43be692d32315..8e7f964f6bbfd6bc4c258b54a03980fdeceea799 100644 (file)
@@ -760,7 +760,7 @@ sys_dup(proc_t p, struct dup_args *uap, int32_t *retval)
                proc_fdunlock(p);
                return error;
        }
-       if (FP_ISGUARDED(fp, GUARD_DUP)) {
+       if (fp_isguarded(fp, GUARD_DUP)) {
                error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
                (void) fp_drop(p, old, fp, 1);
                proc_fdunlock(p);
@@ -820,7 +820,7 @@ startover:
                proc_fdunlock(p);
                return error;
        }
-       if (FP_ISGUARDED(fp, GUARD_DUP)) {
+       if (fp_isguarded(fp, GUARD_DUP)) {
                error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
                (void) fp_drop(p, old, fp, 1);
                proc_fdunlock(p);
@@ -861,7 +861,7 @@ closeit:
                }
 
                if ((nfp = fdp->fd_ofiles[new]) != NULL) {
-                       if (FP_ISGUARDED(nfp, GUARD_CLOSE)) {
+                       if (fp_isguarded(nfp, GUARD_CLOSE)) {
                                fp_drop(p, old, fp, 1);
                                error = fp_guard_exception(p,
                                    new, nfp, kGUARD_EXC_CLOSE);
@@ -1047,7 +1047,7 @@ sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
        switch (uap->cmd) {
        case F_DUPFD:
        case F_DUPFD_CLOEXEC:
-               if (FP_ISGUARDED(fp, GUARD_DUP)) {
+               if (fp_isguarded(fp, GUARD_DUP)) {
                        error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
                        goto out;
                }
@@ -1075,7 +1075,7 @@ sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
                if (uap->arg & FD_CLOEXEC) {
                        *pop |= UF_EXCLOSE;
                } else {
-                       if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+                       if (fp_isguarded(fp, 0)) {
                                error = fp_guard_exception(p,
                                    fd, fp, kGUARD_EXC_NOCLOEXEC);
                                goto out;
@@ -3332,7 +3332,7 @@ close_nocancel(proc_t p, int fd)
                return EBADF;
        }
 
-       if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+       if (fp_isguarded(fp, GUARD_CLOSE)) {
                int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
                proc_fdunlock(p);
                return error;
@@ -5290,7 +5290,7 @@ sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
                goto out_unlock;
        }
 
-       if (FP_ISGUARDED(fp, GUARD_FILEPORT)) {
+       if (fp_isguarded(fp, GUARD_FILEPORT)) {
                err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
                goto out_unlock;
        }
@@ -5517,7 +5517,7 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error)
         */
        switch (error) {
        case ENODEV:
-               if (FP_ISGUARDED(wfp, GUARD_DUP)) {
+               if (fp_isguarded(wfp, GUARD_DUP)) {
                        proc_fdunlock(p);
                        return EPERM;
                }
index 0385bf1b48cbb46ebc556efc2cbee4a5d6cbd521..e8a1e25e037127ac3381e9ae9eb6e32983b9fa96 100644 (file)
@@ -2463,7 +2463,7 @@ exec_handle_file_actions(struct image_params *imgp, short psa_flags)
                        proc_fdlock(p);
                        if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) {
                                error = EBADF;
-                       } else if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+                       } else if (fp_isguarded(fp, 0)) {
                                error = fp_guard_exception(p, psfa->psfaa_filedes,
                                    fp, kGUARD_EXC_NOCLOEXEC);
                        } else {
index 5e10308d84fa8523338d4d5806b08dee2983db3a..6827a927fad9b69e3fdc6032e7d5cd21fa946c0b 100644 (file)
@@ -56,7 +56,6 @@
 #include <sys/reason.h>
 #endif
 
-
 #define f_flag fp_glob->fg_flag
 extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
     user_addr_t bufp, user_size_t nbyte, off_t offset,
@@ -86,17 +85,25 @@ kern_return_t task_violated_guard(mach_exception_code_t, mach_exception_subcode_
 
 struct guarded_fileproc {
        struct fileproc gf_fileproc;
-       u_int           gf_magic;
        u_int           gf_attrs;
        guardid_t       gf_guard;
 };
 
-const size_t sizeof_guarded_fileproc = sizeof(struct guarded_fileproc);
+ZONE_DECLARE(gfp_zone, "guarded_fileproc",
+    sizeof(struct guarded_fileproc),
+    ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
 
-#define FP_TO_GFP(fp)   ((struct guarded_fileproc *)(fp))
-#define GFP_TO_FP(gfp)  (&(gfp)->gf_fileproc)
+static inline struct guarded_fileproc *
+FP_TO_GFP(struct fileproc *fp)
+{
+       struct guarded_fileproc *gfp =
+           __container_of(fp, struct guarded_fileproc, gf_fileproc);
+
+       zone_require(gfp_zone, gfp);
+       return gfp;
+}
 
-#define GUARDED_FILEPROC_MAGIC  0x29083
+#define GFP_TO_FP(gfp)  (&(gfp)->gf_fileproc)
 
 struct gfp_crarg {
        guardid_t gca_guard;
@@ -109,17 +116,12 @@ guarded_fileproc_alloc_init(void *crarg)
        struct gfp_crarg *aarg = crarg;
        struct guarded_fileproc *gfp;
 
-       if ((gfp = kalloc(sizeof(*gfp))) == NULL) {
-               return NULL;
-       }
-
-       bzero(gfp, sizeof(*gfp));
+       gfp = zalloc_flags(gfp_zone, Z_WAITOK | Z_ZERO);
 
        struct fileproc *fp = &gfp->gf_fileproc;
        os_ref_init(&fp->fp_iocount, &f_refgrp);
        fp->fp_flags = FTYPE_GUARDED;
 
-       gfp->gf_magic = GUARDED_FILEPROC_MAGIC;
        gfp->gf_guard = aarg->gca_guard;
        gfp->gf_attrs = aarg->gca_attrs;
 
@@ -130,13 +132,7 @@ void
 guarded_fileproc_free(struct fileproc *fp)
 {
        struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
-       if (FILEPROC_TYPE(fp) != FTYPE_GUARDED ||
-           GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
-               panic("%s: corrupt fp %p flags %x", __func__, fp, fp->fp_flags);
-       }
-
-       kfree(gfp, sizeof(*gfp));
+       zfree(gfp_zone, gfp);
 }
 
 static int
@@ -155,10 +151,6 @@ fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
        }
        struct guarded_fileproc *gfp = FP_TO_GFP(fp);
 
-       if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
-               panic("%s: corrupt fp %p", __func__, fp);
-       }
-
        if (guard != gfp->gf_guard) {
                (void) fp_drop(p, fd, fp, locked);
                return EPERM; /* *not* a mismatch exception */
@@ -172,24 +164,20 @@ fp_lookup_guarded(proc_t p, int fd, guardid_t guard,
 /*
  * Expected use pattern:
  *
- * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ * if (fp_isguarded(fp, GUARD_CLOSE)) {
  *      error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
  *      proc_fdunlock(p);
  *      return error;
  * }
+ *
+ * Passing `0` to `attrs` returns whether the fp is guarded at all.
  */
 
 int
 fp_isguarded(struct fileproc *fp, u_int attrs)
 {
        if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
-               struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
-               if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
-                       panic("%s: corrupt gfp %p flags %x",
-                           __func__, gfp, fp->fp_flags);
-               }
-               return (attrs & gfp->gf_attrs) == attrs;
+               return (attrs & FP_TO_GFP(fp)->gf_attrs) == attrs;
        }
        return 0;
 }
@@ -581,11 +569,6 @@ restart:
                         */
                        struct guarded_fileproc *gfp = FP_TO_GFP(fp);
 
-                       if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
-                               panic("%s: corrupt gfp %p flags %x",
-                                   __func__, gfp, fp->fp_flags);
-                       }
-
                        if (oldg == gfp->gf_guard &&
                            uap->guardflags == gfp->gf_attrs) {
                                /*
@@ -674,11 +657,6 @@ restart:
                                goto dropout;
                        }
 
-                       if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
-                               panic("%s: corrupt gfp %p flags %x",
-                                   __func__, gfp, fp->fp_flags);
-                       }
-
                        if (oldg != gfp->gf_guard ||
                            uap->guardflags != gfp->gf_attrs) {
                                error = EPERM;
index 782346ed20ae8701523e1f071b5d1786b89ad240..03f86bef2ab9b6e6bfec2f5ccedca0832fa46af6 100644 (file)
@@ -506,7 +506,7 @@ lf_setlock(struct lockf *lock, struct timespec *timeout)
 {
        struct lockf *block;
        struct lockf **head = lock->lf_head;
-       struct lockf **prev, *overlap, *ltmp;
+       struct lockf **prev, *overlap;
        static const char lockstr[] = "lockf";
        int priority, needtolink, error;
        struct vnode *vp = lock->lf_vnode;
@@ -851,6 +851,7 @@ scan:
                                lf_wakelock(overlap, TRUE);
                        }
                        overlap->lf_type = lock->lf_type;
+                       lf_move_blocked(overlap, lock);
                        FREE(lock, M_LOCKF);
                        lock = overlap; /* for lf_coalesce_adjacent() */
                        break;
@@ -860,6 +861,7 @@ scan:
                         * Check for common starting point and different types.
                         */
                        if (overlap->lf_type == lock->lf_type) {
+                               lf_move_blocked(overlap, lock);
                                FREE(lock, M_LOCKF);
                                lock = overlap; /* for lf_coalesce_adjacent() */
                                break;
@@ -891,14 +893,7 @@ scan:
                            overlap->lf_type == F_WRLCK) {
                                lf_wakelock(overlap, TRUE);
                        } else {
-                               while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
-                                       ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
-                                       TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
-                                           lf_block);
-                                       TAILQ_INSERT_TAIL(&lock->lf_blkhd,
-                                           ltmp, lf_block);
-                                       ltmp->lf_next = lock;
-                               }
+                               lf_move_blocked(lock, overlap);
                        }
                        /*
                         * Add the new lock if necessary and delete the overlap.
index b34ca8058d901b5d96fe23d52d0a9c8136108bf2..accfd3f2d2ef628ca846885e2389ee399edb4217 100644 (file)
@@ -2477,7 +2477,7 @@ fill_fileinfo(struct fileproc * fp, proc_t proc, int fd, struct proc_fileinfo *
                        fproc->fi_status |= PROC_FP_CLFORK;
                }
        }
-       if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+       if (fp_isguarded(fp, 0)) {
                fproc->fi_status |= PROC_FP_GUARDED;
                fproc->fi_guardflags = 0;
                if (fp_isguarded(fp, GUARD_CLOSE)) {
index 25b46a6e1fad51221a5e9cfe34a57add46428760..77b32e3d47405bce68a3c2cd6c1ffd5fd7562aaf 100644 (file)
@@ -502,7 +502,7 @@ write_nocancel(struct proc *p, struct write_nocancel_args *uap, user_ssize_t *re
        }
        if ((fp->f_flag & FWRITE) == 0) {
                error = EBADF;
-       } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+       } else if (fp_isguarded(fp, GUARD_WRITE)) {
                proc_fdlock(p);
                error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
                proc_fdunlock(p);
@@ -552,7 +552,7 @@ pwrite_nocancel(struct proc *p, struct pwrite_nocancel_args *uap, user_ssize_t *
 
        if ((fp->f_flag & FWRITE) == 0) {
                error = EBADF;
-       } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+       } else if (fp_isguarded(fp, GUARD_WRITE)) {
                proc_fdlock(p);
                error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
                proc_fdunlock(p);
@@ -670,7 +670,7 @@ preparefilewrite(struct proc *p, struct fileproc **fp_ret, int fd, int check_for
                error = EBADF;
                goto ExitThisRoutine;
        }
-       if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+       if (fp_isguarded(fp, GUARD_WRITE)) {
                error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
                goto ExitThisRoutine;
        }
index 2bffce23111083168a3293c6200dfbd1f4e798ae..c363f4cf391473e9932acb2cbf3dbd147e7942ad 100644 (file)
@@ -2295,7 +2295,7 @@ unp_internalize(struct mbuf *control, proc_t p)
                } else if (!fg_sendable(tmpfp->fp_glob)) {
                        proc_fdunlock(p);
                        return EINVAL;
-               } else if (FP_ISGUARDED(tmpfp, GUARD_SOCKET_IPC)) {
+               } else if (fp_isguarded(tmpfp, GUARD_SOCKET_IPC)) {
                        error = fp_guard_exception(p,
                            fds[i], tmpfp, kGUARD_EXC_SOCKET_IPC);
                        proc_fdunlock(p);
index ee3a5b63062c4dff4e26a495f75398d0ff086598..7617e7757fac51fda31126a25d37c3ba1393f49d 100644 (file)
@@ -7384,7 +7384,7 @@ done:
 
                cfc->cf_flags |= CFF_FLOW_CONTROLLED;
 
-               cfil_rw_unlock_exclusive(&cfil_lck_rw);
+               cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw);
        } else if (error != 0) {
                OSIncrementAtomic(&cfil_stats.cfs_stats_event_fail);
        }
index b676b9a13482ec40b666ceca7d37612120834bfb..992d0877e96087a5047fa4b48b34ae2af62746c4 100644 (file)
@@ -1096,6 +1096,9 @@ rt_xaddrs(caddr_t cp, caddr_t cplim, struct rt_addrinfo *rtinfo)
                        rtinfo->rti_info[i] = &sa_zero;
                        return 0; /* should be EINVAL but for compat */
                }
+               if (sa->sa_len < offsetof(struct sockaddr, sa_data)) {
+                       return EINVAL;
+               }
                /* accept it */
                rtinfo->rti_info[i] = sa;
                ADVANCE32(cp, sa);
index 6a68dac812447608f6be73f07f1086dfad4df7d5..e278115c2717d0ca1b6e5177913be27b748e5471 100644 (file)
@@ -3383,6 +3383,17 @@ done:
 errno_t
 flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p)
 {
+#if CONTENT_FILTER
+       if (SOCK_TYPE(so) == SOCK_STREAM && !(so->so_flags & SOF_CONTENT_FILTER)) {
+               int error = cfil_sock_attach(so, NULL, to, CFS_CONNECTION_DIR_OUT);
+               if (error != 0) {
+                       struct flow_divert_pcb  *fd_cb  = so->so_fd_pcb;
+                       FDLOG(LOG_ERR, fd_cb, "Failed to attach cfil: %d", error);
+                       return error;
+               }
+       }
+#endif /* CONTENT_FILTER */
+
        return flow_divert_connect_out_internal(so, to, p, false);
 }
 
index 078745a2408645bbdc06256189320644aafd2d5a..bb33ba1acf0b41b2e40bf929d6fa70eb48ce1a38 100644 (file)
@@ -3273,7 +3273,7 @@ findpcb:
                    inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
                        inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
                        inp->inp_flow |=
-                           (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+                           (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
                }
 
                /* reset the incomp processing flag */
index 2799b8fea9d5ea6672e4613df893f016f11dcf22..fca0d56a6d27855550e7b22fb2602f2c13f3bd4b 100644 (file)
@@ -1527,7 +1527,7 @@ tcp6_connect(struct tcpcb *tp, struct sockaddr *nam, struct proc *p)
        if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
                inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
                inp->inp_flow |=
-                   (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+                   (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
        }
 
        tcp_set_max_rwinscale(tp, so);
index 9c9fd07c89f37db1519600ccd97a9e2f2cf72a8e..08954c8ef688dc342ffa93a72b70748ec1b8ebb0 100644 (file)
@@ -986,6 +986,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code)
        }
 #endif
        eip6 = (struct ip6_hdr *)(icmp6 + 1);
+       bzero(&icmp6dst, sizeof(icmp6dst));
 
        /* Detect the upper level protocol */
        {
@@ -994,7 +995,6 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code)
                int eoff = off + sizeof(struct icmp6_hdr) +
                    sizeof(struct ip6_hdr);
                struct ip6ctlparam ip6cp;
-               struct in6_addr *finaldst = NULL;
                int icmp6type = icmp6->icmp6_type;
                struct ip6_frag *fh;
                struct ip6_rthdr *rth;
@@ -1080,7 +1080,7 @@ icmp6_notify_error(struct mbuf *m, int off, int icmp6len, int code)
                                        /* just ignore a bogus header */
                                        if ((rth0->ip6r0_len % 2) == 0 &&
                                            (hops = rth0->ip6r0_len / 2)) {
-                                               finaldst = (struct in6_addr *)(void *)(rth0 + 1) + (hops - 1);
+                                               icmp6dst.sin6_addr = *((struct in6_addr *)(void *)(rth0 + 1) + (hops - 1));
                                        }
                                }
                                eoff += rthlen;
@@ -1148,13 +1148,10 @@ notify:
                 */
                eip6 = (struct ip6_hdr *)(icmp6 + 1);
 
-               bzero(&icmp6dst, sizeof(icmp6dst));
                icmp6dst.sin6_len = sizeof(struct sockaddr_in6);
                icmp6dst.sin6_family = AF_INET6;
-               if (finaldst == NULL) {
+               if (IN6_IS_ADDR_UNSPECIFIED(&icmp6dst.sin6_addr)) {
                        icmp6dst.sin6_addr = eip6->ip6_dst;
-               } else {
-                       icmp6dst.sin6_addr = *finaldst;
                }
                if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL)) {
                        goto freeit;
@@ -1169,14 +1166,11 @@ notify:
                icmp6src.sin6_flowinfo =
                    (eip6->ip6_flow & IPV6_FLOWLABEL_MASK);
 
-               if (finaldst == NULL) {
-                       finaldst = &eip6->ip6_dst;
-               }
                ip6cp.ip6c_m = m;
                ip6cp.ip6c_icmp6 = icmp6;
                ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1);
                ip6cp.ip6c_off = eoff;
-               ip6cp.ip6c_finaldst = finaldst;
+               ip6cp.ip6c_finaldst = &icmp6dst.sin6_addr;
                ip6cp.ip6c_src = &icmp6src;
                ip6cp.ip6c_nxt = nxt;
 
index ff6b49af311de6c6543c62bef6067f90eba2f3ae..24780a44637018418cb1f2a1ae69bed0c49cc09a 100644 (file)
@@ -168,26 +168,6 @@ static struct randomtab randomtab_32 = {
        .ru_reseed = 0
 };
 
-static struct randomtab randomtab_20 = {
-       .ru_bits = 20,                  /* resulting bits */
-       .ru_out = 180,                  /* Time after wich will be reseeded */
-       .ru_max = 200000,                       /* Uniq cycle, avoid blackjack prediction */
-       .ru_gen = 2,                    /* Starting generator */
-       .ru_n = 524269,                 /* RU_N-1 = 2^2*3^2*14563 */
-       .ru_agen = 7,                   /* determine ru_a as RU_AGEN^(2*rand) */
-       .ru_m = 279936,                 /* RU_M = 2^7*3^7 - don't change */
-       .pfacts = { 2, 3, 14563, 0 },   /* factors of ru_n */
-       .ru_counter = 0,
-       .ru_msb = 0,
-       .ru_x = 0,
-       .ru_seed = 0,
-       .ru_seed2 = 0,
-       .ru_a = 0,
-       .ru_b = 0,
-       .ru_g = 0,
-       .ru_reseed = 0
-};
-
 static u_int32_t pmod(u_int32_t, u_int32_t, u_int32_t);
 static void initid(struct randomtab *);
 static u_int32_t randomid(struct randomtab *);
@@ -311,5 +291,5 @@ ip6_randomid(void)
 u_int32_t
 ip6_randomflowlabel(void)
 {
-       return randomid(&randomtab_20) & 0xfffff;
+       return RandomULong() & IPV6_FLOWLABEL_MASK;
 }
index 061e6f45cdf2f15b2ad13a8ab25422d8d331c088..48affe33aa30721c30c61d93616c28f1fe84bb53 100644 (file)
@@ -2519,7 +2519,7 @@ ipsec64_encapsulate(struct mbuf *m, struct secasvar *sav)
 
        /* construct new IPv4 header. see RFC 2401 5.1.2.1 */
        /* ECN consideration. */
-       ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
+       ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
 
        if (plen + sizeof(struct ip) < IP_MAXPACKET) {
                ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
@@ -2784,7 +2784,7 @@ ipsec46_encapsulate(struct ipsec_output_state *state, struct secasvar *sav)
 
        /* construct new IPv6 header. see RFC 2401 5.1.2.2 */
        /* ECN consideration. */
-       ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
+       ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
        if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
                ip6->ip6_plen = htons((u_int16_t)plen);
        } else {
index fc705f4f0d84fda280d3d40c9837d0a206c9f0fa..dcabe4257e7cf8eccc3117060ed5ac5d4294b741 100644 (file)
@@ -531,7 +531,7 @@ rip6_output(
        if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
                in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
                in6p->inp_flow |=
-                   (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+                   (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
        }
 
        M_PREPEND(m, sizeof(*ip6), M_WAIT, 1);
index 8b8c031cd8dcc8c19e47befb227be97ae831a318..8a6ea15ff0b9ac339be2195451b5119f16f8fbf4 100644 (file)
@@ -372,7 +372,7 @@ udp6_output(struct in6pcb *in6p, struct mbuf *m, struct sockaddr *addr6,
        if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
                in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
                in6p->inp_flow |=
-                   (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+                   (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
        }
 
        if (af == AF_INET) {
index 3f589dae02f84f70e6534d13804cdf3afb195c8d..f5b92e94b882358723b022353fcd925251e3b1ed 100644 (file)
@@ -927,7 +927,7 @@ do_flow_divert:
                    inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
                        inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
                        inp->inp_flow |=
-                           (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+                           (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
                }
        }
        return error;
index 1bebabf824438203d963e899895db66ba5e9cbfe..95f791a0f6e168de5da24bd558f2bc90f5e2166e 100644 (file)
@@ -123,9 +123,6 @@ struct fileproc {
 
 #define FILEPROC_TYPE(fp)       ((fp)->fp_flags & FP_TYPEMASK)
 
-#define FP_ISGUARDED(fp, attribs)  \
-               ((FILEPROC_TYPE(fp) == FTYPE_GUARDED) ? fp_isguarded(fp, attribs) : 0)
-
 typedef enum {
        FTYPE_SIMPLE    = 0,
        FTYPE_GUARDED   = (1 << _FP_TYPESHIFT)
index f4ef76113f593be558e8cc5a3ab76ac088ad9b8f..0b4f7a6285050c10b7993e43c3008ceb7f16805d 100644 (file)
@@ -4990,7 +4990,8 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
        struct nameidata nd;
        char smallname[64];
        char *filename = NULL;
-       size_t len;
+       size_t alloc_len;
+       size_t copy_len;
 
        if ((dvp == NULLVP) ||
            (basename == NULL) || (basename[0] == '\0') ||
@@ -4998,11 +4999,11 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
                return;
        }
        filename = &smallname[0];
-       len = snprintf(filename, sizeof(smallname), "._%s", basename);
-       if (len >= sizeof(smallname)) {
-               len++;  /* snprintf result doesn't include '\0' */
-               filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
-               len = snprintf(filename, len, "._%s", basename);
+       alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
+       if (alloc_len >= sizeof(smallname)) {
+               alloc_len++;  /* snprintf result doesn't include '\0' */
+               filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+               copy_len = snprintf(filename, alloc_len, "._%s", basename);
        }
        NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
            CAST_USER_ADDR_T(filename), ctx);
@@ -5028,7 +5029,7 @@ xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
        vnode_put(xvp);
 out2:
        if (filename && filename != &smallname[0]) {
-               kheap_free(KHEAP_TEMP, filename, len);
+               kheap_free(KHEAP_TEMP, filename, alloc_len);
        }
 }
 #endif /* CONFIG_APPLEDOUBLE */
@@ -5436,7 +5437,8 @@ VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags,
                        _err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
                }
                DTRACE_FSINFO(advlock, vnode_t, vp);
-               if (op == F_UNLCK && flags == F_FLOCK) {
+               if (op == F_UNLCK &&
+                   (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
                        post_event_if_success(vp, _err, NOTE_FUNLOCK);
                }
        }
index 00ee6c94c277a748bb5f6f95ce459d3be856d391..0dfcf949f0f92ac2384c2d2efe4399082e1afc1c 100644 (file)
@@ -2463,7 +2463,8 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context)
        char smallname[64];
        char *filename = NULL;
        const char *basename = NULL;
-       size_t len;
+       size_t alloc_len;
+       size_t copy_len;
        errno_t error;
        int opened = 0;
        int referenced = 0;
@@ -2493,11 +2494,11 @@ open_xattrfile(vnode_t vp, int fileflags, vnode_t *xvpp, vfs_context_t context)
                goto out;
        }
        filename = &smallname[0];
-       len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
-       if (len >= sizeof(smallname)) {
-               len++;  /* snprintf result doesn't include '\0' */
-               filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
-               len = snprintf(filename, len, "%s%s", ATTR_FILE_PREFIX, basename);
+       alloc_len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
+       if (alloc_len >= sizeof(smallname)) {
+               alloc_len++;  /* snprintf result doesn't include '\0' */
+               filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+               copy_len = snprintf(filename, alloc_len, "%s%s", ATTR_FILE_PREFIX, basename);
        }
        /*
         * Note that the lookup here does not authorize.  Since we are looking
@@ -2687,7 +2688,7 @@ out:
                vnode_putname(basename);
        }
        if (filename && filename != &smallname[0]) {
-               kheap_free(KHEAP_TEMP, filename, len);
+               kheap_free(KHEAP_TEMP, filename, alloc_len);
        }
 
        *xvpp = xvp;  /* return a referenced vnode */
index e5a5fb5c9bccd60f6ba01c85f0a8a02c1293f35e..5e583ae3b90e0df7b663351d184b1c2a351475c4 100644 (file)
@@ -85,6 +85,7 @@ $(OBJPATH)/symbolsets.plist: $(SYMBOL_SET_PLIST_BUILD)
        $(_v)$(SOURCE)/generate_combined_symbolsets_plist.sh $@ $^ $(_vstdout)
        $(_v)$(PLUTIL) -convert binary1 -s $@
 
+ifneq ($(RC_ProjectName),xnu_libraries)
 $(OBJPATH)/allsymbols: $(OBJPATH)/$(KERNEL_FILE_NAME)
        $(_v)$(NM) -gj $< | sort -u > $@
 
@@ -119,6 +120,9 @@ build_symbol_sets: check_all_exports $(SYMBOL_SET_BUILD) $(OBJPATH)/allsymbols \
                        $(OBJPATH)/$(MI_SUPPORTED_KPI_FILENAME)
 
 do_config_all::        build_symbol_sets
+else
+# We are building XNU as a static library - avoid creating symbol sets
+endif
 
 # There's no simple static pattern rule for these paths, so hardcode dependencies in the command list
 $(SYMROOT_INSTALL_KEXT_MACHO_FILES): ALWAYS
@@ -155,6 +159,7 @@ $(DSTROOT)/$(KRESDIR)/$(MD_SUPPORTED_KPI_FILENAME) $(DSTROOT)/$(KRESDIR)/$(MI_SU
        $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
 
 ifneq ($(INSTALL_KASAN_ONLY),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
 do_config_install::    $(SYMROOT_INSTALL_KEXT_MACHO_FILES) \
                                $(SYMROOT_INSTALL_KEXT_PLISTS) \
                                $(DSTROOT_INSTALL_KEXT_MACHO_FILES) \
@@ -162,6 +167,7 @@ do_config_install:: $(SYMROOT_INSTALL_KEXT_MACHO_FILES) \
                                $(DSTROOT)/$(KRESDIR)/$(MD_SUPPORTED_KPI_FILENAME) \
                                $(DSTROOT)/$(KRESDIR)/$(MI_SUPPORTED_KPI_FILENAME)
 endif
+endif
 
 $(OBJPATH)/all-kpi.exp: $(EXPORTS_FILES)
        $(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(Kasan_EXPORTS)
index 3f2bda89b984e00970255b64da46f6318b169a20..7da98bbff60f2f5d2411c4bbabfad07ac7a14d15 100644 (file)
@@ -1,4 +1,4 @@
-20.2.0
+20.3.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
diff --git a/doc/pac.md b/doc/pac.md
deleted file mode 100644 (file)
index 09bd890..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-ARMv8.3 Pointer Authentication in xnu
-=====================================
-
-Introduction
-------------
-
-This document describes xnu's use of the ARMv8.3-PAuth extension. Specifically,
-xnu uses ARMv8.3-PAuth to protect against Return-Oriented-Programming (ROP)
-and Jump-Oriented-Programming (JOP) attacks, which attempt to gain control flow
-over a victim program by overwriting return addresses or function pointers
-stored in memory.
-
-It is assumed the reader is already familiar with the basic concepts behind
-ARMv8.3-PAuth and what its instructions do.  The "ARMv8.3-A Pointer
-Authentication" section of Google Project Zero's ["Examining Pointer
-Authentication on the iPhone
-XS"](https://googleprojectzero.blogspot.com/2019/02/examining-pointer-authentication-on.html)
-provides a good introduction to ARMv8.3-PAuth. The reader may find more
-comprehensive background material in:
-
-* The "Pointer authentication in AArch64 state" section of the [ARMv8
-  ARM](https://developer.arm.com/docs/ddi0487/latest/arm-architecture-reference-manual-armv8-for-armv8-a-architecture-profile)
-  describes the new instructions and registers associated with ARMv8.3-PAuth.
-
-* [LLVM's Pointer Authentication
-  documentation](https://github.com/apple/llvm-project/blob/apple/master/clang/docs/PointerAuthentication.rst)
-  outlines how clang uses ARMv8.3-PAuth instructions to harden key C, C++,
-  Swift, and Objective-C language constructs.
-
-### Threat model
-
-Pointer authentication's threat model assumes that an attacker has found a gadget
-to read and write arbitrary memory belonging to a victim process, which may
-include the kernel. The attacker does *not* have the ability to execute
-arbitrary code in that process's context.  Pointer authentication aims to
-prevent the attacker from gaining control flow over the victim process by
-overwriting sensitive pointers in its address space (e.g., return addresses
-stored on the stack).
-
-Following this threat model, xnu takes a two-pronged approach to prevent the
-attacker from gaining control flow over the victim process:
-
-1. Both xnu and first-party binaries are built with LLVM's `-arch arm64e` flag,
-   which generates pointer-signing and authentication instructions to protect
-   addresses stored in memory (including ones pushed to the stack).  This
-   process is generally transparent to xnu, with exceptions discussed below.
-
-2. On exception entry, xnu hashes critical register state before it is spilled
-   to memory.  On exception return, the reloaded state is validated against this
-   hash.
-
-The ["xnu PAC infrastructure"](#xnu-pac-infrastructure) section discusses how
-these hardening techniques are implemented in xnu in more detail.
-
-
-Key generation on Apple CPUs
-----------------------------
-
-ARMv8.3-PAuth implementations may use an <span style="font-variant:
-small-caps">implementation defined</span> cipher.  Apple CPUs implement an
-optional custom cipher with two key-generation changes relevant to xnu.
-
-
-### Per-boot diversifier
-
-Apple's optional cipher adds a per-boot diversifier.  In effect, even if xnu
-initializes the "ARM key" registers (`APIAKey`, `APGAKey`, etc.) with constants,
-signing a given value will still produce different signatures from boot to boot.
-
-
-### Kernel/userspace diversifier
-
-Apple CPUs also contain a second diversifier known as `KERNKey`.  `KERNKey` is
-automatically mixed into the final signing key (or not) based on the CPU's
-exception level. When xnu needs to sign or authenticate userspace-signed
-pointers, it uses the `ml_enable_user_jop_key` and `ml_disable_user_jop_key`
-routines to manually enable or disable `KERNKey`. `KERNKey` allows the CPU to
-effectively use different signing keys for userspace and kernel, without needing
-to explicitly reprogram the generic ARM keys on every kernel entry and exit.
-
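
As a minimal sketch of that pattern in C (the `ml_enable_user_jop_key` /
`ml_disable_user_jop_key` signatures are assumed from
`osfmk/arm/machine_routines.h`, and `resign_data_ptr_for_user` is a
hypothetical helper, not an xnu routine):

```c
#include <ptrauth.h>
#include <stdint.h>

/* Assumed signatures (see osfmk/arm/machine_routines.h). */
uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);
void     ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);

/* Hypothetical helper: re-sign a data pointer so that its signature
 * verifies under the userspace keys. KERNKey is mixed out only for
 * the duration of the enable/disable window. */
static void *
resign_data_ptr_for_user(void *value, uint64_t user_jop_key)
{
	uint64_t saved = ml_enable_user_jop_key(user_jop_key);
	void *resigned = ptrauth_sign_unauthenticated(
	    ptrauth_strip(value, ptrauth_key_asda), ptrauth_key_asda, 0);
	ml_disable_user_jop_key(user_jop_key, saved);
	return resigned;
}
```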
-
-xnu PAC infrastructure
-----------------------
-
-For historical reasons, the xnu codebase collectively refers to xnu + iOS's
-pointer authentication infrastructure as Pointer Authentication Codes (PAC). The
-remainder of this document will follow this terminology for consistency with
-xnu.
-
-### arm64e binary "slice"
-
-Binaries with PAC instructions are not fully backwards-compatible with non-PAC
-CPUs. Hence LLVM/iOS treat PAC-enabled binaries as a distinct ABI "slice" named
-arm64e. xnu enforces this distinction by disabling the PAC keys when returning
-to non-arm64e userspace, effectively turning ARMv8.3-PAuth auth and sign
-instructions into no-ops (see the ["SCTLR_EL1"](#sctlr-el1) heading below for
-more details).
-
-### Kernel pointer signing
-
-xnu is built with `-arch arm64e`, which causes LLVM to automatically sign and
-authenticate function pointers and return addresses spilled onto the stack. This
-process is largely transparent to software, with some exceptions:
-
-- During early boot, xnu rebases and signs the pointers stored in its own
-  `__thread_starts` section (see `rebase_threaded_starts` in
-  `osfmk/arm/arm_init.c`).
-
-- As parts of the userspace shared region are paged in, the page-in handler must
-  also slide and re-sign any signed pointers stored in it.  The ["Signed
-  pointers in shared regions"](#signed-pointers-in-shared-regions) section
-  discusses this in further detail.
-
-- Assembly routines must manually sign the return address with `pacibsp` before
-  pushing it onto the stack, and use an authenticating `retab` instruction in
-  place of `ret`.  xnu provides assembly macros `ARM64_STACK_PROLOG` and
-  `ARM64_STACK_EPILOG` which emit the appropriate instructions for both arm64
-  and arm64e targets.
-
-  Likewise, branches in assembly to signed C function pointers must use the
-  authenticating `blraa` instruction in place of `blr`.
-
-- Signed pointers must be stripped with `ptrauth_strip` before they can be
-  compared against compile-time constants like `VM_MIN_KERNEL_ADDRESS`.
-
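As a concrete illustration of the last point, a minimal sketch using the
clang `<ptrauth.h>` intrinsics (`fn_ptr_in_kernel_range` is illustrative,
and assumes xnu's `VM_MIN_KERNEL_ADDRESS`/`VM_MAX_KERNEL_ADDRESS`
constants are in scope):

```c
#include <ptrauth.h>
#include <stdbool.h>
#include <stdint.h>

/* A signed function pointer carries its PAC in the upper address bits,
 * so comparing it directly against VM_MIN_KERNEL_ADDRESS would almost
 * never match; strip the signature first. */
static bool
fn_ptr_in_kernel_range(void (*fn)(void))
{
	uintptr_t raw =
	    (uintptr_t)ptrauth_strip(fn, ptrauth_key_function_pointer);
	return raw >= VM_MIN_KERNEL_ADDRESS && raw <= VM_MAX_KERNEL_ADDRESS;
}
```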
-### Testing data pointer signing
-
-xnu contains tests for each manually qualified data pointer; these should be
-updated as new pointers are qualified. The tests allocate a structure
-containing a __ptrauth qualified member, and write a pointer to that member.
-We can then compare the stored value, which should be signed, with a manually
-constructed signature. See `ALLOC_VALIDATE_DATA_PTR`.
-
-Tests are triggered by setting the `kern.run_ptrauth_data_tests` sysctl. The
-sysctl is implemented, and BSD structures are tested, in `bsd/tests/ptrauth_data_tests_sysctl.c`.
-Mach structures are tested in `osfmk/tests/ptrauth_data_tests.c`.
-
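The shape of such a test, sketched with clang's `__ptrauth` qualifier
(the key, discriminator, and helper names here are illustrative; the
real test plumbing is `ALLOC_VALIDATE_DATA_PTR`):

```c
#include <ptrauth.h>
#include <stdbool.h>
#include <string.h>

struct demo {
	/* address-diversified, extra discriminator 0x1234 (illustrative) */
	void *__ptrauth(ptrauth_key_process_independent_data, 1, 0x1234) p;
};

static bool
demo_signature_matches(struct demo *d, void *value)
{
	void *raw, *expected;

	d->p = value;           /* compiler signs with the member's schema */
	memcpy(&raw, (void *)&d->p, sizeof(raw));    /* raw signed bits */
	expected = ptrauth_sign_unauthenticated(value,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(&d->p, 0x1234));
	return raw == expected;
}
```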
-### Managing PAC register state
-
-xnu generally tries to avoid reprogramming the CPU's PAC-related registers on
-kernel entry and exit, since this could add significant overhead to a hot
-codepath. Instead, xnu uses the following strategies to manage the PAC register
-state.
-
-#### A keys
-
-Userspace processes' A keys (`AP{IA,DA,GA}Key`) are derived from the field
-`jop_pid` inside `struct task`.  For implementation reasons, an exact duplicate
-of this field is cached in the corresponding `struct machine_thread`.
-
-
-A keys are randomly generated at shared region initialization time (see ["Signed
-pointers in shared regions"](#signed-pointers-in-shared-regions) below) and
-copied into `jop_pid` during process activation.  This shared region, and hence
-associated A keys, may be shared among arm64e processes under specific
-circumstances:
-
-1. "System processes" (i.e., processes launched from first-party signed binaries
-   on the iOS system image) generally use a common shared region with a default
-   `jop_pid` value, separate from non-system processes.
-
-   If a system process wishes to isolate its A keys even from other system
-   processes, it may opt into a custom shared region using an entitlement in
-   the form `com.apple.pac.shared_region_id=[...]`.  That is, two processes with
-   the entitlement `com.apple.pac.shared_region_id=foo` would share A keys and
-   shared regions with each other, but not with other system processes.
-
-2. Other arm64e processes automatically use the same shared region/A keys if
-   their respective binaries are signed with the same team-identifier strings.
-
-3. `posix_spawnattr_set_ptrauth_task_port_np()` allows explicit "inheriting" of
-   A keys during `posix_spawn()`, using a supplied mach task port.  This API is
-   intended to support debugging tools that may need to auth or sign pointers
-   using the target process's keys.
-
-#### B keys
-
-Each process is assigned a random set of "B keys" (`AP{IB,DB}Key`) on process
-creation.  As a special exception, processes which inherit their parents' memory
-address space (e.g., during `fork`) will also inherit their parents' B keys.
-These keys are stored as the field `rop_pid` inside `struct task`, with an exact
-duplicate in `struct machine_thread` for implementation reasons.
-
-xnu reprograms the ARM B-key registers during context switch, via the macro
-`set_process_dependent_keys_and_sync_context` in `cswitch.s`.
-
-xnu uses the B keys internally to sign pointers pushed onto the kernel stack,
-such as stashed LR values.  Note that xnu does *not* need to explicitly switch
-to a dedicated set of "kernel B keys" to do this:
-
-1. The `KERNKey` diversifier already ensures that the actual signing keys are
-   different between xnu and userspace.
-
-2. Although reprogramming the ARM B-key registers will affect xnu's signing keys
-   as well, pointers pushed onto the stack are inherently short-lived.
-   Specifically, there will never be a situation where a stack pointer value is
-   signed with one `current_task()`, but needs to be authed under a different
-   active `current_task()`.
-
-#### SCTLR_EL1
-
-As discussed above, xnu disables the ARM keys when returning to non-arm64e
-userspace processes.  This is implemented by manipulating the `EnIA`, `EnIB`,
-`EnDA`, and `EnDB` bits in the ARM `SCTLR_EL1` system register.  When
-these bits are cleared, auth or sign instructions using the respective keys
-will simply pass through their inputs unmodified.
-
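A sketch of what clearing those bits amounts to, using the architectural
ARMv8.3 bit positions (xnu's own definitions live in
`osfmk/arm64/proc_reg.h`, and its real code does this on the
exception-return path rather than in a C helper):

```c
#include <stdint.h>

#define SCTLR_EnIA (1ULL << 31)  /* APIAKey */
#define SCTLR_EnIB (1ULL << 30)  /* APIBKey */
#define SCTLR_EnDA (1ULL << 27)  /* APDAKey */
#define SCTLR_EnDB (1ULL << 13)  /* APDBKey */

/* With these bits clear, pac and aut instructions using the
 * corresponding keys pass their inputs through unmodified. */
static inline void
pac_keys_disable_el0(void)
{
	uint64_t sctlr;
	__asm__ volatile ("mrs %0, SCTLR_EL1" : "=r"(sctlr));
	sctlr &= ~(SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB);
	__asm__ volatile ("msr SCTLR_EL1, %0" :: "r"(sctlr));
	__asm__ volatile ("isb");
}
```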
-Initially, xnu cleared these bits during every `exception_return` to a
-non-arm64e process.  Since xnu itself uses these keys, the exception vector
-needs to restore the same bits on every exception entry (implemented in the
-`EL0_64_VECTOR` macro).
-
-Apple A13 CPUs now have controls that allow xnu to keep the PAC keys enabled at
-EL1, independent of `SCTLR_EL1` settings.  On these CPUs, xnu only needs to
-reconfigure `SCTLR_EL1` when context-switching from a "vanilla" arm64 process to
-an arm64e process, or vice-versa (`pmap_switch_user_ttb_internal`).
-
-### Signed pointers in shared regions
-
-Each userspace process has a *shared region* mapped into its address space,
-consisting of code and data shared across all processes of the same processor
-type, bitness, root directory, and (for arm64e processes) team ID.  Comments at
-the top of `osfmk/vm/vm_shared_region.c` discuss this region, and the process of
-populating it, in more detail.
-
-As the VM layer pages in parts of the shared region, any embedded pointers must
-be rebased.  Although this process is not new, PAC adds a new step: these
-embedded pointers may be signed, and must be re-signed after they are rebased.
-This process is implemented as `vm_shared_region_slide_page_v3` in
-`osfmk/vm/vm_shared_region.c`.
-
-xnu signs these embedded pointers using a shared-region-specific A key
-(`sr_jop_key`), which is randomly generated when the shared region is created.
-Since these pointers will be consumed by userspace processes, xnu temporarily
-switches to the userspace A keys when re-signing them.
-
-### Signing spilled register state
-
-xnu saves register state into kernel memory when taking exceptions, and reloads
-this state on exception return.  If an attacker has write access to kernel
-memory, they can modify this saved state and effectively get control over a
-victim thread's control flow.
-
-xnu hardens against this attack by calling `ml_sign_thread_state` on exception
-entry to hash certain registers before they're saved to memory.  On exception
-return, it calls the complementary `ml_check_signed_state` function to ensure
-that the reloaded values still match this hash.  `ml_sign_thread_state` hashes a
-handful of particularly sensitive registers:
-
-* `pc, lr`: directly affect control-flow
-* `cpsr`: controls process's exception level
-* `x16, x17`: used by LLVM to temporarily store unauthenticated addresses
-
-`ml_sign_thread_state` also uses the address of the thread's `arm_saved_state_t`
-as a diversifier.  This step keeps attackers from using `ml_sign_thread_state`
-as a signing oracle.  An attacker may attempt to create a sacrificial thread,
-set this thread to some desired state, and use kernel memory access gadgets to
-transplant the xnu-signed state onto a victim thread.  Because the victim
-process has a different `arm_saved_state_t` address as a diversifier,
-`ml_check_signed_state` will detect a hash mismatch in the victim thread.
-
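Conceptually the hash is a `pacga` chain seeded with the save-area
address; a rough C rendering (the real routine is hand-written assembly
in `machine_routines_asm.s`, so treat this as a model, not the
implementation):

```c
#include <stdint.h>

/* Each pacga folds one more register into a rolling MAC. Seeding the
 * chain with the arm_saved_state_t address means a hash transplanted
 * from another thread's save area will not verify here. */
static uint64_t
thread_state_hash(const void *saved_state, uint64_t pc, uint64_t cpsr,
    uint64_t lr, uint64_t x16, uint64_t x17)
{
	uint64_t h = (uint64_t)(uintptr_t)saved_state;   /* diversifier */
	__asm__ ("pacga %0, %1, %2" : "=r"(h) : "r"(pc),   "r"(h));
	__asm__ ("pacga %0, %1, %2" : "=r"(h) : "r"(cpsr), "r"(h));
	__asm__ ("pacga %0, %1, %2" : "=r"(h) : "r"(lr),   "r"(h));
	__asm__ ("pacga %0, %1, %2" : "=r"(h) : "r"(x16),  "r"(h));
	__asm__ ("pacga %0, %1, %2" : "=r"(h) : "r"(x17),  "r"(h));
	return h;
}
```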
-Apart from exception entry and return, xnu calls `ml_check_signed_state` and
-`ml_sign_thread_state` whenever it needs to mutate one of these sensitive
-registers (e.g., advancing the PC to the next instruction).  This process looks
-like:
-
-1. Disable interrupts
-2. Load `pc, lr, cpsr, x16, x17` values and hash from thread's
-   `arm_saved_state_t` into registers
-3. Call `ml_check_signed_state` to ensure values have not been tampered with
-4. Mutate one or more of these values using *only* register-to-register
-   instructions
-5. Call `ml_sign_thread_state` to re-hash the mutated thread state
-6. Store the mutated values and new hash back into thread's `arm_saved_state_t`.
-7. Restore old interrupt state
-
-Critically, none of the sensitive register values can be spilled to memory
-between steps 1 and 7.  Otherwise an attacker with kernel memory access could
-modify one of these values and use step 5 as a signing oracle. xnu implements
-these routines entirely in assembly to ensure full control over register use,
-using a macro `MANIPULATE_SIGNED_THREAD_STATE()` to generate boilerplate
-instructions.
-
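In C-like pseudocode the flow reads as follows. The accessor names
mirror xnu's `get_saved_state_*`/`set_saved_state_*` helpers, but the
sketch is illustrative: a C compiler is free to spill any of these
locals to memory between steps, which is exactly why the real code is
assembly.

```c
static void
advance_saved_pc(arm_saved_state_t *ss)
{
	boolean_t istate = ml_set_interrupts_enabled(FALSE);    /* step 1 */

	uint64_t pc   = get_saved_state_pc(ss);                 /* step 2 */
	uint32_t cpsr = get_saved_state_cpsr(ss);
	uint64_t lr   = get_saved_state_lr(ss);
	uint64_t x16  = get_saved_state_reg(ss, 16);
	uint64_t x17  = get_saved_state_reg(ss, 17);

	ml_check_signed_state(ss, pc, cpsr, lr, x16, x17);      /* step 3 */
	pc += 4;                 /* step 4: register-to-register only */
	ml_sign_thread_state(ss, pc, cpsr, lr, x16, x17);       /* step 5 */
	set_saved_state_pc(ss, pc);                             /* step 6 */

	(void) ml_set_interrupts_enabled(istate);               /* step 7 */
}
```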
-Interrupts must be disabled whenever `ml_check_signed_state` or
-`ml_sign_thread_state` are called, starting *before* their inputs (`x0`--`x5`)
-are populated.  To understand why, consider what would happen if the CPU could
-be interrupted just before step 5 above.  xnu's exception handler would spill
-the entire register state to memory.  If an attacker has kernel memory access,
-they could attempt to replace the spilled `x0`--`x5` values.  These modified
-values would then be reloaded into the CPU during exception return; and
-`ml_sign_thread_state` would be called with new, attacker-controlled inputs.
-
-### thread_set_state
-
-The `thread_set_state` call lets userspace modify the register state of a target
-thread.  Signed userspace state adds a wrinkle to this process, since the
-incoming FP, LR, SP, and PC values are signed using the *userspace process's*
-key.
-
-xnu handles this in two steps.  First, `machine_thread_state_convert_from_user`
-converts the userspace thread state representation into an in-kernel
-representation.  Signed values are authenticated using `pmap_auth_user_ptr`,
-which involves temporarily switching to the userspace keys.
-
-Second, `thread_state64_to_saved_state` applies this converted state to the
-target thread.  Whenever `thread_state64_to_saved_state` modifies a register
-that makes up part of the thread state hash, it uses
-`MANIPULATE_SIGNED_THREAD_STATE()` as described above to update this hash.
-
-
-### Signing arbitrary data blobs
-
-xnu provides `ptrauth_utils_sign_blob_generic` and `ptrauth_utils_auth_blob_generic`
-to sign and authenticate arbitrary blobs of data. Callers are responsible for
-storing the pointer-sized signature returned. The signature is a rolling MAC
-of the data, using the `pacga` instruction, mixed with a provided salt and optionally
-further diversified by storage address.
-
-Use of these functions is inherently racy. The data must be read from memory
-before each pointer-sized block can be added to the signature. In normal operation,
-standard thread-safety semantics protect from corruption; in the malicious
-case, however, it may be possible to time overwriting the buffer before
-signing or after authentication.
-
-Callers of these functions must take care to minimise these race windows by
-using them immediately preceding/following a write/read of the blob's data.
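
A usage sketch under those guidelines (the signatures and the
`PTRAUTH_ADDR_DIVERSIFY` flag are assumed from xnu's `ptrauth_utils.h`;
the struct, salt, and flag value below are illustrative):

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Assumed interface; the real declarations live in ptrauth_utils.h. */
uint64_t ptrauth_utils_sign_blob_generic(void *ptr, size_t len,
    uint64_t salt, int flags);
void ptrauth_utils_auth_blob_generic(void *ptr, size_t len,
    uint64_t salt, int flags, uint64_t sig);
#define PTRAUTH_ADDR_DIVERSIFY 0x1   /* placeholder value */

struct signed_blob {
	uint8_t  payload[64];
	uint64_t sig;           /* pointer-sized rolling MAC of payload */
};

static void
signed_blob_write(struct signed_blob *b, const uint8_t *src)
{
	memcpy(b->payload, src, sizeof(b->payload));
	/* Sign immediately after the write to keep the race window small. */
	b->sig = ptrauth_utils_sign_blob_generic(b->payload,
	    sizeof(b->payload), 0xb10b5a17 /* illustrative salt */,
	    PTRAUTH_ADDR_DIVERSIFY);
}

static void
signed_blob_read(struct signed_blob *b, uint8_t *dst)
{
	/* Authenticate immediately before the read. */
	ptrauth_utils_auth_blob_generic(b->payload, sizeof(b->payload),
	    0xb10b5a17, PTRAUTH_ADDR_DIVERSIFY, b->sig);
	memcpy(dst, b->payload, sizeof(b->payload));
}
```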
index 1320d8d0cb04efc84f0ce23057ed92b0a759e501..2fc024f6e1fa20b9a78b35e2fca5799c1943d7ee 100644 (file)
@@ -201,7 +201,17 @@ static uuid_string_t            gIOHibernateBridgeBootSessionUUIDString;
 
 static uint32_t                 gIOHibernateFreeRatio = 0;       // free page target (percent)
 uint32_t                        gIOHibernateFreeTime  = 0 * 1000;  // max time to spend freeing pages (ms)
-static uint64_t                 gIOHibernateCompression = 0x80;  // default compression 50%
+
+enum {
+       HIB_COMPR_RATIO_ARM64  = (0xa5),  // compression ~65%. Since we don't support retries, we start higher.
+       HIB_COMPR_RATIO_INTEL  = (0x80)   // compression 50%
+};
+
+#if defined(__arm64__)
+static uint64_t                 gIOHibernateCompression = HIB_COMPR_RATIO_ARM64;
+#else
+static uint64_t                 gIOHibernateCompression = HIB_COMPR_RATIO_INTEL;
+#endif /* __arm64__ */
 boolean_t                       gIOHibernateStandbyDisabled;
 
 static IODTNVRAM *              gIOOptionsEntry;
@@ -2179,6 +2189,18 @@ hibernate_write_image(void)
                header->sleepTime    = gIOLastSleepTime.tv_sec;
 
                header->compression     = ((uint32_t)((compressedSize << 8) / uncompressedSize));
+#if defined(__arm64__)
+               /*
+                * We don't support retrying on hibernation failure, so
+                * we don't want to set this value to anything smaller
+                * just because we may have been lucky this time around,
+                * though we'll let it go higher.
+                */
+               if (header->compression < HIB_COMPR_RATIO_ARM64) {
+                       header->compression  = HIB_COMPR_RATIO_ARM64;
+               }
+#endif /* __arm64__ */
+
                gIOHibernateCompression = header->compression;
 
                count = vars->fileVars->fileExtents->getLength();
index 2aec3e5c37ebf4a426e354cf8883181d85f3a9ea..ed78c3d76c4e267ba981d335ffd4bb8409a91c9d 100644 (file)
@@ -3060,6 +3060,15 @@ IOPMrootDomain::powerChangeDone( unsigned long previousPowerState )
                        isRTCAlarmWake = true;
                        fullWakeReason = kFullWakeReasonLocalUser;
                        requestUserActive(this, "RTC debug alarm");
+               } else {
+#if HIBERNATION
+                       OSSharedPtr<OSObject> hibOptionsProp = copyProperty(kIOHibernateOptionsKey);
+                       OSNumber * hibOptions = OSDynamicCast(OSNumber, hibOptionsProp.get());
+                       if (hibOptions && !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake)) {
+                               fullWakeReason = kFullWakeReasonLocalUser;
+                               requestUserActive(this, "hibernate user wake");
+                       }
+#endif
                }
 
                // stay awake for at least 30 seconds
index 903cda7953957d639d46468eca227e0b2a603e51..8205d93bc8535b1efbf8ae5bf5db45255e180b2c 100644 (file)
@@ -528,10 +528,6 @@ IOSetRecoveryBoot(bsd_bootfail_mode_t mode, uuid_t volume_uuid, boolean_t reboot
 
        // Clean up and reboot!
 do_reboot:
-       if (nvram != NULL) {
-               nvram->release();
-       }
-
        if (boot_command_recover != NULL) {
                boot_command_recover->release();
        }
index 64215c70d1fe9e34f25d4569323dd476c446649b..933c01bf646ca4110f9fd6ada87c3fbb116933c3 100644 (file)
@@ -47,6 +47,8 @@ COMP_SUBDIRS = \
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -222,7 +224,17 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 077b90f9b03625edae0af57f94fe88fae76fc59f..2f78dc30e0c7cce44ee363a3d1ca1c35c4de42bd 100644 (file)
@@ -43,6 +43,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -157,7 +159,17 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 3d68f7aa8c0d192441000ca18c06fa5bc71f3398..308dc426a9fc1d5673eb730101c440acd51f7b4a 100644 (file)
@@ -40,6 +40,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -74,8 +76,18 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
 
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 8f654850aaf4a7f6e5e02e5eabaf9bbf73262c8c..63dd0de0346b8aee8d490586f8b0f0e295cf3afd 100644 (file)
@@ -185,6 +185,9 @@ endif
 ifeq ($(LIBTOOL),)
        export LIBTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find libtool)
 endif
+ifeq ($(OTOOL),)
+       export OTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find otool)
+endif
 ifeq ($(NM),)
        export NM := $(shell $(XCRUN) -sdk $(SDKROOT) -find nm)
 endif
index 508db7f967b449663037177b73bcdf9f0a126000..5c5ef1a9263da31d04609489b476b938e100181f 100644 (file)
@@ -69,8 +69,8 @@ MACHINE_FLAGS_ARM64_T8010 = -DARM64_BOARD_CONFIG_T8010 -mcpu=hurricane
 MACHINE_FLAGS_ARM64_T8011 = -DARM64_BOARD_CONFIG_T8011 -mcpu=hurricane
 MACHINE_FLAGS_ARM64_BCM2837 = -DARM64_BOARD_CONFIG_BCM2837
 MACHINE_FLAGS_ARM64_T8020 = -DARM64_BOARD_CONFIG_T8020 -mcpu=vortex
-MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -mcpu=firestorm
-MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -mcpu=firestorm
+MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -D__ARM_ARCH_8_5__=1
+MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -D__ARM_ARCH_8_5__=1
 
 
 #
@@ -228,6 +228,13 @@ endef
 ARCH_FLAGS_X86_64        = -arch x86_64
 ARCH_FLAGS_X86_64H       = -arch x86_64h
 
+ifeq ($(RC_ProjectName),xnu_libraries)
+WILL_BUILD_STATIC_KC := 1
+BUILD_STATIC_LINK := 1
+BUILD_XNU_LIBRARY := 1
+RC_NONARCH_CFLAGS += -D__BUILDING_XNU_LIBRARY__=1
+endif
+
 ifneq ($(filter ARM ARM64,$(CURRENT_ARCH_CONFIG)),)
 
 ifneq ($(findstring _Sim,$(RC_ProjectName)),)
@@ -744,10 +751,9 @@ LDFLAGS_KERNEL_ONLY  +=   \
 #
 LD_KERNEL_LIBS    = -lcc_kext
 LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel
-
 # Link opensource binary library
-ifneq ($(filter T8020 T8020 T8101 T8101,$(CURRENT_MACHINE_CONFIG)),)
-LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
+ifneq ($(filter T8020 T8101 T8020 T8101,$(CURRENT_MACHINE_CONFIG)),)
+       LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
 endif
 
 #
index 222b355d1d2eef04ff5dce7737f8a3f4a0e891b8..90726f0f370a5c733c52a4c121a1aebadee1682d 100644 (file)
@@ -46,13 +46,22 @@ endif
 # Rules for the highly parallel "build" phase, where each build configuration
 # writes into their own $(TARGET) independent of other build configs
 #
-# There are 4 primary build outputs:
+# There are 5 primary build outputs:
 # 1) $(KERNEL_FILE_NAME).unstripped    (raw linked kernel, unstripped)
 # 2) $(KERNEL_FILE_NAME)               (stripped kernel, with optional CTF data)
 # 3) $(KERNEL_FILE_NAME).dSYM          (dSYM)
 # 4) $(KERNEL_FILE_NAME).link          (bits for static linking)
+# 5) lib$(KERNEL_FILE_NAME).a          (static archive for testing)
 
 ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+KERNEL_STATIC_LINK_TARGETS = \
+       $(TARGET)/lib$(KERNEL_FILE_NAME).a
+KERNEL_STATIC_LINK_DST = \
+       $(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a
+
+else
 
 KERNEL_STATIC_LINK_TARGETS = \
        $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a
@@ -67,11 +76,17 @@ KERNEL_STATIC_LINK_DST = \
                        $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/$(KERNEL_LLDBBOOTSTRAP_NAME)
 
 endif
+endif
 
 do_build_all:: do_build_kernel
 
 .PHONY: do_build_kernel
 
+ifeq ($(BUILD_XNU_LIBRARY),1)
+do_build_kernel: $(KERNEL_STATIC_LINK_TARGETS)
+
+else
+
 do_build_kernel: $(TARGET)/$(KERNEL_FILE_NAME) $(TARGET)/$(KERNEL_FILE_NAME).unstripped $(KERNEL_STATIC_LINK_TARGETS)
        @:
 
@@ -84,6 +99,8 @@ endif
 do_build_kernel_dSYM: $(TARGET)/$(KERNEL_FILE_NAME).dSYM
        @:
 
+endif
+
 .LDFLAGS: ALWAYS
        $(_v)$(REPLACECONTENTS) $@ $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) $(LD_KERNEL_LIBS)
 .CFLAGS: ALWAYS
@@ -113,6 +130,15 @@ $(TARGET)/$(KERNEL_FILE_NAME).dSYM: $(TARGET)/$(KERNEL_FILE_NAME).unstripped
        $(_v)$(MV) $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME).unstripped $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
        $(_v)$(TOUCH) $@
 
+ifeq ($(BUILD_XNU_LIBRARY),1)
+$(TARGET)/lib$(KERNEL_FILE_NAME).a: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).libfilelist)) nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
+       $(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
+       @$(LOG_LIBTOOL) "$(@F)"
+       $(_v)$(CAT) $(filter %.libfilelist,$+) < /dev/null > link.filelist
+       $(_v)$(LIBTOOL) -static -csD -filelist link.filelist -o $@
+       $(_v)$(LN) $(call function_convert_build_config_to_objdir,$(CURRENT_BUILD_CONFIG))/lib$(KERNEL_FILE_NAME).a $(OBJROOT)/lib$(KERNEL_FILE_NAME).a
+endif
+
 $(TARGET)/$(KERNEL_FILE_NAME).unstripped: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).filelist)) lastkerneldataconst.o lastkernelconstructor.o nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
        $(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
 ifeq ($(PRE_LTO),1)
@@ -242,6 +268,7 @@ do_build_install_non_primary:: do_install_machine_specific_KDK_dSYM
 endif
 endif
 
+ifneq ($(BUILD_XNU_LIBRARY),1)
 ifeq ($(INSTALL_XNU_DEBUG_FILES),1)
 do_build_install_primary:: do_install_xnu_debug_files
 endif
@@ -250,6 +277,7 @@ endif
 
 do_install_xnu_debug_files:    $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/README.DEBUG-kernel.txt
        @:
+endif
 
 #
 # If the timestamp indicates the DSTROOT kernel is out of
@@ -273,7 +301,14 @@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME): $(TARGET)/$(KERNEL_FILE_NA
        exit $$cmdstatus
 
 ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
 
+$(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a: $(TARGET)/lib$(KERNEL_FILE_NAME).a ALWAYS
+       $(_v)$(MKDIR) $(dir $@)
+       @$(LOG_INSTALL) "$(@F)"
+       $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+
+else
 $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a ALWAYS
        $(_v)$(MKDIR) $(dir $@)
        @$(LOG_INSTALL) "$(@F)"
@@ -298,6 +333,7 @@ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).al
        $(_v)$(MKDIR) $(dir $@)
        @$(LOG_INSTALL) "$(@F)"
        $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+endif
 
 # BUILD_STATIC_LINK
 endif
@@ -355,6 +391,16 @@ $(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME) $(DSTROO
        exit $$cmdstatus
 
 .PHONY: do_install_machine_specific_kernel do_install_machine_specific_kernel_dSYM
+.PHONY: do_install_machine_specific_KDK_dSYM
+
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+do_install_machine_specific_kernel: $(KERNEL_STATIC_LINK_DST)
+       @:
+do_install_machine_specific_kernel_dSYM:
+       @:
+
+else
 
 do_install_machine_specific_kernel: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME)                \
                        $(SYMROOT)/$(KERNEL_FILE_NAME)                                                              \
@@ -368,8 +414,6 @@ do_install_machine_specific_kernel_dSYM: \
                        $(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
        @:
 
-.PHONY: do_install_machine_specific_KDK_dSYM
-
 do_install_machine_specific_KDK_dSYM: \
                        $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist \
                        $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros \
@@ -377,6 +421,8 @@ do_install_machine_specific_KDK_dSYM: \
                        $(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
        @:
 
+endif
+
 # The $(RM) is needed so that the $(LN) doesn't dereference an existing
 # symlink during incremental builds and create a new symlink inside
 # the target of the existing symlink
index e1ab49bc32fe8aac7da1efcc6dce7a17b4ce2d73..c5a98999c50b9043f6743fd0780ed6fb14e49f45 100644 (file)
@@ -606,7 +606,7 @@ else ifneq ($(filter $(RC_ProjectName),xnu_headers_driverkit),)
 install: installhdrs_desktop
 else
 
-install: installhdrs install_textfiles install_config install_kernels install_aliases checkstyle
+install: installhdrs install_textfiles install_config install_kernels install_aliases
 endif
 
 .PHONY: install_embedded install_release_embedded install_development_embedded install_desktop
@@ -724,20 +724,6 @@ TAGS: cscope.files
        @-cat cscope.files | etags -l auto -S - 2> /dev/null
        @rm -f cscope.files 2> /dev/null
 
-#
-# Check or reformat source code for official xnu code style
-#
-.PHONY: checkstyle restyle check_uncrustify uncrustify
-
-# User-friendly aliases for those who prefer to remember the name of the tool.
-check_uncrustify: checkstyle
-uncrustify: restyle
-
-checkstyle:
-       ${_V}$(SRCROOT)/tools/uncrustify.sh
-
-restyle:
-       ${_V}$(SRCROOT)/tools/uncrustify.sh -f
 
 .PHONY: help
 
index 80a448ecb2cb4d4805fd43a81f656534a547f4f4..44610bf50a674505a70ff3df0db1ad281c2b5496 100644 (file)
@@ -308,7 +308,7 @@ arm_init(
        cpu_data_init(&BootCpuData);
 #if defined(HAS_APPLE_PAC)
        /* bootstrap cpu process dependent key for kernel has been loaded by start.s */
-       BootCpuData.rop_key = KERNEL_ROP_ID;
+       BootCpuData.rop_key = ml_default_rop_pid();
        BootCpuData.jop_key = ml_default_jop_pid();
 #endif /* defined(HAS_APPLE_PAC) */
 
index 24b9c76982c7c378252284b788e78f046cd3211c..22cb5a66fab8a622fe78194c458264d76b6ee2fe 100644 (file)
@@ -1269,6 +1269,7 @@ uint32_t ml_update_cluster_wfe_recommendation(uint32_t wfe_cluster_id, uint64_t
 #define UNSIGN_PTR(p) \
        SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)
 
+uint64_t ml_default_rop_pid(void);
 uint64_t ml_default_jop_pid(void);
 void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
 void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
index 9ce4fa8cf9d016cdf09d764ef48804f5a8fa8640..0c5617c3044c526b4ee2d62a7f2a6f49e4a0212f 100644 (file)
@@ -27,6 +27,9 @@
  */
 
 #include <pexpert/pexpert.h>
+#if __arm64__
+#include <pexpert/arm64/board_config.h>
+#endif /* __arm64__ */
 
 #include <arm/cpuid_internal.h>
 #include <arm/pmap.h>
@@ -51,3 +54,17 @@ configure_misc_apple_regs(void)
 }
 
 #endif /* __arm64__ */
+
+#if HAS_APPLE_PAC
+uint64_t
+ml_default_rop_pid(void)
+{
+       return 0;
+}
+
+uint64_t
+ml_default_jop_pid(void)
+{
+       return 0;
+}
+#endif /* HAS_APPLE_PAC */
index 38b238d82f29fef956f432c72be11f850acea417..4719ce5b87adca2b2589eeef570a55e6e1e6371f 100644 (file)
@@ -691,12 +691,6 @@ extern vm_offset_t   segPPLDATAB;
 extern unsigned long segSizePPLDATA;
 extern vm_offset_t   segPPLTEXTB;
 extern unsigned long segSizePPLTEXT;
-#if __APRR_SUPPORTED__
-extern vm_offset_t   segPPLTRAMPB;
-extern unsigned long segSizePPLTRAMP;
-extern void ppl_trampoline_start;
-extern void ppl_trampoline_end;
-#endif
 extern vm_offset_t   segPPLDATACONSTB;
 extern unsigned long segSizePPLDATACONST;
 
@@ -1943,10 +1937,6 @@ static void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);
 static void pmap_trim_self(pmap_t pmap);
 static void pmap_trim_subord(pmap_t subord);
 
-#if __APRR_SUPPORTED__
-static uint64_t pte_to_xprr_perm(pt_entry_t pte);
-static pt_entry_t xprr_perm_to_pte(uint64_t perm);
-#endif /* __APRR_SUPPORTED__*/
 
 /*
  * Temporary prototypes, while we wait for pmap_enter to move to taking an
@@ -4094,111 +4084,9 @@ pmap_pte(
 
 #endif
 
-#if __APRR_SUPPORTED__
-/*
- * Indicates whether the given PTE has special restrictions due to the current
- * APRR settings.
- */
-static boolean_t
-is_pte_aprr_protected(pt_entry_t pte)
-{
-       uint64_t aprr_el0_value;
-       uint64_t aprr_el1_value;
-       uint64_t aprr_index;
-
-       MRS(aprr_el0_value, APRR_EL0);
-       MRS(aprr_el1_value, APRR_EL1);
-       aprr_index = PTE_TO_APRR_INDEX(pte);
-
-       /* Check to see if this mapping had APRR restrictions. */
-       if ((APRR_EXTRACT_IDX_ATTR(aprr_el0_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL0_RESET, aprr_index)) ||
-           (APRR_EXTRACT_IDX_ATTR(aprr_el1_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL1_RESET, aprr_index))
-           ) {
-               return TRUE;
-       }
-
-       return FALSE;
-}
-#endif /* __APRR_SUPPORTED__ */
-
 
-#if __APRR_SUPPORTED__
-static boolean_t
-is_pte_xprr_protected(pmap_t pmap __unused, pt_entry_t pte)
-{
-#if __APRR_SUPPORTED__
-       return is_pte_aprr_protected(pte);
-#else /* __APRR_SUPPORTED__ */
-#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
-}
-#endif /* __APRR_SUPPORTED__*/
 
-#if __APRR_SUPPORTED__
-static uint64_t
-__unused pte_to_xprr_perm(pt_entry_t pte)
-{
-#if   __APRR_SUPPORTED__
-       switch (PTE_TO_APRR_INDEX(pte)) {
-       case APRR_FIRM_RX_INDEX:  return XPRR_FIRM_RX_PERM;
-       case APRR_FIRM_RO_INDEX:  return XPRR_FIRM_RO_PERM;
-       case APRR_PPL_RW_INDEX:   return XPRR_PPL_RW_PERM;
-       case APRR_KERN_RW_INDEX:  return XPRR_KERN_RW_PERM;
-       case APRR_FIRM_RW_INDEX:  return XPRR_FIRM_RW_PERM;
-       case APRR_KERN0_RW_INDEX: return XPRR_KERN0_RW_PERM;
-       case APRR_USER_JIT_INDEX: return XPRR_USER_JIT_PERM;
-       case APRR_USER_RW_INDEX:  return XPRR_USER_RW_PERM;
-       case APRR_PPL_RX_INDEX:   return XPRR_PPL_RX_PERM;
-       case APRR_KERN_RX_INDEX:  return XPRR_KERN_RX_PERM;
-       case APRR_USER_XO_INDEX:  return XPRR_USER_XO_PERM;
-       case APRR_KERN_RO_INDEX:  return XPRR_KERN_RO_PERM;
-       case APRR_KERN0_RX_INDEX: return XPRR_KERN0_RO_PERM;
-       case APRR_KERN0_RO_INDEX: return XPRR_KERN0_RO_PERM;
-       case APRR_USER_RX_INDEX:  return XPRR_USER_RX_PERM;
-       case APRR_USER_RO_INDEX:  return XPRR_USER_RO_PERM;
-       default:                  return XPRR_MAX_PERM;
-       }
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
 
-#if __APRR_SUPPORTED__
-static uint64_t
-xprr_perm_to_aprr_index(uint64_t perm)
-{
-       switch (perm) {
-       case XPRR_FIRM_RX_PERM:  return APRR_FIRM_RX_INDEX;
-       case XPRR_FIRM_RO_PERM:  return APRR_FIRM_RO_INDEX;
-       case XPRR_PPL_RW_PERM:   return APRR_PPL_RW_INDEX;
-       case XPRR_KERN_RW_PERM:  return APRR_KERN_RW_INDEX;
-       case XPRR_FIRM_RW_PERM:  return APRR_FIRM_RW_INDEX;
-       case XPRR_KERN0_RW_PERM: return APRR_KERN0_RW_INDEX;
-       case XPRR_USER_JIT_PERM: return APRR_USER_JIT_INDEX;
-       case XPRR_USER_RW_PERM:  return APRR_USER_RW_INDEX;
-       case XPRR_PPL_RX_PERM:   return APRR_PPL_RX_INDEX;
-       case XPRR_KERN_RX_PERM:  return APRR_KERN_RX_INDEX;
-       case XPRR_USER_XO_PERM:  return APRR_USER_XO_INDEX;
-       case XPRR_KERN_RO_PERM:  return APRR_KERN_RO_INDEX;
-       case XPRR_KERN0_RX_PERM: return APRR_KERN0_RO_INDEX;
-       case XPRR_KERN0_RO_PERM: return APRR_KERN0_RO_INDEX;
-       case XPRR_USER_RX_PERM:  return APRR_USER_RX_INDEX;
-       case XPRR_USER_RO_PERM:  return APRR_USER_RO_INDEX;
-       default:                 return APRR_MAX_INDEX;
-       }
-}
-#endif /* __APRR_SUPPORTED__ */
-
-static pt_entry_t
-__unused xprr_perm_to_pte(uint64_t perm)
-{
-#if   __APRR_SUPPORTED__
-       return APRR_INDEX_TO_PTE(xprr_perm_to_aprr_index(perm));
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
-#endif /* __APRR_SUPPORTED__*/
 
 
 /*
@@ -4712,21 +4600,6 @@ pmap_bootstrap(
        }
 #endif /* CONFIG_CSR_FROM_DT */
 
-#if __APRR_SUPPORTED__
-       if (((uintptr_t)(&ppl_trampoline_start)) % PAGE_SIZE) {
-               panic("%s: ppl_trampoline_start is not page aligned, "
-                   "vstart=%#lx",
-                   __FUNCTION__,
-                   vstart);
-       }
-
-       if (((uintptr_t)(&ppl_trampoline_end)) % PAGE_SIZE) {
-               panic("%s: ppl_trampoline_end is not page aligned, "
-                   "vstart=%#lx",
-                   __FUNCTION__,
-                   vstart);
-       }
-#endif /* __APRR_SUPPORTED__ */
 #endif /* XNU_MONITOR */
 
 #if DEVELOPMENT || DEBUG
@@ -5035,16 +4908,6 @@ pmap_static_allocations_done(void)
        /* PPL text is RX for the PPL, RO for the kernel. */
        pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
 
-#if __APRR_SUPPORTED__
-       monitor_start_pa = kvtophys(segPPLTRAMPB);
-       monitor_end_pa = monitor_start_pa + segSizePPLTRAMP;
-
-       /*
-        * The PPLTRAMP pages will be a mix of PPL RX/kernel RO and
-        * PPL RX/kernel RX.  However, all of these pages belong to the PPL.
-        */
-       pa_set_range_monitor(monitor_start_pa, monitor_end_pa);
-#endif
 
        /*
         * In order to support DTrace, the save areas for the PPL must be
@@ -5058,10 +4921,6 @@ pmap_static_allocations_done(void)
                pmap_set_range_xprr_perm(monitor_start_va, monitor_end_va, XPRR_PPL_RW_PERM, XPRR_KERN_RW_PERM);
        }
 
-#if __APRR_SUPPORTED__
-       /* The trampoline must also be specially protected. */
-       pmap_set_range_xprr_perm((vm_offset_t)&ppl_trampoline_start, (vm_offset_t)&ppl_trampoline_end, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
-#endif
 
        if (segSizePPLDATACONST > 0) {
                monitor_start_pa = kvtophys(segPPLDATACONSTB);
@@ -5086,13 +4945,7 @@ pmap_lockdown_ppl(void)
 {
        /* Mark the PPL as being locked down. */
 
-#if __APRR_SUPPORTED__
-       pmap_ppl_locked_down = TRUE;
-       /* Force a trap into to the PPL to update APRR_EL1. */
-       pmap_return(FALSE, FALSE);
-#else
 #error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
 
 }
 #endif /* XNU_MONITOR */
@@ -6654,7 +6507,7 @@ pmap_remove_range_options(
                pt_entry_t      spte;
                boolean_t       managed = FALSE;
 
-               spte = *cpte;
+               spte = *((volatile pt_entry_t*)cpte);
 
 #if CONFIG_PGTRACE
                if (pgtrace_enabled) {
@@ -6689,7 +6542,7 @@ pmap_remove_range_options(
                                if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(cpte)->refcnt)) <= 0) {
                                        panic("pmap_remove_range_options: over-release of ptdp %p for pte %p", ptep_get_ptd(cpte), cpte);
                                }
-                               spte = *cpte;
+                               spte = *((volatile pt_entry_t*)cpte);
                        }
                        /*
                         * It may be possible for the pte to transition from managed
@@ -6713,7 +6566,7 @@ pmap_remove_range_options(
                        }
                        pai = (int)pa_index(pa);
                        LOCK_PVH(pai);
-                       spte = *cpte;
+                       spte = *((volatile pt_entry_t*)cpte);
                        pa = pte_to_pa(spte);
                        if (pai == (int)pa_index(pa)) {
                                managed = TRUE;
@@ -7469,30 +7322,6 @@ pmap_page_protect_options_with_flush_range(
                                tmplate |= pt_attr_leaf_xn(pt_attr);
                        }
 
-#if __APRR_SUPPORTED__
-                       /**
-                        * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
-                        *
-                        * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
-                        * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
-                        * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX. This is similar for
-                        * user execute-only mappings.
-                        */
-                       if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
-                           && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
-                               panic("%s: modifying an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
-                                   __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
-                       }
-
-                       /**
-                        * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
-                        * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
-                        */
-                       if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
-                               panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
-                                   __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
-                       }
-#endif /* __APRR_SUPPORTED__*/
 
                        if (*pte_p != ARM_PTE_TYPE_FAULT &&
                            !ARM_PTE_IS_COMPRESSED(*pte_p, pte_p) &&
@@ -7612,6 +7441,7 @@ pmap_disable_user_jop_internal(pmap_t pmap)
        if (pmap == kernel_pmap) {
                panic("%s: called with kernel_pmap\n", __func__);
        }
+       VALIDATE_PMAP(pmap);
        pmap->disable_jop = true;
 }
 
@@ -7739,7 +7569,7 @@ pmap_protect_options_internal(
                        boolean_t  force_write = FALSE;
 #endif
 
-                       spte = *pte_p;
+                       spte = *((volatile pt_entry_t*)pte_p);
 
                        if ((spte == ARM_PTE_TYPE_FAULT) ||
                            ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
@@ -7763,7 +7593,7 @@ pmap_protect_options_internal(
                                }
                                pai = (int)pa_index(pa);
                                LOCK_PVH(pai);
-                               spte = *pte_p;
+                               spte = *((volatile pt_entry_t*)pte_p);
                                pa = pte_to_pa(spte);
                                if (pai == (int)pa_index(pa)) {
                                        managed = TRUE;
@@ -7871,30 +7701,6 @@ pmap_protect_options_internal(
                        /* We do not expect to write fast fault the entry. */
                        pte_set_was_writeable(tmplate, false);
 
-#if __APRR_SUPPORTED__
-                       /**
-                        * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
-                        *
-                        * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
-                        * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
-                        * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX/XO. This is similar
-                        * for user execute-only mappings.
-                        */
-                       if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
-                           && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
-                               panic("%s: modifying a PPL mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
-                                   __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
-                       }
-
-                       /**
-                        * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
-                        * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
-                        */
-                       if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
-                               panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
-                                   __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
-                       }
-#endif /* __APRR_SUPPORTED__*/
                        WRITE_PTE_FAST(pte_p, tmplate);
 
                        if (managed) {
@@ -8834,14 +8640,28 @@ pmap_change_wiring_internal(
        const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap);
 
        pte_p = pmap_pte(pmap, v);
-       assert(pte_p != PT_ENTRY_NULL);
-       pa = pte_to_pa(*pte_p);
+       if (pte_p == PT_ENTRY_NULL) {
+               if (!wired) {
+                       /*
+                        * The PTE may have already been cleared by a disconnect/remove operation, and the L3 table
+                        * may have been freed by a remove operation.
+                        */
+                       goto pmap_change_wiring_return;
+               } else {
+                       panic("%s: Attempt to wire nonexistent PTE for pmap %p", __func__, pmap);
+               }
+       }
+       /*
+        * Use volatile loads to prevent the compiler from collapsing references to 'pa' back to loads of pte_p
+        * until we've grabbed the final PVH lock; PTE contents may change during this time.
+        */
+       pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
 
        while (pa_valid(pa)) {
                pmap_paddr_t new_pa;
 
                LOCK_PVH((int)pa_index(pa));
-               new_pa = pte_to_pa(*pte_p);
+               new_pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
 
                if (pa == new_pa) {
                        break;
@@ -8851,6 +8671,18 @@ pmap_change_wiring_internal(
                pa = new_pa;
        }
 
+       /* PTE checks must be performed after acquiring the PVH lock (if applicable for the PA) */
+       if ((*pte_p == ARM_PTE_EMPTY) || (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p))) {
+               if (!wired) {
+                       /* PTE cleared by prior remove/disconnect operation */
+                       goto pmap_change_wiring_cleanup;
+               } else {
+                       panic("%s: Attempt to wire empty/compressed PTE %p (=0x%llx) for pmap %p",
+                           __func__, pte_p, (uint64_t)*pte_p, pmap);
+               }
+       }
+
+       assertf((*pte_p & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", pte_p, (uint64_t)*pte_p);
        if (wired != pte_is_wired(*pte_p)) {
                pte_set_wired(pmap, pte_p, wired);
                if (pmap != kernel_pmap) {
@@ -8865,10 +8697,12 @@ pmap_change_wiring_internal(
                }
        }
 
+pmap_change_wiring_cleanup:
        if (pa_valid(pa)) {
                UNLOCK_PVH((int)pa_index(pa));
        }
 
+pmap_change_wiring_return:
        pmap_unlock(pmap);
 }
 
@@ -8980,12 +8814,7 @@ kvtophys(
        if (pa) {
                return pa;
        }
-       pa = ((pmap_paddr_t)pmap_vtophys(kernel_pmap, va)) << PAGE_SHIFT;
-       if (pa) {
-               pa |= (va & PAGE_MASK);
-       }
-
-       return (pmap_paddr_t)pa;
+       return pmap_vtophys(kernel_pmap, va);
 }
 
 pmap_paddr_t
@@ -9601,7 +9430,7 @@ phys_attribute_clear_twig_internal(
                end_pte_p = start_pte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr));
                assert(end_pte_p >= start_pte_p);
                for (curr_pte_p = start_pte_p; curr_pte_p < end_pte_p; curr_pte_p++) {
-                       pmap_paddr_t pa = pte_to_pa(*curr_pte_p);
+                       pmap_paddr_t pa = pte_to_pa(*((volatile pt_entry_t*)curr_pte_p));
                        if (pa_valid(pa)) {
                                ppnum_t pn = (ppnum_t) atop(pa);
                                phys_attribute_clear_with_flush_range(pn, bits, options, NULL, flush_range);
@@ -10128,23 +9957,6 @@ pmap_switch_user_ttb_internal(
                pmap_clear_user_ttb_internal();
        }
 
-#if defined(HAS_APPLE_PAC) && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-       if (!arm_user_jop_disabled()) {
-               uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");
-               bool jop_enabled = sctlr & SCTLR_JOP_KEYS_ENABLED;
-               if (!jop_enabled && !pmap->disable_jop) {
-                       // turn on JOP
-                       sctlr |= SCTLR_JOP_KEYS_ENABLED;
-                       __builtin_arm_wsr64("SCTLR_EL1", sctlr);
-                       arm_context_switch_requires_sync();
-               } else if (jop_enabled && pmap->disable_jop) {
-                       // turn off JOP
-                       sctlr &= ~SCTLR_JOP_KEYS_ENABLED;
-                       __builtin_arm_wsr64("SCTLR_EL1", sctlr);
-                       arm_context_switch_requires_sync();
-               }
-       }
-#endif /* HAS_APPLE_PAC && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
 #endif /* (__ARM_VMSA__ == 7) */
 }
 
@@ -10630,7 +10442,7 @@ arm_fast_fault_internal(
        ptep = pmap_pte(pmap, va);
        if (ptep != PT_ENTRY_NULL) {
                while (true) {
-                       spte = *ptep;
+                       spte = *((volatile pt_entry_t*)ptep);
 
                        pa = pte_to_pa(spte);
 
@@ -10651,42 +10463,13 @@ arm_fast_fault_internal(
                        }
                        pai = (int)pa_index(pa);
                        LOCK_PVH(pai);
-#if __APRR_SUPPORTED__
-                       if (*ptep == spte) {
-                               /*
-                                * Double-check the spte value, as we care
-                                * about the AF bit.
-                                */
-                               break;
-                       }
-                       UNLOCK_PVH(pai);
-#else /* !(__APRR_SUPPORTED__*/
                        break;
-#endif /* !(__APRR_SUPPORTED__*/
                }
        } else {
                pmap_unlock(pmap);
                return result;
        }
 
-#if __APRR_SUPPORTED__
-       /* Check to see if this mapping had APRR restrictions. */
-       if (is_pte_xprr_protected(pmap, spte)) {
-               /*
-                * We have faulted on an XPRR managed mapping; decide if the access should be
-                * reattempted or if it should cause an exception. Now that all JIT entitled
-                * task threads always have MPRR enabled we're only here because of
-                * an AF fault or an actual permission fault. AF faults will have result
-                * changed to KERN_SUCCESS below upon arm_clear_fast_fault return.
-                */
-               if (was_af_fault && (spte & ARM_PTE_AF)) {
-                       result = KERN_SUCCESS;
-                       goto out;
-               } else {
-                       result = KERN_PROTECTION_FAILURE;
-               }
-       }
-#endif /* __APRR_SUPPORTED__*/
 
        if ((IS_REFFAULT_PAGE(pai)) ||
            ((fault_type & VM_PROT_WRITE) && IS_MODFAULT_PAGE(pai))) {
@@ -10717,9 +10500,6 @@ arm_fast_fault_internal(
                }
        }
 
-#if __APRR_SUPPORTED__
-out:
-#endif /* __APRR_SUPPORTED__*/
        UNLOCK_PVH(pai);
        pmap_unlock(pmap);
        return result;
@@ -11971,7 +11751,7 @@ pmap_unnest_options_internal(
 
                                        if ((*cpte != ARM_PTE_TYPE_FAULT)
                                            && (!ARM_PTE_IS_COMPRESSED(*cpte, cpte))) {
-                                               spte = *cpte;
+                                               spte = *((volatile pt_entry_t*)cpte);
                                                while (!managed) {
                                                        pa = pte_to_pa(spte);
                                                        if (!pa_valid(pa)) {
@@ -11979,7 +11759,7 @@ pmap_unnest_options_internal(
                                                        }
                                                        pai = (int)pa_index(pa);
                                                        LOCK_PVH(pai);
-                                                       spte = *cpte;
+                                                       spte = *((volatile pt_entry_t*)cpte);
                                                        pa = pte_to_pa(spte);
                                                        if (pai == (int)pa_index(pa)) {
                                                                managed = TRUE;
@@ -14694,7 +14474,7 @@ pmap_query_page_info_internal(
                goto done;
        }
 
-       pa = pte_to_pa(*pte);
+       pa = pte_to_pa(*((volatile pt_entry_t*)pte));
        if (pa == 0) {
                if (ARM_PTE_IS_COMPRESSED(*pte, pte)) {
                        disp |= PMAP_QUERY_PAGE_COMPRESSED;
@@ -15299,13 +15079,8 @@ pmap_test_test_config(unsigned int flags)
        T_LOG("Make the first mapping XO.");
        pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
 
-#if __APRR_SUPPORTED__
-       T_LOG("Validate that reads to our mapping fault.");
-       pmap_test_read(pmap, va_base, true);
-#else
        T_LOG("Validate that reads to our mapping do not fault.");
        pmap_test_read(pmap, va_base, false);
-#endif
 
        T_LOG("Validate that writes to our mapping fault.");
        pmap_test_write(pmap, va_base, true);
index 062e8b006217739e33ccd7c9a4949b826a10c99f..760592fd699dd4adc37316e1444d1877cdc26035 100644 (file)
@@ -636,7 +636,8 @@ pmap_disable_user_jop(pmap_t pmap);
 
 #define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72
 
-#define PMAP_COUNT 73
+
+#define PMAP_COUNT 74
 
 #define PMAP_INVALID_CPU_NUM (~0U)
 
index 125a25051c55821620fe7ed278ab299dd0e683dd..de15c2cd7b9201251e1dd26d43f7d387f9e5ea55 100644 (file)
@@ -59,19 +59,15 @@ extern unsigned long segSizeEXTRADATA;
 void
 trust_cache_init(void)
 {
-       size_t const len = segSizeEXTRADATA;
+       size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
+       size_t const len = segSizeEXTRADATA - locked_down_dt_size;
 
        if (len == 0) {
-#if XNU_TARGET_OS_OSX
+               // We allow no trust cache at all.
                printf("No external trust cache found (region len is 0).");
-#else
-               panic("No external trust cache found (region len is 0).");
-#endif
                return;
        }
 
-       size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
-
        pmap_serialized_trust_caches = (struct serialized_trust_caches*)(segEXTRADATA +
            locked_down_dt_size);
 
@@ -203,7 +199,6 @@ lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN])
 
                // Engineering Trust Caches.
                if (pmap_serialized_trust_caches->num_caches > engineering_trust_cache_index) {
-#if DEVELOPMENT || DEBUG
                        for (uint32_t i = engineering_trust_cache_index; i < pmap_serialized_trust_caches->num_caches; i++) {
                                struct trust_cache_module1 const *module =
                                    (struct trust_cache_module1 const *)(
@@ -215,10 +210,6 @@ lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN])
                                               (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
                                }
                        }
-#else
-                       panic("Number of trust caches: %d. How could we let this happen?",
-                           pmap_serialized_trust_caches->num_caches);
-#endif
                }
        }
 
index fe6523791d3523ade1843417137d00ff94245cf3..10c0a455eb55ada3fd983b298975e379acf1beb7 100644 (file)
@@ -70,11 +70,7 @@ static_assert((KERNEL_PMAP_HEAP_RANGE_START & ~ARM_TT_ROOT_OFFMASK) > ARM_KERNEL
 static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
 #endif /* __ARM_KERNEL_PROTECT__ */
 
-#if __APRR_SUPPORTED__ && XNU_MONITOR
-#define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
-#else
 #define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
-#endif
 
 #if KASAN
 extern vm_offset_t shadow_pbase;
index 996783d64e4ed6ee48abf574af2dacc046cec676..8ba394661040fa4bfac94b395999d3784358bc74 100644 (file)
@@ -28,7 +28,6 @@
 #include <machine/asm.h>
 #include <arm64/machine_machdep.h>
 #include <arm64/machine_routines_asm.h>
-#include <arm64/pac_asm.h>
 #include <arm64/proc_reg.h>
 #include "assym.s"
 
 #endif
 
 
-#if defined(HAS_APPLE_PAC)
-       ldr             \new_key, [\thread, TH_ROP_PID]
-       ldr             \tmp_key, [\cpudatap, CPU_ROP_KEY]
-       cmp             \new_key, \tmp_key
-       b.eq    1f
-       str             \new_key, [\cpudatap, CPU_ROP_KEY]
-       msr             APIBKeyLo_EL1, \new_key
-       add             \new_key, \new_key, #1
-       msr             APIBKeyHi_EL1, \new_key
-       add             \new_key, \new_key, #1
-       msr             APDBKeyLo_EL1, \new_key
-       add             \new_key, \new_key, #1
-       msr             APDBKeyHi_EL1, \new_key
-       mov             \wsync, #1
-1:
-
-#if HAS_PAC_FAST_A_KEY_SWITCHING
-       IF_PAC_SLOW_A_KEY_SWITCHING     Lskip_jop_keys_\@, \new_key
-       ldr             \new_key, [\thread, TH_JOP_PID]
-       REPROGRAM_JOP_KEYS      Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
-       mov             \wsync, #1
-Lskip_jop_keys_\@:
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
-
-#endif /* defined(HAS_APPLE_PAC) */
 
        cbz             \wsync, 1f
        isb     sy
index 5694d2fa1008b3450c514509029b37f53812948d..c85b85ebc4e6cbbf98471fb0446979bce8692997 100644 (file)
@@ -35,7 +35,6 @@
 #include <config_dtrace.h>
 #include "assym.s"
 #include <arm64/exception_asm.h>
-#include <arm64/pac_asm.h>
 #include "dwarf_unwind.h"
 
 #if __ARM_KERNEL_PROTECT__
        /* Return to the PPL. */
        mov             x15, #0
        mov             w10, #PPL_STATE_EXCEPTION
-#if __APRR_SUPPORTED__
-       b               Ldisable_aif_and_enter_ppl
-#else
 #error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
 1:
 .endmacro
 
-#if __APRR_SUPPORTED__
-/*
- * EL1_SP0_VECTOR_PPL_CHECK
- *
- * Check to see if the exception was taken by the kernel or the PPL.  Falls
- * through if kernel, hands off to the given label if PPL.  Expects to run on
- * SP1.
- *   arg0 - Label to go to if this was a PPL exception.
- */
-.macro EL1_SP0_VECTOR_PPL_CHECK
-       sub             sp, sp, ARM_CONTEXT_SIZE
-       stp             x0, x1, [sp, SS64_X0]
-       mrs             x0, APRR_EL1
-       MOV64           x1, APRR_EL1_DEFAULT
-       cmp             x0, x1
-       b.ne            $0
-       ldp             x0, x1, [sp, SS64_X0]
-       add             sp, sp, ARM_CONTEXT_SIZE
-.endmacro
-
-#define STAY_ON_SP1 0
-#define SWITCH_TO_SP0 1
-
-#define INVOKE_PREFLIGHT 0
-#define NO_INVOKE_PREFLIGHT 1
-
-/*
- * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- *
- * Verify whether an exception came from the PPL or from the kernel.  If it came
- * from the PPL, save off the PPL state and transition out of the PPL.
- *   arg0 - Label to go to if this was a kernel exception
- *   arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
- *   arg2 - Indicates if this should switch back to SP0
- *   x0   - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
- */
-.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
-       /* Spill some more registers. */
-       stp             x2, x3, [sp, SS64_X2]
-
-       /*
-        * Check if the PPL is locked down; if not, we can treat this as a
-        * kernel exception.
-        */
-       adrp    x1, EXT(pmap_ppl_locked_down)@page
-       ldr             w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
-       cbz             x1, 2f
-
-       /* Ensure that APRR_EL1 is actually in PPL mode. */
-       MOV64           x1, APRR_EL1_PPL
-       cmp             x0, x1
-       b.ne            .
-
-       /*
-        * Check if the CPU is in the PPL; if not we can treat this as a
-        * kernel exception.
-        */
-       GET_PMAP_CPU_DATA       x3, x1, x2
-       ldr             w1, [x3, PMAP_CPU_DATA_PPL_STATE]
-       cmp             x1, #PPL_STATE_KERNEL
-       b.eq            2f
-
-       /* Ensure that the CPU is in the expected PPL state. */
-       cmp             x1, #PPL_STATE_DISPATCH
-       b.ne            .
-
-       /* Mark the CPU as dealing with an exception. */
-       mov             x1, #PPL_STATE_EXCEPTION
-       str             w1, [x3, PMAP_CPU_DATA_PPL_STATE]
-
-       /* Load the bounds of the PPL trampoline. */
-       adrp    x0, EXT(ppl_no_exception_start)@page
-       add             x0, x0, EXT(ppl_no_exception_start)@pageoff
-       adrp    x1, EXT(ppl_no_exception_end)@page
-       add             x1, x1, EXT(ppl_no_exception_end)@pageoff
-
-       /*
-        * Ensure that the exception did not occur in the trampoline.  If it
-        * did, we are either being attacked or our state machine is
-        * horrifically broken.
-        */
-       mrs             x2, ELR_EL1
-       cmp             x2, x0
-       b.lo            1f
-       cmp             x2, x1
-       b.hi            1f
-
-       /* We might be under attack; spin. */
-       b               .
-
-1:
-       /* Get the PPL save area. */
-       mov             x1, x3
-       ldr             x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
-
-       /* Save our x0, x1 state. */
-       ldp             x2, x3, [sp, SS64_X0]
-       stp             x2, x3, [x0, SS64_X0]
-
-       /* Restore SP1 to its original state. */
-       mov             x3, sp
-       add             sp, sp, ARM_CONTEXT_SIZE
-
-       .if $2 == SWITCH_TO_SP0
-       /* Switch back to SP0. */
-       msr             SPSel, #0
-       mov             x2, sp
-       .else
-       /* Load the SP0 value. */
-       mrs             x2, SP_EL0
-       .endif
-
-       /* Save off the stack pointer. */
-       str             x2, [x0, SS64_SP]
-
-       INIT_SAVED_STATE_FLAVORS x0, w1, w2
-
-       /* Save the context that was interrupted. */ 
-       ldp             x2, x3, [x3, SS64_X2]
-       SPILL_REGISTERS PPL_MODE
-
-       /*
-        * Stash the function we wish to be invoked to deal with the exception;
-        * usually this is some preflight function for the fleh_* handler.
-        */
-       adrp            x25, $1@page
-       add             x25, x25, $1@pageoff
-
-       /*
-        * Indicate that this is a PPL exception, and that we should return to
-        * the PPL.
-        */
-       mov             x26, #1
-
-       /* Transition back to kernel mode. */
-       mov             x15, #PPL_EXIT_EXCEPTION
-       b               ppl_return_to_kernel_mode
-2:
-       /* Restore SP1 state. */
-       ldp             x2, x3, [sp, SS64_X2]
-       ldp             x0, x1, [sp, SS64_X0]
-       add             sp, sp, ARM_CONTEXT_SIZE
-
-       /* Go to the specified label (usually the original exception vector). */
-       b               $0
-.endmacro
-#endif /* __APRR_SUPPORTED__ */
 
 #endif /* XNU_MONITOR */
 
@@ -474,14 +322,6 @@ Lel0_serror_vector_64:
 .endmacro
 
 el1_sp0_synchronous_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
-       /*
-        * We do not have enough space for new instructions in this vector, so
-        * jump to outside code to check if this exception was taken in the PPL.
-        */
-       b               el1_sp0_synchronous_vector_ppl_check
-Lel1_sp0_synchronous_vector_kernel:
-#endif
        stp             x0, x1, [sp, #-16]!                             // Save x0 and x1 to the exception stack
        mrs             x1, ESR_EL1                                                     // Get the exception syndrome
        /* If the stack pointer is corrupt, it will manifest either as a data abort
@@ -498,10 +338,6 @@ Lkernel_stack_valid:
        b               fleh_dispatch64
 
 el1_sp0_irq_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
-       EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
-Lel1_sp0_irq_vector_kernel:
-#endif
        EL1_SP0_VECTOR
        SWITCH_TO_INT_STACK
        adrp    x1, EXT(fleh_irq)@page                                  // Load address for fleh
@@ -510,10 +346,6 @@ Lel1_sp0_irq_vector_kernel:
 
 el1_sp0_fiq_vector_long:
        // ARM64_TODO write optimized decrementer
-#if XNU_MONITOR && __APRR_SUPPORTED__
-       EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
-Lel1_sp0_fiq_vector_kernel:
-#endif
        EL1_SP0_VECTOR
        SWITCH_TO_INT_STACK
        adrp    x1, EXT(fleh_fiq)@page                                  // Load address for fleh
@@ -521,10 +353,6 @@ Lel1_sp0_fiq_vector_kernel:
        b               fleh_dispatch64
 
 el1_sp0_serror_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
-       EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
-Lel1_sp0_serror_vector_kernel:
-#endif
        EL1_SP0_VECTOR
        adrp    x1, EXT(fleh_serror)@page                               // Load address for fleh
        add             x1, x1, EXT(fleh_serror)@pageoff
@@ -569,35 +397,12 @@ el1_sp1_serror_vector_long:
        add             x1, x1, fleh_serror_sp1@pageoff
        b               fleh_dispatch64
 
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-/**
- * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
- * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
- */
-#define SCTLR_EL1_INITIAL      (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
-#define SCTLR_EL1_EXPECTED     ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
-#endif
 
 .macro EL0_64_VECTOR
        stp             x0, x1, [sp, #-16]!                                     // Save x0 and x1 to the exception stack
 #if __ARM_KERNEL_PROTECT__
        mov             x18, #0                                                 // Zero x18 to avoid leaking data to user SS
 #endif
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-       // enable JOP for kernel
-       mrs             x0, SCTLR_EL1
-       tbnz    x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
-       //      if (!jop_running) {
-       MOV64   x1, SCTLR_JOP_KEYS_ENABLED
-       orr             x0, x0, x1
-       msr             SCTLR_EL1, x0
-       isb             sy
-       MOV64   x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
-       cmp             x0, x1
-       bne             .
-       //      }
-1:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
        mrs             x0, TPIDR_EL1                                           // Load the thread register
        mrs             x1, SP_EL0                                                      // Load the user stack pointer
        add             x0, x0, ACT_CONTEXT                                     // Calculate where we store the user context pointer
@@ -643,13 +448,6 @@ el0_serror_vector_64_long:
        add             x1, x1, EXT(fleh_serror)@pageoff
        b               fleh_dispatch64
 
-#if XNU_MONITOR && __APRR_SUPPORTED__
-el1_sp0_synchronous_vector_ppl_check:
-       EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
-
-       /* Jump back to the primary exception vector if we fell through. */
-       b               Lel1_sp0_synchronous_vector_kernel
-#endif
 
 /*
  * check_exception_stack
@@ -1212,59 +1010,6 @@ Lexception_return_restore_registers:
        CMSR FPCR, x5, x4, 1
 1:
 
-#if defined(HAS_APPLE_PAC)
-       //      if (eret to userspace) {
-       and             x2, x2, #(PSR64_MODE_EL_MASK)
-       cmp             x2, #(PSR64_MODE_EL0)
-       bne             Ldone_reconfigure_jop
-       //              thread_t thread = current_thread();
-       //              bool disable_jop;
-       //              if (arm_user_jop_disabled()) {
-       //                      /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */
-       //                      disable_jop = true;
-       //              } else {
-       //                      disable_jop = thread->machine.disable_user_jop;
-       //              }
-       mrs             x2, TPIDR_EL1
-       ldrb    w1, [x2, TH_DISABLE_USER_JOP]
-       cbz             w1, Lenable_jop
-       //              if (disable_jop) {
-       //                      if (cpu does not have discrete JOP-at-EL1 bit) {
-       //                              disable_sctlr_jop_keys();
-       //                      }
-       //              } else {
-       //                      if (cpu does not have fast A-key switching) {
-       //                              reprogram_jop_keys(thread->machine.jop_pid);
-       //                      }
-       //              }
-       //      }
-Ldisable_jop:
-#if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-       MOV64   x1, SCTLR_JOP_KEYS_ENABLED
-       mrs             x4, SCTLR_EL1
-       bic             x4, x4, x1
-       msr             SCTLR_EL1, x4
-       MOV64   x1, SCTLR_EL1_EXPECTED
-       cmp             x4, x1
-       bne             .
-#endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
-       b               Ldone_reconfigure_jop
-Lenable_jop:
-#if HAS_PAC_SLOW_A_KEY_SWITCHING
-       IF_PAC_FAST_A_KEY_SWITCHING     Ldone_reconfigure_jop, x1
-       ldr             x1, [x2, TH_JOP_PID]
-       ldr             x2, [x2, ACT_CPUDATAP]
-       REPROGRAM_JOP_KEYS      Ldone_reconfigure_jop, x1, x2, x3
-#if defined(__ARM_ARCH_8_5__)
-       /**
-        * The new keys will be used after eret to userspace, so explicit sync is
-        * required iff eret is non-synchronizing.
-        */
-       isb             sy
-#endif /* defined(__ARM_ARCH_8_5__) */
-#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
-Ldone_reconfigure_jop:
-#endif /* defined(HAS_APPLE_PAC) */
 
        /* Restore arm_neon_saved_state64 */
        ldp             q0, q1, [x0, NS64_Q0]
@@ -1407,27 +1152,6 @@ LEXT(ExceptionVectorsEnd)
 #endif /* __ARM_KERNEL_PROTECT__ */
 
 #if XNU_MONITOR
-#if __APRR_SUPPORTED__
-       .text
-       .align 2
-el1_sp0_synchronous_vector_not_in_kernel_mode:
-       EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
-
-       .text
-       .align 2
-el1_sp0_fiq_vector_not_in_kernel_mode:
-       EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
-
-       .text
-       .align 2
-el1_sp0_irq_vector_not_in_kernel_mode:
-       EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
-
-       .text
-       .align 2
-el1_sp0_serror_vector_not_in_kernel_mode:
-       EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
-#endif /* __APRR_SUPPORTED__ */
 
 /*
  * Functions to preflight the fleh handlers when the PPL has taken an exception;
@@ -1502,65 +1226,6 @@ fleh_serror_from_ppl:
        b               EXT(fleh_serror)
 
 
-#if XNU_MONITOR && __APRR_SUPPORTED__
-/*
- * aprr_ppl_enter
- *
- * Invokes the PPL
- *   x15 - The index of the requested PPL function.
- */
-       .text
-       .align 2
-       .globl EXT(aprr_ppl_enter)
-LEXT(aprr_ppl_enter)
-       /* Push a frame. */
-       ARM64_STACK_PROLOG
-       stp             x20, x21, [sp, #-0x20]!
-       stp             x29, x30, [sp, #0x10]
-       add             x29, sp, #0x10
-
-       /* Increase the preemption count. */
-       mrs             x10, TPIDR_EL1
-       ldr             w12, [x10, ACT_PREEMPT_CNT]
-       add             w12, w12, #1
-       str             w12, [x10, ACT_PREEMPT_CNT]
-
-       /* Is the PPL currently locked down? */
-       adrp            x13, EXT(pmap_ppl_locked_down)@page
-       add             x13, x13, EXT(pmap_ppl_locked_down)@pageoff
-       ldr             w14, [x13]
-       cmp             w14, wzr
-
-       /* If not, just perform the call in the current context. */
-       b.eq            EXT(ppl_bootstrap_dispatch)
-
-       mov             w10, #PPL_STATE_KERNEL
-       b               Ldisable_aif_and_enter_ppl
-
-       /* We align this to land the next few instructions on their own page. */
-       .section __PPLTRAMP,__text,regular,pure_instructions
-       .align 14
-       .space (16*1024)-(4*8) // 8 insns
-
-       /*
-        * This label is used by exception handlers that are trying to return
-        * to the PPL.
-        */
-Ldisable_aif_and_enter_ppl:
-       /* We must trampoline to the PPL context; disable AIF. */
-       mrs             x20, DAIF
-       msr             DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-
-       .globl EXT(ppl_no_exception_start)
-LEXT(ppl_no_exception_start)
-       /* Switch APRR_EL1 to PPL mode. */
-       MOV64   x14, APRR_EL1_PPL
-       msr             APRR_EL1, x14
-
-       /* This ISB should be the last instruction on a page. */
-       // TODO: can we static assert this?
-       isb
-#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
 
 
        // x15: ppl call number
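The aprr_ppl_enter body deleted above is easier to follow as structured pseudocode. A hedged C sketch (interrupts_disable_aif() and write_aprr_el1() are invented stand-ins for the msr sequences; the real control transfer stays in assembly):

    /* Sketch only -- mirrors the removed trampoline, not a callable function. */
    void
    aprr_ppl_enter_sketch(unsigned int ppl_call_number)
    {
            disable_preemption();                   /* ACT_PREEMPT_CNT++ */

            if (!pmap_ppl_locked_down) {
                    /* PPL not locked down yet: dispatch in the current context. */
                    ppl_bootstrap_dispatch(ppl_call_number);
                    return;
            }

            /* Trampoline into the PPL: mask asynchronous exceptions first, */
            interrupts_disable_aif();               /* msr DAIFSet, #(A|I|F) */
            /* then flip APRR_EL1 to the PPL permission set.  The isb that
             * completes the switch must be the last instruction on its page
             * so the trampoline text itself stays protected. */
            write_aprr_el1(APRR_EL1_PPL);
            isb();
    }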
@@ -1569,18 +1234,8 @@ LEXT(ppl_no_exception_start)
        .globl EXT(ppl_trampoline_start)
 LEXT(ppl_trampoline_start)
 
-#if __APRR_SUPPORTED__
-       /* Squash AIF AGAIN, because someone may have attacked us. */
-       msr             DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-#endif /* __APRR_SUPPORTED__ */
 
-#if __APRR_SUPPORTED__
-       /* Verify the state of APRR_EL1. */
-       MOV64   x14, APRR_EL1_PPL
-       mrs             x21, APRR_EL1
-#else /* __APRR_SUPPORTED__ */
 #error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
        cmp             x14, x21
        b.ne    Lppl_fail_dispatch
 
@@ -1617,11 +1272,7 @@ LEXT(ppl_trampoline_start)
        /* Find the save area, and return to the saved PPL context. */
        ldr             x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
        mov             sp, x0
-#if __APRR_SUPPORTED__
-       b               Lexception_return_restore_registers
-#else
        b               EXT(return_to_ppl)
-#endif /* __APRR_SUPPORTED__ */
 
 Lppl_mark_cpu_as_dispatching:
        cmp             w10, #PPL_STATE_KERNEL
@@ -1693,27 +1344,6 @@ Lppl_dispatch_exit:
        /* Return to the kernel. */
        b ppl_return_to_kernel_mode
 
-#if __APRR_SUPPORTED__
-       /* We align this to land the next few instructions on their own page. */
-       .align 14
-       .space (16*1024)-(4*5) // 5 insns
-
-ppl_return_to_kernel_mode:
-       /* Switch APRR_EL1 back to the kernel mode. */
-       // must be 5 instructions
-       MOV64   x14, APRR_EL1_DEFAULT
-       msr             APRR_EL1, x14
-
-       .globl EXT(ppl_trampoline_end)
-LEXT(ppl_trampoline_end)
-
-       /* This should be the first instruction on a page. */
-       isb
-
-       .globl EXT(ppl_no_exception_end)
-LEXT(ppl_no_exception_end)
-       b ppl_exit
-#endif /* __APRR_SUPPORTED__ */
 
 
        .text
index 03f8a510b8b6540dec8ff017f20eb2ba3cd8c065..c40e24e6f1cbb4c1e779fc171161def396719a43 100644 (file)
@@ -2289,32 +2289,6 @@ ex_cb_invoke(
 }
 
 #if defined(HAS_APPLE_PAC)
-static inline bool
-cpu_supports_userkeyen()
-{
-#if defined(APPLEFIRESTORM)
-       return __builtin_arm_rsr64(ARM64_REG_APCTL_EL1) & APCTL_EL1_UserKeyEn;
-#elif HAS_APCTL_EL1_USERKEYEN
-       return true;
-#else
-       return false;
-#endif
-}
-
-/**
- * Returns the default JOP key.  Depending on how the CPU diversifies userspace
- * JOP keys, this value may reflect either KERNKeyLo or APIAKeyLo.
- */
-uint64_t
-ml_default_jop_pid(void)
-{
-       if (cpu_supports_userkeyen()) {
-               return KERNEL_KERNKEY_ID;
-       } else {
-               return KERNEL_JOP_ID;
-       }
-}
-
 void
 ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop)
 {
index d9f59fa5167f462277023c60bae5cc63d418ea26..5c88ab5c4b6aa4ea28fe7a0802207e9e142f5216 100644 (file)
@@ -29,7 +29,6 @@
 #include <machine/asm.h>
 #include <arm64/exception_asm.h>
 #include <arm64/machine_machdep.h>
-#include <arm64/pac_asm.h>
 #include <arm64/proc_reg.h>
 #include <arm/pmap.h>
 #include <pexpert/arm64/board_config.h>
 #include "assym.s"
 
 
-#if defined(HAS_APPLE_PAC)
-
-.macro SET_KERN_KEY            dst, apctl_el1
-       orr             \dst, \apctl_el1, #APCTL_EL1_KernKeyEn
-.endmacro
-
-.macro CLEAR_KERN_KEY  dst, apctl_el1
-       and             \dst, \apctl_el1, #~APCTL_EL1_KernKeyEn
-.endmacro
-
-/*
- * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
- */
-       .align 2
-       .globl EXT(ml_enable_user_jop_key)
-LEXT(ml_enable_user_jop_key)
-       mov             x1, x0
-       mrs             x2, TPIDR_EL1
-       ldr             x2, [x2, ACT_CPUDATAP]
-       ldr             x0, [x2, CPU_JOP_KEY]
-
-       cmp             x0, x1
-       b.eq    Lskip_program_el0_jop_key
-       /*
-        * We can safely write to the JOP key registers without updating
-        * current_cpu_datap()->jop_key.  The complementary
-        * ml_disable_user_jop_key() call will put back the old value.  Interrupts
-        * are also disabled, so nothing else will read this field in the meantime.
-        */
-       SET_JOP_KEY_REGISTERS   x1, x2
-Lskip_program_el0_jop_key:
-
-       /*
-        * if (cpu has APCTL_EL1.UserKeyEn) {
-        *   set APCTL_EL1.KernKeyEn            // KERNKey is mixed into EL0 keys
-        * } else {
-        *   clear APCTL_EL1.KernKeyEn          // KERNKey is not mixed into EL0 keys
-        * }
-        */
-       mrs             x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
-       SET_KERN_KEY    x2, x1
-       CLEAR_KERN_KEY  x3, x1
-       tst             x1, #(APCTL_EL1_UserKeyEn)
-       csel    x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
-       SET_KERN_KEY    x1, x1
-#else
-       CLEAR_KERN_KEY  x1, x1
-#endif
-       msr             ARM64_REG_APCTL_EL1, x1
-       isb
-       ret
-
-/*
- * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
- */
-       .align 2
-       .globl EXT(ml_disable_user_jop_key)
-LEXT(ml_disable_user_jop_key)
-       cmp             x0, x1
-       b.eq    Lskip_program_prev_jop_key
-       SET_JOP_KEY_REGISTERS   x1, x2
-Lskip_program_prev_jop_key:
-
-       /*
-        * if (cpu has APCTL_EL1.UserKeyEn) {
-        *   clear APCTL_EL1.KernKeyEn          // KERNKey is not mixed into EL1 keys
-        * } else {
-        *   set APCTL_EL1.KernKeyEn            // KERNKey is mixed into EL1 keys
-        * }
-        */
-       mrs             x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
-       CLEAR_KERN_KEY  x2, x1
-       SET_KERN_KEY    x3, x1
-       tst             x1, #(APCTL_EL1_UserKeyEn)
-       csel    x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
-       CLEAR_KERN_KEY  x1, x1
-#else
-       SET_KERN_KEY    x1, x1
-#endif
-       msr             ARM64_REG_APCTL_EL1, x1
-       isb
-       ret
-
-#endif /* defined(HAS_APPLE_PAC) */
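The deleted ml_enable_user_jop_key()/ml_disable_user_jop_key() pair follows a symmetric protocol: program the user's key only if it differs from the per-CPU cache, then steer KernKey to exactly one exception level. A hedged C rendering (cpu_has_userkeyen(), apctl_set()/apctl_clear() and set_jop_key_registers() are hypothetical stand-ins):

    /* Sketch of the removed assembly; returns the value to hand back to
     * the complementary ml_disable_user_jop_key() call. */
    uint64_t
    ml_enable_user_jop_key_sketch(uint64_t user_jop_key)
    {
            uint64_t saved = current_cpu_datap()->jop_key;

            if (saved != user_jop_key) {
                    /* Safe without updating the cache: interrupts are off and
                     * the disable path restores the old value. */
                    set_jop_key_registers(user_jop_key);
            }
            /* KernKey must diversify exactly one of EL0/EL1: mix it into the
             * EL0 keys when the CPU has APCTL_EL1.UserKeyEn, else into EL1. */
            if (cpu_has_userkeyen()) {
                    apctl_set(APCTL_EL1_KernKeyEn);
            } else {
                    apctl_clear(APCTL_EL1_KernKeyEn);
            }
            return saved;
    }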
 
 #if HAS_BP_RET
 
diff --git a/osfmk/arm64/pac_asm.h b/osfmk/arm64/pac_asm.h
deleted file mode 100644 (file)
index 9a3981d..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright (c) 2019 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-#ifndef _ARM64_PAC_ASM_H_
-#define _ARM64_PAC_ASM_H_
-
-#ifndef __ASSEMBLER__
-#error "This header should only be used in .s files"
-#endif
-
-#include <pexpert/arm64/board_config.h>
-#include <arm64/proc_reg.h>
-#include "assym.s"
-
-#if defined(HAS_APPLE_PAC)
-
-#if defined(APPLEFIRESTORM)
-/* H13 may use either fast or slow A-key switching, depending on CPU model and revision */
-#define HAS_PAC_FAST_A_KEY_SWITCHING    1
-#define HAS_PAC_SLOW_A_KEY_SWITCHING    1
-
-/* BEGIN IGNORE CODESTYLE */
-
-/**
- * IF_PAC_FAST_A_KEY_SWITCHING
- *
- * Branch to a specified label if this H13 model + revision supports fast A-key switching.
- *
- *   label - label to branch to
- *   tmp - scratch register
- */
-.macro IF_PAC_FAST_A_KEY_SWITCHING     label, tmp
-       /**
-        * start.s attempts to set APCTL_EL1.UserKeyEn.  If this H13 CPU doesn't
-        * actually support this bit, it will be RaZ.
-        */
-       mrs             \tmp, APCTL_EL1
-       tbnz    \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
-.endmacro
-
-/**
- * IF_PAC_SLOW_A_KEY_SWITCHING
- *
- * Branch to a specified label if this H13 model + revision doesn't support fast A-key switching.
- *
- *   label - label to branch to
- *   tmp - scratch register
- */
-.macro IF_PAC_SLOW_A_KEY_SWITCHING     label, tmp
-       mrs             \tmp, APCTL_EL1
-       tbz             \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
-.endmacro
-
-/* END IGNORE CODESTYLE */
-
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
-#define HAS_PAC_FAST_A_KEY_SWITCHING    1
-#define HAS_PAC_SLOW_A_KEY_SWITCHING    0
-
-.macro IF_PAC_FAST_A_KEY_SWITCHING      label, tmp
-.error "This macro should never need to be used on this CPU family."
-.endmacro
-
-/* We know at compile time that this CPU family definitely doesn't need slow A-key switching */
-.macro IF_PAC_SLOW_A_KEY_SWITCHING      label, tmp
-.endmacro
-
-#else /* !defined(APPLEFIRESTORM) && !defined(HAS_APCTL_EL1_USERKEYEN) */
-#define HAS_PAC_FAST_A_KEY_SWITCHING    0
-#define HAS_PAC_SLOW_A_KEY_SWITCHING    1
-
-/* We know at compile time that this CPU family definitely doesn't support fast A-key switching */
-.macro IF_PAC_FAST_A_KEY_SWITCHING      label, tmp
-.endmacro
-
-.macro IF_PAC_SLOW_A_KEY_SWITCHING      label, tmp
-.error "This macro should never need to be used on this CPU family."
-.endmacro
-
-#endif /* defined(APPLEFIRESTORM) */
-
-/* BEGIN IGNORE CODESTYLE */
-
-/**
- * REPROGRAM_JOP_KEYS
- *
- * Reprograms the A-key registers if needed, and updates current_cpu_datap()->jop_key.
- *
- * On CPUs where fast A-key switching is implemented, this macro reprograms KERNKey_EL1.
- * On other CPUs, it reprograms AP{D,I}AKey_EL1.
- *
- *   skip_label - branch to this label if new_jop_key is already loaded into CPU
- *   new_jop_key - new APIAKeyLo value
- *   cpudatap - current cpu_data_t *
- *   tmp - scratch register
- */
-.macro REPROGRAM_JOP_KEYS      skip_label, new_jop_key, cpudatap, tmp
-       ldr             \tmp, [\cpudatap, CPU_JOP_KEY]
-       cmp             \new_jop_key, \tmp
-       b.eq    \skip_label
-       SET_JOP_KEY_REGISTERS   \new_jop_key, \tmp
-       str             \new_jop_key, [\cpudatap, CPU_JOP_KEY]
-.endmacro
-
-/**
- * SET_JOP_KEY_REGISTERS
- *
- * Unconditionally reprograms the A-key registers.  The caller is responsible for
- * updating current_cpu_datap()->jop_key as needed.
- *
- *   new_jop_key - new APIAKeyLo value
- *   tmp - scratch register
- */
-.macro SET_JOP_KEY_REGISTERS   new_jop_key, tmp
-#if HAS_PAC_FAST_A_KEY_SWITCHING
-       IF_PAC_SLOW_A_KEY_SWITCHING     Lslow_reprogram_jop_keys_\@, \tmp
-       msr             KERNKeyLo_EL1, \new_jop_key
-       add             \tmp, \new_jop_key, #1
-       msr             KERNKeyHi_EL1, \tmp
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
-#if HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING
-       b               Lset_jop_key_registers_done_\@
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING && HAS_PAC_SLOW_A_KEY_SWITCHING */
-
-#if HAS_PAC_SLOW_A_KEY_SWITCHING
-Lslow_reprogram_jop_keys_\@:
-       msr             APIAKeyLo_EL1, \new_jop_key
-       add             \tmp, \new_jop_key, #1
-       msr             APIAKeyHi_EL1, \tmp
-       add             \tmp, \tmp, #1
-       msr             APDAKeyLo_EL1, \tmp
-       add             \tmp, \tmp, #1
-       msr             APDAKeyHi_EL1, \tmp
-#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
-
-Lset_jop_key_registers_done_\@:
-.endmacro
-
-/* END IGNORE CODESTYLE */
-
-#endif /* defined(HAS_APPLE_PAC) */
-
-#endif /* _ARM64_PAC_ASM_H_ */
-
-/* vim: set ts=4 ft=asm: */
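The heart of the deleted header is REPROGRAM_JOP_KEYS, which compares against a per-CPU cache before touching the key registers. The same logic as a C sketch (set_jop_key_registers() stands in for the msr sequence; cpu_data_t as in the surrounding sources):

    /* C equivalent of the removed REPROGRAM_JOP_KEYS macro (sketch). */
    static void
    reprogram_jop_keys_sketch(uint64_t new_jop_key, cpu_data_t *cpudatap)
    {
            if (cpudatap->jop_key == new_jop_key) {
                    return;                 /* already loaded into this CPU */
            }
            /* KERNKey_EL1 on fast-switching CPUs, AP{I,D}AKey_EL1 otherwise. */
            set_jop_key_registers(new_jop_key);
            cpudatap->jop_key = new_jop_key;
    }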
index 71c1230f8450fe7ee21fee4342d70c4a1b279909..868ff8d18bad42ee7b43f6b9393cbee6254528be 100644 (file)
@@ -123,48 +123,5 @@ _pinst_spsel_1:
        check_instruction x2, x3, __pinst_spsel_1, 0xd65f03c0d50041bf
        b __pinst_spsel_1
 
-#if __APRR_SUPPORTED__
-
-/*
- * APRR registers aren't covered by VMSA lockdown, so we'll keep these
- * gadgets in pinst for protection against undesired execution. 
- */
-
-       .text
-       .section        __LAST,__pinst
-       .align 2
-
-__pinst_set_aprr_el0:
-       msr             APRR_EL0, x0
-       ret
-
-__pinst_set_aprr_el1:
-       msr             APRR_EL1, x0
-       ret
-
-__pinst_set_aprr_shadow_mask_en_el1:
-       msr             APRR_SHADOW_MASK_EN_EL1, x0
-
-       ret
-
-       .text
-       .section        __TEXT_EXEC,__text
-       .align 2
-
-       .globl _pinst_set_aprr_el0
-_pinst_set_aprr_el0:
-       check_instruction x2, x3, __pinst_set_aprr_el0, 0xd65f03c0d51cf200
-       b __pinst_set_aprr_el0
-
-       .globl _pinst_set_aprr_el1
-_pinst_set_aprr_el1:
-       check_instruction x2, x3, __pinst_set_aprr_el1, 0xd65f03c0d51cf220
-       b __pinst_set_aprr_el1
-
-       .globl _pinst_set_aprr_shadow_mask_en_el1
-_pinst_set_aprr_shadow_mask_en_el1:
-       check_instruction x2, x3, __pinst_set_aprr_shadow_mask_en_el1, 0xd65f03c0d51cf2c0
-       b __pinst_set_aprr_shadow_mask_en_el1
-#endif /* __APRR_SUPPORTED__ */
 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
 
index 4531e4de2603c8904c1edfab0e7ad3d2f1bfb346..37eb2dda12ff00014e80d8448b97412dc8b4e6dd 100644 (file)
@@ -1170,20 +1170,6 @@ ex_cb_test()
 
 #if defined(HAS_APPLE_PAC)
 
-/*
- *
- *  arm64_ropjop_test - basic xnu ROP/JOP test plan
- *
- *  - assert ROP/JOP configured and running status match
- *  - assert all AppleMode ROP/JOP features enabled
- *  - ensure ROP/JOP keys are set and diversified
- *  - sign a KVA (the address of this function), assert it was signed (changed)
- *  - authenticate the newly signed KVA
- *  - assert the authed KVA is the original KVA
- *  - corrupt a signed ptr, auth it, ensure auth failed
- *  - assert the failed auth (IB key) of the corrupted pointer is tagged
- *
- */
 
 kern_return_t
 arm64_ropjop_test()
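The sign/auth round trip the deleted test plan describes can be sketched with Clang's <ptrauth.h> intrinsics (assumption: an arm64e target, where the intrinsics are not no-ops; discriminator 0 for brevity):

    #include <assert.h>
    #include <ptrauth.h>

    static int some_datum;

    void
    sign_auth_roundtrip(void)
    {
            void *p = &some_datum;
            /* Sign with the DA (data, A-family) key. */
            void *sp = ptrauth_sign_unauthenticated(p, ptrauth_key_asda, 0);
            assert(sp != p);        /* PAC bits changed the pointer */
            /* A successful auth strips the PAC, recovering the address. */
            void *ap = ptrauth_auth_data(sp, ptrauth_key_asda, 0);
            assert(ap == p);
            /* Authenticating a corrupted pointer instead yields a canonically
             * tagged (invalid) pointer, or traps on newer hardware. */
    }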
@@ -1195,51 +1181,11 @@ arm64_ropjop_test()
        boolean_t config_jop_enabled = TRUE;
 
 
-       /* assert all AppleMode ROP/JOP features enabled */
-       uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
-#if __APSTS_SUPPORTED__
-       uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
-       T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL);
-#else
-       T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL);
-#endif /* __APSTS_SUPPORTED__ */
-       T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL);
-
-       bool kernkeyen = apctl & APCTL_EL1_KernKeyEn;
-#if HAS_APCTL_EL1_USERKEYEN
-       bool userkeyen = apctl & APCTL_EL1_UserKeyEn;
-#else
-       bool userkeyen = false;
-#endif
-       /* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */
-       T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled");
-       T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space");
-
-       /* ROP/JOP keys enabled current status */
-       bool status_jop_enabled, status_rop_enabled;
-#if __APSTS_SUPPORTED__ /* H13+ */
-       status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
-#elif __APCFG_SUPPORTED__ /* H12 */
-       uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
-       status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
-#else /* !__APCFG_SUPPORTED__ H11 */
-       uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
-       status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
-       status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
-#endif /* __APSTS_SUPPORTED__ */
-
-       /* assert configured and running status match */
-       T_EXPECT(config_rop_enabled == status_rop_enabled, NULL);
-       T_EXPECT(config_jop_enabled == status_jop_enabled, NULL);
-
-
        if (config_jop_enabled) {
                /* jop key */
                uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
                uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
 
-               /* ensure JOP key is set and diversified */
-               T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
                T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
        }
 
@@ -1248,8 +1194,6 @@ arm64_ropjop_test()
                uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
                uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
 
-               /* ensure ROP key is set and diversified */
-               T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
                T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
 
                /* sign a KVA (the address of this function) */
index d1c2225083cd94f2ca9fb305cae1aee147676396..307027f7c455f8b704e12fa0d3696169e5898c55 100644 (file)
@@ -1736,228 +1736,13 @@ typedef enum {
 #define CORESIGHT_REGIONS   4
 #define CORESIGHT_SIZE      0x1000
 
-#if __APRR_SUPPORTED__
-/*
- * APRR_EL0/APRR_EL1
- *
- *  63                 0
- * +--------------------+
- * | Attr[15:0]RWX[3:0] |
- * +--------------------+
- *
- * These registers consist of 16 4-bit fields.
- *
- * The attribute index consists of the access protection
- * and execution protections on a mapping.  The index
- * for a given mapping type is constructed as follows.
- *
- * Attribute Index
- *
- *     3       2      1     0
- * +-------+-------+-----+----+
- * | AP[1] | AP[0] | PXN | XN |
- * +-------+-------+-----+----+
- *
- * The attribute for a given index determines what
- * protections are disabled for that mapping type
- * (protections beyond the scope of the standard ARM
- * protections for a mapping cannot be granted via
- * APRR).
- *
- * Attribute
- *
- *       3      2   1   0
- * +----------+---+---+---+
- * | Reserved | R | W | X |
- * +----------+---+---+---+
- *
- * Where:
- *   R: Read is allowed.
- *   W: Write is allowed.
- *   X: Execute is allowed.
- */
-
-#define APRR_IDX_XN  (1ULL)
-#define APRR_IDX_PXN (2ULL)
-
-
-#define APRR_IDX_XN_SHIFT (0ULL)
-#define APRR_IDX_PXN_SHIFT  (1ULL)
-#define APRR_IDX_APSHIFT   (2ULL)
-
-#endif /* __APRR_SUPPORTED__ */
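The removed comment pins the APRR encoding down precisely enough to restate in C. A sketch of the index construction and attribute extraction it describes (mirroring the APRR_SHIFT_FOR_IDX and APRR_EXTRACT_IDX_ATTR macros deleted further down):

    #include <stdbool.h>
    #include <stdint.h>

    /* Index = AP[1]:AP[0]:PXN:XN -- one of 16 four-bit fields in APRR_ELx. */
    static inline unsigned
    aprr_index(unsigned ap, bool pxn, bool xn)
    {
            return (ap << 2) | ((unsigned)pxn << 1) | (unsigned)xn;
    }

    /* Attribute nibble for an index: bit 0 = X, bit 1 = W, bit 2 = R. */
    static inline uint64_t
    aprr_attr(uint64_t aprr_value, unsigned idx)
    {
            return (aprr_value >> (idx << 2)) & 0x7ULL;
    }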
-
-
-#if __APRR_SUPPORTED__
-
-#define APRR_ATTR_X (1ULL)
-#define APRR_ATTR_W (2ULL)
-#define APRR_ATTR_R (4ULL)
-
-#define APRR_ATTR_WX  (APRR_ATTR_W | APRR_ATTR_X)
-#define APRR_ATTR_RX  (APRR_ATTR_R | APRR_ATTR_X)
-#define APRR_ATTR_RWX (APRR_ATTR_R | APRR_ATTR_W | APRR_ATTR_X)
-
-#define APRR_ATTR_NONE (0ULL)
-#define APRR_ATTR_MASK (APRR_ATTR_RWX)
-
-#define APRR_RESERVED_MASK (0x8888888888888888ULL)
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-#define XPRR_FIRM_RX_PERM  (0ULL)
-#define XPRR_PPL_RW_PERM   (1ULL)
-#define XPRR_FIRM_RO_PERM  (2ULL)
-#define XPRR_KERN_RW_PERM  (3ULL)
-#define XPRR_FIRM_RW_PERM  (4ULL)
-#define XPRR_USER_JIT_PERM (5ULL)
-#define XPRR_KERN0_RW_PERM (6ULL)
-#define XPRR_USER_RW_PERM  (7ULL)
-#define XPRR_PPL_RX_PERM   (8ULL)
-#define XPRR_USER_XO_PERM  (9ULL)
-#define XPRR_KERN_RX_PERM  (10ULL)
-#define XPRR_KERN_RO_PERM  (11ULL)
-#define XPRR_KERN0_RX_PERM (12ULL)
-#define XPRR_USER_RX_PERM  (13ULL)
-#define XPRR_KERN0_RO_PERM (14ULL)
-#define XPRR_USER_RO_PERM  (15ULL)
-#define XPRR_MAX_PERM      (15ULL)
-
-#define XPRR_VERSION_NONE    (0ULL)
-#define XPRR_VERSION_APRR    (1ULL)
-
-
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-/* Indices for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_INDEX  (0ULL)  /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_INDEX  (1ULL)  /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_INDEX   (2ULL)  /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_INDEX  (3ULL)  /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_INDEX  (4ULL)  /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_INDEX (5ULL)  /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_INDEX (6ULL)  /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_INDEX  (7ULL)  /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_INDEX   (8ULL)  /* AP_RONA, PX, X */
-#define APRR_KERN_RX_INDEX  (9ULL)  /* AP_RONA, PX, XN */
-#define APRR_USER_XO_INDEX  (10ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_INDEX  (11ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_INDEX (12ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_INDEX (13ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_INDEX  (14ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_INDEX  (15ULL) /* AP_RORO, PXN, XN */
-#define APRR_MAX_INDEX      (15ULL) /* For sanity checking index values */
-#endif /* __APRR_SUPPORTED__ */
-
-
-#if __APRR_SUPPORTED__
-#define APRR_SHIFT_FOR_IDX(x) \
-       ((x) << 2ULL)
-
-/* Shifts for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_SHIFT  (0ULL)  /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_SHIFT  (4ULL)  /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_SHIFT   (8ULL)  /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_SHIFT  (12ULL) /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_SHIFT  (16ULL) /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_SHIFT (20ULL) /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_SHIFT (24ULL) /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_SHIFT  (28ULL) /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_SHIFT   (32ULL) /* AP_RONA, PX, X */
-#define APRR_KERN_RX_SHIFT  (36ULL) /* AP_RONA, PX, XN */
-#define APRR_USER_XO_SHIFT  (40ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_SHIFT  (44ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_SHIFT (48ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_SHIFT (52ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_SHIFT  (56ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_SHIFT  (60ULL) /* AP_RORO, PXN, XN */
-
-#define ARM_PTE_APRR_MASK \
-       (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)
-
-#define ARM_PTE_XPRR_MASK ARM_PTE_APRR_MASK
-
-#define APRR_INDEX_TO_PTE(x) \
-       ((pt_entry_t) \
-        (((x) & 0x8) ? ARM_PTE_AP(0x2) : 0) | \
-        (((x) & 0x4) ? ARM_PTE_AP(0x1) : 0) | \
-        (((x) & 0x2) ? ARM_PTE_PNX : 0) | \
-        (((x) & 0x1) ? ARM_PTE_NX : 0))
-
-#define PTE_TO_APRR_INDEX(x) \
-       ((ARM_PTE_EXTRACT_AP(x) << APRR_IDX_APSHIFT) | \
-       (((x) & ARM_PTE_PNXMASK) ? APRR_IDX_PXN : 0) | \
-       (((x) & ARM_PTE_NXMASK) ? APRR_IDX_XN : 0))
-
-#endif /* __APRR_SUPPORTED__ */
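The two conversion macros above are inverses; a quick self-check, assuming the deleted macros and the usual pmap headers are in scope:

    /* PTE_TO_APRR_INDEX(APRR_INDEX_TO_PTE(i)) == i for all 16 indices. */
    static void
    aprr_pte_roundtrip_check(void)
    {
            for (uint64_t i = 0; i <= APRR_MAX_INDEX; i++) {
                    pt_entry_t pte = APRR_INDEX_TO_PTE(i);
                    assert(PTE_TO_APRR_INDEX(pte) == i);
            }
    }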
-
-#if __APRR_SUPPORTED__
-
-#define APRR_EXTRACT_IDX_ATTR(_aprr_value, _idx) \
-       (((_aprr_value) >> APRR_SHIFT_FOR_IDX(_idx)) & APRR_ATTR_MASK)
-
-#define APRR_REMOVE(x) (~(x))
-
-#define APRR_EL1_UNRESTRICTED (0x4455445566666677ULL)
-
-#define APRR_EL1_RESET \
-       APRR_EL1_UNRESTRICTED
-
-/*
- * XO mappings bypass PAN protection (rdar://58360875)
- * Revoke ALL kernel access permissions for XO mappings.
- */
-#define APRR_EL1_BASE \
-       (APRR_EL1_UNRESTRICTED & \
-       APRR_REMOVE(APRR_ATTR_R << APRR_USER_XO_SHIFT))
-
-#if XNU_MONITOR
-#define APRR_EL1_DEFAULT \
-       (APRR_EL1_BASE & \
-        (APRR_REMOVE((APRR_ATTR_WX << APRR_PPL_RW_SHIFT) | \
-        (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
-        (APRR_ATTR_WX << APRR_PPL_RX_SHIFT))))
-
-#define APRR_EL1_PPL \
-       (APRR_EL1_BASE & \
-        (APRR_REMOVE((APRR_ATTR_X << APRR_PPL_RW_SHIFT) | \
-        (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
-        (APRR_ATTR_W << APRR_PPL_RX_SHIFT))))
-#else
-#define APRR_EL1_DEFAULT \
-       APRR_EL1_BASE
-#endif
 
-#define APRR_EL0_UNRESTRICTED (0x4545010167670101ULL)
 
-#define APRR_EL0_RESET \
-       APRR_EL0_UNRESTRICTED
 
-#if XNU_MONITOR
-#define APRR_EL0_BASE \
-       (APRR_EL0_UNRESTRICTED & \
-        (APRR_REMOVE((APRR_ATTR_RWX << APRR_PPL_RW_SHIFT) | \
-        (APRR_ATTR_RWX << APRR_PPL_RX_SHIFT) | \
-        (APRR_ATTR_RWX << APRR_USER_XO_SHIFT))))
-#else
-#define APRR_EL0_BASE \
-       APRR_EL0_UNRESTRICTED
-#endif
 
-#define APRR_EL0_JIT_RW \
-       (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_X << APRR_USER_JIT_SHIFT))
 
-#define APRR_EL0_JIT_RX \
-       (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_W << APRR_USER_JIT_SHIFT))
 
-#define APRR_EL0_JIT_RWX \
-       APRR_EL0_BASE
 
-#define APRR_EL0_DEFAULT \
-       APRR_EL0_BASE
-
-#endif /* __APRR_SUPPORTED__ */
 
 
 /*
@@ -1999,46 +1784,12 @@ typedef enum {
 #define ID_AA64ISAR0_EL1_AES_PMULL_EN  (2ull << ID_AA64ISAR0_EL1_AES_OFFSET)
 
 
-#if __APCFG_SUPPORTED__
-/*
- * APCFG_EL1
- *
- *  63       2 1 0
- * +----------+-+-+
- * | reserved |K|R|
- * +----------+-+-+
- *
- * where:
- *   R: Reserved
- *   K: ElXEnKey - Enable ARMV8.3 defined {IA,IB,DA,DB} keys when CPU is
- *                 operating in EL1 (or higher) and when under Apple-Mode
- */
-
-#define APCFG_EL1_ELXENKEY_OFFSET      1
-#define APCFG_EL1_ELXENKEY_MASK        (0x1ULL << APCFG_EL1_ELXENKEY_OFFSET)
-#define APCFG_EL1_ELXENKEY             APCFG_EL1_ELXENKEY_MASK
-#endif /* __APCFG_SUPPORTED__ */
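Testing the bit the removed diagram describes would look roughly like this (assumes the __builtin_arm_rsr64 spelling used elsewhere in this diff):

    static bool
    apcfg_elx_keys_enabled(void)
    {
            /* K (ElXEnKey): ARMv8.3 keys enabled at EL1+ under Apple-Mode. */
            return (__builtin_arm_rsr64("APCFG_EL1") & APCFG_EL1_ELXENKEY) != 0;
    }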
 
 #define APSTATE_G_SHIFT  (0)
 #define APSTATE_P_SHIFT  (1)
 #define APSTATE_A_SHIFT  (2)
 #define APSTATE_AP_MASK  ((1ULL << APSTATE_A_SHIFT) | (1ULL << APSTATE_P_SHIFT))
 
-#ifdef __APSTS_SUPPORTED__
-#define APCTL_EL1_AppleMode  (1ULL << 0)
-#define APCTL_EL1_KernKeyEn  (1ULL << 1)
-#define APCTL_EL1_EnAPKey0   (1ULL << 2)
-#define APCTL_EL1_EnAPKey1   (1ULL << 3)
-#ifdef HAS_APCTL_EL1_USERKEYEN
-#define APCTL_EL1_UserKeyEn_OFFSET      4
-#define APCTL_EL1_UserKeyEn             (1ULL << APCTL_EL1_UserKeyEn_OFFSET)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
-#define APSTS_EL1_MKEYVld    (1ULL << 0)
-#else
-#define APCTL_EL1_AppleMode  (1ULL << 0)
-#define APCTL_EL1_MKEYVld    (1ULL << 1)
-#define APCTL_EL1_KernKeyEn  (1ULL << 2)
-#endif
 
 #define ACTLR_EL1_EnTSO   (1ULL << 1)
 #define ACTLR_EL1_EnAPFLG (1ULL << 4)
index a4b64906c64d29d14d5a1799b7f3586c37ca78d7..10d16d12204c42e7b4d3dd5e45ad3270c3281e02 100644 (file)
 #endif /* __ARM_KERNEL_PROTECT__ */
 
 
-#if __APRR_SUPPORTED__
-
-.macro MSR_APRR_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
-       bl              EXT(pinst_set_aprr_el1)
-#else
-       msr             APRR_EL1, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_EL0_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
-       bl              EXT(pinst_set_aprr_el0)
-#else
-       msr             APRR_EL0, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
-       bl              EXT(pinst_set_aprr_shadow_mask_en_el1)
-#else
-       msr             APRR_SHADOW_MASK_EN_EL1, x0
-#endif
-.endmacro
-
-#endif /* __APRR_SUPPORTED__ */
 
 .macro MSR_VBAR_EL1_X0
 #if defined(KERNEL_INTEGRITY_KTRR)
@@ -163,25 +136,6 @@ LEXT(reset_vector)
        msr     VBAR_EL1, x0
 #endif
 
-#if __APRR_SUPPORTED__
-       MOV64   x0, APRR_EL1_DEFAULT
-#if XNU_MONITOR
-       adrp    x4, EXT(pmap_ppl_locked_down)@page
-       ldrb    w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
-       cmp             w5, #0
-       b.ne    1f
-
-       // If the PPL is not locked down, we start in PPL mode.
-       MOV64   x0, APRR_EL1_PPL
-1:
-#endif /* XNU_MONITOR */
-
-       MSR_APRR_EL1_X0
-
-       // Load up the default APRR_EL0 value.
-       MOV64   x0, APRR_EL0_DEFAULT
-       MSR_APRR_EL0_X0
-#endif /* __APRR_SUPPORTED__ */
 
 #if defined(KERNEL_INTEGRITY_KTRR)
        /*
@@ -607,29 +561,6 @@ LEXT(start_first_cpu)
        add             x0, x0, EXT(LowExceptionVectorBase)@pageoff
        MSR_VBAR_EL1_X0
 
-#if __APRR_SUPPORTED__
-       // Save the LR
-       mov             x1, lr
-
-#if XNU_MONITOR
-       // If the PPL is supported, we start out in PPL mode.
-       MOV64   x0, APRR_EL1_PPL
-#else
-       // Otherwise, we start out in default mode.
-       MOV64   x0, APRR_EL1_DEFAULT
-#endif
-
-       // Set the APRR state for EL1.
-       MSR_APRR_EL1_X0
-
-       // Set the APRR state for EL0.
-       MOV64   x0, APRR_EL0_DEFAULT
-       MSR_APRR_EL0_X0
-
-
-       // Restore the LR.
-       mov     lr, x1
-#endif /* __APRR_SUPPORTED__ */
 
        // Get the kernel memory parameters from the boot args
        ldr             x22, [x20, BA_VIRT_BASE]                        // Get the kernel virt base
@@ -904,79 +835,13 @@ common_start:
 
 1:
 #ifdef HAS_APPLE_PAC
-#ifdef __APSTS_SUPPORTED__
-       mrs             x0, ARM64_REG_APSTS_EL1
-       and             x1, x0, #(APSTS_EL1_MKEYVld)
-       cbz             x1, 1b                                                                          // Poll APSTS_EL1.MKEYVld
-       mrs             x0, ARM64_REG_APCTL_EL1
-       orr             x0, x0, #(APCTL_EL1_AppleMode)
-#ifdef HAS_APCTL_EL1_USERKEYEN
-       orr             x0, x0, #(APCTL_EL1_UserKeyEn)
-       and             x0, x0, #~(APCTL_EL1_KernKeyEn)
-#else /* !HAS_APCTL_EL1_USERKEYEN */
-       orr             x0, x0, #(APCTL_EL1_KernKeyEn)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
-       and             x0, x0, #~(APCTL_EL1_EnAPKey0)
-       msr             ARM64_REG_APCTL_EL1, x0
-
-#if defined(APPLEFIRESTORM)
-       IF_PAC_FAST_A_KEY_SWITCHING     1f, x0
-       orr             x0, x0, #(APCTL_EL1_KernKeyEn)
-       msr             ARM64_REG_APCTL_EL1, x0
-1:
-#endif /* APPLEFIRESTORM */
-
-#else
-       mrs             x0, ARM64_REG_APCTL_EL1
-       and             x1, x0, #(APCTL_EL1_MKEYVld)
-       cbz             x1, 1b                                                                          // Poll APCTL_EL1.MKEYVld
-       orr             x0, x0, #(APCTL_EL1_AppleMode)
-       orr             x0, x0, #(APCTL_EL1_KernKeyEn)
-       msr             ARM64_REG_APCTL_EL1, x0
-#endif /* __APSTS_SUPPORTED__ */
-
-       /* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
-       isb             sy
-       /* Load static kernel key diversification values */
-       ldr             x0, =KERNEL_ROP_ID
-       /* set ROP key. must write at least once to pickup mkey per boot diversification */
-       msr             APIBKeyLo_EL1, x0
-       add             x0, x0, #1
-       msr             APIBKeyHi_EL1, x0
-       add             x0, x0, #1
-       msr             APDBKeyLo_EL1, x0
-       add             x0, x0, #1
-       msr             APDBKeyHi_EL1, x0
-       add             x0, x0, #1
-       msr             ARM64_REG_KERNELKEYLO_EL1, x0
-       add             x0, x0, #1
-       msr             ARM64_REG_KERNELKEYHI_EL1, x0
-       /* set JOP key. must write at least once to pickup mkey per boot diversification */
-       add             x0, x0, #1
-       msr             APIAKeyLo_EL1, x0
-       add             x0, x0, #1
-       msr             APIAKeyHi_EL1, x0
-       add             x0, x0, #1
-       msr             APDAKeyLo_EL1, x0
-       add             x0, x0, #1
-       msr             APDAKeyHi_EL1, x0
-       /* set G key */
-       add             x0, x0, #1
-       msr             APGAKeyLo_EL1, x0
-       add             x0, x0, #1
-       msr             APGAKeyHi_EL1, x0
 
        // Enable caches, MMU, ROP and JOP
        MOV64   x0, SCTLR_EL1_DEFAULT
        orr             x0, x0, #(SCTLR_PACIB_ENABLED) /* IB is ROP */
 
-#if __APCFG_SUPPORTED__
-       // for APCFG systems, JOP keys are always on for EL1.
-       // JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
-#else /* __APCFG_SUPPORTED__ */
        MOV64   x1, SCTLR_JOP_KEYS_ENABLED
        orr     x0, x0, x1
-#endif /* !__APCFG_SUPPORTED__ */
 #else  /* HAS_APPLE_PAC */
 
        // Enable caches and MMU
@@ -988,10 +853,8 @@ common_start:
        MOV64   x1, SCTLR_EL1_DEFAULT
 #if HAS_APPLE_PAC
        orr             x1, x1, #(SCTLR_PACIB_ENABLED)
-#if !__APCFG_SUPPORTED__
        MOV64   x2, SCTLR_JOP_KEYS_ENABLED
        orr             x1, x1, x2
-#endif /* !__APCFG_SUPPORTED__ */
 #endif /* HAS_APPLE_PAC */
        cmp             x0, x1
        bne             .
index 304d4ae7b97b1fbbdfda22a269414c17a1f655d0..bdf7fe02e3ea4e4edd0000be9142a562a449acdd 100644 (file)
@@ -61,6 +61,8 @@ COMP_SUBDIRS = \
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -394,7 +396,17 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 8aacc9817774ca0d339bd44243cc80bd8092fbac..b8f235bdb9e629ce951269c76688a0e20f6670dc 100644 (file)
@@ -42,7 +42,7 @@ osfmk/arm64/lowmem_vectors.c  standard
 osfmk/arm64/sleh.c                     standard
 osfmk/arm64/start.s    optional nos_arm_asm
 osfmk/arm64/pinst.s    optional nos_arm_asm
-osfmk/arm64/cswitch.s  standard
+osfmk/arm64/cswitch.s  optional nos_arm_asm
 osfmk/arm/machine_cpuid.c      standard
 osfmk/arm/machine_routines_common.c            standard
 osfmk/arm64/machine_routines.c         standard
index 2d6f7e7d14bb41c59a9beea2e12f43be6f3a8c51..2aa10cceb2a90fcf09c76db56b9dbbc4e54fd30a 100644 (file)
@@ -967,6 +967,15 @@ ipc_port_destroy(ipc_port_t port)
        /* check for a backup port */
        pdrequest = port->ip_pdrequest;
 
+       /*
+        * Panic if a special reply has ip_pdrequest or ip_tempowner
+        * set, as this causes a type confusion while accessing the
+        * kdata union.
+        */
+       if (special_reply && (pdrequest || port->ip_tempowner)) {
+               panic("ipc_port_destroy: invalid state");
+       }
+
 #if IMPORTANCE_INHERITANCE
        /* determine how many assertions to drop and from whom */
        if (port->ip_tempowner != 0) {
index 00a256e177fa5ddd57bbb21b392495cbe90e0eb1..78fd32c6e7fafd277e834b918f72f0a0d5c29d43 100644 (file)
@@ -2024,13 +2024,14 @@ ipc_right_copyin(
                }
 
                /*
-                * Disallow moving receive-right kobjects, e.g. mk_timer ports
+                * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
                 * The ipc_port structure uses the kdata union of kobject and
                 * imp_task exclusively. Thus, general use of a kobject port as
                 * a receive right can cause type confusion in the importance
                 * code.
                 */
-               if (io_kotype(entry->ie_object) != IKOT_NONE) {
+               if (io_is_kobject(entry->ie_object) ||
+                   io_is_kolabeled(entry->ie_object)) {
                        /*
                         * Distinguish an invalid right, e.g., trying to move
                         * a send right as a receive right, from this
@@ -2049,7 +2050,7 @@ ipc_right_copyin(
                assert(port->ip_receiver_name == name);
                assert(port->ip_receiver == space);
 
-               if (port->ip_immovable_receive) {
+               if (port->ip_immovable_receive || port->ip_specialreply) {
                        assert(port->ip_receiver != ipc_space_kernel);
                        ip_unlock(port);
                        assert(current_task() != kernel_task);
@@ -2718,6 +2719,14 @@ ipc_right_copyout(
                assert(port->ip_mscount == 0);
                assert(port->ip_receiver_name == MACH_PORT_NULL);
 
+               /*
+                * Don't copyout kobjects or kolabels as receive right
+                */
+               if (io_is_kobject(entry->ie_object) ||
+                   io_is_kolabeled(entry->ie_object)) {
+                       panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
+               }
+
                imq_lock(&port->ip_messages);
                dest = port->ip_destination;
 
index d37ce1b37facd81d27adacdf43488d56d88327c6..4a753c7371144b27d52231d70d50cdbd7fda6b2b 100644 (file)
@@ -3129,9 +3129,13 @@ user_data_get_value(
                /* redeem of previous values is the value */
                if (0 < prev_value_count) {
                        elem = (user_data_element_t)prev_values[0];
+
+                       user_data_lock();
                        assert(0 < elem->e_made);
                        elem->e_made++;
-                       *out_value = prev_values[0];
+                       user_data_unlock();
+
+                       *out_value = (mach_voucher_attr_value_handle_t)elem;
                        return KERN_SUCCESS;
                }
 
index 86d9ddf2a51bc664e94e21d38ecbdc8aa8f2a539..2c0a4d854b4b4dd86fc18cf41c422c67931132e4 100644 (file)
@@ -1676,8 +1676,12 @@ mach_port_request_notification(
                }
                /* port is locked and active */
 
-               /* you cannot register for port death notifications on a kobject */
-               if (ip_kotype(port) != IKOT_NONE) {
+               /*
+                * you cannot register for port death notifications on a kobject,
+                * kolabel or special reply port
+                */
+               if (ip_is_kobject(port) || ip_is_kolabeled(port) ||
+                   port->ip_specialreply) {
                        ip_unlock(port);
                        return KERN_INVALID_RIGHT;
                }
index 481b9ef485c75e015ae1611cb4ad4c0ca7a3077c..5f19428395bf96f3c4402f98def07bf4784b4d8d 100644 (file)
@@ -933,6 +933,11 @@ load_context(
        timer_start(&processor->system_state, processor->last_dispatch);
        processor->current_state = &processor->system_state;
 
+#if __AMP__
+       if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
+               timer_start(&thread->ptime, processor->last_dispatch);
+       }
+#endif
 
        cpu_quiescent_counter_join(processor->last_dispatch);
 
index 0a336a8f0fb5d42546121170bb34e78ec142996a..1ace4c3ec20ffb7e0b8fa26e86ad93e645c78067 100644 (file)
@@ -904,7 +904,7 @@ task_init(void)
        { panic("task_init\n");}
 
 #if defined(HAS_APPLE_PAC)
-       kernel_task->rop_pid = KERNEL_ROP_ID;
+       kernel_task->rop_pid = ml_default_rop_pid();
        kernel_task->jop_pid = ml_default_jop_pid();
        // kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
        // disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
index 63ee8528a250808d96c7b350971d2f37a40f74ab..0c7bbc603640d22ceb122735ec8443b7c199a3de 100644 (file)
@@ -2916,14 +2916,23 @@ thread_set_mach_voucher(
                return KERN_INVALID_ARGUMENT;
        }
 
+       bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
+
+       thread_mtx_lock(thread);
+       /*
+        * Once the thread is started, we will look at `ith_voucher` without
+        * holding any lock.
+        *
+        * Setting the voucher hence can only be done by current_thread() or
+        * before it started. "started" flips under the thread mutex and must be
+        * tested under it too.
+        */
        if (thread != current_thread() && thread->started) {
+               thread_mtx_unlock(thread);
                return KERN_INVALID_ARGUMENT;
        }
 
        ipc_voucher_reference(voucher);
-       bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
-
-       thread_mtx_lock(thread);
        old_voucher = thread->ith_voucher;
        thread->ith_voucher = voucher;
        thread->ith_voucher_name = MACH_PORT_NULL;
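Condensed, the race this hunk closes (sketch; the unsafe ordering is the pre-change code shown above):

    /* Old: test outside the mutex -- "started" can flip before the store.
     * New: test and store under the same thread mutex. */
    thread_mtx_lock(thread);
    if (thread != current_thread() && thread->started) {
            thread_mtx_unlock(thread);
            return KERN_INVALID_ARGUMENT;   /* ith_voucher may already be
                                             * read without any lock */
    }
    thread->ith_voucher = voucher;          /* not yet observable unlocked */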
index a5a10300aec1573556f40ee2b6f0334cced7362b..3112f5791fd49c464059cca99a2b1083f9682faa 100644 (file)
@@ -95,13 +95,11 @@ extern char     *strcat(char *, const char *) __deprecated;
 __kpi_deprecated_arm64_macos_unavailable
 extern char     *strncat(char *, const char *, size_t);
 
-/* strcmp() is deprecated. Please use strncmp() instead. */
-__kpi_deprecated_arm64_macos_unavailable
 extern int      strcmp(const char *, const char *);
+extern int      strncmp(const char *, const char *, size_t);
 
 extern size_t   strlcpy(char *, const char *, size_t);
 extern size_t   strlcat(char *, const char *, size_t);
-extern int      strncmp(const char *, const char *, size_t);
 
 extern int      strcasecmp(const char *s1, const char *s2);
 extern int      strncasecmp(const char *s1, const char *s2, size_t n);
index d591237482d2977b0924adeca7ca2d58c5d4cdc2..b7d4a4659692a4bfbcb38d290ae25e661b86f106 100644 (file)
@@ -166,6 +166,11 @@ typedef const struct memory_object_pager_ops {
        kern_return_t (*memory_object_data_reclaim)(
                memory_object_t mem_obj,
                boolean_t reclaim_backing_store);
+       boolean_t (*memory_object_backing_object)(
+               memory_object_t mem_obj,
+               memory_object_offset_t mem_obj_offset,
+               vm_object_t *backing_object,
+               vm_object_offset_t *backing_offset);
        const char *memory_object_pager_name;
 } * memory_object_pager_ops_t;
 
@@ -301,6 +306,11 @@ typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t;
 __BEGIN_DECLS
 extern void memory_object_reference(memory_object_t object);
 extern void memory_object_deallocate(memory_object_t object);
+extern boolean_t memory_object_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset);
 
 extern void memory_object_default_reference(memory_object_default_t);
 extern void memory_object_default_deallocate(memory_object_default_t);
index 0063500388e4dbc2384dd91baf44111f71ab708b..d85190a4a3e1b4c1d467255093bfb3a6660cd6b1 100644 (file)
@@ -109,6 +109,7 @@ const struct memory_object_pager_ops vnode_pager_ops = {
        .memory_object_map = vnode_pager_map,
        .memory_object_last_unmap = vnode_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = NULL,
        .memory_object_pager_name = "vnode pager"
 };
 
index 631cc4e37801322127a6c45d9bd6b0b5454bc142..76e537501d969589080b3b88b6ec84a907888ee4 100644 (file)
@@ -78,6 +78,7 @@ const struct memory_object_pager_ops device_pager_ops = {
        .memory_object_map = device_pager_map,
        .memory_object_last_unmap = device_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = NULL,
        .memory_object_pager_name = "device pager"
 };
 
index e4013fd8248c433b7617d10acd65c0bb21c33410..7fa63cc7a9c9ed7fb7fc3e63c9829167f685a881 100644 (file)
@@ -2332,6 +2332,24 @@ memory_object_data_reclaim
                reclaim_backing_store);
 }
 
+boolean_t
+memory_object_backing_object
+(
+       memory_object_t memory_object,
+       memory_object_offset_t offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset)
+{
+       if (memory_object->mo_pager_ops->memory_object_backing_object == NULL) {
+               return FALSE;
+       }
+       return (memory_object->mo_pager_ops->memory_object_backing_object)(
+               memory_object,
+               offset,
+               backing_object,
+               backing_offset);
+}
+
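A hypothetical caller of the new dispatch routine (sketch; locking elided, and in this change only the apple_protect and shared_region pagers implement the hook -- every other pager sets it to NULL):

    vm_object_t             backing_object;
    vm_object_offset_t      backing_offset;

    if (memory_object_backing_object(pager, offset,
            &backing_object, &backing_offset)) {
            /* pager exposed its backing store; follow the chain */
    } else {
            /* op is NULL for this pager (vnode, device, compressor, ...) */
    }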
 upl_t
 convert_port_to_upl(
        ipc_port_t      port)
index c8aaa014375f917b944a91b6c9b61e621d9f627c..63fee92d7ce83e0a28484e5f526a338ebf231751 100644 (file)
@@ -911,6 +911,8 @@ extern void pmap_ledger_alloc_init(size_t);
 extern ledger_t pmap_ledger_alloc(void);
 extern void pmap_ledger_free(ledger_t);
 
+extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);
+
 #if __arm64__
 extern bool pmap_is_exotic(pmap_t pmap);
 #else /* __arm64__ */
index 3174fcdc5578d48054dcab83920daf8691baee71..17b667c2c2b3ef579e1bbe11e8d8ab2896163f1e 100644 (file)
@@ -113,6 +113,11 @@ kern_return_t apple_protect_pager_synchronize(memory_object_t mem_obj,
 kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
     vm_prot_t prot);
 kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
+boolean_t apple_protect_pager_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t mem_obj_offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset);
 
 #define CRYPT_INFO_DEBUG 0
 void crypt_info_reference(struct pager_crypt_info *crypt_info);
@@ -135,6 +140,7 @@ const struct memory_object_pager_ops apple_protect_pager_ops = {
        .memory_object_map = apple_protect_pager_map,
        .memory_object_last_unmap = apple_protect_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = apple_protect_pager_backing_object,
        .memory_object_pager_name = "apple_protect"
 };
 
@@ -992,6 +998,25 @@ apple_protect_pager_last_unmap(
        return KERN_SUCCESS;
 }
 
+boolean_t
+apple_protect_pager_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset)
+{
+       apple_protect_pager_t   pager;
+
+       PAGER_DEBUG(PAGER_ALL,
+           ("apple_protect_pager_backing_object: %p\n", mem_obj));
+
+       pager = apple_protect_pager_lookup(mem_obj);
+
+       *backing_object = pager->backing_object;
+       *backing_offset = pager->backing_offset + offset;
+
+       return TRUE;
+}
 
 /*
  *
index a989ed0c2af43d9b45d671d5f3cee6f7e3cd93fd..2b7dfe4bbc6e98e838c904cac0c1af3a8c9e503b 100644 (file)
@@ -133,6 +133,7 @@ const struct memory_object_pager_ops compressor_pager_ops = {
        .memory_object_map = compressor_memory_object_map,
        .memory_object_last_unmap = compressor_memory_object_last_unmap,
        .memory_object_data_reclaim = compressor_memory_object_data_reclaim,
+       .memory_object_backing_object = NULL,
        .memory_object_pager_name = "compressor pager"
 };
 
index ba214a77f966dfa5375c928fcdd61a2d0dfe30c0..73bfa3a244f40091447777c6788011a2e8a9b2fe 100644 (file)
@@ -130,6 +130,7 @@ const struct memory_object_pager_ops fourk_pager_ops = {
        .memory_object_map = fourk_pager_map,
        .memory_object_last_unmap = fourk_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = NULL,
        .memory_object_pager_name = "fourk_pager"
 };
 
index f3b6248447bb3b7f31ca58f35ffc0cbb5f80497f..436bd9368dc0cf5eb9e490e255def4d800dea1bf 100644 (file)
@@ -3203,8 +3203,8 @@ StartAgain:;
                do {
                        new_entry = vm_map_entry_insert(map,
                            entry, tmp_start, tmp_end,
-                           object, offset, needs_copy,
-                           FALSE, FALSE,
+                           object, offset, vmk_flags,
+                           needs_copy, FALSE, FALSE,
                            cur_protection, max_protection,
                            VM_BEHAVIOR_DEFAULT,
                            (entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ?
@@ -3868,6 +3868,7 @@ vm_map_enter_fourk(
            VM_MAP_PAGE_MASK(map)),
            copy_object,
            0,                         /* offset */
+           vmk_flags,
            FALSE,                         /* needs_copy */
            FALSE,
            FALSE,
@@ -6158,6 +6159,12 @@ vm_map_protect(
                        return KERN_PROTECTION_FAILURE;
                }
 
+               if (current->used_for_jit &&
+                   pmap_has_prot_policy(map->pmap, current->translated_allow_execute, current->protection)) {
+                       vm_map_unlock(map);
+                       return KERN_PROTECTION_FAILURE;
+               }
+
                if ((new_prot & VM_PROT_WRITE) &&
                    (new_prot & VM_PROT_EXECUTE) &&
 #if XNU_TARGET_OS_OSX
@@ -16199,6 +16206,7 @@ vm_map_entry_insert(
        vm_map_offset_t         end,
        vm_object_t             object,
        vm_object_offset_t      offset,
+       vm_map_kernel_flags_t   vmk_flags,
        boolean_t               needs_copy,
        boolean_t               is_shared,
        boolean_t               in_transition,
@@ -16313,8 +16321,7 @@ vm_map_entry_insert(
         *      Insert the new entry into the list.
         */
 
-       vm_map_store_entry_link(map, insp_entry, new_entry,
-           VM_MAP_KERNEL_FLAGS_NONE);
+       vm_map_store_entry_link(map, insp_entry, new_entry, vmk_flags);
        map->size += end - start;
 
        /*
@@ -16804,13 +16811,6 @@ RestartCopy:
                if (!copy) {
                        if (src_entry->used_for_jit == TRUE) {
                                if (same_map) {
-#if __APRR_SUPPORTED__
-                                       /*
-                                        * Disallow re-mapping of any JIT regions on APRR devices.
-                                        */
-                                       result = KERN_PROTECTION_FAILURE;
-                                       break;
-#endif /* __APRR_SUPPORTED__*/
                                } else if (!VM_MAP_POLICY_ALLOW_JIT_SHARING(map)) {
                                        /*
                                         * Cannot allow an entry describing a JIT
@@ -20123,6 +20123,13 @@ vm_map_cs_enforcement(
        return map->cs_enforcement;
 }
 
+kern_return_t
+vm_map_cs_wx_enable(
+       vm_map_t map)
+{
+       return pmap_cs_allow_invalid(vm_map_pmap(map));
+}
+
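A hedged usage sketch for the new entry point (the x86_64 stub later in this diff makes it a KERN_SUCCESS no-op; refusal semantics on PPL hardware are an assumption):

    /* Ask the pmap layer to tolerate invalid (W^X-violating) pages. */
    kern_return_t kr = vm_map_cs_wx_enable(current_map());
    if (kr != KERN_SUCCESS) {
            /* pmap-cs policy refused; strict code signing remains. */
    }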
 void
 vm_map_cs_enforcement_set(
        vm_map_t map,
index c20382971ed67be2c7e375c78c327e8eb9c6723c..cd1364f2b1b97fc160aab6166e937f74efc834e8 100644 (file)
@@ -794,6 +794,7 @@ extern vm_map_entry_t   vm_map_entry_insert(
        vm_map_offset_t         end,
        vm_object_t             object,
        vm_object_offset_t      offset,
+       vm_map_kernel_flags_t   vmk_flags,
        boolean_t               needs_copy,
        boolean_t               is_shared,
        boolean_t               in_transition,
@@ -1267,6 +1268,8 @@ extern void vm_map_cs_enforcement_set(
        vm_map_t                map,
        boolean_t               val);
 
+extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
+
 /* wire down a region */
 
 #ifdef XNU_KERNEL_PRIVATE
index ac74045a0b2e03d272dfbe18031eab9ab4e0d64a..63dd004ea1de41ae93fe6a1c0a68d764cf75cee8 100644 (file)
@@ -7055,7 +7055,7 @@ process_upl_to_enter:
        if (upl->flags & UPL_SHADOWED) {
                offset = 0;
        } else {
-               offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) + upl->map_object->paging_offset;
+               offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
        }
 
        size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
index 4c0025051f9f018c8ee991321d2c13ef6391bdfa..3a1bb8fc94ae342412e4197d5e9f98806f02f246 100644 (file)
@@ -115,6 +115,11 @@ kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
     vm_prot_t prot);
 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
+boolean_t shared_region_pager_backing_object(
+       memory_object_t mem_obj,
+       memory_object_offset_t mem_obj_offset,
+       vm_object_t *backing_object,
+       vm_object_offset_t *backing_offset);
 
 /*
  * Vector of VM operations for this EMM.
@@ -133,6 +138,7 @@ const struct memory_object_pager_ops shared_region_pager_ops = {
        .memory_object_map = shared_region_pager_map,
        .memory_object_last_unmap = shared_region_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = shared_region_pager_backing_object,
        .memory_object_pager_name = "shared_region"
 };
 
@@ -1095,6 +1101,26 @@ shared_region_pager_last_unmap(
        return KERN_SUCCESS;
 }
 
+boolean_t
+shared_region_pager_backing_object(
+       memory_object_t         mem_obj,
+       memory_object_offset_t  offset,
+       vm_object_t             *backing_object,
+       vm_object_offset_t      *backing_offset)
+{
+       shared_region_pager_t   pager;
+
+       PAGER_DEBUG(PAGER_ALL,
+           ("shared_region_pager_backing_object: %p\n", mem_obj));
+
+       pager = shared_region_pager_lookup(mem_obj);
+
+       *backing_object = pager->srp_backing_object;
+       *backing_offset = pager->srp_backing_offset + offset;
+
+       return TRUE;
+}
+
 
 /*
  *
index 39526d9b6d2a68eb25fe315d4161c7c4cb38de07..388d0fb569162029fc4e03f27e106fd0a03c3dfb 100644 (file)
@@ -127,6 +127,7 @@ const struct memory_object_pager_ops swapfile_pager_ops = {
        .memory_object_map = swapfile_pager_map,
        .memory_object_last_unmap = swapfile_pager_last_unmap,
        .memory_object_data_reclaim = NULL,
+       .memory_object_backing_object = NULL,
        .memory_object_pager_name = "swapfile pager"
 };
 
index b73513d8ae1372f4fa23a7bb99f3cafe409b1e47..360289f4757b7d1e071b0c2e35beb56d544edcec 100644 (file)
@@ -2727,7 +2727,17 @@ mach_make_memory_entry_internal(
                        required_protection = protections;
                }
                cur_prot = VM_PROT_ALL;
-               vmk_flags.vmkf_copy_pageable = TRUE;
+               if (target_map->pmap == kernel_pmap) {
+                       /*
+                        * Get "reserved" map entries to avoid deadlocking
+                        * on the kernel map or a kernel submap if we
+                        * run out of VM map entries and need to refill that
+                        * zone.
+                        */
+                       vmk_flags.vmkf_copy_pageable = FALSE;
+               } else {
+                       vmk_flags.vmkf_copy_pageable = TRUE;
+               }
                vmk_flags.vmkf_copy_same_map = FALSE;
                assert(map_size != 0);
                kr = vm_map_copy_extract(target_map,
index 7291e516a371a090ecd6c48ea1868e3ee89649dd..79ad35a0eb81ca09e3e640d0fce40ea24799f9ca 100644 (file)
@@ -3288,6 +3288,13 @@ pmap_lockdown_image4_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len
        // Unsupported on this architecture.
 }
 
+kern_return_t
+pmap_cs_allow_invalid(__unused pmap_t pmap)
+{
+       // Unsupported on this architecture.
+       return KERN_SUCCESS;
+}
+
 void *
 pmap_claim_reserved_ppl_page(void)
 {
index 92b00f7ac91651094251318cd355f61149342ea3..c33854c0e6e2cbb7a3255f25132d0f87df1209ac 100644 (file)
@@ -40,6 +40,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -87,7 +89,17 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 1da2d2eb0f86c4038a379417f38d9f03486d6adc..d4a5a8d33c0dfda24e0f685caa1dd209235ad0c9 100644 (file)
 /* Optional CPU features -- an SoC may #undef these */
 #define ARM_PARAMETERIZED_PMAP               1
 #define __ARM_MIXED_PAGE_SIZE__              1
-#define HAS_APCTL_EL1_USERKEYEN              1 /* Supports use of KernKey in EL0 */
-
-/*
- * APSTS_SUPPORTED: Pointer authentication status registers, MKEYVld flag moved here from APCTL on APPLELIGHTNING (H12)
- */
-#define __APSTS_SUPPORTED__                  1
 #define __ARM_RANGE_TLBI__                   1
 #define __ARM_E2H__                          1
 
index b0085a13eec3cd094d46bb6aae1f85f7d19d1d3b..d1bb4554a19d580ee988ccc7116ded1b719f6431 100644 (file)
@@ -57,9 +57,6 @@
 
 #if defined(CPU_HAS_APPLE_PAC) && defined(__arm64e__)
 #define HAS_APPLE_PAC                        1 /* Has Apple ARMv8.3a pointer authentication */
-#define KERNEL_ROP_ID 0xfeedfacefeedfacf /* placeholder static kernel ROP diversifier */
-#define KERNEL_KERNKEY_ID (KERNEL_ROP_ID + 4)
-#define KERNEL_JOP_ID (KERNEL_KERNKEY_ID + 2)
 #endif
 
 #include <pexpert/arm64/apple_arm64_regs.h>
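The removed constants were placeholder diversifiers for the kernel's PAC keys. For context, such a diversifier is the discriminator argument of the ptrauth signing intrinsics; an illustration using clang's <ptrauth.h>, where the key choice and helper name are illustrative only and the constant is the placeholder value removed above:

#include <ptrauth.h>

#define EXAMPLE_DIVERSIFIER 0xfeedfacefeedfacf  /* ex-KERNEL_ROP_ID */

/* Illustration: sign a code pointer with the IA key and a fixed
 * (static) discriminator, the role these constants used to play. */
static void *
sign_with_static_diversifier(void *fn)
{
	return ptrauth_sign_unauthenticated(fn, ptrauth_key_asia,
	           EXAMPLE_DIVERSIFIER);
}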
index 8572786349ff0c72acdd071e5bfa8ece523c7845..27025566708853989fdc7792cf2094a12a2cbb02 100644 (file)
 
 #if defined(HAS_APPLE_PAC)
 
-#ifdef ASSEMBLER
-#define ARM64_REG_APCTL_EL1            S3_4_c15_c0_4
-#define ARM64_REG_APSTS_EL1            S3_6_c15_c12_4
-#else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1            "S3_4_c15_c0_4"
-#define ARM64_REG_APSTS_EL1            "S3_6_c15_c12_4"
-#endif /* ASSEMBLER */
 
 #if ASSEMBLER
-#define ARM64_REG_KERNELKEYLO_EL1      S3_4_c15_c1_0
-#define ARM64_REG_KERNELKEYHI_EL1      S3_4_c15_c1_1
-
 #define ARM64_REG_APIAKEYLO_EL1        S3_0_c2_c1_0
 #define ARM64_REG_APIAKEYHI_EL1        S3_0_c2_c1_1
 #define ARM64_REG_APIBKEYLO_EL1        S3_0_c2_c1_2
 #define ARM64_REG_APGAKEYLO_EL1        S3_0_c2_c3_0
 #define ARM64_REG_APGAKEYHI_EL1        S3_0_c2_c3_1
 #else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1            "S3_4_c15_c0_4"
-
-#define ARM64_REG_KERNELKEYLO_EL1      "S3_4_c15_c1_0"
-#define ARM64_REG_KERNELKEYHI_EL1      "S3_4_c15_c1_1"
-
 #define ARM64_REG_APIAKEYLO_EL1        "S3_0_c2_c1_0"
 #define ARM64_REG_APIAKEYHI_EL1        "S3_0_c2_c1_1"
 #define ARM64_REG_APIBKEYLO_EL1        "S3_0_c2_c1_2"
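Each PAC key remains a 128-bit value programmed through a LO/HI register pair. A hedged sketch of loading the IA key with the encodings kept above; the real key-loading path runs in assembly during early boot, and the helper name and barrier choice here are illustrative:

/* Sketch: program APIAKey{Lo,Hi}_EL1 (encodings per the defines
 * above), then synchronize before signing with the new key. */
static inline void
example_set_apia_key(uint64_t lo, uint64_t hi)
{
	__asm__ volatile (
		"msr S3_0_c2_c1_0, %0\n"        /* ARM64_REG_APIAKEYLO_EL1 */
		"msr S3_0_c2_c1_1, %1\n"        /* ARM64_REG_APIAKEYHI_EL1 */
		"isb sy"
		:: "r"(lo), "r"(hi));
}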
index fdca03b8c64f6891fa7a7d537208fbd3c13281a0..72f916f33bc415f6531182c5d6469f4a57a4c883 100644 (file)
@@ -102,6 +102,7 @@ ___asan_version_mismatch_check_apple_902
 ___asan_version_mismatch_check_apple_1000
 ___asan_version_mismatch_check_apple_1001
 ___asan_version_mismatch_check_apple_clang_1100
+___asan_version_mismatch_check_apple_clang_1200
 ___asan_init
 ___asan_memcpy
 ___asan_memmove
index c22e3a9d1d8bd9b939822f51fd764a59fdcb60eb..44c8554d8b0bffb341d37552d3893e2ad1bad1e9 100644 (file)
@@ -58,6 +58,7 @@ SYMROOT_KEXT += $(SYMROOT_KEXT_PATH)/Kasan_kasan
 SYMBOL_SET_BUILD += $(OBJPATH)/Kasan_kasan.symbolset
 endif
 
+ifneq ($(RC_ProjectName),xnu_libraries)
 # Our external dependency on allsymbols is fine because this runs in a later phase (config_install vs. config_all)
 $(OBJPATH)/%.symbolset: $(SOURCE)/%.exports
        @$(LOG_SYMBOLSET) "$*$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))"
@@ -85,7 +86,9 @@ $(SYMROOT_KEXT): $(SYMBOL_SET_BUILD) ALWAYS
        exit $$cmdstatus
 
 do_config_install:: $(SYMROOT_KEXT) $(DSTROOT_KEXT)
-
+else
+# We are building XNU as a static library; the symbol-set kexts are not needed

+endif
 
 # Install helper scripts
 
index 6c765fa402fb0f6d5d52c39465fc3c467d49f48c..f798e2fffa26251b3bd55f0c8ad220b9b5ec21db 100644 (file)
@@ -36,6 +36,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -68,13 +70,23 @@ $(COMPONENT).filelist: $(OBJS) .KASANFLAGS
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
 $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h: $(SRCROOT)/$(COMPONENT)/kasan-blacklist-dynamic
        @$(LOG_GENERATE) "$(notdir $@)"
        @$(SRCROOT)/$(COMPONENT)/tools/generate_dynamic_blacklist.py "$<" > "$@"
 
 $(SRCROOT)/$(COMPONENT)/kasan_dynamic_blacklist.c: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h
 
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index 39f046220ebdc5f95ae9934042f697c6df06b477..004393d743b92df333b46bae3451e0d34f03760c 100644 (file)
@@ -1466,6 +1466,7 @@ UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
 UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1200, void);
 
 void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
 void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
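Each UNUSED_ABI entry keeps the in-kernel ASan runtime link-compatible with objects emitted by a newer Apple clang. Conceptually the declaration amounts to an exported no-op; a sketch of the effect (the actual UNUSED_ABI macro in kasan.c may expand differently):

/* Sketch: satisfy the linker for instrumented code built with
 * Apple clang 1200; the check itself is a no-op in-kernel. */
void __asan_version_mismatch_check_apple_clang_1200(void);
void
__asan_version_mismatch_check_apple_clang_1200(void)
{
}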
index e75fe162e166a427e4e4b86d4d55aeaa3a1f6aac..24a7728b66e3f3cfc3a5c0d8765f6aa4b7d8321f 100644 (file)
@@ -45,6 +45,8 @@ COMP_SUBDIRS =
 
 %OBJS
 
+%LIBOBJS
+
 %CFILES
 
 %CXXFILES
@@ -90,7 +92,17 @@ $(COMPONENT).filelist: $(OBJS)
                 $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
        done > $(COMPONENT).filelist
 
+$(COMPONENT).libfilelist: $(LIBOBJS)
+       @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+       $(_v)for obj in ${LIBOBJS}; do  \
+                $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+       done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
 do_all: $(COMPONENT).filelist
+endif
 
 do_build_all:: do_all
 
index e4e0aec7d72b060e5910a7dfa99ee58787ebcc5d..f00ff970a707e8f4ac248df8456c5036df5043c6 100644 (file)
@@ -15,8 +15,10 @@ LLDBMACROS_BOOTSTRAP_DEST:=$(OBJPATH)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSD
 LLDBMACROS_DEST:=$(LLDBMACROS_BOOTSTRAP_DEST)/lldbmacros/
 LLDBMACROS_USERDEBUG_FILES=
 ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
 KERNEL_STATIC_DSYM_LLDBMACROS := $(OBJPATH)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros/
 endif
+endif
 
 LLDBMACROS_USERDEBUG_FILES:= \
        usertaskdebugging/__init__.py \
@@ -93,19 +95,25 @@ $(eval $(call INSTALLPYTHON_RULE_template,$(INSTALL_LLDBMACROS_PYTHON_FILES),$(L
 $(eval $(call INSTALLPYTHON_RULE_template,$(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbpydir,$(DATA_UNIFDEF),$(LLDBMACROS_BOOTSTRAP_DEST)/))
 
 ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
 INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES=$(addprefix $(KERNEL_STATIC_DSYM_LLDBMACROS), $(LLDBMACROS_PYTHON_FILES))
 $(eval $(call INSTALLPYTHON_RULE_template,$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES),$(LLDBMACROS_SOURCE)%,sdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)))
 $(eval $(call INSTALLPYTHON_RULE_template,$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbsdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)/../))
 endif
+endif
 
 ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
 STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS := \
        $(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES) \
        $(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME)
 endif
+endif
 
 lldbmacros_install: $(INSTALL_LLDBMACROS_PYTHON_FILES) $(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME) $(STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS)
        $(_v)$(MKDIR) $(LLDBMACROS_DEST)/builtinkexts
 ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
        $(_v)$(MKDIR) $(KERNEL_STATIC_DSYM_LLDBMACROS)/builtinkexts
 endif
+endif