git.saurik.com Git - apple/xnu.git/commitdiff
xnu-7195.60.75.tar.gz  (tag: v7195.60.75)
author    Apple <opensource@apple.com>  Tue, 2 Feb 2021 00:24:03 +0000 (00:24 +0000)
committer Apple <opensource@apple.com>  Tue, 2 Feb 2021 00:24:03 +0000 (00:24 +0000)
183 files changed:
EXTERNAL_HEADERS/corecrypto/cc.h
EXTERNAL_HEADERS/corecrypto/ccdigest.h
EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h
EXTERNAL_HEADERS/corecrypto/cckprng.h
EXTERNAL_HEADERS/corecrypto/ccmode_impl.h
EXTERNAL_HEADERS/corecrypto/ccmode_siv.h
EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h
EXTERNAL_HEADERS/corecrypto/ccrng.h
Makefile
SETUP/kextsymboltool/kextsymboltool.c
bsd/conf/Makefile.arm64
bsd/conf/files
bsd/crypto/entropy/Makefile
bsd/crypto/entropy/diag_entropy_sysctl.c [deleted file]
bsd/crypto/entropy/diag_entropy_sysctl.h [deleted file]
bsd/crypto/entropy/entropy_sysctl.c [new file with mode: 0644]
bsd/crypto/entropy/entropy_sysctl.h [new file with mode: 0644]
bsd/dev/arm/kern_machdep.c
bsd/dev/arm64/sysctl.c
bsd/dev/dtrace/fasttrap.c
bsd/kern/kern_authorization.c
bsd/kern/kern_descrip.c
bsd/kern/kern_exec.c
bsd/kern/kern_memorystatus.c
bsd/kern/kern_memorystatus_freeze.c
bsd/kern/proc_info.c
bsd/kern/ubc_subr.c
bsd/kern/uipc_mbuf.c
bsd/kern/uipc_socket.c
bsd/kern/uipc_usrreq.c
bsd/miscfs/bindfs/bind_subr.c
bsd/miscfs/nullfs/null_subr.c
bsd/net/content_filter.c
bsd/net/ether_inet6_pr_module.c
bsd/net/ether_inet_pr_module.c
bsd/net/if_6lowpan.c
bsd/net/multicast_list.c
bsd/net/ndrv.c
bsd/net/necp.c
bsd/net/necp.h
bsd/netinet/flow_divert.c
bsd/netinet/flow_divert_proto.h
bsd/netinet/in_arp.c
bsd/netinet/ip_output.c
bsd/netinet/mptcp_subr.c
bsd/netinet/tcp_input.c
bsd/netinet/tcp_usrreq.c
bsd/netinet6/ip6_output.c
bsd/netinet6/ipsec.c
bsd/nfs/nfs_bio.c
bsd/sys/file_internal.h
bsd/sys/kern_memorystatus.h
bsd/sys/kern_memorystatus_freeze.h
bsd/sys/mbuf.h
bsd/sys/proc_internal.h
bsd/vm/vm_unix.c
config/MASTER
config/MASTER.arm
config/MASTER.arm64
config/MASTER.arm64.BridgeOS
config/MASTER.arm64.MacOSX
config/MASTER.arm64.bcm2837
config/MASTER.arm64.iPhoneOS
config/MASTER.x86_64
config/MasterVersion
config/Private.arm64.exports
iokit/IOKit/IOHibernatePrivate.h
iokit/IOKit/IONVRAM.h
iokit/Kernel/IOHibernateIO.cpp
iokit/Kernel/IOHibernateInternal.h
iokit/Kernel/IOHibernateRestoreKernel.c
iokit/Kernel/IONVRAM.cpp
iokit/Kernel/IOPMrootDomain.cpp
iokit/Kernel/IOPolledInterface.cpp
iokit/conf/Makefile.arm64
libkern/c++/OSKext.cpp
libkern/libkern/section_keywords.h
makedefs/MakeInc.cmd
makedefs/MakeInc.color [new file with mode: 0644]
makedefs/MakeInc.def
makedefs/MakeInc.kernel
makedefs/MakeInc.rule
makedefs/MakeInc.top
osfmk/arm/arm_init.c
osfmk/arm/caches_asm.s
osfmk/arm/caches_macros.s [deleted file]
osfmk/arm/cpu_data_internal.h
osfmk/arm/cpuid.c
osfmk/arm/cpuid.h
osfmk/arm/genassym.c
osfmk/arm/globals_asm.h
osfmk/arm/locore.s
osfmk/arm/machine_routines.h
osfmk/arm/machine_routines_apple.c [new file with mode: 0644]
osfmk/arm/machine_routines_asm.s
osfmk/arm/misc_protos.h
osfmk/arm/pmap.c
osfmk/arm/pmap.h
osfmk/arm/proc_reg.h
osfmk/arm/start.s
osfmk/arm/thread.h
osfmk/arm/trustcache.c [new file with mode: 0644]
osfmk/arm64/amcc_rorgn.c
osfmk/arm64/arm_vm_init.c
osfmk/arm64/cpu.c
osfmk/arm64/cswitch.s
osfmk/arm64/exception_asm.h
osfmk/arm64/genassym.c
osfmk/arm64/hibernate_arm64.c
osfmk/arm64/hibernate_ppl_hmac.c
osfmk/arm64/hibernate_restore.c
osfmk/arm64/locore.s
osfmk/arm64/machine_routines.c
osfmk/arm64/machine_routines_asm.s
osfmk/arm64/pac_asm.h
osfmk/arm64/pal_hibernate.h
osfmk/arm64/pcb.c
osfmk/arm64/proc_reg.h
osfmk/arm64/sleh.c
osfmk/arm64/start.s
osfmk/arm64/status.c
osfmk/arm64/tlb.h
osfmk/arm64/tunables/tunables.s
osfmk/arm64/tunables/tunables_h13.s [new file with mode: 0644]
osfmk/conf/Makefile.arm64
osfmk/conf/files.arm
osfmk/conf/files.arm64
osfmk/i386/machine_routines.c
osfmk/i386/machine_routines.h
osfmk/i386/trap.c
osfmk/ipc/ipc_importance.c
osfmk/ipc/ipc_kmsg.c
osfmk/ipc/ipc_kmsg.h
osfmk/ipc/ipc_port.c
osfmk/ipc/ipc_port.h
osfmk/kern/backtrace.c
osfmk/kern/host.c
osfmk/kern/processor.c
osfmk/kern/processor.h
osfmk/kern/restartable.c
osfmk/kern/sched_prim.c
osfmk/kern/sched_prim.h
osfmk/kern/startup.c
osfmk/kern/startup.h
osfmk/kern/zalloc.c
osfmk/mach/arm/vm_param.h
osfmk/mach/machine.h
osfmk/prng/entropy.c
osfmk/prng/entropy.h
osfmk/prng/prng_random.c
osfmk/tests/pmap_tests.c
osfmk/vm/pmap.h
osfmk/vm/vm_fault.c
osfmk/vm/vm_map.c
osfmk/vm/vm_map.h
osfmk/vm/vm_map_store.c
osfmk/vm/vm_protos.h
osfmk/vm/vm_resident.c
osfmk/vm/vm_shared_region.c
osfmk/vm/vm_shared_region.h
osfmk/x86_64/kpc_x86.c
pexpert/gen/device_tree.c
pexpert/pexpert/arm64/H11.h [new file with mode: 0644]
pexpert/pexpert/arm64/H13.h [new file with mode: 0644]
pexpert/pexpert/arm64/Makefile
pexpert/pexpert/arm64/apple_arm64_common.h
pexpert/pexpert/arm64/board_config.h
pexpert/pexpert/device_tree.h
tests/fd.c
tests/kpc.c
tests/kperf_backtracing.c
tests/kperf_helpers.h
tests/memorystatus_freeze_test.c
tests/posix_spawn_archpref.c
tests/posix_spawn_archpref_helper.c
tests/processor_info.c
tests/ptrauth_failure.c
tests/shared_cache_reslide_test.c
tests/sr_entitlement.c
tests/sysctl_hw.c
tools/lldbmacros/mbufs.py
tools/lldbmacros/memory.py
tools/lldbmacros/net.py

diff --git a/EXTERNAL_HEADERS/corecrypto/cc.h b/EXTERNAL_HEADERS/corecrypto/cc.h
index 4ea8e63d08b117dd3177f68587c33f156d5c18d6..3a08ba57f389daa2c4f53440f43651d62c12d795 100644 (file)
@@ -167,4 +167,19 @@ int cc_cmp_safe (size_t num, const void * ptr1, const void * ptr2);
 /* Return the minimum value between S and T. */
 #define CC_MIN(S, T) ({__typeof__(S) _cc_min_s = S; __typeof__(T) _cc_min_t = T; _cc_min_s <= _cc_min_t ? _cc_min_s : _cc_min_t;})
 
+/*
+ When building with "-nostdinc" (i.e. iboot), ptrauth.h is in a non-standard location.
+ This requires a new flag to be used when building iboot: -ibuiltininc.
+ This flag doesn't seem present at the moment in clang. For now let's not
+ diversify in iBoot.
+*/
+#if __has_feature(ptrauth_calls) && (CC_KERNEL || CC_USE_L4 || CC_USE_SEPROM)
+#include <ptrauth.h>
+#define CC_SPTR(_sn_, _n_) \
+    __ptrauth(ptrauth_key_process_independent_code, 1, ptrauth_string_discriminator("cc_" #_sn_ #_n_)) _n_
+#else
+#define CC_SPTR(_sn_, _n_) _n_
+#endif
+
 #endif /* _CORECRYPTO_CC_H_ */
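
Where pointer authentication is available (arm64e kernel, L4, and SEPROM builds, per the condition above), CC_SPTR(_sn_, _n_) attaches a __ptrauth qualifier that signs the member with the process-independent code key, address diversity, and a string discriminator derived from the struct and member names; everywhere else it expands to the bare member name. A minimal sketch of the effect on a declaration, using hypothetical names that are not part of corecrypto:

    struct widget_fns {
        /* With ptrauth: signed with the IA key, address diversity, and the
         * constant discriminator ptrauth_string_discriminator("cc_widget_fnsrun"). */
        int (*CC_SPTR(widget_fns, run))(void *ctx);
    };

    static int run_impl(void *ctx) { (void)ctx; return 0; }

    /* Stores and indirect calls are source-compatible: the compiler signs
     * on assignment and authenticates at the call site. */
    static struct widget_fns fns = { .run = run_impl };

This source compatibility is why the header diffs below can annotate every function-pointer field without touching any initializer or call site.
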
diff --git a/EXTERNAL_HEADERS/corecrypto/ccdigest.h b/EXTERNAL_HEADERS/corecrypto/ccdigest.h
index ce84aa8d4f136593cf196944d72d544c3038f292..0edd802613f21d861031dbcc6713b463702440c5 100644 (file)
@@ -46,9 +46,9 @@ struct ccdigest_info {
     size_t oid_size;
     const unsigned char *oid;
     const void *initial_state;
-    void(*compress)(ccdigest_state_t state, size_t nblocks,
+    void(* CC_SPTR(ccdigest_info, compress))(ccdigest_state_t state, size_t nblocks,
                     const void *data);
-    void(*final)(const struct ccdigest_info *di, ccdigest_ctx_t ctx,
+    void(* CC_SPTR(ccdigest_info, final))(const struct ccdigest_info *di, ccdigest_ctx_t ctx,
                  unsigned char *digest);
 };
 
diff --git a/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h b/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h
index 263dded51603cb45b9e6d66d4c2cedaa1583e9f1..700828a0ca1f2beb6df3da2401c47d5b1a2bbcc4 100644 (file)
@@ -27,7 +27,7 @@ struct ccdrbg_info {
      @param in         Additional input bytes
      @return 0 if successful
      */
-    int (*init)(const struct ccdrbg_info *info, struct ccdrbg_state *drbg,
+    int (*CC_SPTR(ccdrbg_info, init))(const struct ccdrbg_info *info, struct ccdrbg_state *drbg,
                 size_t entropyLength, const void* entropy,
                 size_t nonceLength, const void* nonce,
                 size_t psLength, const void* ps);
@@ -40,7 +40,7 @@ struct ccdrbg_info {
      @param in         Additional input bytes
      @return 0 if successful
      */
-    int (*reseed)(struct ccdrbg_state *prng,
+    int (*CC_SPTR(ccdrbg_info, reseed))(struct ccdrbg_state *prng,
                   size_t entropylen, const void *entropy,
                   size_t inlen, const void *in);
 
@@ -52,14 +52,14 @@ struct ccdrbg_info {
      @param in      Additional input bytes
      @return 0 if successfull
      */
-    int (*generate)(struct ccdrbg_state *prng,
+    int (*CC_SPTR(ccdrbg_info, generate))(struct ccdrbg_state *prng,
                     size_t outlen, void *out,
                     size_t inlen, const void *in);
 
     /*! Terminate a PRNG state
      @param prng   The PRNG state to terminate
      */
-    void (*done)(struct ccdrbg_state *prng);
+    void (*CC_SPTR(ccdrbg_info, done))(struct ccdrbg_state *prng);
 
     /** private parameters */
     const void *custom;
diff --git a/EXTERNAL_HEADERS/corecrypto/cckprng.h b/EXTERNAL_HEADERS/corecrypto/cckprng.h
index 0c97177ffd7a1be78b644cc0680277a7458a2280..79fe22fd3113fd687a5c1b3b013224aed0ef989c 100644 (file)
@@ -220,6 +220,20 @@ struct cckprng_sched_ctx {
     unsigned pool_idx;
 };
 
+// A function pointer to fill an entropy buffer. It should return some
+// estimate of entropy (e.g. the number of timing samples resident in
+// the buffer). The implementation may return zero if no entropy is
+// available. The implementation should return negative in case of an
+// error (e.g. a failure in continuous health tests).
+//
+// The caller should set entropy_nbytes to the maximum size of the
+// input buffer, and the implementation should set it to the number of
+// bytes it has initialized. The third argument is arbitrary state the
+// implementation provides and receives back on each call.
+typedef int32_t (*cckprng_getentropy)(size_t *entropy_nbytes,
+                                      void *entropy,
+                                      void *arg);
+
 struct cckprng_ctx {
     // The master secret of the PRNG
     struct cckprng_key_ctx key;
@@ -250,24 +264,38 @@ struct cckprng_ctx {
 
     // Diagnostics for the PRNG
     struct cckprng_diag diag;
+
+    // A function pointer to get entropy
+    cckprng_getentropy getentropy;
+
+    // An arbitrary piece of state to be provided to the entropy function
+    void *getentropy_arg;
 };
 
 // This collection of function pointers is just a convenience for
 // registering the PRNG with xnu
 struct cckprng_funcs {
-    void (*init)(struct cckprng_ctx *ctx,
-                 unsigned max_ngens,
-                 size_t entropybuf_nbytes,
-                 const void *entropybuf,
-                 const uint32_t *entropybuf_nsamples,
-                 size_t seed_nbytes,
-                 const void *seed,
-                 size_t nonce_nbytes,
-                 const void *nonce);
-    void (*initgen)(struct cckprng_ctx *ctx, unsigned gen_idx);
-    void (*reseed)(struct cckprng_ctx *ctx, size_t nbytes, const void *seed);
-    void (*refresh)(struct cckprng_ctx *ctx);
-    void (*generate)(struct cckprng_ctx *ctx, unsigned gen_idx, size_t nbytes, void *out);
+    void (*CC_SPTR(cckprng_funcs, init))(struct cckprng_ctx *ctx,
+                                         unsigned max_ngens,
+                                         size_t entropybuf_nbytes,
+                                         const void *entropybuf,
+                                         const uint32_t *entropybuf_nsamples,
+                                         size_t seed_nbytes,
+                                         const void *seed,
+                                         size_t nonce_nbytes,
+                                         const void *nonce);
+    void (*CC_SPTR(cckprng_funcs, initgen))(struct cckprng_ctx *ctx, unsigned gen_idx);
+    void (*CC_SPTR(cckprng_funcs, reseed))(struct cckprng_ctx *ctx, size_t nbytes, const void *seed);
+    void (*CC_SPTR(cckprng_funcs, refresh))(struct cckprng_ctx *ctx);
+    void (*CC_SPTR(cckprng_funcs, generate))(struct cckprng_ctx *ctx, unsigned gen_idx, size_t nbytes, void *out);
+    void (*CC_SPTR(cckprng_funcs, init_with_getentropy))(struct cckprng_ctx *ctx,
+                                                         unsigned max_ngens,
+                                                         size_t seed_nbytes,
+                                                         const void *seed,
+                                                         size_t nonce_nbytes,
+                                                         const void *nonce,
+                                                         cckprng_getentropy getentropy,
+                                                         void *getentropy_arg);
 };
 
 /*
@@ -296,6 +324,30 @@ void cckprng_init(struct cckprng_ctx *ctx,
                   size_t nonce_nbytes,
                   const void *nonce);
 
+/*
+  @function cckprng_init_with_getentropy
+  @abstract Initialize a kernel PRNG context.
+
+  @param ctx Context for this instance
+  @param max_ngens Maximum count of generators that may be allocated
+  @param seed_nbytes Length of the seed in bytes
+  @param seed Pointer to a high-entropy seed
+  @param nonce_nbytes Length of the nonce in bytes
+  @param nonce Pointer to a single-use nonce
+  @param getentropy A function pointer to fill an entropy buffer
+  @param getentropy_arg State provided to the entropy function
+
+  @discussion @p max_ngens should be set based on an upper bound of CPUs available on the device. See the @p cckprng_getentropy type definition for discussion on its semantics.
+*/
+void cckprng_init_with_getentropy(struct cckprng_ctx *ctx,
+                                  unsigned max_ngens,
+                                  size_t seed_nbytes,
+                                  const void *seed,
+                                  size_t nonce_nbytes,
+                                  const void *nonce,
+                                  cckprng_getentropy getentropy,
+                                  void *getentropy_arg);
+
 /*
   @function cckprng_initgen
   @abstract Initialize an output generator.
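
Putting the typedef and the new initializer together, registration might look like the following sketch; toy_getentropy is the hypothetical callback sketched earlier, and the seed/nonce plumbing is elided:

    static struct cckprng_ctx prng_ctx;
    static struct toy_source prng_entropy_state;

    static void
    prng_setup(unsigned ncpus,
               const void *seed, size_t seed_nbytes,
               const void *nonce, size_t nonce_nbytes)
    {
        /* max_ngens: an upper bound on the CPU count, per the discussion above */
        cckprng_init_with_getentropy(&prng_ctx, ncpus,
                                     seed_nbytes, seed,
                                     nonce_nbytes, nonce,
                                     toy_getentropy, &prng_entropy_state);
    }
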
diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h
index 849881ed49d74e4b9ab8ec1427e263055e6c2d3d..ff8486b24d2c6b12da7f43922e724d4d85677d9b 100644 (file)
 /* ECB mode. */
 cc_aligned_struct(16) ccecb_ctx;
 
-
 /* Actual symmetric algorithm implementation should provide you one of these. */
 struct ccmode_ecb {
-    size_t size;        /* first argument to ccecb_ctx_decl(). */
+    size_t size; /* first argument to ccecb_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx,
-                size_t key_nbytes, const void *key);
-    int (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in,
-               void *out);
-    void (*roundkey)(const ccecb_ctx *ctx, unsigned r, void *key);
+    int (*CC_SPTR(ccmode_ecb, init))(const struct ccmode_ecb *ecb, ccecb_ctx *ctx, size_t key_nbytes, const void *key);
+    int (*CC_SPTR(ccmode_ecb, ecb))(const ccecb_ctx *ctx, size_t nblocks, const void *in, void *out);
+    void (*CC_SPTR(ccmode_ecb, roundkey))(const ccecb_ctx *ctx, unsigned r, void *key);
 };
 
 /*!
@@ -64,13 +61,11 @@ cc_aligned_struct(16) cccbc_ctx;
 cc_aligned_struct(16) cccbc_iv;
 
 struct ccmode_cbc {
-    size_t size;        /* first argument to cccbc_ctx_decl(). */
+    size_t size; /* first argument to cccbc_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx,
-                size_t key_len, const void *key);
+    int (*CC_SPTR(ccmode_cbc, init))(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, size_t key_len, const void *key);
     /* cbc encrypt or decrypt nblocks from in to out, iv will be used and updated. */
-    int (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv,
-               size_t nblocks, const void *in, void *out);
+    int (*CC_SPTR(ccmode_cbc, cbc))(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, const void *in, void *out);
     const void *custom;
 };
 
@@ -78,11 +73,11 @@ struct ccmode_cbc {
 cc_aligned_struct(16) cccfb_ctx;
 
 struct ccmode_cfb {
-    size_t size;        /* first argument to cccfb_ctx_decl(). */
+    size_t size; /* first argument to cccfb_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx,
-                size_t key_len, const void *key, const void *iv);
-    int (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out);
+    int (*CC_SPTR(ccmode_cfb,
+                  init))(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, size_t key_len, const void *key, const void *iv);
+    int (*CC_SPTR(ccmode_cfb, cfb))(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out);
     const void *custom;
 };
 
@@ -90,11 +85,11 @@ struct ccmode_cfb {
 cc_aligned_struct(16) cccfb8_ctx;
 
 struct ccmode_cfb8 {
-    size_t size;        /* first argument to cccfb8_ctx_decl(). */
+    size_t size; /* first argument to cccfb8_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx,
-                size_t key_len, const void *key, const void *iv);
-    int (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out);
+    int (*CC_SPTR(ccmode_cfb8,
+                  init))(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, size_t key_len, const void *key, const void *iv);
+    int (*CC_SPTR(ccmode_cfb8, cfb8))(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out);
     const void *custom;
 };
 
@@ -102,13 +97,13 @@ struct ccmode_cfb8 {
 cc_aligned_struct(16) ccctr_ctx;
 
 struct ccmode_ctr {
-    size_t size;        /* first argument to ccctr_ctx_decl(). */
-    size_t block_size;  /* for historical reasons, this is set to 1 */
-    size_t ecb_block_size;  /* the actual block size of the underlying cipher */
-    int (*init)(const struct ccmode_ctr *mode, ccctr_ctx *ctx,
-                size_t key_len, const void *key, const void *iv);
-    int (*setctr)(const struct ccmode_ctr *mode, ccctr_ctx *ctx, const void *ctr);
-    int (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out);
+    size_t size;           /* first argument to ccctr_ctx_decl(). */
+    size_t block_size;     /* for historical reasons, this is set to 1 */
+    size_t ecb_block_size; /* the actual block size of the underlying cipher */
+    int (*CC_SPTR(ccmode_ctr,
+                  init))(const struct ccmode_ctr *mode, ccctr_ctx *ctx, size_t key_len, const void *key, const void *iv);
+    int (*CC_SPTR(ccmode_ctr, setctr))(const struct ccmode_ctr *mode, ccctr_ctx *ctx, const void *ctr);
+    int (*CC_SPTR(ccmode_ctr, ctr))(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out);
     const void *custom;
 };
 
@@ -116,11 +111,11 @@ struct ccmode_ctr {
 cc_aligned_struct(16) ccofb_ctx;
 
 struct ccmode_ofb {
-    size_t size;        /* first argument to ccofb_ctx_decl(). */
+    size_t size; /* first argument to ccofb_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx,
-                size_t key_len, const void *key, const void *iv);
-    int (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out);
+    int (*CC_SPTR(ccmode_ofb,
+                  init))(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, size_t key_len, const void *key, const void *iv);
+    int (*CC_SPTR(ccmode_ofb, ofb))(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out);
     const void *custom;
 };
 
@@ -129,8 +124,8 @@ cc_aligned_struct(16) ccxts_ctx;
 cc_aligned_struct(16) ccxts_tweak;
 
 struct ccmode_xts {
-    size_t size;        /* first argument to ccxts_ctx_decl(). Size of the ctx data structure */
-    size_t tweak_size;  /* first argument to ccxts_tweak_decl(). Size of the tweak structure, not the expected tweak size */
+    size_t size;       /* first argument to ccxts_ctx_decl(). Size of the ctx data structure */
+    size_t tweak_size; /* first argument to ccxts_tweak_decl(). Size of the tweak structure, not the expected tweak size */
     size_t block_size;
 
     /* Create a xts key from a xts mode object.
@@ -139,72 +134,83 @@ struct ccmode_xts {
      key and tweak_key must differ.
      Returns nonzero on failure.
      */
-    int (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx,
-                size_t key_nbytes, const void *data_key, const void *tweak_key);
-
-    void (*key_sched)(const struct ccmode_xts *xts, ccxts_ctx *ctx,
-                      size_t key_nbytes, const void *data_key, const void *tweak_key);
+    int (*CC_SPTR(ccmode_xts, init))(const struct ccmode_xts *xts,
+                                     ccxts_ctx *ctx,
+                                     size_t key_nbytes,
+                                     const void *data_key,
+                                     const void *tweak_key);
+
+    void (*CC_SPTR(ccmode_xts, key_sched))(const struct ccmode_xts *xts,
+                                           ccxts_ctx *ctx,
+                                           size_t key_nbytes,
+                                           const void *data_key,
+                                           const void *tweak_key);
 
     /* Set the tweak (sector number), the block within the sector zero. */
-    int (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv);
+    int (*CC_SPTR(ccmode_xts, set_tweak))(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv);
 
     /* Encrypt blocks for a sector, clients must call set_tweak before calling
        this function. Return a pointer to the tweak buffer */
-    void *(*xts)(const ccxts_ctx *ctx, ccxts_tweak *tweak,
-                 size_t nblocks, const void *in, void *out);
+    void *(*CC_SPTR(ccmode_xts, xts))(const ccxts_ctx *ctx, ccxts_tweak *tweak, size_t nblocks, const void *in, void *out);
 
     const void *custom;
     const void *custom1;
 };
 
-//7- GCM mode, statful
+// 7- GCM mode, statful
 cc_aligned_struct(16) ccgcm_ctx;
-#define  CCMODE_GCM_DECRYPTOR 78647
-#define  CCMODE_GCM_ENCRYPTOR 4073947
+#define CCMODE_GCM_DECRYPTOR 78647
+#define CCMODE_GCM_ENCRYPTOR 4073947
 
 struct ccmode_gcm {
-    size_t size;        /* first argument to ccgcm_ctx_decl(). */
-    int encdec;        //is it encrypt or decrypt object
+    size_t size; /* first argument to ccgcm_ctx_decl(). */
+    int encdec;  // is it encrypt or decrypt object
     size_t block_size;
-    int (*init)(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx,
-                 size_t key_nbytes, const void *key);
-    int (*set_iv)(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv);
-    int (*gmac)(ccgcm_ctx *ctx, size_t nbytes, const void *in);  // could just be gcm with NULL out
-    int (*gcm)(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out);
-    int (*finalize)(ccgcm_ctx *key, size_t tag_nbytes, void *tag);
-    int (*reset)(ccgcm_ctx *ctx);
+    int (*CC_SPTR(ccmode_gcm, init))(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx, size_t key_nbytes, const void *key);
+    int (*CC_SPTR(ccmode_gcm, set_iv))(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv);
+    int (*CC_SPTR(ccmode_gcm, gmac))(ccgcm_ctx *ctx, size_t nbytes, const void *in); // could just be gcm with NULL out
+    int (*CC_SPTR(ccmode_gcm, gcm))(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out);
+    int (*CC_SPTR(ccmode_gcm, finalize))(ccgcm_ctx *key, size_t tag_nbytes, void *tag);
+    int (*CC_SPTR(ccmode_gcm, reset))(ccgcm_ctx *ctx);
     const void *custom;
 };
 
-//8- CCM mode, stateful
+// 8- CCM mode, stateful
 cc_aligned_struct(16) ccccm_ctx;
 cc_aligned_struct(16) ccccm_nonce;
 
 struct ccmode_ccm {
-    size_t size;        /* first argument to ccccm_ctx_decl(). */
-    size_t nonce_size;  /* first argument to ccccm_nonce_decl(). */
+    size_t size;       /* first argument to ccccm_ctx_decl(). */
+    size_t nonce_size; /* first argument to ccccm_nonce_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_ccm *ccm, ccccm_ctx *ctx,
-                 size_t key_len, const void *key);
-    int (*set_iv)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nonce_len, const void *nonce,
-                   size_t mac_size, size_t auth_len, size_t data_len);
-    int (*cbcmac)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in);  // could just be ccm with NULL out
-    int (*ccm)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in, void *out);
-    int (*finalize)(ccccm_ctx *key, ccccm_nonce *nonce_ctx, void *mac);
-    int (*reset)(ccccm_ctx *key, ccccm_nonce *nonce_ctx);
+    int (*CC_SPTR(ccmode_ccm, init))(const struct ccmode_ccm *ccm, ccccm_ctx *ctx, size_t key_len, const void *key);
+    int (*CC_SPTR(ccmode_ccm, set_iv))(ccccm_ctx *ctx,
+                                       ccccm_nonce *nonce_ctx,
+                                       size_t nonce_len,
+                                       const void *nonce,
+                                       size_t mac_size,
+                                       size_t auth_len,
+                                       size_t data_len);
+    int (*CC_SPTR(ccmode_ccm, cbcmac))(ccccm_ctx *ctx,
+                                       ccccm_nonce *nonce_ctx,
+                                       size_t nbytes,
+                                       const void *in); // could just be ccm with NULL out
+    int (*CC_SPTR(ccmode_ccm, ccm))(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in, void *out);
+    int (*CC_SPTR(ccmode_ccm, finalize))(ccccm_ctx *key, ccccm_nonce *nonce_ctx, void *mac);
+    int (*CC_SPTR(ccmode_ccm, reset))(ccccm_ctx *key, ccccm_nonce *nonce_ctx);
     const void *custom;
 };
 
 /* We need to expose this (currently)to keep CommonCrypto happy. */
 struct _ccmode_ccm_nonce {
-    unsigned char A_i[16];      /* crypto block iv */
-    unsigned char B_i[16];      /* mac block iv */
-    unsigned char MAC[16];      /* crypted mac */
-    unsigned char buf[16];      /* crypt buffer */
+    unsigned char A_i[16]; /* crypto block iv */
+    unsigned char B_i[16]; /* mac block iv */
+    unsigned char MAC[16]; /* crypted mac */
+    unsigned char buf[16]; /* crypt buffer */
 
-    uint32_t mode;         /* mode: IV -> AD -> DATA */
-    uint32_t buflen;       /* length of data in buf */
-    uint32_t b_i_len;      /* length of cbcmac data in B_i */
+    uint32_t mode;    /* mode: IV -> AD -> DATA */
+    uint32_t buflen;  /* length of data in buf */
+    uint32_t b_i_len; /* length of cbcmac data in B_i */
 
     size_t nonce_size;
     size_t mac_size;
@@ -214,12 +220,11 @@ struct _ccmode_ccm_nonce {
 cc_aligned_struct(16) ccomac_ctx;
 
 struct ccmode_omac {
-    size_t size;        /* first argument to ccomac_ctx_decl(). */
+    size_t size; /* first argument to ccomac_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx,
-                size_t tweak_len, size_t key_len, const void *key);
-    int (*omac)(ccomac_ctx *ctx, size_t nblocks,
-                const void *tweak, const void *in, void *out);
+    int (*CC_SPTR(ccmode_omac,
+                  init))(const struct ccmode_omac *omac, ccomac_ctx *ctx, size_t tweak_len, size_t key_len, const void *key);
+    int (*CC_SPTR(ccmode_omac, omac))(ccomac_ctx *ctx, size_t nblocks, const void *tweak, const void *in, void *out);
     const void *custom;
 };
 
diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h
index 5d40c1dd13e5615485c489ae7c86893a076dd950..a1df1a480b8f516d5c6b417444cd55666188ed94 100644 (file)
@@ -29,12 +29,12 @@ cc_aligned_struct(16) ccsiv_ctx;
 struct ccmode_siv {
     size_t size;        /* first argument to ccsiv_ctx_decl(). */
     size_t block_size;
-    int (*init)(const struct ccmode_siv *siv, ccsiv_ctx *ctx,
+    int (*CC_SPTR(ccmode_siv, init))(const struct ccmode_siv *siv, ccsiv_ctx *ctx,
                  size_t key_len, const uint8_t *key);
-    int (*set_nonce)(ccsiv_ctx *ctx,  size_t nbytes, const uint8_t *in);  // could just be ccm with NULL out
-    int (*auth)(ccsiv_ctx *ctx,  size_t nbytes, const uint8_t *in);  // could just be ccm with NULL out
-    int (*crypt)(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out);
-    int (*reset)(ccsiv_ctx *ctx);
+    int (*CC_SPTR(ccmode_siv, set_nonce))(ccsiv_ctx *ctx,  size_t nbytes, const uint8_t *in);  // could just be ccm with NULL out
+    int (*CC_SPTR(ccmode_siv, auth))(ccsiv_ctx *ctx,  size_t nbytes, const uint8_t *in);  // could just be ccm with NULL out
+    int (*CC_SPTR(ccmode_siv, crypt))(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out);
+    int (*CC_SPTR(ccmode_siv, reset))(ccsiv_ctx *ctx);
     const struct ccmode_cbc *cbc;
     const struct ccmode_ctr *ctr;
 };
diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h
index eba951c50bbcd3e36bb352a8092968db8c2094f7..c828135da63257b32eb983aebf26c15a8329efac 100644 (file)
@@ -34,15 +34,15 @@ struct ccmode_siv_hmac {
     size_t size; /* first argument to ccsiv_hmac_ctx_decl(). */
     size_t block_size;
     
-    int (*init)(const struct ccmode_siv_hmac *sivhmac,
+    int (*CC_SPTR(ccmode_siv_hmac, init))(const struct ccmode_siv_hmac *sivhmac,
                 ccsiv_hmac_ctx *ctx,
                 size_t key_len,
                 const uint8_t *key,
                 const size_t tag_size);
-    int (*set_nonce)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in);
-    int (*auth)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in);      
-    int (*crypt)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out);
-    int (*reset)(ccsiv_hmac_ctx *ctx);
+    int (*CC_SPTR(ccmode_siv_hmac, set_nonce))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in);
+    int (*CC_SPTR(ccmode_siv_hmac, auth))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in);
+    int (*CC_SPTR(ccmode_siv_hmac, crypt))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out);
+    int (*CC_SPTR(ccmode_siv_hmac, reset))(ccsiv_hmac_ctx *ctx);
     const struct ccdigest_info *hmac_digest; // Digest to be used in HMAC;
     const struct ccmode_ctr *ctr;
 };
diff --git a/EXTERNAL_HEADERS/corecrypto/ccrng.h b/EXTERNAL_HEADERS/corecrypto/ccrng.h
index 4582ddab6855c5dd53d663328e9e94822d38af79..d38115a8b2416af1b07c0f86ecf4238d63914bbf 100644 (file)
@@ -15,7 +15,7 @@
 #include <corecrypto/cc.h>
 
 #define CCRNG_STATE_COMMON \
-    int (*generate)(struct ccrng_state *rng, size_t outlen, void *out);
+    int (*CC_SPTR(ccrng_state, generate))(struct ccrng_state *rng, size_t outlen, void *out);
 
 /*!
  @type      struct ccrng_state
diff --git a/Makefile b/Makefile
index 30e496301ed2c0c9429fc280b3fa45fa7da465c2..a3afec9d09dd54b2dccc3b2d85d1e964cb0eecec 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 1999-2016 Apple Inc. All rights reserved.
+# Copyright (C) 1999-2020 Apple Inc. All rights reserved.
 #
 ifndef VERSDIR
 export VERSDIR := $(shell /bin/pwd)
@@ -186,7 +186,6 @@ TOP_TARGETS = \
        install install_desktop install_embedded \
        install_release_embedded install_development_embedded \
        install_kernels \
-       installopensource \
        cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \
        help
 
@@ -317,6 +316,7 @@ xnu_tests_driverkit:
        $(MAKE) -C $(SRCROOT)/tests/driverkit $(if $(filter -j,$(MAKEFLAGS)),,$(MAKEJOBS)) \
                SRCROOT=$(SRCROOT)/tests/driverkit
 
+
 #
 # The "analyze" target defined below invokes Clang Static Analyzer
 # with a predefined set of checks and options for the project.
diff --git a/SETUP/kextsymboltool/kextsymboltool.c b/SETUP/kextsymboltool/kextsymboltool.c
index 46f644b552d3f85206b1a9cb31ca19957ffc21b5..2954a391f074b74055ad9b2e37b75f57e14e2e0a 100644 (file)
@@ -476,6 +476,7 @@ lookup_arch(const char *archstring)
                { "armv7s", 12 /* CPU_TYPE_ARM */, 11 /* CPU_SUBTYPE_ARM_V7S */, NX_LittleEndian, NULL },
                { "armv7k", 12 /* CPU_TYPE_ARM */, 12 /* CPU_SUBTYPE_ARM_V7K */, NX_LittleEndian, NULL },
                { "arm64", 0x0100000c /* CPU_TYPE_ARM64 */, 0 /* CPU_SUBTYPE_ARM64_ALL */, NX_LittleEndian, NULL },
+               { "arm64e", 0x0100000c /* CPU_TYPE_ARM64 */, 2 /* CPU_SUBTYPE_ARM64_E */, NX_LittleEndian, NULL },
        };
        unsigned long i;
 
diff --git a/bsd/conf/Makefile.arm64 b/bsd/conf/Makefile.arm64
index 333857edb9e68c2bc62c808445239c6f81d13bff..c7d4f4a2e40373ed4530d2f2e296b055351a0c83 100644 (file)
@@ -5,6 +5,7 @@
 # Inline assembly doesn't interact well with LTO
 fbt_arm.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG)
 
+kern_csr.o_CFLAGS_ADD += -I$(SRCROOT)/osfmk
 
 ######################################################################
 #END    Machine dependent Makefile fragment for arm
diff --git a/bsd/conf/files b/bsd/conf/files
index 258176a7ed42c2a4e93d744d096032f7af67a3ff..6a99242b6c379eb047ee644384792441aba9112e 100644 (file)
@@ -371,7 +371,7 @@ bsd/netkey/keydb.c                  optional ipsec
 
 bsd/net/multi_layer_pkt_log.c          optional inet inet ipsec ipsec_esp
 
-bsd/crypto/entropy/diag_entropy_sysctl.c standard
+bsd/crypto/entropy/entropy_sysctl.c standard
 
 #bsd/netpm/pm_aTT.c        optional pm
 #bsd/netpm/pm_ams.c        optional pm
diff --git a/bsd/crypto/entropy/Makefile b/bsd/crypto/entropy/Makefile
index a3ad2c884c67e36d49c535f357597978f3816673..2d1197ce747ef90d49c48bec522cacbacd37ebed 100644 (file)
@@ -7,7 +7,7 @@ include $(MakeInc_cmd)
 include $(MakeInc_def)
 
 DATAFILES = \
-       diag_entropy_sysctl.h
+       entropy_sysctl.h
 
 INSTALL_MI_LIST =
 
diff --git a/bsd/crypto/entropy/diag_entropy_sysctl.c b/bsd/crypto/entropy/diag_entropy_sysctl.c
deleted file mode 100644 (file)
index af53298..0000000
--- a/bsd/crypto/entropy/diag_entropy_sysctl.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2019 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-#include <sys/sysctl.h>
-#include <crypto/entropy/diag_entropy_sysctl.h>
-#include <prng/entropy.h>
-
-extern entropy_data_t EntropyData;
-
-static int
-sysctl_entropy_collect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
-{
-       if (!req->oldptr || req->oldlen > EntropyData.buffer_size) {
-               return EINVAL;
-       }
-       return SYSCTL_OUT(req, EntropyData.buffer, req->oldlen);
-}
-
-SYSCTL_NODE(_kern, OID_AUTO, entropy, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, 0, NULL);
-// Get current size of entropy buffer in bytes
-SYSCTL_UINT(_kern_entropy, OID_AUTO, entropy_buffer_size, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, &EntropyData.buffer_size, 0, NULL);
-// Collect contents from entropy buffer
-SYSCTL_PROC(_kern_entropy, OID_AUTO, entropy_collect, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, NULL, 0, sysctl_entropy_collect, "-", NULL);
-
-void
-register_entropy_sysctl(void)
-{
-       sysctl_register_oid(&sysctl__kern_entropy);
-       sysctl_register_oid(&sysctl__kern_entropy_entropy_buffer_size);
-       sysctl_register_oid(&sysctl__kern_entropy_entropy_collect);
-}
diff --git a/bsd/crypto/entropy/diag_entropy_sysctl.h b/bsd/crypto/entropy/diag_entropy_sysctl.h
deleted file mode 100644 (file)
index c05650e..0000000
--- a/bsd/crypto/entropy/diag_entropy_sysctl.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2019 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-#ifndef _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_
-#define _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_
-
-void register_entropy_sysctl(void);
-
-#endif
diff --git a/bsd/crypto/entropy/entropy_sysctl.c b/bsd/crypto/entropy/entropy_sysctl.c
new file mode 100644 (file)
index 0000000..39502f7
--- /dev/null
+++ b/bsd/crypto/entropy/entropy_sysctl.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <sys/sysctl.h>
+#include <kern/zalloc.h>
+#include <kern/percpu.h>
+#include <crypto/entropy/entropy_sysctl.h>
+#include <prng/entropy.h>
+#include <libkern/section_keywords.h>
+
+SYSCTL_NODE(_kern, OID_AUTO, entropy, CTLFLAG_RD, 0, NULL);
+SYSCTL_NODE(_kern_entropy, OID_AUTO, health, CTLFLAG_RD, 0, NULL);
+
+SYSCTL_INT(_kern_entropy_health, OID_AUTO, startup_done, CTLFLAG_RD, &entropy_health_startup_done, 0, NULL);
+
+SYSCTL_NODE(_kern_entropy_health, OID_AUTO, repetition_count_test, CTLFLAG_RD, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, reset_count, CTLFLAG_RD, &entropy_health_rct_stats.reset_count, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, failure_count, CTLFLAG_RD, &entropy_health_rct_stats.failure_count, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, max_observation_count, CTLFLAG_RD, &entropy_health_rct_stats.max_observation_count, 0, NULL);
+
+SYSCTL_NODE(_kern_entropy_health, OID_AUTO, adaptive_proportion_test, CTLFLAG_RD, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, reset_count, CTLFLAG_RD, &entropy_health_apt_stats.reset_count, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, failure_count, CTLFLAG_RD, &entropy_health_apt_stats.failure_count, 0, NULL);
+SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, max_observation_count, CTLFLAG_RD, &entropy_health_apt_stats.max_observation_count, 0, NULL);
+
+static int
+sysctl_entropy_collect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
+{
+       if (!req->oldptr || req->oldlen > entropy_analysis_buffer_size) {
+               return EINVAL;
+       }
+
+       return SYSCTL_OUT(req, entropy_analysis_buffer, req->oldlen);
+}
+
+// Get current size of entropy buffer in bytes
+SYSCTL_UINT(_kern_entropy, OID_AUTO, entropy_buffer_size, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, &entropy_analysis_buffer_size, 0, NULL);
+// Collect contents from entropy buffer
+SYSCTL_PROC(_kern_entropy, OID_AUTO, entropy_collect, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, NULL, 0, sysctl_entropy_collect, "-", NULL);
+
+void
+entropy_analysis_register_sysctls(void)
+{
+       sysctl_register_oid(&sysctl__kern_entropy_entropy_buffer_size);
+       sysctl_register_oid(&sysctl__kern_entropy_entropy_collect);
+}
diff --git a/bsd/crypto/entropy/entropy_sysctl.h b/bsd/crypto/entropy/entropy_sysctl.h
new file mode 100644 (file)
index 0000000..4e957fb
--- /dev/null
+++ b/bsd/crypto/entropy/entropy_sysctl.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _SYS_CRYPTO_ENTROPY_ENTROPYSYSCTL_H_
+#define _SYS_CRYPTO_ENTROPY_ENTROPYSYSCTL_H_
+
+// This function is used only for test purposes. We collect a large
+// number of entropy samples during boot and analyze them offline.
+//
+// See entropy.c to understand the initialization of this module via
+// boot arg and the collection of the samples.
+//
+// See entropy_sysctl.c to understand the semantics of the sysctl
+// that exposes the samples for analysis.
+void entropy_analysis_register_sysctls(void);
+
+#endif
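
The health counters registered in entropy_sysctl.c above are ordinary auto-registered read-only sysctls; only the analysis pair (entropy_buffer_size/entropy_collect) is CTLFLAG_NOAUTO and comes online via entropy_analysis_register_sysctls() when boot-time sampling is enabled. A userspace sketch reading one of the always-available counters:

    #include <stdio.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        int done = 0;
        size_t len = sizeof(done);

        /* Reports whether the SP 800-90B-style startup health tests completed */
        if (sysctlbyname("kern.entropy.health.startup_done", &done, &len, NULL, 0) == 0) {
            printf("startup health tests done: %d\n", done);
        }
        return 0;
    }
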
diff --git a/bsd/dev/arm/kern_machdep.c b/bsd/dev/arm/kern_machdep.c
index 9caed58b187234a13e741eda4f069c7f808da2c8..02c9e723c22b27c574335122f98891cfb1c75947 100644 (file)
@@ -37,6 +37,31 @@ cpu_subtype32()
        }
 }
 
+static int
+grade_arm64e_binary(cpu_subtype_t execfeatures)
+{
+#if XNU_TARGET_OS_IOS
+       /*
+        * iOS 13 toolchains produced unversioned arm64e slices which are not
+        * ABI compatible with this release.
+        */
+       if ((execfeatures & CPU_SUBTYPE_PTRAUTH_ABI) == 0) {
+#if DEBUG || DEVELOPMENT
+               printf("%s: arm64e prerelease ABI cannot be used with this kernel\n", __func__);
+#endif /* DEBUG || DEVELOPMENT */
+               return 0;
+       }
+#endif /* XNU_TARGET_OS_IOS */
+
+       /* The current ABI version is preferred over arm64 */
+       if (CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(execfeatures) ==
+           CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION) {
+               return 12;
+       }
+
+       /* Future ABIs are allowed, but exec_mach_imgact will treat it like an arm64 slice */
+       return 11;
+}
 #endif /* __arm64__ */
 
 /**********************************************************************
@@ -70,6 +95,15 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, cpu_subtype_t execf
                        }
                        break;
 
+               case CPU_SUBTYPE_ARM64E:
+                       switch (execsubtype) {
+                       case CPU_SUBTYPE_ARM64E:
+                               return grade_arm64e_binary(execfeatures);
+                       case CPU_SUBTYPE_ARM64_V8:
+                               return 10;
+                       case CPU_SUBTYPE_ARM64_ALL:
+                               return 9;
+                       }
                } /* switch (hostsubtype) */
                break;
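
The grades matter only relative to one another: fat-slice selection keeps the slice whose grade is highest, so on an arm64e device this hunk yields current-ABI arm64e (12) over future-ABI arm64e (11) over arm64v8 (10) over plain arm64 (9), with unversioned iOS 13 arm64e slices graded 0 (not executable) on iOS. A distilled sketch of that selection rule:

    /* Sketch: keep the highest grade seen; 0 means "cannot execute here". */
    static int
    pick_best_grade(const int *grades, int n)
    {
        int best = 0;
        for (int i = 0; i < n; i++) {
            if (grades[i] > best) {
                best = grades[i];
            }
        }
        return best;
    }
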
 
diff --git a/bsd/dev/arm64/sysctl.c b/bsd/dev/arm64/sysctl.c
index ef46d1af728c138d6409d98fdbddaeaf97c35279..70c3bf468433334425a4fd0b1177352c137c7228 100644 (file)
@@ -10,7 +10,9 @@
 #include <mach/host_info.h>
 #include <mach/mach_host.h>
 #include <arm/cpuid.h>
+#include <kern/zalloc.h>
 #include <libkern/libkern.h>
+#include <pexpert/device_tree.h>
 
 #if HYPERVISOR
 #include <kern/hv_support.h>
@@ -159,12 +161,49 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count,
     sizeof(integer_t),
     arm_host_info, "I", "Number of enabled threads per package");
 
+static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL;
+static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0;
+
+/*
+ * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(),
+ * so we load the brand string (if available) in a startup handler.
+ */
+__startup_func
+static void
+sysctl_load_brand_string(void)
+{
+       DTEntry node;
+       void const *value = NULL;
+       unsigned int size = 0;
+
+       if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) {
+               return;
+       }
+
+       if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) {
+               return;
+       }
+
+       if (size == 0) {
+               return;
+       }
+
+       brand_string = zalloc_permanent(size, ZALIGN_NONE);
+       if (brand_string == NULL) {
+               return;
+       }
+
+       memcpy(brand_string, value, size);
+       brand_string_len = size;
+}
+STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string);
+
 /*
  * machdep.cpu.brand_string
  *
  * x86: derived from CPUID data.
- * ARM: cons something up from the CPUID register. Could include cpufamily
- *     here and map it to a "marketing" name, but there's no obvious need;
+ * ARM: Grab the product string from the device tree, if it exists.
+ *      Otherwise, cons something up from the CPUID register.
  *      the value is already exported via the commpage. So keep it simple.
  */
 static int
@@ -174,6 +213,10 @@ make_brand_string SYSCTL_HANDLER_ARGS
        __unused void *unused_arg1 = arg1;
        __unused int unused_arg2 = arg2;
 
+       if (brand_string != NULL) {
+               return SYSCTL_OUT(req, brand_string, brand_string_len);
+       }
+
        const char *impl;
 
        switch (cpuid_info()->arm_info.arm_implementor) {
@@ -258,54 +301,6 @@ SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1);
 SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1);
 // ARM64: AArch64 Instruction Set Attribute Register 1
 SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1);
-/*
- * ARM64: AArch64 Guarded Execution Mode GENTER Vector
- *
- * Workaround for pre-H13, since register cannot be read unless in guarded
- * mode, thus expose software convention that GXF_ENTRY_EL1 is always set
- * to the address of the gxf_ppl_entry_handler.
- */
-#endif /* DEVELOPMENT || DEBUG */
 
-#if HYPERVISOR
-SYSCTL_NODE(_kern, OID_AUTO, hv, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Hypervisor info");
-
-SYSCTL_INT(_kern_hv, OID_AUTO, supported,
-    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
-    &hv_support_available, 0, "");
-
-extern unsigned int arm64_num_vmids;
-
-SYSCTL_UINT(_kern_hv, OID_AUTO, max_address_spaces,
-    CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
-    &arm64_num_vmids, 0, "");
-
-extern uint64_t pmap_ipa_size(uint64_t granule);
-
-static int
-sysctl_ipa_size_16k SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2, oidp)
-       uint64_t return_value = pmap_ipa_size(16384);
-       return SYSCTL_OUT(req, &return_value, sizeof(return_value));
-}
-
-SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_16k,
-    CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
-    0, 0, sysctl_ipa_size_16k, "P",
-    "Maximum size allowed for 16K-page guest IPA spaces");
-
-static int
-sysctl_ipa_size_4k SYSCTL_HANDLER_ARGS
-{
-#pragma unused(arg1, arg2, oidp)
-       uint64_t return_value = pmap_ipa_size(4096);
-       return SYSCTL_OUT(req, &return_value, sizeof(return_value));
-}
-
-SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_4k,
-    CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED,
-    0, 0, sysctl_ipa_size_4k, "P",
-    "Maximum size allowed for 4K-page guest IPA spaces");
+#endif /* DEVELOPMENT || DEBUG */
 
-#endif // HYPERVISOR
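
The result is observable through the existing machdep.cpu.brand_string sysctl, which now prefers the device tree's /product/product-soc-name property over the CPUID-derived fallback. A quick userspace check:

    #include <stdio.h>
    #include <sys/sysctl.h>

    int
    main(void)
    {
        char brand[64];
        size_t len = sizeof(brand);

        if (sysctlbyname("machdep.cpu.brand_string", brand, &len, NULL, 0) == 0) {
            printf("%s\n", brand); /* the SoC product name when the DT property exists */
        }
        return 0;
    }
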
diff --git a/bsd/dev/dtrace/fasttrap.c b/bsd/dev/dtrace/fasttrap.c
index b519a7a3b961a13794e6e39cfb53782671f753b6..e95eb2e1fe259150f33bf06544937f9ff5f7b2e6 100644 (file)
@@ -2415,6 +2415,48 @@ fasttrap_validatestr(char const* str, size_t maxlen) {
        return utf8_validatestr((unsigned const char*) str, len);
 }
 
+/*
+ * Checks that provided credentials are allowed to debug target process.
+ */
+static int
+fasttrap_check_cred_priv(cred_t *cr, proc_t *p)
+{
+       int err = 0;
+
+       /* Only root can use DTrace. */
+       if (!kauth_cred_issuser(cr)) {
+               err = EPERM;
+               goto out;
+       }
+
+       /* Process is marked as no attach. */
+       if (ISSET(p->p_lflag, P_LNOATTACH)) {
+               err = EBUSY;
+               goto out;
+       }
+
+#if CONFIG_MACF
+       /* Check with MAC framework when enabled. */
+       struct proc_ident cur_ident = proc_ident(current_proc());
+       struct proc_ident p_ident = proc_ident(p);
+
+       /* Do not hold ref to proc here to avoid deadlock. */
+       proc_rele(p);
+       err = mac_proc_check_debug(&cur_ident, cr, &p_ident);
+
+       if (proc_find_ident(&p_ident) == PROC_NULL) {
+               err = ESRCH;
+               goto out_no_proc;
+       }
+#endif /* CONFIG_MACF */
+
+out:
+       proc_rele(p);
+
+out_no_proc:
+       return err;
+}
+
 /*ARGSUSED*/
 static int
 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
@@ -2486,15 +2528,11 @@ fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *
                                ret = ESRCH;
                                goto err;
                        }
-                       // proc_lock(p);
-                       // FIXME! How is this done on OS X?
-                       // if ((ret = priv_proc_cred_perm(cr, p, NULL,
-                       //     VREAD | VWRITE)) != 0) {
-                       //      mutex_exit(&p->p_lock);
-                       //      return (ret);
-                       // }
-                       // proc_unlock(p);
-                       proc_rele(p);
+
+                       ret = fasttrap_check_cred_priv(cr, p);
+                       if (ret != 0) {
+                               goto err;
+                       }
                }
 
                ret = fasttrap_add_probe(probe);
@@ -2508,7 +2546,7 @@ err:
                fasttrap_instr_query_t instr;
                fasttrap_tracepoint_t *tp;
                uint_t index;
-               // int ret;
+               int ret;
 
                if (copyin(arg, &instr, sizeof (instr)) != 0)
                        return (EFAULT);
@@ -2526,15 +2564,11 @@ err:
                                        proc_rele(p);
                                return (ESRCH);
                        }
-                       //proc_lock(p);
-                       // FIXME! How is this done on OS X?
-                       // if ((ret = priv_proc_cred_perm(cr, p, NULL,
-                       //     VREAD)) != 0) {
-                       //      mutex_exit(&p->p_lock);
-                       //      return (ret);
-                       // }
-                       // proc_unlock(p);
-                       proc_rele(p);
+
+                       ret = fasttrap_check_cred_priv(cr, p);
+                       if (ret != 0) {
+                               return (ret);
+                       }
                }
 
                index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
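
One subtlety: fasttrap_check_cred_priv() consumes the proc reference on every path. It has to drop the reference before calling mac_proc_check_debug() to avoid a deadlock, and the reference re-taken by proc_find_ident() is released at the common exit, which is why the updated callers above no longer call proc_rele() themselves. The caller-side pattern, distilled:

    proc_t p = proc_find(pid);
    if (p == PROC_NULL) {
        return ESRCH;
    }
    ret = fasttrap_check_cred_priv(cr, p);  /* proc reference dropped inside */
    if (ret != 0) {
        return ret;                         /* note: no proc_rele() here */
    }
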
diff --git a/bsd/kern/kern_authorization.c b/bsd/kern/kern_authorization.c
index e1000e3d45301efdd98279a787d3cee1f5072799..0181ee93de8d2c4a120183cacb21675835dad8e4 100644 (file)
@@ -1217,7 +1217,11 @@ kauth_acl_alloc(int count)
 void
 kauth_acl_free(kauth_acl_t aclp)
 {
-       FREE(aclp, M_KAUTH);
+       /*
+        * It's possible this may have been allocated in a kext using
+        * MALLOC. Using KHEAP_ANY will allow us to free it here.
+        */
+       kheap_free_addr(KHEAP_ANY, aclp);
 }
 
 
diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c
index efca619e89bc73860731442e61e1ccaed48e1e7a..cb5705e8e9826f371714633ff0c43be692d32315 100644 (file)
@@ -227,11 +227,22 @@ fg_free(struct fileglob *fg)
 
 OS_ALWAYS_INLINE
 void
-fg_ref(struct fileglob *fg)
+fg_ref(proc_t p, struct fileglob *fg)
 {
+#if DEBUG || DEVELOPMENT
+       proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
+#else
+       (void)p;
+#endif
        os_ref_retain_raw(&fg->fg_count, &f_refgrp);
 }
 
+void
+fg_drop_live(struct fileglob *fg)
+{
+       os_ref_release_live_raw(&fg->fg_count, &f_refgrp);
+}
+
 int
 fg_drop(proc_t p, struct fileglob *fg)
 {
@@ -3256,7 +3267,7 @@ finishdup(proc_t p,
                return ENOMEM;
        }
 
-       fg_ref(ofp->fp_glob);
+       fg_ref(p, ofp->fp_glob);
        nfp->fp_glob = ofp->fp_glob;
 
 #if DIAGNOSTIC
@@ -4919,7 +4930,7 @@ fdcopy(proc_t p, vnode_t uth_cdir)
                                        fp->fp_flags |=
                                            (ofp->fp_flags & ~FP_TYPEMASK);
                                        fp->fp_glob = ofp->fp_glob;
-                                       fg_ref(fp->fp_glob);
+                                       fg_ref(p, fp->fp_glob);
                                        *fpp = fp;
                                }
                        } else {
@@ -5284,18 +5295,19 @@ sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
                goto out_unlock;
        }
 
+       /* Dropped when port is deallocated */
+       fg_ref(p, fg);
+
        proc_fdunlock(p);
 
        /* Allocate and initialize a port */
        fileport = fileport_alloc(fg);
        if (fileport == IPC_PORT_NULL) {
+               fg_drop_live(fg);
                err = EAGAIN;
                goto out;
        }
 
-       /* Dropped when port is deallocated */
-       fg_ref(fg);
-
        /* Add an entry.  Deallocates port on failure. */
        name = ipc_port_copyout_send(fileport, get_task_ipcspace(p->task));
        if (!MACH_PORT_VALID(name)) {
@@ -5382,7 +5394,7 @@ fileport_makefd(proc_t p, ipc_port_t port, int uf_flags, int *retval)
        }
 
        fp->fp_glob = fg;
-       fg_ref(fg);
+       fg_ref(p, fg);
 
        procfdtbl_releasefd(p, fd, fp);
        proc_fdunlock(p);
@@ -5525,7 +5537,7 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error)
                if (fp->fp_glob) {
                        fg_free(fp->fp_glob);
                }
-               fg_ref(wfp->fp_glob);
+               fg_ref(p, wfp->fp_glob);
                fp->fp_glob = wfp->fp_glob;
 
                fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd] |
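
The fileport change is the classic retain-before-unlock fix: the extra fileglob reference is now taken while proc_fdlock still pins the fileglob (exactly what the new DEBUG/DEVELOPMENT assertion in fg_ref() enforces), and the new fg_drop_live() backs it out on the allocation-failure path without triggering last-reference teardown. Distilled:

    proc_fdlock(p);
    fg = fp->fp_glob;
    fg_ref(p, fg);          /* safe: fdlock held, fg cannot be freed underneath us */
    proc_fdunlock(p);

    fileport = fileport_alloc(fg);
    if (fileport == IPC_PORT_NULL) {
        fg_drop_live(fg);   /* release the extra reference; the count stays live */
        return EAGAIN;
    }
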
diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c
index db94eab6a39ed9ee900f133b53aa557554743509..0385bf1b48cbb46ebc556efc2cbee4a5d6cbd521 100644 (file)
@@ -3939,17 +3939,8 @@ bad:
                 * received by the child in a partially constructed state.
                 */
                proc_signalend(p, 0);
-
-               /* flag the 'fork' has occurred */
-               proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
        }
 
-       /* flag exec has occurred, notify only if it has not failed due to FP Key error */
-       if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
-               proc_knote(p, NOTE_EXEC);
-       }
-
-
        if (error == 0) {
                /*
                 * We need to initialize the bank context behind the protection of
@@ -4077,6 +4068,15 @@ bad:
                }
        }
 
+       if (spawn_no_exec) {
+               /* flag the 'fork' has occurred */
+               proc_knote(p->p_pptr, NOTE_FORK | p->p_pid);
+       }
+
+       /* flag exec has occurred, notify only if it has not failed due to FP Key error */
+       if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) {
+               proc_knote(p, NOTE_EXEC);
+       }
 
        if (imgp != NULL) {
                if (imgp->ip_vp) {
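
The kern_exec.c hunks only move when the NOTE_FORK and NOTE_EXEC knotes fire, so kqueue observers can no longer see a partially constructed child. For reference, a watcher for these events looks roughly like this (standard kqueue API; error handling omitted):

    #include <sys/event.h>
    #include <unistd.h>

    /* Block until `pid` forks or execs; these are the notifications
     * whose delivery point the hunks above push later in exec. */
    static void
    wait_for_fork_or_exec(pid_t pid)
    {
            int kq = kqueue();
            struct kevent ev;

            EV_SET(&ev, pid, EVFILT_PROC, EV_ADD | EV_CLEAR,
                NOTE_FORK | NOTE_EXEC, 0, NULL);
            kevent(kq, &ev, 1, NULL, 0, NULL);   /* register interest */
            kevent(kq, NULL, 0, &ev, 1, NULL);   /* wait for the event */
            close(kq);
    }
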
index 3d8198474c756038c6d9f7dc331ee95609baa0d0..69e5f6d69ab161b390aeb7818cfaf18a3059db75 100644 (file)
@@ -2045,6 +2045,9 @@ memorystatus_add(proc_t p, boolean_t locked)
        if (isSysProc(p)) {
                p->p_memstat_state |= P_MEMSTAT_FREEZE_DISABLED;
        }
+#if CONFIG_FREEZE
+       memorystatus_freeze_init_proc(p);
+#endif
 
        bucket = &memstat_bucket[p->p_memstat_effectivepriority];
 
@@ -4903,8 +4906,10 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause,
                        entry->jse_idle_delta = p->p_memstat_idle_delta;
 #if CONFIG_FREEZE
                        entry->jse_thaw_count = p->p_memstat_thaw_count;
+                       entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason;
 #else /* CONFIG_FREEZE */
                        entry->jse_thaw_count = 0;
+                       entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
 #endif /* CONFIG_FREEZE */
 
                        /*
@@ -5179,9 +5184,11 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna
        entry->jse_idle_delta = p->p_memstat_idle_delta; /* Most recent timespan spent in idle-band */
 
 #if CONFIG_FREEZE
+       entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason;
        entry->jse_thaw_count = p->p_memstat_thaw_count;
 #else /* CONFIG_FREEZE */
        entry->jse_thaw_count = 0;
+       entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
 #endif /* CONFIG_FREEZE */
 
        proc_coalitionids(p, cids);
@@ -7884,8 +7891,10 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args *
     #pragma unused(jetsam_reason)
 #endif
 
-       /* We don't need entitlements if we're setting/ querying the freeze preference for a process. Skip the check below. */
-       if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE || args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE) {
+       /* We don't need entitlements if we're setting / querying the freeze preference or frozen status for a process. */
+       if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE ||
+           args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE ||
+           args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN) {
                skip_auth_check = TRUE;
        }
 
@@ -8023,6 +8032,9 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args *
        case MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE:
                error = memorystatus_get_process_is_freezable(args->pid, ret);
                break;
+       case MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN:
+               error = memorystatus_get_process_is_frozen(args->pid, ret);
+               break;
 
        case MEMORYSTATUS_CMD_FREEZER_CONTROL:
                error = memorystatus_freezer_control(args->flags, args->buffer, args->buffersize, ret);
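
MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN joins the two freezable commands in skipping the entitlement check, and (per the kern_memorystatus_freeze.c hunk further down) only the calling process may query itself for now. Assuming the private memorystatus_control() syscall wrapper declared alongside these commands, a self-query is one call:

    #include <sys/kern_memorystatus.h>
    #include <unistd.h>

    /* Returns 1 if the caller is currently frozen, 0 if not; fails with
     * EPERM for any pid other than our own. Private interface, so treat
     * this as a sketch rather than supported API. */
    int frozen = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN,
        getpid(), 0, NULL, 0);
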
index 08c86e4f6d18062fcf2a6b6edad08fe84e0da252..1dfa926e9a02ce3a42cb5dff12e46af38639ad8f 100644 (file)
@@ -106,8 +106,8 @@ unsigned int memorystatus_freeze_pages_min = 0;
 unsigned int memorystatus_freeze_pages_max = 0;
 unsigned int memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT;
 unsigned int memorystatus_freeze_daily_mb_max = FREEZE_DAILY_MB_MAX_DEFAULT;
-uint64_t     memorystatus_freeze_budget_pages_remaining = 0; //remaining # of pages that can be frozen to disk
-boolean_t memorystatus_freeze_degradation = FALSE; //protected by the freezer mutex. Signals we are in a degraded freeze mode.
+uint64_t     memorystatus_freeze_budget_pages_remaining = 0; /* Remaining # of pages that can be frozen to disk */
+boolean_t memorystatus_freeze_degradation = FALSE; /* Protected by the freezer mutex. Signals we are in a degraded freeze mode. */
 
 unsigned int memorystatus_max_frozen_demotions_daily = 0;
 unsigned int memorystatus_thaw_count_demotion_threshold = 0;
@@ -215,11 +215,9 @@ extern int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task);
 
 static void memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed);
 static void memorystatus_demote_frozen_processes(boolean_t force_one);
-/*
- * Converts the freezer_error_code into a string and updates freezer error counts.
- */
-static void memorystatus_freezer_stringify_error(const int freezer_error_code, char* buffer, size_t len);
 
+static void memorystatus_freeze_handle_error(proc_t p, const int freezer_error_code, bool was_refreeze, pid_t pid, const coalition_t coalition, const char* log_prefix);
+static void memorystatus_freeze_out_of_slots(void);
 static uint64_t memorystatus_freezer_thread_next_run_ts = 0;
 
 /* Sysctls needed for aggd stats */
@@ -275,6 +273,8 @@ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_shared_pages_skipped, CTLFLAG_
 SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_bytes_refrozen, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_bytes_refrozen, "");
 SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_refreeze_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_refreeze_count, "");
 
+static_assert(_kMemorystatusFreezeSkipReasonMax <= UINT8_MAX);
+
 
 /*
  * Calculates the hit rate for the freezer.
@@ -449,11 +449,7 @@ again:
                }
 
                if (error) {
-                       char reason[FREEZER_ERROR_STRING_LENGTH];
-                       memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason));
-
-                       printf("sysctl_freeze: task_freeze failed: %s\n", reason);
-
+                       memorystatus_freeze_handle_error(p, freezer_error_code, state & P_MEMSTAT_FROZEN, pid, coal, "sysctl_freeze");
                        if (error == KERN_NO_SPACE) {
                                /* Make it easy to distinguish between failures due to low compressor/ swap space and other failures. */
                                error = ENOSPC;
@@ -464,7 +460,11 @@ again:
                        proc_list_lock();
                        if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) {
                                p->p_memstat_state |= P_MEMSTAT_FROZEN;
+                               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
                                memorystatus_frozen_count++;
+                               if (memorystatus_frozen_count == memorystatus_frozen_processes_max) {
+                                       memorystatus_freeze_out_of_slots();
+                               }
                        } else {
                                // This was a re-freeze
                                if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
@@ -1219,6 +1219,9 @@ memorystatus_is_process_eligible_for_freeze(proc_t p)
        state = p->p_memstat_state;
 
        if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) {
+               if (state & P_MEMSTAT_FREEZE_DISABLED) {
+                       p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonDisabled;
+               }
                goto out;
        }
 
@@ -1308,6 +1311,7 @@ memorystatus_is_process_eligible_for_freeze(proc_t p)
                if (first_consideration) {
                        memorystatus_freezer_stats.mfs_error_below_min_pages_count++;
                }
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonBelowMinPages;
                goto out;
        }
 
@@ -1320,6 +1324,7 @@ memorystatus_is_process_eligible_for_freeze(proc_t p)
                if (first_consideration) {
                        memorystatus_freezer_stats.mfs_error_other_count++;
                }
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOther;
                goto out;
        }
 
@@ -1339,19 +1344,27 @@ memorystatus_is_process_eligible_for_freeze(proc_t p)
                        if (first_consideration) {
                                memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++;
                        }
+                       p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonLowProbOfUse;
                        goto out;
                }
        }
 
        should_freeze = TRUE;
 out:
-       if (should_freeze && !first_consideration && !(state & P_MEMSTAT_FROZEN)) {
+       if (should_freeze && !(state & P_MEMSTAT_FROZEN)) {
                /*
-                * We're freezing this for the first time and we previously considered it ineligible.
-                * Bump the considered count so that we track this as 1 failure
-                * and 1 success.
+                * Reset the skip reason. If the process is killed before we manage
+                * to actually freeze it, we failed to consider it early enough.
                 */
-               memorystatus_freezer_stats.mfs_process_considered_count++;
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
+               if (!first_consideration) {
+                       /*
+                        * We're freezing this for the first time and we previously considered it ineligible.
+                        * Bump the considered count so that we track this as 1 failure
+                        * and 1 success.
+                        */
+                       memorystatus_freezer_stats.mfs_process_considered_count++;
+               }
        }
        return should_freeze;
 }
@@ -1460,7 +1473,11 @@ memorystatus_freeze_process_sync(proc_t p)
 
                        if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) {
                                p->p_memstat_state |= P_MEMSTAT_FROZEN;
+                               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
                                memorystatus_frozen_count++;
+                               if (memorystatus_frozen_count == memorystatus_frozen_processes_max) {
+                                       memorystatus_freeze_out_of_slots();
+                               }
                        } else {
                                // This was a re-freeze
                                if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
@@ -1511,11 +1528,7 @@ memorystatus_freeze_process_sync(proc_t p)
                                 */
                        }
                } else {
-                       char reason[FREEZER_ERROR_STRING_LENGTH];
-                       memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason));
-
-                       os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...skipped (%s)",
-                           aPid, ((p && *p->p_name) ? p->p_name : "unknown"), reason);
+                       memorystatus_freeze_handle_error(p, freezer_error_code, p->p_memstat_state & P_MEMSTAT_FROZEN, aPid, NULL, "memorystatus_freeze_process_sync");
                        p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE;
                }
 
@@ -1541,7 +1554,7 @@ memorystatus_freeze_top_process(void)
        proc_t p = PROC_NULL, next_p = PROC_NULL;
        unsigned int i = 0;
        unsigned int band = JETSAM_PRIORITY_IDLE;
-       boolean_t refreeze_processes = FALSE;
+       bool refreeze_processes = false;
        task_t curr_task = NULL;
        coalition_t coal = COALITION_NULL;
        pid_t pid_list[MAX_XPC_SERVICE_PIDS];
@@ -1558,7 +1571,7 @@ memorystatus_freeze_top_process(void)
                 * try to refreeze any processes we might have thawed
                 * in the past and push out their compressed state out.
                 */
-               refreeze_processes = TRUE;
+               refreeze_processes = true;
                band = (unsigned int) memorystatus_freeze_jetsam_band;
        }
 
@@ -1570,6 +1583,7 @@ freeze_process:
                uint32_t purgeable, wired, clean, dirty, shared;
                uint64_t max_pages = 0;
                int    freezer_error_code = 0;
+               bool was_refreeze = false;
 
                p = next_p;
 
@@ -1703,13 +1717,18 @@ freeze_process:
 
                        if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) {
                                p->p_memstat_state |= P_MEMSTAT_FROZEN;
+                               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
                                memorystatus_frozen_count++;
+                               if (memorystatus_frozen_count == memorystatus_frozen_processes_max) {
+                                       memorystatus_freeze_out_of_slots();
+                               }
                        } else {
                                // This was a re-freeze
                                if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
                                        memorystatus_freezer_stats.mfs_bytes_refrozen += dirty * PAGE_SIZE;
                                        memorystatus_freezer_stats.mfs_refreeze_count++;
                                }
+                               was_refreeze = true;
                        }
 
                        p->p_memstat_frozen_count++;
@@ -1738,7 +1757,7 @@ freeze_process:
                        }
                        memorystatus_freeze_update_throttle(&memorystatus_freeze_budget_pages_remaining);
                        os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: %sfreezing (%s) pid %d [%s] done, memorystatus_freeze_budget_pages_remaining %llu %sfroze %u pages\n",
-                           refreeze_processes? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_freeze_budget_pages_remaining, refreeze_processes? "Re" : "", dirty);
+                           was_refreeze ? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_freeze_budget_pages_remaining, was_refreeze ? "Re" : "", dirty);
 
                        proc_list_lock();
 
@@ -1825,7 +1844,7 @@ freeze_process:
                        p->p_memstat_state &= ~P_MEMSTAT_LOCKED;
                        wakeup(&p->p_memstat_state);
 
-                       if (refreeze_processes == TRUE) {
+                       if (refreeze_processes) {
                                if ((freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) ||
                                    (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO)) {
                                        /*
@@ -1844,12 +1863,7 @@ freeze_process:
                        } else {
                                p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE;
                        }
-
-                       char reason[FREEZER_ERROR_STRING_LENGTH];
-                       memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason));
-
-                       os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: %sfreezing (%s) pid %d [%s]...skipped (%s)\n",
-                           refreeze_processes? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), reason);
+                       memorystatus_freeze_handle_error(p, freezer_error_code, p->p_memstat_state & P_MEMSTAT_FROZEN, aPid, coal, "memorystatus_freeze_top_process");
 
                        proc_rele_locked(p);
 
@@ -1861,7 +1875,7 @@ freeze_process:
 
        if ((ret == -1) &&
            (memorystatus_refreeze_eligible_count >= MIN_THAW_REFREEZE_THRESHOLD) &&
-           (refreeze_processes == FALSE)) {
+           (!refreeze_processes)) {
                /*
                 * We failed to freeze a process from the IDLE
                 * band AND we have some thawed  processes
@@ -1873,7 +1887,7 @@ freeze_process:
 
                band = (unsigned int) memorystatus_freeze_jetsam_band;
 
-               refreeze_processes = TRUE;
+               refreeze_processes = true;
 
                goto freeze_process;
        }
@@ -2121,27 +2135,86 @@ memorystatus_freeze_calculate_new_budget(
        return (uint32_t) MIN(new_budget, UINT32_MAX);
 }
 
+/*
+ * Mark all non-frozen, freezer-eligible processes as skipped for the given reason.
+ * Used when we hit some system freeze limit and know that we won't be considering remaining processes.
+ * If you're using this for a new reason, make sure to add it to memorystatus_freeze_init_proc so that
+ * it gets set for new processes.
+ * NB: These processes will retain this skip reason until they are reconsidered by memorystatus_is_process_eligible_for_freeze.
+ */
 static void
-memorystatus_freezer_stringify_error(
+memorystatus_freeze_mark_eligible_processes_with_skip_reason(memorystatus_freeze_skip_reason_t reason, bool locked)
+{
+       LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED);
+       LCK_MTX_ASSERT(proc_list_mlock, locked ? LCK_MTX_ASSERT_OWNED : LCK_MTX_ASSERT_NOTOWNED);
+       unsigned int band = JETSAM_PRIORITY_IDLE;
+       proc_t p;
+
+       if (!locked) {
+               proc_list_lock();
+       }
+       p = memorystatus_get_first_proc_locked(&band, FALSE);
+       while (p) {
+               assert(p->p_memstat_effectivepriority == (int32_t) band);
+               if (!(p->p_memstat_state & P_MEMSTAT_FROZEN) && memorystatus_is_process_eligible_for_freeze(p)) {
+                       assert(p->p_memstat_freeze_skip_reason == kMemorystatusFreezeSkipReasonNone);
+                       p->p_memstat_freeze_skip_reason = (uint8_t) reason;
+               }
+               p = memorystatus_get_next_proc_locked(&band, p, FALSE);
+       }
+       if (!locked) {
+               proc_list_unlock();
+       }
+}
+
+/*
+ * Called after we fail to freeze a process.
+ * Logs the failure, marks the process with the failure reason, and updates freezer stats.
+ */
+static void
+memorystatus_freeze_handle_error(
+       proc_t p,
        const int freezer_error_code,
-       char* buffer,
-       size_t len)
+       bool was_refreeze,
+       pid_t pid,
+       const coalition_t coalition,
+       const char* log_prefix)
 {
-       if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) {
+       const char *reason;
+       memorystatus_freeze_skip_reason_t skip_reason;
+
+       switch (freezer_error_code) {
+       case FREEZER_ERROR_EXCESS_SHARED_MEMORY:
                memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++;
-               strlcpy(buffer, "too much shared memory", len);
-       } else if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) {
+               reason = "too much shared memory";
+               skip_reason = kMemorystatusFreezeSkipReasonExcessSharedMemory;
+               break;
+       case FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO:
                memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++;
-               strlcpy(buffer, "low private-shared pages ratio", len);
-       } else if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) {
+               reason = "low private-shared pages ratio";
+               skip_reason = kMemorystatusFreezeSkipReasonLowPrivateSharedRatio;
+               break;
+       case FREEZER_ERROR_NO_COMPRESSOR_SPACE:
                memorystatus_freezer_stats.mfs_error_no_compressor_space_count++;
-               strlcpy(buffer, "no compressor space", len);
-       } else if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) {
+               reason = "no compressor space";
+               skip_reason = kMemorystatusFreezeSkipReasonNoCompressorSpace;
+               break;
+       case FREEZER_ERROR_NO_SWAP_SPACE:
                memorystatus_freezer_stats.mfs_error_no_swap_space_count++;
-               strlcpy(buffer, "no swap space", len);
-       } else {
-               strlcpy(buffer, "unknown error", len);
+               reason = "no swap space";
+               skip_reason = kMemorystatusFreezeSkipReasonNoSwapSpace;
+               break;
+       default:
+               reason = "unknown error";
+               skip_reason = kMemorystatusFreezeSkipReasonOther;
        }
+
+       p->p_memstat_freeze_skip_reason = (uint8_t) skip_reason;
+
+       os_log_with_startup_serial(OS_LOG_DEFAULT, "%s: %sfreezing (%s) pid %d [%s]...skipped (%s)\n",
+           log_prefix, was_refreeze ? "re" : "",
+           (coalition == NULL ? "general" : "coalition-driven"), pid,
+           ((p && *p->p_name) ? p->p_name : "unknown"), reason);
 }
 
 /*
@@ -2152,6 +2225,7 @@ static void
 memorystatus_freeze_start_normal_throttle_interval(uint32_t new_budget, mach_timespec_t start_ts)
 {
        LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED);
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
 
        normal_throttle_window->max_pageouts = new_budget;
        normal_throttle_window->ts.tv_sec = normal_throttle_window->mins * 60;
@@ -2190,6 +2264,54 @@ SYSCTL_PROC(_vm, OID_AUTO, memorystatus_freeze_calculate_new_budget, CTLTYPE_INT
 
 #endif /* DEVELOPMENT || DEBUG */
 
+/*
+ * Called when we first run out of budget in an interval.
+ * Marks idle processes as not frozen due to lack of budget.
+ * NB: It might be worth having a CA event here.
+ */
+static void
+memorystatus_freeze_out_of_budget(const struct throttle_interval_t *interval)
+{
+       LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED);
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED);
+
+       mach_timespec_t time_left = {0, 0};
+       mach_timespec_t now_ts;
+       clock_sec_t sec;
+       clock_nsec_t nsec;
+
+       time_left.tv_sec = interval->ts.tv_sec;
+       time_left.tv_nsec = 0;
+       clock_get_system_nanotime(&sec, &nsec);
+       now_ts.tv_sec = (unsigned int)(MIN(sec, UINT32_MAX));
+       now_ts.tv_nsec = nsec;
+
+       SUB_MACH_TIMESPEC(&time_left, &now_ts);
+       os_log(OS_LOG_DEFAULT,
+           "memorystatus_freeze: Out of NAND write budget with %u minutes left in the current freezer interval. %u procs are frozen.\n",
+           time_left.tv_sec / 60, memorystatus_frozen_count);
+
+       memorystatus_freeze_mark_eligible_processes_with_skip_reason(kMemorystatusFreezeSkipReasonOutOfBudget, false);
+}
+
+/*
+ * Called when we cross over the threshold of maximum frozen processes allowed.
+ * Marks remaining idle processes as not frozen due to lack of slots.
+ */
+static void
+memorystatus_freeze_out_of_slots(void)
+{
+       LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED);
+       LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED);
+       assert(memorystatus_frozen_count == memorystatus_frozen_processes_max);
+
+       os_log(OS_LOG_DEFAULT,
+           "memorystatus_freeze: Out of slots in the freezer. %u procs are frozen.\n",
+           memorystatus_frozen_count);
+
+       memorystatus_freeze_mark_eligible_processes_with_skip_reason(kMemorystatusFreezeSkipReasonOutOfSlots, true);
+}
+
 /*
  * This function will do 4 things:
  *
@@ -2220,6 +2342,7 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed)
 
        unsigned int freeze_daily_pageouts_max = 0;
        uint32_t budget_rollover = 0;
+       bool started_with_budget = (*budget_pages_allowed > 0);
 
 #if DEVELOPMENT || DEBUG
        if (!memorystatus_freeze_throttle_enabled) {
@@ -2278,6 +2401,9 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed)
                if (memorystatus_freeze_degradation == FALSE) {
                        if (interval->pageouts >= interval->max_pageouts) {
                                *budget_pages_allowed = 0;
+                               if (started_with_budget) {
+                                       memorystatus_freeze_out_of_budget(interval);
+                               }
                        } else {
                                int budget_left = interval->max_pageouts - interval->pageouts;
                                int budget_threshold = (freeze_daily_pageouts_max * FREEZE_DEGRADATION_BUDGET_THRESHOLD) / 100;
@@ -2351,10 +2477,8 @@ memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused)
        }
 
        /*
-        * We use memorystatus_apps_idle_delay_time because if/when we adopt aging for applications,
-        * it'll tie neatly into running the freezer once we age an application.
-        *
-        * Till then, it serves as a good interval that can be tuned via a sysctl too.
+        * Give applications currently in the aging band a chance to age out into the idle band before
+        * running the freezer again.
         */
        memorystatus_freezer_thread_next_run_ts = mach_absolute_time() + memorystatus_apps_idle_delay_time;
 
@@ -2443,6 +2567,31 @@ memorystatus_get_process_is_freezable(pid_t pid, int *is_freezable)
        return 0;
 }
 
+errno_t
+memorystatus_get_process_is_frozen(pid_t pid, int *is_frozen)
+{
+       proc_t p = PROC_NULL;
+
+       if (pid == 0) {
+               return EINVAL;
+       }
+
+       /*
+        * Only allow this on the current proc for now.
+        * We can check for privileges and allow targeting another process in the future.
+        */
+       p = current_proc();
+       if (p->p_pid != pid) {
+               return EPERM;
+       }
+
+       proc_list_lock();
+       *is_frozen = (p->p_memstat_state & P_MEMSTAT_FROZEN) != 0;
+       proc_list_unlock();
+
+       return 0;
+}
+
 int
 memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable)
 {
@@ -2495,6 +2644,23 @@ memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable)
        return 0;
 }
 
+/*
+ * Called when process is created before it is added to a memorystatus bucket.
+ */
+void
+memorystatus_freeze_init_proc(proc_t p)
+{
+       /* NB: Process is not on the memorystatus lists yet so it's safe to modify the skip reason without the freezer mutex. */
+       if (memorystatus_freeze_budget_pages_remaining == 0) {
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOutOfBudget;
+       } else if (memorystatus_frozen_count >= memorystatus_frozen_processes_max) {
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOutOfSlots;
+       } else {
+               p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone;
+       }
+}
+
+
 static int
 sysctl_memorystatus_do_fastwake_warmup_all  SYSCTL_HANDLER_ARGS
 {
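
One detail worth calling out in memorystatus_freeze_mark_eligible_processes_with_skip_reason(): its two callers arrive with different lock state (the out-of-budget path holds only the freezer mutex, while the out-of-slots path also holds the proc list lock), so the helper takes a `locked` flag, asserts the caller's claim, and acquires the lock only when needed. The same shape in portable C, as a sketch:

    #include <pthread.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Conditional-locking helper in the style of
     * memorystatus_freeze_mark_eligible_processes_with_skip_reason():
     * the caller declares whether it already owns the lock. */
    static void
    mark_all(int locked)
    {
            if (!locked) {
                    pthread_mutex_lock(&list_lock);
            }
            /* ... walk the idle band and set skip reasons ... */
            if (!locked) {
                    pthread_mutex_unlock(&list_lock);
            }
    }
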
index bcdc1c18d353bd3fed9e4f59e89b671fd8888731..b34ca8058d901b5d96fe23d52d0a9c8136108bf2 100644 (file)
@@ -2716,7 +2716,7 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers
        break;
 
        case PROC_PIDFDPSEMINFO: {
-               if ((error = fp_get_ftype(p, fd, DTYPE_PSXSHM, EBADF, &fp)) != 0) {
+               if ((error = fp_get_ftype(p, fd, DTYPE_PSXSEM, EBADF, &fp)) != 0) {
                        goto out1;
                }
                error = pid_pseminfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval);
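
The one-character proc_info.c fix makes PROC_PIDFDPSEMINFO actually reachable: the flavor used to validate the fd against DTYPE_PSXSHM (POSIX shared memory) instead of DTYPE_PSXSEM (POSIX semaphores), so every query bounced with EBADF. A caller goes through libproc, roughly:

    #include <libproc.h>
    #include <sys/proc_info.h>

    /* Fetch POSIX-semaphore info for fd `fd` of process `pid`. */
    static int
    query_psem(int pid, int fd, struct psem_fdinfo *out)
    {
            int n = proc_pidfdinfo(pid, fd, PROC_PIDFDPSEMINFO,
                out, sizeof(*out));
            return (n == (int)sizeof(*out)) ? 0 : -1;
    }
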
index 7968e04910f9a150e44555f827bb2fe44c5bf00a..8304c009d34845c7d3e0f10f93319a6245df89f8 100644 (file)
@@ -2957,11 +2957,6 @@ ubc_cs_blob_deallocate(
        vm_offset_t     blob_addr,
        vm_size_t       blob_size)
 {
-#if PMAP_CS
-       if (blob_size > pmap_cs_blob_limit) {
-               kmem_free(kernel_map, blob_addr, blob_size);
-       } else
-#endif
        {
                kfree(blob_addr, blob_size);
        }
@@ -3560,40 +3555,6 @@ ubc_cs_blob_add(
                blob->csb_entitlements_blob = new_entitlements;
                blob->csb_reconstituted = true;
        }
-#elif PMAP_CS
-       /*
-        * When pmap_cs is enabled, there's an expectation that large blobs are
-        * relocated to their own page.  Above, this happens under
-        * ubc_cs_reconstitute_code_signature() but that discards parts of the
-        * signatures that are necessary on some platforms (eg, requirements).
-        * So in this case, just copy everything.
-        */
-       if (pmap_cs && (blob->csb_mem_size > pmap_cs_blob_limit)) {
-               vm_offset_t cd_offset, ent_offset;
-               vm_size_t new_mem_size = round_page(blob->csb_mem_size);
-               vm_address_t new_mem_kaddr = 0;
-
-               kr = kmem_alloc_kobject(kernel_map, &new_mem_kaddr, new_mem_size, VM_KERN_MEMORY_SECURITY);
-               if (kr != KERN_SUCCESS) {
-                       printf("failed to allocate %lu bytes to relocate blob: %d\n", new_mem_size, kr);
-                       error = ENOMEM;
-                       goto out;
-               }
-
-               cd_offset = (vm_address_t) blob->csb_cd - blob->csb_mem_kaddr;
-               ent_offset = (vm_address_t) blob->csb_entitlements_blob - blob->csb_mem_kaddr;
-
-               memcpy((void *) new_mem_kaddr, (const void *) blob->csb_mem_kaddr, blob->csb_mem_size);
-               ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size);
-               blob->csb_cd = (const CS_CodeDirectory *) (new_mem_kaddr + cd_offset);
-               /* Only update the entitlements blob pointer if it is non-NULL.  If it is NULL, then
-                * the blob has no entitlements and ent_offset is garbage. */
-               if (blob->csb_entitlements_blob != NULL) {
-                       blob->csb_entitlements_blob = (const CS_GenericBlob *) (new_mem_kaddr + ent_offset);
-               }
-               blob->csb_mem_kaddr = new_mem_kaddr;
-               blob->csb_mem_size = new_mem_size;
-       }
 #endif
 
 
@@ -4906,66 +4867,3 @@ ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp)
 }
 #endif /* CHECK_CS_VALIDATION_BITMAP */
 
-#if PMAP_CS
-kern_return_t
-cs_associate_blob_with_mapping(
-       void                    *pmap,
-       vm_map_offset_t         start,
-       vm_map_size_t           size,
-       vm_object_offset_t      offset,
-       void                    *blobs_p)
-{
-       off_t                   blob_start_offset, blob_end_offset;
-       kern_return_t           kr;
-       struct cs_blob          *blobs, *blob;
-       vm_offset_t             kaddr;
-       struct pmap_cs_code_directory *cd_entry = NULL;
-
-       if (!pmap_cs) {
-               return KERN_NOT_SUPPORTED;
-       }
-
-       blobs = (struct cs_blob *)blobs_p;
-
-       for (blob = blobs;
-           blob != NULL;
-           blob = blob->csb_next) {
-               blob_start_offset = (blob->csb_base_offset +
-                   blob->csb_start_offset);
-               blob_end_offset = (blob->csb_base_offset +
-                   blob->csb_end_offset);
-               if ((off_t) offset < blob_start_offset ||
-                   (off_t) offset >= blob_end_offset ||
-                   (off_t) (offset + size) <= blob_start_offset ||
-                   (off_t) (offset + size) > blob_end_offset) {
-                       continue;
-               }
-               kaddr = blob->csb_mem_kaddr;
-               if (kaddr == 0) {
-                       /* blob data has been released */
-                       continue;
-               }
-               cd_entry = blob->csb_pmap_cs_entry;
-               if (cd_entry == NULL) {
-                       continue;
-               }
-
-               break;
-       }
-
-       if (cd_entry != NULL) {
-               kr = pmap_cs_associate(pmap,
-                   cd_entry,
-                   start,
-                   size,
-                   offset - blob_start_offset);
-       } else {
-               kr = KERN_CODESIGN_ERROR;
-       }
-#if 00
-       printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr);
-       kr = KERN_SUCCESS;
-#endif
-       return kr;
-}
-#endif /* PMAP_CS */
index 1a0e04ac973ab617d42661aff753516e8df28037..ab5dbd3246aeedc7f4d196b4d956ec8722338def 100644 (file)
@@ -719,6 +719,7 @@ static unsigned int mb_drain_maxint = 60;
 #else /* XNU_TARGET_OS_OSX */
 static unsigned int mb_drain_maxint = 0;
 #endif /* XNU_TARGET_OS_OSX */
+static unsigned int mb_memory_pressure_percentage = 80;
 
 uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
 uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));
@@ -1431,6 +1432,52 @@ mbuf_table_init(void)
        mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL);
 }
 
+int
+mbuf_get_class(struct mbuf *m)
+{
+       if (m->m_flags & M_EXT) {
+               uint32_t composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE);
+               m_ext_free_func_t m_free_func = m_get_ext_free(m);
+
+               if (m_free_func == NULL) {
+                       if (composite) {
+                               return MC_MBUF_CL;
+                       } else {
+                               return MC_CL;
+                       }
+               } else if (m_free_func == m_bigfree) {
+                       if (composite) {
+                               return MC_MBUF_BIGCL;
+                       } else {
+                               return MC_BIGCL;
+                       }
+               } else if (m_free_func == m_16kfree) {
+                       if (composite) {
+                               return MC_MBUF_16KCL;
+                       } else {
+                               return MC_16KCL;
+                       }
+               }
+       }
+
+       return MC_MBUF;
+}
+
+bool
+mbuf_class_under_pressure(struct mbuf *m)
+{
+       int mclass = mbuf_get_class(m);
+
+       if (m_total(mclass) >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) {
+               os_log(OS_LOG_DEFAULT,
+                   "%s memory-pressure on mbuf due to class %u, total %u max %u",
+                   __func__, mclass, m_total(mclass), m_maxlimit(mclass));
+               return true;
+       }
+
+       return false;
+}
+
 #if defined(__LP64__)
 typedef struct ncl_tbl {
        uint64_t nt_maxmem;     /* memory (sane) size */
@@ -3770,8 +3817,8 @@ m_free(struct mbuf *m)
        }
 
        if (m->m_flags & M_EXT) {
-               u_int16_t refcnt;
-               u_int32_t composite;
+               uint16_t refcnt;
+               uint32_t composite;
                m_ext_free_func_t m_free_func;
 
                if (MBUF_IS_PAIRED(m) && m_free_paired(m)) {
@@ -4168,6 +4215,12 @@ m_copy_pftag(struct mbuf *to, struct mbuf *from)
 #endif /* PF_ECN */
 }
 
+void
+m_copy_necptag(struct mbuf *to, struct mbuf *from)
+{
+       memcpy(m_necptag(to), m_necptag(from), sizeof(struct necp_mtag_));
+}
+
 void
 m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
 {
@@ -8811,3 +8864,6 @@ SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force,
 SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint,
     CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0,
     "Minimum time interval between garbage collection");
+SYSCTL_INT(_kern_ipc, OID_AUTO, mb_memory_pressure_percentage,
+    CTLFLAG_RW | CTLFLAG_LOCKED, &mb_memory_pressure_percentage, 0,
+    "Percentage of an mbuf class's limit at which memory-pressure is triggered");
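
mbuf_class_under_pressure() is a straight percentage test against the per-class limit: with the default mb_memory_pressure_percentage of 80 and a class limit of, say, 32768 clusters, pressure is reported once the class total reaches 26214. A standalone restatement of the arithmetic (with a 64-bit intermediate added to make the overflow behavior explicit):

    #include <stdbool.h>
    #include <stdint.h>

    /* Same threshold test as mbuf_class_under_pressure(); in the kernel,
     * total and maxlimit come from the per-class mbuf statistics. */
    static bool
    class_under_pressure(uint32_t total, uint32_t maxlimit, uint32_t pct)
    {
            return (uint64_t)total >= ((uint64_t)maxlimit * pct) / 100;
    }

    /* class_under_pressure(26214, 32768, 80) is true; 26213 is not. */
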
index 607af6d3c5aba890df75bbf09b4240c933bc4e38..e1a5241a2ddb1f81f65e81a85bf531ef84a529ac 100644 (file)
@@ -2497,9 +2497,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                                if (error) {
                                        if (error == EJUSTRETURN) {
                                                error = 0;
-                                               clen = 0;
-                                               control = NULL;
-                                               top = NULL;
+                                               goto packet_consumed;
                                        }
                                        goto out_locked;
                                }
@@ -2523,6 +2521,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
                        error = (*so->so_proto->pr_usrreqs->pru_send)
                            (so, sendflags, top, addr, control, p);
 
+packet_consumed:
                        if (dontroute) {
                                so->so_options &= ~SO_DONTROUTE;
                        }
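
The sosend() change matters because a socket filter's data-out callback may consume the packet outright by returning EJUSTRETURN, after which the mbuf chains belong to the filter; branching to packet_consumed keeps the SO_DONTROUTE undo and the rest of the post-send bookkeeping on that path instead of skipping it. Roughly what such a consuming filter looks like under the kpi_socketfilter KPI (illustrative sketch, not taken from this diff):

    #include <sys/kpi_socketfilter.h>
    #include <sys/kpi_mbuf.h>

    /* sf_data_out callback that takes ownership of the packet and
     * signals that via EJUSTRETURN; sosend() must not touch the
     * mbufs again afterwards. */
    static errno_t
    my_data_out(void *cookie, socket_t so, const struct sockaddr *to,
        mbuf_t *data, mbuf_t *control, sflt_data_flag_t flags)
    {
    #pragma unused(cookie, so, to, flags)
            mbuf_freem(*data);                   /* the chain is ours now */
            if (control != NULL && *control != NULL) {
                    mbuf_freem(*control);
            }
            return EJUSTRETURN;                  /* "packet consumed" */
    }
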
index 91eb43b4a17d1acb33a6d8b3c7bd380a8fc54d19..2bffce23111083168a3293c6200dfbd1f4e798ae 100644 (file)
@@ -2031,7 +2031,7 @@ fg_insertuipc_mark(struct fileglob * fg)
                msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_insertuipc", NULL);
        }
 
-       os_ref_retain_locked_raw(&fg->fg_count, &f_refgrp);
+       os_ref_retain_raw(&fg->fg_count, &f_refgrp);
        fg->fg_msgcount++;
        if (fg->fg_msgcount == 1) {
                fg->fg_lflags |= FG_INSMSGQ;
index 2c82cbcc8c74219cf57bae05f0004f65a83419fc..58b67705ef87c7edb9460831e290a9ca9c5c3a0d 100644 (file)
@@ -196,6 +196,7 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
        struct bind_node_hashhead * hd;
        struct bind_node * a;
        struct vnode * vp = NULL;
+       uint32_t vp_vid = 0;
        int error = ENOENT;
 
        /*
@@ -214,6 +215,8 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
                                /* lowervp has been recycled */
                                error = EIO;
                                vp = NULL;
+                       } else {
+                               vp_vid = a->bind_myvid;
                        }
                        break;
                }
@@ -221,7 +224,7 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
        lck_mtx_unlock(&bind_hashmtx);
 
        if (vp != NULL) {
-               error = vnode_getwithvid(vp, a->bind_myvid);
+               error = vnode_getwithvid(vp, vp_vid);
                if (error == 0) {
                        *vpp = vp;
                }
@@ -243,6 +246,7 @@ bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp)
        struct bind_node_hashhead * hd;
        struct bind_node * oxp;
        struct vnode * ovp = NULL;
+       uint32_t oxp_vid = 0;
        int error = 0;
 
        hd = BIND_NHASH(xp->bind_lowervp);
@@ -259,6 +263,8 @@ bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp)
                                 *  don't add it.*/
                                error = EIO;
                                ovp = NULL;
+                       } else {
+                               oxp_vid = oxp->bind_myvid;
                        }
                        goto end;
                }
@@ -271,7 +277,7 @@ end:
        lck_mtx_unlock(&bind_hashmtx);
        if (ovp != NULL) {
                /* if we found something in the hash map then grab an iocount */
-               error = vnode_getwithvid(ovp, oxp->bind_myvid);
+               error = vnode_getwithvid(ovp, oxp_vid);
                if (error == 0) {
                        *vpp = ovp;
                }
index 746f09e6a9b3f022f0da7a31d46a9832be4189d4..caffb546ae3eee8e0bdc121468e343b53773064b 100644 (file)
@@ -195,6 +195,7 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
        struct null_node_hashhead * hd = NULL;
        struct null_node * a = NULL;
        struct vnode * vp = NULL;
+       uint32_t vp_vid = 0;
        int error = ENOENT;
 
        /*
@@ -214,6 +215,8 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
                                /* lowervp has been recycled */
                                error = EIO;
                                vp = NULL;
+                       } else {
+                               vp_vid = a->null_myvid;
                        }
                        // In the case of a successful look-up we should consider moving the object to the top of the head
                        break;
@@ -221,7 +224,7 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp)
        }
        lck_mtx_unlock(&null_hashmtx);
        if (vp != NULL) {
-               error = vnode_getwithvid(vp, a->null_myvid);
+               error = vnode_getwithvid(vp, vp_vid);
                if (error == 0) {
                        *vpp = vp;
                }
@@ -239,6 +242,7 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp)
        struct null_node_hashhead * hd = NULL;
        struct null_node * oxp = NULL;
        struct vnode * ovp = NULL;
+       uint32_t oxp_vid = 0;
        int error = 0;
 
        hd = NULL_NHASH(xp->null_lowervp);
@@ -259,6 +263,8 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp)
                                 *  don't add it.*/
                                error = EIO;
                                ovp = NULL;
+                       } else {
+                               oxp_vid = oxp->null_myvid;
                        }
                        goto end;
                }
@@ -271,7 +277,7 @@ end:
        lck_mtx_unlock(&null_hashmtx);
        if (ovp != NULL) {
                /* if we found something in the hash map then grab an iocount */
-               error = vnode_getwithvid(ovp, oxp->null_myvid);
+               error = vnode_getwithvid(ovp, oxp_vid);
                if (error == 0) {
                        *vpp = ovp;
                }
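
The bindfs and nullfs hunks fix the same use-after-unlock: the hash node (`a`/`oxp`) is only stable while the hash mutex is held, yet the old code read its vid after lck_mtx_unlock(), by which point the node may already have been freed. Snapshotting the vid under the mutex is the whole fix; vnode_getwithvid() then revalidates against that snapshot. In outline (`n` and NODE_TO_VNODE() stand in for the diff's node and accessor):

    uint32_t vid = 0;
    struct vnode *vp = NULL;

    lck_mtx_lock(&hash_mtx);
    /* ... find node n matching lowervp ... */
    vp  = NODE_TO_VNODE(n);
    vid = n->myvid;             /* must be read while the lock is held */
    lck_mtx_unlock(&hash_mtx);  /* n may be freed from this point on */

    if (vp != NULL && vnode_getwithvid(vp, vid) == 0) {
            /* vp is validated and now holds an iocount */
    }
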
index d0f3b06b39480f73e193f42a5c02af7fccb461e2..ee3a5b63062c4dff4e26a495f75398d0ff086598 100644 (file)
@@ -1618,6 +1618,38 @@ cfil_socket_safe_lock(struct inpcb *inp)
        return false;
 }
 
+/*
+ * cfil_socket_safe_lock_rip -
+ * This routine attempts to lock the rip socket safely.
+ * The caller passes in a locked ripcbinfo; this routine unlocks it (on both
+ * success and failure) before any socket_unlock() call.  This avoids double
+ * locking, since rip_unlock() will take the ripcbinfo lock itself if it needs
+ * to dispose of the inpcb when so_usecount reaches 0.
+ */
+static bool
+cfil_socket_safe_lock_rip(struct inpcb *inp, struct inpcbinfo *pcbinfo)
+{
+       struct socket *so = NULL;
+
+       VERIFY(pcbinfo != NULL);
+
+       if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) {
+               so = inp->inp_socket;
+               socket_lock(so, 1);
+               if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) {
+                       lck_rw_done(pcbinfo->ipi_lock);
+                       return true;
+               }
+       }
+
+       lck_rw_done(pcbinfo->ipi_lock);
+
+       if (so) {
+               socket_unlock(so, 1);
+       }
+       return false;
+}
+
 static struct socket *
 cfil_socket_from_sock_id(cfil_sock_id_t cfil_sock_id, bool udp_only)
 {
@@ -1670,6 +1702,9 @@ find_udp:
                }
        }
        lck_rw_done(pcbinfo->ipi_lock);
+       if (so != NULL) {
+               goto done;
+       }
 
        pcbinfo = &ripcbinfo;
        lck_rw_lock_shared(pcbinfo->ipi_lock);
@@ -1678,10 +1713,11 @@ find_udp:
                    inp->inp_socket != NULL &&
                    inp->inp_socket->so_cfil_db != NULL &&
                    (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) {
-                       if (cfil_socket_safe_lock(inp)) {
+                       if (cfil_socket_safe_lock_rip(inp, pcbinfo)) {
                                so = inp->inp_socket;
                        }
-                       break;
+                       /* pcbinfo is already unlocked, we are done. */
+                       goto done;
                }
        }
        lck_rw_done(pcbinfo->ipi_lock);
@@ -2836,6 +2872,7 @@ cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *rem
        if (so->so_cfil != NULL) {
                OSIncrementAtomic(&cfil_stats.cfs_sock_attach_already);
                CFIL_LOG(LOG_ERR, "already attached");
+               goto done;
        } else {
                cfil_info_alloc(so, NULL);
                if (so->so_cfil == NULL) {
@@ -4738,7 +4775,9 @@ cfil_update_entry_offsets(struct socket *so, struct cfil_info *cfil_info, int ou
                }
 
                entrybuf->cfe_ctl_q.q_start += datalen;
-               entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start;
+               if (entrybuf->cfe_pass_offset < entrybuf->cfe_ctl_q.q_start) {
+                       entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start;
+               }
                entrybuf->cfe_peeked = entrybuf->cfe_ctl_q.q_start;
                if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) {
                        entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset;
@@ -4780,6 +4819,11 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s
 
        datalen = cfil_data_length(data, &mbcnt, &mbnum);
 
+       if (datalen == 0) {
+               error = 0;
+               goto done;
+       }
+
        if (outgoing) {
                cfi_buf = &cfil_info->cfi_snd;
                cfil_info->cfi_byte_outbound_count += datalen;
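
Beyond the comment on cfil_socket_safe_lock_rip(), the deadlock shape is worth spelling out: socket_unlock() on a raw-IP socket can funnel into rip_unlock(), which takes ripcbinfo's ipi_lock itself when the inpcb must be disposed of, so the caller must never still hold that lock when unlocking the socket. Schematically (pseudo-flow, not literal code from the diff):

    lck_rw_lock_shared(pcbinfo->ipi_lock);
    /* ... locate inp, socket_lock(so, 1), revalidate inp ... */
    lck_rw_done(pcbinfo->ipi_lock);   /* drop BEFORE any socket_unlock() */
    socket_unlock(so, 1);             /* may reach rip_unlock() -> ipi_lock */
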
index 2b077473ab38c4b183ba8f70b394e813d8dae5dc..c332fd523682b6460644a31b2c85498f23548ed5 100644 (file)
@@ -161,7 +161,7 @@ ether_inet6_pre_output(ifnet_t ifp, protocol_family_t protocol_family,
 {
 #pragma unused(protocol_family)
        errno_t result;
-       struct sockaddr_dl sdl;
+       struct sockaddr_dl sdl = {};
        struct mbuf *m = *m0;
 
        /*
index 9830039bec6c5d074c9c31c600c79243125ace17..e004477544e2b1df49007962828a34ac54e87594 100644 (file)
@@ -246,7 +246,7 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family,
 
        switch (dst_netaddr->sa_family) {
        case AF_INET: {
-               struct sockaddr_dl ll_dest;
+               struct sockaddr_dl ll_dest = {};
 
                result = arp_lookup_ip(ifp,
                    (const struct sockaddr_in *)(uintptr_t)(size_t)dst_netaddr,
index 2d6246d31e2e292f1882596dd84903ea024d59d6..8337acaaaa7756285cc25acf433f6d55a041452d 100644 (file)
@@ -918,7 +918,7 @@ sixlowpan_proto_pre_output(ifnet_t ifp,
 {
 #pragma unused(protocol_family)
        errno_t result = 0;
-       struct sockaddr_dl sdl;
+       struct sockaddr_dl sdl = {};
        struct sockaddr_in6 *dest6 =  (struct sockaddr_in6 *)(uintptr_t)(size_t)dest;
 
        if (!IN6_IS_ADDR_MULTICAST(&dest6->sin6_addr)) {
index 5169de4b9001b407495034ffb4a5a40887b571bd..2bf7c2d23c62e4b1053cf0d357cd41f9170a396d 100644 (file)
@@ -101,7 +101,7 @@ multicast_list_program(struct multicast_list * mc_list,
        int                         i;
        struct multicast_entry *    mc = NULL;
        struct multicast_list       new_mc_list;
-       struct sockaddr_dl          source_sdl;
+       struct sockaddr_dl          source_sdl = {};
        ifmultiaddr_t *             source_multicast_list;
        struct sockaddr_dl          target_sdl;
 
index 2075bbec1ad42386c329b9b81d45735bcbac1c1c..df0845a7614ca185c8504d61d6dcd44e94a075c7 100644 (file)
@@ -170,7 +170,7 @@ ndrv_input(
        char                            *frame_header)
 {
        struct socket *so;
-       struct sockaddr_dl ndrvsrc;
+       struct sockaddr_dl ndrvsrc = {};
        struct ndrv_cb *np;
        int error = 0;
 
index ac3b6fbb367d06dd38f1c4c9071d0b746ca8bc0b..a06dc3914133efd2a11e1c66f3d7e90a8a826852 100644 (file)
 u_int32_t necp_drop_all_order = 0;
 u_int32_t necp_drop_all_level = 0;
 
-u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On
+#define NECP_LOOPBACK_PASS_ALL         1  // Pass all loopback traffic
+#define NECP_LOOPBACK_PASS_WITH_FILTER 2  // Pass all loopback traffic, but activate content filter and/or flow divert if applicable
+
+#if defined(XNU_TARGET_OS_OSX)
+#define NECP_LOOPBACK_PASS_DEFAULT NECP_LOOPBACK_PASS_WITH_FILTER
+#else
+#define NECP_LOOPBACK_PASS_DEFAULT NECP_LOOPBACK_PASS_ALL
+#endif
+
+u_int32_t necp_pass_loopback = NECP_LOOPBACK_PASS_DEFAULT;
 u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On
 u_int32_t necp_pass_interpose = 1; // 0=Off, 1=On
 u_int32_t necp_restrict_multicast = 1; // 0=Off, 1=On
@@ -241,12 +250,19 @@ ZONE_DECLARE(necp_ip_policy_zone, "necp_ip_policy",
 #define NECP_KERNEL_CONDITION_SDK_VERSION                       0x8000000
 #define NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER                0x10000000
 #define NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS                0x20000000
+#define NECP_KERNEL_CONDITION_IS_LOOPBACK                       0x40000000
 
 #define NECP_MAX_POLICY_RESULT_SIZE                                     512
 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE                         1024
 #define NECP_MAX_CONDITIONS_ARRAY_SIZE                          4096
 #define NECP_MAX_POLICY_LIST_COUNT                                      1024
 
+typedef enum {
+       NECP_BYPASS_TYPE_NONE = 0,
+       NECP_BYPASS_TYPE_INTCOPROC = 1,
+       NECP_BYPASS_TYPE_LOOPBACK = 2,
+} necp_socket_bypass_type_t;
+
 // Cap the policy size at the max result + conditions size, with room for extra TLVs
 #define NECP_MAX_POLICY_SIZE                                            (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE)
 
@@ -301,7 +317,8 @@ struct necp_socket_info {
        unsigned has_client : 1;
        unsigned is_platform_binary : 1;
        unsigned used_responsible_pid : 1;
-       unsigned __pad_bits : 5;
+       unsigned is_loopback : 1;
+       unsigned __pad_bits : 4;
 };
 
 static  lck_grp_attr_t  *necp_kernel_policy_grp_attr    = NULL;
@@ -2076,6 +2093,10 @@ necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t poli
                }
                break;
        }
+       case NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK: {
+               validated = TRUE;
+               break;
+       }
        default: {
                validated = FALSE;
                break;
@@ -2733,6 +2754,9 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length)
                                condition_tlv_length += sizeof(u_int16_t);
                                num_conditions++;
                        }
+                       if (condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) {
+                               num_conditions++;
+                       }
                }
 
                condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above.
@@ -2892,6 +2916,9 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length)
                        if (condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) {
                                cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PACKET_FILTER_TAGS, sizeof(policy->cond_packet_filter_tags), &policy->cond_packet_filter_tags, cond_buf, condition_tlv_length);
                        }
+                       if (condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) {
+                               cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK, 0, "", cond_buf, condition_tlv_length);
+                       }
                }
 
                cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes);
@@ -3596,6 +3623,14 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli
                        }
                        break;
                }
+               case NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK: {
+                       master_condition_mask |= NECP_KERNEL_CONDITION_IS_LOOPBACK;
+                       if (condition_is_negative) {
+                               master_condition_negated_mask |= NECP_KERNEL_CONDITION_IS_LOOPBACK;
+                       }
+                       socket_only_conditions = TRUE;
+                       break;
+               }
                default: {
                        break;
                }
@@ -3951,7 +3986,7 @@ necp_kernel_policy_get_new_id(bool socket_level)
        return newid;
 }
 
-#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SDK_VERSION | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS)
+#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SDK_VERSION | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS | NECP_KERNEL_CONDITION_IS_LOOPBACK)
 
 static necp_kernel_policy_id
 necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, struct necp_policy_condition_sdk_version *cond_sdk_version, u_int32_t cond_client_flags, char *cond_signing_identifier, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter)
@@ -6134,7 +6169,7 @@ necp_check_restricted_multicast_drop(proc_t proc, struct necp_socket_info *info,
 
 #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS)
 static void
-necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, uuid_t responsible_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, proc_t responsible_proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info)
+necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, uuid_t responsible_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, proc_t responsible_proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info, bool is_loopback)
 {
        memset(info, 0, sizeof(struct necp_socket_info));
 
@@ -6146,6 +6181,7 @@ necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_applic
        info->has_client = has_client;
        info->drop_order = drop_order;
        info->client_flags = client_flags;
+       info->is_loopback = is_loopback;
 
        if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) {
                struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid);
@@ -6348,6 +6384,7 @@ necp_application_find_policy_match_internal(proc_t proc,
        proc_t responsible_proc = PROC_NULL;
        proc_t effective_proc = proc;
        bool release_eproc = false;
+       necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE;
 
        u_int32_t flow_divert_aggregate_unit = 0;
 
@@ -6571,6 +6608,10 @@ necp_application_find_policy_match_internal(proc_t proc,
 
        // Check for loopback exception
        if (necp_pass_loopback > 0 && necp_is_loopback(&local_addr.sa, &remote_addr.sa, NULL, NULL, bound_interface_index)) {
+               bypass_type = NECP_BYPASS_TYPE_LOOPBACK;
+       }
+
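+       // In NECP_LOOPBACK_PASS_ALL mode, loopback traffic short-circuits policy evaluation entirely and is passed directly over the loopback interface.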
+       if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && necp_pass_loopback == NECP_LOOPBACK_PASS_ALL) {
                returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
                returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS;
                returned_result->routed_interface_index = lo_ifp->if_index;
@@ -6599,8 +6640,31 @@ necp_application_find_policy_match_internal(proc_t proc,
 
        u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES];
        size_t route_rule_id_array_count = 0;
-       necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, effective_proc, responsible_proc, drop_order, client_flags, &info);
+       necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, effective_proc, responsible_proc, drop_order, client_flags, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK));
        matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, info.used_responsible_pid ? responsible_proc : effective_proc, 0, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit);
+
+       // Check for loopback exception again after the policy match
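+       // In NECP_LOOPBACK_PASS_WITH_FILTER mode, loopback traffic still passes unless a flow divert policy matched, but any content filter and flow divert units found by the match are preserved.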
+       if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK &&
+           necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER &&
+           (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) {
+               if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) {
+                       returned_result->filter_control_unit = 0;
+               } else {
+                       returned_result->filter_control_unit = filter_control_unit;
+               }
+
+               if (flow_divert_aggregate_unit > 0) {
+                       returned_result->flow_divert_aggregate_unit = flow_divert_aggregate_unit;
+               }
+
+               returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
+               returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS;
+               returned_result->routed_interface_index = lo_ifp->if_index;
+               *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT);
+               error = 0;
+               goto done;
+       }
+
        if (matched_policy) {
                returned_result->policy_id = matched_policy->id;
                returned_result->routing_result = matched_policy->result;
@@ -7036,6 +7100,8 @@ necp_application_find_policy_match_internal(proc_t proc,
                }
                rt = NULL;
        }
+
+done:
        // Unlock
        lck_rw_done(&necp_kernel_policy_lock);
 
@@ -7103,7 +7169,7 @@ done:
 }
 
 static bool
-necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, u_int16_t pf_tag, struct rtentry *rt)
+necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, u_int16_t pf_tag, struct rtentry *rt, bool is_loopback)
 {
        if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) {
                if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) {
@@ -7482,6 +7548,18 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a
                }
        }
 
+       if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) {
+               if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) {
+                       if (is_loopback) {
+                               return FALSE;
+                       }
+               } else {
+                       if (!is_loopback) {
+                               return FALSE;
+                       }
+               }
+       }
+
        return TRUE;
 }
 
@@ -7492,7 +7570,7 @@ necp_socket_calc_flowhash_locked(struct necp_socket_info *info)
 }
 
 static void
-necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, proc_t *socket_proc, struct necp_socket_info *info)
+necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, proc_t *socket_proc, struct necp_socket_info *info, bool is_loopback)
 {
        struct socket *so = NULL;
        proc_t sock_proc = NULL;
@@ -7503,6 +7581,7 @@ necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_loc
        so = inp->inp_socket;
 
        info->drop_order = drop_order;
+       info->is_loopback = is_loopback;
 
        if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) {
                info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid);
@@ -7771,7 +7850,7 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy
                                continue;
                        }
 
-                       if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, pf_tag, rt)) {
+                       if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, pf_tag, rt, info->is_loopback)) {
                                if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) {
                                        if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) {
                                                necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit;
@@ -7924,16 +8003,16 @@ necp_socket_is_connected(struct inpcb *inp)
        return inp->inp_socket->so_state & (SS_ISCONNECTING | SS_ISCONNECTED | SS_ISDISCONNECTING);
 }
 
-static inline bool
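+// Classifies whether this socket's traffic may bypass NECP policy: loopback traffic, co-processor (intcoproc) traffic, or no bypass.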
+static inline necp_socket_bypass_type_t
 necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp)
 {
        if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL, IFSCOPE_NONE)) {
-               return true;
+               return NECP_BYPASS_TYPE_LOOPBACK;
        } else if (necp_is_intcoproc(inp, NULL)) {
-               return true;
+               return NECP_BYPASS_TYPE_INTCOPROC;
        }
 
-       return false;
+       return NECP_BYPASS_TYPE_NONE;
 }
 
 static inline void
@@ -7963,6 +8042,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local
        u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE;
        necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE;
        proc_t socket_proc = NULL;
+       necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE;
 
        u_int32_t netagent_ids[NECP_MAX_NETAGENTS];
        memset(&netagent_ids, 0, sizeof(netagent_ids));
@@ -8002,7 +8082,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local
                        inp->inp_policyresult.results.filter_control_unit = 0;
                        inp->inp_policyresult.results.flow_divert_aggregate_unit = 0;
                        inp->inp_policyresult.results.route_rule_id = 0;
-                       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
+                       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp) != NECP_BYPASS_TYPE_NONE) {
                                inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
                        } else {
                                inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP;
@@ -8012,7 +8092,8 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local
        }
 
        // Check for loopback exception
-       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
+       bypass_type = necp_socket_bypass(override_local_addr, override_remote_addr, inp);
+       if (bypass_type == NECP_BYPASS_TYPE_INTCOPROC || (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && necp_pass_loopback == NECP_LOOPBACK_PASS_ALL)) {
                if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
                        // If the previous policy result was "socket scoped", un-scope the socket.
                        inp->inp_flags &= ~INP_BOUND_IF;
@@ -8033,7 +8114,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local
 
        // Lock
        lck_rw_lock_shared(&necp_kernel_policy_lock);
-       necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &socket_proc, &info);
+       necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &socket_proc, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK));
 
        // Check info
        u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info);
@@ -8060,6 +8141,36 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local
        size_t route_rule_id_array_count = 0;
        matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), 0, &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit);
 
+       // Check for loopback exception again after the policy match
+       if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK &&
+           necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER &&
+           (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) {
+               if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) {
+                       // If the previous policy result was "socket scoped", un-scope the socket.
+                       inp->inp_flags &= ~INP_BOUND_IF;
+                       inp->inp_boundifp = NULL;
+               }
+               // Mark socket as a pass
+               inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
+               inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH;
+               inp->inp_policyresult.policy_gencount = 0;
+               inp->inp_policyresult.app_id = 0;
+               inp->inp_policyresult.flowhash = 0;
+               inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
+               inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit;
+               inp->inp_policyresult.results.route_rule_id = 0;
+               inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS;
+
+               // Unlock
+               lck_rw_done(&necp_kernel_policy_lock);
+
+               if (socket_proc) {
+                       proc_rele(socket_proc);
+               }
+
+               return NECP_KERNEL_POLICY_ID_NONE;
+       }
+
        // If the socket matched a scoped service policy, mark as Drop if not registered.
        // This covers the cases in which a service is required (on demand) but hasn't started yet.
        if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED ||
@@ -9511,6 +9622,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr
        necp_kernel_policy_filter filter_control_unit = 0;
        u_int32_t pass_flags = 0;
        u_int32_t flow_divert_aggregate_unit = 0;
+       necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE;
 
        memset(&netagent_ids, 0, sizeof(netagent_ids));
 
@@ -9541,7 +9653,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr
        if (necp_kernel_socket_policies_count == 0 ||
            (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) {
                if (necp_drop_all_order > 0 || drop_order > 0) {
-                       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
+                       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp) != NECP_BYPASS_TYPE_NONE) {
                                allowed_to_receive = TRUE;
                        } else {
                                allowed_to_receive = FALSE;
@@ -9593,14 +9705,15 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr
        }
 
        // Check for loopback exception
-       if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) {
+       bypass_type = necp_socket_bypass(override_local_addr, override_remote_addr, inp);
+       if (bypass_type == NECP_BYPASS_TYPE_INTCOPROC || (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && necp_pass_loopback == NECP_LOOPBACK_PASS_ALL)) {
                allowed_to_receive = TRUE;
                goto done;
        }
 
        // Actually calculate policy result
        lck_rw_lock_shared(&necp_kernel_policy_lock);
-       necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &socket_proc, &info);
+       necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &socket_proc, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK));
 
        flowhash = necp_socket_calc_flowhash_locked(&info);
        if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE &&
@@ -9635,6 +9748,22 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr
        size_t route_rule_id_array_count = 0;
        struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), pf_tag, return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit);
 
+       // Check for loopback exception again after the policy match
+       if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK &&
+           necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER &&
+           (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) {
+               // Policies have changed since the last evaluation; update the inp result with the new filter state
+               if (inp->inp_policyresult.results.filter_control_unit != filter_control_unit) {
+                       inp->inp_policyresult.results.filter_control_unit = filter_control_unit;
+               }
+               if (inp->inp_policyresult.results.flow_divert_aggregate_unit != flow_divert_aggregate_unit) {
+                       inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit;
+               }
+               allowed_to_receive = TRUE;
+               lck_rw_done(&necp_kernel_policy_lock);
+               goto done;
+       }
+
        if (route_rule_id_array_count == 1) {
                route_rule_id = route_rule_id_array[0];
        } else if (route_rule_id_array_count > 1) {
diff --git a/bsd/net/necp.h b/bsd/net/necp.h
index 041682c5b451a054f0ec0b1c6ccc8eb958241499..c2f39c6af5346d063985e6b2bf709bee8919ae3a 100644 (file)
@@ -144,6 +144,7 @@ struct necp_packet_header {
 #define NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR          21      // necp_policy_condition_addr
 #define NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE     22      // necp_policy_condition_addr_range
 #define NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE    23      // necp_policy_condition_addr_range
+#define NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK          31      // N/A
 // Socket/Application conditions, continued
 #define NECP_POLICY_CONDITION_CLIENT_FLAGS              24      // u_int32_t, values from NECP_CLIENT_PARAMETER_FLAG_*
 #define NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY     25      // N/A
diff --git a/bsd/netinet/flow_divert.c b/bsd/netinet/flow_divert.c
index 8a2b4c4f86b737afc8c4f1fe436438e5c41af05e..6a68dac812447608f6be73f07f1086dfad4df7d5 100644 (file)
@@ -1176,6 +1176,7 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr
        size_t                  cfil_id_size            = 0;
        struct inpcb            *inp = sotoinpcb(so);
        struct ifnet *ifp = NULL;
+       uint32_t flags = 0;
 
        error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT, &connect_packet);
        if (error) {
@@ -1268,7 +1269,16 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr
        }
 
        if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) {
-               uint32_t flags = FLOW_DIVERT_TOKEN_FLAG_TFO;
+               flags |= FLOW_DIVERT_TOKEN_FLAG_TFO;
+       }
+
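+       // Mark the flow as bound if the socket is bound to an interface or to a specific local address.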
+       if ((inp->inp_flags & INP_BOUND_IF) ||
+           ((inp->inp_vflag & INP_IPV6) && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) ||
+           ((inp->inp_vflag & INP_IPV4) && inp->inp_laddr.s_addr != INADDR_ANY)) {
+               flags |= FLOW_DIVERT_TOKEN_FLAG_BOUND;
+       }
+
+       if (flags != 0) {
                error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_FLAGS, sizeof(flags), &flags);
                if (error) {
                        goto done;
diff --git a/bsd/netinet/flow_divert_proto.h b/bsd/netinet/flow_divert_proto.h
index 424a5bbde2981c4440a1c8cb87e37e7dfe0d9481..ecc07f4a20e33b31f1301a5d528674a2d7c1d9cd 100644 (file)
@@ -85,6 +85,7 @@
 #define FLOW_DIVERT_TOKEN_FLAG_VALIDATED        0x0000001
 #define FLOW_DIVERT_TOKEN_FLAG_TFO              0x0000002
 #define FLOW_DIVERT_TOKEN_FLAG_MPTCP            0x0000004
+#define FLOW_DIVERT_TOKEN_FLAG_BOUND            0x0000008
 
 #define FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP       0x0000001
 
diff --git a/bsd/netinet/in_arp.c b/bsd/netinet/in_arp.c
index 01d60970a992136f6d825af3cd41c5cb95ee68f8..2b583d3af4a9a0882be66a6006a14d5d6a7028aa 100644 (file)
@@ -1254,7 +1254,7 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest,
        struct ifaddr *rt_ifa;
        struct sockaddr *sa;
        uint32_t rtflags;
-       struct sockaddr_dl sdl;
+       struct sockaddr_dl sdl = {};
        boolean_t send_probe_notif = FALSE;
        boolean_t enqueued = FALSE;
 
@@ -1632,7 +1632,7 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop,
     const struct sockaddr_in *target_ip)
 {
        char ipv4str[MAX_IPv4_STR_LEN];
-       struct sockaddr_dl proxied;
+       struct sockaddr_dl proxied = {};
        struct sockaddr_dl *gateway, *target_hw = NULL;
        struct ifaddr *ifa;
        struct in_ifaddr *ia;
diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c
index ea79bcea43a10dff58f917907f59b0dde8ff3680..ec6a8ecb4bfca5fc4a0f9736fb09391f88ca8d6f 100644 (file)
@@ -1865,6 +1865,7 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, uint32_t mtu, int sw_csum)
 
                M_COPY_CLASSIFIER(m, m0);
                M_COPY_PFTAG(m, m0);
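+               /* Propagate the NECP mbuf tag to each fragment as well. */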
+               M_COPY_NECPTAG(m, m0);
 
 #if BYTE_ORDER != BIG_ENDIAN
                HTONS(mhip->ip_off);
diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c
index c253fc4f82fb5cf246c3cbba7de2b9ed45f2c285..f00002616c4ecd12183327d2de590b19475ebf23 100644 (file)
@@ -1928,7 +1928,11 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn,
                return 0;
        }
 
-       if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
+       if (!(m->m_flags & M_PKTHDR)) {
+               return 0;
+       }
+
+       if (m->m_pkthdr.pkt_flags & PKTF_MPTCP) {
                if (off && (dsn != m->m_pkthdr.mp_dsn ||
                    rseq != m->m_pkthdr.mp_rseq ||
                    dlen != m->m_pkthdr.mp_rlen)) {
@@ -1941,34 +1945,38 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn,
                        soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
                        return -1;
                }
-               m->m_pkthdr.mp_dsn += off;
-               m->m_pkthdr.mp_rseq += off;
+       }
 
-               VERIFY(m_pktlen(m) < UINT16_MAX);
-               m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m);
-       } else {
-               if (!(mpts->mpts_flags & MPTSF_FULLY_ESTABLISHED)) {
-                       /* data arrived without an DSS option mapping */
+       /* If the mbuf extends beyond the right edge of the mapping, we need to split it */
+       if (m_pktlen(m) > dlen - off) {
+               struct mbuf *new = m_split(m, dlen - off, M_DONTWAIT);
+               if (new == NULL) {
+                       os_log_error(mptcp_log_handle, "%s - %lx: m_split failed dlen %u off %d pktlen %d, killing subflow %d",
+                           __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpts->mpts_mpte),
+                           dlen, off, m_pktlen(m),
+                           mpts->mpts_connid);
 
-                       /* initial subflow can fallback right after SYN handshake */
-                       if (mpts->mpts_flags & MPTSF_INITIAL_SUB) {
-                               mptcp_notify_mpfail(so);
-                       } else {
-                               soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
+                       soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
+                       return -1;
+               }
 
-                               return -1;
-                       }
-               } else if (m->m_flags & M_PKTHDR) {
-                       /* We need to fake the DATA-mapping */
-                       m->m_pkthdr.pkt_flags |= PKTF_MPTCP;
-                       m->m_pkthdr.mp_dsn = dsn + off;
-                       m->m_pkthdr.mp_rseq = rseq + off;
+               m->m_next = new;
+               sballoc(&so->so_rcv, new);
+               /* Undo, as sballoc will add to it as well */
+               so->so_rcv.sb_cc -= new->m_len;
 
-                       VERIFY(m_pktlen(m) < UINT16_MAX);
-                       m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m);
+               if (so->so_rcv.sb_mbtail == m) {
+                       so->so_rcv.sb_mbtail = new;
                }
        }
 
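+       /* Stamp the DSS mapping, advanced by the offset, onto this mbuf. */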
+       m->m_pkthdr.pkt_flags |= PKTF_MPTCP;
+       m->m_pkthdr.mp_dsn = dsn + off;
+       m->m_pkthdr.mp_rseq = rseq + off;
+
+       VERIFY(m_pktlen(m) < UINT16_MAX);
+       m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m);
+
        mpts->mpts_flags |= MPTSF_FULLY_ESTABLISHED;
 
        return 0;
@@ -1982,11 +1990,15 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa,
     struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
 {
 #pragma unused(uio)
-       struct socket *mp_so = mptetoso(tptomptp(sototcpcb(so))->mpt_mpte);
+       struct socket *mp_so;
+       struct mptses *mpte;
+       struct mptcb *mp_tp;
        int flags, error = 0;
-       struct proc *p = current_proc();
        struct mbuf *m, **mp = mp0;
-       boolean_t proc_held = FALSE;
+
+       mpte = tptomptp(sototcpcb(so))->mpt_mpte;
+       mp_so = mptetoso(mpte);
+       mp_tp = mpte->mpte_mptcb;
 
        VERIFY(so->so_proto->pr_flags & PR_CONNREQUIRED);
 
@@ -2107,16 +2119,6 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa,
 
        mptcp_update_last_owner(so, mp_so);
 
-       if (mp_so->last_pid != proc_pid(p)) {
-               p = proc_find(mp_so->last_pid);
-               if (p == PROC_NULL) {
-                       p = current_proc();
-               } else {
-                       proc_held = TRUE;
-               }
-       }
-
-       OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv);
        SBLASTRECORDCHK(&so->so_rcv, "mptcp_subflow_soreceive 1");
        SBLASTMBUFCHK(&so->so_rcv, "mptcp_subflow_soreceive 1");
 
@@ -2130,18 +2132,9 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa,
 
                VERIFY(m->m_nextpkt == NULL);
 
-               if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
-                       orig_dlen = dlen = m->m_pkthdr.mp_rlen;
-                       dsn = m->m_pkthdr.mp_dsn;
-                       sseq = m->m_pkthdr.mp_rseq;
-                       csum = m->m_pkthdr.mp_csum;
-               } else {
-                       /* We did fallback */
-                       if (mptcp_adj_rmap(so, m, 0, 0, 0, 0)) {
-                               error = EIO;
-                               *mp0 = NULL;
-                               goto release;
-                       }
+               if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) {
+fallback:
+                       /* Just move mbuf to MPTCP-level */
 
                        sbfree(&so->so_rcv, m);
 
@@ -2159,20 +2152,93 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa,
                        }
 
                        continue;
-               }
+               } else if (!(m->m_flags & M_PKTHDR) || !(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
+                       struct mptsub *mpts = sototcpcb(so)->t_mpsub;
+                       boolean_t found_mapping = false;
+                       int parsed_length = 0;
+                       struct mbuf *m_iter;
+
+                       /*
+                        * No MPTCP option in the header. Either fall back or
+                        * wait for additional mappings.
+                        */
+                       if (!(mpts->mpts_flags & MPTSF_FULLY_ESTABLISHED)) {
+                               /* data arrived without a DSS option mapping */
+
+                               /* initial subflow can fallback right after SYN handshake */
+                               if (mpts->mpts_flags & MPTSF_INITIAL_SUB) {
+                                       mptcp_notify_mpfail(so);
+
+                                       goto fallback;
+                               } else {
+                                       os_log_error(mptcp_log_handle, "%s - %lx: No DSS on secondary subflow. Killing %d\n",
+                                           __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
+                                           mpts->mpts_connid);
+                                       soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
+
+                                       error = EIO;
+                                       *mp0 = NULL;
+                                       goto release;
+                               }
+                       }
+
+                       /* Thus, let's look for an mbuf with the mapping */
+                       m_iter = m->m_next;
+                       parsed_length = m->m_len;
+                       while (m_iter != NULL && parsed_length < UINT16_MAX) {
+                               if (!(m_iter->m_flags & M_PKTHDR) || !(m_iter->m_pkthdr.pkt_flags & PKTF_MPTCP)) {
+                                       parsed_length += m_iter->m_len;
+                                       m_iter = m_iter->m_next;
+                                       continue;
+                               }
+
+                               found_mapping = true;
+
+                               /* Found an mbuf with a DSS-mapping */
+                               orig_dlen = dlen = m_iter->m_pkthdr.mp_rlen;
+                               dsn = m_iter->m_pkthdr.mp_dsn;
+                               sseq = m_iter->m_pkthdr.mp_rseq;
+                               csum = m_iter->m_pkthdr.mp_csum;
+
+                               if (m_iter->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) {
+                                       dfin = 1;
+                               }
+
+                               break;
+                       }
+
+                       if (!found_mapping && parsed_length < UINT16_MAX) {
+                               /* Mapping not yet present, we can wait! */
+                               if (*mp0 == NULL) {
+                                       error = EWOULDBLOCK;
+                               }
+                               goto release;
+                       } else if (!found_mapping && parsed_length >= UINT16_MAX) {
+                               os_log_error(mptcp_log_handle, "%s - %lx: Received more than 64KB without DSS mapping. Killing %d\n",
+                                   __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte),
+                                   mpts->mpts_connid);
+                               /* Received 64KB without DSS-mapping. We should kill the subflow */
+                               soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST);
+
+                               error = EIO;
+                               *mp0 = NULL;
+                               goto release;
+                       }
+               } else {
+                       orig_dlen = dlen = m->m_pkthdr.mp_rlen;
+                       dsn = m->m_pkthdr.mp_dsn;
+                       sseq = m->m_pkthdr.mp_rseq;
+                       csum = m->m_pkthdr.mp_csum;
 
-               if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) {
-                       dfin = 1;
+                       if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) {
+                               dfin = 1;
+                       }
                }
 
                /*
                 * Check if the full mapping is now present
                 */
                if ((int)so->so_rcv.sb_cc < dlen - dfin) {
-                       mptcplog((LOG_INFO, "%s not enough data (%u) need %u for dsn %u\n",
-                           __func__, so->so_rcv.sb_cc, dlen, (uint32_t)dsn),
-                           MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG);
-
                        if (*mp0 == NULL) {
                                error = EWOULDBLOCK;
                        }
@@ -2238,10 +2304,6 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa,
 release:
        sbunlock(&so->so_rcv, TRUE);
 
-       if (proc_held) {
-               proc_rele(p);
-       }
-
        return error;
 }
 
@@ -2253,8 +2315,8 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
     struct mbuf *top, struct mbuf *control, int flags)
 {
        struct socket *mp_so = mptetoso(tptomptp(sototcpcb(so))->mpt_mpte);
-       struct proc *p = current_proc();
        boolean_t en_tracing = FALSE, proc_held = FALSE;
+       struct proc *p = current_proc();
        int en_tracing_val;
        int sblocked = 1; /* Pretend as if it is already locked, so we won't relock it */
        int error;
@@ -2301,8 +2363,6 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
        inp_update_necp_policy(sotoinpcb(so), NULL, NULL, 0);
 #endif /* NECP */
 
-       OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd);
-
        error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked);
        if (error) {
                goto out;
diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c
index 901c0338de3f60d9a403ae02ce8f5ea8031c96a0..078745a2408645bbdc06256189320644aafd2d5a 100644 (file)
@@ -579,6 +579,21 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m,
        }
 #endif /* TRAFFIC_MGT */
 
+       if (th->th_seq != tp->rcv_nxt) {
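+               /* Out-of-order segment: drop it if any mbuf in its chain belongs to a class under memory pressure, rather than queueing it for reassembly. */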
+               struct mbuf *tmp = m;
+               while (tmp != NULL) {
+                       if (mbuf_class_under_pressure(tmp)) {
+                               m_freem(m);
+                               tcp_reass_overflows++;
+                               tcpstat.tcps_rcvmemdrop++;
+                               *tlenp = 0;
+                               return 0;
+                       }
+
+                       tmp = tmp->m_next;
+               }
+       }
+
        /*
         * Limit the number of segments in the reassembly queue to prevent
         * holding on to too many segments (and thus running out of mbufs).
diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c
index c2389347c57c4bacfb2767a7d9c9a4c5d6dec79e..2799b8fea9d5ea6672e4613df893f016f11dcf22 100644 (file)
@@ -371,7 +371,7 @@ tcp_usr_listen(struct socket *so, struct proc *p)
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp;
 
-       COMMON_START();
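+       /* Unlike COMMON_START(), allow listen() to proceed even when flow divert is set on the socket. */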
+       COMMON_START_ALLOW_FLOW_DIVERT(true);
        if (inp->inp_lport == 0) {
                error = in_pcbbind(inp, NULL, p);
        }
@@ -389,7 +389,7 @@ tcp6_usr_listen(struct socket *so, struct proc *p)
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp;
 
-       COMMON_START();
+       COMMON_START_ALLOW_FLOW_DIVERT(true);
        if (inp->inp_lport == 0) {
                inp->inp_vflag &= ~INP_IPV4;
                if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) {
diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c
index 60508e3a453a9f754b13967aa81a70973d03aa19..92ee660edfd1b8cfe510b4446042891240e40665 100644 (file)
@@ -1910,6 +1910,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp,
 
                        M_COPY_CLASSIFIER(new_m, morig);
                        M_COPY_PFTAG(new_m, morig);
+                       M_COPY_NECPTAG(new_m, morig);
 
                        ip6f->ip6f_reserved = 0;
                        ip6f->ip6f_ident = id;
diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c
index 2e11f4515eafe4e78b258daeab7a690c6a2118bc..061e6f45cdf2f15b2ad13a8ab25422d8d331c088 100644 (file)
@@ -4415,13 +4415,21 @@ ipsec6_tunnel_validate(
                panic("too short mbuf on ipsec6_tunnel_validate");
        }
 #endif
-       if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) {
+       if (nxt == IPPROTO_IPV4) {
+               if (m->m_pkthdr.len < off + sizeof(struct ip)) {
+                       ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d iphdr %zu", m->m_pkthdr.len, off, sizeof(struct ip)));
+                       return 0;
+               }
+       } else if (nxt == IPPROTO_IPV6) {
+               if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
+                       ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr)));
+                       return 0;
+               }
+       } else {
+               ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate invalid nxt(%u) protocol", nxt));
                return 0;
        }
 
-       if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) {
-               return 0;
-       }
        /* do not decapsulate if the SA is for transport mode only */
        if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) {
                return 0;
diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c
index 0e7c29ea9463da0761c2bc9595221d3fc5bf1117..b9c2b5ac1e773f564de12066f909239ea7b27176 100644 (file)
@@ -2987,6 +2987,17 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred
                return error;
        }
 
+       if (length == 0) {
+               /* We should never get here */
+#if DEVELOPMENT
+               printf("nfs_buf_write_rpc: Got request with zero length. np %p, bp %p, offset %lld\n", np, bp, offset);
+#else
+               printf("nfs_buf_write_rpc: Got request with zero length.\n");
+#endif /* DEVELOPMENT */
+               nfs_buf_iodone(bp);
+               return 0;
+       }
+
        auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE,
            UIO_WRITE, &uio_buf, sizeof(uio_buf));
        NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length);
@@ -3204,7 +3215,7 @@ finish:
                bp->nb_verf = wverf;
        }
 
-       if ((rlen > 0) && (bp->nb_offio < (offset + (int)rlen))) {
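+       /* Do not advance the written range when the write verifier went stale; that data must be rewritten. */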
+       if (!ISSET(bp->nb_flags, NB_STALEWVERF) && rlen > 0 && (bp->nb_offio < (offset + (int)rlen))) {
                bp->nb_offio = offset + rlen;
        }
 
diff --git a/bsd/sys/file_internal.h b/bsd/sys/file_internal.h
index 90175d11c432c298eb9abfa37ef88be0a575326d..1bebabf824438203d963e899895db66ba5e9cbfe 100644 (file)
@@ -206,11 +206,30 @@ os_refgrp_decl_extern(f_refgrp);        /* os_refgrp_t for file refcounts */
  * @brief
  * Acquire a file reference on the specified file.
  *
+ * @description
+ * The @c proc must be locked while this operation is being performed
+ * to avoid races with setting the FG_CONFINED flag.
+ *
+ * @param proc
+ * The proc this file reference is taken on behalf of.
+ *
  * @param fg
  * The specified file
  */
 void
-fg_ref(struct fileglob *fg);
+fg_ref(proc_t proc, struct fileglob *fg);
+
+/*!
+ * @function fg_drop_live
+ *
+ * @brief
+ * Drops a file reference on the specified file that isn't the last one.
+ *
+ * @param fg
+ * The file whose reference is being dropped.
+ */
+void
+fg_drop_live(struct fileglob *fg);
 
 /*!
  * @function fg_drop
diff --git a/bsd/sys/kern_memorystatus.h b/bsd/sys/kern_memorystatus.h
index 48b03e0de3b1abdee62aa34d0ed69fb5efd409e3..4cbfd6b8f0c4ccae1e95c7428e868a146df620e6 100644 (file)
@@ -161,6 +161,20 @@ typedef struct memorystatus_kernel_stats {
        char     largest_zone_name[MACH_ZONE_NAME_MAX_LEN];
 } memorystatus_kernel_stats_t;
 
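+/* Reason the freezer skipped a process; recorded per process and in the jetsam snapshot. */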
+typedef enum memorystatus_freeze_skip_reason {
+       kMemorystatusFreezeSkipReasonNone = 0,
+       kMemorystatusFreezeSkipReasonExcessSharedMemory = 1,
+       kMemorystatusFreezeSkipReasonLowPrivateSharedRatio = 2,
+       kMemorystatusFreezeSkipReasonNoCompressorSpace = 3,
+       kMemorystatusFreezeSkipReasonNoSwapSpace = 4,
+       kMemorystatusFreezeSkipReasonBelowMinPages = 5,
+       kMemorystatusFreezeSkipReasonLowProbOfUse = 6,
+       kMemorystatusFreezeSkipReasonOther = 7,
+       kMemorystatusFreezeSkipReasonOutOfBudget = 8,
+       kMemorystatusFreezeSkipReasonOutOfSlots = 9,
+       kMemorystatusFreezeSkipReasonDisabled = 10,
+       _kMemorystatusFreezeSkipReasonMax
+} memorystatus_freeze_skip_reason_t;
 /*
 ** This is a variable-length struct.
 ** Allocate a buffer of the size returned by the sysctl, cast to a memorystatus_snapshot_t *
@@ -172,6 +186,7 @@ typedef struct jetsam_snapshot_entry {
        int32_t  priority;
        uint32_t state;
        uint32_t fds;
+       memorystatus_freeze_skip_reason_t jse_freeze_skip_reason; /* why wasn't this process frozen? */
        uint8_t  uuid[16];
        uint64_t user_data;
        uint64_t killed;
@@ -352,6 +367,8 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu
 #define MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP 23 /* Used by unit tests in the development kernel only. */
 #endif /* PRIVATE */
 
+#define MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN 24 /* Check if the process is frozen. */
+
 /* Commands that act on a group of processes */
 #define MEMORYSTATUS_CMD_GRP_SET_PROPERTIES           100
 
@@ -505,7 +522,6 @@ typedef struct memorystatus_memlimit_properties2 {
 #define P_MEMSTAT_PRIORITY_ASSERTION              0x00020000   /* jetsam priority is being driven by an assertion */
 #define P_MEMSTAT_FREEZE_CONSIDERED               0x00040000   /* This process has been considered for the freezer. */
 
-
 /*
  * p_memstat_relaunch_flags holds
  *      - relaunch behavior when jetsammed
diff --git a/bsd/sys/kern_memorystatus_freeze.h b/bsd/sys/kern_memorystatus_freeze.h
index c56ba0b4e114d0cf8adaeb9f3f1f7ce014fe8607..dd01e09ce8f2efd2e6049fc3477bcd645b024cf1 100644 (file)
@@ -112,6 +112,8 @@ boolean_t memorystatus_freeze_thread_should_run(void);
 int memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable);
 int memorystatus_get_process_is_freezable(pid_t pid, int *is_freezable);
 int memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval);
+void memorystatus_freeze_init_proc(proc_t p);
+errno_t memorystatus_get_process_is_frozen(pid_t pid, int *is_frozen);
 
 #endif /* CONFIG_FREEZE */
 
diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h
index 75369750d339bfb89e80320c74b2d2d809bf5d28..0e8be447e685946c3d8ba9eaf8a14991564d841a 100644 (file)
@@ -615,6 +615,7 @@ struct mbuf {
 #define m_dat           M_dat.M_databuf
 #define m_pktlen(_m)    ((_m)->m_pkthdr.len)
 #define m_pftag(_m)     (&(_m)->m_pkthdr.builtin_mtag._net_mtag._pf_mtag)
+#define m_necptag(_m)   (&(_m)->m_pkthdr.builtin_mtag._net_mtag._necp_mtag)
 
 /* mbuf flags (private) */
 #define M_EXT           0x0001  /* has associated external storage */
@@ -814,6 +815,8 @@ union m16kcluster {
 
 #define M_COPY_PFTAG(to, from)          m_copy_pftag(to, from)
 
+#define M_COPY_NECPTAG(to, from)        m_copy_necptag(to, from)
+
 #define M_COPY_CLASSIFIER(to, from)     m_copy_classifier(to, from)
 
 /*
@@ -1276,6 +1279,8 @@ extern struct mbuf *m_prepend_2(struct mbuf *, int, int, int);
 extern struct mbuf *m_pullup(struct mbuf *, int);
 extern struct mbuf *m_split(struct mbuf *, int, int);
 extern void m_mclfree(caddr_t p);
+extern int mbuf_get_class(struct mbuf *m);
+extern bool mbuf_class_under_pressure(struct mbuf *m);
 
 /*
  * On platforms which require strict alignment (currently for anything but
@@ -1434,6 +1439,7 @@ __private_extern__ caddr_t m_mclalloc(int);
 __private_extern__ int m_mclhasreference(struct mbuf *);
 __private_extern__ void m_copy_pkthdr(struct mbuf *, struct mbuf *);
 __private_extern__ void m_copy_pftag(struct mbuf *, struct mbuf *);
+__private_extern__ void m_copy_necptag(struct mbuf *, struct mbuf *);
 __private_extern__ void m_copy_classifier(struct mbuf *, struct mbuf *);
 
 __private_extern__ struct mbuf *m_dtom(void *);
diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h
index 2bf615a7128e17a881468c48987faacc893ef526..46a610413bc22b524a858e485fdf6d242673727c 100644 (file)
@@ -389,6 +389,9 @@ struct  proc {
        int32_t           p_memstat_requestedpriority;  /* active priority */
        int32_t           p_memstat_assertionpriority;  /* assertion driven priority */
        uint32_t          p_memstat_dirty;              /* dirty state */
+#if CONFIG_FREEZE
+       uint8_t           p_memstat_freeze_skip_reason; /* memorystatus_freeze_skip_reason_t. Protected by the freezer mutex. */
+#endif
        uint64_t          p_memstat_userdata;           /* user state */
        uint64_t          p_memstat_idledeadline;       /* time at which process became clean */
        uint64_t          p_memstat_idle_start;         /* abstime process transitions into the idle band */
diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 9d8c3820995c2c67d87a1d54d2f9283d4b30be15..9421ee0f9655530125aeabfc02b06747f9b12bff 100644 (file)
@@ -2052,8 +2052,9 @@ shared_region_map_and_slide_setup(
        uint32_t                            mappings_count,
        struct shared_file_mapping_slide_np *mappings,
        struct _sr_file_mappings            **sr_file_mappings,
-       struct vm_shared_region             **shared_region,
-       struct vnode                        **scdir_vp)
+       struct vm_shared_region             **shared_region_ptr,
+       struct vnode                        **scdir_vp,
+       struct vnode                        *rdir_vp)
 {
        int                             error = 0;
        struct _sr_file_mappings        *srfmp;
@@ -2064,6 +2065,7 @@ shared_region_map_and_slide_setup(
        vm_prot_t                       maxprot = VM_PROT_ALL;
 #endif
        uint32_t                        i;
+       struct vm_shared_region         *shared_region;
 
        SHARED_REGION_TRACE_DEBUG(
                ("shared_region: %p [%d(%s)] -> map\n",
@@ -2113,8 +2115,9 @@ shared_region_map_and_slide_setup(
        }
 
        /* get the process's shared region (setup in vm_map_exec()) */
-       *shared_region = vm_shared_region_trim_and_get(current_task());
-       if (*shared_region == NULL) {
+       shared_region = vm_shared_region_trim_and_get(current_task());
+       *shared_region_ptr = shared_region;
+       if (shared_region == NULL) {
                SHARED_REGION_TRACE_ERROR(
                        ("shared_region: %p [%d(%s)] map(): "
                        "no shared region\n",
@@ -2124,6 +2127,22 @@ shared_region_map_and_slide_setup(
                goto done;
        }
 
+       /*
+        * Check that the shared region matches the current root
+        * directory of this process.  Deny the mapping to
+        * avoid tainting the shared region with something that
+        * doesn't belong in it.
+        */
+       struct vnode *sr_vnode = vm_shared_region_root_dir(shared_region);
+       if (sr_vnode != NULL ? rdir_vp != sr_vnode : rdir_vp != rootvnode) {
+               SHARED_REGION_TRACE_ERROR(
+                       ("shared_region: map(%p) root_dir mismatch\n",
+                       (void *)VM_KERNEL_ADDRPERM(current_thread())));
+               error = EPERM;
+               goto done;
+       }
+
+
        for (srfmp = &(*sr_file_mappings)[0];
            srfmp < &(*sr_file_mappings)[files_count];
            srfmp++) {
@@ -2311,11 +2330,8 @@ after_root_check:
 #else /* CONFIG_CSR */
                /* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. */
 
-               struct vnode *root_vp = p->p_fd->fd_rdir;
-               if (root_vp == NULL) {
-                       root_vp = rootvnode;
-               }
-               if (srfmp->vp->v_mount != root_vp->v_mount) {
+               assert(rdir_vp != NULL);
+               if (srfmp->vp->v_mount != rdir_vp->v_mount) {
                        SHARED_REGION_TRACE_ERROR(
                                ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                                "not on process's root volume\n",
@@ -2409,9 +2425,9 @@ after_root_check:
        }
 done:
        if (error != 0) {
-               shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, *shared_region, *scdir_vp);
+               shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, shared_region, *scdir_vp);
                *sr_file_mappings = NULL;
-               *shared_region = NULL;
+               *shared_region_ptr = NULL;
                *scdir_vp = NULL;
        }
        return error;
@@ -2439,23 +2455,35 @@ _shared_region_map_and_slide(
        kern_return_t                   kr = KERN_SUCCESS;
        struct _sr_file_mappings        *sr_file_mappings = NULL;
        struct vnode                    *scdir_vp = NULL;
+       struct vnode                    *rdir_vp = NULL;
        struct vm_shared_region         *shared_region = NULL;
 
+       /*
+        * Get a reference to the current proc's root dir.
+        * Need this to prevent racing with chroot.
+        */
+       proc_fdlock(p);
+       rdir_vp = p->p_fd->fd_rdir;
+       if (rdir_vp == NULL) {
+               rdir_vp = rootvnode;
+       }
+       assert(rdir_vp != NULL);
+       vnode_get(rdir_vp);
+       proc_fdunlock(p);
+
        /*
         * Turn files, mappings into sr_file_mappings and other setup.
         */
        error = shared_region_map_and_slide_setup(p, files_count,
            files, mappings_count, mappings,
-           &sr_file_mappings, &shared_region, &scdir_vp);
+           &sr_file_mappings, &shared_region, &scdir_vp, rdir_vp);
        if (error != 0) {
+               vnode_put(rdir_vp);
                return error;
        }
 
        /* map the file(s) into that shared region's submap */
-       kr = vm_shared_region_map_file(shared_region,
-           (void *) p->p_fd->fd_rdir,
-           files_count,
-           sr_file_mappings);
+       kr = vm_shared_region_map_file(shared_region, files_count, sr_file_mappings);
        if (kr != KERN_SUCCESS) {
                SHARED_REGION_TRACE_ERROR(("shared_region: %p [%d(%s)] map(): "
                    "vm_shared_region_map_file() failed kr=0x%x\n",
@@ -2491,6 +2519,7 @@ _shared_region_map_and_slide(
                OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag);
        }
 
+       vnode_put(rdir_vp);
        shared_region_map_and_slide_cleanup(p, files_count, sr_file_mappings, shared_region, scdir_vp);
 
        SHARED_REGION_TRACE_DEBUG(
@@ -3293,14 +3322,6 @@ SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full,
 SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf,
     CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, "");
 
-#if PMAP_CS
-extern uint64_t vm_cs_defer_to_pmap_cs;
-extern uint64_t vm_cs_defer_to_pmap_cs_not;
-SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs,
-    CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, "");
-SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not,
-    CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, "");
-#endif /* PMAP_CS */
 
 extern uint64_t shared_region_pager_copied;
 extern uint64_t shared_region_pager_slid;
diff --git a/config/MASTER b/config/MASTER
index d429f82a2378b621077f92c6fd2af150176221fb..3e8381e78a9a3a4bce0050739d44ee6b5cf5d413 100644 (file)
@@ -326,6 +326,9 @@ options   CONFIG_EMBEDDED                   # <config_embedded>
 options   CONFIG_ARROW              # <config_arrow>
 
 
+options   NOS_ARM_ASM                  # <nos_arm_asm>
+options   NOS_ARM_PMAP                 # <nos_arm_pmap>
+
 # support dynamic signing of code
 #
 options                CONFIG_DYNAMIC_CODE_SIGNING     # <dynamic_codesigning>
@@ -567,8 +570,6 @@ options             CONFIG_SECURE_BSD_ROOT  # secure BSD root       # <config_secure_bsd_root>
 
 options                CONFIG_KAS_INFO         # kas_info support      # <config_kas_info>
 
-options                CONFIG_ZALLOC_SEQUESTER         # Sequester VA for zones # <config_zalloc_sequester>
-
 #
 # MACH configuration options.
 #
diff --git a/config/MASTER.arm b/config/MASTER.arm
index 2b7602e1fb88992c994494bb4e66eea786d422f7..0dbf52e8f645198578dc3b69d9230c4b76efeb11 100644 (file)
@@ -16,7 +16,7 @@
 #  Standard Apple OS Configurations:
 #  -------- ----- -- ---------------
 #
-#  KERNEL_BASE =    [ arm xsmall msgb_small config_embedded config_enforce_signed_code config_zcache config_darkboot ]
+#  KERNEL_BASE =    [ arm xsmall msgb_small config_embedded config_enforce_signed_code config_zcache config_darkboot ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
 #  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ]
 #  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ]
diff --git a/config/MASTER.arm64 b/config/MASTER.arm64
index bfeb9956f79aede4406c6942c54dfac4eb4f137d..15846736a1d1886639a31ed329d2da3aed31daa0 100644 (file)
@@ -16,7 +16,7 @@
 #  Standard Apple OS Configurations:
 #  -------- ----- -- ---------------
 #
-#  KERNEL_BASE =    [ arm64 xsmall msgb_small config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ]
+#  KERNEL_BASE =    [ arm64 xsmall msgb_small config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
 #  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ]
 #  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ]
@@ -64,7 +64,7 @@
 #  VM_RELEASE =     [ VM_BASE ]
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
-#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ]
+#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root ]
 #  SECURITY_RELEASE = [ SECURITY_BASE ]
 #  SECURITY_DEV =     [ SECURITY_BASE config_setuid config_kas_info ]
 #  SECURITY_DEBUG =   [ SECURITY_BASE config_setuid config_kas_info ]
diff --git a/config/MASTER.arm64.BridgeOS b/config/MASTER.arm64.BridgeOS
index c4bae4b768c1b612e11fe82157c1d2d236e17725..3fd4f903c35557791ba8331cf9d9a46bf4786fa2 100644 (file)
@@ -16,7 +16,7 @@
 #  Standard Apple OS Configurations:
 #  -------- ----- -- ---------------
 #
-#  KERNEL_BASE =    [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ]
+#  KERNEL_BASE =    [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
 #  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ]
 #  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ]
@@ -64,7 +64,7 @@
 #  VM_RELEASE =     [ VM_BASE ]
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
-#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ]
+#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root ]
 #  SECURITY_RELEASE = [ SECURITY_BASE ]
 #  SECURITY_DEV =     [ SECURITY_BASE config_setuid config_kas_info ]
 #  SECURITY_DEBUG =   [ SECURITY_BASE config_setuid config_kas_info ]
diff --git a/config/MASTER.arm64.MacOSX b/config/MASTER.arm64.MacOSX
index fd3ab5e2e4d7ebc5d442b9dc0809a5692b8ef63b..509472214e92048834f57470b9193d43d947820b 100644 (file)
@@ -18,8 +18,8 @@
 #
 #  KERNEL_BASE =    [ arm64 medium msgb_large config_arrow config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
-#  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace config_zalloc_sequester ]
-#  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace config_zalloc_sequester ]
+#  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ]
+#  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ]
 #  BSD_BASE =       [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_personas ]
 #  BSD_RELEASE =    [ BSD_BASE ]
 #  BSD_DEV =        [ BSD_BASE config_vnguard ]
 #  VPN =            [ ipsec flow_divert necp content_filter ]
 #  PF =             [ pf pflog ]
 #  MULTIPATH =      [ multipath mptcp ]
-#ifdef SOC_CONFIG_t8020
 #  HIBERNATION =    [ ]
-#else /*!SOC_CONFIG_t8020*/
-#  HIBERNATION =    [ hibernation ]
-#endif /*!SOC_CONFIG_t8020*/
 #  IOKIT_BASE =     [ iokit iokitcpp no_kernel_hid config_sleep iokitstats HIBERNATION ]
 #  IOKIT_RELEASE =  [ IOKIT_BASE ]
 #  IOKIT_DEV =      [ IOKIT_BASE iotracking ]
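
With the SOC_CONFIG_t8020 conditional removed, HIBERNATION is unconditionally empty in this file, so IOKIT_BASE now reduces to:

#  IOKIT_BASE =     [ iokit iokitcpp no_kernel_hid config_sleep iokitstats ]

This lines up with the removal of the HIBERNATE_HMAC_IMAGE code paths later in this commit; the arm64 hibernation support appears to move into binary-only KDK libraries (see the MakeInc.def force_load hunk near the end) rather than being built from this source.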
index d1c4ef46799d16765c8859aab50a84ec647d5929..e1d6bfd92e843d5fc9710e307090deea6567078a 100644 (file)
@@ -16,7 +16,8 @@
 #  Standard Apple OS Configurations:
 #  -------- ----- -- ---------------
 #
-#  KERNEL_BASE =    [ arm64 xsmall msgb_small config_embedded config_requires_u32_munging config_zcache ]
+#  ARM_EXTRAS_BASE = [ nos_arm_pmap nos_arm_asm ]
+#  KERNEL_BASE =    [ arm64 xsmall msgb_small config_embedded config_requires_u32_munging config_zcache ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
 #  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ]
 #  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ]
@@ -62,7 +63,7 @@
 #  VM_RELEASE =     [ VM_BASE ]
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
-#  SECURITY_BASE =     [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ]
+#  SECURITY_BASE =     [ config_macf kernel_integrity config_secure_bsd_root ]
 #  SECURITY_RELEASE = [ SECURITY_BASE ]
 #  SECURITY_DEV =     [ SECURITY_BASE config_setuid config_kas_info ]
 #  SECURITY_DEBUG =   [ SECURITY_BASE config_setuid config_kas_info ]
index 506772eb400a61266cc24808cbc61039f2c70d97..98852a7f7bfa5d23d4da998be11969e035f6fdce 100644 (file)
@@ -16,7 +16,7 @@
 #  Standard Apple OS Configurations:
 #  -------- ----- -- ---------------
 #
-#  KERNEL_BASE =    [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ]
+#  KERNEL_BASE =    [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
 #  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ]
 #  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ]
@@ -64,7 +64,7 @@
 #  VM_RELEASE =     [ VM_BASE ]
 #  VM_DEV =         [ VM_BASE dynamic_codesigning ]
 #  VM_DEBUG =       [ VM_BASE dynamic_codesigning ]
-#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ]
+#  SECURITY_BASE =    [ config_macf kernel_integrity config_secure_bsd_root ]
 #  SECURITY_RELEASE = [ SECURITY_BASE ]
 #  SECURITY_DEV =     [ SECURITY_BASE config_setuid config_kas_info ]
 #  SECURITY_DEBUG =   [ SECURITY_BASE config_setuid config_kas_info ]
index 5350cb839c9cde55c8ffa818a7602c552a769b6f..31d87fd6f5bdfb1beeddf367ac928583dfa066f8 100644 (file)
@@ -18,8 +18,8 @@
 #
 #  KERNEL_BASE =    [ intel medium msgb_large config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage vsprintf ]
 #  KERNEL_RELEASE = [ KERNEL_BASE ]
-#  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug config_zalloc_sequester ]
-#  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug config_zalloc_sequester ]
+#  KERNEL_DEV =     [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ]
+#  KERNEL_DEBUG =   [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ]
 #  BSD_BASE =       [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot config_imageboot_chunklist config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_32bit_telemetry config_personas ]
 #  BSD_RELEASE =    [ BSD_BASE ]
 #  BSD_DEV =        [ BSD_BASE config_vnguard ]
index 09a96f00e134b8ab7b19bb2088631c99809b22c9..3f2bda89b984e00970255b64da46f6318b169a20 100644 (file)
@@ -1,4 +1,4 @@
-20.1.0
+20.2.0
 
 # The first line of this file contains the master version number for the kernel.
 # All other instances of the kernel version in xnu are derived from this file.
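
The bump from 20.1.0 to 20.2.0 tracks the Darwin release for this tag (the macOS 11.1-era xnu-7195.60.x line). The derived value is what uname(3) reports at runtime; a minimal, self-contained check:

#include <stdio.h>
#include <sys/utsname.h>

int
main(void)
{
	struct utsname u;

	if (uname(&u) != 0) {
		return 1;
	}
	/* u.release derives from MasterVersion, e.g. "20.2.0" on this tag */
	printf("%s %s\n", u.sysname, u.release);
	return 0;
}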
index f64ac3509def6c6fcccad722503630b733a1c974..1c0e13e390746e35f0cf47386a1d69ba78f66d7d 100644 (file)
@@ -64,9 +64,40 @@ _pgtrace_clear_probe
 _mach_bridge_recv_timestamps
 _mach_bridge_init_timestamp
 _mach_bridge_set_params
+_pmap_iommu_init
+_pmap_iommu_iovmalloc
+_pmap_iommu_map
+_pmap_iommu_unmap
+_pmap_iommu_iovmfree
+_pmap_iommu_ioctl
+_pmap_iommu_grant_page
+_pmap_iommu_alloc_contiguous_pages
+_nvme_ppl_get_desc
+_sart_get_desc
+_t8020dart_get_desc
+_t8020dart_vo_tte
+_uat_get_desc
+_set_invalidate_hmac_function
 _PE_panic_debugging_enabled
 _register_additional_panic_data_buffer
 _apply_func_phys
 _Switch_context
 _gT1Sz
 
+__ZN26IOUnifiedAddressTranslator10gMetaClassE
+__ZN26IOUnifiedAddressTranslator10superClassE
+__ZN26IOUnifiedAddressTranslator17getPageTableEntryEy
+__ZN26IOUnifiedAddressTranslator18setClientContextIDEjb
+__ZN26IOUnifiedAddressTranslator21removeClientContextIDEv
+__ZN26IOUnifiedAddressTranslator19isPageFaultExpectedEyj
+__ZN26IOUnifiedAddressTranslator22registerTaskForServiceEP4taskP9IOService
+__ZN26IOUnifiedAddressTranslator23createMappingInApertureEjP18IOMemoryDescriptorjym
+__ZN26IOUnifiedAddressTranslator23getTotalPageTableMemoryEv
+__ZN26IOUnifiedAddressTranslator3mapEP11IOMemoryMapj
+__ZN26IOUnifiedAddressTranslator5doMapEP18IOMemoryDescriptoryyj
+__ZN26IOUnifiedAddressTranslator5unmapEP11IOMemoryMap
+__ZN26IOUnifiedAddressTranslator7doUnmapEP18IOMemoryDescriptoryy
+__ZN26IOUnifiedAddressTranslator8taskDiedEv
+__ZN26IOUnifiedAddressTranslator12commitUnmapsEv
+__ZN26IOUnifiedAddressTranslator14prepareFWUnmapEyy
+__ZTV26IOUnifiedAddressTranslator
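
The added exports fall into two groups: C symbols for IOMMU pmap hooks and per-SoC translation-hardware descriptors (_pmap_iommu_*, _sart_get_desc, _t8020dart_get_desc, _uat_get_desc, and friends), and Itanium-mangled C++ symbols for the new IOUnifiedAddressTranslator class, covering its metaclass, superclass pointer, methods, and vtable. The mangled names decode mechanically: __ZN26IOUnifiedAddressTranslator3mapEP11IOMemoryMapj is IOUnifiedAddressTranslator::map(IOMemoryMap*, unsigned int), and __ZTV26IOUnifiedAddressTranslator is the class vtable.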
index dd1dd039b38bdd45011d4980570b8f36a33aba9d..af2fb5ac216c8428b4f56ba611ee6f35f31dc240 100644 (file)
@@ -33,7 +33,6 @@
 
 #if defined(__arm64__)
 
-#define HIBERNATE_HMAC_IMAGE 1
 #define HIBERNATE_HAVE_MACHINE_HEADER 1
 
 // enable the hibernation exception handler on DEBUG and DEVELOPMENT kernels
index 91336d2a70050f5b5c5a09b2ea9516977d44228a..17f91c66ab9ccb8a146da9f76f42654c41d8e7d3 100644 (file)
@@ -83,6 +83,7 @@ private:
        OSPtr<const OSSymbol>  _registryPropertiesKey;
        UInt8                  *_nvramImage;
        IOLock                 *_variableLock;
+       IOLock                 *_controllerLock;
        UInt32                 _commonPartitionOffset;
        UInt32                 _commonPartitionSize;
        UInt8                  *_commonImage;
@@ -145,7 +146,7 @@ private:
        UInt32 getNVRAMSize(void);
        void initNVRAMImage(void);
        void initProxyData(void);
-       IOReturn syncVariables(void);
+       IOReturn serializeVariables(void);
        IOReturn setPropertyInternal(const OSSymbol *aKey, OSObject *anObject);
        IOReturn removePropertyInternal(const OSSymbol *aKey);
        IOReturn chooseDictionary(IONVRAMOperation operation, const uuid_t *varGuid,
index bb6e1e85c8b061bcd970942663b25e04d4225dad..1320d8d0cb04efc84f0ce23057ed92b0a759e501 100644 (file)
 #endif /* defined(__i386__) || defined(__x86_64__) */
 #include <san/kasan.h>
 
-#if HIBERNATE_HMAC_IMAGE
-#include <arm64/hibernate_ppl_hmac.h>
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 extern "C" addr64_t             kvtophys(vm_offset_t va);
 extern "C" ppnum_t              pmap_find_phys(pmap_t pmap, addr64_t va);
@@ -258,7 +255,6 @@ enum { kVideoMapSize  = 80 * 1024 * 1024 };
 
 // copy from phys addr to MD
 
-#if !HIBERNATE_HMAC_IMAGE
 static IOReturn
 IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md,
     IOByteCount offset, addr64_t bytes, IOByteCount length)
@@ -296,7 +292,6 @@ IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md,
 
        return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
 }
-#endif /* !HIBERNATE_HMAC_IMAGE */
 
 // copy from MD to phys addr
 
@@ -631,10 +626,6 @@ IOHibernateSystemSleep(void)
                        gIOHibernateCurrentHeader->options |= kIOHibernateOptionProgress;
                }
 
-#if HIBERNATE_HMAC_IMAGE
-               // inform HMAC driver that we're going to hibernate
-               ppl_hmac_hibernate_begin();
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 #if defined(__i386__) || defined(__x86_64__)
                if (vars->volumeCryptKeySize &&
@@ -1144,26 +1135,40 @@ IOHibernateSystemHasSlept(void)
 
 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 
-#if defined(__i386__) || defined(__x86_64__)
-static DeviceTreeNode *
-MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry, vm_offset_t region_start, vm_size_t region_size)
+static const DeviceTreeNode *
+MergeDeviceTree(const DeviceTreeNode * entry, IORegistryEntry * regEntry, OSSet * entriesToUpdate, vm_offset_t region_start, vm_size_t region_size)
 {
        DeviceTreeNodeProperty * prop;
-       DeviceTreeNode *         child;
+       const DeviceTreeNode *   child;
        IORegistryEntry *        childRegEntry;
        const char *             nameProp;
        unsigned int             propLen, idx;
 
+       bool updateEntry = true;
+       if (!regEntry) {
+               updateEntry = false;
+       } else if (entriesToUpdate && !entriesToUpdate->containsObject(regEntry)) {
+               updateEntry = false;
+       }
+
        prop = (DeviceTreeNodeProperty *) (entry + 1);
        for (idx = 0; idx < entry->nProperties; idx++) {
-               if (regEntry && (0 != strcmp("name", prop->name))) {
+               if (updateEntry && (0 != strcmp("name", prop->name))) {
                        regEntry->setProperty((const char *) prop->name, (void *) (prop + 1), prop->length);
 //         HIBPRINT("%s: %s, %d\n", regEntry->getName(), prop->name, prop->length);
                }
                prop = (DeviceTreeNodeProperty *) (((uintptr_t)(prop + 1)) + ((prop->length + 3) & ~3));
        }
 
-       child = (DeviceTreeNode *) prop;
+       if (entriesToUpdate) {
+               entriesToUpdate->removeObject(regEntry);
+               if (entriesToUpdate->getCount() == 0) {
+                       // we've updated all the entries we care about so we can stop
+                       return NULL;
+               }
+       }
+
+       child = (const DeviceTreeNode *) prop;
        for (idx = 0; idx < entry->nChildren; idx++) {
                if (kSuccess != SecureDTGetPropertyRegion(child, "name", (void const **) &nameProp, &propLen,
                    region_start, region_size)) {
@@ -1171,12 +1176,14 @@ MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry, vm_offset_t
                }
                childRegEntry = regEntry ? regEntry->childFromPath(nameProp, gIODTPlane) : NULL;
 //     HIBPRINT("%s == %p\n", nameProp, childRegEntry);
-               child = MergeDeviceTree(child, childRegEntry, region_start, region_size);
+               child = MergeDeviceTree(child, childRegEntry, entriesToUpdate, region_start, region_size);
+               if (!child) {
+                       // the recursive call updated the last entry we cared about, so we can stop
+                       break;
+               }
        }
        return child;
 }
-#endif
-
 
 IOReturn
 IOHibernateSystemWake(void)
@@ -1276,10 +1283,6 @@ IOHibernateDone(IOHibernateVars * vars)
                vars->srcBuffer->release();
        }
 
-#if HIBERNATE_HMAC_IMAGE
-       // inform HMAC driver that we're done hibernating
-       ppl_hmac_hibernate_end();
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0]));
        if (vars->handoffBuffer) {
@@ -1297,14 +1300,32 @@ IOHibernateDone(IOHibernateVars * vars)
                                        break;
 
                                case kIOHibernateHandoffTypeDeviceTree:
+                               {
 #if defined(__i386__) || defined(__x86_64__)
-                                       MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot(),
-                                           (vm_offset_t)data, (vm_size_t)handoff->bytecount);
-#else
-                                       // On ARM, the device tree is confined to its region covered by CTRR, so effectively immutable.
-                                       panic("kIOHibernateHandoffTypeDeviceTree not supported on this platform.");
+                                       // On Intel, process the entirety of the passed in device tree
+                                       OSSet * entriesToUpdate = NULL;
+#elif defined(__arm64__)
+                                       // On ARM, only allow hibernation to update specific entries
+                                       const char *mergePaths[] = {
+                                               kIODeviceTreePlane ":/chosen/boot-object-manifests",
+                                               kIODeviceTreePlane ":/chosen/secure-boot-hashes",
+                                       };
+                                       const size_t mergePathCount = sizeof(mergePaths) / sizeof(mergePaths[0]);
+                                       OSSet * entriesToUpdate = OSSet::withCapacity(mergePathCount);
+                                       for (size_t i = 0; i < mergePathCount; i++) {
+                                               IORegistryEntry *entry = IORegistryEntry::fromPath(mergePaths[i]);
+                                               if (!entry) {
+                                                       panic("failed to find %s in IORegistry", mergePaths[i]);
+                                               }
+                                               entriesToUpdate->setObject(entry);
+                                               OSSafeReleaseNULL(entry);
+                                       }
 #endif
+                                       MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot(), entriesToUpdate,
+                                           (vm_offset_t)data, (vm_size_t)handoff->bytecount);
+                                       OSSafeReleaseNULL(entriesToUpdate);
                                        break;
+                               }
 
                                case kIOHibernateHandoffTypeKeyStore:
 #if defined(__i386__) || defined(__x86_64__)
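
MergeDeviceTree now threads an optional allow-set through the recursion: only registry entries present in entriesToUpdate are modified, and once the set drains the walk aborts early by returning NULL up through the recursive calls. On arm64 the set is seeded with just the two chosen/ manifest entries; on Intel a NULL set preserves the old merge-everything behavior. A self-contained sketch of the early-stop pattern (simplified types, not xnu's):

#include <stdio.h>
#include <string.h>

struct node {
	const char  *name;
	struct node *child;     /* first child */
	struct node *next;      /* next sibling */
};

static int pending = 2;     /* stands in for entriesToUpdate->getCount() */

/*
 * Returns NULL once every allowed node has been updated, which unwinds
 * the whole walk; otherwise returns the node so the caller continues.
 */
static struct node *
merge(struct node *n)
{
	if (strchr(n->name, '!') != NULL) {     /* toy allow-set test */
		printf("update %s\n", n->name);
		if (--pending == 0) {
			return NULL;            /* drained: stop everything */
		}
	}
	for (struct node *c = n->child; c != NULL; c = c->next) {
		if (merge(c) == NULL) {
			return NULL;            /* propagate the early stop */
		}
	}
	return n;
}

int
main(void)
{
	struct node b = { "boot-object-manifests!", NULL, NULL };
	struct node s = { "secure-boot-hashes!", NULL, &b };
	struct node chosen = { "chosen", &s, NULL };
	struct node root = { "/", &chosen, NULL };

	merge(&root);
	return 0;
}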
@@ -1562,38 +1583,12 @@ IOHibernatePolledFileWrite(IOHibernateVars * vars,
 {
        IOReturn err;
 
-#if HIBERNATE_HMAC_IMAGE
-       uint64_t originalPosition = 0;
-       if (!bytes && !size) {
-               originalPosition = vars->fileVars->position;
-       }
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        err = IOPolledFileWrite(vars->fileVars, bytes, size, cryptvars);
        if ((kIOReturnSuccess == err) && hibernate_should_abort()) {
                err = kIOReturnAborted;
        }
 
-#if HIBERNATE_HMAC_IMAGE
-       if ((kIOReturnSuccess == err) && (vars->imageShaCtx)) {
-               if (!bytes && !size) {
-                       // determine how many bytes were written
-                       size = vars->fileVars->position - originalPosition;
-               }
-               if (bytes) {
-                       SHA256_Update(vars->imageShaCtx, bytes, size);
-               } else {
-                       // update with zeroes
-                       uint8_t zeroes[512] = {};
-                       size_t len = size;
-                       while (len) {
-                               IOByteCount toHash = min(len, sizeof(zeroes));
-                               SHA256_Update(vars->imageShaCtx, zeroes, toHash);
-                               len -= toHash;
-                       }
-               }
-       }
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        return err;
 }
@@ -1630,11 +1625,9 @@ hibernate_write_image(void)
        uint32_t     pageAndCount[2];
        addr64_t     phys64;
        IOByteCount  segLen;
-#if !HIBERNATE_HMAC_IMAGE
        uint32_t     restore1Sum = 0, sum = 0, sum1 = 0, sum2 = 0;
        uintptr_t    hibernateBase;
        uintptr_t    hibernateEnd;
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        AbsoluteTime startTime, endTime;
        AbsoluteTime allTime, compTime;
@@ -1665,13 +1658,6 @@ hibernate_write_image(void)
                return kIOHibernatePostWriteSleep;
        }
 
-#if HIBERNATE_HMAC_IMAGE
-       // set up SHA and HMAC context to hash image1 (wired pages)
-       SHA256_CTX imageShaCtx;
-       vars->imageShaCtx = &imageShaCtx;
-       SHA256_Init(vars->imageShaCtx);
-       ppl_hmac_reset(true);
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 #if !defined(__arm64__)
        if (kIOHibernateModeSleep & gIOHibernateMode) {
@@ -1795,61 +1781,6 @@ hibernate_write_image(void)
                        }
                }
 
-#if HIBERNATE_HMAC_IMAGE
-               if (vars->fileVars->position > UINT32_MAX) {
-                       err = kIOReturnNoSpace;
-                       break;
-               }
-               header->segmentsFileOffset = (uint32_t)vars->fileVars->position;
-
-               // fetch the IOHibernateHibSegInfo and the actual pages to write
-               // we use srcBuffer as scratch space
-               IOHibernateHibSegInfo *segInfo = &header->hibSegInfo;
-               void *segInfoScratch = vars->srcBuffer->getBytesNoCopy();
-
-               // This call also enables PMAP hibernation asserts which will prevent modification
-               // of PMAP data structures. This needs to occur before pages start getting written
-               // into the image.
-               ppl_hmac_fetch_hibseg_and_info(segInfoScratch, vars->srcBuffer->getCapacity(), segInfo);
-
-               // write each segment to the file
-               size_t segInfoScratchPos = 0;
-               int hibSectIdx = -1;
-               uint32_t hibSegPageCount = 0;
-               for (int i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) {
-                       hibSegPageCount += segInfo->segments[i].pageCount;
-                       size_t size = ptoa_64(segInfo->segments[i].pageCount);
-                       if (size) {
-                               err = IOHibernatePolledFileWrite(vars,
-                                   (uint8_t*)segInfoScratch + segInfoScratchPos, size, cryptvars);
-                               if (kIOReturnSuccess != err) {
-                                       break;
-                               }
-                               segInfoScratchPos += size;
-
-                               // is this sectHIBTEXTB?
-                               if (ptoa_64(segInfo->segments[i].physPage) == trunc_page(kvtophys(sectHIBTEXTB))) {
-                                       // remember which segment is sectHIBTEXTB because we'll need it later
-                                       hibSectIdx = i;
-                               }
-                       }
-               }
-
-               if (hibSectIdx == -1) {
-                       panic("couldn't find sectHIBTEXTB in segInfo");
-               }
-
-               // set the header fields corresponding to the HIB segments
-               header->restore1CodePhysPage = segInfo->segments[hibSectIdx].physPage;
-               header->restore1CodeVirt = trunc_page(sectHIBTEXTB);
-               header->restore1PageCount = hibSegPageCount;
-               header->restore1CodeOffset = (uint32_t)(((uintptr_t) &hibernate_machine_entrypoint) - header->restore1CodeVirt);
-
-               // set restore1StackOffset to the physical page of the top of the stack to simplify the restore code
-               vm_offset_t stackFirstPage, stackPageSize;
-               pal_hib_get_stack_pages(&stackFirstPage, &stackPageSize);
-               header->restore1StackOffset = (uint32_t)(stackFirstPage + stackPageSize);
-#else /* !HIBERNATE_HMAC_IMAGE */
                hibernateBase = HIB_BASE; /* Defined in PAL headers */
                hibernateEnd = (segHIBB + segSizeHIB);
 
@@ -1907,7 +1838,6 @@ hibernate_write_image(void)
                                break;
                        }
                }
-#endif /* !HIBERNATE_HMAC_IMAGE */
 
                if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) {
                        vars->fileVars->encryptStart = (vars->fileVars->position & ~(AES_BLOCK_SIZE - 1));
@@ -1948,14 +1878,7 @@ hibernate_write_image(void)
 
                        for (page = 0; page < count; page += page_size) {
                                phys64 = vars->previewBuffer->getPhysicalSegment(page, NULL, kIOMemoryMapperNone);
-#if HIBERNATE_HMAC_IMAGE
-                               err = ppl_hmac_update_and_compress_page(atop_64_ppnum(phys64), NULL, NULL);
-                               if (kIOReturnSuccess != err) {
-                                       break;
-                               }
-#else /* !HIBERNATE_HMAC_IMAGE */
                                sum1 += hibernate_sum_page(src + page, atop_64_ppnum(phys64));
-#endif /* !HIBERNATE_HMAC_IMAGE */
                        }
                        if (kIOReturnSuccess != err) {
                                break;
@@ -2042,12 +1965,6 @@ hibernate_write_image(void)
                    bitmap_size, header->previewSize,
                    pageCount, vars->fileVars->position);
 
-#if HIBERNATE_HMAC_IMAGE
-               // we don't need to sign the page data into imageHeaderHMAC because it's
-               // already signed into image1PagesHMAC/image2PagesHMAC
-               vars->imageShaCtx = NULL;
-               header->imageHeaderHMACSize = (uint32_t)vars->fileVars->position;
-#endif /* HIBERNATE_HMAC_IMAGE */
 
                enum
                // pageType
@@ -2126,9 +2043,6 @@ hibernate_write_image(void)
                                }
 
                                for (page = ppnum; page < (ppnum + count); page++) {
-#if HIBERNATE_HMAC_IMAGE
-                                       wkresult = ppl_hmac_update_and_compress_page(page, (void **)&src, compressed);
-#else /* !HIBERNATE_HMAC_IMAGE */
                                        err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size);
                                        if (err) {
                                                HIBLOG("IOMemoryDescriptorWriteFromPhysical %d [%ld] %x\n", __LINE__, (long)page, err);
@@ -2147,7 +2061,6 @@ hibernate_write_image(void)
                                            (WK_word*) compressed,
                                            (WK_word*) scratch,
                                            (uint32_t) (page_size - 4));
-#endif /* !HIBERNATE_HMAC_IMAGE */
 
                                        clock_get_uptime(&endTime);
                                        ADD_ABSOLUTETIME(&compTime, &endTime);
@@ -2239,12 +2152,6 @@ hibernate_write_image(void)
                                image1Size = vars->fileVars->position;
                                HIBLOG("image1Size 0x%qx, encryptStart1 0x%qx, End1 0x%qx\n",
                                    image1Size, header->encryptStart, header->encryptEnd);
-#if HIBERNATE_HMAC_IMAGE
-                               // compute the image1 HMAC
-                               ppl_hmac_final(header->image1PagesHMAC, sizeof(header->image1PagesHMAC));
-                               // reset the PPL context so we can compute the image2 (non-wired pages) HMAC
-                               ppl_hmac_reset(false);
-#endif /* HIBERNATE_HMAC_IMAGE */
                        }
                }
                if (kIOReturnSuccess != err) {
@@ -2258,10 +2165,6 @@ hibernate_write_image(void)
                        break;
                }
 
-#if HIBERNATE_HMAC_IMAGE
-               // compute the image2 HMAC
-               ppl_hmac_final(header->image2PagesHMAC, sizeof(header->image2PagesHMAC));
-#endif /* HIBERNATE_HMAC_IMAGE */
 
                // Header:
 
@@ -2270,11 +2173,9 @@ hibernate_write_image(void)
                header->bitmapSize   = bitmap_size;
                header->pageCount    = pageCount;
 
-#if !HIBERNATE_HMAC_IMAGE
                header->restore1Sum  = restore1Sum;
                header->image1Sum    = sum1;
                header->image2Sum    = sum2;
-#endif /* !HIBERNATE_HMAC_IMAGE */
                header->sleepTime    = gIOLastSleepTime.tv_sec;
 
                header->compression     = ((uint32_t)((compressedSize << 8) / uncompressedSize));
@@ -2294,17 +2195,6 @@ hibernate_write_image(void)
                header->lastHibAbsTime  = mach_absolute_time();
                header->lastHibContTime = mach_continuous_time();
 
-#if HIBERNATE_HMAC_IMAGE
-               // include the headers in the SHA calculation
-               SHA256_Update(&imageShaCtx, header, sizeof(*header));
-
-               // finalize the image header SHA
-               uint8_t imageHash[CCSHA256_OUTPUT_SIZE];
-               SHA256_Final(imageHash, &imageShaCtx);
-
-               // compute the header HMAC
-               ppl_hmac_finalize_image(imageHash, sizeof(imageHash), header->imageHeaderHMAC, sizeof(header->imageHeaderHMAC));
-#endif /* HIBERNATE_HMAC_IMAGE */
 
                IOPolledFileSeek(vars->fileVars, 0);
                err = IOHibernatePolledFileWrite(vars,
@@ -2343,9 +2233,7 @@ hibernate_write_image(void)
            uncompressedSize, atop_32(uncompressedSize), compressedSize,
            uncompressedSize ? ((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0);
 
-#if !HIBERNATE_HMAC_IMAGE
        HIBLOG("\nsum1 %x, sum2 %x\n", sum1, sum2);
-#endif /* !HIBERNATE_HMAC_IMAGE */
 
        HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n",
            svPageCount, zvPageCount, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted);
@@ -2643,10 +2531,6 @@ hibernate_machine_init(void)
 
        HIBLOG("hibernate_machine_init reading\n");
 
-#if HIBERNATE_HMAC_IMAGE
-       // Reset SHA context to verify image2 hash (non-wired pages).
-       ppl_hmac_reset(false);
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        uint32_t * header = (uint32_t *) src;
        sum = 0;
@@ -2721,12 +2605,6 @@ hibernate_machine_init(void)
                                panic("Hibernate restore error %x", err);
                        }
 
-#if HIBERNATE_HMAC_IMAGE
-                       err = ppl_hmac_update_and_compress_page(ppnum, NULL, NULL);
-                       if (err) {
-                               panic("Hibernate restore error %x", err);
-                       }
-#endif /* HIBERNATE_HMAC_IMAGE */
 
                        ppnum++;
                        pagesDone++;
@@ -2753,13 +2631,6 @@ hibernate_machine_init(void)
                panic("Hibernate restore error %x", err);
        }
 
-#if HIBERNATE_HMAC_IMAGE
-       uint8_t image2PagesHMAC[HIBERNATE_HMAC_SIZE];
-       ppl_hmac_final(image2PagesHMAC, sizeof(image2PagesHMAC));
-       if (memcmp(image2PagesHMAC, gIOHibernateCurrentHeader->image2PagesHMAC, sizeof(image2PagesHMAC)) != 0) {
-               panic("image2 pages corrupted");
-       }
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        gIOHibernateCurrentHeader->actualImage2Sum = sum;
        gIOHibernateCompression = gIOHibernateCurrentHeader->compression;
index b28a270a3d8ef110f67a86fdbca0c7c84745b4db..cbb8cc2eb8b6906fca8b9f3aeca86c571290e63e 100644 (file)
 
 #ifdef __cplusplus
 
-#if HIBERNATE_HMAC_IMAGE
-#include <libkern/crypto/sha2.h>
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 enum { kIOHibernateAESKeySize = 16 };  /* bytes */
 
-#if HIBERNATE_HMAC_IMAGE
-// when we call out to PPL to compute IOHibernateHibSegInfo, we use
-// srcBuffer as a temporary buffer, to copy out all of the required
-// HIB segments, so it should be big enough to contain those segments
-#define HIBERNATION_SRC_BUFFER_SIZE (16 * 1024 * 1024)
-#else
 // srcBuffer has to be big enough for a source page, the WKDM
 // compressed output, and a scratch page needed by WKDM
 #define HIBERNATION_SRC_BUFFER_SIZE (2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL)
-#endif
 
 struct IOHibernateVars {
        hibernate_page_list_t *             page_list;
@@ -73,9 +63,6 @@ struct IOHibernateVars {
        uint8_t                             cryptKey[kIOHibernateAESKeySize];
        size_t                              volumeCryptKeySize;
        uint8_t                             volumeCryptKey[64];
-#if HIBERNATE_HMAC_IMAGE
-       SHA256_CTX *                        imageShaCtx;
-#endif /* HIBERNATE_HMAC_IMAGE */
 };
 typedef struct IOHibernateVars IOHibernateVars;
 
index f0ec6fc8d90e03f1617145dc35246123876a530b..9ab45602287573e84345f44e565c9e2e4756d918 100644 (file)
@@ -1369,7 +1369,6 @@ __attribute__((optnone))
        debug_code('  sp', context->ss.ss_64.sp);
        debug_code('  pc', context->ss.ss_64.pc);
        debug_code('cpsr', context->ss.ss_64.cpsr);
-       debug_code('asps', context->ss.ss_64.aspsr);
        debug_code(' far', context->ss.ss_64.far);
        debug_code(' esr', context->ss.ss_64.esr);
 
index daa6cf7cb712f8df26fc4652b01148552a673c58..69725d4036657eaf7c4a6ca8da913764f24fd422 100644 (file)
@@ -100,6 +100,18 @@ OSDefineMetaClassAndStructors(IODTNVRAM, IOService);
 
 #define DEBUG_ERROR DEBUG_ALWAYS
 
+#define CONTROLLERLOCK()                             \
+({                                                   \
+       if (preemption_enabled() && !panic_active()) \
+               IOLockLock(_controllerLock);         \
+})
+
+#define CONTROLLERUNLOCK()                           \
+({                                                   \
+       if (preemption_enabled() && !panic_active()) \
+               IOLockUnlock(_controllerLock);       \
+})
+
 #define NVRAMLOCK()                              \
 ({                                               \
        if (preemption_enabled() && !panic_active()) \
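
_controllerLock gets the same guarded-lock shape as the existing NVRAMLOCK macro: the IOLock is only taken when preemption is enabled and no panic is in flight, since blocking on a mutex in either of those states could deadlock the panic/debugger path. A userspace sketch of the statement-expression macro pattern (pthreads standing in for IOLock, a flag standing in for preemption_enabled() && !panic_active()):

#include <pthread.h>

static pthread_mutex_t ctrl_mtx = PTHREAD_MUTEX_INITIALIZER;
static int may_block = 1;   /* stands in for preemption_enabled() && !panic_active() */

/* GNU statement expressions, as used by the kernel macros above. */
#define CTRL_LOCK()                              \
({                                               \
	if (may_block)                           \
		pthread_mutex_lock(&ctrl_mtx);   \
})

#define CTRL_UNLOCK()                            \
({                                               \
	if (may_block)                           \
		pthread_mutex_unlock(&ctrl_mtx); \
})

int
main(void)
{
	CTRL_LOCK();
	/* critical section */
	CTRL_UNLOCK();
	return 0;
}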
@@ -660,6 +672,11 @@ IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane)
                return false;
        }
 
+       _controllerLock = IOLockAlloc();
+       if (!_controllerLock) {
+               return false;
+       }
+
        PE_parse_boot_argn("nvram-log", &gNVRAMLogging, sizeof(gNVRAMLogging));
 
        dict =  OSDictionary::withCapacity(1);
@@ -763,6 +780,8 @@ IODTNVRAM::getNVRAMSize(void)
 void
 IODTNVRAM::registerNVRAMController(IONVRAMController *nvram)
 {
+       IOReturn ret;
+
        if (_nvramController != nullptr) {
                DEBUG_ERROR("Duplicate controller set\n");
                return;
@@ -823,16 +842,15 @@ no_system:
 
                if (!_commonService->start(this)) {
                        DEBUG_ERROR("Unable to start the common service!\n");
-                       _systemService->detach(this);
+                       _commonService->detach(this);
                        OSSafeReleaseNULL(_commonService);
                        goto no_common;
                }
        }
 
 no_common:
-       NVRAMLOCK();
-       (void) syncVariables();
-       NVRAMUNLOCK();
+       ret = serializeVariables();
+       DEBUG_INFO("serializeVariables ret=0x%08x\n", ret);
 }
 
 void
@@ -927,9 +945,10 @@ IODTNVRAM::syncInternal(bool rateLimit)
        }
 
        DEBUG_INFO("Calling sync()\n");
-       NVRAMLOCK();
+
+       CONTROLLERLOCK();
        _nvramController->sync();
-       NVRAMUNLOCK();
+       CONTROLLERUNLOCK();
 }
 
 void
@@ -942,49 +961,53 @@ bool
 IODTNVRAM::serializeProperties(OSSerialize *s) const
 {
        const OSSymbol                    *key;
-       OSSharedPtr<OSDictionary>         dict;
+       OSSharedPtr<OSDictionary>         systemDict, commonDict, dict;
        OSSharedPtr<OSCollectionIterator> iter;
        bool                              result = false;
        unsigned int                      totalCapacity = 0;
 
        NVRAMLOCK();
        if (_commonDict) {
-               totalCapacity += _commonDict->getCapacity();
+               commonDict = OSDictionary::withDictionary(_commonDict.get());
        }
 
        if (_systemDict) {
-               totalCapacity += _systemDict->getCapacity();
+               systemDict = OSDictionary::withDictionary(_systemDict.get());
        }
+       NVRAMUNLOCK();
+
+       totalCapacity += (commonDict != nullptr) ? commonDict->getCapacity() : 0;
+       totalCapacity += (systemDict != nullptr) ? systemDict->getCapacity() : 0;
 
        dict = OSDictionary::withCapacity(totalCapacity);
 
        if (dict == nullptr) {
                DEBUG_ERROR("No dictionary\n");
-               goto unlock;
+               goto exit;
        }
 
        // Copy system entries first if present then copy unique common entries
-       if (_systemDict != nullptr) {
-               iter = OSCollectionIterator::withCollection(_systemDict.get());
+       if (systemDict != nullptr) {
+               iter = OSCollectionIterator::withCollection(systemDict.get());
                if (iter == nullptr) {
                        DEBUG_ERROR("failed to create iterator\n");
-                       goto unlock;
+                       goto exit;
                }
 
                while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) {
                        if (verifyPermission(kIONVRAMOperationRead, &gAppleSystemVariableGuid, key)) {
-                               dict->setObject(key, _systemDict->getObject(key));
+                               dict->setObject(key, systemDict->getObject(key));
                        }
                }
 
                iter.reset();
        }
 
-       if (_commonDict != nullptr) {
-               iter = OSCollectionIterator::withCollection(_commonDict.get());
+       if (commonDict != nullptr) {
+               iter = OSCollectionIterator::withCollection(commonDict.get());
                if (iter == nullptr) {
                        DEBUG_ERROR("failed to create common iterator\n");
-                       goto unlock;
+                       goto exit;
                }
 
                while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) {
@@ -993,16 +1016,14 @@ IODTNVRAM::serializeProperties(OSSerialize *s) const
                                continue;
                        }
                        if (verifyPermission(kIONVRAMOperationRead, &gAppleNVRAMGuid, key)) {
-                               dict->setObject(key, _commonDict->getObject(key));
+                               dict->setObject(key, commonDict->getObject(key));
                        }
                }
        }
 
        result = dict->serialize(s);
 
-unlock:
-       NVRAMUNLOCK();
-
+exit:
        DEBUG_INFO("result=%d\n", result);
 
        return result;
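
serializeProperties now snapshots _systemDict and _commonDict (OSDictionary::withDictionary makes a shallow copy) while holding NVRAMLOCK, then builds and serializes the merged dictionary with the lock dropped. Serialization can allocate and take other locks, so holding NVRAMLOCK across it risked lock-order inversions; iterating private copies avoids that while still giving a consistent point-in-time view.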
@@ -1053,8 +1074,6 @@ IODTNVRAM::handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj,
 
                        _commonDict->flushCollection();
                        DEBUG_INFO("system & common dictionary flushed\n");
-
-                       err = syncVariables();
                }
 
                special = true;
@@ -1112,7 +1131,6 @@ IODTNVRAM::handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj,
                }
 
                special = true;
-               err = syncVariables();
        }
 
 exit:
@@ -1132,6 +1150,15 @@ IODTNVRAM::copyProperty(const OSSymbol *aKey) const
        OSDictionary          *dict;
        OSSharedPtr<OSObject> theObject = nullptr;
 
+       if (aKey->isEqualTo(kIOBSDNameKey) ||
+           aKey->isEqualTo(kIOBSDNamesKey) ||
+           aKey->isEqualTo(kIOBSDMajorKey) ||
+           aKey->isEqualTo(kIOBSDMinorKey) ||
+           aKey->isEqualTo(kIOBSDUnitKey)) {
+               // These will never match.
+               // Check here and exit to avoid logging spam
+               return nullptr;
+       }
        DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy());
 
        parseVariableName(aKey->getCStringNoCopy(), &varGuid, &variableName);
@@ -1204,6 +1231,7 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject)
        uuid_t                varGuid;
        OSDictionary          *dict;
        bool                  deletePropertyKey, syncNowPropertyKey, forceSyncNowPropertyKey;
+       bool                  ok;
        size_t                propDataSize = 0;
 
        DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy());
@@ -1308,11 +1336,15 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject)
        }
 
        NVRAMLOCK();
+       ok = handleSpecialVariables(variableName, &varGuid, propObject.get(), &result);
+       NVRAMUNLOCK();
 
-       if (handleSpecialVariables(variableName, &varGuid, propObject.get(), &result)) {
-               goto unlock;
+       if (ok) {
+               serializeVariables();
+               goto exit;
        }
 
+       NVRAMLOCK();
        oldObject.reset(dict->getObject(variableName), OSRetain);
        if (remove == false) {
                DEBUG_INFO("Adding object\n");
@@ -1328,17 +1360,22 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject)
                        result = kIOReturnNotFound;
                }
        }
+       NVRAMUNLOCK();
 
        if (result == kIOReturnSuccess) {
-               result = syncVariables();
+               result = serializeVariables();
                if (result != kIOReturnSuccess) {
-                       DEBUG_ERROR("syncVariables failed, result=0x%08x\n", result);
+                       DEBUG_ERROR("serializeVariables failed, result=0x%08x\n", result);
+
+                       NVRAMLOCK();
                        if (oldObject) {
                                dict->setObject(variableName, oldObject.get());
                        } else {
                                dict->removeObject(variableName);
                        }
-                       (void) syncVariables();
+                       NVRAMUNLOCK();
+
+                       (void) serializeVariables();
                        result = kIOReturnNoMemory;
                }
        }
@@ -1350,9 +1387,6 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject)
                propObject.reset();
        }
 
-unlock:
-       NVRAMUNLOCK();
-
 exit:
        DEBUG_INFO("result=0x%08x\n", result);
 
@@ -1371,12 +1405,12 @@ IODTNVRAM::removeProperty(const OSSymbol *aKey)
        IOReturn ret;
 
        NVRAMLOCK();
-
        ret = removePropertyInternal(aKey);
-
        NVRAMUNLOCK();
 
-       if (ret != kIOReturnSuccess) {
+       if (ret == kIOReturnSuccess) {
+               serializeVariables();
+       } else {
                DEBUG_INFO("removePropertyInternal failed, ret=0x%08x\n", ret);
        }
 }
@@ -1409,7 +1443,6 @@ IODTNVRAM::removePropertyInternal(const OSSymbol *aKey)
        // If the object exists, remove it from the dictionary.
        if (dict->getObject(variableName) != nullptr) {
                dict->removeObject(variableName);
-               result = syncVariables();
        }
 
 exit:
@@ -1601,7 +1634,6 @@ IODTNVRAM::initVariables(void)
        OSSharedPtr<const OSSymbol> propSymbol;
        OSSharedPtr<OSObject>       propObject;
        NVRAMRegionInfo             *currentRegion;
-
        NVRAMRegionInfo             variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage},
                                                          { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} };
 
@@ -1682,20 +1714,22 @@ IODTNVRAM::syncOFVariables(void)
 }
 
 IOReturn
-IODTNVRAM::syncVariables(void)
+IODTNVRAM::serializeVariables(void)
 {
+       IOReturn                          ret;
        bool                              ok;
        UInt32                            length, maxLength, regionIndex;
        UInt8                             *buffer, *tmpBuffer;
        const OSSymbol                    *tmpSymbol;
        OSObject                          *tmpObject;
        OSSharedPtr<OSCollectionIterator> iter;
+       OSSharedPtr<OSNumber>             sizeUsed;
+       UInt32                            systemUsed = 0;
+       UInt32                            commonUsed = 0;
+       OSSharedPtr<OSData>               nvramImage;
        NVRAMRegionInfo                   *currentRegion;
-
-       NVRAMRegionInfo             variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage},
-                                                         { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} };
-
-       NVRAMLOCKASSERT();
+       NVRAMRegionInfo                   variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage},
+                                                               { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} };
 
        if (_systemPanicked) {
                return kIOReturnNotReady;
@@ -1708,8 +1742,9 @@ IODTNVRAM::syncVariables(void)
 
        DEBUG_INFO("...\n");
 
+       NVRAMLOCK();
+
        for (regionIndex = 0; regionIndex < ARRAY_SIZE(variableRegions); regionIndex++) {
-               OSSharedPtr<OSNumber> sizeUsed;
                currentRegion = &variableRegions[regionIndex];
 
                if (currentRegion->size == 0) {
@@ -1755,16 +1790,14 @@ IODTNVRAM::syncVariables(void)
 
                IODelete(buffer, UInt8, currentRegion->size);
 
-               sizeUsed = OSNumber::withNumber(maxLength, 32);
-               _nvramController->setProperty(currentRegion->name, sizeUsed.get());
-               sizeUsed.reset();
-
                if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_SYSTEM, strlen(NVRAM_CHRP_PARTITION_NAME_SYSTEM)) == 0) &&
                    (_systemService != nullptr)) {
                        _systemService->setProperties(_systemDict.get());
+                       systemUsed = maxLength;
                } else if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_COMMON, strlen(NVRAM_CHRP_PARTITION_NAME_COMMON)) == 0) &&
                    (_commonService != nullptr)) {
                        _commonService->setProperties(_commonDict.get());
+                       commonUsed = maxLength;
                }
 
                if (!ok) {
@@ -1772,9 +1805,31 @@ IODTNVRAM::syncVariables(void)
                }
        }
 
+       nvramImage = OSData::withBytes(_nvramImage, _nvramSize);
+
+       NVRAMUNLOCK();
+
        DEBUG_INFO("ok=%d\n", ok);
 
-       return _nvramController->write(0, _nvramImage, _nvramSize);
+       CONTROLLERLOCK();
+
+       if (_systemService) {
+               sizeUsed = OSNumber::withNumber(systemUsed, 32);
+               _nvramController->setProperty("SystemUsed", sizeUsed.get());
+               sizeUsed.reset();
+       }
+
+       if (_commonService) {
+               sizeUsed = OSNumber::withNumber(commonUsed, 32);
+               _nvramController->setProperty("CommonUsed", sizeUsed.get());
+               sizeUsed.reset();
+       }
+
+       ret = _nvramController->write(0, (uint8_t *)nvramImage->getBytesNoCopy(), nvramImage->getLength());
+
+       CONTROLLERUNLOCK();
+
+       return ret;
 }
 
 UInt32
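
serializeVariables (the renamed syncVariables) now follows a two-lock protocol: the variable dictionaries and _nvramImage are serialized into a private OSData snapshot under NVRAMLOCK, and only the snapshot is handed to the controller write under CONTROLLERLOCK. Callers (setPropertyInternal, removeProperty, writeNVRAMPropertyType1 below) invoke it after dropping NVRAMLOCK and roll back their dictionary edits if it fails. The partition-usage bookkeeping also changes: instead of a per-region property named after the CHRP partition, the controller gets explicit "SystemUsed"/"CommonUsed" OSNumbers.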
@@ -2344,22 +2399,25 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry,
                ok = _commonDict->setObject(_registryPropertiesKey.get(), data.get());
        }
 
+       NVRAMUNLOCK();
+
        if (ok) {
-               if (syncVariables() != kIOReturnSuccess) {
+               if (serializeVariables() != kIOReturnSuccess) {
+                       NVRAMLOCK();
                        if (oldData) {
                                _commonDict->setObject(_registryPropertiesKey.get(), oldData.get());
                        } else {
                                _commonDict->removeObject(_registryPropertiesKey.get());
                        }
-                       (void) syncVariables();
+                       NVRAMUNLOCK();
+
+                       (void) serializeVariables();
                        ok = false;
                }
        }
 
        oldData.reset();
 
-       NVRAMUNLOCK();
-
        return ok ? kIOReturnSuccess : kIOReturnNoMemory;
 }
 
index b4faa6330b2c054aec5742b99e1d7b7bb1c72296..2aec3e5c37ebf4a426e354cf8883181d85f3a9ea 100644 (file)
@@ -57,9 +57,6 @@
 #include "IOKitKernelInternal.h"
 #if HIBERNATION
 #include <IOKit/IOHibernatePrivate.h>
-#if __arm64__
-#include <arm64/ppl/ppl_hib.h>
-#endif /* __arm64__ */
 #endif /* HIBERNATION */
 #include <console/video_console.h>
 #include <sys/syslog.h>
@@ -578,7 +575,6 @@ defaultSleepPolicyHandler(void *ctx, const IOPMSystemSleepPolicyVariables *vars,
 
        // Hibernation enabled and either user forced hibernate or low battery sleep
        if ((vars->hibernateMode & kIOHibernateModeOn) &&
-           ppl_hib_hibernation_supported() &&
            (((vars->hibernateMode & kIOHibernateModeSleep) == 0) ||
            (vars->sleepFactors & kIOPMSleepFactorBatteryLow))) {
                sleepType = kIOPMSleepTypeHibernate;
@@ -1773,9 +1769,6 @@ IOPMrootDomain::start( IOService * nub )
 
 #if HIBERNATION
 #if defined(__arm64__)
-       if (ppl_hib_hibernation_supported()) {
-               publishFeature(kIOHibernateFeatureKey);
-       }
 #endif /* defined(__arm64__) */
        IOHibernateSystemInit(this);
 #endif
index ccbea3d900956e0d6c0c2ae4ebcaf13b98bc398e..9ff9f8043c7631244f9bca0f29e6d333701a357e 100644 (file)
@@ -591,19 +591,7 @@ IOGetHibernationCryptKey(uint8_t * hibernationKey,
     uint32_t *swSeed
     )
 {
-#if XNU_MONITOR_PPL_HIB
-       SEPHibernator *hibernator = SEPHibernator::sepHibernator();
-       sephib_wrapped_key_t wrappedKey = {};
-       sephib_seprom_hib_payload_t sepromPayload = {};
-       hibernator->prepareToHibernate(&wrappedKey, &sepromPayload);
-       *swSeed = sepromPayload.sw_seed;
-       assert(*keySize >= sizeof(wrappedKey.data));
-       *keySize = sizeof(wrappedKey.data);
-       memcpy(hibernationKey, wrappedKey.data, *keySize);
-       return kIOReturnSuccess;
-#else
        return kIOReturnNotFound;
-#endif
 }
 #endif /* defined(__arm64__) */
 
index f40ee0aafb511742d6a2d2f6abb6849090e4998e..b0684974c481919ad37ea703589d79bd391425d0 100644 (file)
@@ -19,8 +19,6 @@ HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS))
 IOHibernateRestoreKernel.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) -fno-sanitize=address -UKASAN
 # Stack protector and stack check must be disabled because the stack protector runtime isn't available
 IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check
-# signing keys aren't set up yet, so ptrauth must be disabled
-IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-ptrauth-calls
 
 IOHibernateIO.cpo_CFLAGS_ADD += -I$(SRCROOT)/osfmk
 IOHibernateRestoreKernel.o_CFLAGS_ADD += -I$(SRCROOT)/osfmk
index aa1c83d5bc50c53756fad56d70151c27c2d7c550..8b7c090e35ac80810187351efd7822c879e7302f 100644 (file)
@@ -3828,11 +3828,13 @@ OSKext::lookupKextWithAddress(vm_address_t address)
        OSSharedPtr<OSKext> foundKext;             // returned
        uint32_t count, i;
        kmod_info_t *kmod_info;
+       vm_address_t originalAddress;
 #if defined(__arm64__)
        uint64_t   textExecBase;
        size_t     textExecSize;
 #endif /* defined(__arm64__) */
 
+       originalAddress = address;
 #if  __has_feature(ptrauth_calls)
        address = (vm_address_t)VM_KERNEL_STRIP_PTR(address);
 #endif /*  __has_feature(ptrauth_calls) */
@@ -3868,7 +3870,8 @@ OSKext::lookupKextWithAddress(vm_address_t address)
        }
        /*
         * DriverKit userspace executables do not have a kernel linkedExecutable,
-        * so we "fake" their address range with the LoadTag.
+        * so we "fake" their address range with the LoadTag. We cannot use the ptrauth-stripped address
+        * here, so use the original address passed to this method.
         *
         * This is supposed to be used for logging reasons only. When logd
         * calls this function it ors the address with FIREHOSE_TRACEPOINT_PC_KERNEL_MASK, so we
@@ -3876,7 +3879,7 @@ OSKext::lookupKextWithAddress(vm_address_t address)
         * Also we need to remove FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT set when emitting the log line.
         */
 
-       address = address & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT);
+       address = originalAddress & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT);
        count = sLoadedDriverKitKexts->getCount();
        for (i = 0; i < count; i++) {
                OSKext * thisKext = OSDynamicCast(OSKext, sLoadedDriverKitKexts->getObject(i));
@@ -3901,14 +3904,6 @@ OSKext::copyKextUUIDForAddress(OSNumber *address)
                return NULL;
        }
 
-       uintptr_t addr = ml_static_slide((uintptr_t)address->unsigned64BitValue());
-       if (addr == 0) {
-               return NULL;
-       }
-#if  __has_feature(ptrauth_calls)
-       addr = (uintptr_t)VM_KERNEL_STRIP_PTR(addr);
-#endif /*  __has_feature(ptrauth_calls) */
-
 #if CONFIG_MACF
        /* Is the calling process allowed to query kext info? */
        if (current_task() != kernel_task) {
@@ -3928,10 +3923,28 @@ OSKext::copyKextUUIDForAddress(OSNumber *address)
                }
        }
 #endif
-       kext = lookupKextWithAddress(addr);
-       if (kext) {
-               uuid = kext->copyTextUUID();
+
+       uintptr_t slidAddress = ml_static_slide((uintptr_t)address->unsigned64BitValue());
+       if (slidAddress != 0) {
+               kext = lookupKextWithAddress(slidAddress);
+               if (kext) {
+                       uuid = kext->copyTextUUID();
+               }
+       }
+
+       if (!uuid) {
+               /*
+                * If we still don't have a UUID, then we failed to match the slid + stripped address with
+                * a kext. This might have happened because the log message came from a dext.
+                *
+                * Try again with the original address.
+                */
+               kext = lookupKextWithAddress((vm_address_t)address->unsigned64BitValue());
+               if (kext && kext->isDriverKit()) {
+                       uuid = kext->copyTextUUID();
+               }
        }
+
        return uuid;
 }
 
@@ -5392,10 +5405,8 @@ OSKext::loadCodelessKext(OSString *kextIdentifier, OSDictionary *requestDict)
                OSKext::recordIdentifierRequest(OSDynamicCast(OSString, newKext->getIdentifier()));
 
                result = kOSReturnSuccess;
-               /* send the kext's personalities to the IOCatalog */
-               if (!newKext->flags.requireExplicitLoad) {
-                       result = newKext->sendPersonalitiesToCatalog(true, NULL);
-               }
+               /* Send the kext's personalities to the IOCatalog. This is an explicit load. */
+               result = newKext->sendPersonalitiesToCatalog(true, NULL);
        }
 
 finish:
@@ -11843,6 +11854,13 @@ OSKext::loadFileSetKexts(OSDictionary * requestDict __unused)
 
        IORecursiveLockLock(sKextLock);
 
+       if (!sLoadEnabled) {
+               OSKextLog(NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag,
+                   "KextLog: Kext loading is disabled (attempt to load KCs).");
+               IORecursiveLockUnlock(sKextLock);
+               return ret;
+       }
+
        pageable_filepath = OSDynamicCast(OSString,
            requestArgs->getObject(kKextRequestArgumentPageableKCFilename));
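
Two behavioral tightenings in the hunks above: codeless kexts now always send their personalities to the IOCatalog on load (the requireExplicitLoad check is gone because a codeless load is itself explicit), and loadFileSetKexts refuses to load kext collections once sLoadEnabled has been cleared, closing a gap where KC loads bypassed the kext-loading kill switch.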
 
index 90382aa4de13fe95c51727958048745966e15f59..9763726b6453fc3191b627ee1f1e6bc33adf513b 100644 (file)
 #define __SECTION_START_SYM(seg, sect) asm("section$start$" seg "$" sect)
 #define __SECTION_END_SYM(seg, sect)   asm("section$end$" seg "$" sect)
 
+#if defined(__arm64__) || defined (__x86_64__)
+
+#define SECURITY_SEGMENT_NAME           "__DATA"
+#define SECURITY_SECTION_NAME           "__const"
+#define SECURITY_SEGMENT_SECTION_NAME   "__DATA,__const"
+
+#define __security_const_early const
+#define __security_const_late __attribute__((section(SECURITY_SEGMENT_SECTION_NAME)))
+#define __security_read_write
+
+#if HIBERNATION
+#define MARK_AS_HIBERNATE_TEXT __attribute__((section("__HIB, __text, regular, pure_instructions")))
+#define MARK_AS_HIBERNATE_DATA __attribute__((section("__HIB, __data")))
+#define MARK_AS_HIBERNATE_DATA_CONST_LATE __attribute__((section("__HIB, __const")))
+#endif /* HIBERNATION */
+#endif /* __arm64__ || __x86_64__ */
 
 #ifndef __security_const_early
 #define __security_const_early const
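
Defining these markers here for both arm64 and x86_64 lets shared code place functions and data into the __HIB segment, which must stay resident and intact for the hibernation restore path. A usage sketch (kernel build context assumed; the names below are hypothetical):

/* data the restore stub may touch before full VM is back up */
MARK_AS_HIBERNATE_DATA static uint64_t hib_restore_scratch;

/* code that must execute out of __HIB,__text during restore */
MARK_AS_HIBERNATE_TEXT static void hib_restore_step(void);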
index 2bbd75c3803ef04fe7c2f17b19d4ca36f3ddd902..8f654850aaf4a7f6e5e02e5eabaf9bbf73262c8c 100644 (file)
@@ -54,6 +54,7 @@ _LOG_HOST_LINK = $(call LOG,$1,$(ColorH),$(ColorLF),$(LOG_PFX_LEN))
 LOG_LDFILELIST = $(call LOG,LDFILELIST,$(ColorL),$(ColorLF),$(LOG_PFX_LEN_ADJ))
 LOG_MIG = $(call LOG,MIG,$(ColorM),$(ColorF),$(LOG_PFX_LEN_ADJ))
 LOG_LD = $(call LOG,LD,$(ColorL),$(ColorF),$(LOG_PFX_LEN_ADJ))
+LOG_ALIGN = $(call LOG,--------->,$(Color0),$(Color0),$(LOG_PFX_LEN))
 
 # Compiling/machine-specific operations.
 LOG_CC = $(call _LOG_COMP,CC)
@@ -81,6 +82,7 @@ LOG_ALIAS = $(call _LOG_HOST,ALIAS)
 LOG_STRIP = $(call _LOG_HOST,STRIP)
 LOG_DSYMUTIL = $(call _LOG_HOST,DSYMUTIL)
 LOG_LIBTOOL = $(call _LOG_HOST,LIBTOOL)
+LOG_FILEPREP = $(call _LOG_HOST,FILEPREP)
 
 # Host-side linking operations.
 LOG_GENASSYM = $(call _LOG_HOST_LINK,GENASSYM)
diff --git a/makedefs/MakeInc.color b/makedefs/MakeInc.color
new file mode 100644 (file)
index 0000000..ba3cfd7
--- /dev/null
@@ -0,0 +1,55 @@
+# -*- mode: makefile;-*-
+#
+# Copyright (C) 2020 Apple Inc. All rights reserved.
+#
+# MakeInc.color defines macros used to enable
+# colored output of the build log.
+#
+
+define _setup_build_log_colors
+ifeq ($${XNU_LOGCOLORS},y)
+    LOGCOLORS ?= y
+endif
+ifeq ($$(LOGCOLORS),y)
+    # Get a nice list of device code names associated with the build platform
+    ifndef CDevs
+        #ifdef EMBEDDED_DEVICE_MAP
+        #    export CDevs := $$(shell $$(EMBEDDED_DEVICE_MAP) -db $$(EDM_DBPATH) -query "SELECT DISTINCT TargetType FROM Targets WHERE KernelPlatform = '$$(CURRENT_MACHINE_CONFIG_LC)'" | tr '[\r\n]' ':' | sed 's,:$$$$,,')
+        #endif
+    endif
+    ifndef MACHINE_PFX
+        export _MACHINE := $$(CURRENT_MACHINE_CONFIG_LC)
+        ifeq ($$(CURRENT_MACHINE_CONFIG),NONE)
+            export _MACHINE := $$(subst OSX,,$$(PLATFORM))
+        endif
+        export MACHINE_PFX := $$(shell __A="$$(CURRENT_ARCH_CONFIG_LC)"; \
+                                         __As=$$$$((6-$$$${\#__A})); \
+                                         printf "%-.6s%*.*s %9.9s" \
+                                                "$$$${__A}" \
+                                                $$$${__As} $$$${__As} " " \
+                                                "$$(_MACHINE)")
+    endif
+    override LOG_PFX_LEN := 30
+    override LOG_PFX_LEN_ADJ := $$(shell __TMP="$$(MACHINE_PFX)"; \
+            printf "%d" $$$$(($$(LOG_PFX_LEN) - $$$${\#__TMP} - 3)))
+    MACHINE_PFX_COL = $$(shell printf "\\033[1m%s\\033[m" "$$(MACHINE_PFX)")
+    # Turn off colored output
+    Color0:=$$(shell printf "\\033[m")
+    # Start a host command: bold text
+    ColorH:=$$(shell printf "\\033[1m")
+    # Start a compilation-related command: blue text
+    ColorC:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;34m")
+    # Start a MIG command: green text
+    ColorM:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;32m")
+    # Start a linking command: purple text
+    ColorL:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;35m")
+    # Start a filename
+    ColorF:=$$(shell printf "")
+    # Start a linked file name: italic text
+    ColorLF:=$$(shell printf "\\033[3m")
+    # Error strings: red text
+    ColorErr:=$$(shell printf "\033[31m")
+endif
+endef
+
+# vim: set ft=make:
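
MakeInc.color is new: it centralizes the ANSI escape sequences consumed by the LOG_* macros in MakeInc.cmd (see the hunk above), keyed off XNU_LOGCOLORS=y in the environment. The sequences are ordinary SGR codes; a minimal C demonstration of the same palette:

#include <stdio.h>

int
main(void)
{
	printf("\033[1;34mCC\033[m   file.c\n");                 /* compile: bold blue */
	printf("\033[1;32mMIG\033[m  file.defs\n");              /* MIG: bold green */
	printf("\033[1;35mLD\033[m   \033[3mkernel\033[m\n");    /* link: bold purple, italic filename */
	return 0;
}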
index d53f3e1a77168c68c2e641be538b20c5aff97da0..508db7f967b449663037177b73bcdf9f0a126000 100644 (file)
@@ -32,7 +32,7 @@ SUPPORTED_ARM_MACHINE_CONFIGS = NONE
 SUPPORTED_ARM64_MACHINE_CONFIGS = NONE
 else
 SUPPORTED_ARM_MACHINE_CONFIGS = T8002 T8004
-SUPPORTED_ARM64_MACHINE_CONFIGS = T7000 T7001 S8000 S8001 T8010 T8011 BCM2837
+SUPPORTED_ARM64_MACHINE_CONFIGS = T7000 T7001 S8000 S8001 T8010 T8011 BCM2837 T8020 T8101 T8103
 
 endif
 
@@ -68,6 +68,9 @@ MACHINE_FLAGS_ARM_T8004 = -DARM_BOARD_CONFIG_T8004
 MACHINE_FLAGS_ARM64_T8010 = -DARM64_BOARD_CONFIG_T8010 -mcpu=hurricane
 MACHINE_FLAGS_ARM64_T8011 = -DARM64_BOARD_CONFIG_T8011 -mcpu=hurricane
 MACHINE_FLAGS_ARM64_BCM2837 = -DARM64_BOARD_CONFIG_BCM2837
+MACHINE_FLAGS_ARM64_T8020 = -DARM64_BOARD_CONFIG_T8020 -mcpu=vortex
+MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -mcpu=firestorm
+MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -mcpu=firestorm
 
 
 #
@@ -240,6 +243,9 @@ export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell $(EMBEDDED_DEVICE_MAP)
 else
 # Without embedded device map, use a default arch string
 export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell echo $(CURRENT_ARCH_CONFIG) | tr A-Z a-z)
+ifneq ($(filter ARM64,$(CURRENT_ARCH_CONFIG)),)
+export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := arm64e
+endif
 endif
 endif
 
@@ -275,7 +281,7 @@ endif
 # Default CFLAGS
 #
 ifdef RC_NONARCH_CFLAGS
-OTHER_CFLAGS = $(RC_NONARCH_CLFAGS)
+OTHER_CFLAGS = $(RC_NONARCH_CFLAGS)
 endif
 
 #
@@ -680,9 +686,10 @@ LDFLAGS_KERNEL_GENARM64 = \
        -Wl,-sectcreate,"__PLK_LLVM_COV",__llvm_covmap,/dev/null \
        -Wl,-sectcreate,"__PLK_LINKEDIT",__data,/dev/null
 
-
-LDFLAGS_KERNEL_SEGARM64 ?= \
-       -Wl,-segment_order,__TEXT:__DATA_CONST:__LINKEDIT:__TEXT_EXEC:__LAST:__KLD:__DATA:__BOOTDATA
+LDFLAGS_KERNEL_SEGARM64 = \
+       -Wl,-rename_section,__PPLDATA,__const,__PPLDATA_CONST,__const \
+       -Wl,-segment_order,__TEXT:__DATA_CONST:__LINKEDIT:__TEXT_EXEC:__PPLTEXT:__PPLTRAMP:__PPLDATA_CONST:__LASTDATA_CONST:__LAST:__PPLDATA:__KLD:__DATA:__HIBDATA:__BOOTDATA \
+       -Wl,-segprot,__PPLTEXT,r-x,r-x  -Wl,-segprot,__PPLTRAMP,r-x,r-x -Wl,-segprot,__PPLDATA_CONST,r--,r-- -Wl,-segprot,__LASTDATA_CONST,r--,r-- -Wl,-segprot,__LAST,r-x,r-x
 
 LDFLAGS_KERNEL_RELEASEARM64     = \
        $(LDFLAGS_KERNEL_GENARM64) \
@@ -726,7 +733,7 @@ LDFLAGS_KERNEL      = $(LDFLAGS_KERNEL_GEN) \
                  $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_))) \
                  $(DEPLOYMENT_TARGET_FLAGS)
 
-LDFLAGS_KERNEL_ONLY   =   \
+LDFLAGS_KERNEL_ONLY  +=   \
                  $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_ONLY_CONFIG_))) \
                  $($(addsuffix $(ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG),LDFLAGS_KERNEL_ONLY_SUBARCH_)) \
                  -Wl,-alias_list,$(TARGET)/all-alias.exp \
@@ -738,6 +745,11 @@ LDFLAGS_KERNEL_ONLY   =   \
 LD_KERNEL_LIBS    = -lcc_kext
 LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel
 
+# Link opensource binary library
+ifneq ($(filter T8020 T8101,$(CURRENT_MACHINE_CONFIG)),)
+LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
+endif
+
 #
 # DTrace support
 #
index f630a4ba05a9f23c1129ab7484a8c329e940d874..222b355d1d2eef04ff5dce7737f8a3f4a0e891b8 100644 (file)
@@ -1,6 +1,6 @@
 # -*- mode: makefile;-*-
 #
-# Copyright (C) 1999-2016 Apple Inc. All rights reserved.
+# Copyright (C) 1999-2020 Apple Inc. All rights reserved.
 #
 # MakeInc.kernel augments the single-architecture
 # recursive build system with rules specific
@@ -416,3 +416,5 @@ install_alias: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(ALIAS_FILE_NAME)
 
 print_exports:
        $(_v)printenv | sort
+
+# vim: set ft=make:
index ad66233b6ff832dbba1b636fd716222cb5a49e8d..d95e8186c19832be68034b8f34a3b2db62de9530 100644 (file)
@@ -1,6 +1,6 @@
 # -*- mode: makefile;-*-
 #
-# Copyright (C) 1999-2016 Apple Inc. All rights reserved.
+# Copyright (C) 1999-2020 Apple Inc. All rights reserved.
 #
 # MakeInc.rule defines the targets and rules for
 # leaf directories once MakeInc.dir has recursed
@@ -8,6 +8,7 @@
 # to allow the Makefile in the source directory
 # to augment the actions that will be performed.
 #
+include $(SRCROOT)/makedefs/MakeInc.color
 
 #
 # Generic Install rules
@@ -45,50 +46,7 @@ ifndef INSTALL_KF_MD_GEN_LIST
     INSTALL_KF_MD_GEN_LIST = $(EXPORT_MD_GEN_LIST)
 endif
 
-ifeq (${XNU_LOGCOLORS},y)
-    LOGCOLORS ?= y
-endif
-
-ifeq ($(LOGCOLORS),y)
-    # Get a nice list of device code names associated with the build platform
-    ifndef CDevs
-        #ifdef EMBEDDED_DEVICE_MAP
-        #    export CDevs := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query "SELECT DISTINCT TargetType FROM Targets WHERE KernelPlatform = '$(CURRENT_MACHINE_CONFIG_LC)'" | tr '[\r\n]' ':' | sed 's,:$$,,')
-        #endif
-    endif
-    ifndef MACHINE_PFX
-        export _MACHINE := $(CURRENT_MACHINE_CONFIG_LC)
-        ifeq ($(CURRENT_MACHINE_CONFIG),NONE)
-            export _MACHINE := $(subst OSX,,$(PLATFORM))
-        endif
-        export MACHINE_PFX := $(shell __A="$(CURRENT_ARCH_CONFIG_LC)"; \
-                                         __As=$$((6-$${\#__A})); \
-                                         printf "%-.6s%*.*s %9.9s" \
-                                                "$${__A}" \
-                                                $${__As} $${__As} " " \
-                                                "$(_MACHINE)")
-    endif
-    override LOG_PFX_LEN := 30
-    override LOG_PFX_LEN_ADJ := $(shell __TMP="$(MACHINE_PFX)"; \
-            printf "%d" $$(($(LOG_PFX_LEN) - $${\#__TMP} - 3)))
-    MACHINE_PFX_COL = $(shell printf "\\033[1m%s\\033[m" "$(MACHINE_PFX)")
-    # Turn off colored output
-    Color0:=$(shell printf "\\033[m")
-    # Start a host command: bold text
-    ColorH:=$(shell printf "\\033[1m")
-    # Start a compilation-related command: blue text
-    ColorC:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;34m")
-    # Start a MIG command: green text
-    ColorM:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;32m")
-    # Start a linking command: purple text
-    ColorL:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;35m")
-    # Start a filename
-    ColorF:=$(shell printf "")
-    # Start a linked file name: italic text
-    ColorLF:=$(shell printf "\\033[3m")
-    # Error strings: red text
-    ColorErr:=$(shell printf "\033[31m")
-endif
+$(eval $(call _setup_build_log_colors))
 
 .PHONY: ALWAYS
 
index 213d0cec19ea3bc88ca6c77587ad93d9d57d8752..e1ab49bc32fe8aac7da1efcc6dce7a17b4ce2d73 100644 (file)
@@ -1,6 +1,6 @@
 # -*- mode: makefile;-*-
 #
-# Copyright (C) 2010-2016 Apple Inc. All rights reserved.
+# Copyright (C) 2010-2020 Apple Inc. All rights reserved.
 #
 # MakeInc.top is the top-level makefile for the xnu
 # build system. All the main XBS targets
@@ -758,3 +758,5 @@ $(eval $(generated_top_level_print_exports))
 .PHONY: print_exports_first_build_config
 
 print_exports_first_build_config: print_exports_bootstrap
+
+# vim: set ft=make:
index 9c11427c2f0da1afea408c2206ece71b80e91190..80a448ecb2cb4d4805fd43a81f656534a547f4f4 100644 (file)
@@ -111,7 +111,6 @@ int             debug_task;
 bool need_wa_rdar_55577508 = false;
 SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false;
 
-
 #if HAS_BP_RET
 /* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */
 uint32_t bp_ret = 3;
@@ -154,6 +153,9 @@ void arm_init(boot_args * args);
 
 #if __arm64__
 unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */
+
+extern void configure_misc_apple_boot_args(void);
+extern void configure_misc_apple_regs(void);
 #endif /* __arm64__ */
 
 
@@ -282,33 +284,6 @@ arm_auxkc_init(void *mh, void *base)
 #endif /* defined(HAS_APPLE_PAC) */
 }
 
-#if HAS_IC_INVAL_FILTERS
-static void
-configure_misc_apple_regs(void)
-{
-       uint64_t actlr, __unused acfg, __unused ahcr;
-
-       actlr = get_aux_control();
-
-#if HAS_IC_INVAL_FILTERS
-       ahcr = __builtin_arm_rsr64(ARM64_REG_AHCR_EL2);
-       ahcr |= AHCR_IC_IVAU_EnRegime;
-       ahcr |= AHCR_IC_IVAU_EnVMID;
-       ahcr |= AHCR_IC_IALLU_EnRegime;
-       ahcr |= AHCR_IC_IALLU_EnVMID;
-       __builtin_arm_wsr64(ARM64_REG_AHCR_EL2, ahcr);
-#endif /* HAS_IC_INVAL_FILTERS */
-
-
-#if HAS_IC_INVAL_FILTERS
-       actlr |= ACTLR_EL1_IC_IVAU_EnASID;
-#endif /* HAS_IC_INVAL_FILTERS */
-
-       set_aux_control(actlr);
-
-}
-#endif /* HAS_IC_INVAL_FILTERS */
-
 /*
  *             Routine:                arm_init
  *             Function:               Runs on the boot CPU, once, on entry from iBoot.
@@ -341,25 +316,10 @@ arm_init(
 
 #if __arm64__
        wfe_timeout_configure();
-#if HAS_IC_INVAL_FILTERS
+
+       configure_misc_apple_boot_args();
        configure_misc_apple_regs();
-#endif /* HAS_IC_INVAL_FILTERS */
 
-#if defined(HAS_APPLE_PAC)
-#if DEVELOPMENT || DEBUG
-       boolean_t user_jop = TRUE;
-       PE_parse_boot_argn("user_jop", &user_jop, sizeof(user_jop));
-       if (!user_jop) {
-               args->bootFlags |= kBootFlagsDisableUserJOP;
-       }
-#endif /* DEVELOPMENT || DEBUG */
-       boolean_t user_ts_jop = TRUE;
-       PE_parse_boot_argn("user_ts_jop", &user_ts_jop, sizeof(user_ts_jop));
-       if (!user_ts_jop) {
-               args->bootFlags |= kBootFlagsDisableUserThreadStateJOP;
-       }
-       PE_parse_boot_argn("diversify_user_jop", &diversify_user_jop, sizeof(diversify_user_jop));
-#endif /* defined(HAS_APPLE_PAC) */
 
        {
                /*
@@ -507,6 +467,9 @@ arm_init(
         */
 #if __arm64__
        need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER;
+#ifndef RC_HIDE_XNU_FIRESTORM
+       need_wa_rdar_55577508 |= (cpuid_get_cpufamily() == CPUFAMILY_ARM_FIRESTORM_ICESTORM && get_arm_cpu_version() == CPU_VERSION_A0);
+#endif
 #endif
 
        /* setup debugging output if one has been chosen */
@@ -623,9 +586,9 @@ arm_init_cpu(
        __builtin_arm_wsr("pan", 1);
 #endif
 
-#if HAS_IC_INVAL_FILTERS
+#ifdef __arm64__
        configure_misc_apple_regs();
-#endif /* HAS_IC_INVAL_FILTERS */
+#endif
 
        cpu_data_ptr->cpu_flags &= ~SleepState;
 #if     defined(ARMA7)
index 5556a00ae0c57d9659ab12d68c41b425a0b9770a..d30f034a06e074bd58fb352ebf1458538ad7b7e3 100644 (file)
@@ -31,7 +31,6 @@
 #include <arm/pmap.h>
 #include <sys/errno.h>
 #include "assym.s"
-#include "caches_macros.s"
 
 
 /*
@@ -144,37 +143,31 @@ fmir_loop:
 LEXT(CleanPoC_Dcache)
 LEXT(clean_mmu_dcache)
 #if    !defined(__ARM_L1_WT_CACHE__)
-       mov             r0, #0
-       GET_CACHE_CONFIG r0, r1, r2, r3
        mov             r0, #0
        dsb
 clean_dcacheway:
 clean_dcacheline:              
        mcr             p15, 0, r0, c7, c10, 2                           // clean dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << MMU_I7SET                          // increment set index
+       tst             r0, #1 << (MMU_NSET + MMU_I7SET)         // look for overflow
        beq             clean_dcacheline
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
+       adds    r0, r0, #1 << MMU_I7WAY                          // increment way
        bcc             clean_dcacheway                                          // loop
 #endif
-       HAS_L2_CACHE r0
-       cmp             r0, #0
-       beq             clean_skipl2dcache
-       mov             r0, #1
-       GET_CACHE_CONFIG r0, r1, r2, r3
+#if __ARM_L2CACHE__
        dsb
        mov             r0, #2
 clean_l2dcacheway:
 clean_l2dcacheline:            
        mcr             p15, 0, r0, c7, c10, 2                           // clean dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << L2_I7SET                           // increment set index
+       tst             r0, #1 << (L2_NSET + L2_I7SET)           // look for overflow
        beq             clean_l2dcacheline
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (L2_NSET + L2_I7SET)       // clear set overflow
+       adds    r0, r0, #1 << L2_I7WAY                           // increment way
        bcc             clean_l2dcacheway                                        // loop
-clean_skipl2dcache:
+#endif
        dsb
        bx              lr
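
The way/set walks above all share one shape: visit every set within a way, then carry into the next way until the way field overflows out of bit 31 (for the L2 pass the operand starts at 2, i.e. cache level 1 in bits [3:1]). A C sketch of the same loop, assuming a hypothetical dc_clean_by_setway() helper standing in for the mcr p15, 0, rX, c7, c10, 2 instruction:

    #include <stdbool.h>
    #include <stdint.h>

    static void
    clean_dcache_by_setway(void)
    {
        uint32_t sw = 0;                               // way in the top bits, set above the line offset
        bool done = false;
        do {
            dc_clean_by_setway(sw);                    // clean one line, selected by set/way
            sw += 1u << MMU_I7SET;                     // advance to the next set
            if (sw & (1u << (MMU_NSET + MMU_I7SET))) { // set index overflowed this way
                sw &= ~(1u << (MMU_NSET + MMU_I7SET)); // wrap the set index
                uint64_t next = (uint64_t)sw + (1u << MMU_I7WAY);
                done = (next >> 32) != 0;              // carry out of bit 31: every way visited
                sw = (uint32_t)next;
            }
        } while (!done);
    }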
                
@@ -188,18 +181,16 @@ clean_skipl2dcache:
        .globl EXT(CleanPoU_Dcache)
 LEXT(CleanPoU_Dcache)
 #if    !defined(__ARM_PoU_WT_CACHE__)
-       mov             r0, #0
-       GET_CACHE_CONFIG r0, r1, r2, r3
        mov             r0, #0
        dsb
 clean_dcacheway_idle:
 clean_dcacheline_idle:         
        mcr             p15, 0, r0, c7, c10, 2                           // clean dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << MMU_I7SET                          // increment set index
+       tst             r0, #1 << (MMU_NSET + MMU_I7SET)         // look for overflow
        beq             clean_dcacheline_idle
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
+       adds    r0, r0, #1 << MMU_I7WAY                          // increment way
        bcc             clean_dcacheway_idle                             // loop
 #endif
        dsb
@@ -248,6 +239,7 @@ LEXT(CleanPoC_DcacheRegion_Force)
        add             r1, r1, r2
        sub             r1, r1, #1
        mov             r1, r1, LSR #MMU_CLINE                          // Set cache line counter
+       dsb
 ccdr_loop:
        mcr             p15, 0, r0, c7, c10, 1                          // Clean dcache line to PoC
        add             r0, r0, #1<<MMU_CLINE                           // Get next cache aligned addr
@@ -265,36 +257,30 @@ ccdr_loop:
        .align 2
        .globl EXT(FlushPoC_Dcache)
 LEXT(FlushPoC_Dcache)
-       mov             r0, #0
-       GET_CACHE_CONFIG r0, r1, r2, r3
        mov             r0, #0
        dsb
 cleanflush_dcacheway:
 cleanflush_dcacheline:         
        mcr             p15, 0, r0, c7, c14, 2                           // cleanflush dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << MMU_I7SET                          // increment set index
+       tst             r0, #1 << (MMU_NSET + MMU_I7SET)         // look for overflow
        beq             cleanflush_dcacheline
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
+       adds    r0, r0, #1 << MMU_I7WAY                          // increment way
        bcc             cleanflush_dcacheway                             // loop
-       HAS_L2_CACHE r0
-       cmp             r0, #0
-       beq             cleanflush_skipl2dcache
-       mov             r0, #1
-       GET_CACHE_CONFIG r0, r1, r2, r3
+#if __ARM_L2CACHE__
        dsb
        mov             r0, #2
 cleanflush_l2dcacheway:
 cleanflush_l2dcacheline:               
        mcr             p15, 0, r0, c7, c14, 2                           // cleanflush dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << L2_I7SET                           // increment set index
+       tst             r0, #1 << (L2_NSET + L2_I7SET)          // look for overflow
        beq             cleanflush_l2dcacheline
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (L2_NSET + L2_I7SET)       // clear set overflow
+       adds    r0, r0, #1 << L2_I7WAY                           // increment way
        bcc             cleanflush_l2dcacheway                           // loop
-cleanflush_skipl2dcache:
+#endif
        dsb
        bx              lr
 
@@ -307,18 +293,16 @@ cleanflush_skipl2dcache:
        .align 2
        .globl EXT(FlushPoU_Dcache)
 LEXT(FlushPoU_Dcache)
-       mov             r0, #0
-       GET_CACHE_CONFIG r0, r1, r2, r3
        mov             r0, #0
        dsb
 fpud_way:
 fpud_line:             
        mcr             p15, 0, r0, c7, c14, 2                           // cleanflush dcache line by way/set
-       add             r0, r0, r1                                                       // increment set index
-       tst             r0, r2                                                           // look for overflow
+       add             r0, r0, #1 << MMU_I7SET                          // increment set index
+       tst             r0, #1 << (MMU_NSET + MMU_I7SET)         // look for overflow
        beq             fpud_line
-       bic             r0, r0, r2                                                       // clear set overflow
-       adds    r0, r0, r3                                                       // increment way
+       bic             r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
+       adds    r0, r0, #1 << MMU_I7WAY                          // increment way
        bcc             fpud_way                                                         // loop
        dsb
        bx              lr
diff --git a/osfmk/arm/caches_macros.s b/osfmk/arm/caches_macros.s
deleted file mode 100644 (file)
index c028677..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (c) 2019 Apple Inc. All rights reserved.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. The rights granted to you under the License
- * may not be used to create, or enable the creation or redistribution of,
- * unlawful or unlicensed copies of an Apple operating system, or to
- * circumvent, violate, or enable the circumvention or violation of, any
- * terms of an Apple operating system software license agreement.
- *
- * Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
- *
- * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
- */
-
-/*
- *     Obtains cache physical layout information required for way/set
- *     data cache maintenance operations.
- *
- *     $0: Data cache level, starting from 0
- *     $1: Output register for set increment
- *     $2: Output register for last valid set
- *     $3: Output register for way increment
- */
-.macro GET_CACHE_CONFIG
-       lsl             $0, $0, #1
-       mcr             p15, 2, $0, c0, c0, 0                           // Select appropriate cache
-       isb                                                                                     // Synchronize context
-
-       mrc             p15, 1, $0, c0, c0, 0
-       ubfx    $1, $0, #3, #10                                         // extract number of ways - 1
-       mov             $2, $1
-       add             $1, $1, #1                                                      // calculate number of ways
-
-       mov             $0, #31
-       and             $2, $2, $1
-       cmp             $2, #0
-       addne   $0, $0, #1
-       clz             $1, $1
-       sub             $0, $0, $1
-
-       mov     $1, #32                                                         // calculate way increment
-       sub             $3, $1, $0
-       mov             $1, #1
-       lsl             $3, $1, $3
-
-       mrc             p15, 1, $0, c0, c0, 0
-       ubfx    $1, $0, #0, #3                                          // extract log2(line size) - 4
-       add             $1, $1, #4                                                      // calculate log2(line size)
-       mov             $2, #1
-       lsl             $1, $2, $1                                                      // calculate set increment
-
-       ubfx    $2, $0, #13, #15                                        // extract number of sets - 1
-       add             $2, $2, #1                                                      // calculate number of sets
-       mul             $2, $1, $2                                                      // calculate last valid set
-.endmacro
-
-/*
- *     Detects the presence of an L2 cache and returns 1 if implemented,
- *     zero otherwise.
- *
- *     $0: Output register
- */
-.macro HAS_L2_CACHE
-       mrc             p15, 1, $0, c0, c0, 1
-       ubfx    $0, $0, #3, #3                                          // extract L2 cache Ctype
-       cmp             $0, #0x1
-       movls   $0, #0
-       movhi   $0, #1
-.endmacro
\ No newline at end of file
index 28690d7d847469b96d3dde194cc797e0643d96b3..d9343a0e3c8903d4f7d82432aa2ef58dff74d8db 100644 (file)
@@ -169,6 +169,9 @@ typedef struct cpu_data {
        bool                            cpu_hibernate; /* This cpu is currently hibernating the system */
        bool                            cpu_running;
        bool                            cluster_master;
+#if __ARM_ARCH_8_5__
+       bool                            sync_on_cswitch;
+#endif /* __ARM_ARCH_8_5__ */
        /* true if processor_start() or processor_exit() is operating on this CPU */
        bool                            in_state_transition;
 
index 5225154f3c3ff3c721623c0dd2595ed75a86ceb3..8a3f4c68f24ba11ba8d0272362e0803659d7fb17 100644 (file)
@@ -181,8 +181,19 @@ cpuid_get_cpufamily(void)
                        break;
                case CPU_PART_LIGHTNING:
                case CPU_PART_THUNDER:
+#ifndef RC_HIDE_XNU_FIRESTORM
+               case CPU_PART_THUNDER_M10:
+#endif
                        cpufamily = CPUFAMILY_ARM_LIGHTNING_THUNDER;
                        break;
+#ifndef RC_HIDE_XNU_FIRESTORM
+               case CPU_PART_FIRESTORM:
+               case CPU_PART_ICESTORM:
+               case CPU_PART_FIRESTORM_TONGA:
+               case CPU_PART_ICESTORM_TONGA:
+                       cpufamily = CPUFAMILY_ARM_FIRESTORM_ICESTORM;
+                       break;
+#endif
                default:
                        cpufamily = CPUFAMILY_UNKNOWN;
                        break;
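
Each CPU_PART_* value is matched against the part-number field of the main ID register. As a sketch of where that field lives (the layout is architectural: MIDR_EL1.PartNum is bits [15:4]; the helper name is illustrative):

    #include <stdint.h>

    static inline uint32_t
    midr_part_number(uint64_t midr_el1)
    {
        return (uint32_t)((midr_el1 >> 4) & 0xfff);   // MIDR_EL1.PartNum, bits [15:4]
    }

So, for example, a part number equal to CPU_PART_FIRESTORM (0x21) or CPU_PART_ICESTORM (0x20) selects CPUFAMILY_ARM_FIRESTORM_ICESTORM above.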
@@ -216,6 +227,10 @@ cpuid_get_cpusubfamily(void)
        case CPU_PART_TEMPEST:
        case CPU_PART_LIGHTNING:
        case CPU_PART_THUNDER:
+#ifndef RC_HIDE_XNU_FIRESTORM
+       case CPU_PART_FIRESTORM:
+       case CPU_PART_ICESTORM:
+#endif
                cpusubfamily = CPUSUBFAMILY_ARM_HP;
                break;
        case CPU_PART_TYPHOON_CAPRI:
@@ -223,9 +238,16 @@ cpuid_get_cpusubfamily(void)
        case CPU_PART_HURRICANE_MYST:
        case CPU_PART_VORTEX_ARUBA:
        case CPU_PART_TEMPEST_ARUBA:
+#ifndef RC_HIDE_XNU_FIRESTORM
+       case CPU_PART_FIRESTORM_TONGA:
+       case CPU_PART_ICESTORM_TONGA:
+#endif
                cpusubfamily = CPUSUBFAMILY_ARM_HG;
                break;
        case CPU_PART_TEMPEST_M9:
+#ifndef RC_HIDE_XNU_FIRESTORM
+       case CPU_PART_THUNDER_M10:
+#endif
                cpusubfamily = CPUSUBFAMILY_ARM_M;
                break;
        default:
index b9ae985835571fd07ae51b0769aea89f5e9383bb..ab0fa5934d5e1b03efe709ecac88e6a752a4478d 100644 (file)
@@ -154,7 +154,31 @@ typedef union {
 /* H12 e-Core (ARMv8 architecture) */
 #define CPU_PART_THUNDER            0x13
 
+#ifndef RC_HIDE_XNU_FIRESTORM
+/*
+ * Whilst this is a Thunder-based SoC, it
+ * hasn't been released and should remain
+ * hidden in 2020 seeds.
+ */
+/* M10 e-Core (ARMv8 architecture) */
+#define CPU_PART_THUNDER_M10        0x26
+#endif
+
+#ifndef RC_HIDE_XNU_FIRESTORM
+
+/* H13 e-Core */
+#define CPU_PART_ICESTORM           0x20
+
+/* H13 p-Core */
+#define CPU_PART_FIRESTORM          0x21
+
+/* H13G e-Core */
+#define CPU_PART_ICESTORM_TONGA     0x22
+
+/* H13G p-Core */
+#define CPU_PART_FIRESTORM_TONGA    0x23
 
+#endif /* !RC_HIDE_XNU_FIRESTORM */
 
 
 
index 33e1b8ca3a4b48a70ac103cb873f25ac0d9acd07..d5c950ae9bf57ff2ca9faa34de35e1ef70acc8a8 100644 (file)
@@ -349,14 +349,5 @@ main(
        DECLARE("BA_TOP_OF_KERNEL_DATA",
            offsetof(struct boot_args, topOfKernelData));
 
-       DECLARE("ENTROPY_SAMPLE_COUNT",
-           offsetof(entropy_data_t, sample_count));
-       DECLARE("ENTROPY_BUFFER",
-           offsetof(entropy_data_t, buffer));
-       DECLARE("ENTROPY_BUFFER_INDEX_MASK",
-           offsetof(entropy_data_t, buffer_index_mask));
-       DECLARE("ENTROPY_BUFFER_ROR_MASK",
-           offsetof(entropy_data_t, ror_mask));
-
        return 0;
 }
index 62fe102b964186f9058bf43d56e572d43b3d5cdd..1b041c9c3d091fe8b6549336158f3d6f1036c275 100644 (file)
@@ -32,7 +32,6 @@ LOAD_ADDR_GEN_DEF(fiqstack_top)
 LOAD_ADDR_GEN_DEF(gVirtBase)
 LOAD_ADDR_GEN_DEF(gPhysBase)
 LOAD_ADDR_GEN_DEF(gPhysSize)
-LOAD_ADDR_GEN_DEF(EntropyData)
 LOAD_ADDR_GEN_DEF(kdebug_enable)
 #if CONFIG_TELEMETRY
 LOAD_ADDR_GEN_DEF(telemetry_needs_record)
index d3d808330fcbcaa0a7e0346bd0d3469b2bfc8827..2bbed50a3ebe3a00c158b2d9496f0912da422784 100644 (file)
@@ -1360,21 +1360,9 @@ fleh_irq_handler:
        movs    r8, r8
        COND_EXTERN_BLNE(interrupt_trace_exit)
 #endif
-       mrc             p15, 0, r9, c13, c0, 4                          // Reload r9 from TPIDRPRW
-       bl              EXT(ml_get_timebase)                            // get current timebase
-       LOAD_ADDR(r3, EntropyData)
-       ldr             r1, [r3, ENTROPY_SAMPLE_COUNT]
-       ldr             r2, [r3, ENTROPY_BUFFER_INDEX_MASK]
-       add             r4, r1, 1
-       and             r5, r1, r2
-       str             r4, [r3, ENTROPY_SAMPLE_COUNT]
-       ldr             r1, [r3, ENTROPY_BUFFER]
-       ldr             r2, [r3, ENTROPY_BUFFER_ROR_MASK]
-       ldr             r4, [r1, r5, lsl #2]
-       and             r4, r4, r2
-       eor             r0, r0, r4, ror #9
-       str             r0, [r1, r5, lsl #2]
+       bl              EXT(entropy_collect)
 return_from_irq:
+       mrc             p15, 0, r9, c13, c0, 4                          // Reload r9 from TPIDRPRW
        mov             r5, #0
        ldr             r4, [r9, ACT_CPUDATAP]                          // Get current cpu
        str             r5, [r4, CPU_INT_STATE]                         // Clear cpu_int_state
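
The open-coded mixing deleted here now lives behind entropy_collect(). For reference, a rough C equivalent of the removed instructions (r0 held the fresh timebase; field names are inferred from the ENTROPY_* offsets that genassym.c exported, which this commit also removes):

    // Sketch of the removed accumulation, not of the new entropy_collect().
    static inline void
    entropy_accumulate(entropy_data_t *e, uint32_t timebase)
    {
        uint32_t i = e->sample_count++ & e->buffer_index_mask;    // circular buffer index
        uint32_t prev = e->buffer[i] & e->ror_mask;               // mask the previous word
        e->buffer[i] = timebase ^ ((prev >> 9) | (prev << 23));   // fold in, rotated right by 9
    }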
@@ -1569,8 +1557,6 @@ fleh_decirq_handler:
        COND_EXTERN_BLNE(interrupt_trace_exit)
 #endif
 
-       mrc             p15, 0, r9, c13, c0, 4                          // Reload r9 from TPIDRPRW
-
        b               return_from_irq
 
 
@@ -1816,8 +1802,6 @@ LEXT(fleh_dec)
 #endif
        UNALIGN_STACK
 
-       mrc             p15, 0, r9, c13, c0, 4                          // Reload r9 from TPIDRPRW
-
        b       return_from_irq
 
 /*
index c3dd12751d04ae71ba24ee19cba469c7b4250b98..24b9c76982c7c378252284b788e78f046cd3211c 100644 (file)
@@ -270,6 +270,8 @@ typedef struct ml_cpu_info ml_cpu_info_t;
 
 typedef enum {
        CLUSTER_TYPE_SMP,
+       CLUSTER_TYPE_E,
+       CLUSTER_TYPE_P,
 } cluster_type_t;
 
 cluster_type_t ml_get_boot_cluster(void);
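
With the new enumerators, ml_get_boot_cluster() can report whether an asymmetric SoC booted on an efficiency or a performance cluster (CLUSTER_TYPE_SMP covers symmetric parts). A hypothetical caller, as a sketch:

    if (ml_get_boot_cluster() == CLUSTER_TYPE_P) {
        /* booted on the performance cluster */
    }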
@@ -675,6 +677,10 @@ void ml_init_timebase(
 
 uint64_t ml_get_timebase(void);
 
+uint64_t ml_get_speculative_timebase(void);
+
+uint64_t ml_get_timebase_entropy(void);
+
 void ml_init_lock_timeout(void);
 
 boolean_t ml_delay_should_spin(uint64_t interval);
@@ -1272,18 +1278,6 @@ void ml_thread_set_disable_user_jop(thread_t thread, uint8_t disable_user_jop);
 void ml_thread_set_jop_pid(thread_t thread, task_t task);
 void *ml_auth_ptr_unchecked(void *ptr, unsigned key, uint64_t modifier);
 
-/**
- * Temporarily enables a userspace JOP key in kernel space, so that the kernel
- * can sign or auth pointers on that process's behalf.
- *
- * @note The caller must disable interrupts before calling
- * ml_enable_user_jop_key(), and may only re-enable interrupts after the
- * complementary ml_disable_user_jop_key() call.
- *
- * @param user_jop_key  The userspace JOP key to temporarily use
- * @return              Saved JOP state, to be passed to the complementary
- *                      ml_disable_user_jop_key() call
- */
 uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);
 
 /**
@@ -1298,6 +1292,7 @@ uint64_t ml_enable_user_jop_key(uint64_t user_jop_key);
 void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state);
 #endif /* defined(HAS_APPLE_PAC) */
 
+void ml_enable_monitor(void);
 
 
 #endif /* KERNEL_PRIVATE */
diff --git a/osfmk/arm/machine_routines_apple.c b/osfmk/arm/machine_routines_apple.c
new file mode 100644 (file)
index 0000000..9ce4fa8
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <pexpert/pexpert.h>
+
+#include <arm/cpuid_internal.h>
+#include <arm/pmap.h>
+#include <arm/proc_reg.h>
+#include <machine/machine_cpuid.h>
+#include <machine/machine_routines.h>
+
+
+#if __arm64__
+
+void configure_misc_apple_boot_args(void);
+void configure_misc_apple_regs(void);
+
+void
+configure_misc_apple_boot_args(void)
+{
+}
+
+void
+configure_misc_apple_regs(void)
+{
+}
+
+#endif /* __arm64__ */
index 2d3a54065570fae90ffb34a88e33d873e6157734..7de46fcf92ee53ef23d60584c3625d6318580dee 100644 (file)
@@ -1156,16 +1156,12 @@ LEXT(reenable_async_aborts)
        bx              lr
 
 /*
- *     uint64_t ml_get_timebase(void)
+ *     uint64_t ml_get_speculative_timebase(void)
  */
        .text
        .align 2
-       .globl EXT(ml_get_timebase)
-LEXT(ml_get_timebase)
-       mrc             p15, 0, r12, c13, c0, 4                                         // Read TPIDRPRW
-       ldr             r3, [r12, ACT_CPUDATAP]                                         // Get current cpu data
-#if __ARM_TIME__ || __ARM_TIME_TIMEBASE_ONLY__
-       isb                                                                                                     // Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2.
+       .globl EXT(ml_get_speculative_timebase)
+LEXT(ml_get_speculative_timebase)
 1:
        mrrc    p15, 0, r3, r1, c14                                                     // Read the Time Base (CNTPCT), high => r1
        mrrc    p15, 0, r0, r3, c14                                                     // Read the Time Base (CNTPCT), low => r0
@@ -1173,21 +1169,32 @@ LEXT(ml_get_timebase)
        cmp             r1, r2
        bne             1b                                                                                      // Loop until both high values are the same
 
+       mrc             p15, 0, r12, c13, c0, 4                                         // Read TPIDRPRW
        ldr             r3, [r12, ACT_CPUDATAP]                                         // Get current cpu data
        ldr             r2, [r3, CPU_BASE_TIMEBASE_LOW]                         // Add in the offset to
        adds    r0, r0, r2                                                                      // convert to
        ldr             r2, [r3, CPU_BASE_TIMEBASE_HIGH]                        // mach_absolute_time
        adc             r1, r1, r2                                                                      //
-#else /* ! __ARM_TIME__  || __ARM_TIME_TIMEBASE_ONLY__ */
-1:
-       ldr             r2, [r3, CPU_TIMEBASE_HIGH]                                     // Get the saved TBU value
-       ldr             r0, [r3, CPU_TIMEBASE_LOW]                                      // Get the saved TBL value
-       ldr             r1, [r3, CPU_TIMEBASE_HIGH]                                     // Get the saved TBU value
-       cmp             r1, r2                                                                          // Make sure TB has not rolled over
-       bne             1b
-#endif /* __ARM_TIME__ */
        bx              lr                                                                                      // return
 
+/*
+ *     uint64_t ml_get_timebase(void)
+ */
+       .text
+       .align 2
+       .globl EXT(ml_get_timebase)
+LEXT(ml_get_timebase)
+       isb                                                                                                     // Required by ARMV7C.b section B8.1.2, ARMv8 section D6.1.2.
+       b       EXT(ml_get_speculative_timebase)
+
+/*
+ *     uint64_t ml_get_timebase_entropy(void)
+ */
+       .text
+       .align 2
+       .globl EXT(ml_get_timebase_entropy)
+LEXT(ml_get_timebase_entropy)
+       b       EXT(ml_get_speculative_timebase)
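
The split gives three entry points over one underlying reader: ml_get_speculative_timebase() does the raw CNTPCT read plus the per-cpu base offset, ml_get_timebase() adds the ISB so the counter cannot be read early, and ml_get_timebase_entropy() deliberately skips the barrier. In C terms (a sketch; 0xf is the SY option for the clang ISB builtin):

    #include <stdint.h>

    uint64_t
    ml_get_timebase(void)
    {
        __builtin_arm_isb(0xf);                    // ISB SY: forbid a speculative/early read
        return ml_get_speculative_timebase();      // CNTPCT + per-cpu base timebase offset
    }

    uint64_t
    ml_get_timebase_entropy(void)
    {
        // No barrier on purpose: a speculatively-read, jittery timestamp is
        // acceptable (even desirable) when it only feeds the entropy pool.
        return ml_get_speculative_timebase();
    }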
 
 /*
  *     uint32_t ml_get_decrementer(void)
index 82ce926285e998e342c20cc82e765a84432ddb60..76308798b60a83047b9b399cbc365569967a9090 100644 (file)
@@ -73,10 +73,14 @@ extern void __dead2 Call_continuation(thread_continue_t, void *, wait_result_t,
  * Prior to ARMv8.5, the eret instruction itself is always synchronizing, and
  * this function is an empty stub which serves only as documentation.
  */
+#if __ARM_ARCH_8_5__
+extern void arm_context_switch_requires_sync(void);
+#else
 static inline void
 arm_context_switch_requires_sync(void)
 {
 }
+#endif /* __ARM_ARCH_8_5__ */
 
 #if __has_feature(ptrauth_calls)
 extern boolean_t arm_user_jop_disabled(void);
index 92337e64c3dbc685bd4692d11d8b59c230c1fcba..38b238d82f29fef956f432c72be11f850acea417 100644 (file)
@@ -662,6 +662,14 @@ int pmap_stats_assert = 1;
 #endif /* DEVELOPMENT || DEBUG */
 
 
+#ifdef PLATFORM_BridgeOS
+static struct pmap_legacy_trust_cache *pmap_legacy_trust_caches MARK_AS_PMAP_DATA = NULL;
+#endif
+static struct pmap_image4_trust_cache *pmap_image4_trust_caches MARK_AS_PMAP_DATA = NULL;
+
+MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_loaded_trust_caches_lock, 0);
+
+
 /*
  * Represents a tlb range that will be flushed before exiting
  * the ppl.
@@ -1115,8 +1123,12 @@ SECURITY_READ_ONLY_LATE(boolean_t)      pmap_initialized = FALSE;       /* Has p
 
 SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm_pmap_max_offset_default  = 0x0;
 #if defined(__arm64__)
+#  ifdef XNU_TARGET_OS_OSX
+SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = MACH_VM_MAX_ADDRESS;
+#  else
 SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = 0x0;
-#endif
+#  endif
+#endif /* __arm64__ */
 
 #if PMAP_PANIC_DEV_WIMG_ON_MANAGED && (DEVELOPMENT || DEBUG)
 SECURITY_READ_ONLY_LATE(boolean_t)   pmap_panic_dev_wimg_on_managed = TRUE;
@@ -2245,16 +2257,29 @@ PMAP_SUPPORT_PROTOTYPES(
        addr64_t vstart,
        uint64_t size), PMAP_TRIM_INDEX);
 
-#if HAS_APPLE_PAC && XNU_MONITOR
+#if HAS_APPLE_PAC
 PMAP_SUPPORT_PROTOTYPES(
        void *,
        pmap_sign_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_SIGN_USER_PTR);
 PMAP_SUPPORT_PROTOTYPES(
        void *,
        pmap_auth_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_AUTH_USER_PTR);
-#endif /* HAS_APPLE_PAC && XNU_MONITOR */
+#endif /* HAS_APPLE_PAC */
+
+
+
 
+PMAP_SUPPORT_PROTOTYPES(
+       bool,
+       pmap_is_trust_cache_loaded, (const uuid_t uuid), PMAP_IS_TRUST_CACHE_LOADED_INDEX);
+
+PMAP_SUPPORT_PROTOTYPES(
+       uint32_t,
+       pmap_lookup_in_static_trust_cache, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_STATIC_TRUST_CACHE_INDEX);
 
+PMAP_SUPPORT_PROTOTYPES(
+       bool,
+       pmap_lookup_in_loaded_trust_caches, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_LOADED_TRUST_CACHES_INDEX);
 
 
 #if XNU_MONITOR
@@ -2392,6 +2417,9 @@ const void * __ptrauth_ppl_handler const ppl_handler_table[PMAP_COUNT] = {
        [PMAP_RELEASE_PAGES_TO_KERNEL_INDEX] = pmap_release_ppl_pages_to_kernel_internal,
        [PMAP_SET_VM_MAP_CS_ENFORCED_INDEX] = pmap_set_vm_map_cs_enforced_internal,
        [PMAP_SET_JIT_ENTITLED_INDEX] = pmap_set_jit_entitled_internal,
+       [PMAP_IS_TRUST_CACHE_LOADED_INDEX] = pmap_is_trust_cache_loaded_internal,
+       [PMAP_LOOKUP_IN_STATIC_TRUST_CACHE_INDEX] = pmap_lookup_in_static_trust_cache_internal,
+       [PMAP_LOOKUP_IN_LOADED_TRUST_CACHES_INDEX] = pmap_lookup_in_loaded_trust_caches_internal,
        [PMAP_TRIM_INDEX] = pmap_trim_internal,
        [PMAP_LEDGER_ALLOC_INIT_INDEX] = pmap_ledger_alloc_init_internal,
        [PMAP_LEDGER_ALLOC_INDEX] = pmap_ledger_alloc_internal,
@@ -10957,62 +10985,13 @@ pmap_unmap_cpu_windows_copy(
 
 #if XNU_MONITOR
 
-/*
- * The HMAC SHA driver needs to be able to operate on physical pages in
- * place without copying them out. This function provides an interface
- * to run a callback on a given page, making use of a CPU copy window
- * if necessary.
- *
- * This should only be used during the hibernation process since every DRAM page
- * will be mapped as VM_WIMG_DEFAULT. This can cause coherency issues if the pages
- * were originally mapped as VM_WIMG_IO/RT. In the hibernation case, by the time
- * we start copying memory all other agents shouldn't be writing to memory so we
- * can ignore these coherency issues. Regardless of this code, if other agents
- * were modifying memory during the image creation process, there would be
- * issues anyway.
- */
 MARK_AS_PMAP_TEXT void
 pmap_invoke_with_page(
        ppnum_t page_number,
        void *ctx,
        void (*callback)(void *ctx, ppnum_t page_number, const void *page))
 {
-#if HIBERNATION
-       /* This function should only be used from within a hibernation context. */
-       assert((gIOHibernateState == kIOHibernateStateHibernating) ||
-           (gIOHibernateState == kIOHibernateStateWakingFromHibernate));
-
-       /* from bcopy_phys_internal */
-       vm_offset_t src = ptoa_64(page_number);
-       vm_offset_t tmp_src;
-       bool use_copy_window_src = !pmap_valid_address(src);
-       unsigned int src_index;
-       if (use_copy_window_src) {
-               unsigned int wimg_bits_src = pmap_cache_attributes(page_number);
-
-               /**
-                * Always map DRAM as VM_WIMG_DEFAULT (regardless of whether it's
-                * kernel-managed) to denote that it's safe to use memcpy on it.
-                */
-               if (is_dram_addr(src)) {
-                       wimg_bits_src = VM_WIMG_DEFAULT;
-               }
-
-               src_index = pmap_map_cpu_windows_copy_internal(page_number, VM_PROT_READ, wimg_bits_src);
-               tmp_src = pmap_cpu_windows_copy_addr(pmap_get_cpu_data()->cpu_number, src_index);
-       } else {
-               vm_size_t count = PAGE_SIZE;
-               tmp_src = phystokv_range((pmap_paddr_t)src, &count);
-       }
-
-       callback(ctx, page_number, (const void *)tmp_src);
-
-       if (use_copy_window_src) {
-               pmap_unmap_cpu_windows_copy_internal(src_index);
-       }
-#else
        #pragma unused(page_number, ctx, callback)
-#endif /* HIBERNATION */
 }
 
 /*
@@ -11465,7 +11444,7 @@ pmap_trim(
 #endif
 }
 
-#if HAS_APPLE_PAC && XNU_MONITOR
+#if HAS_APPLE_PAC
 static void *
 pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key)
 {
@@ -11520,7 +11499,7 @@ pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator, uint64_
 {
        return pmap_auth_user_ptr_internal(value, key, discriminator, jop_key);
 }
-#endif /* HAS_APPLE_PAC && XNU_MONITOR */
+#endif /* HAS_APPLE_PAC */
 
 /*
  *     kern_return_t pmap_nest(grand, subord, vstart, size)
@@ -12682,10 +12661,6 @@ cache_skip_pve:
                }
        }
        if (tlb_flush_needed) {
-               /* For targets that distinguish between mild and strong DSB, mild DSB
-                * will not drain the prefetcher.  This can lead to prefetch-driven
-                * cache fills that defeat the uncacheable requirement of the RT memory type.
-                * In those cases, strong DSB must instead be employed to drain the prefetcher. */
                pmap_sync_tlb((attributes & VM_WIMG_MASK) == VM_WIMG_RT);
        }
 
@@ -14794,6 +14769,161 @@ pmap_return(boolean_t do_panic, boolean_t do_recurse)
 
 
 
+kern_return_t
+pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache,
+    const vm_size_t __unused trust_cache_len)
+{
+       // Unsupported
+       return KERN_NOT_SUPPORTED;
+}
+
+pmap_tc_ret_t
+pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache,
+    const vm_size_t __unused trust_cache_len,
+    uint8_t const * __unused img4_manifest,
+    const vm_size_t __unused img4_manifest_buffer_len,
+    const vm_size_t __unused img4_manifest_actual_len,
+    bool __unused dry_run)
+{
+       // Unsupported
+       return PMAP_TC_UNKNOWN_FORMAT;
+}
+
+bool
+pmap_in_ppl(void)
+{
+       // Unsupported
+       return false;
+}
+
+void
+pmap_lockdown_image4_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags)
+{
+       // Unsupported
+}
+
+void *
+pmap_claim_reserved_ppl_page(void)
+{
+       // Unsupported
+       return NULL;
+}
+
+void
+pmap_free_reserved_ppl_page(void __unused *kva)
+{
+       // Unsupported
+}
+
+
+MARK_AS_PMAP_TEXT static bool
+pmap_is_trust_cache_loaded_internal(const uuid_t uuid)
+{
+       bool found = false;
+
+       pmap_simple_lock(&pmap_loaded_trust_caches_lock);
+
+       for (struct pmap_image4_trust_cache const *c = pmap_image4_trust_caches; c != NULL; c = c->next) {
+               if (bcmp(uuid, c->module->uuid, sizeof(uuid_t)) == 0) {
+                       found = true;
+                       goto done;
+               }
+       }
+
+#ifdef PLATFORM_BridgeOS
+       for (struct pmap_legacy_trust_cache const *c = pmap_legacy_trust_caches; c != NULL; c = c->next) {
+               if (bcmp(uuid, c->uuid, sizeof(uuid_t)) == 0) {
+                       found = true;
+                       goto done;
+               }
+       }
+#endif
+
+done:
+       pmap_simple_unlock(&pmap_loaded_trust_caches_lock);
+       return found;
+}
+
+bool
+pmap_is_trust_cache_loaded(const uuid_t uuid)
+{
+#if XNU_MONITOR
+       return pmap_is_trust_cache_loaded_ppl(uuid);
+#else
+       return pmap_is_trust_cache_loaded_internal(uuid);
+#endif
+}
+
+MARK_AS_PMAP_TEXT static bool
+pmap_lookup_in_loaded_trust_caches_internal(const uint8_t cdhash[CS_CDHASH_LEN])
+{
+       struct pmap_image4_trust_cache const *cache = NULL;
+#ifdef PLATFORM_BridgeOS
+       struct pmap_legacy_trust_cache const *legacy = NULL;
+#endif
+
+       pmap_simple_lock(&pmap_loaded_trust_caches_lock);
+
+       for (cache = pmap_image4_trust_caches; cache != NULL; cache = cache->next) {
+               uint8_t hash_type = 0, flags = 0;
+
+               if (lookup_in_trust_cache_module(cache->module, cdhash, &hash_type, &flags)) {
+                       goto done;
+               }
+       }
+
+#ifdef PLATFORM_BridgeOS
+       for (legacy = pmap_legacy_trust_caches; legacy != NULL; legacy = legacy->next) {
+               for (uint32_t i = 0; i < legacy->num_hashes; i++) {
+                       if (bcmp(legacy->hashes[i], cdhash, CS_CDHASH_LEN) == 0) {
+                               goto done;
+                       }
+               }
+       }
+#endif
+
+done:
+       pmap_simple_unlock(&pmap_loaded_trust_caches_lock);
+
+       if (cache != NULL) {
+               return true;
+#ifdef PLATFORM_BridgeOS
+       } else if (legacy != NULL) {
+               return true;
+#endif
+       }
+
+       return false;
+}
+
+bool
+pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN])
+{
+#if XNU_MONITOR
+       return pmap_lookup_in_loaded_trust_caches_ppl(cdhash);
+#else
+       return pmap_lookup_in_loaded_trust_caches_internal(cdhash);
+#endif
+}
+
+MARK_AS_PMAP_TEXT static uint32_t
+pmap_lookup_in_static_trust_cache_internal(const uint8_t cdhash[CS_CDHASH_LEN])
+{
+       // Awkward indirection, because the PPL macros currently force their functions to be static.
+       return lookup_in_static_trust_cache(cdhash);
+}
+
+uint32_t
+pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN])
+{
+#if XNU_MONITOR
+       return pmap_lookup_in_static_trust_cache_ppl(cdhash);
+#else
+       return pmap_lookup_in_static_trust_cache_internal(cdhash);
+#endif
+}
+
+
 MARK_AS_PMAP_TEXT static void
 pmap_footprint_suspend_internal(
        vm_map_t        map,
@@ -15165,12 +15295,6 @@ pmap_test_test_config(unsigned int flags)
        T_LOG("Validate that writes to our mapping do not fault.");
        pmap_test_write(pmap, va_base, false);
 
-#if PMAP_CS
-       bool pmap_cs_enforced = pmap->pmap_cs_enforced;
-
-       T_LOG("Disable PMAP CS enforcement");
-       pmap_cs_configure_enforcement(pmap, false);
-#endif
 
        T_LOG("Make the first mapping XO.");
        pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
@@ -15186,10 +15310,6 @@ pmap_test_test_config(unsigned int flags)
        T_LOG("Validate that writes to our mapping fault.");
        pmap_test_write(pmap, va_base, true);
 
-#if PMAP_CS
-       T_LOG("Set PMAP CS enforcement configuration to previous value.");
-       pmap_cs_configure_enforcement(pmap, pmap_cs_enforced);
-#endif
 
        /*
         * For page ratios of greater than 1: validate that writes to the other
index 7fe880f18e568191586950f80227c04363282140..062e8b006217739e33ccd7c9a4949b826a10c99f 100644 (file)
@@ -373,6 +373,10 @@ struct pmap {
        unsigned int            tte_index_max;          /* max tte index in translation table entries */
 #endif
 
+       void *                  reserved0;
+       void *                  reserved1;
+       uint64_t                reserved2;
+       uint64_t                reserved3;
 
        unsigned int            stamp;                  /* creation stamp */
        _Atomic int32_t         ref_count;              /* pmap reference count */
@@ -387,7 +391,13 @@ struct pmap {
        char                    pmap_procname[17];
        bool            pmap_stats_assert;
 #endif /* MACH_ASSERT */
+       bool                    reserved4;
        bool                    pmap_vm_map_cs_enforced;
+       boolean_t               reserved5;
+       uint64_t                reserved6;
+       uint64_t                reserved7;
+       bool                    reserved8;
+       bool                    reserved9;
 #if DEVELOPMENT || DEBUG
        bool            footprint_suspended;
        bool            footprint_was_suspended;
@@ -399,6 +409,8 @@ struct pmap {
        bool            nested_bounds_set;                      /* The nesting bounds have been set */
 #if HAS_APPLE_PAC
        bool            disable_jop;
+#else
+       bool            reserved10;
 #endif /* HAS_APPLE_PAC */
 };
 
@@ -446,7 +458,7 @@ extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
 extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
 extern void pmap_collect(pmap_t pmap);
 extern  void pmap_gc(void);
-#if HAS_APPLE_PAC && XNU_MONITOR
+#if HAS_APPLE_PAC
 extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
 extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
 #endif /* HAS_APPLE_PAC */
index c04e0f478782456f97d1556b87ce5ca217bc70bd..0ebbf526bc9c96bb082c3f84d60000ab3f813752 100644 (file)
 #define MMU_I_CLINE     5                      /* cache line size as 1<<MMU_I_CLINE (32) */
 
 /* D-Cache */
+#define MMU_CSIZE       15                     /* cache size as 1<<MMU_CSIZE (32K) */
 #define MMU_CLINE       6                      /* cache line size as 1<<MMU_CLINE (64) */
+#define MMU_NWAY        2                      /* set associativity 1<<MMU_NWAY (4) */
+#define MMU_I7SET       6                      /* cp15 c7 set incrementer 1<<MMU_I7SET */
+#define MMU_I7WAY       30                     /* cp15 c7 way incrementer 1<<MMU_I7WAY */
+
+#define MMU_SWAY        (MMU_CSIZE - MMU_NWAY) /* set size 1<<MMU_SWAY */
+#define MMU_NSET        (MMU_SWAY - MMU_CLINE) /* lines per way 1<<MMU_NSET */
+
+#define __ARM_L2CACHE__ 1
+
+#define L2_CSIZE        20                       /* cache size as 1<<L2_CSIZE (1M) */
+#define L2_CLINE        6                        /* cache line size as 1<<L2_CLINE (64) */
+#define L2_NWAY         3                        /* set associativity 1<<L2_NWAY (8) */
+#define L2_I7SET        6                        /* cp15 c7 set incrementer 1<<L2_I7SET */
+#define L2_I7WAY        29                       /* cp15 c7 way incrementer 1<<L2_I7WAY */
+#define L2_I9WAY        29                       /* cp15 c9 way incrementer 1<<L2_I9WAY */
+
+#define L2_SWAY         (L2_CSIZE - L2_NWAY)     /* set size 1<<L2_SWAY */
+#define L2_NSET         (L2_SWAY - L2_CLINE)     /* lines per way 1<<L2_NSET */
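
These constants carry an invariant: ways x sets x line size must equal the cache size, since the SWAY/NSET values are derived by subtracting the log2 quantities. For the geometry above that is 4 ways x 128 sets x 64 B = 32 KB for the L1 D-cache, and 8 ways x 2048 sets x 64 B = 1 MB for the L2. A compile-time check, as a sketch:

    #include <assert.h>

    static_assert((1u << MMU_NWAY) * (1u << MMU_NSET) * (1u << MMU_CLINE) == (1u << MMU_CSIZE),
        "L1 D-cache: ways * sets * line size == cache size");
    static_assert((1u << L2_NWAY) * (1u << L2_NSET) * (1u << L2_CLINE) == (1u << L2_CSIZE),
        "L2 cache: ways * sets * line size == cache size");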
 
 #elif defined (APPLETYPHOON)
 
 /* D-Cache, 128KB for Lightning, 8-way. 48KB for Thunder, 6-way. */
 #define MMU_CLINE   6                      /* cache line size is 1<<MMU_CLINE (64) */
 
+#elif defined (APPLEFIRESTORM)
+
+/* I-Cache, 256KB for Firestorm, 128KB for Icestorm, 6-way. */
+#define MMU_I_CLINE 6                      /* cache line size as 1<<MMU_I_CLINE (64) */
+
+/* D-Cache, 160KB for Firestorm, 8-way. 64KB for Icestorm, 6-way. */
+#define MMU_CLINE   6                      /* cache line size is 1<<MMU_CLINE (64) */
+
 #elif defined (BCM2837) /* Raspberry Pi 3 */
 
 /* I-Cache. We don't have detailed spec so we just follow the ARM technical reference. */
index ab928722ea4d42720f9d3a7199e3dea71a3eb74d..be0db89e9f8b3fe7e21ccb81a54017bd808e97a5 100644 (file)
@@ -30,7 +30,6 @@
 #include <arm/proc_reg.h>
 #include <mach_kdp.h>
 #include "assym.s"
-#include "caches_macros.s"
 
        .text
        .align 12
@@ -282,34 +281,31 @@ doneveqp:
 
        // clean the dcache
        mov             r11, #0
-       GET_CACHE_CONFIG r11, r2, r3, r4
-       mov             r11, #0
 cleanflushway:
 cleanflushline:                
        mcr             p15, 0, r11, c7, c14, 2                          // cleanflush dcache line by way/set
-       add             r11, r11, r2                                             // increment set index
-       tst             r11, r3                                                          // look for overflow
+       add             r11, r11, #1 << MMU_I7SET                        // increment set index
+       tst             r11, #1 << (MMU_NSET + MMU_I7SET)        // look for overflow
        beq             cleanflushline
-       bic             r11, r11, r3                                             // clear set overflow
-       adds    r11, r11, r4                                             // increment way
+       bic             r11, r11, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow
+       adds    r11, r11, #1 << MMU_I7WAY                        // increment way
        bcc             cleanflushway                                            // loop
-       HAS_L2_CACHE r11
-       cmp             r11, #0
-       beq             invall2skipl2dcache
+
+#if    __ARM_L2CACHE__
        // Invalidate L2 cache
-       mov             r11, #1
-       GET_CACHE_CONFIG r11, r2, r3, r4
        mov             r11, #2
 invall2flushway:
 invall2flushline:              
        mcr             p15, 0, r11, c7, c14, 2                          // Invalidate dcache line by way/set
-       add             r11, r11, r2                                             // increment set index
-       tst             r11, r3                                                          // look for overflow
+       add             r11, r11, #1 << L2_I7SET                         // increment set index
+       tst             r11, #1 << (L2_NSET + L2_I7SET)          // look for overflow
        beq             invall2flushline
-       bic             r11, r11, r3                                             // clear set overflow
-       adds    r11, r11, r4                                             // increment way
+       bic             r11, r11, #1 << (L2_NSET + L2_I7SET) // clear set overflow
+       adds    r11, r11, #1 << L2_I7WAY                         // increment way
        bcc             invall2flushway                                          // loop
-invall2skipl2dcache:
+
+#endif
+
        mov             r11, #0
        mcr             p15, 0, r11, c13, c0, 3                         // Write TPIDRURO
        LOAD_ADDR(sp, intstack_top)                                     // Get interrupt stack top
index 2386bd58f1ac19120dc0b878952e55598c154bf0..ad331774b6cb64470fd4060fd9cb7c4d3c32572b 100644 (file)
@@ -92,6 +92,10 @@ struct machine_thread {
        unsigned int              uptw_ttb;
        unsigned int              kptw_ttb;
        unsigned int              asid;
+#else
+       unsigned int              reserved0;
+       unsigned int              reserved1;
+       unsigned int              reserved2;
 #endif
 
 #if __arm64__
@@ -99,6 +103,8 @@ struct machine_thread {
        arm_saved_state_t *       XNU_PTRAUTH_SIGNED_PTR("machine_thread.upcb") upcb;   /* pointer to user GPR state */
        arm_neon_saved_state_t *  uNeon;                   /* pointer to user VFP state */
        arm_saved_state_t *       kpcb;                    /* pointer to kernel GPR state */
+       void *                    reserved3;
+       long                      reserved4;
        uint64_t                  recover_far;
 #elif __arm__
        struct arm_saved_state    PcbData;
@@ -125,6 +131,7 @@ struct machine_thread {
 #if __arm64__
        uint64_t                  energy_estimate_nj;
 #endif
+       uint64_t                  reserved5;
 
 #if INTERRUPT_MASKED_DEBUG
        uint64_t                  intmask_timestamp;          /* timestamp of when interrupts were manually masked */
@@ -139,16 +146,22 @@ struct machine_thread {
        volatile uintptr_t                 expected_fault_addr;
 #endif
 
+       uint64_t                  reserved6;
        vm_offset_t               pcpu_data_base;
        struct cpu_data *         CpuDatap;               /* current per cpu data */
        unsigned int              preemption_count;       /* preemption count */
 #if __arm64__
        uint16_t                  exception_trace_code;
 #endif
+       uint8_t                   reserved7;
 #if defined(HAS_APPLE_PAC)
        uint8_t                   disable_user_jop;
        uint64_t                  rop_pid;
        uint64_t                  jop_pid;
+#else
+       uint8_t                   reserved8;
+       uint64_t                  reserved9;
+       uint64_t                  reserved10;
 #endif
 };
 #endif
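
The reserved0 through reserved10 fields added above pad the
configuration-dependent arms of machine_thread so that member offsets and the
overall struct size stay identical whether or not a given feature (user TTBs,
HAS_APPLE_PAC, and so on) is compiled in; offsets emitted by genassym then hold
for every configuration. A self-contained illustration of the pattern (not the
real struct; sizes assumed for a 64-bit ABI):

#include <stdint.h>

#define HAS_FEATURE 0   /* flip to 1: the layout must not change */

struct padded_example {
#if HAS_FEATURE
        uint8_t  disable_user_jop;
        uint64_t rop_pid;
        uint64_t jop_pid;
#else
        uint8_t  reserved8;
        uint64_t reserved9;
        uint64_t reserved10;
#endif
};

_Static_assert(sizeof(struct padded_example) == 24,
    "padding must keep the layout configuration-independent");
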
diff --git a/osfmk/arm/trustcache.c b/osfmk/arm/trustcache.c
new file mode 100644 (file)
index 0000000..125a250
--- /dev/null
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2011-2018 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <string.h>
+
+#include <arm/pmap.h>
+
+#include <kern/debug.h>
+#include <kern/trustcache.h>
+#include <kern/misc_protos.h>
+
+#include <libkern/section_keywords.h>
+
+#include <mach/machine/vm_types.h>
+
+#include <pexpert/device_tree.h>
+
+#include <sys/cdefs.h>
+
+// All the external+engineering trust caches (accepting only one on RELEASE).
+SECURITY_READ_ONLY_LATE(static struct serialized_trust_caches *)pmap_serialized_trust_caches = NULL;
+
+// Shortcut to the first (= non-engineering, and therefore "static") trust cache.
+SECURITY_READ_ONLY_LATE(static struct trust_cache_module1 *)pmap_static_trust_cache = NULL;
+
+#if CONFIG_SECOND_STATIC_TRUST_CACHE
+SECURITY_READ_ONLY_LATE(static struct trust_cache_module1 *)pmap_secondary_static_trust_cache = NULL;
+#endif
+
+// The EXTRADATA segment is where we find the external trust cache.
+extern vm_offset_t   segEXTRADATA;
+extern unsigned long segSizeEXTRADATA;
+
+void
+trust_cache_init(void)
+{
+       size_t const len = segSizeEXTRADATA;
+
+       if (len == 0) {
+#if XNU_TARGET_OS_OSX
+               printf("No external trust cache found (region len is 0).");
+#else
+               panic("No external trust cache found (region len is 0).");
+#endif
+               return;
+       }
+
+       size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
+
+       pmap_serialized_trust_caches = (struct serialized_trust_caches*)(segEXTRADATA +
+           locked_down_dt_size);
+
+       uint8_t const *region_end = (uint8_t*)pmap_serialized_trust_caches + len;
+
+       /* Validate the trust cache region for consistency.
+        *
+        * Technically, this shouldn't be necessary because any problem
+        * here would indicate that iBoot is either broken or compromised,
+        * but we do it anyway to assist in development, and for defense
+        * in depth.
+        */
+
+       if (len < sizeof(struct serialized_trust_caches)) {
+               panic("short serialized trust cache region: %zu", len);
+       }
+
+       printf("%d external trust cache modules available.\n", pmap_serialized_trust_caches->num_caches);
+
+       if (len < (sizeof(struct serialized_trust_caches) +
+           pmap_serialized_trust_caches->num_caches * sizeof(uint32_t))) {
+               panic("serialized trust cache region too short for its %d entries: %zu",
+                   pmap_serialized_trust_caches->num_caches, len);
+       }
+
+       uint8_t *module_end = (uint8_t*)pmap_serialized_trust_caches;
+
+       for (uint32_t i = 0; i < pmap_serialized_trust_caches->num_caches; i++) {
+               struct trust_cache_module1 *module = (struct trust_cache_module1*)
+                   ((uint8_t*)pmap_serialized_trust_caches + pmap_serialized_trust_caches->offsets[i]);
+
+               if ((uint8_t*)module < module_end) {
+                       panic("trust cache module %d overlaps previous module", i);
+               }
+
+               module_end = (uint8_t*)(module + 1);
+
+               if (module_end > region_end) {
+                       panic("trust cache module %d too short for header", i);
+               }
+
+               if (module->version != 1) {
+                       panic("trust cache module %d has unsupported version %d", i, module->version);
+               }
+
+               module_end += module->num_entries * sizeof(struct trust_cache_entry1);
+
+               if (module_end > region_end) {
+                       panic("trust cache module %d too short for its %u entries", i, module->num_entries);
+               }
+
+               printf("external trust cache module %d with %d entries\n", i, module->num_entries);
+
+               if (i == 0) {
+                       pmap_static_trust_cache = module;
+               }
+#if CONFIG_SECOND_STATIC_TRUST_CACHE
+               else if (i == 1) {
+                       pmap_secondary_static_trust_cache = module;
+               }
+#endif
+       }
+}
+
+
+// Lookup cdhash in a trust cache module.
+// Suitable for all kinds of trust caches (but loadable ones are currently different).
+bool
+lookup_in_trust_cache_module(
+       struct trust_cache_module1 const * const module,
+       uint8_t const   cdhash[CS_CDHASH_LEN],
+       uint8_t * const hash_type,
+       uint8_t * const flags)
+{
+       size_t lim;
+       struct trust_cache_entry1 const *base = &module->entries[0];
+
+       struct trust_cache_entry1 const *entry = NULL;
+
+       bool found = false;
+
+       /* Initialization already (redundantly) verified the size of the module for us. */
+       for (lim = module->num_entries; lim != 0; lim >>= 1) {
+               entry = base + (lim >> 1);
+               int cmp = memcmp(cdhash, entry->cdhash, CS_CDHASH_LEN);
+               if (cmp == 0) {
+                       found = true;
+                       break;
+               }
+               if (cmp > 0) {  /* key > p: move right */
+                       base = entry + 1;
+                       lim--;
+               }               /* else move left */
+       }
+
+       if (found) {
+               *hash_type = entry->hash_type;
+               *flags = entry->flags;
+               return true;
+       }
+
+       return false;
+}
+
+MARK_AS_PMAP_TEXT uint32_t
+lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN])
+{
+       /* We will cram those into a single return value, because output parameters require
+        * some contortion. */
+       uint8_t hash_type = 0, flags = 0;
+       uint32_t engineering_trust_cache_index = 1;
+
+       if (pmap_static_trust_cache != NULL) {
+               // The one real new static trust cache.
+               if (lookup_in_trust_cache_module(pmap_static_trust_cache, cdhash, &hash_type, &flags)) {
+                       return (hash_type << TC_LOOKUP_HASH_TYPE_SHIFT) |
+                              (flags << TC_LOOKUP_FLAGS_SHIFT) |
+                              (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
+               }
+#if CONFIG_SECOND_STATIC_TRUST_CACHE
+               if (pmap_secondary_static_trust_cache != NULL &&
+                   lookup_in_trust_cache_module(pmap_secondary_static_trust_cache, cdhash, &hash_type, &flags)) {
+                       return (hash_type << TC_LOOKUP_HASH_TYPE_SHIFT) |
+                              (flags << TC_LOOKUP_FLAGS_SHIFT) |
+                              (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
+               }
+               engineering_trust_cache_index = (pmap_secondary_static_trust_cache != NULL) ? 2 : 1;
+#endif
+
+               // Engineering Trust Caches.
+               if (pmap_serialized_trust_caches->num_caches > engineering_trust_cache_index) {
+#if DEVELOPMENT || DEBUG
+                       for (uint32_t i = engineering_trust_cache_index; i < pmap_serialized_trust_caches->num_caches; i++) {
+                               struct trust_cache_module1 const *module =
+                                   (struct trust_cache_module1 const *)(
+                                       (uint8_t*)pmap_serialized_trust_caches + pmap_serialized_trust_caches->offsets[i]);
+
+                               if (lookup_in_trust_cache_module(module, cdhash, &hash_type, &flags)) {
+                                       return (hash_type << TC_LOOKUP_HASH_TYPE_SHIFT) |
+                                              (flags << TC_LOOKUP_FLAGS_SHIFT) |
+                                              (TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
+                               }
+                       }
+#else
+                       panic("Number of trust caches: %d. How could we let this happen?",
+                           pmap_serialized_trust_caches->num_caches);
+#endif
+               }
+       }
+
+       return 0;
+}
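
lookup_in_trust_cache_module() above is the classic shrinking-limit binary
search (the same shape as BSD's bsearch) over entries sorted by cdhash, and
lookup_in_static_trust_cache() packs hash type, flags, and a found bit into a
single uint32_t so the PPL boundary avoids output parameters. A standalone
sketch of the search, with assumed type and constant names:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define CDHASH_LEN 20   /* assumed; CS_CDHASH_LEN in the kernel */

struct tc_entry_sketch {
        uint8_t cdhash[CDHASH_LEN];
        uint8_t hash_type;
        uint8_t flags;
};

static bool
tc_lookup_sketch(const struct tc_entry_sketch *base, size_t nentries,
    const uint8_t key[CDHASH_LEN], uint8_t *hash_type, uint8_t *flags)
{
        for (size_t lim = nentries; lim != 0; lim >>= 1) {
                const struct tc_entry_sketch *p = base + (lim >> 1);
                int cmp = memcmp(key, p->cdhash, CDHASH_LEN);
                if (cmp == 0) {
                        *hash_type = p->hash_type;
                        *flags = p->flags;
                        return true;
                }
                if (cmp > 0) {          /* key sorts after p: search upper half */
                        base = p + 1;
                        lim--;
                }                       /* else: search lower half */
        }
        return false;
}

Each halving step either lands on the entry or discards half of the remaining
range, so a lookup is O(log n) in a module's num_entries.
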
index 35128d53ee7857fd92dd56458bbfc696321299aa..fe0c6f80b307a686776fa4fb72e79171f8c61c51 100644 (file)
@@ -42,7 +42,6 @@
 #include <arm/pmap.h>
 #include <arm64/tlb.h>
 #include <arm64/amcc_rorgn.h>
-#include <memmap_types.h>
 
 #if HIBERNATION
 #include <arm64/pal_hibernate.h>
index 54a45f20290e381a999214d791fdeef53d21e4e0..fe6523791d3523ade1843417137d00ff94245cf3 100644 (file)
@@ -71,13 +71,6 @@ static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_RO
 #endif /* __ARM_KERNEL_PROTECT__ */
 
 #if __APRR_SUPPORTED__ && XNU_MONITOR
-/*
- * If APRR is supported, setting XN on L1/L2 table entries will shift the effective
- * APRR index of L3 PTEs covering PPL-protected pages in the kernel dynamic region
- * from PPL R/W to kernel R/W.  That will effectively remove PPL write protection
- * from those pages.  Avoid setting XN at the table level for MONITOR-enabled builds
- * that are backed by APRR.
- */
 #define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
 #else
 #define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
@@ -2003,6 +1996,7 @@ arm_vm_init(uint64_t memory_size, boot_args * args)
        arm_vm_physmap_init(args);
        set_mmu_ttb_alternate(cpu_ttep & TTBR_BADDR_MASK);
 
+       ml_enable_monitor();
 
        set_mmu_ttb(invalid_ttep & TTBR_BADDR_MASK);
 
index fc65933e54839c0f9e0dae8e88f907fcba239922..ad3f9a68511705fc28d7987cef52236d5ef895af 100644 (file)
@@ -623,6 +623,9 @@ cpu_init(void)
                case CPU_ARCH_ARMv8:
                        cdp->cpu_subtype = CPU_SUBTYPE_ARM64_V8;
                        break;
+               case CPU_ARCH_ARMv8E:
+                       cdp->cpu_subtype = CPU_SUBTYPE_ARM64E;
+                       break;
                default:
                        //cdp->cpu_subtype = CPU_SUBTYPE_ARM64_ALL;
                        /* this panic doesn't work this early in startup */
index 05c38a36d70db523cb80f75508d57f1976dcae1f..996783d64e4ed6ee48abf574af2dacc046cec676 100644 (file)
        ldr             \cpudatap, [\thread, ACT_CPUDATAP]
 #endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */
 
+#if defined(__ARM_ARCH_8_5__)
+       ldrb    \wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]
+#else /* defined(__ARM_ARCH_8_5__) */
        mov             \wsync, #0
+#endif
 
 
 #if defined(HAS_APPLE_PAC)
@@ -227,6 +231,9 @@ Lskip_jop_keys_\@:
        cbz             \wsync, 1f
        isb     sy
 
+#if defined(__ARM_ARCH_8_5__)
+       strb    wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
+#endif
 1:
 .endmacro
 
index e3ec822bf6b030f31dba73f3c9fab6de03df4aec..18fd6df99d8794e091b363b5c7ef4b4cd18ce3f4 100644 (file)
@@ -37,8 +37,6 @@
 #define PPL_EXIT_BAD_CALL   2 /* The PPL request failed. */
 #define PPL_EXIT_EXCEPTION  3 /* The PPL took an exception. */
 
-/* Guarded mode trap numbers: these are passed as the genter immediate. */
-#define GXF_ENTER_PPL 0
 
 #define KERNEL_MODE_ELR      ELR_GL11
 #define KERNEL_MODE_FAR      FAR_GL11
  * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
  * These keys are deliberately kept loaded into the CPU for later kernel use.
  *
+ *   arg0 - KERNEL_MODE or HIBERNATE_MODE
  *   x0 - Address of the save area
  */
+#define KERNEL_MODE 0
+#define HIBERNATE_MODE 1
 
 .macro SPILL_REGISTERS mode
        stp             x2, x3, [x0, SS64_X2]                                   // Save remaining GPRs
index 75501d84661beee8a099822ca3232ac37a91dfab..afe165947475ed96fde5d3e6f9ecf605ec0cad58 100644 (file)
@@ -322,13 +322,16 @@ main(int     argc,
 #endif /* defined(HAS_APPLE_PAC) */
 
 
+#if __ARM_ARCH_8_5__
+       DECLARE("CPU_SYNC_ON_CSWITCH", offsetof(cpu_data_t, sync_on_cswitch));
+#endif /* __ARM_ARCH_8_5__ */
 
 #if HIBERNATION
        DECLARE("HIBHDR_STACKOFFSET", offsetof(IOHibernateImageHeader, restore1StackOffset));
        DECLARE("HIBTRAMP_TTBR0", offsetof(pal_hib_tramp_result_t, ttbr0));
        DECLARE("HIBTRAMP_TTBR1", offsetof(pal_hib_tramp_result_t, ttbr1));
        DECLARE("HIBTRAMP_MEMSLIDE", offsetof(pal_hib_tramp_result_t, memSlide));
-       DECLARE("HIBTRAMP_KERNELSLIDE", offsetof(pal_hib_tramp_result_t, kernelSlide));
+       DECLARE("HIBGLOBALS_KERNELSLIDE", offsetof(pal_hib_globals_t, kernelSlide));
 #endif /* HIBERNATION */
 
        return 0;
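
The DECLARE() lines feed the genassym step: a C translation unit computes
struct offsets with offsetof(), and the build turns them into an
assembler-visible header so .s files such as cswitch.s can name fields like
CPU_SYNC_ON_CSWITCH. A sketch of the idea as a printing genassym (xnu extracts
the constants from the compiled object instead, but the effect is the same;
the struct contents here are assumed):

#include <stddef.h>
#include <stdio.h>

typedef struct {
        int  filler;
        long sync_on_cswitch;
} cpu_data_sketch_t;

#define DECLARE(name, value) \
        printf("#define %s\t0x%lx\n", (name), (unsigned long)(value))

int
main(void)
{
        DECLARE("CPU_SYNC_ON_CSWITCH",
            offsetof(cpu_data_sketch_t, sync_on_cswitch));
        return 0;
}
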
index 5a3e356b4c11ca442a2483b9d61541219b51f6bb..18851d929efa0fe1a59c4f8f6398eba70524b88f 100644 (file)
 #include <arm/cpu_data_internal.h>
 #include <machine/pal_hibernate.h>
 
-#if HIBERNATE_HMAC_IMAGE
-#include <arm64/hibernate_ppl_hmac.h>
-#include <arm64/ppl/ppl_hib.h>
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 extern void
 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
@@ -98,39 +94,9 @@ hibernate_page_list_allocate(boolean_t log)
        hibernate_page_list_t * list;
        hibernate_bitmap_t *    bitmap;
 
-#if HIBERNATE_HMAC_IMAGE
-       // Determine if any PPL-owned I/O ranges need to be hibernated, and if so,
-       // allocate bitmaps to represent those pages.
-       const ppl_hib_io_range *io_ranges = NULL;
-       uint16_t                num_io_ranges = 0;
-       hibernate_bitmap_t *    dram_ranges = NULL;
-       uint32_t                num_banks = 1;
-
-       ppl_hmac_get_io_ranges(&io_ranges, &num_io_ranges);
-
-       // Allocate a single DRAM range to cover kernel-managed memory and one range
-       // per PPL-owned I/O range that needs to be hibernated.
-       if (io_ranges != NULL && num_io_ranges > 0) {
-               num_banks += num_io_ranges;
-       }
-
-       dram_ranges = kheap_alloc(KHEAP_TEMP,
-           num_banks * sizeof(hibernate_bitmap_t), Z_WAITOK);
-       if (!dram_ranges) {
-               return NULL;
-       }
-
-       // The 0th dram range is used to represent kernel-managed memory, so skip it
-       // when adding I/O ranges.
-       for (unsigned int i = 1; i < num_banks; ++i) {
-               dram_ranges[i].first_page = io_ranges[i - 1].first_page;
-               dram_ranges[i].last_page = (io_ranges[i - 1].first_page + io_ranges[i - 1].page_count) - 1;
-       }
-#else
        // Allocate a single DRAM range to cover the kernel-managed memory.
        hibernate_bitmap_t      dram_ranges[1];
        uint32_t                num_banks = sizeof(dram_ranges) / sizeof(dram_ranges[0]);
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        // All of kernel-managed memory can be described by one DRAM range
        set_dram_range(&dram_ranges[0], gPhysBase, gPhysSize);
@@ -176,10 +142,6 @@ hibernate_page_list_allocate(boolean_t log)
        }
 
 out:
-#if HIBERNATE_HMAC_IMAGE
-       kheap_free(KHEAP_TEMP, dram_ranges,
-           num_banks * sizeof(hibernate_bitmap_t));
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        return list;
 }
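
With the PPL I/O ranges gone, all hibernatable memory is described by one
inclusive page range covering [gPhysBase, gPhysBase + gPhysSize). A sketch of
the arithmetic set_dram_range() presumably performs (page size and field names
assumed):

#include <stdint.h>

#define PAGE_SHIFT 14   /* assumed 16KB kernel pages */

typedef struct {
        uint32_t first_page;
        uint32_t last_page;     /* inclusive */
} dram_range_sketch_t;

static void
set_dram_range_sketch(dram_range_sketch_t *r, uint64_t base, uint64_t size)
{
        r->first_page = (uint32_t)(base >> PAGE_SHIFT);
        r->last_page  = (uint32_t)((base + size - 1) >> PAGE_SHIFT);
}
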
@@ -294,9 +256,7 @@ hibernate_vm_locks_are_safe(void)
 void
 pal_hib_init(void)
 {
-#if HIBERNATE_HMAC_IMAGE
-       gHibernateGlobals.hmacRegBase = ppl_hmac_get_reg_base();
-#endif /* HIBERNATE_HMAC_IMAGE */
+       gHibernateGlobals.kernelSlide = gVirtBase - gPhysBase;
 }
 
 void
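
pal_hib_init() now records the kernel slide as gVirtBase - gPhysBase in the
hibernation globals rather than deriving it per resume in the trampoline; with
that one constant, physical and kernel-virtual addresses convert by plain
addition and subtraction. A sketch (symbol roles as above):

#include <stdint.h>

static inline uint64_t
phys_to_kva_sketch(uint64_t pa, uint64_t kernel_slide)
{
        return pa + kernel_slide;       /* kernel_slide == gVirtBase - gPhysBase */
}

static inline uint64_t
kva_to_phys_sketch(uint64_t va, uint64_t kernel_slide)
{
        return va - kernel_slide;
}
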
index dcd22884e96a1541eecc232550a595437e12e649..71fcd92fb7b4dd2ce0d726deee30d75e2b4e71b9 100644 (file)
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/**
- * These functions are wrappers around the PPL HIB extension. They provide a
- * higher level interface to the PPL HIB ioctl interface, and include logic for
- * turning the HMAC block on when necessary. Refer to the comments in the PPL HIB
- * extension for more details.
- */
 #include "hibernate_ppl_hmac.h"
 
 #include <mach/vm_param.h>
 #include "pal_hibernate.h"
 #include <stdbool.h>
 
-#if XNU_MONITOR_PPL_HIB
-
-
-#error New SoC defined in board_config.h that supports PPL HIB but no \
-        embedded headers included in hibernate_ppl_hmac.c for that SoC.
-
-
-#include <soc/module/address_map.h>
-#include <soc/module/pmgr_soc.h>
-
-static ppl_iommu_state *pplHmacState;
-static void *pplHmacScratchPage;
-
-static void
-ppl_hmac_enable_aes_ps(void)
-{
-       static vm_address_t aes_ps_reg_base;
-       if (!aes_ps_reg_base) {
-               /* map the AES PS registers */
-               aes_ps_reg_base = ml_io_map(PMGR_REG_BASE, PAGE_SIZE);
-       }
-       volatile uint32_t *psreg = (volatile uint32_t *)(aes_ps_reg_base + PMGR_AES_OFFSET);
-       // set PS_MANUAL to on
-       *psreg |= 0xf;
-       while ((*psreg & 0xf) != ((*psreg >> 4) & 0xf)) {
-               // poll until the block's PS_ACTUAL matches PS_MANUAL
-       }
-}
-
-static int
-hibernate_compress_page(const void *src, void *dst)
-{
-       assert((((uint64_t)src) & PAGE_MASK) == 0);
-       assert((((uint64_t)dst) & 63) == 0);
-       struct {
-               uint32_t count:8;
-               uint32_t svp:1;
-               uint32_t reserved:3;
-               uint32_t status:3;
-               uint32_t reserved2:17;
-               uint32_t popcnt:18;
-               uint32_t reserved3:14;
-       } result = { .status = ~0u };
-       __asm__ volatile ("wkdmc %0, %1" : "=r"(result): "r"(dst), "0"(src));
-       if (result.status) {
-               return -1;
-       }
-       if (result.svp) {
-               return 0;
-       }
-       return (result.count + 1) * 64;
-}
-
-/* initialize context needed for ppl computations */
-kern_return_t
-ppl_hmac_init(void)
-{
-       // don't initialize ppl_hib if hibernation isn't supported
-       if (!ppl_hib_hibernation_supported()) {
-               return KERN_FAILURE;
-       }
-
-       if (!pplHmacState) {
-               /* construct context needed to talk to PPL */
-
-               ppl_iommu_state *pplState = NULL;
-               vm_address_t hmac_reg_base = 0;
-
-               // turn on AES_PS
-               ppl_hmac_enable_aes_ps();
-
-               // set up the hmac engine
-               hmac_reg_base = ml_io_map(HMAC_REG_BASE, PAGE_SIZE);
-               ppl_hib_init_data init_data = { .version = PPL_HIB_VERSION, .hmac_reg_base = hmac_reg_base };
-               kern_return_t kr = pmap_iommu_init(ppl_hib_get_desc(), "HMAC", &init_data, sizeof(init_data), &pplState);
-               if (kr != KERN_SUCCESS) {
-                       printf("ppl_hmac_init: failed to initialize PPL state object: 0x%x\n", kr);
-                       if (hmac_reg_base) {
-                               ml_io_unmap(hmac_reg_base, PAGE_SIZE);
-                       }
-                       return kr;
-               }
-
-               pplHmacState = pplState;
-       }
-
-       return KERN_SUCCESS;
-}
-
-/**
- * Reset state for a new signature.
- *
- * @param wired_pages True if this context will be used to hash wired pages (image1),
- *                    false otherwise (image2).
- */
-void
-ppl_hmac_reset(bool wired_pages)
-{
-       // make sure AES_PS is on
-       ppl_hmac_enable_aes_ps();
-
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_RESET,
-           &wired_pages, sizeof(wired_pages), NULL, 0);
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_reset: PPL ioctl PPL_HIB_IOCTL_RESET failed: 0x%x\n", kr);
-       }
-}
-
-/**
- * Inform HMAC driver that we're going to hibernate.
- */
-void
-ppl_hmac_hibernate_begin(void)
-{
-       uintptr_t scratchPage = 0;
-       kern_return_t kr = pmap_iommu_map(pplHmacState, NULL, 0, 0, &scratchPage);
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_register_scratch_page: pmap_iommu_map failed: 0x%x\n", kr);
-       }
-       pplHmacScratchPage = (void *)scratchPage;
-}
-
-/**
- * Inform HMAC driver that we're done hibernating.
- */
-void
-ppl_hmac_hibernate_end(void)
-{
-       pmap_iommu_unmap(pplHmacState, NULL, 0, 0, NULL);
-       pplHmacScratchPage = NULL;
-}
-
-/* get the hmac register base */
-vm_address_t
-ppl_hmac_get_reg_base(void)
-{
-       return HMAC_REG_BASE;
-}
-
-/**
- * Update the PPL HMAC hash computation with the given page.
- *
- * @param  pageNumber   Page to add into the hash.
- * @param  uncompressed Out parameter that receives a pointer to the uncompressed data of the given page.
- * @param  compressed   Buffer that will receive the compressed content of the given page
- * @result              The compressed size, 0 if the page was a single repeated value, or -1 if the page failed to compress.
- */
-int
-ppl_hmac_update_and_compress_page(ppnum_t pageNumber, void **uncompressed, void *compressed)
-{
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_UPDATE_AND_COPY_PAGE,
-           &pageNumber, sizeof(pageNumber), NULL, 0);
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_update_and_compress_page: PPL ioctl PPL_HIB_IOCTL_UPDATE_PAGE failed: 0x%x\n", kr);
-       }
-       // page was copied to scratch, so compress it into compressed
-       int result;
-       if (uncompressed) {
-               *uncompressed = pplHmacScratchPage;
-       }
-       if (compressed) {
-               result = hibernate_compress_page(pplHmacScratchPage, compressed);
-       } else {
-               result = 0;
-       }
-       return result;
-}
-
-/* finalize HMAC calculation */
-void
-ppl_hmac_final(uint8_t *output, size_t outputLen)
-{
-       if (outputLen != HMAC_HASH_SIZE) {
-               panic("ppl_hmac_final: outputLen should be %d but is %zu\n", HMAC_HASH_SIZE, outputLen);
-       }
-       uint8_t hashOutput[HMAC_HASH_SIZE];
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINAL, NULL, 0, hashOutput, sizeof(hashOutput));
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_final: PPL ioctl PPL_HIB_IOCTL_FINAL failed: 0x%x\n", kr);
-       }
-       memcpy(output, hashOutput, HMAC_HASH_SIZE);
-}
-
-/* HMAC the hibseg and get metadata */
-void
-ppl_hmac_fetch_hibseg_and_info(void *buffer,
-    uint64_t bufferLen,
-    IOHibernateHibSegInfo *info)
-{
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG, NULL, 0, buffer, bufferLen);
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG failed: 0x%x\n", kr);
-       }
-       IOHibernateHibSegInfo segInfo;
-       kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FETCH_HIBSEG_INFO, NULL, 0, &segInfo, sizeof(segInfo));
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_fetch_hibseg_and_info: PPL ioctl PPL_HIB_IOCTL_FETCH_HIBSEG_INFO failed: 0x%x\n", kr);
-       }
-       memcpy(info, &segInfo, sizeof(segInfo));
-}
-
-/* HMAC the entire read-only region, or compare to previous HMAC */
-void
-ppl_hmac_compute_rorgn_hmac(void)
-{
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC, NULL, 0, NULL, 0);
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_compute_rorgn_hmac: PPL ioctl PPL_HIB_IOCTL_COMPUTE_RORGN_HMAC failed: 0x%x\n", kr);
-       }
-}
-
-/**
- * Finish hashing the hibernation image and return out the signed hash. This also
- * hashes the hibernation header.
- */
-void
-ppl_hmac_finalize_image(const void *header, size_t headerLen, uint8_t *hmac, size_t hmacLen)
-{
-       if (hmacLen != HMAC_HASH_SIZE) {
-               panic("ppl_hmac_finalize_image: hmacLen should be %d but is %zu\n", HMAC_HASH_SIZE, hmacLen);
-       }
-       uint8_t hashOutput[HMAC_HASH_SIZE];
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_FINALIZE_IMAGE, header, headerLen, hashOutput, sizeof(hashOutput));
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_FINALIZE_IMAGE failed: 0x%x\n", kr);
-       }
-       memcpy(hmac, hashOutput, HMAC_HASH_SIZE);
-}
-
-
-/**
- * Return back an array of I/O ranges that need to be included within the hibernation
- * image. If there are no I/O ranges that need hashing, then `*io_ranges` will be
- * NULL and `*num_io_ranges` will be zero.
- */
-void
-ppl_hmac_get_io_ranges(const ppl_hib_io_range **io_ranges, uint16_t *num_io_ranges)
-{
-       assert((io_ranges != NULL) && (num_io_ranges != NULL));
-
-       ppl_hib_get_io_ranges_data io;
-       kern_return_t kr = pmap_iommu_ioctl(pplHmacState, PPL_HIB_IOCTL_GET_IO_RANGES, NULL, 0, &io, sizeof(io));
-       if (kr != KERN_SUCCESS) {
-               panic("ppl_hmac_finalize_image: PPL ioctl PPL_HIB_IOCTL_GET_IO_RANGES failed: 0x%x\n", kr);
-       }
-
-       /**
-        * This returns back pointers to PPL-owned data but this is fine since the
-        * caller only needs read-only access to this data (and the kernel has RO
-        * access to PPL-owned memory).
-        */
-       *io_ranges = io.ranges;
-       *num_io_ranges = io.num_io_ranges;
-}
-
-#endif /* XNU_MONITOR_PPL_HIB */
index 8519f0a8c47cbd558cd3ce56476877abcb11d813..2ed851c12fbaad7b081b6825733fd5e9d4f5317c 100644 (file)
 #include <ptrauth.h>
 #include <arm/cpu_data_internal.h>
 #include <arm/cpu_internal.h>
+#include <libkern/section_keywords.h>
 
-#if HIBERNATE_HMAC_IMAGE
-#include <arm64/ppl/ppl_hib.h>
-#include <corecrypto/ccsha2_internal.h>
-#include <corecrypto/ccdigest_internal.h>
-#endif /* HIBERNATE_HMAC_IMAGE */
 
 pal_hib_tramp_result_t gHibTramp;
-pal_hib_globals_t gHibernateGlobals;
+pal_hib_globals_t gHibernateGlobals MARK_AS_HIBERNATE_DATA_CONST_LATE;
+
+// As a workaround for <rdar://problem/70121432> ("References between different compile units in xnu shouldn't go through GOT"),
+// all of the extern symbols that we refer to in this file have to be declared with hidden visibility.
+extern IOHibernateImageHeader *gIOHibernateCurrentHeader __attribute__((visibility("hidden")));
+extern const uint32_t ccsha256_initial_state[8] __attribute__((visibility("hidden")));
+extern void AccelerateCrypto_SHA256_compress(ccdigest_state_t state, size_t numBlocks, const void *data) __attribute__((visibility("hidden")));
+extern void ccdigest_final_64be(const struct ccdigest_info *di, ccdigest_ctx_t, unsigned char *digest) __attribute__((visibility("hidden")));
+extern struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS] __attribute__((visibility("hidden")));
+extern bool hib_entry_pmap_lockdown __attribute__((visibility("hidden")));
 
 uintptr_t
 hibernate_restore_phys_page(uint64_t src, uint64_t dst, uint32_t len, __unused uint32_t procFlags)
@@ -81,74 +86,18 @@ pal_hib_restore_pal_state(__unused uint32_t *arg)
 void
 pal_hib_resume_init(pal_hib_ctx_t *ctx, hibernate_page_list_t *map, uint32_t *nextFree)
 {
-#if HIBERNATE_HMAC_IMAGE
-       extern void AccelerateCrypto_SHA256_compress(ccdigest_state_t state, size_t numBlocks, const void *data);
-       ctx->di = (struct ccdigest_info){
-               .output_size = CCSHA256_OUTPUT_SIZE,
-               .state_size = CCSHA256_STATE_SIZE,
-               .block_size = CCSHA256_BLOCK_SIZE,
-               .oid_size = ccoid_sha256_len,
-               .oid = CC_DIGEST_OID_SHA256,
-               .initial_state = ccsha256_initial_state,
-               .compress = AccelerateCrypto_SHA256_compress,
-               .final = ccdigest_final_64be,
-       };
-
-       SHA256_CTX shaCtx;
-
-       // validate signature of handoff
-       uint32_t handoffPages = gIOHibernateCurrentHeader->handoffPages;
-       uint32_t handoffPageCount = gIOHibernateCurrentHeader->handoffPageCount;
-
-       void *handoffSrc = (void *)pal_hib_map(IMAGE_AREA, ptoa_64(handoffPages));
-       ppl_hib_init_context(&ctx->di, &shaCtx, 'HOFF');
-       ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPages), &handoffPages);
-       ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(handoffPageCount), &handoffPageCount);
-       ccdigest_update(&ctx->di, shaCtx.ctx, ptoa_64(handoffPageCount), handoffSrc);
-       uint8_t handoffHMAC[CCSHA384_OUTPUT_SIZE];
-       ppl_hib_compute_hmac(&ctx->di, &shaCtx, gHibernateGlobals.hmacRegBase, handoffHMAC);
-       HIB_ASSERT(__nosan_memcmp(handoffHMAC, gIOHibernateCurrentHeader->handoffHMAC, sizeof(handoffHMAC)) == 0);
-
-       // construct a hibernate_scratch_t for storing all of the pages we restored
-       hibernate_scratch_init(&ctx->pagesRestored, map, nextFree);
-#endif /* HIBERNATE_HMAC_IMAGE */
 }
 
 void
 pal_hib_restored_page(pal_hib_ctx_t *ctx, pal_hib_restore_stage_t stage, ppnum_t ppnum)
 {
-#if HIBERNATE_HMAC_IMAGE
-       if (stage != pal_hib_restore_stage_handoff_data) {
-               // remember that we restored this page
-               hibernate_scratch_write(&ctx->pagesRestored, &ppnum, sizeof(ppnum));
-       }
-#endif /* HIBERNATE_HMAC_IMAGE */
 }
 
 void
 pal_hib_patchup(pal_hib_ctx_t *ctx)
 {
-#if HIBERNATE_HMAC_IMAGE
-       // compute and validate the HMAC for the wired pages (image1)
-       SHA256_CTX shaCtx;
-
-       hibernate_scratch_start_read(&ctx->pagesRestored);
-       uint64_t pageCount = ctx->pagesRestored.totalLength / sizeof(ppnum_t);
-       ppl_hib_init_context(&ctx->di, &shaCtx, 'PAG1');
-       for (uint64_t i = 0; i < pageCount; i++) {
-               ppnum_t ppnum;
-               hibernate_scratch_read(&ctx->pagesRestored, &ppnum, sizeof(ppnum));
-               vm_offset_t virtAddr = pal_hib_map(DEST_COPY_AREA, ptoa_64(ppnum));
-               ccdigest_update(&ctx->di, shaCtx.ctx, sizeof(ppnum), &ppnum);
-               ccdigest_update(&ctx->di, shaCtx.ctx, PAGE_SIZE, (void *)virtAddr);
-       }
-       uint8_t image1PagesHMAC[CCSHA384_OUTPUT_SIZE];
-       ppl_hib_compute_hmac(&ctx->di, &shaCtx, gHibernateGlobals.hmacRegBase, image1PagesHMAC);
-       HIB_ASSERT(__nosan_memcmp(image1PagesHMAC, gIOHibernateCurrentHeader->image1PagesHMAC, sizeof(image1PagesHMAC)) == 0);
-#endif /* HIBERNATE_HMAC_IMAGE */
 
        // DRAM pages are captured from a PPL context, so here we restore all cpu_data structures to a non-PPL context
-       extern struct pmap_cpu_data_array_entry pmap_cpu_data_array[MAX_CPUS];
        for (int i = 0; i < MAX_CPUS; i++) {
                pmap_cpu_data_array[i].cpu_data.ppl_state = PPL_STATE_KERNEL;
                pmap_cpu_data_array[i].cpu_data.ppl_kern_saved_sp = 0;
@@ -160,7 +109,6 @@ pal_hib_patchup(pal_hib_ctx_t *ctx)
        // Calls into the pmap that could potentially modify pmap data structures
        // during image copying were explicitly blocked on hibernation entry.
        // Resetting this variable to false allows those calls to be made again.
-       extern bool hib_entry_pmap_lockdown;
        hib_entry_pmap_lockdown = false;
 }
 
@@ -381,7 +329,6 @@ pal_hib_resume_tramp(uint32_t headerPpnum)
        HIB_ASSERT(phys_end != 0);
 
        hib_bzero(&gHibTramp, sizeof(gHibTramp));
-       gHibTramp.kernelSlide = header->restore1CodeVirt - hib_text_start;
 
        // During hibernation resume, we create temporary mappings that do not collide with where any of the kernel mappings were originally.
        // Technically, non-collision isn't a requirement, but doing this means that if some code accidentally jumps to a VA in the original
@@ -389,7 +336,7 @@ pal_hib_resume_tramp(uint32_t headerPpnum)
        // The base address of our temporary mappings is adjusted by a random amount as a "poor-man's ASLR". We don't have a good source of random
        // numbers in this context, so we just use some of the bits from one of imageHeaderHMAC, which should be random enough.
        uint16_t rand = (uint16_t)(((header->imageHeaderHMAC[0]) << 8) | header->imageHeaderHMAC[1]);
-       uint64_t mem_slide = gHibTramp.kernelSlide - (phys_end - phys_start) * 4 - rand * 256 * PAGE_SIZE;
+       uint64_t mem_slide = gHibernateGlobals.kernelSlide - (phys_end - phys_start) * 4 - rand * 256 * PAGE_SIZE;
 
        // make sure we don't clobber any of the pages we need for restore
        hibernate_reserve_restore_pages(header_phys, header, ctx.bitmap);
@@ -424,7 +371,7 @@ pal_hib_resume_tramp(uint32_t headerPpnum)
                                bool executable = (protection & VM_PROT_EXECUTE);
                                bool writeable = (protection & VM_PROT_WRITE);
                                uint64_t map_flags = executable ? MAP_RX : writeable ? MAP_RW : MAP_RO;
-                               map_range_start_end(&ctx, seg_start, seg_end, gHibTramp.kernelSlide, map_flags);
+                               map_range_start_end(&ctx, seg_start, seg_end, gHibernateGlobals.kernelSlide, map_flags);
                                last_seg_end = seg_end;
                        }
                        if (seg_info->segments[i].physPage == header->restore1CodePhysPage) {
index b18a335e870d1a32a9c08ea3f58257c533ca79e4..5694d2fa1008b3450c514509029b37f53812948d 100644 (file)
@@ -27,7 +27,6 @@
  */
 
 #include <machine/asm.h>
-#include <arm64/hv/hv_regs.h>
 #include <arm64/machine_routines_asm.h>
 #include <arm64/proc_reg.h>
 #include <pexpert/arm64/board_config.h>
@@ -1143,7 +1142,7 @@ no_asts:
 
 #endif
 
-#if defined(APPLELIGHTNING)
+#if defined(APPLELIGHTNING) || defined(APPLEFIRESTORM)
 
        mrs             x12, ARM64_REG_HID1                         // if any debug session ever existed, set forceNexL3ClkOn
        orr             x12, x12, ARM64_REG_HID1_forceNexL3ClkOn
@@ -1226,13 +1225,6 @@ Lexception_return_restore_registers:
        //              } else {
        //                      disable_jop = thread->machine.disable_user_jop;
        //              }
-#if DEVELOPMENT || DEBUG
-       adrp    x4, EXT(const_boot_args)@page
-       add             x4, x4, EXT(const_boot_args)@pageoff
-       ldr             x4, [x4, BA_BOOT_FLAGS]
-       and             x1, x4, BA_BOOT_FLAGS_DISABLE_USER_JOP
-       cbnz    x1, Ldisable_jop
-#endif
        mrs             x2, TPIDR_EL1
        ldrb    w1, [x2, TH_DISABLE_USER_JOP]
        cbz             w1, Lenable_jop
@@ -1263,6 +1255,13 @@ Lenable_jop:
        ldr             x1, [x2, TH_JOP_PID]
        ldr             x2, [x2, ACT_CPUDATAP]
        REPROGRAM_JOP_KEYS      Ldone_reconfigure_jop, x1, x2, x3
+#if defined(__ARM_ARCH_8_5__)
+       /**
+        * The new keys will be used after eret to userspace, so explicit sync is
+        * required iff eret is non-synchronizing.
+        */
+       isb             sy
+#endif /* defined(__ARM_ARCH_8_5__) */
 #endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
 Ldone_reconfigure_jop:
 #endif /* defined(HAS_APPLE_PAC) */
@@ -1908,8 +1907,6 @@ LEXT(ml_panic_trap_to_debugger)
        ldr             x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
        mov             sp, x12
 
-       // we want interrupts to stay masked after exiting PPL when calling into panic to halt system
-       // x10 is used in ppl_return_to_kernel_mode restore desired DAIF state after GEXIT
        mrs             x10, DAIF
        mov             w13, #PPL_STATE_PANIC
        str             w13, [x11, PMAP_CPU_DATA_PPL_STATE]
index 5823cc56aaa6457628db32ef21591ec9943cd9c6..03f8a510b8b6540dec8ff017f20eb2ba3cd8c065 100644 (file)
@@ -62,8 +62,6 @@
 #include <IOKit/IOPlatformExpert.h>
 #if HIBERNATION
 #include <IOKit/IOHibernatePrivate.h>
-#include <arm64/hibernate_ppl_hmac.h>
-#include <arm64/ppl/ppl_hib.h>
 #endif /* HIBERNATION */
 
 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
@@ -152,6 +150,11 @@ extern uint32_t lockdown_done;
  * (pre-mapped) in each user address space.
  */
 SECURITY_READ_ONLY_LATE(static struct vm_reserved_region) vm_reserved_regions[] = {
+       {
+               .vmrr_name = "GPU Carveout",
+               .vmrr_addr = MACH_VM_MIN_GPU_CARVEOUT_ADDRESS,
+               .vmrr_size = (vm_map_size_t)(MACH_VM_MAX_GPU_CARVEOUT_ADDRESS - MACH_VM_MIN_GPU_CARVEOUT_ADDRESS)
+       },
        /*
         * Reserve the virtual memory space representing the commpage nesting region
         * to prevent user processes from allocating memory within it. The actual
@@ -402,6 +405,21 @@ machine_startup(__unused boot_args * args)
        /* NOTREACHED */
 }
 
+typedef void (*invalidate_fn_t)(void);
+
+static SECURITY_READ_ONLY_LATE(invalidate_fn_t) invalidate_hmac_function = NULL;
+
+void set_invalidate_hmac_function(invalidate_fn_t fn);
+
+void
+set_invalidate_hmac_function(invalidate_fn_t fn)
+{
+       if (NULL != invalidate_hmac_function) {
+               panic("Invalidate HMAC function already set");
+       }
+
+       invalidate_hmac_function = fn;
+}
 
 void
 machine_lockdown(void)
@@ -436,25 +454,13 @@ machine_lockdown(void)
        rorgn_lockdown();
 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
 
-#if HIBERNATION
-       /* sign the kernel read-only region */
-       if (ppl_hmac_init() == KERN_SUCCESS) {
-               ppl_hmac_compute_rorgn_hmac();
-       }
-#endif /* HIBERNATION */
 
 #endif /* CONFIG_KERNEL_INTEGRITY */
 
-#if HIBERNATION
-       /* Avoid configuration security issues by panic'ing if hibernation is
-        * supported but we don't know how to invalidate SIO HMAC keys, see
-        * below. */
-       if (ppl_hib_hibernation_supported() &&
-           NULL == invalidate_hmac_function) {
-               panic("Invalidate HMAC function wasn't set when needed");
-       }
-#endif  /* HIBERNATION */
 
+       if (NULL != invalidate_hmac_function) {
+               invalidate_hmac_function();
+       }
 
        lockdown_done = 1;
 }
@@ -903,6 +909,12 @@ ml_parse_cpu_topology(void)
                ml_read_reg_range(child, "coresight-reg", &cpu->coresight_pa, &cpu->coresight_len);
                cpu->cluster_type = CLUSTER_TYPE_SMP;
 
+               int cluster_type = (int)ml_readprop(child, "cluster-type", 0);
+               if (cluster_type == 'E') {
+                       cpu->cluster_type = CLUSTER_TYPE_E;
+               } else if (cluster_type == 'P') {
+                       cpu->cluster_type = CLUSTER_TYPE_P;
+               }
 
                /*
                 * Since we want to keep a linear cluster ID space, we cannot just rely
@@ -912,7 +924,7 @@ ml_parse_cpu_topology(void)
 #if HAS_CLUSTER
                uint32_t phys_cluster_id = MPIDR_CLUSTER_ID(cpu->phys_id);
 #else
-               uint32_t phys_cluster_id = 0;
+               uint32_t phys_cluster_id = (cpu->cluster_type == CLUSTER_TYPE_P);
 #endif
                assert(phys_cluster_id <= MAX_CPU_CLUSTER_PHY_ID);
                cpu->cluster_id = ((cluster_phys_to_logical[phys_cluster_id] == -1) ?
@@ -1817,8 +1829,7 @@ ml_get_timebase()
 /*
  * Get the speculative timebase without an ISB.
  */
-__attribute__((unused))
-static uint64_t
+uint64_t
 ml_get_speculative_timebase()
 {
        uint64_t timebase;
@@ -1828,6 +1839,12 @@ ml_get_speculative_timebase()
        return timebase + getCpuDatap()->cpu_base_timebase;
 }
 
+uint64_t
+ml_get_timebase_entropy(void)
+{
+       return ml_get_speculative_timebase();
+}
+
 uint32_t
 ml_get_decrementer()
 {
@@ -2275,7 +2292,9 @@ ex_cb_invoke(
 static inline bool
 cpu_supports_userkeyen()
 {
-#if   HAS_APCTL_EL1_USERKEYEN
+#if defined(APPLEFIRESTORM)
+       return __builtin_arm_rsr64(ARM64_REG_APCTL_EL1) & APCTL_EL1_UserKeyEn;
+#elif HAS_APCTL_EL1_USERKEYEN
        return true;
 #else
        return false;
@@ -2370,7 +2389,6 @@ ml_thread_set_jop_pid(thread_t thread, task_t task)
 }
 #endif /* defined(HAS_APPLE_PAC) */
 
-
 #if defined(HAS_APPLE_PAC)
 #define _ml_auth_ptr_unchecked(_ptr, _suffix, _modifier) \
        asm volatile ("aut" #_suffix " %[ptr], %[modifier]" : [ptr] "+r"(_ptr) : [modifier] "r"(_modifier));
@@ -2425,8 +2443,6 @@ ml_hibernate_active_pre(void)
 {
 #if HIBERNATION
        if (kIOHibernateStateWakingFromHibernate == gIOHibernateState) {
-               /* validate rorgn hmac */
-               ppl_hmac_compute_rorgn_hmac();
 
                hibernate_rebuild_vm_structs();
        }
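
set_invalidate_hmac_function() replaces the direct PPL HIB calls with a
write-once hook: a driver may register exactly one invalidation callback, and
machine_lockdown() invokes it if present. The registration pattern, sketched
outside the kernel (an early return stands in for the real panic path):

#include <stddef.h>

typedef void (*invalidate_fn_t)(void);

static invalidate_fn_t invalidate_hook = NULL;

void
set_hook_once_sketch(invalidate_fn_t fn)
{
        if (invalidate_hook != NULL) {
                return;                 /* the kernel panics on double registration */
        }
        invalidate_hook = fn;
}

void
lockdown_sketch(void)
{
        if (invalidate_hook != NULL) {
                invalidate_hook();      /* e.g. invalidate SIO HMAC keys */
        }
}
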
index 19e4b96d3a36d3c4972256f7ccdc9dd25ea98b89..d9f59fa5167f462277023c60bae5cc63d418ea26 100644 (file)
@@ -77,7 +77,12 @@ Lskip_program_el0_jop_key:
         * }
         */
        mrs             x1, ARM64_REG_APCTL_EL1
-#if   defined(HAS_APCTL_EL1_USERKEYEN)
+#if defined(APPLEFIRESTORM)
+       SET_KERN_KEY    x2, x1
+       CLEAR_KERN_KEY  x3, x1
+       tst             x1, #(APCTL_EL1_UserKeyEn)
+       csel    x1, x2, x3, ne
+#elif defined(HAS_APCTL_EL1_USERKEYEN)
        SET_KERN_KEY    x1, x1
 #else
        CLEAR_KERN_KEY  x1, x1
@@ -105,7 +110,12 @@ Lskip_program_prev_jop_key:
         * }
         */
        mrs             x1, ARM64_REG_APCTL_EL1
-#if   defined(HAS_APCTL_EL1_USERKEYEN)
+#if defined(APPLEFIRESTORM)
+       CLEAR_KERN_KEY  x2, x1
+       SET_KERN_KEY    x3, x1
+       tst             x1, #(APCTL_EL1_UserKeyEn)
+       csel    x1, x2, x3, ne
+#elif defined(HAS_APCTL_EL1_USERKEYEN)
        CLEAR_KERN_KEY  x1, x1
 #else
        SET_KERN_KEY    x1, x1
index 205cd13a75bd474689a33e9384af8d8aa6629236..9a3981d07129fd58ed89eeed14d9ee17f6606e6d 100644 (file)
 
 #if defined(HAS_APPLE_PAC)
 
-#if   defined(HAS_APCTL_EL1_USERKEYEN)
+#if defined(APPLEFIRESTORM)
+/* H13 may use either fast or slow A-key switching, depending on CPU model and revision */
+#define HAS_PAC_FAST_A_KEY_SWITCHING    1
+#define HAS_PAC_SLOW_A_KEY_SWITCHING    1
+
+/* BEGIN IGNORE CODESTYLE */
+
+/**
+ * IF_PAC_FAST_A_KEY_SWITCHING
+ *
+ * Branch to a specified label if this H13 model + revision supports fast A-key switching.
+ *
+ *   label - label to branch to
+ *   tmp - scratch register
+ */
+.macro IF_PAC_FAST_A_KEY_SWITCHING     label, tmp
+       /**
+        * start.s attempts to set APCTL_EL1.UserKeyEn.  If this H13 CPU doesn't
+        * actually support this bit, it will be RaZ.
+        */
+       mrs             \tmp, APCTL_EL1
+       tbnz    \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
+.endmacro
+
+/**
+ * IF_PAC_SLOW_A_KEY_SWITCHING
+ *
+ * Branch to a specified label if this H13 model + revision doesn't support fast A-key switching.
+ *
+ *   label - label to branch to
+ *   tmp - scratch register
+ */
+.macro IF_PAC_SLOW_A_KEY_SWITCHING     label, tmp
+       mrs             \tmp, APCTL_EL1
+       tbz             \tmp, #APCTL_EL1_UserKeyEn_OFFSET, \label
+.endmacro
+
+/* END IGNORE CODESTYLE */
+
+#elif defined(HAS_APCTL_EL1_USERKEYEN)
 #define HAS_PAC_FAST_A_KEY_SWITCHING    1
 #define HAS_PAC_SLOW_A_KEY_SWITCHING    0
 
@@ -51,7 +90,7 @@
 .macro IF_PAC_SLOW_A_KEY_SWITCHING      label, tmp
 .endmacro
 
-#else /* !&& !defined(HAS_APCTL_EL1_USERKEYEN) */
+#else /* !defined(APPLEFIRESTORM) && !defined(HAS_APCTL_EL1_USERKEYEN) */
 #define HAS_PAC_FAST_A_KEY_SWITCHING    0
 #define HAS_PAC_SLOW_A_KEY_SWITCHING    1
 
 .error "This macro should never need to be used on this CPU family."
 .endmacro
 
-#endif /**/
+#endif /* defined(APPLEFIRESTORM) */
 
 /* BEGIN IGNORE CODESTYLE */
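
These H13 macros turn a compile-time choice into a runtime probe: start.s
attempts to set APCTL_EL1.UserKeyEn, and on parts where the bit is not
implemented it reads back as zero (RaZ), so a single test-bit-and-branch picks
fast or slow A-key switching. The probe modeled in C, with the register value
passed in and an assumed bit position:

#include <stdbool.h>
#include <stdint.h>

#define USERKEYEN_OFFSET 1      /* assumed; APCTL_EL1_UserKeyEn_OFFSET */

static bool
supports_fast_a_key_switching_sketch(uint64_t apctl_el1)
{
        /* mirrors: mrs tmp, APCTL_EL1 ; tbnz tmp, #OFFSET, label */
        return (apctl_el1 >> USERKEYEN_OFFSET) & 1;
}
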
 
index 88724f77272c2c860969e8f50bc8a2bd582fc328..0033ec1f8b22dcf808954c851803d18b9fe8aace 100644 (file)
@@ -55,10 +55,6 @@ typedef enum {
  * @discussion  ARM64-specific PAL context; see pal_hib_ctx_t for details.
  */
 struct pal_hib_ctx {
-#if HIBERNATE_HMAC_IMAGE
-       struct ccdigest_info di;
-       hibernate_scratch_t pagesRestored;
-#endif /* HIBERNATE_HMAC_IMAGE */
 };
 
 /*!
@@ -69,12 +65,14 @@ struct pal_hib_ctx {
  * @field       dockChannelWstatMask Mask to apply to dockchannel WSTAT register to compute available FIFO entries
  * @field       hibUartRegBase       Physical address of the UART registers
  * @field       hmacRegBase          Physical address of the hmac block registers
+ * @field       kernelSlide          Offset from physical address to virtual address in the kernel map
  */
 typedef struct {
        uint64_t dockChannelRegBase;
        uint64_t dockChannelWstatMask;
        uint64_t hibUartRegBase;
        uint64_t hmacRegBase;
+       uint64_t kernelSlide;
 } pal_hib_globals_t;
 extern pal_hib_globals_t gHibernateGlobals;
 
@@ -102,13 +100,11 @@ void pal_hib_resume_tramp(uint32_t headerPpnum);
  * @field       ttbr0               Physical address of the first level translation table (low mem)
  * @field       ttbr1               Physical address of the first level translation table (high mem)
  * @field       memSlide            Offset from physical address to virtual address during hibernation resume
- * @field       kernelSlide         Offset from physical address to virtual address in the kernel map
  */
 typedef struct{
        uint64_t ttbr0;
        uint64_t ttbr1;
        uint64_t memSlide;
-       uint64_t kernelSlide;
 } pal_hib_tramp_result_t;
 
 #if HIBERNATE_TRAP_HANDLER
index 5aa70ff7a5cff602d88b75604ddd711b16dd8208..e2bc024d50d0c37d12aba2ec523abd52d0302d8e 100644 (file)
@@ -1058,15 +1058,18 @@ machine_csv(__unused cpuvn_e cve)
        return 0;
 }
 
+#if __ARM_ARCH_8_5__
+void
+arm_context_switch_requires_sync()
+{
+       current_cpu_datap()->sync_on_cswitch = 1;
+}
+#endif
 
 #if __has_feature(ptrauth_calls)
 boolean_t
 arm_user_jop_disabled(void)
 {
-#if DEVELOPMENT || DEBUG
-       return !!(BootArgs->bootFlags & kBootFlagsDisableUserJOP);
-#else
        return FALSE;
-#endif
 }
 #endif /* __has_feature(ptrauth_calls) */
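
arm_context_switch_requires_sync() is the producer half of the
CPU_SYNC_ON_CSWITCH flag that the cswitch.s hunk earlier consumes: C code marks
the current CPU as needing a barrier, and the context-switch path issues an
isb sy and clears the flag before the next thread runs. The pairing, sketched
with an assumed minimal cpu_data layout:

#include <stdbool.h>

typedef struct {
        volatile bool sync_on_cswitch;  /* the real field lives in cpu_data_t */
} cpu_data_sketch_t;

void
context_switch_requires_sync_sketch(cpu_data_sketch_t *cdp)
{
        cdp->sync_on_cswitch = true;
}

void
cswitch_sync_sketch(cpu_data_sketch_t *cdp)
{
        if (cdp->sync_on_cswitch) {
                /* isb sy on the real path */
                cdp->sync_on_cswitch = false;
        }
}
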
index a160b3b9d68e68b2e5382b930ea780aedb5423ad..d1c2225083cd94f2ca9fb305cae1aee147676396 100644 (file)
 
 #define SPSR_INTERRUPTS_ENABLED(x) (!(x & DAIF_FIQF))
 
+#if __ARM_ARCH_8_5__
+#define PSR64_SSBS_U32_DEFAULT  PSR64_SSBS_32
+#define PSR64_SSBS_U64_DEFAULT  PSR64_SSBS_64
+#define PSR64_SSBS_KRN_DEFAULT  PSR64_SSBS_64
+#else
 #define PSR64_SSBS_U32_DEFAULT  (0)
 #define PSR64_SSBS_U64_DEFAULT  (0)
 #define PSR64_SSBS_KRN_DEFAULT  (0)
+#endif
 
 /*
  * msr DAIFSet, Xn, and msr DAIFClr, Xn transfer
 // 0     M              MMU enable
 #define SCTLR_M_ENABLED           (1ULL << 0)
 
+#if __ARM_ARCH_8_5__
+#define SCTLR_CSEH_DEFAULT        (0)
+#define SCTLR_DSSBS_DEFAULT       SCTLR_DSSBS
+#else
 #define SCTLR_CSEH_DEFAULT        (SCTLR_EIS | SCTLR_EOS)
 #define SCTLR_DSSBS_DEFAULT       (0)
+#endif
 
 #define SCTLR_EL1_DEFAULT \
        (SCTLR_RESERVED | SCTLR_UCI_ENABLED | SCTLR_nTWE_WFE_ENABLED | SCTLR_DZE_ENABLED | \
@@ -1697,6 +1708,12 @@ typedef enum {
 #define MIDR_TURKS            (0x026 << MIDR_EL1_PNUM_SHIFT)
 #endif
 
+#ifdef APPLEFIRESTORM
+#define MIDR_SICILY_ICESTORM            (0x020 << MIDR_EL1_PNUM_SHIFT)
+#define MIDR_SICILY_FIRESTORM           (0x021 << MIDR_EL1_PNUM_SHIFT)
+#define MIDR_TONGA_ICESTORM             (0x022 << MIDR_EL1_PNUM_SHIFT)
+#define MIDR_TONGA_FIRESTORM            (0x023 << MIDR_EL1_PNUM_SHIFT)
+#endif
 
 
 /*
@@ -2033,11 +2050,6 @@ typedef enum {
 #define ACTLR_EL1_DisHWP         ACTLR_EL1_DisHWP_MASK
 
 
-#if HAS_IC_INVAL_FILTERS
-#define ACTLR_EL1_IC_IVAU_EnASID_OFFSET 12
-#define ACTLR_EL1_IC_IVAU_EnASID_MASK   (1ULL << ACTLR_EL1_IC_IVAU_EnASID_OFFSET)
-#define ACTLR_EL1_IC_IVAU_EnASID        ACTLR_EL1_IC_IVAU_EnASID_MASK
-#endif /* HAS_IC_INVAL_FILTERS */
 
 #define AFPCR_DAZ_SHIFT  (0)
 #define AFPCR_FTZ_SHIFT  (1)
index 745e9d17f18697c9077b974603aea4c9d94ddd42..51bd6a69b438a5c8576e60236957aad66866e6bc 100644 (file)
@@ -315,13 +315,6 @@ is_parity_error(fault_status_t status)
        }
 }
 
-static inline unsigned
-__ror(unsigned value, unsigned shift)
-{
-       return ((unsigned)(value) >> (unsigned)(shift)) |
-              (unsigned)(value) << ((unsigned)(sizeof(unsigned) * CHAR_BIT) - (unsigned)(shift));
-}
-
 __dead2
 static void
 arm64_implementation_specific_error(arm_saved_state_t *state, uint32_t esr, vm_offset_t far)
@@ -1742,11 +1735,6 @@ handle_simd_trap(arm_saved_state_t *state, uint32_t esr)
 void
 sleh_irq(arm_saved_state_t *state)
 {
-       uint64_t     timestamp                = 0;
-       uint32_t     old_entropy_data         = 0;
-       uint32_t     old_entropy_sample_count = 0;
-       size_t       entropy_index            = 0;
-       uint32_t *   entropy_data_ptr         = NULL;
        cpu_data_t * cdp __unused             = getCpuDatap();
 #if MACH_ASSERT
        int preemption_level = get_preemption_level();
@@ -1765,25 +1753,7 @@ sleh_irq(arm_saved_state_t *state)
            cdp->interrupt_source);
 #endif
 
-       /* We use interrupt timing as an entropy source. */
-       timestamp = ml_get_timebase();
-
-       /*
-        * The buffer index is subject to races, but as these races should only
-        * result in multiple CPUs updating the same location, the end result
-        * should be that noise gets written into the entropy buffer.  As this
-        * is the entire point of the entropy buffer, we will not worry about
-        * these races for now.
-        */
-       old_entropy_sample_count = EntropyData.sample_count;
-       EntropyData.sample_count += 1;
-
-       entropy_index = old_entropy_sample_count & EntropyData.buffer_index_mask;
-       entropy_data_ptr = EntropyData.buffer + entropy_index;
-
-       /* Mix the timestamp data and the old data together. */
-       old_entropy_data = *entropy_data_ptr;
-       *entropy_data_ptr = (uint32_t)timestamp ^ (__ror(old_entropy_data, 9) & EntropyData.ror_mask);
+       entropy_collect();
 
        sleh_interrupt_handler_epilogue();
 #if MACH_ASSERT
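
The deleted block is the old inline mixer, now centralized behind
entropy_collect(): each interrupt rotates the previous 32-bit sample right by 9
and xors in low timebase bits, cycling through the buffer by sample count. The
retired arithmetic as a standalone sketch (the kernel additionally ands the
rotated value with EntropyData.ror_mask, and tolerates the cross-CPU races on
the index since they only add noise):

#include <stdint.h>

static inline uint32_t
ror32_sketch(uint32_t v, unsigned n)    /* valid for n in 1..31 */
{
        return (v >> n) | (v << (32 - n));
}

static void
entropy_mix_sketch(uint32_t *buffer, uint32_t index_mask,
    uint32_t *sample_count, uint64_t timestamp)
{
        uint32_t *ep = buffer + (*sample_count & index_mask);   /* cyclic index */
        *sample_count += 1;
        *ep = ror32_sketch(*ep, 9) ^ (uint32_t)timestamp;       /* mix old and new */
}
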
index 1f239f40706748060047dba5badce126cd99f28a..a4b64906c64d29d14d5a1799b7f3586c37ca78d7 100644 (file)
@@ -919,6 +919,12 @@ common_start:
        and             x0, x0, #~(APCTL_EL1_EnAPKey0)
        msr             ARM64_REG_APCTL_EL1, x0
 
+#if defined(APPLEFIRESTORM)
+       IF_PAC_FAST_A_KEY_SWITCHING     1f, x0
+       orr             x0, x0, #(APCTL_EL1_KernKeyEn)
+       msr             ARM64_REG_APCTL_EL1, x0
+1:
+#endif /* APPLEFIRESTORM */
 
 #else
        mrs             x0, ARM64_REG_APCTL_EL1
index 221a2e1c01748a84a56fde48d73f0d43d7aa0dc0..c1a77bf66e4b914eec30d15e42b7ebb32cf95b9d 100644 (file)
@@ -320,8 +320,8 @@ machine_thread_state_convert_to_user(
 
        // Note that kernel threads never have disable_user_jop set
        if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
-           thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
-           (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
+           thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
+           ) {
                ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
                return KERN_SUCCESS;
        }
@@ -420,8 +420,8 @@ machine_thread_state_convert_from_user(
        }
 
        if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
-               if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
-                   (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
+               if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
+                   ) {
                        return KERN_SUCCESS;
                }
                // Disallow setting unsigned thread state on JOP-enabled processes.
index 03911a0951e5525df481830b51bfe27949d63d66..aa3a73c8e53126623f5d829426d2ca22ac20a501 100644 (file)
@@ -55,6 +55,13 @@ sync_tlb_flush(void)
        __builtin_arm_isb(ISB_SY);
 }
 
+static inline void
+sync_tlb_flush_local(void)
+{
+       __builtin_arm_dsb(DSB_NSH);
+       __builtin_arm_isb(ISB_SY);
+}
+
 // flush_mmu_tlb: full TLB flush on all cores
 static inline void
 flush_mmu_tlb_async(void)
@@ -290,7 +297,7 @@ generate_rtlbi_param(ppnum_t npages, uint32_t asid, vm_offset_t va, uint64_t pma
         * Per the armv8.4 RTLBI extension spec, the range encoded in the rtlbi register operand is defined by:
         * BaseADDR <= VA < BaseADDR+((NUM+1)*2^(5*SCALE+1) * Translation_Granule_Size)
         */
-       unsigned order = (sizeof(npages) * 8) - __builtin_clz(npages - 1) - 1;
+       unsigned order = (unsigned)(sizeof(npages) * 8) - (unsigned)__builtin_clz(npages - 1) - 1;
        unsigned scale = ((order ? order : 1) - 1) / 5;
        unsigned granule = 1 << ((5 * scale) + 1);
        unsigned num = (((npages + granule - 1) & ~(granule - 1)) / granule) - 1;
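
The cast fix above sits in the range-TLBI parameter math: per the quoted
formula the instruction covers (NUM+1) * 2^(5*SCALE+1) translation granules, so
the code derives SCALE from floor(log2(npages - 1)) and rounds npages up to a
whole number of granules. Worked through in C:

#include <assert.h>
#include <stdint.h>

static void
rtlbi_params_sketch(uint32_t npages, unsigned *scale_out, unsigned *num_out)
{
        assert(npages >= 2);    /* __builtin_clz(0) is undefined */
        unsigned order = 31u - (unsigned)__builtin_clz(npages - 1);
        unsigned scale = ((order ? order : 1) - 1) / 5;
        unsigned granule = 1u << ((5 * scale) + 1);
        unsigned num = (((npages + granule - 1) & ~(granule - 1)) / granule) - 1;
        *scale_out = scale;     /* e.g. npages == 100: order 6, scale 1, */
        *num_out   = num;       /* granule 64, num 1 -> covers 128 pages */
}
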
index 4d295ea90447ac1a826c3984e17578772077368e..fd67d00774e3acbac2ac6501de6a9615bf2f3443 100644 (file)
@@ -39,6 +39,8 @@
 #include "tunables_h11.s"
 #elif defined(APPLELIGHTNING)
 #include "tunables_h12.s"
+#elif defined(APPLEFIRESTORM)
+#include "tunables_h13.s"
 #else
 .macro APPLY_TUNABLES
 .endmacro
diff --git a/osfmk/arm64/tunables/tunables_h13.s b/osfmk/arm64/tunables/tunables_h13.s
new file mode 100644 (file)
index 0000000..d6c12f2
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+.macro APPLY_TUNABLES
+.endmacro
index fab485b6071c8420b7abe7456b5e64ef8fff7890..7daa433291771211604f8b4aecb67fe27d6263f2 100644 (file)
@@ -37,10 +37,6 @@ endef
 
 $(foreach FILE,$(UNCONFIGURED_HIB_FILES),$(eval $(call ADD_HIB_CFLAGS,$(FILE))))
 
-# hibernate_restore.o uses function pointers but the signing keys aren't set up yet,
-# so compile this file with no ptrauth
-hibernate_restore.o_CFLAGS_ADD += -fno-ptrauth-calls
-
 lz4.o_CFLAGS_ADD += -fbuiltin -O3
 vfp_state_test.o_CFLAGS_ADD += -mno-implicit-float
 
index f2ae9a6ccdfca0f4857442d840f9d771ae765289..74181f5e9a362b6c0210b949d50496155039dfc3 100644 (file)
@@ -32,6 +32,7 @@ osfmk/arm/machine_cpuid.c     standard
 osfmk/arm/machine_routines.c           standard
 osfmk/arm/machine_routines_common.c    standard
 osfmk/arm/machine_routines_asm.s       standard
+osfmk/arm/machine_routines_apple.c     optional nos_arm_asm
 osfmk/arm/machine_task.c               standard
 osfmk/arm/pal_routines.c               standard
 osfmk/arm/mcount.s             optional profile
@@ -47,6 +48,7 @@ osfmk/arm/strnlen.s   standard
 osfmk/arm/strncmp.s    standard
 osfmk/arm/strncpy.c    standard
 osfmk/arm/strlcpy.c    standard
+osfmk/arm/trustcache.c         standard
 
 osfmk/arm/model_dep.c          standard
 osfmk/arm/pcb.c                standard
index 78324c05aae22f8036f6c3c3245f4808ec8280b5..8aacc9817774ca0d339bd44243cc80bd8092fbac 100644 (file)
@@ -7,7 +7,7 @@ osfmk/vm/vm_fourk_pager.c        standard
 
 osfmk/arm64/hi_res_clock_map.c         optional hi_res_clock
 
-osfmk/arm/pmap.c               standard
+osfmk/arm/pmap.c               optional nos_arm_pmap
 
 osfmk/arm64/bsd_arm64.c                optional mach_bsd
 osfmk/arm/machdep_call.c       optional mach_bsd
@@ -36,18 +36,19 @@ osfmk/arm64/arm_vm_init.c   standard
 osfmk/arm/io_map.c             standard
 osfmk/arm64/loose_ends.c       standard
 osfmk/arm/locks_arm.c  standard
-osfmk/arm64/locore.s   standard
-osfmk/arm64/gxf_exceptions.s   standard
+osfmk/arm64/locore.s   optional nos_arm_asm
+osfmk/arm64/gxf_exceptions.s   optional nos_arm_asm
 osfmk/arm64/lowmem_vectors.c   standard
 osfmk/arm64/sleh.c                     standard
-osfmk/arm64/start.s    standard
-osfmk/arm64/pinst.s    standard
+osfmk/arm64/start.s    optional nos_arm_asm
+osfmk/arm64/pinst.s    optional nos_arm_asm
 osfmk/arm64/cswitch.s  standard
 osfmk/arm/machine_cpuid.c      standard
 osfmk/arm/machine_routines_common.c            standard
 osfmk/arm64/machine_routines.c         standard
-osfmk/arm64/machine_routines_asm.s     standard
+osfmk/arm64/machine_routines_asm.s     optional nos_arm_asm
 osfmk/arm64/machine_task.c             standard
+osfmk/arm/machine_routines_apple.c     optional nos_arm_asm
 osfmk/arm/pal_routines.c               standard
 osfmk/arm64/mcount.s           optional profile
 osfmk/arm64/memcmp_zero.s      standard
@@ -55,9 +56,10 @@ osfmk/arm64/strnlen.s   standard
 osfmk/arm64/strncmp.s   standard
 osfmk/arm/strncpy.c     standard
 osfmk/arm/strlcpy.c     standard
+osfmk/arm/trustcache.c         standard
 
 osfmk/arm/model_dep.c          standard
-osfmk/arm64/pcb.c              standard
+osfmk/arm64/pcb.c              optional nos_arm_pmap
 osfmk/arm/rtclock.c            standard
 osfmk/arm64/status.c           standard
 osfmk/arm/status_shared.c      standard
index 6357bdb8d5af79133f0594e0d611c14c455fd326..6ebfd9a7e9df5caa680ef53a281bceb6ae51ea07 100644 (file)
@@ -816,6 +816,12 @@ virtual_timeout_inflate_us(unsigned int vti, uint64_t timeout)
        return virtual_timeout_inflate32(vti, timeout, max_timeout);
 }
 
+uint64_t
+ml_get_timebase_entropy(void)
+{
+       return __builtin_ia32_rdtsc();
+}
+
 /*
  *     Routine:        ml_init_lock_timeout
  *     Function:
@@ -1161,30 +1167,6 @@ ml_timer_forced_evaluation(void)
        return ml_timer_evaluation_in_progress;
 }
 
-/* 32-bit right-rotate n bits */
-static inline uint32_t
-ror32(uint32_t val, const unsigned int n)
-{
-       __asm__ volatile ("rorl %%cl,%0" : "=r" (val) : "0" (val), "c" (n));
-       return val;
-}
-
-void
-ml_entropy_collect(void)
-{
-       uint32_t        tsc_lo, tsc_hi;
-       uint32_t        *ep;
-
-       assert(cpu_number() == master_cpu);
-
-       /* update buffer pointer cyclically */
-       ep = EntropyData.buffer + (EntropyData.sample_count & EntropyData.buffer_index_mask);
-       EntropyData.sample_count += 1;
-
-       rdtsc_nofence(tsc_lo, tsc_hi);
-       *ep = (ror32(*ep, 9) & EntropyData.ror_mask) ^ tsc_lo;
-}
-
 uint64_t
 ml_energy_stat(__unused thread_t t)
 {
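
For reference, the rotate-and-xor accumulator removed above can be expressed in portable C. This is an illustrative sketch of the old scheme, not xnu API; only the names from the deleted hunk are reused.

#include <stdint.h>

/* Portable equivalent of the removed x86-specific ror32(): rotate a
 * 32-bit value right by n bits, for 0 < n < 32. */
static inline uint32_t
ror32_portable(uint32_t val, unsigned int n)
{
	return (val >> n) | (val << (32u - n));
}

/* The deleted collector mixed each timestamp into a ring buffer:
 *
 *     *ep = (ror32(*ep, 9) & ror_mask) ^ tsc_lo;
 *
 * with ror_mask == (uint32_t)-1 in normal operation, or 0 when raw
 * samples were being gathered for offline analysis. */
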
index cedcdb7de7cd8cab0a0a88dbeb5d54b4223aa57c..6aaabcba420bcc08576f3ed9b94f4570ef6c70e1 100644 (file)
@@ -80,9 +80,9 @@ void ml_install_interrupt_handler(
        IOInterruptHandler handler,
        void *refCon);
 
-void ml_entropy_collect(void);
-
 uint64_t ml_get_timebase(void);
+uint64_t ml_get_timebase_entropy(void);
+
 void ml_init_lock_timeout(void);
 void ml_init_delay_spin_threshold(int);
 
index 4c74342a2f379104ab89ac57632462b5b46894ba..10b1ac10d4be55130b8fb3d27e8410ab4fda0f31 100644 (file)
@@ -95,6 +95,7 @@
 #include <sys/kdebug.h>
 #include <kperf/kperf.h>
 #include <prng/random.h>
+#include <prng/entropy.h>
 
 #include <string.h>
 
@@ -474,7 +475,7 @@ interrupt(x86_saved_state_t *state)
        }
 
        if (cnum == master_cpu) {
-               ml_entropy_collect();
+               entropy_collect();
        }
 
 #if KPERF
index 47440d332ac6fd23b31d6617d421dd36b37f271a..bc3980f2dfe8f74277399011a6076022ad14ba3a 100644 (file)
@@ -2202,6 +2202,7 @@ ipc_importance_check_circularity(
        ipc_port_t base;
        struct turnstile *send_turnstile = TURNSTILE_NULL;
        struct task_watchport_elem *watchport_elem = NULL;
+       bool took_base_ref = false;
 
        assert(port != IP_NULL);
        assert(dest != IP_NULL);
@@ -2263,23 +2264,7 @@ ipc_importance_check_circularity(
 
        ipc_port_multiple_lock(); /* massive serialization */
 
-       /*
-        *      Search for the end of the chain (a port not in transit),
-        *      acquiring locks along the way.
-        */
-
-       for (;;) {
-               ip_lock(base);
-
-               if (!ip_active(base) ||
-                   (base->ip_receiver_name != MACH_PORT_NULL) ||
-                   (base->ip_destination == IP_NULL)) {
-                       break;
-               }
-
-               base = base->ip_destination;
-       }
-
+       took_base_ref = ipc_port_destination_chain_lock(dest, &base);
        /* all ports in chain from dest to base, inclusive, are locked */
 
        if (port == base) {
@@ -2292,6 +2277,7 @@ ipc_importance_check_circularity(
                require_ip_active(port);
                assert(port->ip_receiver_name == MACH_PORT_NULL);
                assert(port->ip_destination == IP_NULL);
+               assert(!took_base_ref);
 
                base = dest;
                while (base != IP_NULL) {
@@ -2439,6 +2425,9 @@ not_circular:
        }
 
        ip_unlock(base);
+       if (took_base_ref) {
+               ip_release(base);
+       }
 
        /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
        if (send_turnstile) {
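
The hunks above replace the open-coded chain walk with ipc_port_destination_chain_lock() and record whether it took a reference on the terminal port. A minimal sketch of the resulting caller pattern (names from the diff; the surrounding circularity logic is elided):

ipc_port_t base = IP_NULL;
bool took_base_ref;

ipc_port_multiple_lock();       /* massive serialization */
took_base_ref = ipc_port_destination_chain_lock(dest, &base);
/* all ports in chain from dest to base, inclusive, are locked */

/* ... examine and unlock the chain in walk order ... */

ip_unlock(base);
if (took_base_ref) {
	ip_release(base);       /* drop the reference taken on an inactive base */
}
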
index e128f7138c00c943441ffdab892878e0d732a522..04ec259aa1be447189f2f1c2b77c6039a24d0bea 100644 (file)
@@ -228,7 +228,7 @@ ikm_finalize_sig(
        return *scratchp;
 }
 
-#elif defined(CRYPTO_SHA2) && !defined(__x86_64__)
+#elif defined(CRYPTO_SHA2) && !defined(__x86_64__) && !defined(__arm__)
 
 typedef SHA256_CTX ikm_sig_scratch_t;
 
@@ -269,7 +269,7 @@ ikm_finalize_sig(
 }
 
 #else
-/* Stubbed out implementation (for __x86_64__ for now) */
+/* Stubbed out implementation (for __x86_64__, __arm__ for now) */
 
 typedef uintptr_t ikm_sig_scratch_t;
 
@@ -1175,7 +1175,8 @@ ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
 #endif
 
 /* zone for cached ipc_kmsg_t structures */
-ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE, ZC_CACHING);
+ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE,
+    ZC_CACHING | ZC_ZFREE_CLEARMEM);
 static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);
 
 /*
@@ -1311,19 +1312,21 @@ ipc_kmsg_alloc(
                max_expanded_size = msg_and_trailer_size;
        }
 
-       kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone);
-
-       if (max_expanded_size < IKM_SAVED_MSG_SIZE) {
-               max_expanded_size = IKM_SAVED_MSG_SIZE;         /* round up for ikm_cache */
-               data = NULL;
-       } else if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
+       if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
                data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
+               if (data == NULL) {
+                       return IKM_NULL;
+               }
+       } else {
+               data = NULL;
+               max_expanded_size = IKM_SAVED_MSG_SIZE;
        }
 
-       if (kmsg != IKM_NULL) {
-               ikm_init(kmsg, max_expanded_size);
-               ikm_set_header(kmsg, data, msg_and_trailer_size);
-       }
+       kmsg = zalloc_flags(ipc_kmsg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
+       kmsg->ikm_size = max_expanded_size;
+       ikm_qos_init(kmsg);
+       ikm_set_header(kmsg, data, msg_and_trailer_size);
+       assert((kmsg->ikm_prev = kmsg->ikm_next = IKM_BOGUS));
 
        return kmsg;
 }
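
The rewritten ipc_kmsg_alloc() hoists the only fallible allocation (the out-of-line data buffer) ahead of the zone allocation, so zalloc_flags() can use Z_NOFAIL and needs no unwind path; Z_ZERO, paired with the new ZC_ZFREE_CLEARMEM zone flag, also subsumes the field-by-field ikm_init() macro deleted from ipc_kmsg.h below. The general shape of the pattern, sketched with the hunk's own names:

/* Do the allocation that can fail first, so the infallible one never
 * needs cleanup. */
void *data = NULL;
if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
	data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
	if (data == NULL) {
		return IKM_NULL;   /* nothing allocated yet, nothing to free */
	}
}
/* Z_NOFAIL never returns NULL; Z_ZERO replaces the old ikm_init(). */
ipc_kmsg_t kmsg = zalloc_flags(ipc_kmsg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
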
index 5a562adf8fa72d0f06d2197a349438ba6bbc590b..dff4370cf010d78f6dc7ae767b4317b6a43bbeeb 100644 (file)
@@ -87,7 +87,8 @@ typedef uint16_t ipc_kmsg_flags_t;
 #define IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND 0x1       /* Dest port contains an immovable send right */
 
 #if (DEVELOPMENT || DEBUG)
-#define IKM_PARTIAL_SIG        1     /* Keep partial message signatures for better debug */
+/* Turn on to keep partial message signatures for better debug */
+#define IKM_PARTIAL_SIG        0
 #endif
 
 /*
@@ -176,46 +177,6 @@ MACRO_BEGIN                                                             \
        (kmsg)->ikm_prealloc = IP_NULL;                                 \
 MACRO_END
 
-#if MACH_FLIPC
-#define ikm_flipc_init(kmsg) (kmsg)->ikm_node = MACH_NODE_NULL
-#else
-#define ikm_flipc_init(kmsg)
-#endif
-
-#if IKM_PARTIAL_SIG
-#define ikm_init(kmsg, size)                                    \
-MACRO_BEGIN                                                     \
-       (kmsg)->ikm_size = (size);                                  \
-       (kmsg)->ikm_flags = 0;                                      \
-       (kmsg)->ikm_prealloc = IP_NULL;                             \
-       (kmsg)->ikm_data = NULL;                                    \
-       (kmsg)->ikm_voucher = IP_NULL;                              \
-       (kmsg)->ikm_importance = IIE_NULL;                          \
-       (kmsg)->ikm_filter_policy_id = 0;                           \
-       (kmsg)->ikm_header_sig = 0;                                 \
-       (kmsg)->ikm_headtrail_sig = 0;                              \
-       (kmsg)->ikm_signature = 0;                                  \
-       ikm_qos_init(kmsg);                                         \
-       ikm_flipc_init(kmsg);                                       \
-       assert((kmsg)->ikm_prev = (kmsg)->ikm_next = IKM_BOGUS);    \
-MACRO_END
-#else
-#define ikm_init(kmsg, size)                                    \
-MACRO_BEGIN                                                     \
-       (kmsg)->ikm_size = (size);                                  \
-       (kmsg)->ikm_flags = 0;                                      \
-       (kmsg)->ikm_prealloc = IP_NULL;                             \
-       (kmsg)->ikm_data = NULL;                                    \
-       (kmsg)->ikm_voucher = IP_NULL;                              \
-       (kmsg)->ikm_importance = IIE_NULL;                          \
-       (kmsg)->ikm_filter_policy_id = 0;                           \
-       (kmsg)->ikm_signature = 0;                                  \
-       ikm_qos_init(kmsg);                                         \
-       ikm_flipc_init(kmsg);                                       \
-       assert((kmsg)->ikm_prev = (kmsg)->ikm_next = IKM_BOGUS);    \
-MACRO_END
-#endif
-
 #define ikm_qos_init(kmsg)                                              \
 MACRO_BEGIN                                                             \
        (kmsg)->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;          \
index 8ba9fcf0a05b7c0039a369e78db5c92c52a29840..2d6f7e7d14bb41c59a9beea2e12f43be6f3a8c51 100644 (file)
@@ -1128,6 +1128,57 @@ drop_assertions:
 #endif /* IMPORTANCE_INHERITANCE */
 }
 
+/*
+ *     Routine:        ipc_port_destination_chain_lock
+ *     Purpose:
+ *             Search for the end of the chain (a port not in transit),
+ *             acquiring locks along the way, and return it in `base`.
+ *
+ *             Returns true if a reference was taken on `base`
+ *
+ *     Conditions:
+ *             No ports locked.
+ *             ipc_port_multiple_lock held.
+ */
+boolean_t
+ipc_port_destination_chain_lock(
+       ipc_port_t port,
+       ipc_port_t *base)
+{
+       for (;;) {
+               ip_lock(port);
+
+               if (!ip_active(port)) {
+                       /*
+                        * Active ports that are ip_lock()ed cannot go away.
+                        *
+                        * But inactive ports at the end of walking
+                        * an ip_destination chain are only protected
+                        * from space termination cleanup while the entire
+                        * chain of ports leading to them is held.
+                        *
+                        * Callers of this code tend to unlock the chain
+                        * in the same order as this walk, which doesn't
+                        * protect `base` properly when it is inactive.
+                        *
+                        * In that case, take a reference that the caller
+                        * is responsible for releasing.
+                        */
+                       ip_reference(port);
+                       *base = port;
+                       return true;
+               }
+               if ((port->ip_receiver_name != MACH_PORT_NULL) ||
+                   (port->ip_destination == IP_NULL)) {
+                       *base = port;
+                       return false;
+               }
+
+               port = port->ip_destination;
+       }
+}
+
+
 /*
  *     Routine:        ipc_port_check_circularity
  *     Purpose:
@@ -1156,6 +1207,7 @@ ipc_port_check_circularity(
 #else
        ipc_port_t base;
        struct task_watchport_elem *watchport_elem = NULL;
+       bool took_base_ref = false;
 
        assert(port != IP_NULL);
        assert(dest != IP_NULL);
@@ -1193,18 +1245,7 @@ ipc_port_check_circularity(
         *      acquiring locks along the way.
         */
 
-       for (;;) {
-               ip_lock(base);
-
-               if (!ip_active(base) ||
-                   (base->ip_receiver_name != MACH_PORT_NULL) ||
-                   (base->ip_destination == IP_NULL)) {
-                       break;
-               }
-
-               base = base->ip_destination;
-       }
-
+       took_base_ref = ipc_port_destination_chain_lock(dest, &base);
        /* all ports in chain from dest to base, inclusive, are locked */
 
        if (port == base) {
@@ -1216,6 +1257,7 @@ ipc_port_check_circularity(
                require_ip_active(port);
                assert(port->ip_receiver_name == MACH_PORT_NULL);
                assert(port->ip_destination == IP_NULL);
+               assert(!took_base_ref);
 
                base = dest;
                while (base != IP_NULL) {
@@ -1310,6 +1352,9 @@ not_circular:
            (base->ip_destination == IP_NULL));
 
        ip_unlock(base);
+       if (took_base_ref) {
+               ip_release(base);
+       }
 
        /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
        if (send_turnstile) {
@@ -2382,7 +2427,8 @@ ipc_port_importance_delta_internal(
        ipc_importance_task_t   *imp_task)
 {
        ipc_port_t next, base;
-       boolean_t dropped = FALSE;
+       bool dropped = false;
+       bool took_base_ref = false;
 
        *imp_task = IIT_NULL;
 
@@ -2398,18 +2444,14 @@ ipc_port_importance_delta_internal(
        if (ip_active(port) &&
            port->ip_destination != IP_NULL &&
            port->ip_receiver_name == MACH_PORT_NULL) {
-               dropped = TRUE;
+               dropped = true;
 
                ip_unlock(port);
                ipc_port_multiple_lock(); /* massive serialization */
-               ip_lock(base);
 
-               while (ip_active(base) &&
-                   base->ip_destination != IP_NULL &&
-                   base->ip_receiver_name == MACH_PORT_NULL) {
-                       base = base->ip_destination;
-                       ip_lock(base);
-               }
+               took_base_ref = ipc_port_destination_chain_lock(port, &base);
+               /* all ports in chain from port to base, inclusive, are locked */
+
                ipc_port_multiple_unlock();
        }
 
@@ -2475,8 +2517,11 @@ ipc_port_importance_delta_internal(
                ipc_importance_task_reference(*imp_task);
        }
 
-       if (dropped == TRUE) {
+       if (dropped) {
                ip_unlock(base);
+               if (took_base_ref) {
+                       ip_release(base);
+               }
        }
 
        return dropped;
index 6af6e067345138e9cb32960fd12e0bd641594fe6..2784c3c732286974201f17c594f5ceddd33fbd60 100644 (file)
@@ -367,10 +367,18 @@ extern lck_attr_t       ipc_lck_attr;
 extern lck_spin_t ipc_port_multiple_lock_data;
 
 #define ipc_port_multiple_lock()                                        \
-               lck_spin_lock_grp(&ipc_port_multiple_lock_data, &ipc_lck_grp)
+       lck_spin_lock_grp(&ipc_port_multiple_lock_data, &ipc_lck_grp)
 
 #define ipc_port_multiple_unlock()                                      \
-               lck_spin_unlock(&ipc_port_multiple_lock_data)
+       lck_spin_unlock(&ipc_port_multiple_lock_data)
+
+/*
+ *     Search for the end of the chain (a port not in transit),
+ *     acquiring locks along the way.
+ */
+extern boolean_t ipc_port_destination_chain_lock(
+       ipc_port_t port,
+       ipc_port_t *base);
 
 /*
  *     The port timestamp facility provides timestamps
index 205fbc52da141c6b8a8b86da4d145f4a5680f2e9..3697e7bbc62b00665112b83a6e15f752c9c3e484 100644 (file)
 #include <ptrauth.h>
 #endif
 
+#if XNU_MONITOR
+#define IN_PPLSTK_BOUNDS(__addr) \
+       (((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
+       ((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
+#endif
 
 unsigned int __attribute__((noinline))
 backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
@@ -84,6 +89,9 @@ backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
        ((uintptr_t)(__addr) < (uintptr_t)top))
 
        in_valid_stack = IN_STK_BOUNDS(fp);
+#if XNU_MONITOR
+       in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
+#endif /* XNU_MONITOR */
 
        if (!in_valid_stack) {
                fp = NULL;
@@ -99,6 +107,9 @@ backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
                 * have set this up, so bounds check, as well.
                 */
                in_valid_stack = IN_STK_BOUNDS(next_fp);
+#if XNU_MONITOR
+               in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
+#endif /* XNU_MONITOR */
 
                if (next_fp == NULL || !in_valid_stack) {
                        break;
@@ -113,7 +124,25 @@ backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
 
                /* stacks grow down; backtracing should be moving to higher addresses */
                if (next_fp <= fp) {
+#if XNU_MONITOR
+                       bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
+                       bool fp_in_kstack = IN_STK_BOUNDS(fp);
+                       bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(next_fp);
+                       bool next_fp_in_kstack = IN_STK_BOUNDS(next_fp);
+
+                       /*
+                        * This check is verbose; it is basically checking whether
+                        * we are switching between the kernel stack and the cpu
+                        * stack.  If so, we ignore the fact that fp has switched
+                        * directions (as it is a symptom of switching stacks).
+                        */
+                       if (!((fp_in_pplstack) && (next_fp_in_kstack)) &&
+                           !((fp_in_kstack) && (next_fp_in_pplstack))) {
+                               break;
+                       }
+#else /* XNU_MONITOR */
                        break;
+#endif /* !XNU_MONITOR */
                }
                fp = next_fp;
        }
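
The XNU_MONITOR additions above widen the validity check so a frame may sit on either the regular kernel stack or the PPL stack, and tolerate the frame-pointer reversal that a stack switch produces. A standalone illustration of the underlying frame-pointer walk (hypothetical struct layout and helper; not the xnu API):

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical frame layout: saved caller fp, then return address. */
struct frame {
	struct frame *next;
	uintptr_t     ret;
};

/* in_bounds stands in for IN_STK_BOUNDS/IN_PPLSTK_BOUNDS: a frame is
 * accepted if it lies on either stack. */
static unsigned int
walk_frames(struct frame *fp, uintptr_t *bt, unsigned int max_frames,
    bool (*in_bounds)(const struct frame *))
{
	unsigned int n = 0;

	while (fp != NULL && in_bounds(fp) && n < max_frames) {
		bt[n++] = fp->ret;

		struct frame *next_fp = fp->next;
		/* Stacks grow down, so the walk should move to higher
		 * addresses; a non-increasing pointer ends it unless the
		 * reversal is a legitimate stack switch (the case the
		 * XNU_MONITOR hunk handles). */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}
	return n;
}
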
index 526bfce3111f835a25a02c0cc13e6b4fc7e0f9e2..5b60219f7d3ec52c9ed02537129d0a396c8478e5 100644 (file)
@@ -119,45 +119,33 @@ vm_extmod_statistics_data_t host_extmod_statistics;
 kern_return_t
 host_processors(host_priv_t host_priv, processor_array_t * out_array, mach_msg_type_number_t * countp)
 {
-       processor_t processor, *tp;
-       void * addr;
-       unsigned int count, i;
-
        if (host_priv == HOST_PRIV_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
 
        assert(host_priv == &realhost);
 
-       count = processor_count;
+       unsigned int count = processor_count;
        assert(count != 0);
 
-       addr = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
-       if (addr == 0) {
+       static_assert(sizeof(mach_port_t) == sizeof(processor_t));
+
+       mach_port_t* ports = kalloc((vm_size_t)(count * sizeof(mach_port_t)));
+       if (!ports) {
                return KERN_RESOURCE_SHORTAGE;
        }
 
-       tp = (processor_t *)addr;
-       *tp++ = processor = processor_list;
-
-       if (count > 1) {
-               simple_lock(&processor_list_lock, LCK_GRP_NULL);
-
-               for (i = 1; i < count; i++) {
-                       *tp++ = processor = processor->processor_list;
-               }
+       for (unsigned int i = 0; i < count; i++) {
+               processor_t processor = processor_array[i];
+               assert(processor != PROCESSOR_NULL);
 
-               simple_unlock(&processor_list_lock);
+               /* do the conversion that Mig should handle */
+               ipc_port_t processor_port = convert_processor_to_port(processor);
+               ports[i] = processor_port;
        }
 
        *countp = count;
-       *out_array = (processor_array_t)addr;
-
-       /* do the conversion that Mig should handle */
-       tp = (processor_t *)addr;
-       for (i = 0; i < count; i++) {
-               ((mach_port_t *)tp)[i] = (mach_port_t)convert_processor_to_port(tp[i]);
-       }
+       *out_array = (processor_array_t)ports;
 
        return KERN_SUCCESS;
 }
@@ -479,7 +467,6 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
        }
 
        case HOST_CPU_LOAD_INFO: {
-               processor_t processor;
                host_cpu_load_info_t cpu_load_info;
 
                if (*count < HOST_CPU_LOAD_INFO_COUNT) {
@@ -501,7 +488,12 @@ host_statistics(host_t host, host_flavor_t flavor, host_info_t info, mach_msg_ty
 
                simple_lock(&processor_list_lock, LCK_GRP_NULL);
 
-               for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
+               unsigned int pcount = processor_count;
+
+               for (unsigned int i = 0; i < pcount; i++) {
+                       processor_t processor = processor_array[i];
+                       assert(processor != PROCESSOR_NULL);
+
                        timer_t idle_state;
                        uint64_t idle_time_snapshot1, idle_time_snapshot2;
                        uint64_t idle_time_tstamp1, idle_time_tstamp2;
@@ -1153,11 +1145,10 @@ host_processor_info(host_t host,
     mach_msg_type_number_t * out_array_count)
 {
        kern_return_t result;
-       processor_t processor;
        host_t thost;
        processor_info_t info;
-       unsigned int icount, tcount;
-       unsigned int pcount, i;
+       unsigned int icount;
+       unsigned int pcount;
        vm_offset_t addr;
        vm_size_t size, needed;
        vm_map_copy_t copy;
@@ -1182,29 +1173,19 @@ host_processor_info(host_t host,
        }
 
        info = (processor_info_t)addr;
-       processor = processor_list;
-       tcount = icount;
 
-       result = processor_info(processor, flavor, &thost, info, &tcount);
-       if (result != KERN_SUCCESS) {
-               kmem_free(ipc_kernel_map, addr, size);
-               return result;
-       }
+       for (unsigned int i = 0; i < pcount; i++) {
+               processor_t processor = processor_array[i];
+               assert(processor != PROCESSOR_NULL);
 
-       if (pcount > 1) {
-               for (i = 1; i < pcount; i++) {
-                       simple_lock(&processor_list_lock, LCK_GRP_NULL);
-                       processor = processor->processor_list;
-                       simple_unlock(&processor_list_lock);
+               unsigned int tcount = icount;
 
-                       info += icount;
-                       tcount = icount;
-                       result = processor_info(processor, flavor, &thost, info, &tcount);
-                       if (result != KERN_SUCCESS) {
-                               kmem_free(ipc_kernel_map, addr, size);
-                               return result;
-                       }
+               result = processor_info(processor, flavor, &thost, info, &tcount);
+               if (result != KERN_SUCCESS) {
+                       kmem_free(ipc_kernel_map, addr, size);
+                       return result;
                }
+               info += icount;
        }
 
        if (size != needed) {
index c5d47ff8467368a07de4ad3607e6f0a9938c580f..3cffb3fed39dbbebb4e37fbfb9b66cbeebf92abd 100644 (file)
@@ -357,6 +357,7 @@ processor_state_update_idle(processor_t processor)
        processor->current_urgency = THREAD_URGENCY_NONE;
        processor->current_is_NO_SMT = false;
        processor->current_is_bound = false;
+       processor->current_is_eagerpreempt = false;
        os_atomic_store(&processor->processor_set->cpu_running_buckets[processor->cpu_id], TH_BUCKET_SCHED_MAX, relaxed);
 }
 
@@ -378,6 +379,7 @@ processor_state_update_from_thread(processor_t processor, thread_t thread)
        processor->current_urgency = thread_get_urgency(thread, NULL, NULL);
        processor->current_is_NO_SMT = thread_no_smt(thread);
        processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
+       processor->current_is_eagerpreempt = thread_is_eager_preempt(thread);
 }
 
 void
@@ -456,6 +458,36 @@ pset_find(
        return pset;
 }
 
+#if !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2)
+
+/*
+ * Find the first processor_set for the given pset_cluster_type.
+ * Should be removed with rdar://57340304, as it's only
+ * useful for the workaround described in rdar://57306691.
+ */
+
+processor_set_t
+pset_find_first_by_cluster_type(
+       pset_cluster_type_t pset_cluster_type)
+{
+       simple_lock(&pset_node_lock, LCK_GRP_NULL);
+       pset_node_t node = &pset_node0;
+       processor_set_t pset = NULL;
+
+       do {
+               pset = node->psets;
+               while (pset != NULL) {
+                       if (pset->pset_cluster_type == pset_cluster_type) {
+                               break;
+                       }
+                       pset = pset->pset_list;
+               }
+       } while (pset == NULL && (node = node->node_list) != NULL);
+       simple_unlock(&pset_node_lock);
+       return pset;
+}
+
+#endif /* !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2) */
 
 /*
  *     Initialize the given processor_set structure.
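
A hypothetical use of the new lookup; PSET_AMP_E (the efficiency-cluster type on asymmetric systems) is assumed here purely for illustration:

processor_set_t pset = pset_find_first_by_cluster_type(PSET_AMP_E);
if (pset != PROCESSOR_SET_NULL) {
	/* e.g. steer a workload toward the first efficiency cluster */
}
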
index 698deb50b8caa1e577c80cfef88d33c35cd7f25b..eb2246cbd5993dbc2a44b785b421cff4b1714a6c 100644 (file)
@@ -306,6 +306,7 @@ struct processor {
        bool                    is_recommended;
        bool                    current_is_NO_SMT;      /* cached TH_SFLAG_NO_SMT of current thread */
        bool                    current_is_bound;       /* current thread is bound to this processor */
+       bool                    current_is_eagerpreempt;/* current thread is TH_SFLAG_EAGERPREEMPT */
        struct thread          *active_thread;          /* thread running on processor */
        struct thread          *idle_thread;            /* this processor's idle thread. */
        struct thread          *startup_thread;
@@ -465,6 +466,18 @@ extern processor_set_t  pset_find(
        uint32_t                cluster_id,
        processor_set_t         default_pset);
 
+#if !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2)
+
+/*
+ * Find the first processor_set for the given pset_cluster_type.
+ * Should be removed with rdar://57340304, as it's only
+ * useful for the workaround described in rdar://57306691.
+ */
+
+extern processor_set_t  pset_find_first_by_cluster_type(
+       pset_cluster_type_t     pset_cluster_type);
+
+#endif /* !defined(RC_HIDE_XNU_FIRESTORM) && (MAX_CPU_CLUSTERS > 2) */
 
 extern kern_return_t    processor_info_count(
        processor_flavor_t      flavor,
index 1ecd4a57d9f372bec60666135a38f0e454101eec..7aea81c7353b84487b075eb21a02f2a59b353b50 100644 (file)
@@ -140,6 +140,10 @@ _ranges_validate(task_t task, task_restartable_range_t *ranges, uint32_t count)
        uint64_t limit = task_has_64Bit_data(task) ? UINT64_MAX : UINT32_MAX;
        uint64_t end, recovery;
 
+       if (count == 0) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
        for (size_t i = 0; i < count; i++) {
                if (ranges[i].length > TASK_RESTARTABLE_OFFSET_MAX ||
                    ranges[i].recovery_offs > TASK_RESTARTABLE_OFFSET_MAX) {
index 5d04d976e6d2f130910e3bd3b950ca746effc064..64075494751c352060d4c19e36244eeceb944ae3 100644 (file)
@@ -3886,7 +3886,7 @@ processor_setrun(
 #endif
        if (SCHED(priority_is_urgent)(thread->sched_pri) && thread->sched_pri > processor->current_pri) {
                preempt = (AST_PREEMPT | AST_URGENT);
-       } else if (processor->active_thread && thread_eager_preemption(processor->active_thread)) {
+       } else if (processor->current_is_eagerpreempt) {
                preempt = (AST_PREEMPT | AST_URGENT);
        } else if ((thread->sched_mode == TH_MODE_TIMESHARE) && (thread->sched_pri < thread->base_pri)) {
                if (SCHED(priority_is_urgent)(thread->base_pri) && thread->sched_pri > processor->current_pri) {
@@ -4738,7 +4738,7 @@ csw_check_locked(
 
        result = SCHED(processor_csw_check)(processor);
        if (result != AST_NONE) {
-               return check_reason | result | (thread_eager_preemption(thread) ? AST_URGENT : AST_NONE);
+               return check_reason | result | (thread_is_eager_preempt(thread) ? AST_URGENT : AST_NONE);
        }
 
        /*
@@ -5862,57 +5862,62 @@ sched_clutch_timeshare_scan(
 
 #endif /* CONFIG_SCHED_TIMESHARE_CORE */
 
-boolean_t
-thread_eager_preemption(thread_t thread)
+bool
+thread_is_eager_preempt(thread_t thread)
 {
-       return (thread->sched_flags & TH_SFLAG_EAGERPREEMPT) != 0;
+       return thread->sched_flags & TH_SFLAG_EAGERPREEMPT;
 }
 
 void
 thread_set_eager_preempt(thread_t thread)
 {
-       spl_t x;
-       processor_t p;
-       ast_t ast = AST_NONE;
+       spl_t s = splsched();
+       thread_lock(thread);
 
-       x = splsched();
-       p = current_processor();
+       assert(!thread_is_eager_preempt(thread));
 
-       thread_lock(thread);
        thread->sched_flags |= TH_SFLAG_EAGERPREEMPT;
 
        if (thread == current_thread()) {
-               ast = csw_check(thread, p, AST_NONE);
+               /* csw_check updates current_is_eagerpreempt on the processor */
+               ast_t ast = csw_check(thread, current_processor(), AST_NONE);
+
                thread_unlock(thread);
+
                if (ast != AST_NONE) {
-                       (void) thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
+                       thread_block_reason(THREAD_CONTINUE_NULL, NULL, ast);
                }
        } else {
-               p = thread->last_processor;
+               processor_t last_processor = thread->last_processor;
 
-               if (p != PROCESSOR_NULL && p->state == PROCESSOR_RUNNING &&
-                   p->active_thread == thread) {
-                       cause_ast_check(p);
+               if (last_processor != PROCESSOR_NULL &&
+                   last_processor->state == PROCESSOR_RUNNING &&
+                   last_processor->active_thread == thread) {
+                       cause_ast_check(last_processor);
                }
 
                thread_unlock(thread);
        }
 
-       splx(x);
+       splx(s);
 }
 
 void
 thread_clear_eager_preempt(thread_t thread)
 {
-       spl_t x;
-
-       x = splsched();
+       spl_t s = splsched();
        thread_lock(thread);
 
+       assert(thread_is_eager_preempt(thread));
+
        thread->sched_flags &= ~TH_SFLAG_EAGERPREEMPT;
 
+       if (thread == current_thread()) {
+               current_processor()->current_is_eagerpreempt = false;
+       }
+
        thread_unlock(thread);
-       splx(x);
+       splx(s);
 }
 
 /*
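
The renamed predicate and the new assertions make set/clear a strict pair: setting the flag twice, or clearing it when unset, now trips an assert. The expected usage, sketched:

thread_t thread = current_thread();

thread_set_eager_preempt(thread);   /* asserts !thread_is_eager_preempt() */
/*
 * While the flag is set, processor_setrun() consults the cached
 * current_is_eagerpreempt bit on the processor and requests
 * AST_PREEMPT | AST_URGENT, instead of dereferencing
 * processor->active_thread.
 */
thread_clear_eager_preempt(thread); /* asserts thread_is_eager_preempt() */
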
index fd768d26fd34d0cb9d3c48c870929067a7cfbe11..e73eebbcd99db3c60df420914f1d887249858639 100644 (file)
@@ -392,8 +392,7 @@ extern void             thread_timer_expire(
        void                    *thread,
        void                    *p1);
 
-extern boolean_t        thread_eager_preemption(
-       thread_t thread);
+extern bool thread_is_eager_preempt(thread_t thread);
 
 extern boolean_t sched_generic_direct_dispatch_to_idle_processors;
 
index d2b50a60ac4f9227376e8e01002ad04222613214..481b9ef485c75e015ae1611cb4ad4c0ca7a3077c 100644 (file)
@@ -708,6 +708,10 @@ kernel_bootstrap_thread(void)
        sdt_early_init();
 #endif
 
+#ifndef BCM2837
+       kernel_bootstrap_log("trust_cache_init");
+       trust_cache_init();
+#endif
 
        kernel_startup_initialize_upto(STARTUP_SUB_LOCKDOWN);
 
@@ -914,6 +918,7 @@ load_context(
            ((thread->state & TH_IDLE) || (thread->bound_processor != PROCESSOR_NULL)) ? TH_BUCKET_SCHED_MAX : thread->th_sched_bucket);
        processor->current_is_bound = thread->bound_processor != PROCESSOR_NULL;
        processor->current_is_NO_SMT = false;
+       processor->current_is_eagerpreempt = false;
 #if CONFIG_THREAD_GROUPS
        processor->current_thread_group = thread_group_get(thread);
 #endif
index bd574100a73df7e5d846fae93df2e72652f0af9b..6c5e01b333e9d8b9d1b0ba6394bc997ea49b523d 100644 (file)
@@ -72,6 +72,7 @@ __enum_decl(startup_subsystem_id_t, uint32_t, {
        STARTUP_SUB_CODESIGNING,      /**< codesigning subsystem               */
        STARTUP_SUB_OSLOG,            /**< oslog and kernel logging            */
        STARTUP_SUB_MACH_IPC,         /**< Mach IPC                            */
+       STARTUP_SUB_SYSCTL,           /**< registers sysctls                   */
        STARTUP_SUB_EARLY_BOOT,       /**< interrupts/preemption are turned on */
 
        STARTUP_SUB_LOCKDOWN = ~0u,   /**< reserved for the startup subsystem  */
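
With the new STARTUP_SUB_SYSCTL phase, sysctl registration can be hooked through xnu's existing STARTUP() machinery; a hypothetical registration (the callback name is illustrative):

static void
example_sysctl_init(void)
{
	/* register sysctl nodes/OIDs here */
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, example_sysctl_init);
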
index 8853e5cb7b5f56a1e691c19b5eb898895fa35475..1ef23d043ac16388bf8d1ec075437f3f2b5fc3ad 100644 (file)
@@ -276,11 +276,6 @@ static long _Atomic zones_phys_page_count;
 /* number of zone mapped pages used by all zones */
 static long _Atomic zones_phys_page_mapped_count;
 
-#if CONFIG_ZALLOC_SEQUESTER
-#define ZSECURITY_OPTIONS_SEQUESTER_DEFAULT ZSECURITY_OPTIONS_SEQUESTER
-#else
-#define ZSECURITY_OPTIONS_SEQUESTER_DEFAULT 0
-#endif
 /*
  * Turn ZSECURITY_OPTIONS_STRICT_IOKIT_FREE off on x86 so as not
  * to break third party kexts that haven't yet been recompiled
@@ -294,7 +289,7 @@ static long _Atomic zones_phys_page_mapped_count;
 #endif
 
 #define ZSECURITY_DEFAULT ( \
-               ZSECURITY_OPTIONS_SEQUESTER_DEFAULT | \
+               ZSECURITY_OPTIONS_SEQUESTER | \
                ZSECURITY_OPTIONS_SUBMAP_USER_DATA | \
                ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC | \
                ZSECURITY_OPTIONS_STRICT_IOKIT_FREE_DEFAULT | \
@@ -4651,7 +4646,8 @@ zalloc_log_or_trace_leaks(zone_t zone, vm_offset_t addr)
 
 #if ZONE_ENABLE_LOGGING
        if (DO_LOGGING(zone)) {
-               numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL);
+               numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH,
+                   __builtin_frame_address(0), NULL);
                btlog_add_entry(zone->zlog_btlog, (void *)addr,
                    ZOP_ALLOC, (void **)zbt, numsaved);
        }
@@ -4666,7 +4662,8 @@ zalloc_log_or_trace_leaks(zone_t zone, vm_offset_t addr)
                if (sample_counter(&zone->zleak_capture, zleak_sample_factor)) {
                        /* Avoid backtracing twice if zone logging is on */
                        if (numsaved == 0) {
-                               numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL);
+                               numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH,
+                                   __builtin_frame_address(1), NULL);
                        }
                        /* Sampling can fail if another sample is happening at the same time in a different zone. */
                        if (!zleak_log(zbt, addr, numsaved, zone_elem_size(zone))) {
@@ -4681,7 +4678,8 @@ zalloc_log_or_trace_leaks(zone_t zone, vm_offset_t addr)
                unsigned int count, idx;
                /* Fill element, from tail, with backtrace in reverse order */
                if (numsaved == 0) {
-                       numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL);
+                       numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH,
+                           __builtin_frame_address(1), NULL);
                }
                count = (unsigned int)(zone_elem_size(zone) / sizeof(uintptr_t));
                if (count >= numsaved) {
@@ -4738,7 +4736,8 @@ zfree_log_trace(zone_t zone, vm_offset_t addr)
                         *
                         * Add a record of this zfree operation to log.
                         */
-                       numsaved = backtrace(zbt, MAX_ZTRACE_DEPTH, NULL);
+                       numsaved = backtrace_frame(zbt, MAX_ZTRACE_DEPTH,
+                           __builtin_frame_address(1), NULL);
                        btlog_add_entry(zone->zlog_btlog, (void *)addr, ZOP_FREE,
                            (void **)zbt, numsaved);
                } else {
@@ -4891,7 +4890,14 @@ zalloc_direct_locked(
 
 /*
  *     zalloc returns an element from the specified zone.
+ *
+ *     The function is noinline when zlog can be used so that the backtracing can
+ *     reliably skip the zalloc_ext() and zalloc_log_or_trace_leaks()
+ *     boring frames.
  */
+#if ZONE_ENABLE_LOGGING
+__attribute__((noinline))
+#endif
 void *
 zalloc_ext(
        zone_t          zone,
@@ -5271,6 +5277,14 @@ zfree_direct_locked(zone_t zone, vm_offset_t element, bool poison)
 #endif
 }
 
+/*
+ *     The function is noinline when zlog can be used so that the backtracing can
+ *     reliably skip the zfree_ext() and zfree_log_trace()
+ *     boring frames.
+ */
+#if ZONE_ENABLE_LOGGING
+__attribute__((noinline))
+#endif
 void
 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr)
 {
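
These hunks switch the zone loggers from backtrace() to backtrace_frame() with an explicit starting frame, and mark zalloc_ext()/zfree_ext() noinline so the number of boring frames to skip stays fixed. The distinction, in a small sketch (hypothetical helper, standard compiler builtins):

#include <stdio.h>

static void __attribute__((noinline))
show_frames(void)
{
	void *fp0 = __builtin_frame_address(0); /* this function's frame */
	void *fp1 = __builtin_frame_address(1); /* the caller's frame    */

	/* Passing the caller's frame to a backtracer (as the zalloc
	 * hunks do with backtrace_frame) omits this helper from the
	 * recorded trace; noinline keeps that depth deterministic
	 * across build configurations. */
	printf("fp0=%p fp1=%p\n", fp0, fp1);
}
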
index c9d7b44f1dde58fcba13f79165aedd960a261456..0ffbb9b6ba25a4922fc819a05da4e2e431ac9bb9 100644 (file)
@@ -176,6 +176,10 @@ extern int PAGE_SHIFT_CONST;
 #define MACH_VM_MIN_ADDRESS     ((mach_vm_offset_t) MACH_VM_MIN_ADDRESS_RAW)
 #define MACH_VM_MAX_ADDRESS     ((mach_vm_offset_t) MACH_VM_MAX_ADDRESS_RAW)
 
+#define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW 0x0000001000000000ULL
+#define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW 0x0000007000000000ULL
+#define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS     ((mach_vm_offset_t) MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW)
+#define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS     ((mach_vm_offset_t) MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW)
 
 #else /* defined(__arm64__) */
 #error architecture not supported
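
A quick sanity check of the window reserved by the new constants (values taken from the definitions above):

#include <assert.h>

/* 0x7000000000 - 0x1000000000 == 0x6000000000 bytes == 384 GiB */
static_assert((0x0000007000000000ULL - 0x0000001000000000ULL) ==
    (384ULL << 30), "GPU carveout spans 384 GiB");
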
index 4b9fef9c529bce404b220bf034d9534f9737dea7..8c124041c9838dcb003b10c77a7e489d19ff1093 100644 (file)
@@ -438,11 +438,18 @@ __END_DECLS
 #define CPUFAMILY_ARM_MONSOON_MISTRAL   0xe81e7ef6
 #define CPUFAMILY_ARM_VORTEX_TEMPEST    0x07d34b9f
 #define CPUFAMILY_ARM_LIGHTNING_THUNDER 0x462504d2
+#ifndef RC_HIDE_XNU_FIRESTORM
+#define CPUFAMILY_ARM_FIRESTORM_ICESTORM 0x1b588bb3
+#endif /* !RC_HIDE_XNU_FIRESTORM */
 
 #define CPUSUBFAMILY_UNKNOWN            0
 #define CPUSUBFAMILY_ARM_HP             1
 #define CPUSUBFAMILY_ARM_HG             2
 #define CPUSUBFAMILY_ARM_M              3
+#ifndef RC_HIDE_XNU_FIRESTORM
+#define CPUSUBFAMILY_ARM_HS             4
+#define CPUSUBFAMILY_ARM_HC_HD          5
+#endif /* !RC_HIDE_XNU_FIRESTORM */
 
 /* The following synonyms are deprecated: */
 #define CPUFAMILY_INTEL_6_23    CPUFAMILY_INTEL_PENRYN
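
From userspace, the new family constant can be checked with the existing hw.cpufamily sysctl; a small illustrative program:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <sys/sysctl.h>

int
main(void)
{
	uint32_t family = 0;
	size_t len = sizeof(family);

	if (sysctlbyname("hw.cpufamily", &family, &len, NULL, 0) == 0) {
		/* 0x1b588bb3 == CPUFAMILY_ARM_FIRESTORM_ICESTORM above */
		printf("Firestorm/Icestorm: %s\n",
		    family == 0x1b588bb3 ? "yes" : "no");
	}
	return 0;
}
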
index 40a5e6ee79232b5c95c78255bec782472ffbab4e..ea1ac1dfec195b0824678c9793735e9464ccfe98 100644 (file)
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
+#include <libkern/crypto/sha2.h>
+#include <libkern/crypto/crypto_internal.h>
+#include <os/atomic_private.h>
+#include <kern/assert.h>
+#include <kern/percpu.h>
 #include <kern/zalloc.h>
+#include <kern/lock_group.h>
+#include <kern/locks.h>
+#include <kern/misc_protos.h>
 #include <pexpert/pexpert.h>
 #include <prng/entropy.h>
-#include <crypto/entropy/diag_entropy_sysctl.h>
+#include <crypto/entropy/entropy_sysctl.h>
 #include <machine/machine_routines.h>
+#include <libkern/section_keywords.h>
+#include <sys/cdefs.h>
 
-// Use a static buffer when the entropy collection boot arg is not present and before the
-// RNG has been initialized.
-static uint32_t entropy_buffer[ENTROPY_BUFFER_SIZE];
+// The number of samples we can hold in an entropy buffer.
+#define ENTROPY_MAX_SAMPLE_COUNT (2048)
 
-entropy_data_t EntropyData = {
-       .sample_count = 0,
-       .buffer = entropy_buffer,
-       .buffer_size = ENTROPY_BUFFER_SIZE,
-       .buffer_index_mask = ENTROPY_BUFFER_SIZE - 1,
-       .ror_mask = -1
+// The state for a per-CPU entropy buffer.
+typedef struct entropy_cpu_data {
+       // A buffer to hold entropy samples.
+       entropy_sample_t samples[ENTROPY_MAX_SAMPLE_COUNT];
+
+       // A count of samples resident in the buffer. It also functions as
+       // an index to the buffer. All entries at indices less than the
+       // sample count are considered valid for consumption by the
+       // reader. The reader resets this to zero after consuming the
+       // available entropy.
+       uint32_t _Atomic sample_count;
+} entropy_cpu_data_t;
+
+// This structure holds the state for an instance of a FIPS continuous
+// health test. In practice, we do not expect these tests to fail.
+typedef struct entropy_health_test {
+       // The initial sample observed in this test instance. Tests look
+       // for some repetition of the sample, either consecutively or
+       // within a window.
+       entropy_sample_t init_observation;
+
+       // The count of times the initial observation has recurred within
+       // the span of the current test.
+       uint64_t observation_count;
+
+       // The statistics are only relevant for telemetry and parameter
+       // tuning. They do not drive any actual logic in the module.
+       entropy_health_stats_t *stats;
+} entropy_health_test_t;
+
+typedef enum health_test_result {
+       health_test_failure,
+       health_test_success
+} health_test_result_t;
+
+// Along with various counters and the buffer itself, this includes
+// the state for two FIPS continuous health tests.
+typedef struct entropy_data {
+       // State for a SHA256 computation. This is used to accumulate
+       // entropy samples from across all CPUs. It is finalized when
+       // entropy is provided to the consumer of this module.
+       SHA256_CTX sha256_ctx;
+
+       // Since the corecrypto kext is not loaded when this module is
+       // initialized, we cannot initialize the SHA256 state at that
+       // time. Instead, we initialize it lazily during entropy
+       // consumption. This flag tracks whether initialization is
+       // complete.
+       bool sha256_ctx_init;
+
+       // A total count of entropy samples that have passed through this
+       // structure. It is incremented as new samples are accumulated
+       // from the various per-CPU structures. The "current" count of
+       // samples is the difference between this field and the "read"
+       // sample count below (which see).
+       uint64_t total_sample_count;
+
+       // Initially zero, this flag is reset to the current sample count
+       // if and when we fail a health test. We consider the startup
+       // health tests to be complete when the difference between the
+       // total sample count and this field is at least 1024. In other
+       // words, we must accumulate 1024 good samples to demonstrate
+       // viability. We refuse to provide any entropy before that
+       // threshold is reached.
+       uint64_t startup_sample_count;
+
+       // The count of samples from the last time we provided entropy to
+       // the kernel RNG. We use this to compute how many new samples we
+       // have to contribute. This value is also reset to the current
+       // sample count in case of health test failure.
+       uint64_t read_sample_count;
+
+       // The lock group for this structure; see below.
+       lck_grp_t lock_group;
+
+       // This structure accumulates entropy samples from across all CPUs
+       // for a single point of consumption protected by a mutex.
+       lck_mtx_t mutex;
+
+       // State for the Repetition Count Test.
+       entropy_health_test_t repetition_count_test;
+
+       // State for the Adaptive Proportion Test.
+       entropy_health_test_t adaptive_proportion_test;
+} entropy_data_t;
+
+static entropy_cpu_data_t PERCPU_DATA(entropy_cpu_data);
+
+int entropy_health_startup_done;
+entropy_health_stats_t entropy_health_rct_stats;
+entropy_health_stats_t entropy_health_apt_stats;
+
+static entropy_data_t entropy_data = {
+       .repetition_count_test = {
+               .init_observation = -1,
+               .stats = &entropy_health_rct_stats,
+       },
+       .adaptive_proportion_test = {
+               .init_observation = -1,
+               .stats = &entropy_health_apt_stats,
+       },
 };
 
+__security_const_late entropy_sample_t *entropy_analysis_buffer;
+__security_const_late uint32_t entropy_analysis_buffer_size;
+static __security_const_late uint32_t entropy_analysis_max_sample_count;
+static uint32_t entropy_analysis_sample_count;
+
+__startup_func
+static void
+entropy_analysis_init(uint32_t sample_count)
+{
+       entropy_analysis_max_sample_count = sample_count;
+       entropy_analysis_buffer_size = sample_count * sizeof(entropy_sample_t);
+       entropy_analysis_buffer = zalloc_permanent(entropy_analysis_buffer_size, ZALIGN(entropy_sample_t));
+       entropy_analysis_register_sysctls();
+}
+
+__startup_func
+void
+entropy_init(void)
+{
+       lck_grp_init(&entropy_data.lock_group, "entropy-data", LCK_GRP_ATTR_NULL);
+       lck_mtx_init(&entropy_data.mutex, &entropy_data.lock_group, LCK_ATTR_NULL);
+
+       // The below path is used only for testing. This boot arg is used
+       // to collect raw entropy samples for offline analysis. The "ebsz"
+       // name is supported only until dependent tools can be updated to
+       // use the more descriptive "entropy-analysis-sample-count".
+       uint32_t sample_count = 0;
+       if (__improbable(PE_parse_boot_argn("entropy-analysis-sample-count", &sample_count, sizeof(sample_count)))) {
+               entropy_analysis_init(sample_count);
+       } else if (__improbable(PE_parse_boot_argn("ebsz", &sample_count, sizeof(sample_count)))) {
+               entropy_analysis_init(sample_count);
+       }
+}
+
 void
-entropy_buffer_init(void)
+entropy_collect(void)
+{
+       // This function is called from within the interrupt handler, so
+       // we do not need to disable interrupts.
+
+       entropy_cpu_data_t *e = PERCPU_GET(entropy_cpu_data);
+
+       uint32_t sample_count = os_atomic_load(&e->sample_count, relaxed);
+
+       assert(sample_count <= ENTROPY_MAX_SAMPLE_COUNT);
+
+       // If the buffer is full, we return early without collecting
+       // entropy.
+       if (sample_count == ENTROPY_MAX_SAMPLE_COUNT) {
+               return;
+       }
+
+       e->samples[sample_count] = (entropy_sample_t)ml_get_timebase_entropy();
+
+       // If the consumer has reset the sample count on us, the only
+       // consequence is a dropped sample. We effectively abort the
+       // entropy collection in this case.
+       (void)os_atomic_cmpxchg(&e->sample_count, sample_count, sample_count + 1, release);
+}
+
+// For information on the following tests, see NIST SP 800-90B 4
+// Health Tests. These tests are intended to detect catastrophic
+// degradations in entropy. As noted in that document:
+//
+// > Health tests are expected to raise an alarm in three cases:
+// > 1. When there is a significant decrease in the entropy of the
+// > outputs,
+// > 2. When noise source failures occur, or
+// > 3. When hardware fails, and implementations do not work
+// > correctly.
+//
+// Each entropy accumulator declines to release entropy until the
+// startup tests required by NIST are complete. In the event that a
+// health test does fail, all entropy accumulators are reset and
+// decline to release further entropy until their startup tests can be
+// repeated.
+
+static health_test_result_t
+add_observation(entropy_health_test_t *t, uint64_t bound)
+{
+       t->observation_count += 1;
+       t->stats->max_observation_count = MAX(t->stats->max_observation_count, (uint32_t)t->observation_count);
+       if (__improbable(t->observation_count >= bound)) {
+               t->stats->failure_count += 1;
+               return health_test_failure;
+       }
+
+       return health_test_success;
+}
+
+static void
+reset_test(entropy_health_test_t *t, entropy_sample_t observation)
+{
+       t->stats->reset_count += 1;
+       t->init_observation = observation;
+       t->observation_count = 1;
+       t->stats->max_observation_count = MAX(t->stats->max_observation_count, (uint32_t)t->observation_count);
+}
+
+// 4.4.1 Repetition Count Test
+//
+// Like the name implies, this test counts consecutive occurrences of
+// the same value.
+//
+// We compute the bound C as:
+//
+// A = 2^-128
+// H = 1
+// C = 1 + ceil(-log(A, 2) / H) = 129
+//
+// With A the acceptable chance of false positive and H a conservative
+// estimate for the entropy (in bits) of each sample.
+
+#define REPETITION_COUNT_BOUND (129)
+
+static health_test_result_t
+repetition_count_test(entropy_sample_t observation)
+{
+       entropy_health_test_t *t = &entropy_data.repetition_count_test;
+
+       if (t->init_observation == observation) {
+               return add_observation(t, REPETITION_COUNT_BOUND);
+       } else {
+               reset_test(t, observation);
+       }
+
+       return health_test_success;
+}
+
+// 4.4.2 Adaptive Proportion Test
+//
+// This test counts occurrences of a value within a window of samples.
+//
+// We use a non-binary alphabet, giving us a window size of 512. (In
+// particular, we consider the least-significant byte of each time
+// sample.)
+//
+// Assuming one bit of entropy, we can compute the binomial cumulative
+// distribution function over 512 trials in SageMath as:
+//
+// k = var('k')
+// f(x) = sum(binomial(512, k), k, x, 512) / 2^512
+//
+// We compute the bound C as the minimal x for which:
+//
+// f(x) < 2^-128
+//
+// is true.
+//
+// Empirically, we have C = 400.
+
+#define ADAPTIVE_PROPORTION_BOUND (400)
+#define ADAPTIVE_PROPORTION_WINDOW (512)
+
+// This mask definition requires the window be a power of two.
+static_assert(__builtin_popcount(ADAPTIVE_PROPORTION_WINDOW) == 1);
+#define ADAPTIVE_PROPORTION_INDEX_MASK (ADAPTIVE_PROPORTION_WINDOW - 1)
+
+static health_test_result_t
+adaptive_proportion_test(entropy_sample_t observation, uint32_t offset)
+{
+       entropy_health_test_t *t = &entropy_data.adaptive_proportion_test;
+
+       // We work in windows of size ADAPTIVE_PROPORTION_WINDOW, so we
+       // can compute our index by taking the entropy buffer's overall
+       // sample count plus the offset of this observation modulo the
+       // window size.
+       uint32_t index = (entropy_data.total_sample_count + offset) & ADAPTIVE_PROPORTION_INDEX_MASK;
+
+       if (index == 0) {
+               reset_test(t, observation);
+       } else if (t->init_observation == observation) {
+               return add_observation(t, ADAPTIVE_PROPORTION_BOUND);
+       }
+
+       return health_test_success;
+}
+
+static health_test_result_t
+entropy_health_test(uint32_t sample_count, entropy_sample_t *samples)
+{
+       health_test_result_t result = health_test_success;
+
+       for (uint32_t i = 0; i < sample_count; i += 1) {
+               // We only consider the low bits of each sample, since that is
+               // where we expect the entropy to be concentrated.
+               entropy_sample_t observation = samples[i] & 0xff;
+
+               if (__improbable(repetition_count_test(observation) == health_test_failure)) {
+                       result = health_test_failure;
+               }
+
+               if (__improbable(adaptive_proportion_test(observation, i) == health_test_failure)) {
+                       result = health_test_failure;
+               }
+       }
+
+       return result;
+}
+
+static void
+entropy_analysis_store(uint32_t sample_count, entropy_sample_t *samples)
+{
+       lck_mtx_assert(&entropy_data.mutex, LCK_MTX_ASSERT_OWNED);
+
+       sample_count = MIN(sample_count, (entropy_analysis_max_sample_count - entropy_analysis_sample_count));
+       if (sample_count == 0) {
+               return;
+       }
+
+       size_t size = sample_count * sizeof(samples[0]);
+       memcpy(&entropy_analysis_buffer[entropy_analysis_sample_count], samples, size);
+       entropy_analysis_sample_count += sample_count;
+}
+
+int32_t
+entropy_provide(size_t *entropy_size, void *entropy, __unused void *arg)
 {
-       uint32_t ebsz = 0;
-       uint32_t *bp;
+#if (DEVELOPMENT || DEBUG)
+       if (*entropy_size < SHA256_DIGEST_LENGTH) {
+               panic("[entropy_provide] recipient entropy buffer is too small\n");
+       }
+#endif
+
+       int32_t sample_count = 0;
+       *entropy_size = 0;
+
+       // The first call to this function comes while the corecrypto kext
+       // is being loaded. We require SHA256 to accumulate entropy
+       // samples.
+       if (__improbable(!g_crypto_funcs)) {
+               return sample_count;
+       }
+
+       // There is only one consumer (the kernel PRNG), but they could
+       // try to consume entropy from different threads. We simply fail
+       // if a consumption is already in progress.
+       if (!lck_mtx_try_lock(&entropy_data.mutex)) {
+               return sample_count;
+       }
+
+       // This only happens on the first call to this function. We cannot
+       // perform this initialization in entropy_init because the
+       // corecrypto kext is not loaded yet.
+       if (__improbable(!entropy_data.sha256_ctx_init)) {
+               SHA256_Init(&entropy_data.sha256_ctx);
+               entropy_data.sha256_ctx_init = true;
+       }
+
+       health_test_result_t health_test_result = health_test_success;
+
+       // We accumulate entropy from all CPUs.
+       percpu_foreach(e, entropy_cpu_data) {
+               // On each CPU, the sample count functions as an index into
+               // the entropy buffer. All samples before that index are valid
+               // for consumption.
+               uint32_t cpu_sample_count = os_atomic_load(&e->sample_count, acquire);
+
+               assert(cpu_sample_count <= ENTROPY_MAX_SAMPLE_COUNT);
 
-       if (PE_parse_boot_argn("ebsz", &ebsz, sizeof(ebsz))) {
-               if (((ebsz & (ebsz - 1)) != 0) || (ebsz < 32)) {
-                       panic("entropy_buffer_init: entropy buffer size must be a power of 2 and >= 32\n");
+               // The health test depends in part on the current state of
+               // the entropy data, so we test the new sample before
+               // accumulating it.
+               if (__improbable(entropy_health_test(cpu_sample_count, e->samples) == health_test_failure)) {
+                       health_test_result = health_test_failure;
                }
 
-               register_entropy_sysctl();
+               // We accumulate the samples regardless of whether the test
+               // failed. It cannot hurt.
+               entropy_data.total_sample_count += cpu_sample_count;
+               SHA256_Update(&entropy_data.sha256_ctx, e->samples, cpu_sample_count * sizeof(e->samples[0]));
 
-               bp = zalloc_permanent(sizeof(uint32_t) * ebsz, ZALIGN(uint32_t));
+               // This code path is only used for testing. Its use is governed by
+               // a boot arg; see its initialization above.
+               if (__improbable(entropy_analysis_buffer)) {
+                       entropy_analysis_store(cpu_sample_count, e->samples);
+               }
+
+               // "Drain" the per-CPU buffer by resetting its sample count.
+               os_atomic_store(&e->sample_count, 0, relaxed);
+       }
 
-               boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
-               EntropyData.buffer = bp;
-               EntropyData.sample_count = 0;
-               EntropyData.buffer_size = sizeof(uint32_t) * ebsz;
-               EntropyData.buffer_index_mask = ebsz - 1;
-               EntropyData.ror_mask = 0;
-               ml_set_interrupts_enabled(interrupt_state);
+       // We expect this never to happen.
+       //
+       // But if it does happen, we need to return negative to signal the
+       // consumer (i.e. the kernel PRNG) that there has been a failure.
+       if (__improbable(health_test_result == health_test_failure)) {
+               entropy_health_startup_done = 0;
+               entropy_data.startup_sample_count = entropy_data.total_sample_count;
+               entropy_data.read_sample_count = entropy_data.total_sample_count;
+               sample_count = -1;
+               goto out;
        }
+
+       // FIPS requires that we pass our startup health tests before
+       // providing any entropy. This condition is true only during startup
+       // and after a reset due to test failure.
+       if (__improbable((entropy_data.total_sample_count - entropy_data.startup_sample_count) < 1024)) {
+               goto out;
+       }
+
+       entropy_health_startup_done = 1;
+
+       // The count of new samples from the consumer's perspective.
+       int32_t n = (int32_t)(entropy_data.total_sample_count - entropy_data.read_sample_count);
+
+       // For performance reasons, we require a small threshold of
+       // samples to have built up before we provide any to the PRNG.
+       if (n < 32) {
+               goto out;
+       }
+
+       SHA256_Final(entropy, &entropy_data.sha256_ctx);
+       SHA256_Init(&entropy_data.sha256_ctx);
+       entropy_data.read_sample_count = entropy_data.total_sample_count;
+
+       sample_count = n;
+       *entropy_size = SHA256_DIGEST_LENGTH;
+
+out:
+       lck_mtx_unlock(&entropy_data.mutex);
+
+       return sample_count;
 }
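
The return-value contract of entropy_provide above is worth spelling out: negative means a health-test failure, zero means not enough new samples have accumulated, and a positive count means the output buffer now holds a SHA-256 digest covering that many raw samples. A minimal consumer sketch under that contract (prng_absorb is a hypothetical hook, not a real corecrypto call):

    #include <stddef.h>
    #include <stdint.h>

    extern int32_t entropy_provide(size_t *entropy_size, void *entropy, void *arg);
    extern void prng_absorb(const void *seed, size_t len); /* hypothetical */

    static void
    reseed_from_interrupt_entropy(void)
    {
            uint8_t seed[32];       /* SHA256_DIGEST_LENGTH */
            size_t seed_size = sizeof(seed);
            int32_t n = entropy_provide(&seed_size, seed, NULL);

            if (n < 0) {
                    /* Health-test failure: the module resets its counters and
                     * withholds output until 1024 fresh samples pass again. */
                    return;
            }
            if (n > 0) {
                    /* seed holds seed_size (== 32) bytes of conditioned
                     * entropy distilled from n raw timing samples. */
                    prng_absorb(seed, seed_size);
            }
            /* n == 0: fewer than 32 new samples since the last read. */
    }
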
index 101c9343d6c6ae33bce6545a732ca6ad2e4fde58..171fc17ab689a4ddf049cc1e73db372a25cf898f 100644 (file)
 #ifndef _PRNG_ENTROPY_H_
 #define _PRNG_ENTROPY_H_
 
-__BEGIN_DECLS
-
-#ifdef XNU_KERNEL_PRIVATE
-
-// The below three definitions are utilized when the kernel is in
-// "normal" operation, that is when we are *not* interested in collecting
-// entropy.
-
-// Indicates the number of bytes in the entropy buffer
-#define ENTROPY_BUFFER_BYTE_SIZE 32
-
-// Indicates the number of uint32_t's in the entropy buffer
-#define ENTROPY_BUFFER_SIZE (ENTROPY_BUFFER_BYTE_SIZE / sizeof(uint32_t))
-
-// Mask applied to EntropyData.sample_count to get an
-// index suitable for storing the next sample in
-// EntropyData.buffer. Note that ENTROPY_BUFFER_SIZE must be a power
-// of two for the following mask calculation to be valid.
-#define ENTROPY_BUFFER_INDEX_MASK (ENTROPY_BUFFER_SIZE - 1)
-
-typedef struct entropy_data {
-       /*
-        * TODO: Should sample_count be volatile?  Are we exposed to any races that
-        * we care about if it is not?
-        */
-
-       // At 32 bits, this counter can overflow. Since we're primarily
-       // interested in the delta from one read to the next, we don't
-       // worry about this too much.
-       uint32_t sample_count;
+#include <kern/kern_types.h>
+#include <sys/cdefs.h>
 
-       // We point to either a static array when operating normally or
-       // a dynamically allocated array when we wish to collect entropy
-       // data. This decision is based on the presence of the boot
-       // argument "ebsz".
-       uint32_t *buffer;
-
-       // The entropy buffer size in bytes. This must be a power of 2.
-       uint32_t buffer_size;
-
-       // The mask used to index into the entropy buffer for storing
-       // the next entropy sample.
-       uint32_t buffer_index_mask;
-
-       // The mask used to include the previous entropy buffer contents
-       // when updating the entropy buffer. When in entropy collection
-       // mode this is set to zero so that we can gather the raw entropy.
-       // In normal operation this is set to (uint32_t) -1.
-       uint32_t ror_mask;
-} entropy_data_t;
-
-extern entropy_data_t EntropyData;
-
-/* Trace codes for DBG_SEC_KERNEL: */
-#define ENTROPY_READ(n) SECURITYDBG_CODE(DBG_SEC_KERNEL, n) /* n: 0 .. 3 */
-
-#endif /* XNU_KERNEL_PRIVATE */
+__BEGIN_DECLS
 
-void entropy_buffer_init(void);
+// This module is used to accumulate entropy from hardware interrupts
+// for consumption by a higher-level PRNG.
+//
+// The raw entropy samples are collected from CPU counters during
+// hardware interrupts. We do not perform synchronization before
+// reading the counter (unlike ml_get_timebase and similar functions).
+//
+// This entropy accumulator performs continuous health tests in
+// accordance with NIST SP 800-90B. The parameters have been chosen
+// with the expectation that test failures should never occur except
+// in case of catastrophic hardware failure.
+
+typedef uint32_t entropy_sample_t;
+
+// Called during startup to initialize internal data structures.
+void entropy_init(void);
+
+// Called during hardware interrupts to collect entropy in per-CPU
+// structures.
+void entropy_collect(void);
+
+// Called by the higher-level PRNG. The module performs continuous
+// health tests and decides whether to release entropy based on the
+// values of various counters. Returns negative in case of error
+// (e.g. health test failure).
+int32_t entropy_provide(size_t *entropy_size, void *entropy, void *arg);
+
+extern entropy_sample_t *entropy_analysis_buffer;
+extern uint32_t entropy_analysis_buffer_size;
+
+typedef struct entropy_health_stats {
+       // A total count of times the test has been reset with a new
+       // initial observation. This can be thought of as the number of
+       // tests, but note that a single "test" can theoretically accrue
+       // multiple failures.
+       uint32_t reset_count;
+
+       // A total count of failures of this test instance since
+       // boot. Since we do not expect any test failures (ever) in
+       // practice, this counter should always be zero.
+       uint32_t failure_count;
+
+       // The maximum count of times an initial observation has recurred
+       // across all instances of this test.
+       uint32_t max_observation_count;
+} entropy_health_stats_t;
+
+extern int entropy_health_startup_done;
+extern entropy_health_stats_t entropy_health_rct_stats;
+extern entropy_health_stats_t entropy_health_apt_stats;
 
 __END_DECLS
 
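The header above declares stats for the two continuous health tests that NIST SP 800-90B mandates, the repetition count test (RCT) and the adaptive proportion test (APT), without showing their logic. As a rough sketch of what an RCT looks like, using this header's types; the cutoff below is illustrative, not the kernel's actual parameter:

    #include <stdbool.h>

    #define RCT_CUTOFF 9    /* illustrative cutoff, not the kernel's value */

    static entropy_sample_t rct_last;
    static uint32_t rct_count;

    static bool
    rct_test(entropy_sample_t sample, entropy_health_stats_t *stats)
    {
            if (sample == rct_last) {
                    rct_count += 1;
                    if (rct_count > stats->max_observation_count) {
                            stats->max_observation_count = rct_count;
                    }
                    if (rct_count >= RCT_CUTOFF) {
                            stats->failure_count += 1;
                            return false;   /* health test failure */
                    }
            } else {
                    /* New initial observation: reset the test. */
                    rct_last = sample;
                    rct_count = 1;
                    stats->reset_count += 1;
            }
            return true;
    }
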
index 31a59996b3c31e3af245a3d125eba747fa4ada43..b52ea68a370f64d63ef820da082684ac0cefa969 100644 (file)
@@ -46,8 +46,6 @@ static struct cckprng_ctx *prng_ctx;
 static SECURITY_READ_ONLY_LATE(struct cckprng_funcs) prng_funcs;
 static SECURITY_READ_ONLY_LATE(int) prng_ready;
 
-extern entropy_data_t EntropyData;
-
 #define SEED_SIZE (SHA256_DIGEST_LENGTH)
 static uint8_t bootseed[SEED_SIZE];
 
@@ -268,13 +266,13 @@ register_and_init_prng(struct cckprng_ctx *ctx, const struct cckprng_funcs *func
        assert(cpu_number() == master_cpu);
        assert(!prng_ready);
 
-       entropy_buffer_init();
+       entropy_init();
 
        prng_ctx = ctx;
        prng_funcs = *funcs;
 
        uint64_t nonce = ml_get_timebase();
-       prng_funcs.init(prng_ctx, MAX_CPUS, EntropyData.buffer_size, EntropyData.buffer, &EntropyData.sample_count, sizeof(bootseed), bootseed, sizeof(nonce), &nonce);
+       prng_funcs.init_with_getentropy(prng_ctx, MAX_CPUS, sizeof(bootseed), bootseed, sizeof(nonce), &nonce, entropy_provide, NULL);
        prng_funcs.initgen(prng_ctx, master_cpu);
        prng_ready = 1;
 
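The seeding change above replaces the old scheme, in which the PRNG polled a shared buffer and sample counter directly, with a callback: entropy_provide is handed to corecrypto as a getentropy-style function. A rough model of the consuming side, under the assumption that init_with_getentropy simply stores the callback for later reseeds (this is not corecrypto's actual implementation):

    #include <stddef.h>
    #include <stdint.h>

    typedef int32_t (*getentropy_fn)(size_t *entropy_size, void *entropy, void *arg);

    struct toy_prng {
            getentropy_fn getentropy;
            void         *getentropy_arg;
    };

    static void
    toy_prng_reseed(struct toy_prng *prng)
    {
            uint8_t buf[32];
            size_t size = sizeof(buf);
            int32_t n = prng->getentropy(&size, buf, prng->getentropy_arg);
            if (n > 0) {
                    /* mix buf[0..size) into the PRNG state, crediting n samples */
            }
    }
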
index b4bdf207169e737d7d6b29c668f94d7967162bd5..e413fa55e145a2da7563b53141b7e8ad36a2c641 100644 (file)
@@ -32,9 +32,6 @@
 #include <kern/thread.h>
 #if defined(__arm64__)
 #include <pexpert/arm64/board_config.h>
-#if XNU_MONITOR
-#include <arm64/ppl/tests/shart.h>
-#endif
 #endif
 
 extern ledger_template_t task_ledger_template;
@@ -125,171 +122,7 @@ test_pmap_enter_disconnect(unsigned int num_loops)
 kern_return_t
 test_pmap_iommu_disconnect(void)
 {
-#if XNU_MONITOR
-       kern_return_t kr = KERN_SUCCESS;
-       pmap_t new_pmap = pmap_create_wrapper(0);
-
-       vm_page_t m = vm_page_grab();
-
-       vm_page_lock_queues();
-       if (m != VM_PAGE_NULL) {
-               vm_page_wire(m, VM_KERN_MEMORY_PTE, TRUE);
-       }
-       vm_page_unlock_queues();
-
-       shart_ppl *iommu = NULL;
-       kr = pmap_iommu_init(shart_get_desc(), "sharttest0", NULL, 0, (ppl_iommu_state**)(&iommu));
-
-       if (kr != KERN_SUCCESS) {
-               goto cleanup;
-       }
-
-       if ((new_pmap == NULL) || (m == VM_PAGE_NULL) || (iommu == NULL)) {
-               kr = KERN_FAILURE;
-               goto cleanup;
-       }
-
-       ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
-
-       const ppl_iommu_seg shart_segs[] = {
-               {.iova = 0,
-                .paddr = ptoa(phys_page),
-                .nbytes = PAGE_SIZE,
-                .prot = VM_PROT_READ,
-                .refcon = 0},
-
-               {.iova = 1,
-                .paddr = ptoa(phys_page),
-                .nbytes = PAGE_SIZE,
-                .prot = VM_PROT_READ | VM_PROT_WRITE,
-                .refcon = 0},
-
-               {.iova = 2,
-                .paddr = ptoa(phys_page),
-                .nbytes = PAGE_SIZE,
-                .prot = VM_PROT_READ,
-                .refcon = 0},
-
-               {.iova = 3,
-                .paddr = ptoa(phys_page),
-                .nbytes = PAGE_SIZE,
-                .prot = VM_PROT_READ,
-                .refcon = 0}
-       };
-
-       /* Phase 1: one CPU mapping */
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 2: two CPU mappings */
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 3: one IOMMU mapping */
-       kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 4: two IOMMU mappings */
-       kr = pmap_iommu_map(&iommu->super, shart_segs, 2, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, &shart_segs[1], 1, 0, NULL);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, shart_segs, 1, 0, NULL);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 5: combined CPU and IOMMU mappings */
-       kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 2, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_iommu_map(&iommu->super, &shart_segs[3], 1, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 6: differently combined CPU and IOMMU mappings */
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA, phys_page, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_iommu_map(&iommu->super, &shart_segs[1], 3, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA + PAGE_SIZE, phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_iommu_map(&iommu->super, shart_segs, 1, 0, NULL);
-       assert(kr == KERN_SUCCESS);
-       kr = pmap_enter(new_pmap, PMAP_TEST_VA + (2 * PAGE_SIZE), phys_page, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, FALSE);
-       assert(kr == KERN_SUCCESS);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, &shart_segs[2], 1, 0, NULL);
-       assert(!pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(!pmap_verify_free(phys_page));
-       pmap_iommu_unmap(&iommu->super, shart_segs, 4, 0, NULL);
-       assert(pmap_verify_free(phys_page));
-       pmap_disconnect(phys_page);
-       assert(pmap_verify_free(phys_page));
-
-       /* Phase 7: allocate contiguous memory and hand it to the shart */
-       shart_more more_shart;
-       more_shart.nbytes = (PAGE_SIZE * 5) + 42;
-       more_shart.baseaddr = pmap_iommu_alloc_contiguous_pages(&iommu->super, more_shart.nbytes, 0, 0, VM_WIMG_DEFAULT);
-       assert(more_shart.baseaddr != 0);
-
-       kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
-       assert(kr == KERN_SUCCESS);
-       assert(iommu->extra_memory == more_shart.baseaddr);
-       assert(iommu->extra_bytes == more_shart.nbytes);
-
-       more_shart.baseaddr += PAGE_SIZE;
-       more_shart.nbytes -= PAGE_SIZE;
-       kr = pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_MORE, &more_shart, sizeof(more_shart), NULL, 0);
-       assert(kr == KERN_NOT_SUPPORTED);
-       kr = KERN_SUCCESS;
-       assert(iommu->extra_memory == (more_shart.baseaddr - PAGE_SIZE));
-       assert(iommu->extra_bytes == (more_shart.nbytes + PAGE_SIZE));
-
-cleanup:
-
-       if (iommu != NULL) {
-               pmap_iommu_ioctl(&iommu->super, SHART_IOCTL_TEARDOWN, NULL, 0, NULL, 0);
-       }
-       vm_page_lock_queues();
-       if (m != VM_PAGE_NULL) {
-               vm_page_free(m);
-       }
-       vm_page_unlock_queues();
-       if (new_pmap != NULL) {
-               pmap_destroy(new_pmap);
-       }
-
-       return kr;
-#else
        return KERN_SUCCESS;
-#endif
 }
 
 
index d97f5b5e744fa68e4967b80526ed42885dcc275b..c8aaa014375f917b944a91b6c9b61e621d9f627c 100644 (file)
@@ -694,7 +694,11 @@ extern pmap_t   kernel_pmap;                    /* The kernel's map */
 #else
 
 #define PMAP_CREATE_STAGE2         0
+#if __arm64e__
+#define PMAP_CREATE_DISABLE_JOP    0x4
+#else
 #define PMAP_CREATE_DISABLE_JOP    0
+#endif
 #if __ARM_MIXED_PAGE_SIZE__
 #define PMAP_CREATE_FORCE_4K_PAGES 0x8
 #else
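One note on the PMAP_CREATE_DISABLE_JOP hunk above: giving the flag a real value (0x4) only under __arm64e__, and 0 elsewhere, lets callers pass it unconditionally, since ORing in 0 is a no-op. A sketch, assuming the usual pmap_create_options(ledger, size, flags) signature:

    /* On non-arm64e configs PMAP_CREATE_DISABLE_JOP is 0, so this is
     * equivalent to passing no flag at all. */
    pmap_t p = pmap_create_options(NULL, 0, PMAP_CREATE_DISABLE_JOP);
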
index fb7ce25443893e3b32b64722171e3ad069eb8e32..43814d16ee0d6dcfe424ddd333feb86ce0169aa8 100644 (file)
@@ -208,10 +208,6 @@ unsigned long vm_cs_revalidates = 0;
 unsigned long vm_cs_query_modified = 0;
 unsigned long vm_cs_validated_dirtied = 0;
 unsigned long vm_cs_bitmap_validated = 0;
-#if PMAP_CS
-uint64_t vm_cs_defer_to_pmap_cs = 0;
-uint64_t vm_cs_defer_to_pmap_cs_not = 0;
-#endif /* PMAP_CS */
 
 void vm_pre_fault(vm_map_offset_t, vm_prot_t);
 
@@ -2550,28 +2546,7 @@ vm_fault_cs_check_violation(
                /* VM map is locked, so 1 ref will remain on VM object -
                 * so no harm if vm_page_validate_cs drops the object lock */
 
-#if PMAP_CS
-               if (fault_info->pmap_cs_associated &&
-                   pmap_cs_enforced(pmap) &&
-                   !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
-                   !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
-                   !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
-                   (prot & VM_PROT_EXECUTE) &&
-                   (caller_prot & VM_PROT_EXECUTE)) {
-                       /*
-                        * With pmap_cs, the pmap layer will validate the
-                        * code signature for any executable pmap mapping.
-                        * No need for us to validate this page too:
-                        * in pmap_cs we trust...
-                        */
-                       vm_cs_defer_to_pmap_cs++;
-               } else {
-                       vm_cs_defer_to_pmap_cs_not++;
-                       vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
-               }
-#else /* PMAP_CS */
                vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
-#endif /* PMAP_CS */
        }
 
        /* If the map is switched, and is switch-protected, we must protect
@@ -2647,15 +2622,6 @@ vm_fault_cs_check_violation(
                *cs_violation = TRUE;
        } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
            (prot & VM_PROT_EXECUTE)
-#if PMAP_CS
-           /*
-            * Executable pages will be validated by pmap_cs;
-            * in pmap_cs we trust...
-            * If pmap_cs is turned off, this is a code-signing
-            * violation.
-            */
-           && !(pmap_cs_enforced(pmap))
-#endif /* PMAP_CS */
            ) {
                *cs_violation = TRUE;
        } else {
@@ -3330,23 +3296,6 @@ vm_fault_attempt_pmap_enter(
            wired,
            pmap_options,
            kr);
-#if PMAP_CS
-       /*
-        * Retry without execute permission if we encountered a codesigning
-        * failure on a non-execute fault.  This allows applications which
-        * don't actually need to execute code to still map it for read access.
-        */
-       if ((kr == KERN_CODESIGN_ERROR) && pmap_cs_enforced(pmap) &&
-           (*prot & VM_PROT_EXECUTE) && !(caller_prot & VM_PROT_EXECUTE)) {
-               *prot &= ~VM_PROT_EXECUTE;
-               PMAP_ENTER_OPTIONS(pmap, vaddr,
-                   fault_phys_offset,
-                   m, *prot, fault_type, 0,
-                   wired,
-                   pmap_options,
-                   kr);
-       }
-#endif
        return kr;
 }
 
@@ -3540,11 +3489,6 @@ vm_fault_enter_prepare(
                kasan_notify_address(vaddr, PAGE_SIZE);
        }
 #endif
-#if PMAP_CS
-       if (pmap_cs_exempt(pmap)) {
-               cs_bypass = TRUE;
-       }
-#endif
 
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
 
index 4ba91fb042cff097248cf6c28ab21c43129fbe23..f3b6248447bb3b7f31ca58f35ffc0cbb5f80497f 100644 (file)
@@ -399,13 +399,8 @@ vm_map_entry_copy_pmap_cs_assoc(
        vm_map_entry_t new __unused,
        vm_map_entry_t old __unused)
 {
-#if PMAP_CS
-       /* when pmap_cs is enabled, we want to reset on copy */
-       new->pmap_cs_associated = FALSE;
-#else /* PMAP_CS */
        /* when pmap_cs is not enabled, assert as a sanity check */
        assert(new->pmap_cs_associated == FALSE);
-#endif /* PMAP_CS */
 }
 
 /*
@@ -2489,9 +2484,6 @@ vm_map_enter(
 #endif /* __arm64__ */
            ))) &&
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-           !pmap_cs_exempt(map->pmap) &&
-#endif
            (VM_MAP_POLICY_WX_FAIL(map) ||
            VM_MAP_POLICY_WX_STRIP_X(map)) &&
            !entry_for_jit) {
@@ -3640,9 +3632,6 @@ vm_map_enter_fourk(
 #endif /* __arm64__ */
            ) &&
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-           !pmap_cs_exempt(map->pmap) &&
-#endif
            !entry_for_jit) {
                DTRACE_VM3(cs_wx,
                    uint64_t, 0,
@@ -6164,11 +6153,6 @@ vm_map_protect(
                }
 
                new_max = current->max_protection;
-#if PMAP_CS
-               if (set_max && (new_prot & VM_PROT_EXECUTE) && pmap_cs_exempt(map->pmap)) {
-                       new_max |= VM_PROT_EXECUTE;
-               }
-#endif
                if ((new_prot & new_max) != new_prot) {
                        vm_map_unlock(map);
                        return KERN_PROTECTION_FAILURE;
@@ -6184,9 +6168,6 @@ vm_map_protect(
 #endif /* __arm64__ */
                    ) &&
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-                   !pmap_cs_exempt(map->pmap) &&
-#endif
                    !(current->used_for_jit)) {
                        DTRACE_VM3(cs_wx,
                            uint64_t, (uint64_t) current->vme_start,
@@ -6610,11 +6591,6 @@ vm_map_wire_nested(
                extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE;
        }
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-       if (pmap_cs_exempt(map->pmap)) {
-               extra_prots &= ~VM_PROT_COPY_FAIL_IF_EXECUTABLE;
-       }
-#endif /* PMAP_CS */
 
        access_type = (caller_prot & VM_PROT_ALL);
 
@@ -7034,10 +7010,6 @@ vm_map_wire_nested(
 #endif /* __arm64__ */
                    )
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-                   &&
-                   !pmap_cs_exempt(map->pmap)
-#endif
                    ) {
 #if MACH_ASSERT
                        printf("pid %d[%s] wiring executable range from "
@@ -8170,25 +8142,6 @@ vm_map_delete(
                        } else if (flags & VM_MAP_REMOVE_IMMUTABLE) {
 //                             printf("FBDP %d[%s] removing permanent entry %p [0x%llx:0x%llx] prot 0x%x/0x%x\n", proc_selfpid(), (current_task()->bsd_info ? proc_name_address(current_task()->bsd_info) : "?"), entry, (uint64_t)entry->vme_start, (uint64_t)entry->vme_end, entry->protection, entry->max_protection);
                                entry->permanent = FALSE;
-#if PMAP_CS
-                       } else if ((entry->protection & VM_PROT_EXECUTE) && !pmap_cs_enforced(map->pmap)) {
-                               entry->permanent = FALSE;
-
-                               printf("%d[%s] %s(0x%llx,0x%llx): "
-                                   "pmap_cs disabled, allowing for permanent executable entry [0x%llx:0x%llx] "
-                                   "prot 0x%x/0x%x\n",
-                                   proc_selfpid(),
-                                   (current_task()->bsd_info
-                                   ? proc_name_address(current_task()->bsd_info)
-                                   : "?"),
-                                   __FUNCTION__,
-                                   (uint64_t) start,
-                                   (uint64_t) end,
-                                   (uint64_t)entry->vme_start,
-                                   (uint64_t)entry->vme_end,
-                                   entry->protection,
-                                   entry->max_protection);
-#endif
                        } else {
                                if (vm_map_executable_immutable_verbose) {
                                        printf("%d[%s] %s(0x%llx,0x%llx): "
@@ -13779,9 +13732,6 @@ RetrySubMap:
 #endif /* __arm64__ */
                            ) &&
 #endif /* XNU_TARGET_OS_OSX */
-#if PMAP_CS
-                           !pmap_cs_exempt(map->pmap) &&
-#endif
                            !(entry->used_for_jit) &&
                            VM_MAP_POLICY_WX_STRIP_X(map)) {
                                DTRACE_VM3(cs_wx,
@@ -13973,9 +13923,6 @@ protection_failure:
                fault_info->stealth = FALSE;
                fault_info->io_sync = FALSE;
                if (entry->used_for_jit ||
-#if PMAP_CS
-                   pmap_cs_exempt(map->pmap) ||
-#endif
                    entry->vme_resilient_codesign) {
                        fault_info->cs_bypass = TRUE;
                } else {
@@ -18011,33 +17958,6 @@ vm_map_remap(
                target_map->size += target_size;
                SAVE_HINT_MAP_WRITE(target_map, insp_entry);
 
-#if PMAP_CS
-               if (*max_protection & VM_PROT_EXECUTE) {
-                       vm_map_address_t region_start = 0, region_size = 0;
-                       struct pmap_cs_code_directory *region_cd = NULL;
-                       vm_map_address_t base = 0;
-                       struct pmap_cs_lookup_results results = {};
-                       vm_map_size_t page_addr = vm_map_trunc_page(memory_address, PAGE_MASK);
-                       vm_map_size_t assoc_size = vm_map_round_page(memory_address + size - page_addr, PAGE_MASK);
-
-                       pmap_cs_lookup(src_map->pmap, memory_address, &results);
-                       region_size = results.region_size;
-                       region_start = results.region_start;
-                       region_cd = results.region_cd_entry;
-                       base = results.base;
-
-                       if (region_cd != NULL && (page_addr != region_start || assoc_size != region_size)) {
-                               *cur_protection = VM_PROT_READ;
-                               *max_protection = VM_PROT_READ;
-                               printf("mismatched remap of executable range 0x%llx-0x%llx to 0x%llx, "
-                                   "region_start 0x%llx, region_size 0x%llx, cd_entry %sNULL, making non-executable.\n",
-                                   page_addr, page_addr + assoc_size, *address,
-                                   region_start, region_size,
-                                   region_cd != NULL ? "not " : ""                     // Don't leak kernel slide
-                                   );
-                       }
-               }
-#endif
        }
        vm_map_unlock(target_map);
 
@@ -21070,128 +20990,6 @@ vm_map_set_high_start(
 }
 #endif /* XNU_TARGET_OS_OSX */
 
-#if PMAP_CS
-kern_return_t
-vm_map_entry_cs_associate(
-       vm_map_t                map,
-       vm_map_entry_t          entry,
-       vm_map_kernel_flags_t   vmk_flags)
-{
-       vm_object_t cs_object, cs_shadow;
-       vm_object_offset_t cs_offset;
-       void *cs_blobs;
-       struct vnode *cs_vnode;
-       kern_return_t cs_ret;
-
-       if (map->pmap == NULL ||
-           entry->is_sub_map || /* XXX FBDP: recurse on sub-range? */
-           pmap_cs_exempt(map->pmap) ||
-           VME_OBJECT(entry) == VM_OBJECT_NULL ||
-           !(entry->protection & VM_PROT_EXECUTE)) {
-               return KERN_SUCCESS;
-       }
-
-       vm_map_lock_assert_exclusive(map);
-
-       if (entry->used_for_jit) {
-               cs_ret = pmap_cs_associate(map->pmap,
-                   PMAP_CS_ASSOCIATE_JIT,
-                   entry->vme_start,
-                   entry->vme_end - entry->vme_start,
-                   0);
-               goto done;
-       }
-
-       if (vmk_flags.vmkf_remap_prot_copy) {
-               cs_ret = pmap_cs_associate(map->pmap,
-                   PMAP_CS_ASSOCIATE_COW,
-                   entry->vme_start,
-                   entry->vme_end - entry->vme_start,
-                   0);
-               goto done;
-       }
-
-       vm_object_lock_shared(VME_OBJECT(entry));
-       cs_offset = VME_OFFSET(entry);
-       for (cs_object = VME_OBJECT(entry);
-           (cs_object != VM_OBJECT_NULL &&
-           !cs_object->code_signed);
-           cs_object = cs_shadow) {
-               cs_shadow = cs_object->shadow;
-               if (cs_shadow != VM_OBJECT_NULL) {
-                       cs_offset += cs_object->vo_shadow_offset;
-                       vm_object_lock_shared(cs_shadow);
-               }
-               vm_object_unlock(cs_object);
-       }
-       if (cs_object == VM_OBJECT_NULL) {
-               return KERN_SUCCESS;
-       }
-
-       cs_offset += cs_object->paging_offset;
-       cs_vnode = vnode_pager_lookup_vnode(cs_object->pager);
-       cs_ret = vnode_pager_get_cs_blobs(cs_vnode,
-           &cs_blobs);
-       assert(cs_ret == KERN_SUCCESS);
-       cs_ret = cs_associate_blob_with_mapping(map->pmap,
-           entry->vme_start,
-           (entry->vme_end -
-           entry->vme_start),
-           cs_offset,
-           cs_blobs);
-       vm_object_unlock(cs_object);
-       cs_object = VM_OBJECT_NULL;
-
-done:
-       if (cs_ret == KERN_SUCCESS) {
-               DTRACE_VM2(vm_map_entry_cs_associate_success,
-                   vm_map_offset_t, entry->vme_start,
-                   vm_map_offset_t, entry->vme_end);
-               if (vm_map_executable_immutable) {
-                       /*
-                        * Prevent this executable
-                        * mapping from being unmapped
-                        * or modified.
-                        */
-                       entry->permanent = TRUE;
-               }
-               /*
-                * pmap says it will validate the
-                * code-signing validity of pages
-                * faulted in via this mapping, so
-                * this map entry should be marked so
-                * that vm_fault() bypasses code-signing
-                * validation for faults coming through
-                * this mapping.
-                */
-               entry->pmap_cs_associated = TRUE;
-       } else if (cs_ret == KERN_NOT_SUPPORTED) {
-               /*
-                * pmap won't check the code-signing
-                * validity of pages faulted in via
-                * this mapping, so VM should keep
-                * doing it.
-                */
-               DTRACE_VM3(vm_map_entry_cs_associate_off,
-                   vm_map_offset_t, entry->vme_start,
-                   vm_map_offset_t, entry->vme_end,
-                   int, cs_ret);
-       } else {
-               /*
-                * A real error: do not allow
-                * execution in this mapping.
-                */
-               DTRACE_VM3(vm_map_entry_cs_associate_failure,
-                   vm_map_offset_t, entry->vme_start,
-                   vm_map_offset_t, entry->vme_end,
-                   int, cs_ret);
-               entry->protection &= ~VM_PROT_EXECUTE;
-               entry->max_protection &= ~VM_PROT_EXECUTE;
-       }
-
-       return cs_ret;
-}
-#endif /* PMAP_CS */
 
 /*
  * FORKED CORPSE FOOTPRINT
index e07ad654950ce39b86e635222589edfcd10078fa..c20382971ed67be2c7e375c78c327e8eb9c6723c 100644 (file)
@@ -1159,12 +1159,6 @@ extern kern_return_t vm_map_set_cache_attr(
 
 extern int override_nx(vm_map_t map, uint32_t user_tag);
 
-#if PMAP_CS
-extern kern_return_t vm_map_entry_cs_associate(
-       vm_map_t                map,
-       vm_map_entry_t          entry,
-       vm_map_kernel_flags_t   vmk_flags);
-#endif /* PMAP_CS */
 
 extern void vm_map_region_top_walk(
        vm_map_entry_t entry,
index 990f1d4526cf2f7148f5949a4fea46cf9d6afcdf..f962d3e1f987b1e9f5576b4d7c7a3c120aadd554 100644 (file)
@@ -179,11 +179,7 @@ vm_map_store_entry_link(
                }
 #endif
        }
-#if PMAP_CS
-       (void) vm_map_entry_cs_associate(map, entry, vmk_flags);
-#else /* PMAP_CS */
        (void) vmk_flags;
-#endif /* PMAP_CS */
 }
 
 void
index 871b3c714f524d76df0cb8a5c55adeb824085c9f..50a654348f40b6943e3557ea0dd19c046cf33966 100644 (file)
@@ -516,14 +516,6 @@ extern void cs_validate_page(
        int *validated_p,
        int *tainted_p,
        int *nx_p);
-#if PMAP_CS
-extern kern_return_t cs_associate_blob_with_mapping(
-       void *pmap,
-       vm_map_offset_t start,
-       vm_map_size_t size,
-       vm_object_offset_t offset,
-       void *blobs_p);
-#endif /* PMAP_CS */
 
 extern kern_return_t memory_entry_purgeable_control_internal(
        ipc_port_t      entry_port,
index 45430fce92cd1ac84e884eebc4969412d1b640c2..ca78ec877f4cdd5dd48b7d5b6498c5ef11382fcb 100644 (file)
@@ -6819,8 +6819,15 @@ struct hibernate_statistics {
  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
  * so that we don't overrun the estimated image size, which would
  * result in a hibernation failure.
+ *
+ * We use a size value instead of a page count because we don't want to take
+ * up more space on disk if the system has a 16K page size instead of 4K. We
+ * are also not guaranteed to have that additional space available.
+ *
+ * Since this limit was set at 40000 pages on x86, we use 160MB as the
+ * xpmapped size.
  */
-#define HIBERNATE_XPMAPPED_LIMIT        40000
+#define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
 
 
 static int
@@ -7864,6 +7871,19 @@ hibernate_page_list_setall(hibernate_page_list_t * page_list,
 
        if (preflight && will_discard) {
                *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
+               /*
+                * We try to keep at most HIBERNATE_XPMAPPED_LIMIT pages in the hibernation image
+                * even if they are clean, so we need to size the hibernation image accordingly.
+                *
+                * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up, because
+                * 'dirty' xpmapped pages aren't distinguishable from other 'dirty' pages in
+                * preflight. So we might only see part of the xpmapped pages if we look at
+                * 'cd_found_xpmapped', which solely tracks clean xpmapped pages.
+                *
+                * Since these pages are all cleaned by the time we are in the post-preflight
+                * phase, we might see a much larger number in 'cd_found_xpmapped' then than in preflight.
+                */
+               *pagesOut += HIBERNATE_XPMAPPED_LIMIT;
        }
 
        hibernation_vmqueues_inspection = FALSE;
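
For concreteness, the new HIBERNATE_XPMAPPED_LIMIT works out to 160 * 1024 * 1024 / 4096 = 40960 pages on a 4K-page system (approximately the old x86 constant of 40000) and 160 * 1024 * 1024 / 16384 = 10240 pages on a 16K-page system; the budget occupies the same 160MB of image space either way.
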
index 64c6aab05a0150719114501e499b2d6acffb55ad..115b247130e6ac0e9d2db7ac75764585068d60f8 100644 (file)
@@ -1345,7 +1345,6 @@ __attribute__((noinline))
 static kern_return_t
 vm_shared_region_map_file_setup(
        vm_shared_region_t              shared_region,
-       void                            *root_dir,
        int                             sr_file_mappings_count,
        struct _sr_file_mappings        *sr_file_mappings,
        unsigned int                    *mappings_to_slide_cnt,
@@ -1382,21 +1381,6 @@ vm_shared_region_map_file_setup(
        vm_shared_region_lock();
        assert(shared_region->sr_ref_count > 1);
 
-       if (shared_region->sr_root_dir != root_dir) {
-               /*
-                * This shared region doesn't match the current root
-                * directory of this process.  Deny the mapping to
-                * avoid tainting the shared region with something that
-                * doesn't quite belong into it.
-                */
-               vm_shared_region_unlock();
-
-               SHARED_REGION_TRACE_DEBUG(
-                       ("shared_region: map(%p) <- 0x%x \n",
-                       (void *)VM_KERNEL_ADDRPERM(shared_region), kr));
-               return KERN_PROTECTION_FAILURE;
-       }
-
        /*
         * Make sure we handle only one mapping at a time in a given
         * shared region, to avoid race conditions.  This should not
@@ -1728,7 +1712,6 @@ __attribute__((noinline))
 kern_return_t
 vm_shared_region_map_file(
        vm_shared_region_t       shared_region,
-       void                     *root_dir,
        int                      sr_file_mappings_count,
        struct _sr_file_mappings *sr_file_mappings)
 {
@@ -1745,7 +1728,7 @@ vm_shared_region_map_file(
        vm_map_offset_t         lowest_unnestable_addr = 0;
        mach_vm_offset_t        file_first_mappings[VMSR_NUM_SLIDES] = {(mach_vm_offset_t) -1, (mach_vm_offset_t) -1};
 
-       kr = vm_shared_region_map_file_setup(shared_region, root_dir, sr_file_mappings_count, sr_file_mappings,
+       kr = vm_shared_region_map_file_setup(shared_region, sr_file_mappings_count, sr_file_mappings,
            &mappings_to_slide_cnt, &mappings_to_slide[0], slid_mappings, slid_file_controls,
            &first_mapping, &file_first_mappings[0],
            &sfm_min_address, &sfm_max_address, &sr_map, &lowest_unnestable_addr);
@@ -3487,3 +3470,15 @@ post_sys_powersource_internal(int i, int internal)
        }
 }
 #endif
+
+void *
+vm_shared_region_root_dir(
+       struct vm_shared_region *sr)
+{
+       void *vnode;
+
+       vm_shared_region_lock();
+       vnode = sr->sr_root_dir;
+       vm_shared_region_unlock();
+       return vnode;
+}
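
With the root_dir parameter dropped from vm_shared_region_map_file, the chroot-mismatch check deleted above presumably moves to the caller by way of the new vm_shared_region_root_dir accessor. A hypothetical caller-side shape (only the accessor is real; the other names are illustrative):

    void *sr_root = vm_shared_region_root_dir(shared_region);
    if (sr_root != proc_root_dir) {   /* proc_root_dir: hypothetical */
            /* Deny the mapping rather than taint a shared region that
             * belongs to a different root directory. */
            return KERN_PROTECTION_FAILURE;
    }
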
index ffb9bb0cb47449852b7a958d48715cced00a300b..cc76069a0543b4d0dfbf1f6ddfc6afa5767f743d 100644 (file)
@@ -305,9 +305,10 @@ extern void vm_shared_region_undo_mappings(
 __attribute__((noinline))
 extern kern_return_t vm_shared_region_map_file(
        struct vm_shared_region *shared_region,
-       void                    *root_dir,
        int                     sr_mappings_count,
        struct _sr_file_mappings *sr_mappings);
+extern void *vm_shared_region_root_dir(
+       struct vm_shared_region *shared_region);
 extern kern_return_t vm_shared_region_sliding_valid(uint32_t slide);
 extern void vm_commpage_init(void);
 extern void vm_commpage_text_init(void);
index 5dc8518019986572d370083dee3a65c9481f55c1..fcd3a54c704e1002493c0060225d4848fddf7ddc 100644 (file)
@@ -631,9 +631,9 @@ get_interrupted_pc(bool *kernel_out)
 }
 
 static void
-kpc_sample_kperf_x86(uint32_t ctr, uint64_t count, uint64_t config)
+kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
+    uint64_t config)
 {
-       uint32_t actionid = FIXED_ACTIONID(ctr);
        bool kernel = false;
        uintptr_t pc = get_interrupted_pc(&kernel);
        kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
@@ -666,14 +666,15 @@ kpc_pmi_handler(void)
                        FIXED_SHADOW(ctr)
                                += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
 
-                       BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));
+                       uint32_t actionid = FIXED_ACTIONID(ctr);
+                       BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, actionid);
 
-                       if (FIXED_ACTIONID(ctr)) {
-                               kpc_sample_kperf_x86(ctr, FIXED_SHADOW(ctr) + extra, 0);
+                       if (actionid != 0) {
+                               kpc_sample_kperf_x86(ctr, actionid, FIXED_SHADOW(ctr) + extra, 0);
                        }
                }
        }
-#endif
+#endif // FIXED_COUNTER_SHADOW
 
        for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
                if ((1ULL << ctr) & status) {
@@ -686,11 +687,12 @@ kpc_pmi_handler(void)
                         * bits are in the correct state before the call to kperf_sample */
                        wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);
 
-                       BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));
+                       unsigned int actionid = CONFIGURABLE_ACTIONID(ctr);
+                       BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, actionid);
 
-                       if (CONFIGURABLE_ACTIONID(ctr)) {
+                       if (actionid != 0) {
                                uint64_t config = IA32_PERFEVTSELx(ctr);
-                               kpc_sample_kperf_x86(ctr + kpc_configurable_count(),
+                               kpc_sample_kperf_x86(ctr + kpc_fixed_count(), actionid,
                                    CONFIGURABLE_SHADOW(ctr) + extra, config);
                        }
                }
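
Besides plumbing the action ID through as an argument, the hunk above fixes a counter-numbering bug: kperf numbers fixed counters first and configurable counters after them, so a configurable counter's ID must be offset by kpc_fixed_count(), not kpc_configurable_count().

    /* Counter-ID layout assumed by kperf:
     *   fixed counter f        -> ID f
     *   configurable counter c -> ID kpc_fixed_count() + c
     * With the old "ctr + kpc_configurable_count()" offset, a machine with
     * 3 fixed and 4 configurable counters would have reported configurable
     * counter 0 as ID 4 instead of ID 3. */
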
index bd6a75e42416f6a0f464f15409ebf3ee3f6267e5..51c9d6ba42ba2724f911bd1ad7a60da109d4cae6 100644 (file)
@@ -509,14 +509,12 @@ SecureDTGetProperty(const DTEntry entry, const char *propertyName, void const **
                   (vm_offset_t)DTRootNode, (vm_size_t)((uintptr_t)DTEnd - (uintptr_t)DTRootNode));
 }
 
-#if defined(__i386__) || defined(__x86_64__)
 int
 SecureDTGetPropertyRegion(const DTEntry entry, const char *propertyName, void const **propertyValue, unsigned int *propertySize, vm_offset_t const region_start, vm_size_t region_size)
 {
        return SecureDTGetPropertyInternal(entry, propertyName, propertyValue, propertySize,
                   region_start, region_size);
 }
-#endif
 
 
 int
diff --git a/pexpert/pexpert/arm64/H11.h b/pexpert/pexpert/arm64/H11.h
new file mode 100644 (file)
index 0000000..5986f4c
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _PEXPERT_ARM64_H11_H
+#define _PEXPERT_ARM64_H11_H
+
+#define APPLEVORTEX
+#define NO_MONITOR              1 /* No EL3 for this CPU -- ever */
+#define HAS_CTRR                1 /* Has CTRR registers */
+#define HAS_NEX_PG              1 /* Supports p-Core NEX powergating during Neon inactivity */
+#define HAS_BP_RET              1 /* Supports branch predictor state retention across ACC sleep */
+#define HAS_CONTINUOUS_HWCLOCK  1 /* Has a hardware clock that ticks during sleep */
+#define HAS_IPI                 1 /* Has IPI registers */
+#define HAS_CLUSTER             1 /* Has eCores and pCores in separate clusters */
+#define HAS_RETENTION_STATE     1 /* Supports architectural state retention */
+#define HAS_VMSA_LOCK           1 /* Supports lockable MMU config registers */
+
+#define CPU_HAS_APPLE_PAC                    1
+#define HAS_UNCORE_CTRS                      1
+#define UNCORE_VERSION                       2
+#define UNCORE_PER_CLUSTER                   1
+#define UNCORE_NCTRS                         16
+#define CORE_NCTRS                           10
+
+#define __ARM_AMP__                          1
+#define __ARM_16K_PG__                       1
+#define __ARM_GLOBAL_SLEEP_BIT__             1
+#define __ARM_PAN_AVAILABLE__                1
+#define __ARM_WKDM_ISA_AVAILABLE__           1
+#define __PLATFORM_WKDM_ALIGNMENT_MASK__     (0x3FULL)
+#define __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ (64)
+
+
+#include <pexpert/arm64/apple_arm64_common.h>
+
+#endif /* !_PEXPERT_ARM64_H11_H */
diff --git a/pexpert/pexpert/arm64/H13.h b/pexpert/pexpert/arm64/H13.h
new file mode 100644 (file)
index 0000000..1da2d2e
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2019 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _PEXPERT_ARM64_H13_H
+#define _PEXPERT_ARM64_H13_H
+
+#define APPLEFIRESTORM
+#define NO_MONITOR              1 /* No EL3 for this CPU -- ever */
+#define HAS_CTRR                1 /* Has CTRR registers */
+#define HAS_NEX_PG              1 /* Supports p-Core NEX powergating during Neon inactivity */
+#define HAS_BP_RET              1 /* Supports branch predictor state retention across ACC sleep */
+#define HAS_CONTINUOUS_HWCLOCK  1 /* Has a hardware clock that ticks during sleep */
+#define HAS_IPI                 1 /* Has IPI registers */
+#define HAS_CLUSTER             1 /* Has eCores and pCores in separate clusters */
+#define HAS_RETENTION_STATE     1 /* Supports architectural state retention */
+#define HAS_VMSA_LOCK           1 /* Supports lockable MMU config registers */
+#define HAS_DPC_ERR             1 /* Has an error register for DPC */
+#define HAS_UCNORMAL_MEM        1 /* Supports completely un-cacheable normal memory type */
+#define HAS_SPR_LOCK            1 /* Supports lockable special-purpose registers */
+#define HAS_TWO_STAGE_SPR_LOCK  1 /* SPR locks are split into RO_CTL and LOCK registers */
+#define HAS_FAST_CNTVCT         1
+#define HAS_E0PD                1 /* Supports E0PD0 and E0PD1 in TCR for Meltdown mitigation (ARMv8.5)*/
+#define HAS_ICACHE_FUSION_BUG   1 /* HW bug that causes incorrect reporting of instruction aborts on fused instructions */
+
+#define CPU_HAS_APPLE_PAC                    1
+#define HAS_UNCORE_CTRS                      1
+#define UNCORE_VERSION                       2
+#define UNCORE_PER_CLUSTER                   1
+#define UNCORE_NCTRS                         16
+#define CORE_NCTRS                           10
+
+#define __ARM_AMP__                          1
+#define __ARM_16K_PG__                       1
+#define __ARM_GLOBAL_SLEEP_BIT__             1
+#define __ARM_PAN_AVAILABLE__                1
+#define __ARM_WKDM_ISA_AVAILABLE__           1
+#define __ARM_WKDM_POPCNT__                  1
+#define __ARM_WKDM_POPCNT_COMPRESSED_DATA__  0
+#define __ARM_SB_AVAILABLE__                 1
+#define __PLATFORM_WKDM_ALIGNMENT_MASK__     (0x3FULL)
+#define __PLATFORM_WKDM_ALIGNMENT_BOUNDARY__ (64)
+
+/* Optional CPU features -- an SoC may #undef these */
+#define ARM_PARAMETERIZED_PMAP               1
+#define __ARM_MIXED_PAGE_SIZE__              1
+#define HAS_APCTL_EL1_USERKEYEN              1 /* Supports use of KernKey in EL0 */
+
+/*
+ * APSTS_SUPPORTED: Pointer authentication status registers, MKEYVld flag moved here from APCTL on APPLELIGHTNING (H12)
+ */
+#define __APSTS_SUPPORTED__                  1
+#define __ARM_RANGE_TLBI__                   1
+#define __ARM_E2H__                          1
+
+#include <pexpert/arm64/apple_arm64_common.h>
+
+#endif /* !_PEXPERT_ARM64_H13_H */
index 6807e0b64ade2668389e7c33143f294decfa97a0..9e5d4e58ee83b0454103b5ff70de005bff052ee8 100644 (file)
@@ -20,6 +20,8 @@ PRIVATE_KERNELFILES = \
        H7.h \
        H8.h \
        H9.h \
+       H11.h \
+       H13.h \
        BCM2837.h \
        spr_locks.h
 
@@ -54,6 +56,8 @@ DATAFILES = \
        H7.h \
        H8.h \
        H9.h \
+       H11.h \
+       H13.h \
        BCM2837.h \
        spr_locks.h
 
index 34005a9ed8399c006da0e08752f89e1cbaebc44d..b0085a13eec3cd094d46bb6aae1f85f7d19d1d3b 100644 (file)
 #define KERNEL_INTEGRITY_WT                  1
 #endif
 
+#if defined(CPU_HAS_APPLE_PAC) && defined(__arm64e__)
+#define HAS_APPLE_PAC                        1 /* Has Apple ARMv8.3a pointer authentication */
+#define KERNEL_ROP_ID 0xfeedfacefeedfacf /* placeholder static kernel ROP diversifier */
+#define KERNEL_KERNKEY_ID (KERNEL_ROP_ID + 4)
+#define KERNEL_JOP_ID (KERNEL_KERNKEY_ID + 2)
+#endif
 
 #include <pexpert/arm64/apple_arm64_regs.h>
 #include <pexpert/arm64/AIC.h>
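
KERNEL_ROP_ID and the two values derived from it are placeholder diversifiers for the kernel's pointer-authentication keys (the +4 and +2 offsets just keep the three IDs distinct). Purely as an illustration of how a constant discriminator is consumed, and not how xnu actually derives its keys, clang's ptrauth intrinsics can be used like this:

    #if __has_feature(ptrauth_calls)
    #include <ptrauth.h>
    #include <stdint.h>

    /* Illustrative only: sign a function pointer with a constant
     * discriminator built from one of the placeholder IDs above. */
    static void *
    sign_example(void *fn)
    {
            return ptrauth_sign_unauthenticated(fn,
                ptrauth_key_function_pointer,
                (uint64_t)(KERNEL_JOP_ID & 0xffff));
    }
    #endif
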
index 01265fdfbd2827a4f59ee8ce99ce85d9416138f9..2d4d6690d7f3bcb53e93da6d596f19745f1e5128 100644 (file)
 #define MAX_CPU_CLUSTERS               2
 
 #define XNU_MONITOR                    1 /* Secure pmap runtime */
-#define XNU_MONITOR_T8020_DART         1 /* T8020 DART plugin for secure pmap runtime */
-#define T8020_DART_ALLOW_BYPASS        (1 << 1) /* DART allows translation bypass in certain cases */
-#define XNU_MONITOR_NVME_PPL           1 /* NVMe PPL plugin for secure pmap runtime */
-#define XNU_MONITOR_ANS2_SART          1 /* ANS2 SART plugin for secure pmap runtime */
-#define PMAP_CS                        1
-#define PMAP_CS_ENABLE                 1
 #endif  /* ARM64_BOARD_CONFIG_T8020 */
 
 #ifdef ARM64_BOARD_CONFIG_T8006
 #define PMAP_CS_ENABLE                 1
 #endif  /* ARM64_BOARD_CONFIG_T8030 */
 
+#ifdef ARM64_BOARD_CONFIG_T8101
+#include <pexpert/arm64/H13.h>
+#include <pexpert/arm64/spr_locks.h>
+
+#define MAX_L2_CLINE                   7
+#define MAX_CPUS                       8
+#define MAX_CPU_CLUSTERS               2
+
+#define XNU_MONITOR                    1 /* Secure pmap runtime */
+#endif  /* ARM64_BOARD_CONFIG_T8101 */
 
+#ifdef ARM64_BOARD_CONFIG_T8103
+#include <pexpert/arm64/H13.h>
+#include <pexpert/arm64/spr_locks.h>
+
+#define MAX_L2_CLINE                   7
+#define MAX_CPUS                       8
+#define MAX_CPU_CLUSTERS               2
+
+#define XNU_MONITOR                    1 /* Secure pmap runtime */
+#endif  /* ARM64_BOARD_CONFIG_T8103 */
 
 
 
index d3a18e529885f0b08a748e0f29be095aa7b03726..5a97d0c8854670718036036a6d961c9e873f0032 100644 (file)
@@ -249,13 +249,9 @@ extern int SecureDTRestartEntryIteration(DTEntryIterator iterator);
 extern int SecureDTGetProperty(const DTEntry entry, const char *propertyName,
     void const **propertyValue, unsigned int *propertySize);
 
-#if defined(__i386__) || defined(__x86_64__)
-// x86 processes device tree fragments outside the normal DT region in
-// hibernation. This would not work on ARM.
 extern int SecureDTGetPropertyRegion(const DTEntry entry, const char *propertyName,
     void const **propertyValue, unsigned int *propertySize,
     vm_offset_t const region_start, vm_size_t region_size);
-#endif
 
 /*
  *  -------------------------------------------------------------------------------
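
This header hunk matches the definition change earlier in the diff: the x86-only guard around SecureDTGetPropertyRegion is gone, so the region-bounded lookup is now declared for all architectures, presumably because arm64 hibernation also has to parse a preserved device-tree copy outside the live DT region. A call sketch, where the region arguments are hypothetical:

    void const *val;
    unsigned int size;

    if (SecureDTGetPropertyRegion(entry, "reg", &val, &size,
            preserved_dt_base, preserved_dt_size) == kSuccess) {
            /* val points within [preserved_dt_base, preserved_dt_base + preserved_dt_size) */
    }
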
index 7f58a64e813385334b05f2552d1a39be2b99ed17..51630e5de1b8fba65f8f188cc718859d10b6201e 100644 (file)
@@ -2,6 +2,9 @@
 #include <darwintest_utils.h>
 #include <pthread.h>
 #include <sys/select.h>
+#include <sys/fileport.h>
+#include <sys/fcntl.h>
+#include <mach/mach.h>
 
 T_GLOBAL_META(
        T_META_RUN_CONCURRENTLY(true),
@@ -92,3 +95,42 @@ T_DECL(fd_dup2_erase_clofork_58446996,
        T_EXPECT_EQ(fcntl(fd2, F_GETFD, 0), 0,
            "neither FD_CLOEXEC nor FD_CLOFORK should be set");
 }
+
+struct confined_race_state {
+       int fd;
+       bool made;
+};
+
+static void *
+confine_thread(void *data)
+{
+       volatile int *fdp = data;
+
+       for (;;) {
+               fcntl(*fdp, F_SETCONFINED, 1);
+       }
+
+       return NULL;
+}
+
+T_DECL(confined_fileport_race, "test for rdar://69922255")
+{
+       int fd = -1;
+       pthread_t t;
+       mach_port_t p = MACH_PORT_NULL;
+
+       T_ASSERT_POSIX_SUCCESS(pthread_create(&t, NULL, confine_thread, &fd),
+           "pthread_create");
+
+       for (int i = 0; i < 100 * 1000; i++) {
+               fd = open("/dev/null", O_RDONLY | 0x08000000 /* O_CLOFORK */);
+               T_QUIET; T_ASSERT_POSIX_SUCCESS(fd, "open(/dev/null)");
+               if (fileport_makeport(fd, &p) == 0) {
+                       T_QUIET; T_ASSERT_EQ(fcntl(fd, F_GETCONFINED), 0,
+                           "should never get a confined fd: %d", fd);
+                       mach_port_destroy(mach_task_self(), p);
+               }
+
+               close(fd);
+       }
+}
index b9e45a30ce75cbf2375357c243f984ddeb9b5c36..35288da3903dab30bea22f447f265fcab1664198 100644 (file)
@@ -377,7 +377,8 @@ T_DECL(kpc_pmi_configurable,
        free(actions);
 
        (void)kperf_action_count_set(1);
-       ret = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO);
+       ret = kperf_action_samplers_set(1,
+           KPERF_SAMPLER_TINFO | KPERF_SAMPLER_KSTACK);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kperf_action_samplers_set");
 
        ktrace_config_t ktconfig = ktrace_config_create_current();
@@ -389,6 +390,8 @@ T_DECL(kpc_pmi_configurable,
        T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(cpus, "allocate CPUs array");
 
        __block unsigned int nsamples = 0;
+       __block unsigned int npmis = 0;
+       __block unsigned int nstacks = 0;
        __block uint64_t first_ns = 0;
        __block uint64_t last_ns = 0;
 
@@ -436,10 +439,20 @@ T_DECL(kpc_pmi_configurable,
                        cpu->timeslices[(unsigned int)slice] += 1;
                }
 
-               nsamples++;
+               npmis++;
        });
 
-       ktrace_events_single(sess, END_EVENT, ^(struct trace_point *tp __unused) {
+       ktrace_events_single(sess, PERF_SAMPLE, ^(struct trace_point * tp) {
+               if (tp->debugid & DBG_FUNC_START) {
+                       nsamples++;
+               }
+       });
+       ktrace_events_single(sess, PERF_STK_KHDR,
+           ^(struct trace_point * __unused tp) {
+               nstacks++;
+       });
+
+       ktrace_events_single(sess, END_EVENT, ^(struct trace_point *tp) {
                int cret = ktrace_convert_timestamp_to_nanoseconds(sess,
                    tp->timestamp, &last_ns);
                T_QUIET; T_ASSERT_POSIX_ZERO(cret, "convert timestamp");
@@ -479,6 +492,7 @@ T_DECL(kpc_pmi_configurable,
                }
                check_counters(mch.ncpus, mch.nfixed + mch.nconfig, tly, counts);
        });
+       ktrace_events_class(sess, DBG_PERF, ^(struct trace_point * __unused tp) {});
 
        int stop = 0;
        (void)start_threads(&mch, spin, &stop);
@@ -496,6 +510,16 @@ T_DECL(kpc_pmi_configurable,
                T_LOG("saw %llu cycles in process", post_ru.ri_cycles - pre_ru.ri_cycles);
                uint64_t total = 0;
 
+               T_LOG("saw pmis = %u, samples = %u, stacks = %u", npmis, nsamples,
+                   nstacks);
+               // Allow some slop in case the trace is cut-off midway through a
+               // sample.
+               const unsigned int cutoff_leeway = 32;
+               T_EXPECT_GE(nsamples + cutoff_leeway, npmis,
+                   "saw as many samples as PMIs");
+               T_EXPECT_GE(nstacks + cutoff_leeway, npmis,
+                   "saw as many stacks as PMIs");
+
                unsigned int nsamplecpus = 0;
                char sample_slices[NTIMESLICES + 1];
                sample_slices[NTIMESLICES] = '\0';
@@ -508,7 +532,6 @@ T_DECL(kpc_pmi_configurable,
                        bool seen_empty = false;
                        for (unsigned int j = 0; j < NTIMESLICES; j++) {
                                unsigned int nslice = cpu->timeslices[j];
-                               nsamples += nslice;
                                ncpusamples += nslice;
                                if (nslice > 0) {
                                        nsampleslices++;
index 2c133a1a9f50da0d0caa52651c41fcb4ea619680..cf738136b6b55f079356981ef99724234a9242e0 100644 (file)
 #include "kperf_helpers.h"
 #include "ktrace_helpers.h"
 
-#define PERF_STK_KHDR  UINT32_C(0x25020014)
-#define PERF_STK_UHDR  UINT32_C(0x25020018)
-#define PERF_STK_KDATA UINT32_C(0x2502000c)
-#define PERF_STK_UDATA UINT32_C(0x25020010)
-
 #define CALLSTACK_VALID 0x1
 #define CALLSTACK_TRUNCATED 0x10
 
index 238d37afb39d955a07b728cbd8b86f631fbeb5cf..130d3e1b22fb8e97b1cfbfa1f497428861f582a3 100644 (file)
@@ -9,5 +9,9 @@ void configure_kperf_stacks_timer(pid_t pid, unsigned int period_ms,
 
 #define PERF_SAMPLE KDBG_EVENTID(DBG_PERF, 0, 0)
 #define PERF_KPC_PMI KDBG_EVENTID(DBG_PERF, 6, 0)
+#define PERF_STK_KHDR  UINT32_C(0x25020014)
+#define PERF_STK_UHDR  UINT32_C(0x25020018)
+#define PERF_STK_KDATA UINT32_C(0x2502000c)
+#define PERF_STK_UDATA UINT32_C(0x25020010)
 
 #endif /* !defined(KPERF_HELPERS_H) */
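
Hoisting the PERF_STK_* constants into this shared header puts them next to the other kdebug event IDs, and the hex values are just preassembled KDBG_EVENTID encodings (class << 24 | subclass << 16 | code << 2). Decoding one as a check:

    /* 0x25020014 -> class 0x25 (DBG_PERF), subclass 0x02, code 0x14 >> 2 = 5,
     * i.e. PERF_STK_KHDR == KDBG_EVENTID(DBG_PERF, 2, 5). */
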
index 58aa696591866d804e69c04cce248773e2473c38..0e1e51ad6a19e457619d76780ac2deceb591e849 100644 (file)
@@ -39,7 +39,10 @@ T_GLOBAL_META(
        X(IS_FREEZABLE_NOT_AS_EXPECTED) \
        X(MEMSTAT_PRIORITY_CHANGE_FAILED) \
        X(INVALID_ALLOCATE_PAGES_ARGUMENTS) \
-       X(EXIT_CODE_MAX)
+       X(FROZEN_BIT_SET) \
+       X(FROZEN_BIT_NOT_SET) \
+       X(MEMORYSTATUS_CONTROL_ERROR) \
+       X(EXIT_CODE_MAX) \
 
 #define EXIT_CODES_ENUM(VAR) VAR,
 enum exit_codes_num {
@@ -545,7 +548,7 @@ T_HELPER_DECL(frozen_background, "Frozen background process", T_META_ASROOT(true
 
 /* Launches the frozen_background helper as a managed process. */
 static pid_t
-launch_frozen_background_process()
+launch_background_helper(const char* variant)
 {
        pid_t pid;
        char **launch_tool_args;
@@ -559,7 +562,7 @@ launch_frozen_background_process()
        launch_tool_args = (char *[]){
                testpath,
                "-n",
-               "frozen_background",
+               variant,
                NULL
        };
        ret = dt_launch_tool(&pid, launch_tool_args, false, NULL, NULL);
@@ -634,7 +637,7 @@ memorystatus_assertion_test_demote_frozen()
        });
 
        /* Launch the child process and set the initial properties on it. */
-       child_pid = launch_frozen_background_process();
+       child_pid = launch_background_helper("frozen_background");
        set_memlimits(child_pid, active_limit_mb, inactive_limit_mb, false, false);
        set_assertion_priority(child_pid, requestedpriority, 0x0);
        (void)check_properties(child_pid, requestedpriority, inactive_limit_mb, 0x0, ASSERTION_STATE_IS_SET, "Priority was set");
@@ -811,7 +814,7 @@ get_jetsam_snapshot_entry(memorystatus_jetsam_snapshot_t *snapshot, pid_t pid)
  * If exit_with_child is true, the test will exit when the child exits.
  */
 static void
-test_after_frozen_background_launches(bool exit_with_child, dispatch_block_t test_block)
+test_after_background_helper_launches(bool exit_with_child, const char* variant, dispatch_block_t test_block)
 {
        dispatch_source_t ds_signal, ds_exit;
 
@@ -821,7 +824,7 @@ test_after_frozen_background_launches(bool exit_with_child, dispatch_block_t tes
        T_QUIET; T_ASSERT_NOTNULL(ds_signal, "dispatch_source_create");
        dispatch_source_set_event_handler(ds_signal, test_block);
        /* Launch the child process. */
-       child_pid = launch_frozen_background_process();
+       child_pid = launch_background_helper(variant);
        /* Listen for exit. */
        if (exit_with_child) {
                ds_exit = dispatch_source_create(DISPATCH_SOURCE_TYPE_PROC, (uintptr_t)child_pid, DISPATCH_PROC_EXIT, dispatch_get_main_queue());
@@ -830,6 +833,9 @@ test_after_frozen_background_launches(bool exit_with_child, dispatch_block_t tes
                        pid_t rc = waitpid(child_pid, &status, 0);
                        T_QUIET; T_ASSERT_EQ(rc, child_pid, "waitpid");
                        code = WEXITSTATUS(status);
+                       if (code != 0) {
+                               T_LOG("Child exited with error: %s", exit_codes_str[code]);
+                       }
                        T_QUIET; T_ASSERT_EQ(code, 0, "Child exited cleanly");
                        T_END;
                });
@@ -843,7 +849,7 @@ test_after_frozen_background_launches(bool exit_with_child, dispatch_block_t tes
 T_DECL(get_frozen_procs, "List processes in the freezer") {
        skip_if_freezer_is_disabled();
 
-       test_after_frozen_background_launches(true, ^{
+       test_after_background_helper_launches(true, "frozen_background", ^{
                proc_name_t name;
                /* Place the child in the idle band so that it gets elevated like a typical app. */
                move_to_idle_band(child_pid);
@@ -864,7 +870,7 @@ T_DECL(frozen_to_swap_accounting, "jetsam snapshot has frozen_to_swap accounting
 
        skip_if_freezer_is_disabled();
 
-       test_after_frozen_background_launches(true, ^{
+       test_after_background_helper_launches(true, "frozen_background", ^{
                memorystatus_jetsam_snapshot_t *snapshot = NULL;
                memorystatus_jetsam_snapshot_entry_t *child_entry = NULL;
                /* Place the child in the idle band so that it gets elevated like a typical app. */
@@ -897,7 +903,7 @@ T_DECL(freezer_snapshot, "App kills are recorded in the freezer snapshot") {
        /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */
        take_jetsam_snapshot_ownership();
 
-       test_after_frozen_background_launches(false, ^{
+       test_after_background_helper_launches(false, "frozen_background", ^{
                int ret;
                memorystatus_jetsam_snapshot_t *snapshot = NULL;
                memorystatus_jetsam_snapshot_entry_t *child_entry = NULL;
@@ -920,7 +926,7 @@ T_DECL(freezer_snapshot_consume, "Freezer snapshot is consumed on read") {
        /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */
        take_jetsam_snapshot_ownership();
 
-       test_after_frozen_background_launches(false, ^{
+       test_after_background_helper_launches(false, "frozen_background", ^{
                int ret;
                memorystatus_jetsam_snapshot_t *snapshot = NULL;
                memorystatus_jetsam_snapshot_entry_t *child_entry = NULL;
@@ -949,7 +955,7 @@ T_DECL(freezer_snapshot_frozen_state, "Frozen state is recorded in freezer snaps
        /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */
        take_jetsam_snapshot_ownership();
 
-       test_after_frozen_background_launches(false, ^{
+       test_after_background_helper_launches(false, "frozen_background", ^{
                int ret;
                memorystatus_jetsam_snapshot_t *snapshot = NULL;
                memorystatus_jetsam_snapshot_entry_t *child_entry = NULL;
@@ -976,7 +982,7 @@ T_DECL(freezer_snapshot_thaw_state, "Thaw count is recorded in freezer snapshot"
        /* Take ownership of the snapshot to ensure we don't race with another process trying to consume them. */
        take_jetsam_snapshot_ownership();
 
-       test_after_frozen_background_launches(false, ^{
+       test_after_background_helper_launches(false, "frozen_background", ^{
                int ret;
                memorystatus_jetsam_snapshot_t *snapshot = NULL;
                memorystatus_jetsam_snapshot_entry_t *child_entry = NULL;
@@ -1003,3 +1009,69 @@ T_DECL(freezer_snapshot_thaw_state, "Thaw count is recorded in freezer snapshot"
                T_END;
        });
 }
+
+T_HELPER_DECL(check_frozen, "Check frozen state", T_META_ASROOT(true)) {
+       int kern_ret;
+       dispatch_source_t ds_signal;
+       __block int is_frozen;
+       /* Set the process to freezable */
+       kern_ret = memorystatus_control(MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE, getpid(), 1, NULL, 0);
+       T_QUIET; T_ASSERT_POSIX_SUCCESS(kern_ret, "set process is freezable");
+       /* Signal to our parent that we can be frozen */
+       if (kill(getppid(), SIGUSR1) != 0) {
+               T_LOG("Unable to signal to parent process!");
+               exit(SIGNAL_TO_PARENT_FAILED);
+       }
+
+       /* We should not be frozen yet. */
+       is_frozen = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN, getpid(), 0, NULL, 0);
+       if (is_frozen == -1) {
+               T_LOG("memorystatus_control error: %s", strerror(errno));
+               exit(MEMORYSTATUS_CONTROL_ERROR);
+       }
+       if (is_frozen) {
+               exit(FROZEN_BIT_SET);
+       }
+
+       sig_t sig_ret = signal(SIGUSR1, SIG_IGN);
+       T_QUIET; T_WITH_ERRNO; T_ASSERT_NE(sig_ret, SIG_ERR, "signal(SIGUSR1, SIG_IGN)");
+       ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dispatch_get_main_queue());
+       if (ds_signal == NULL) {
+               exit(DISPATCH_SOURCE_CREATE_FAILED);
+       }
+
+       dispatch_source_set_event_handler(ds_signal, ^{
+               /* We should now be frozen. */
+               is_frozen = memorystatus_control(MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN, getpid(), 0, NULL, 0);
+               if (is_frozen == -1) {
+                       T_LOG("memorystatus_control error: %s", strerror(errno));
+                       exit(MEMORYSTATUS_CONTROL_ERROR);
+               }
+               if (!is_frozen) {
+                       exit(FROZEN_BIT_NOT_SET);
+               }
+               exit(SUCCESS);
+       });
+       dispatch_activate(ds_signal);
+
+       dispatch_main();
+}
+
+T_DECL(memorystatus_get_process_is_frozen, "MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN returns correct state") {
+       skip_if_freezer_is_disabled();
+
+       test_after_background_helper_launches(true, "check_frozen", ^{
+               int ret;
+               /* Freeze the child, resume it, and signal it to check its state */
+               move_to_idle_band(child_pid);
+               ret = pid_suspend(child_pid);
+               T_ASSERT_POSIX_SUCCESS(ret, "child suspended");
+               freeze_process(child_pid);
+               ret = pid_resume(child_pid);
+               T_ASSERT_POSIX_SUCCESS(ret, "child resumed after freeze");
+
+               kill(child_pid, SIGUSR1);
+               /* The child will check its own frozen state & exit. */
+       });
+}
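The helper relies on MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN reporting the frozen bit through the syscall's return value (1 frozen, 0 not, -1 with errno on failure). A standalone sketch of the same query, assuming memorystatus_control is declared as this test uses it:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/kern_memorystatus.h>

int
main(void)
{
	/* Query our own frozen state; any targetable pid works. */
	int is_frozen = memorystatus_control(
	    MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN, getpid(), 0, NULL, 0);
	if (is_frozen == -1) {
		fprintf(stderr, "memorystatus_control: %s\n", strerror(errno));
		return 1;
	}
	printf("pid %d frozen: %d\n", getpid(), is_frozen);
	return 0;
}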
index 5cbfc1e26df0852fa668dddd1ce00d33a163d09a..4f96938188c32dfbb57a6e5b383260e72736d98e 100644 (file)
@@ -48,6 +48,9 @@ T_DECL(posix_spawn_archpref, "verify posix_spawn_setarchpref_np can select slice
 #if defined(__arm64__) && defined(__LP64__)
        run_test("arm64", CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64_ALL);
 #endif /* defined(__arm64__) && defined(__LP64__) */
+#if defined(__arm64e__)
+       run_test("arm64e", CPU_TYPE_ARM64, CPU_SUBTYPE_ARM64E);
+#endif /* defined(__arm64e__) */
 
 #if defined(__x86_64__)
        run_test("any (x86_64)", CPU_TYPE_X86_64, CPU_SUBTYPE_ANY);
index 2bca287cae2a7ee25363bccab6c4c77af33d4a35..f0da8b81ef166b8c10ee9f78eaa04941d4cb00ea 100644 (file)
@@ -8,6 +8,8 @@ main(void)
 {
 #if defined(__x86_64__)
        return CPU_SUBTYPE_X86_64_ALL;
+#elif __arm64e__
+       return CPU_SUBTYPE_ARM64E;
 #elif defined(__arm64__) && defined(__LP64__)
        return CPU_SUBTYPE_ARM64_ALL;
 #elif defined(__arm64__)
index 0b94fa9de1f76806b3ee2fcd30f25453fecc517b..9b1f2b201d8d9d18e1ba440fa10e99d3487ffecc 100644 (file)
@@ -102,3 +102,37 @@ T_DECL(processor_cpu_stat64,
        free(poststats);
 #endif /* __arm64__ */
 }
+
+
+T_DECL(processor_cpu_info_order,
+    "ensure host_processor_info iterates CPU in CPU ID order")
+{
+       host_t host = mach_host_self();
+       host_t priv_port = MACH_PORT_NULL;
+
+       kern_return_t kr = host_get_host_priv_port(host, &priv_port);
+       T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_get_host_priv_port");
+       T_QUIET; T_ASSERT_NE(priv_port, MACH_PORT_NULL, "valid host priv port");
+
+       processor_info_array_t  info_array = NULL;
+       mach_msg_type_number_t  info_count = 0;
+       natural_t               processor_count = 0;
+
+       kr = host_processor_info(mach_host_self(), PROCESSOR_BASIC_INFO, &processor_count,
+           &info_array, &info_count);
+
+       T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_processor_info(PROCESSOR_BASIC_INFO)");
+       T_QUIET; T_ASSERT_NOTNULL(info_array, "valid processor info array");
+       T_QUIET; T_ASSERT_GT(info_count, (mach_msg_type_number_t)0, "non-zero array");
+       T_QUIET; T_ASSERT_GT(processor_count, (natural_t)0, "non-zero processor_count");
+
+       processor_basic_info_t basic_info_array = (processor_basic_info_t)info_array;
+
+       for (natural_t i = 0; i < processor_count; i++) {
+               struct processor_basic_info* processor_info = &basic_info_array[i];
+
+               natural_t slot_num = (natural_t)processor_info->slot_num;
+
+               T_ASSERT_EQ(slot_num, i, "CPU ID must equal array index");
+       }
+}
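host_processor_info hands back the per-CPU records as an out-of-line Mach array mapped into the caller's task, so a leak-checked caller would normally release it when done. A sketch of the allocate/inspect/release round trip under the standard Mach out-of-line ownership rules:

#include <mach/mach.h>
#include <stdio.h>

int
main(void)
{
	processor_info_array_t info = NULL;
	mach_msg_type_number_t info_count = 0;
	natural_t nproc = 0;

	kern_return_t kr = host_processor_info(mach_host_self(),
	    PROCESSOR_BASIC_INFO, &nproc, &info, &info_count);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "host_processor_info: %d\n", kr);
		return 1;
	}
	printf("%u processors reported\n", nproc);
	/* The kernel vm_allocate()s the array in our address space;
	 * vm_deallocate releases it. */
	(void)vm_deallocate(mach_task_self(), (vm_address_t)info,
	    info_count * sizeof(integer_t));
	return 0;
}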
index 035f73bcb3f2364da639c2e671015623947d1a50..e94e7a8f0f0d1f8cc733590162a22f3cdd2d4a8a 100644 (file)
@@ -42,6 +42,29 @@ T_GLOBAL_META(
 T_DECL(thread_set_state_corrupted_pc,
     "Test that ptrauth failures in thread_set_state() poison the respective register.")
 {
+#if !__arm64e__
        T_SKIP("Running on non-arm64e target, skipping...");
+#else
+       mach_port_t thread;
+       kern_return_t err = thread_create(mach_task_self(), &thread);
+       T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Created thread");
+
+       arm_thread_state64_t state;
+       mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
+       err = thread_get_state(mach_thread_self(), ARM_THREAD_STATE64, (thread_state_t)&state, &count);
+       T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Got own thread state");
+
+       void *corrupted_pc = (void *)((uintptr_t)state.__opaque_pc ^ 0x4);
+       state.__opaque_pc = corrupted_pc;
+       err = thread_set_state(thread, ARM_THREAD_STATE64, (thread_state_t)&state, count);
+       T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Set child thread's PC to a corrupted pointer");
+
+       err = thread_get_state(thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count);
+       T_QUIET; T_ASSERT_EQ(err, KERN_SUCCESS, "Got child's thread state");
+       T_EXPECT_NE(state.__opaque_pc, corrupted_pc, "thread_set_state() with a corrupted PC should poison the PC value");
+
+       err = thread_terminate(thread);
+       T_QUIET; T_EXPECT_EQ(err, KERN_SUCCESS, "Terminated thread");
+#endif // __arm64e__
 }
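The corrupted PC above is a signed pointer with one address bit flipped, so its pointer authentication code (PAC) no longer matches; the kernel's authentication in thread_set_state() fails and it writes back a deliberately poisoned value instead. A userspace sketch of where the PAC lives and why the flip invalidates it, assuming an arm64e toolchain with <ptrauth.h>:

#include <stdint.h>
#include <stdio.h>
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

int
main(void)
{
#if __has_feature(ptrauth_calls)
	int x = 0;
	/* Signing folds a PAC into the unused high bits of the pointer. */
	void *signed_ptr = ptrauth_sign_unauthenticated(&x,
	    ptrauth_key_asda, 0);
	/* Flipping a low address bit (the test XORs 0x4 into the PC)
	 * leaves the old PAC describing a different address, so any later
	 * authentication of this value fails. */
	void *corrupted = (void *)((uintptr_t)signed_ptr ^ 0x4);
	printf("signed:    %p\n", signed_ptr);
	printf("stripped:  %p\n", ptrauth_strip(signed_ptr, ptrauth_key_asda));
	printf("corrupted: %p\n", corrupted);
#else
	printf("no ptrauth on this target\n");
#endif
	return 0;
}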
 
index 0c6a498fd6048504f3b484503c1058df986a0f2a..51e7d56c5451d25e749e0389460af2ffa020bf29 100644 (file)
 
 T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
 
+#if     (__arm64e__ && TARGET_OS_IPHONE)
+static void *
+get_current_slide_address(bool reslide)
+{
+       pid_t                                           pid;
+       int                             pipefd[2];
+       posix_spawnattr_t               attr;
+       posix_spawn_file_actions_t      action;
+       uintptr_t                       addr;
+
+       T_ASSERT_POSIX_SUCCESS(posix_spawnattr_init(&attr), "posix_spawnattr_init");
+       /* spawn the helper requesting a reslide */
+       if (reslide) {
+               T_ASSERT_POSIX_SUCCESS(posix_spawnattr_setflags(&attr, _POSIX_SPAWN_RESLIDE), "posix_spawnattr_setflags");
+       }
+
+       T_ASSERT_POSIX_SUCCESS(pipe(pipefd), "pipe");
+       T_ASSERT_POSIX_ZERO(posix_spawn_file_actions_init(&action), "posix_spawn_file_actions_init");
+       T_ASSERT_POSIX_ZERO(posix_spawn_file_actions_addclose(&action, pipefd[0]), "posix_spawn_file_actions_addclose");
+       T_ASSERT_POSIX_ZERO(posix_spawn_file_actions_adddup2(&action, pipefd[1], 1), "posix_spawn_file_actions_adddup2");
+       T_ASSERT_POSIX_ZERO(posix_spawn_file_actions_addclose(&action, pipefd[1]), "posix_spawn_file_actions_addclose");
+
+       char *argvs[3];
+       argvs[0] = SHARED_CACHE_HELPER;
+       argvs[1] = reslide ? DO_RUSAGE_CHECK : DO_DUMMY;
+       argvs[2] = NULL;
+       char *const envps[] = {NULL};
+
+       T_ASSERT_POSIX_ZERO(posix_spawn(&pid, SHARED_CACHE_HELPER, &action, &attr, argvs, envps), "helper posix_spawn");
+       T_ASSERT_POSIX_SUCCESS(close(pipefd[1]), "close child end of the pipe");
+
+       char buf[ADDRESS_OUTPUT_SIZE] = {0};
+
+       ssize_t read_bytes = 0;
+       do {
+               if (read_bytes == -1) {
+                       T_LOG("reading off get_shared_cache_address got interrupted");
+               }
+               read_bytes = read(pipefd[0], buf, sizeof(buf));
+       } while (read_bytes == -1 && errno == EINTR);
+
+       T_ASSERT_EQ_LONG(ADDRESS_OUTPUT_SIZE, read_bytes, "read helper output");
+
+       int status = 0;
+       int waitpid_result = waitpid(pid, &status, 0);
+       T_ASSERT_POSIX_SUCCESS(waitpid_result, "waitpid");
+       T_ASSERT_EQ(waitpid_result, pid, "waitpid should return child we spawned");
+       T_ASSERT_EQ(WIFEXITED(status), 1, "child should have exited normally");
+       T_ASSERT_EQ(WEXITSTATUS(status), EX_OK, "child should have exited with success");
+
+       addr = strtoul(buf, NULL, 16);
+       T_ASSERT_GE_LONG(addr, 0L, "convert address to uintptr_t");
+
+       return (void *)addr;
+}
+
+/*
+ * build_faulting_shared_cache_address creates a pointer to an address that is
+ * within the shared_cache range but that is guaranteed to not be mapped.
+ */
+static char *
+build_faulting_shared_cache_address(bool tbi)
+{
+       uintptr_t fault_address;
+
+       // Grab currently mapped shared cache location and size
+       size_t shared_cache_len = 0;
+       const void *shared_cache_location = _dyld_get_shared_cache_range(&shared_cache_len);
+       if (shared_cache_location == NULL || shared_cache_len == 0) {
+               return NULL;
+       }
+
+       // Locate a mach_header in the shared cache
+       Dl_info info;
+       if (dladdr((const void *)fork, &info) == 0) {
+               return NULL;
+       }
+
+       const struct mach_header *mh = info.dli_fbase;
+       uintptr_t slide = (uintptr_t)_dyld_get_image_slide(mh);
+
+       if (slide == 0) {
+               fault_address = (uintptr_t)shared_cache_location + shared_cache_len + PAGE_SIZE;
+       } else {
+               fault_address = (uintptr_t)shared_cache_location - PAGE_SIZE;
+       }
+
+       if (tbi) {
+               fault_address |= 0x2000000000000000;
+       }
+
+       return (char *)fault_address;
+}
+
+static void
+induce_crash(volatile char *ptr)
+{
+       pid_t child = fork();
+       T_ASSERT_POSIX_SUCCESS(child, "fork");
+
+       if (child == 0) {
+               ptr[1]; /* volatile read triggers the crash */
+       } else {
+               sleep(1);
+               struct proc_exitreasonbasicinfo exit_reason = {0};
+               T_ASSERT_POSIX_SUCCESS(proc_pidinfo(child, PROC_PIDEXITREASONBASICINFO, 1, &exit_reason, sizeof(exit_reason)), "basic exit reason");
+
+               int status = 0;
+               int waitpid_result;
+               do {
+                       waitpid_result = waitpid(child, &status, 0);
+               } while (waitpid_result < 0 && errno == EINTR);
+               T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid_result, "waitpid");
+               T_ASSERT_EQ(waitpid_result, child, "waitpid should return forked child");
+               T_ASSERT_EQ(exit_reason.beri_namespace, OS_REASON_SIGNAL, "child should have exited with a signal");
+
+               if (ptr) {
+                       T_ASSERT_EQ_ULLONG(exit_reason.beri_code, (unsigned long long)SIGSEGV, "child should have received SIGSEGV");
+                       T_ASSERT_NE((int)(exit_reason.beri_flags & OS_REASON_FLAG_SHAREDREGION_FAULT), 0, "should detect shared cache fault");
+               } else {
+                       T_ASSERT_EQ((int)(exit_reason.beri_flags & OS_REASON_FLAG_SHAREDREGION_FAULT), 0, "should not detect shared cache fault");
+               }
+       }
+}
+
+static int saved_status;
+static void
+cleanup_sysctl(void)
+{
+       int ret;
+
+       if (saved_status == 0) {
+               ret = sysctlbyname("vm.vm_shared_region_reslide_aslr", NULL, NULL, &saved_status, sizeof(saved_status));
+               T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "set shared region resliding back off");
+       }
+}
+#endif  /* __arm64e__ && TARGET_OS_IPHONE */
 
 T_DECL(reslide_sharedcache, "crash induced reslide of the shared cache",
     T_META_CHECK_LEAKS(false), T_META_IGNORECRASHES(".*shared_cache_reslide_test.*"),
     T_META_ASROOT(true))
 {
+#if (__arm64e__ && TARGET_OS_IPHONE)
+       void *system_address;
+       void *reslide_address;
+       void *confirm_address;
+       char *ptr;
+       int  on = 1;
+       size_t size = sizeof(saved_status);
+
+       /* Force resliding on */
+       T_ASSERT_POSIX_SUCCESS(sysctlbyname("vm.vm_shared_region_reslide_aslr", &saved_status, &size, &on, sizeof(on)), "force enable reslide");
+       T_ATEND(cleanup_sysctl);
+
+       system_address = get_current_slide_address(false);
+       confirm_address = get_current_slide_address(false);
+       T_ASSERT_EQ_PTR(system_address, confirm_address, "system and current addresses should not diverge %p %p", system_address, confirm_address);
+
+       reslide_address = get_current_slide_address(true);
+       confirm_address = get_current_slide_address(true);
+       T_ASSERT_NE_PTR(system_address, reslide_address, "system and reslide addresses should diverge %p %p", system_address, reslide_address);
+       T_ASSERT_EQ_PTR(reslide_address, confirm_address, "reslide and another reslide (no crash) shouldn't diverge %p %p", reslide_address, confirm_address);
+
+       /* Crash into the shared cache area */
+       ptr = build_faulting_shared_cache_address(false);
+       T_ASSERT_NOTNULL(ptr, "faulting on %p in the shared region", (void *)ptr);
+       induce_crash(ptr);
+       reslide_address = get_current_slide_address(true);
+       T_ASSERT_NE_PTR(system_address, reslide_address, "system and reslide should diverge (after crash) %p %p", system_address, reslide_address);
+       T_ASSERT_NE_PTR(confirm_address, reslide_address, "reslide and another reslide should diverge (after crash) %p %p", confirm_address, reslide_address);
+
+       confirm_address = get_current_slide_address(true);
+       T_ASSERT_EQ_PTR(reslide_address, confirm_address, "reslide and another reslide shouldn't diverge (no crash) %p %p", reslide_address, confirm_address);
+
+       /* Crash somewhere else */
+       ptr = NULL;
+       induce_crash(ptr);
+       confirm_address = get_current_slide_address(true);
+       T_ASSERT_EQ_PTR(reslide_address, confirm_address, "reslide and another reslide after a non-tracked crash shouldn't diverge %p %p", reslide_address, confirm_address);
+
+       /* Ensure we still get the system address */
+       confirm_address = get_current_slide_address(false);
+       T_ASSERT_EQ_PTR(system_address, confirm_address, "system address and new process without resliding shouldn't diverge %p %p", system_address, confirm_address);
+
+       /* Ensure we detect a crash into the shared area with a TBI tagged address */
+       ptr = build_faulting_shared_cache_address(true);
+       T_ASSERT_NOTNULL(ptr, "faulting on %p in the shared region", (void *)ptr);
+       confirm_address = get_current_slide_address(true);
+       induce_crash(ptr);
+       reslide_address = get_current_slide_address(true);
+       T_ASSERT_NE_PTR(system_address, reslide_address, "system and reslide should diverge (after crash, TBI test) %p %p", system_address, reslide_address);
+       T_ASSERT_NE_PTR(confirm_address, reslide_address, "reslide and another reslide should diverge (after crash, TBI test) %p %p", confirm_address, reslide_address);
+#else   /* __arm64e__ && TARGET_OS_IPHONE */
        T_SKIP("shared cache reslide is currently only supported on arm64e iPhones");
+#endif /* __arm64e__ && TARGET_OS_IPHONE */
 }
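The TBI variant works because, with top-byte-ignore enabled for user data accesses, bits 56-63 of a data address are ignored by translation; 0x2000000000000000 sets bit 61, inside that byte, so the tagged pointer still reaches the same unmapped shared-region page and the fault path must strip the tag before classifying the fault. A small sketch of the tagging, under the assumption that TBI is active for EL0 data loads as on Apple silicon:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint64_t *p = malloc(sizeof(*p));
	*p = 0x1234;
#if defined(__APPLE__) && defined(__arm64__)
	/* Bit 61 lies in the ignored top byte (bits 56-63), mirroring the
	 * tag the reslide test ORs into its faulting address. */
	uint64_t *tagged = (uint64_t *)((uintptr_t)p | 0x2000000000000000ULL);
	printf("untagged %p -> %llx\n", (void *)p, (unsigned long long)*p);
	printf("tagged   %p -> %llx\n", (void *)tagged,
	    (unsigned long long)*tagged);
#endif
	free(p);
	return 0;
}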
index b0c496d15d1a6c5d1fd6830ecea96a8a19a66063..bec6f4a8c734c553a7d9f3dfbe1dd4140cb091ad 100644 (file)
@@ -75,7 +75,9 @@ T_DECL(sr_entitlement, "shared region by entitlement test")
        int on = 1;
        size_t size_on = sizeof(on);
 
+#if !__arm64e__
        T_SKIP("No pointer authentication support");
+#endif
 
        /*
         * Check if the sysctl vm_shared_region_by_entitlement exists and if so make
index 83a372fb3da5018b0f21195ff136c1c47f7fed0e..823e1123ff5ca98ed4616bfc8ddd3a96da3ed753 100644 (file)
@@ -1,7 +1,7 @@
 #include <darwintest.h>
 #include <sys/sysctl.h>
 
-T_DECL(sysctl_hw_target_product, "ensure the hw.target and hw.product sysctls exist")
+T_DECL(sysctl_hw_cpu, "ensure vital product and CPU-related sysctls exist")
 {
        char buffer[64] = "";
        size_t buffer_size = sizeof(buffer);
@@ -17,4 +17,12 @@ T_DECL(sysctl_hw_target_product, "ensure the hw.target and hw.product sysctls ex
            &buffer_size, NULL, 0);
        T_ASSERT_POSIX_SUCCESS(ret, "hw.product sysctl");
        T_LOG("hw.product = %s", buffer);
+
+       buffer_size = sizeof(buffer);
+
+       ret = sysctlbyname("machdep.cpu.brand_string", buffer,
+           &buffer_size, NULL, 0);
+
+       T_ASSERT_POSIX_SUCCESS(ret, "machdep.cpu.brand_string sysctl");
+       T_LOG("machdep.cpu.brand_string = %s", buffer);
 }
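machdep.cpu.brand_string is variable-length, so callers that cannot assume a fixed buffer use sysctlbyname's two-call pattern: a NULL buffer returns the required length, then a second call fills it. A minimal sketch:

#include <stdio.h>
#include <stdlib.h>
#include <sys/sysctl.h>

int
main(void)
{
	size_t len = 0;
	/* First call reports the needed size, second fills the buffer. */
	if (sysctlbyname("machdep.cpu.brand_string", NULL, &len, NULL, 0) != 0) {
		perror("sysctlbyname size query");
		return 1;
	}
	char *buf = malloc(len);
	if (buf != NULL &&
	    sysctlbyname("machdep.cpu.brand_string", buf, &len, NULL, 0) == 0) {
		printf("machdep.cpu.brand_string = %s\n", buf);
	}
	free(buf);
	return 0;
}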
index 9affaa7d2dc3ef3f9ee116d867844880a2003ced..93e85f7590ea7c0408a80ab8ec5fb6cc0f84e7f8 100755 (executable)
@@ -14,11 +14,11 @@ import xnudefines
 def MBufStat(cmd_args=None):
     """ Print extended mbuf allocator statistics.
     """
-    hdr_format = "{0: <16s} {1: >8s} {2: >8s} {3: ^16s} {4: >8s} {5: >12s} {6: >8s} {7: >8s} {8: >8s}"
-    print hdr_format.format('class', 'total', 'cached', 'uncached', 'inuse', 'failed', 'waiter', 'notified', 'purge')
-    print hdr_format.format('name', 'objs', 'objs', 'objs/slabs', 'objs', 'alloc count', 'count', 'count', 'count')
-    print hdr_format.format('-'*16, '-'*8, '-'*8, '-'*16, '-'*8, '-'*12, '-'*8, '-'*8, '-'*8)
-    entry_format = "{0: <16s} {1: >8d} {2: >8d} {3:>7d} / {4:<6d} {5: >8d} {6: >12d} {7: >8d} {8: >8d} {9: >8d}"
+    hdr_format = "{0: <16s} {1: >8s} {2: >8s} {3: ^16s} {4: >8s} {5: >12s} {6: >8s} {7: >8s} {8: >8s} {9: >8s}"
+    print hdr_format.format('class', 'total', 'cached', 'uncached', 'inuse', 'failed', 'waiter', 'notified', 'purge', 'max')
+    print hdr_format.format('name', 'objs', 'objs', 'objs/slabs', 'objs', 'alloc count', 'count', 'count', 'count', 'objs')
+    print hdr_format.format('-'*16, '-'*8, '-'*8, '-'*16, '-'*8, '-'*12, '-'*8, '-'*8, '-'*8, '-'*8)
+    entry_format = "{0: <16s} {1: >8d} {2: >8d} {3:>7d} / {4:<6d} {5: >8d} {6: >12d} {7: >8d} {8: >8d} {9: >8d} {10: >8d}"
     num_items = sizeof(kern.globals.mbuf_table) / sizeof(kern.globals.mbuf_table[0])
     ncpus = int(kern.globals.ncpu)
     for i in range(num_items):
@@ -38,7 +38,8 @@ def MBufStat(cmd_args=None):
                                   mcs.mbcl_infree, mcs.mbcl_slab_cnt,
                                   (mcs.mbcl_total - total - mcs.mbcl_infree),
                                   mcs.mbcl_fail_cnt, mbuf.mtbl_cache.mc_waiter_cnt,
-                                  mcs.mbcl_notified, mcs.mbcl_purge_cnt
+                                  mcs.mbcl_notified, mcs.mbcl_purge_cnt,
+                                  mbuf.mtbl_maxlimit
                                   )
 # EndMacro: mbuf_stat
 
index f64b5a0aa7193984c1a917b95af77a63404cdfaf..be557e7b8059875907f71487a64b70cd3838d963 100755 (executable)
@@ -3708,7 +3708,7 @@ def _vm_page_unpack_ptr(page):
         vm_pages_addr = unsigned(addressof(kern.globals.vm_pages[0]))
         element_size = unsigned(addressof(kern.globals.vm_pages[1])) - vm_pages_addr
         return (vm_pages_addr + masked_page * element_size)
-    return vm_unpack_pointer(page, params)
+    return unsigned(vm_unpack_pointer(page, params))
 
 @lldb_command('calcvmpagehash')
 def CalcVMPageHash(cmd_args=None):
index 1fc105bdfe9aea986820f9c1e784ba2f4b33d1b8..232cabba723395994bcb92c57fde29f4199dfb09 100755 (executable)
@@ -1671,7 +1671,12 @@ def GetInPcb(pcb, proto):
     out_string += "\n\t"
     so = pcb.inp_socket
     if (so != 0):
-        out_string += "so=" + str(so) + " s=" + str(int(so.so_snd.sb_cc)) + " r=" + str(int(so.so_rcv.sb_cc)) + " usecnt=" + str(int(so.so_usecount)) + ", "
+        out_string += "so=" + str(so) + " s=" + str(int(so.so_snd.sb_cc)) + " r=" + str(int(so.so_rcv.sb_cc))
+        if proto == IPPROTO_TCP:
+            tcpcb = cast(pcb.inp_ppcb, 'tcpcb *')
+            out_string += " reass=" + str(int(tcpcb.t_reassqlen))
+
+        out_string += " usecnt=" + str(int(so.so_usecount)) + ", "
 
     if (pcb.inp_state == 0 or pcb.inp_state == INPCB_STATE_INUSE):
         out_string += "inuse"
@@ -1707,7 +1712,11 @@ def CalcMbufInSB(so, snd_cc, snd_buf, rcv_cc, rcv_buf, snd_record_cnt, rcv_recor
     CalcMbufInList(mpkt, rcv_record_cnt, rcv_buf, rcv_mbuf_cnt, rcv_mbuf_cluster_cnt)
 
 def GetPcbInfo(pcbi, proto):
-    tcp_reassqlen = 0
+    tcp_reassqlen = [0]
+    tcp_reassq_bytes = 0
+    mbuf_reassq_cnt = [0]
+    mbuf_reassq_bytes = [0] * (Mbuf_Type.MT_LAST + 1)
+    mbuf_reassq_cluster = [0]
     out_string = ""
     snd_mbuf_cnt = [0]
     snd_mbuf_cluster_cnt = [0]
@@ -1754,7 +1763,14 @@ def GetPcbInfo(pcbi, proto):
                     CalcMbufInSB(so, snd_cc, snd_buf, rcv_cc, rcv_buf, snd_record_cnt, rcv_record_cnt, snd_mbuf_cnt, rcv_mbuf_cnt, snd_mbuf_cluster_cnt, rcv_mbuf_cluster_cnt)
                 if proto == IPPROTO_TCP and pcb.inp_ppcb:
                     tcpcb = cast(pcb.inp_ppcb, 'tcpcb *')
-                    tcp_reassqlen += tcpcb.t_reassqlen
+                    reass_entry = cast(tcpcb.t_segq.lh_first, 'tseg_qent *')
+                    curr_reass = 0
+                    while reass_entry != 0:
+                        CalcMbufInList(reass_entry.tqe_m, tcp_reassqlen, mbuf_reassq_bytes, mbuf_reassq_cnt, mbuf_reassq_cluster)
+                        tcp_reassq_bytes += reass_entry.tqe_len
+                        curr_reass += reass_entry.tqe_len
+
+                        reass_entry = reass_entry.tqe_q.le_next
 
                 pcb = cast(pcb.inp_hash.le_next, 'inpcb *')
             i += 1
@@ -1770,7 +1786,11 @@ def GetPcbInfo(pcbi, proto):
             out_string += "total snd_buf bytes of type " + Mbuf_Type.reverse_mapping[x] + " : " + str(int(snd_buf[x])) + " total recv_buf bytes of type " + Mbuf_Type.reverse_mapping[x] + " : " + str(int(rcv_buf[x])) + "\n"
     out_string += "port hash base is " + hex(pcbi.ipi_porthashbase) + "\n"
     if proto == IPPROTO_TCP:
-        out_string += "TCP reassembly queue length: " + str(tcp_reassqlen) + "\n"
+        out_string += "TCP reassembly queue length: " + str(tcp_reassqlen[0]) + " TCP-payload bytes: " + str(tcp_reassq_bytes) + "\n"
+
+        for x in range(Mbuf_Type.MT_LAST):
+            if mbuf_reassq_bytes[x] != 0:
+                out_string += "total reassq bytes of type " + Mbuf_Type.reverse_mapping[x] + " : " + str(mbuf_reassq_bytes[x]) + "\n"
 
     i = 0
     hashbase = pcbi.ipi_porthashbase