From: Apple Date: Tue, 2 Feb 2021 00:24:03 +0000 (+0000) Subject: xnu-7195.60.75.tar.gz X-Git-Tag: v7195.60.75^0 X-Git-Url: https://git.saurik.com/apple/xnu.git/commitdiff_plain/2a1bd2d3eef5c7a7bb14f4bb9fdbca9a96ee4752 xnu-7195.60.75.tar.gz --- diff --git a/EXTERNAL_HEADERS/corecrypto/cc.h b/EXTERNAL_HEADERS/corecrypto/cc.h index 4ea8e63d0..3a08ba57f 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc.h +++ b/EXTERNAL_HEADERS/corecrypto/cc.h @@ -167,4 +167,19 @@ int cc_cmp_safe (size_t num, const void * ptr1, const void * ptr2); /* Return the minimum value between S and T. */ #define CC_MIN(S, T) ({__typeof__(S) _cc_min_s = S; __typeof__(T) _cc_min_t = T; _cc_min_s <= _cc_min_t ? _cc_min_s : _cc_min_t;}) +/* + When building with "-nostdinc" (i.e. iboot), ptrauth.h is in a non-standard location. + This requires a new flag to be used when building iboot: -ibuiltininc. + + This flag doesn't seem present at the moment in clang. For now lets not + diversify in iBoot. +*/ +#if __has_feature(ptrauth_calls) && (CC_KERNEL || CC_USE_L4 || CC_USE_SEPROM) +#include +#define CC_SPTR(_sn_, _n_) \ + __ptrauth(ptrauth_key_process_independent_code, 1, ptrauth_string_discriminator("cc_" #_sn_ #_n_)) _n_ +#else +#define CC_SPTR(_sn_, _n_) _n_ +#endif + #endif /* _CORECRYPTO_CC_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccdigest.h b/EXTERNAL_HEADERS/corecrypto/ccdigest.h index ce84aa8d4..0edd80261 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdigest.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdigest.h @@ -46,9 +46,9 @@ struct ccdigest_info { size_t oid_size; const unsigned char *oid; const void *initial_state; - void(*compress)(ccdigest_state_t state, size_t nblocks, + void(* CC_SPTR(ccdigest_info, compress))(ccdigest_state_t state, size_t nblocks, const void *data); - void(*final)(const struct ccdigest_info *di, ccdigest_ctx_t ctx, + void(* CC_SPTR(ccdigest_info, final))(const struct ccdigest_info *di, ccdigest_ctx_t ctx, unsigned char *digest); }; diff --git 
a/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h b/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h index 263dded51..700828a0c 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h +++ b/EXTERNAL_HEADERS/corecrypto/ccdrbg_impl.h @@ -27,7 +27,7 @@ struct ccdrbg_info { @param in Additional input bytes @return 0 if successful */ - int (*init)(const struct ccdrbg_info *info, struct ccdrbg_state *drbg, + int (*CC_SPTR(ccdrbg_info, init))(const struct ccdrbg_info *info, struct ccdrbg_state *drbg, size_t entropyLength, const void* entropy, size_t nonceLength, const void* nonce, size_t psLength, const void* ps); @@ -40,7 +40,7 @@ struct ccdrbg_info { @param in Additional input bytes @return 0 if successful */ - int (*reseed)(struct ccdrbg_state *prng, + int (*CC_SPTR(ccdrbg_info, reseed))(struct ccdrbg_state *prng, size_t entropylen, const void *entropy, size_t inlen, const void *in); @@ -52,14 +52,14 @@ struct ccdrbg_info { @param in Additional input bytes @return 0 if successfull */ - int (*generate)(struct ccdrbg_state *prng, + int (*CC_SPTR(ccdrbg_info, generate))(struct ccdrbg_state *prng, size_t outlen, void *out, size_t inlen, const void *in); /*! Terminate a PRNG state @param prng The PRNG state to terminate */ - void (*done)(struct ccdrbg_state *prng); + void (*CC_SPTR(ccdrbg_info, done))(struct ccdrbg_state *prng); /** private parameters */ const void *custom; diff --git a/EXTERNAL_HEADERS/corecrypto/cckprng.h b/EXTERNAL_HEADERS/corecrypto/cckprng.h index 0c97177ff..79fe22fd3 100644 --- a/EXTERNAL_HEADERS/corecrypto/cckprng.h +++ b/EXTERNAL_HEADERS/corecrypto/cckprng.h @@ -220,6 +220,20 @@ struct cckprng_sched_ctx { unsigned pool_idx; }; +// A function pointer to fill an entropy buffer. It should return some +// estimate of entropy (e.g. the number of timing samples resident in +// the buffer). The implementation may return zero if no entropy is +// available. The implementation should return negative in case of an +// error (e.g. a failure in continuous health tests). 
+// +// The caller should set entropy_nbytes to the maximum size of the +// input buffer, and the implementation should set it to the number of +// bytes it has initialized. The third argument is arbitrary state the +// implementation provides and receives back on each call. +typedef int32_t (*cckprng_getentropy)(size_t *entropy_nbytes, + void *entropy, + void *arg); + struct cckprng_ctx { // The master secret of the PRNG struct cckprng_key_ctx key; @@ -250,24 +264,38 @@ struct cckprng_ctx { // Diagnostics for the PRNG struct cckprng_diag diag; + + // A function pointer to get entropy + cckprng_getentropy getentropy; + + // An arbitrary piece of state to be provided to the entropy function + void *getentropy_arg; }; // This collection of function pointers is just a convenience for // registering the PRNG with xnu struct cckprng_funcs { - void (*init)(struct cckprng_ctx *ctx, - unsigned max_ngens, - size_t entropybuf_nbytes, - const void *entropybuf, - const uint32_t *entropybuf_nsamples, - size_t seed_nbytes, - const void *seed, - size_t nonce_nbytes, - const void *nonce); - void (*initgen)(struct cckprng_ctx *ctx, unsigned gen_idx); - void (*reseed)(struct cckprng_ctx *ctx, size_t nbytes, const void *seed); - void (*refresh)(struct cckprng_ctx *ctx); - void (*generate)(struct cckprng_ctx *ctx, unsigned gen_idx, size_t nbytes, void *out); + void (*CC_SPTR(cckprng_funcs, init))(struct cckprng_ctx *ctx, + unsigned max_ngens, + size_t entropybuf_nbytes, + const void *entropybuf, + const uint32_t *entropybuf_nsamples, + size_t seed_nbytes, + const void *seed, + size_t nonce_nbytes, + const void *nonce); + void (*CC_SPTR(cckprng_funcs, initgen))(struct cckprng_ctx *ctx, unsigned gen_idx); + void (*CC_SPTR(cckprng_funcs, reseed))(struct cckprng_ctx *ctx, size_t nbytes, const void *seed); + void (*CC_SPTR(cckprng_funcs, refresh))(struct cckprng_ctx *ctx); + void (*CC_SPTR(cckprng_funcs, generate))(struct cckprng_ctx *ctx, unsigned gen_idx, size_t nbytes, void *out); + 
void (*CC_SPTR(cckprng_funcs, init_with_getentropy))(struct cckprng_ctx *ctx, + unsigned max_ngens, + size_t seed_nbytes, + const void *seed, + size_t nonce_nbytes, + const void *nonce, + cckprng_getentropy getentropy, + void *getentropy_arg); }; /* @@ -296,6 +324,30 @@ void cckprng_init(struct cckprng_ctx *ctx, size_t nonce_nbytes, const void *nonce); +/* + @function cckprng_init_with_getentropy + @abstract Initialize a kernel PRNG context. + + @param ctx Context for this instance + @param max_ngens Maximum count of generators that may be allocated + @param seed_nbytes Length of the seed in bytes + @param seed Pointer to a high-entropy seed + @param nonce_nbytes Length of the nonce in bytes + @param seed Pointer to a single-use nonce + @param getentropy A function pointer to fill an entropy buffer + @param getentropy_arg State provided to the entropy function + + @discussion @p max_ngens should be set based on an upper bound of CPUs available on the device. See the @p cckprng_getentropy type definition for discussion on its semantics. +*/ +void cckprng_init_with_getentropy(struct cckprng_ctx *ctx, + unsigned max_ngens, + size_t seed_nbytes, + const void *seed, + size_t nonce_nbytes, + const void *nonce, + cckprng_getentropy getentropy, + void *getentropy_arg); + /* @function cckprng_initgen @abstract Initialize an output generator. diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h index 849881ed4..ff8486b24 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h @@ -17,16 +17,13 @@ /* ECB mode. */ cc_aligned_struct(16) ccecb_ctx; - /* Actual symmetric algorithm implementation should provide you one of these. */ struct ccmode_ecb { - size_t size; /* first argument to ccecb_ctx_decl(). */ + size_t size; /* first argument to ccecb_ctx_decl(). 
*/ size_t block_size; - int (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx, - size_t key_nbytes, const void *key); - int (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in, - void *out); - void (*roundkey)(const ccecb_ctx *ctx, unsigned r, void *key); + int (*CC_SPTR(ccmode_ecb, init))(const struct ccmode_ecb *ecb, ccecb_ctx *ctx, size_t key_nbytes, const void *key); + int (*CC_SPTR(ccmode_ecb, ecb))(const ccecb_ctx *ctx, size_t nblocks, const void *in, void *out); + void (*CC_SPTR(ccmode_ecb, roundkey))(const ccecb_ctx *ctx, unsigned r, void *key); }; /*! @@ -64,13 +61,11 @@ cc_aligned_struct(16) cccbc_ctx; cc_aligned_struct(16) cccbc_iv; struct ccmode_cbc { - size_t size; /* first argument to cccbc_ctx_decl(). */ + size_t size; /* first argument to cccbc_ctx_decl(). */ size_t block_size; - int (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, - size_t key_len, const void *key); + int (*CC_SPTR(ccmode_cbc, init))(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, size_t key_len, const void *key); /* cbc encrypt or decrypt nblocks from in to out, iv will be used and updated. */ - int (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv, - size_t nblocks, const void *in, void *out); + int (*CC_SPTR(ccmode_cbc, cbc))(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, const void *in, void *out); const void *custom; }; @@ -78,11 +73,11 @@ struct ccmode_cbc { cc_aligned_struct(16) cccfb_ctx; struct ccmode_cfb { - size_t size; /* first argument to cccfb_ctx_decl(). */ + size_t size; /* first argument to cccfb_ctx_decl(). 
*/ size_t block_size; - int (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, - size_t key_len, const void *key, const void *iv); - int (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*CC_SPTR(ccmode_cfb, + init))(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, size_t key_len, const void *key, const void *iv); + int (*CC_SPTR(ccmode_cfb, cfb))(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -90,11 +85,11 @@ struct ccmode_cfb { cc_aligned_struct(16) cccfb8_ctx; struct ccmode_cfb8 { - size_t size; /* first argument to cccfb8_ctx_decl(). */ + size_t size; /* first argument to cccfb8_ctx_decl(). */ size_t block_size; - int (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, - size_t key_len, const void *key, const void *iv); - int (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*CC_SPTR(ccmode_cfb8, + init))(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, size_t key_len, const void *key, const void *iv); + int (*CC_SPTR(ccmode_cfb8, cfb8))(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -102,13 +97,13 @@ struct ccmode_cfb8 { cc_aligned_struct(16) ccctr_ctx; struct ccmode_ctr { - size_t size; /* first argument to ccctr_ctx_decl(). */ - size_t block_size; /* for historical reasons, this is set to 1 */ - size_t ecb_block_size; /* the actual block size of the underlying cipher */ - int (*init)(const struct ccmode_ctr *mode, ccctr_ctx *ctx, - size_t key_len, const void *key, const void *iv); - int (*setctr)(const struct ccmode_ctr *mode, ccctr_ctx *ctx, const void *ctr); - int (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out); + size_t size; /* first argument to ccctr_ctx_decl(). 
*/ + size_t block_size; /* for historical reasons, this is set to 1 */ + size_t ecb_block_size; /* the actual block size of the underlying cipher */ + int (*CC_SPTR(ccmode_ctr, + init))(const struct ccmode_ctr *mode, ccctr_ctx *ctx, size_t key_len, const void *key, const void *iv); + int (*CC_SPTR(ccmode_ctr, setctr))(const struct ccmode_ctr *mode, ccctr_ctx *ctx, const void *ctr); + int (*CC_SPTR(ccmode_ctr, ctr))(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -116,11 +111,11 @@ struct ccmode_ctr { cc_aligned_struct(16) ccofb_ctx; struct ccmode_ofb { - size_t size; /* first argument to ccofb_ctx_decl(). */ + size_t size; /* first argument to ccofb_ctx_decl(). */ size_t block_size; - int (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, - size_t key_len, const void *key, const void *iv); - int (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*CC_SPTR(ccmode_ofb, + init))(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, size_t key_len, const void *key, const void *iv); + int (*CC_SPTR(ccmode_ofb, ofb))(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -129,8 +124,8 @@ cc_aligned_struct(16) ccxts_ctx; cc_aligned_struct(16) ccxts_tweak; struct ccmode_xts { - size_t size; /* first argument to ccxts_ctx_decl(). Size of the ctx data structure */ - size_t tweak_size; /* first argument to ccxts_tweak_decl(). Size of the tweak structure, not the expected tweak size */ + size_t size; /* first argument to ccxts_ctx_decl(). Size of the ctx data structure */ + size_t tweak_size; /* first argument to ccxts_tweak_decl(). Size of the tweak structure, not the expected tweak size */ size_t block_size; /* Create a xts key from a xts mode object. @@ -139,72 +134,83 @@ struct ccmode_xts { key and tweak_key must differ. Returns nonzero on failure. 
*/ - int (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - size_t key_nbytes, const void *data_key, const void *tweak_key); - - void (*key_sched)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - size_t key_nbytes, const void *data_key, const void *tweak_key); + int (*CC_SPTR(ccmode_xts, init))(const struct ccmode_xts *xts, + ccxts_ctx *ctx, + size_t key_nbytes, + const void *data_key, + const void *tweak_key); + + void (*CC_SPTR(ccmode_xts, key_sched))(const struct ccmode_xts *xts, + ccxts_ctx *ctx, + size_t key_nbytes, + const void *data_key, + const void *tweak_key); /* Set the tweak (sector number), the block within the sector zero. */ - int (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv); + int (*CC_SPTR(ccmode_xts, set_tweak))(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv); /* Encrypt blocks for a sector, clients must call set_tweak before calling this function. Return a pointer to the tweak buffer */ - void *(*xts)(const ccxts_ctx *ctx, ccxts_tweak *tweak, - size_t nblocks, const void *in, void *out); + void *(*CC_SPTR(ccmode_xts, xts))(const ccxts_ctx *ctx, ccxts_tweak *tweak, size_t nblocks, const void *in, void *out); const void *custom; const void *custom1; }; -//7- GCM mode, statful +// 7- GCM mode, statful cc_aligned_struct(16) ccgcm_ctx; -#define CCMODE_GCM_DECRYPTOR 78647 -#define CCMODE_GCM_ENCRYPTOR 4073947 +#define CCMODE_GCM_DECRYPTOR 78647 +#define CCMODE_GCM_ENCRYPTOR 4073947 struct ccmode_gcm { - size_t size; /* first argument to ccgcm_ctx_decl(). */ - int encdec; //is it encrypt or decrypt object + size_t size; /* first argument to ccgcm_ctx_decl(). 
*/ + int encdec; // is it encrypt or decrypt object size_t block_size; - int (*init)(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx, - size_t key_nbytes, const void *key); - int (*set_iv)(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv); - int (*gmac)(ccgcm_ctx *ctx, size_t nbytes, const void *in); // could just be gcm with NULL out - int (*gcm)(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out); - int (*finalize)(ccgcm_ctx *key, size_t tag_nbytes, void *tag); - int (*reset)(ccgcm_ctx *ctx); + int (*CC_SPTR(ccmode_gcm, init))(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx, size_t key_nbytes, const void *key); + int (*CC_SPTR(ccmode_gcm, set_iv))(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv); + int (*CC_SPTR(ccmode_gcm, gmac))(ccgcm_ctx *ctx, size_t nbytes, const void *in); // could just be gcm with NULL out + int (*CC_SPTR(ccmode_gcm, gcm))(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*CC_SPTR(ccmode_gcm, finalize))(ccgcm_ctx *key, size_t tag_nbytes, void *tag); + int (*CC_SPTR(ccmode_gcm, reset))(ccgcm_ctx *ctx); const void *custom; }; -//8- CCM mode, stateful +// 8- CCM mode, stateful cc_aligned_struct(16) ccccm_ctx; cc_aligned_struct(16) ccccm_nonce; struct ccmode_ccm { - size_t size; /* first argument to ccccm_ctx_decl(). */ - size_t nonce_size; /* first argument to ccccm_nonce_decl(). */ + size_t size; /* first argument to ccccm_ctx_decl(). */ + size_t nonce_size; /* first argument to ccccm_nonce_decl(). 
*/ size_t block_size; - int (*init)(const struct ccmode_ccm *ccm, ccccm_ctx *ctx, - size_t key_len, const void *key); - int (*set_iv)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nonce_len, const void *nonce, - size_t mac_size, size_t auth_len, size_t data_len); - int (*cbcmac)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in); // could just be ccm with NULL out - int (*ccm)(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in, void *out); - int (*finalize)(ccccm_ctx *key, ccccm_nonce *nonce_ctx, void *mac); - int (*reset)(ccccm_ctx *key, ccccm_nonce *nonce_ctx); + int (*CC_SPTR(ccmode_ccm, init))(const struct ccmode_ccm *ccm, ccccm_ctx *ctx, size_t key_len, const void *key); + int (*CC_SPTR(ccmode_ccm, set_iv))(ccccm_ctx *ctx, + ccccm_nonce *nonce_ctx, + size_t nonce_len, + const void *nonce, + size_t mac_size, + size_t auth_len, + size_t data_len); + int (*CC_SPTR(ccmode_ccm, cbcmac))(ccccm_ctx *ctx, + ccccm_nonce *nonce_ctx, + size_t nbytes, + const void *in); // could just be ccm with NULL out + int (*CC_SPTR(ccmode_ccm, ccm))(ccccm_ctx *ctx, ccccm_nonce *nonce_ctx, size_t nbytes, const void *in, void *out); + int (*CC_SPTR(ccmode_ccm, finalize))(ccccm_ctx *key, ccccm_nonce *nonce_ctx, void *mac); + int (*CC_SPTR(ccmode_ccm, reset))(ccccm_ctx *key, ccccm_nonce *nonce_ctx); const void *custom; }; /* We need to expose this (currently)to keep CommonCrypto happy. 
*/ struct _ccmode_ccm_nonce { - unsigned char A_i[16]; /* crypto block iv */ - unsigned char B_i[16]; /* mac block iv */ - unsigned char MAC[16]; /* crypted mac */ - unsigned char buf[16]; /* crypt buffer */ + unsigned char A_i[16]; /* crypto block iv */ + unsigned char B_i[16]; /* mac block iv */ + unsigned char MAC[16]; /* crypted mac */ + unsigned char buf[16]; /* crypt buffer */ - uint32_t mode; /* mode: IV -> AD -> DATA */ - uint32_t buflen; /* length of data in buf */ - uint32_t b_i_len; /* length of cbcmac data in B_i */ + uint32_t mode; /* mode: IV -> AD -> DATA */ + uint32_t buflen; /* length of data in buf */ + uint32_t b_i_len; /* length of cbcmac data in B_i */ size_t nonce_size; size_t mac_size; @@ -214,12 +220,11 @@ struct _ccmode_ccm_nonce { cc_aligned_struct(16) ccomac_ctx; struct ccmode_omac { - size_t size; /* first argument to ccomac_ctx_decl(). */ + size_t size; /* first argument to ccomac_ctx_decl(). */ size_t block_size; - int (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx, - size_t tweak_len, size_t key_len, const void *key); - int (*omac)(ccomac_ctx *ctx, size_t nblocks, - const void *tweak, const void *in, void *out); + int (*CC_SPTR(ccmode_omac, + init))(const struct ccmode_omac *omac, ccomac_ctx *ctx, size_t tweak_len, size_t key_len, const void *key); + int (*CC_SPTR(ccmode_omac, omac))(ccomac_ctx *ctx, size_t nblocks, const void *tweak, const void *in, void *out); const void *custom; }; diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h index 5d40c1dd1..a1df1a480 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_siv.h @@ -29,12 +29,12 @@ cc_aligned_struct(16) ccsiv_ctx; struct ccmode_siv { size_t size; /* first argument to ccsiv_ctx_decl(). 
*/ size_t block_size; - int (*init)(const struct ccmode_siv *siv, ccsiv_ctx *ctx, + int (*CC_SPTR(ccmode_siv, init))(const struct ccmode_siv *siv, ccsiv_ctx *ctx, size_t key_len, const uint8_t *key); - int (*set_nonce)(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in); // could just be ccm with NULL out - int (*auth)(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in); // could just be ccm with NULL out - int (*crypt)(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out); - int (*reset)(ccsiv_ctx *ctx); + int (*CC_SPTR(ccmode_siv, set_nonce))(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in); // could just be ccm with NULL out + int (*CC_SPTR(ccmode_siv, auth))(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in); // could just be ccm with NULL out + int (*CC_SPTR(ccmode_siv, crypt))(ccsiv_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out); + int (*CC_SPTR(ccmode_siv, reset))(ccsiv_ctx *ctx); const struct ccmode_cbc *cbc; const struct ccmode_ctr *ctr; }; diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h b/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h index eba951c50..c828135da 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_siv_hmac.h @@ -34,15 +34,15 @@ struct ccmode_siv_hmac { size_t size; /* first argument to ccsiv_hmac_ctx_decl(). 
*/ size_t block_size; - int (*init)(const struct ccmode_siv_hmac *sivhmac, + int (*CC_SPTR(ccmode_siv_hmac, init))(const struct ccmode_siv_hmac *sivhmac, ccsiv_hmac_ctx *ctx, size_t key_len, const uint8_t *key, const size_t tag_size); - int (*set_nonce)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in); - int (*auth)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in); - int (*crypt)(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out); - int (*reset)(ccsiv_hmac_ctx *ctx); + int (*CC_SPTR(ccmode_siv_hmac, set_nonce))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in); + int (*CC_SPTR(ccmode_siv_hmac, auth))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in); + int (*CC_SPTR(ccmode_siv_hmac, crypt))(ccsiv_hmac_ctx *ctx, size_t nbytes, const uint8_t *in, uint8_t *out); + int (*CC_SPTR(ccmode_siv_hmac, reset))(ccsiv_hmac_ctx *ctx); const struct ccdigest_info *hmac_digest; // Digest to be used in HMAC; const struct ccmode_ctr *ctr; }; diff --git a/EXTERNAL_HEADERS/corecrypto/ccrng.h b/EXTERNAL_HEADERS/corecrypto/ccrng.h index 4582ddab6..d38115a8b 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccrng.h +++ b/EXTERNAL_HEADERS/corecrypto/ccrng.h @@ -15,7 +15,7 @@ #include #define CCRNG_STATE_COMMON \ - int (*generate)(struct ccrng_state *rng, size_t outlen, void *out); + int (*CC_SPTR(ccrng_state, generate))(struct ccrng_state *rng, size_t outlen, void *out); /*! @type struct ccrng_state diff --git a/Makefile b/Makefile index 30e496301..a3afec9d0 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 1999-2016 Apple Inc. All rights reserved. +# Copyright (C) 1999-2020 Apple Inc. All rights reserved. 
# ifndef VERSDIR export VERSDIR := $(shell /bin/pwd) @@ -186,7 +186,6 @@ TOP_TARGETS = \ install install_desktop install_embedded \ install_release_embedded install_development_embedded \ install_kernels \ - installopensource \ cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \ help @@ -317,6 +316,7 @@ xnu_tests_driverkit: $(MAKE) -C $(SRCROOT)/tests/driverkit $(if $(filter -j,$(MAKEFLAGS)),,$(MAKEJOBS)) \ SRCROOT=$(SRCROOT)/tests/driverkit + # # The "analyze" target defined below invokes Clang Static Analyzer # with a predefined set of checks and options for the project. diff --git a/SETUP/kextsymboltool/kextsymboltool.c b/SETUP/kextsymboltool/kextsymboltool.c index 46f644b55..2954a391f 100644 --- a/SETUP/kextsymboltool/kextsymboltool.c +++ b/SETUP/kextsymboltool/kextsymboltool.c @@ -476,6 +476,7 @@ lookup_arch(const char *archstring) { "armv7s", 12 /* CPU_TYPE_ARM */, 11 /* CPU_SUBTYPE_ARM_V7S */, NX_LittleEndian, NULL }, { "armv7k", 12 /* CPU_TYPE_ARM */, 12 /* CPU_SUBTYPE_ARM_V7K */, NX_LittleEndian, NULL }, { "arm64", 0x0100000c /* CPU_TYPE_ARM64 */, 0 /* CPU_SUBTYPE_ARM64_ALL */, NX_LittleEndian, NULL }, + { "arm64e", 0x0100000c /* CPU_TYPE_ARM64 */, 2 /* CPU_SUBTYPE_ARM64_E */, NX_LittleEndian, NULL }, }; unsigned long i; diff --git a/bsd/conf/Makefile.arm64 b/bsd/conf/Makefile.arm64 index 333857edb..c7d4f4a2e 100644 --- a/bsd/conf/Makefile.arm64 +++ b/bsd/conf/Makefile.arm64 @@ -5,6 +5,7 @@ # Inline assembly doesn't interact well with LTO fbt_arm.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) +kern_csr.o_CFLAGS_ADD += -I$(SRCROOT)/osfmk ###################################################################### #END Machine dependent Makefile fragment for arm diff --git a/bsd/conf/files b/bsd/conf/files index 258176a7e..6a99242b6 100644 --- a/bsd/conf/files +++ b/bsd/conf/files @@ -371,7 +371,7 @@ bsd/netkey/keydb.c optional ipsec bsd/net/multi_layer_pkt_log.c optional inet inet ipsec ipsec_esp -bsd/crypto/entropy/diag_entropy_sysctl.c standard 
+bsd/crypto/entropy/entropy_sysctl.c standard #bsd/netpm/pm_aTT.c optional pm #bsd/netpm/pm_ams.c optional pm diff --git a/bsd/crypto/entropy/Makefile b/bsd/crypto/entropy/Makefile index a3ad2c884..2d1197ce7 100644 --- a/bsd/crypto/entropy/Makefile +++ b/bsd/crypto/entropy/Makefile @@ -7,7 +7,7 @@ include $(MakeInc_cmd) include $(MakeInc_def) DATAFILES = \ - diag_entropy_sysctl.h + entropy_sysctl.h INSTALL_MI_LIST = diff --git a/bsd/crypto/entropy/diag_entropy_sysctl.c b/bsd/crypto/entropy/diag_entropy_sysctl.c deleted file mode 100644 index af5329859..000000000 --- a/bsd/crypto/entropy/diag_entropy_sysctl.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (c) 2019 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. 
- * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#include -#include -#include - -extern entropy_data_t EntropyData; - -static int -sysctl_entropy_collect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) -{ - if (!req->oldptr || req->oldlen > EntropyData.buffer_size) { - return EINVAL; - } - return SYSCTL_OUT(req, EntropyData.buffer, req->oldlen); -} - -SYSCTL_NODE(_kern, OID_AUTO, entropy, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, 0, NULL); -// Get current size of entropy buffer in bytes -SYSCTL_UINT(_kern_entropy, OID_AUTO, entropy_buffer_size, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, &EntropyData.buffer_size, 0, NULL); -// Collect contents from entropy buffer -SYSCTL_PROC(_kern_entropy, OID_AUTO, entropy_collect, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, NULL, 0, sysctl_entropy_collect, "-", NULL); - -void -register_entropy_sysctl(void) -{ - sysctl_register_oid(&sysctl__kern_entropy); - sysctl_register_oid(&sysctl__kern_entropy_entropy_buffer_size); - sysctl_register_oid(&sysctl__kern_entropy_entropy_collect); -} diff --git a/bsd/crypto/entropy/diag_entropy_sysctl.h b/bsd/crypto/entropy/diag_entropy_sysctl.h deleted file mode 100644 index c05650e79..000000000 --- a/bsd/crypto/entropy/diag_entropy_sysctl.h +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2019 Apple Inc. All rights reserved. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ - * - * This file contains Original Code and/or Modifications of Original Code - * as defined in and that are subject to the Apple Public Source License - * Version 2.0 (the 'License'). You may not use this file except in - * compliance with the License. 
The rights granted to you under the License - * may not be used to create, or enable the creation or redistribution of, - * unlawful or unlicensed copies of an Apple operating system, or to - * circumvent, violate, or enable the circumvention or violation of, any - * terms of an Apple operating system software license agreement. - * - * Please obtain a copy of the License at - * http://www.opensource.apple.com/apsl/ and read it before using this file. - * - * The Original Code and all software distributed under the License are - * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER - * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, - * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. - * Please see the License for the specific language governing rights and - * limitations under the License. - * - * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ - */ - -#ifndef _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_ -#define _SYS_CRYPTO_ENTROPY_DIAG_ENTROPY_SYSCTL_H_ - -void register_entropy_sysctl(void); - -#endif diff --git a/bsd/crypto/entropy/entropy_sysctl.c b/bsd/crypto/entropy/entropy_sysctl.c new file mode 100644 index 000000000..39502f7b9 --- /dev/null +++ b/bsd/crypto/entropy/entropy_sysctl.c @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include + +SYSCTL_NODE(_kern, OID_AUTO, entropy, CTLFLAG_RD, 0, NULL); +SYSCTL_NODE(_kern_entropy, OID_AUTO, health, CTLFLAG_RD, 0, NULL); + +SYSCTL_INT(_kern_entropy_health, OID_AUTO, startup_done, CTLFLAG_RD, &entropy_health_startup_done, 0, NULL); + +SYSCTL_NODE(_kern_entropy_health, OID_AUTO, repetition_count_test, CTLFLAG_RD, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, reset_count, CTLFLAG_RD, &entropy_health_rct_stats.reset_count, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, failure_count, CTLFLAG_RD, &entropy_health_rct_stats.failure_count, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_repetition_count_test, OID_AUTO, max_observation_count, CTLFLAG_RD, &entropy_health_rct_stats.max_observation_count, 0, NULL); + +SYSCTL_NODE(_kern_entropy_health, OID_AUTO, adaptive_proportion_test, CTLFLAG_RD, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, 
reset_count, CTLFLAG_RD, &entropy_health_apt_stats.reset_count, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, failure_count, CTLFLAG_RD, &entropy_health_apt_stats.failure_count, 0, NULL); +SYSCTL_UINT(_kern_entropy_health_adaptive_proportion_test, OID_AUTO, max_observation_count, CTLFLAG_RD, &entropy_health_apt_stats.max_observation_count, 0, NULL); + +static int +sysctl_entropy_collect(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) +{ + if (!req->oldptr || req->oldlen > entropy_analysis_buffer_size) { + return EINVAL; + } + + return SYSCTL_OUT(req, entropy_analysis_buffer, req->oldlen); +} + +// Get current size of entropy buffer in bytes +SYSCTL_UINT(_kern_entropy, OID_AUTO, entropy_buffer_size, CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, &entropy_analysis_buffer_size, 0, NULL); +// Collect contents from entropy buffer +SYSCTL_PROC(_kern_entropy, OID_AUTO, entropy_collect, CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MASKED | CTLFLAG_NOAUTO, NULL, 0, sysctl_entropy_collect, "-", NULL); + +void +entropy_analysis_register_sysctls(void) +{ + sysctl_register_oid(&sysctl__kern_entropy_entropy_buffer_size); + sysctl_register_oid(&sysctl__kern_entropy_entropy_collect); +} diff --git a/bsd/crypto/entropy/entropy_sysctl.h b/bsd/crypto/entropy/entropy_sysctl.h new file mode 100644 index 000000000..4e957fb9b --- /dev/null +++ b/bsd/crypto/entropy/entropy_sysctl.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 Apple Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _SYS_CRYPTO_ENTROPY_ENTROPYSYSCTL_H_ +#define _SYS_CRYPTO_ENTROPY_ENTROPYSYSCTL_H_ + +// This function is used only for test purposes. We collect a large +// number of entropy samples during boot and analyze them offline. +// +// See entropy.c to understand the initialization of this module via +// boot arg and the collection of the samples. +// +// See entropy_sysctl.c to understand the semantics of the sysctl +// that exposes the samples for analysis. +void entropy_analysis_register_sysctls(void); + +#endif diff --git a/bsd/dev/arm/kern_machdep.c b/bsd/dev/arm/kern_machdep.c index 9caed58b1..02c9e723c 100644 --- a/bsd/dev/arm/kern_machdep.c +++ b/bsd/dev/arm/kern_machdep.c @@ -37,6 +37,31 @@ cpu_subtype32() } } +static int +grade_arm64e_binary(cpu_subtype_t execfeatures) +{ +#if XNU_TARGET_OS_IOS + /* + * iOS 13 toolchains produced unversioned arm64e slices which are not + * ABI compatible with this release. 
+ */ + if ((execfeatures & CPU_SUBTYPE_PTRAUTH_ABI) == 0) { +#if DEBUG || DEVELOPMENT + printf("%s: arm64e prerelease ABI cannot be used with this kernel\n", __func__); +#endif /* DEBUG || DEVELOPMENT */ + return 0; + } +#endif /* XNU_TARGET_OS_IOS */ + + /* The current ABI version is preferred over arm64 */ + if (CPU_SUBTYPE_ARM64_PTR_AUTH_VERSION(execfeatures) == + CPU_SUBTYPE_ARM64_PTR_AUTH_CURRENT_VERSION) { + return 12; + } + + /* Future ABIs are allowed, but exec_mach_imgact will treat it like an arm64 slice */ + return 11; +} #endif /* __arm64__ */ /********************************************************************** @@ -70,6 +95,15 @@ grade_binary(cpu_type_t exectype, cpu_subtype_t execsubtype, cpu_subtype_t execf } break; + case CPU_SUBTYPE_ARM64E: + switch (execsubtype) { + case CPU_SUBTYPE_ARM64E: + return grade_arm64e_binary(execfeatures); + case CPU_SUBTYPE_ARM64_V8: + return 10; + case CPU_SUBTYPE_ARM64_ALL: + return 9; + } } /* switch (hostsubtype) */ break; diff --git a/bsd/dev/arm64/sysctl.c b/bsd/dev/arm64/sysctl.c index ef46d1af7..70c3bf468 100644 --- a/bsd/dev/arm64/sysctl.c +++ b/bsd/dev/arm64/sysctl.c @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #if HYPERVISOR #include @@ -159,12 +161,49 @@ SYSCTL_PROC(_machdep_cpu, OID_AUTO, thread_count, sizeof(integer_t), arm_host_info, "I", "Number of enabled threads per package"); +static SECURITY_READ_ONLY_LATE(char*) brand_string = NULL; +static SECURITY_READ_ONLY_LATE(size_t) brand_string_len = 0; + +/* + * SecureDTLookupEntry() is only guaranteed to work before PE_init_iokit(), + * so we load the brand string (if available) in a startup handler. 
+ */ +__startup_func +static void +sysctl_load_brand_string(void) +{ + DTEntry node; + void const *value = NULL; + unsigned int size = 0; + + if (kSuccess != SecureDTLookupEntry(0, "/product", &node)) { + return; + } + + if (kSuccess != SecureDTGetProperty(node, "product-soc-name", (void const **) &value, &size)) { + return; + } + + if (size == 0) { + return; + } + + brand_string = zalloc_permanent(size, ZALIGN_NONE); + if (brand_string == NULL) { + return; + } + + memcpy(brand_string, value, size); + brand_string_len = size; +} +STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, sysctl_load_brand_string); + /* * machdep.cpu.brand_string * * x86: derived from CPUID data. - * ARM: cons something up from the CPUID register. Could include cpufamily - * here and map it to a "marketing" name, but there's no obvious need; + * ARM: Grab the product string from the device tree, if it exists. + * Otherwise, cons something up from the CPUID register. * the value is already exported via the commpage. So keep it simple. */ static int @@ -174,6 +213,10 @@ make_brand_string SYSCTL_HANDLER_ARGS __unused void *unused_arg1 = arg1; __unused int unused_arg2 = arg2; + if (brand_string != NULL) { + return SYSCTL_OUT(req, brand_string, brand_string_len); + } + const char *impl; switch (cpuid_info()->arm_info.arm_implementor) { @@ -258,54 +301,6 @@ SYSCTL_PROC_MACHDEP_CPU_SYSREG(TCR_EL1); SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64MMFR0_EL1); // ARM64: AArch64 Instruction Set Attribute Register 1 SYSCTL_PROC_MACHDEP_CPU_SYSREG(ID_AA64ISAR1_EL1); -/* - * ARM64: AArch64 Guarded Execution Mode GENTER Vector - * - * Workaround for pre-H13, since register cannot be read unless in guarded - * mode, thus expose software convention that GXF_ENTRY_EL1 is always set - * to the address of the gxf_ppl_entry_handler. 
- */ -#endif /* DEVELOPMENT || DEBUG */ -#if HYPERVISOR -SYSCTL_NODE(_kern, OID_AUTO, hv, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Hypervisor info"); - -SYSCTL_INT(_kern_hv, OID_AUTO, supported, - CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, - &hv_support_available, 0, ""); - -extern unsigned int arm64_num_vmids; - -SYSCTL_UINT(_kern_hv, OID_AUTO, max_address_spaces, - CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED, - &arm64_num_vmids, 0, ""); - -extern uint64_t pmap_ipa_size(uint64_t granule); - -static int -sysctl_ipa_size_16k SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = pmap_ipa_size(16384); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} - -SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_16k, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_ipa_size_16k, "P", - "Maximum size allowed for 16K-page guest IPA spaces"); - -static int -sysctl_ipa_size_4k SYSCTL_HANDLER_ARGS -{ -#pragma unused(arg1, arg2, oidp) - uint64_t return_value = pmap_ipa_size(4096); - return SYSCTL_OUT(req, &return_value, sizeof(return_value)); -} - -SYSCTL_PROC(_kern_hv, OID_AUTO, ipa_size_4k, - CTLFLAG_RD | CTLTYPE_QUAD | CTLFLAG_LOCKED, - 0, 0, sysctl_ipa_size_4k, "P", - "Maximum size allowed for 4K-page guest IPA spaces"); +#endif /* DEVELOPMENT || DEBUG */ -#endif // HYPERVISOR diff --git a/bsd/dev/dtrace/fasttrap.c b/bsd/dev/dtrace/fasttrap.c index b519a7a3b..e95eb2e1f 100644 --- a/bsd/dev/dtrace/fasttrap.c +++ b/bsd/dev/dtrace/fasttrap.c @@ -2415,6 +2415,48 @@ fasttrap_validatestr(char const* str, size_t maxlen) { return utf8_validatestr((unsigned const char*) str, len); } +/* + * Checks that provided credentials are allowed to debug target process. + */ +static int +fasttrap_check_cred_priv(cred_t *cr, proc_t *p) +{ + int err = 0; + + /* Only root can use DTrace. */ + if (!kauth_cred_issuser(cr)) { + err = EPERM; + goto out; + } + + /* Process is marked as no attach. 
*/ + if (ISSET(p->p_lflag, P_LNOATTACH)) { + err = EBUSY; + goto out; + } + +#if CONFIG_MACF + /* Check with MAC framework when enabled. */ + struct proc_ident cur_ident = proc_ident(current_proc()); + struct proc_ident p_ident = proc_ident(p); + + /* Do not hold ref to proc here to avoid deadlock. */ + proc_rele(p); + err = mac_proc_check_debug(&cur_ident, cr, &p_ident); + + if (proc_find_ident(&p_ident) == PROC_NULL) { + err = ESRCH; + goto out_no_proc; + } +#endif /* CONFIG_MACF */ + +out: + proc_rele(p); + +out_no_proc: + return err; +} + /*ARGSUSED*/ static int fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv) @@ -2486,15 +2528,11 @@ fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int * ret = ESRCH; goto err; } - // proc_lock(p); - // FIXME! How is this done on OS X? - // if ((ret = priv_proc_cred_perm(cr, p, NULL, - // VREAD | VWRITE)) != 0) { - // mutex_exit(&p->p_lock); - // return (ret); - // } - // proc_unlock(p); - proc_rele(p); + + ret = fasttrap_check_cred_priv(cr, p); + if (ret != 0) { + goto err; + } } ret = fasttrap_add_probe(probe); @@ -2508,7 +2546,7 @@ err: fasttrap_instr_query_t instr; fasttrap_tracepoint_t *tp; uint_t index; - // int ret; + int ret; if (copyin(arg, &instr, sizeof (instr)) != 0) return (EFAULT); @@ -2526,15 +2564,11 @@ err: proc_rele(p); return (ESRCH); } - //proc_lock(p); - // FIXME! How is this done on OS X? 
- // if ((ret = priv_proc_cred_perm(cr, p, NULL, - // VREAD)) != 0) { - // mutex_exit(&p->p_lock); - // return (ret); - // } - // proc_unlock(p); - proc_rele(p); + + ret = fasttrap_check_cred_priv(cr, p); + if (ret != 0) { + return (ret); + } } index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc); diff --git a/bsd/kern/kern_authorization.c b/bsd/kern/kern_authorization.c index e1000e3d4..0181ee93d 100644 --- a/bsd/kern/kern_authorization.c +++ b/bsd/kern/kern_authorization.c @@ -1217,7 +1217,11 @@ kauth_acl_alloc(int count) void kauth_acl_free(kauth_acl_t aclp) { - FREE(aclp, M_KAUTH); + /* + * It's possible this may have have been allocated in a kext using + * MALLOC. Using KHEAP_ANY will allow us to free it here. + */ + kheap_free_addr(KHEAP_ANY, aclp); } diff --git a/bsd/kern/kern_descrip.c b/bsd/kern/kern_descrip.c index efca619e8..cb5705e8e 100644 --- a/bsd/kern/kern_descrip.c +++ b/bsd/kern/kern_descrip.c @@ -227,11 +227,22 @@ fg_free(struct fileglob *fg) OS_ALWAYS_INLINE void -fg_ref(struct fileglob *fg) +fg_ref(proc_t p, struct fileglob *fg) { +#if DEBUG || DEVELOPMENT + proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED); +#else + (void)p; +#endif os_ref_retain_raw(&fg->fg_count, &f_refgrp); } +void +fg_drop_live(struct fileglob *fg) +{ + os_ref_release_live_raw(&fg->fg_count, &f_refgrp); +} + int fg_drop(proc_t p, struct fileglob *fg) { @@ -3256,7 +3267,7 @@ finishdup(proc_t p, return ENOMEM; } - fg_ref(ofp->fp_glob); + fg_ref(p, ofp->fp_glob); nfp->fp_glob = ofp->fp_glob; #if DIAGNOSTIC @@ -4919,7 +4930,7 @@ fdcopy(proc_t p, vnode_t uth_cdir) fp->fp_flags |= (ofp->fp_flags & ~FP_TYPEMASK); fp->fp_glob = ofp->fp_glob; - fg_ref(fp->fp_glob); + fg_ref(p, fp->fp_glob); *fpp = fp; } } else { @@ -5284,18 +5295,19 @@ sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap, goto out_unlock; } + /* Dropped when port is deallocated */ + fg_ref(p, fg); + proc_fdunlock(p); /* Allocate and initialize a port */ fileport = fileport_alloc(fg); if (fileport 
== IPC_PORT_NULL) { + fg_drop_live(fg); err = EAGAIN; goto out; } - /* Dropped when port is deallocated */ - fg_ref(fg); - /* Add an entry. Deallocates port on failure. */ name = ipc_port_copyout_send(fileport, get_task_ipcspace(p->task)); if (!MACH_PORT_VALID(name)) { @@ -5382,7 +5394,7 @@ fileport_makefd(proc_t p, ipc_port_t port, int uf_flags, int *retval) } fp->fp_glob = fg; - fg_ref(fg); + fg_ref(p, fg); procfdtbl_releasefd(p, fd, fp); proc_fdunlock(p); @@ -5525,7 +5537,7 @@ dupfdopen(struct filedesc *fdp, int indx, int dfd, int flags, int error) if (fp->fp_glob) { fg_free(fp->fp_glob); } - fg_ref(wfp->fp_glob); + fg_ref(p, wfp->fp_glob); fp->fp_glob = wfp->fp_glob; fdp->fd_ofileflags[indx] = fdp->fd_ofileflags[dfd] | diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index db94eab6a..0385bf1b4 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -3939,17 +3939,8 @@ bad: * received by the child in a partially constructed state. */ proc_signalend(p, 0); - - /* flag the 'fork' has occurred */ - proc_knote(p->p_pptr, NOTE_FORK | p->p_pid); } - /* flag exec has occurred, notify only if it has not failed due to FP Key error */ - if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) { - proc_knote(p, NOTE_EXEC); - } - - if (error == 0) { /* * We need to initialize the bank context behind the protection of @@ -4077,6 +4068,15 @@ bad: } } + if (spawn_no_exec) { + /* flag the 'fork' has occurred */ + proc_knote(p->p_pptr, NOTE_FORK | p->p_pid); + } + + /* flag exec has occurred, notify only if it has not failed due to FP Key error */ + if (!error && ((p->p_lflag & P_LTERM_DECRYPTFAIL) == 0)) { + proc_knote(p, NOTE_EXEC); + } if (imgp != NULL) { if (imgp->ip_vp) { diff --git a/bsd/kern/kern_memorystatus.c b/bsd/kern/kern_memorystatus.c index 3d8198474..69e5f6d69 100644 --- a/bsd/kern/kern_memorystatus.c +++ b/bsd/kern/kern_memorystatus.c @@ -2045,6 +2045,9 @@ memorystatus_add(proc_t p, boolean_t locked) if (isSysProc(p)) { p->p_memstat_state |= 
P_MEMSTAT_FREEZE_DISABLED; } +#if CONFIG_FREEZE + memorystatus_freeze_init_proc(p); +#endif bucket = &memstat_bucket[p->p_memstat_effectivepriority]; @@ -4903,8 +4906,10 @@ memorystatus_update_jetsam_snapshot_entry_locked(proc_t p, uint32_t kill_cause, entry->jse_idle_delta = p->p_memstat_idle_delta; #if CONFIG_FREEZE entry->jse_thaw_count = p->p_memstat_thaw_count; + entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason; #else /* CONFIG_FREEZE */ entry->jse_thaw_count = 0; + entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; #endif /* CONFIG_FREEZE */ /* @@ -5179,9 +5184,11 @@ memorystatus_init_jetsam_snapshot_entry_locked(proc_t p, memorystatus_jetsam_sna entry->jse_idle_delta = p->p_memstat_idle_delta; /* Most recent timespan spent in idle-band */ #if CONFIG_FREEZE + entry->jse_freeze_skip_reason = p->p_memstat_freeze_skip_reason; entry->jse_thaw_count = p->p_memstat_thaw_count; #else /* CONFIG_FREEZE */ entry->jse_thaw_count = 0; + entry->jse_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; #endif /* CONFIG_FREEZE */ proc_coalitionids(p, cids); @@ -7884,8 +7891,10 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * #pragma unused(jetsam_reason) #endif - /* We don't need entitlements if we're setting/ querying the freeze preference for a process. Skip the check below. */ - if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE || args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE) { + /* We don't need entitlements if we're setting / querying the freeze preference or frozen status for a process. 
*/ + if (args->command == MEMORYSTATUS_CMD_SET_PROCESS_IS_FREEZABLE || + args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE || + args->command == MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN) { skip_auth_check = TRUE; } @@ -8023,6 +8032,9 @@ memorystatus_control(struct proc *p __unused, struct memorystatus_control_args * case MEMORYSTATUS_CMD_GET_PROCESS_IS_FREEZABLE: error = memorystatus_get_process_is_freezable(args->pid, ret); break; + case MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN: + error = memorystatus_get_process_is_frozen(args->pid, ret); + break; case MEMORYSTATUS_CMD_FREEZER_CONTROL: error = memorystatus_freezer_control(args->flags, args->buffer, args->buffersize, ret); diff --git a/bsd/kern/kern_memorystatus_freeze.c b/bsd/kern/kern_memorystatus_freeze.c index 08c86e4f6..1dfa926e9 100644 --- a/bsd/kern/kern_memorystatus_freeze.c +++ b/bsd/kern/kern_memorystatus_freeze.c @@ -106,8 +106,8 @@ unsigned int memorystatus_freeze_pages_min = 0; unsigned int memorystatus_freeze_pages_max = 0; unsigned int memorystatus_freeze_suspended_threshold = FREEZE_SUSPENDED_THRESHOLD_DEFAULT; unsigned int memorystatus_freeze_daily_mb_max = FREEZE_DAILY_MB_MAX_DEFAULT; -uint64_t memorystatus_freeze_budget_pages_remaining = 0; //remaining # of pages that can be frozen to disk -boolean_t memorystatus_freeze_degradation = FALSE; //protected by the freezer mutex. Signals we are in a degraded freeze mode. +uint64_t memorystatus_freeze_budget_pages_remaining = 0; /* Remaining # of pages that can be frozen to disk */ +boolean_t memorystatus_freeze_degradation = FALSE; /* Protected by the freezer mutex. Signals we are in a degraded freeze mode. 
*/ unsigned int memorystatus_max_frozen_demotions_daily = 0; unsigned int memorystatus_thaw_count_demotion_threshold = 0; @@ -215,11 +215,9 @@ extern int i_coal_jetsam_get_taskrole(coalition_t coal, task_t task); static void memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed); static void memorystatus_demote_frozen_processes(boolean_t force_one); -/* - * Converts the freezer_error_code into a string and updates freezer error counts. - */ -static void memorystatus_freezer_stringify_error(const int freezer_error_code, char* buffer, size_t len); +static void memorystatus_freeze_handle_error(proc_t p, const int freezer_error_code, bool was_refreeze, pid_t pid, const coalition_t coalition, const char* log_prefix); +static void memorystatus_freeze_out_of_slots(void); static uint64_t memorystatus_freezer_thread_next_run_ts = 0; /* Sysctls needed for aggd stats */ @@ -275,6 +273,8 @@ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_shared_pages_skipped, CTLFLAG_ SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_bytes_refrozen, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_bytes_refrozen, ""); SYSCTL_QUAD(_kern, OID_AUTO, memorystatus_freezer_refreeze_count, CTLFLAG_RD | CTLFLAG_LOCKED, &memorystatus_freezer_stats.mfs_refreeze_count, ""); +static_assert(_kMemorystatusFreezeSkipReasonMax <= UINT8_MAX); + /* * Calculates the hit rate for the freezer. @@ -449,11 +449,7 @@ again: } if (error) { - char reason[FREEZER_ERROR_STRING_LENGTH]; - memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); - - printf("sysctl_freeze: task_freeze failed: %s\n", reason); - + memorystatus_freeze_handle_error(p, freezer_error_code, state & P_MEMSTAT_FROZEN, pid, coal, "sysctl_freeze"); if (error == KERN_NO_SPACE) { /* Make it easy to distinguish between failures due to low compressor/ swap space and other failures. 
*/ error = ENOSPC; @@ -464,7 +460,11 @@ again: proc_list_lock(); if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; memorystatus_frozen_count++; + if (memorystatus_frozen_count == memorystatus_frozen_processes_max) { + memorystatus_freeze_out_of_slots(); + } } else { // This was a re-freeze if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { @@ -1219,6 +1219,9 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) state = p->p_memstat_state; if (state & (P_MEMSTAT_TERMINATED | P_MEMSTAT_LOCKED | P_MEMSTAT_FREEZE_DISABLED | P_MEMSTAT_FREEZE_IGNORE)) { + if (state & P_MEMSTAT_FREEZE_DISABLED) { + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonDisabled; + } goto out; } @@ -1308,6 +1311,7 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) if (first_consideration) { memorystatus_freezer_stats.mfs_error_below_min_pages_count++; } + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonBelowMinPages; goto out; } @@ -1320,6 +1324,7 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) if (first_consideration) { memorystatus_freezer_stats.mfs_error_other_count++; } + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOther; goto out; } @@ -1339,19 +1344,27 @@ memorystatus_is_process_eligible_for_freeze(proc_t p) if (first_consideration) { memorystatus_freezer_stats.mfs_error_low_probability_of_use_count++; } + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonLowProbOfUse; goto out; } } should_freeze = TRUE; out: - if (should_freeze && !first_consideration && !(state & P_MEMSTAT_FROZEN)) { + if (should_freeze && !(state & P_MEMSTAT_FROZEN)) { /* - * We're freezing this for the first time and we previously considered it ineligible. - * Bump the considered count so that we track this as 1 failure - * and 1 success. + * Reset the skip reason. 
If it's killed before we manage to actually freeze it + * we failed to consider it early enough. */ - memorystatus_freezer_stats.mfs_process_considered_count++; + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; + if (!first_consideration) { + /* + * We're freezing this for the first time and we previously considered it ineligible. + * Bump the considered count so that we track this as 1 failure + * and 1 success. + */ + memorystatus_freezer_stats.mfs_process_considered_count++; + } } return should_freeze; } @@ -1460,7 +1473,11 @@ memorystatus_freeze_process_sync(proc_t p) if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; memorystatus_frozen_count++; + if (memorystatus_frozen_count == memorystatus_frozen_processes_max) { + memorystatus_freeze_out_of_slots(); + } } else { // This was a re-freeze if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { @@ -1511,11 +1528,7 @@ memorystatus_freeze_process_sync(proc_t p) */ } } else { - char reason[FREEZER_ERROR_STRING_LENGTH]; - memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); - - os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: freezing (specific) pid %d [%s]...skipped (%s)", - aPid, ((p && *p->p_name) ? 
p->p_name : "unknown"), reason); + memorystatus_freeze_handle_error(p, freezer_error_code, p->p_memstat_state & P_MEMSTAT_FROZEN, aPid, NULL, "memorystatus_freeze_process_sync"); p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; } @@ -1541,7 +1554,7 @@ memorystatus_freeze_top_process(void) proc_t p = PROC_NULL, next_p = PROC_NULL; unsigned int i = 0; unsigned int band = JETSAM_PRIORITY_IDLE; - boolean_t refreeze_processes = FALSE; + bool refreeze_processes = false; task_t curr_task = NULL; coalition_t coal = COALITION_NULL; pid_t pid_list[MAX_XPC_SERVICE_PIDS]; @@ -1558,7 +1571,7 @@ memorystatus_freeze_top_process(void) * try to refreeze any processes we might have thawed * in the past and push out their compressed state out. */ - refreeze_processes = TRUE; + refreeze_processes = true; band = (unsigned int) memorystatus_freeze_jetsam_band; } @@ -1570,6 +1583,7 @@ freeze_process: uint32_t purgeable, wired, clean, dirty, shared; uint64_t max_pages = 0; int freezer_error_code = 0; + bool was_refreeze = false; p = next_p; @@ -1703,13 +1717,18 @@ freeze_process: if ((p->p_memstat_state & P_MEMSTAT_FROZEN) == 0) { p->p_memstat_state |= P_MEMSTAT_FROZEN; + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; memorystatus_frozen_count++; + if (memorystatus_frozen_count == memorystatus_frozen_processes_max) { + memorystatus_freeze_out_of_slots(); + } } else { // This was a re-freeze if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) { memorystatus_freezer_stats.mfs_bytes_refrozen += dirty * PAGE_SIZE; memorystatus_freezer_stats.mfs_refreeze_count++; } + was_refreeze = true; } p->p_memstat_frozen_count++; @@ -1738,7 +1757,7 @@ freeze_process: } memorystatus_freeze_update_throttle(&memorystatus_freeze_budget_pages_remaining); os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: %sfreezing (%s) pid %d [%s] done, memorystatus_freeze_budget_pages_remaining %llu %sfroze %u pages\n", - refreeze_processes? "re" : "", (coal == NULL ? 
"general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_freeze_budget_pages_remaining, refreeze_processes? "Re" : "", dirty); + was_refreeze ? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? p->p_name : "unknown"), memorystatus_freeze_budget_pages_remaining, was_refreeze ? "Re" : "", dirty); proc_list_lock(); @@ -1825,7 +1844,7 @@ freeze_process: p->p_memstat_state &= ~P_MEMSTAT_LOCKED; wakeup(&p->p_memstat_state); - if (refreeze_processes == TRUE) { + if (refreeze_processes) { if ((freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) || (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO)) { /* @@ -1844,12 +1863,7 @@ freeze_process: } else { p->p_memstat_state |= P_MEMSTAT_FREEZE_IGNORE; } - - char reason[FREEZER_ERROR_STRING_LENGTH]; - memorystatus_freezer_stringify_error(freezer_error_code, reason, sizeof(reason)); - - os_log_with_startup_serial(OS_LOG_DEFAULT, "memorystatus: %sfreezing (%s) pid %d [%s]...skipped (%s)\n", - refreeze_processes? "re" : "", (coal == NULL ? "general" : "coalition-driven"), aPid, ((p && *p->p_name) ? 
p->p_name : "unknown"), reason); + memorystatus_freeze_handle_error(p, p->p_memstat_state & P_MEMSTAT_FROZEN, freezer_error_code, aPid, coal, "memorystatus_freeze_top_process"); proc_rele_locked(p); @@ -1861,7 +1875,7 @@ freeze_process: if ((ret == -1) && (memorystatus_refreeze_eligible_count >= MIN_THAW_REFREEZE_THRESHOLD) && - (refreeze_processes == FALSE)) { + (!refreeze_processes)) { /* * We failed to freeze a process from the IDLE * band AND we have some thawed processes @@ -1873,7 +1887,7 @@ freeze_process: band = (unsigned int) memorystatus_freeze_jetsam_band; - refreeze_processes = TRUE; + refreeze_processes = true; goto freeze_process; } @@ -2121,27 +2135,86 @@ memorystatus_freeze_calculate_new_budget( return (uint32_t) MIN(new_budget, UINT32_MAX); } +/* + * Mark all non frozen, freezer-eligible processes as skipped for the given reason. + * Used when we hit some system freeze limit and know that we won't be considering remaining processes. + * If you're using this for a new reason, make sure to add it to memorystatus_freeze_init_proc so that + * it gets set for new processes. + * NB: These processes will retain this skip reason until they are reconsidered by memorystatus_is_process_eligible_for_freeze. + */ static void -memorystatus_freezer_stringify_error( +memorystatus_freeze_mark_eligible_processes_with_skip_reason(memorystatus_freeze_skip_reason_t reason, bool locked) +{ + LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(proc_list_mlock, locked ? 
LCK_MTX_ASSERT_OWNED : LCK_MTX_ASSERT_NOTOWNED); + unsigned int band = JETSAM_PRIORITY_IDLE; + proc_t p; + + if (!locked) { + proc_list_lock(); + } + p = memorystatus_get_first_proc_locked(&band, FALSE); + while (p) { + assert(p->p_memstat_effectivepriority == (int32_t) band); + if (!(p->p_memstat_state & P_MEMSTAT_FROZEN) && memorystatus_is_process_eligible_for_freeze(p)) { + assert(p->p_memstat_freeze_skip_reason == kMemorystatusFreezeSkipReasonNone); + p->p_memstat_freeze_skip_reason = (uint8_t) reason; + } + p = memorystatus_get_next_proc_locked(&band, p, FALSE); + } + if (!locked) { + proc_list_unlock(); + } +} + +/* + * Called after we fail to freeze a process. + * Logs the failure, marks the process with the failure reason, and updates freezer stats. + */ +static void +memorystatus_freeze_handle_error( + proc_t p, const int freezer_error_code, - char* buffer, - size_t len) + bool was_refreeze, + pid_t pid, + const coalition_t coalition, + const char* log_prefix) { - if (freezer_error_code == FREEZER_ERROR_EXCESS_SHARED_MEMORY) { + const char *reason; + memorystatus_freeze_skip_reason_t skip_reason; + + switch (freezer_error_code) { + case FREEZER_ERROR_EXCESS_SHARED_MEMORY: memorystatus_freezer_stats.mfs_error_excess_shared_memory_count++; - strlcpy(buffer, "too much shared memory", len); - } else if (freezer_error_code == FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO) { + reason = "too much shared memory"; + skip_reason = kMemorystatusFreezeSkipReasonExcessSharedMemory; + break; + case FREEZER_ERROR_LOW_PRIVATE_SHARED_RATIO: memorystatus_freezer_stats.mfs_error_low_private_shared_ratio_count++; - strlcpy(buffer, "low private-shared pages ratio", len); - } else if (freezer_error_code == FREEZER_ERROR_NO_COMPRESSOR_SPACE) { + reason = "private-shared pages ratio"; + skip_reason = kMemorystatusFreezeSkipReasonLowPrivateSharedRatio; + break; + case FREEZER_ERROR_NO_COMPRESSOR_SPACE: memorystatus_freezer_stats.mfs_error_no_compressor_space_count++; - strlcpy(buffer, 
"no compressor space", len); - } else if (freezer_error_code == FREEZER_ERROR_NO_SWAP_SPACE) { + reason = "no compressor space"; + skip_reason = kMemorystatusFreezeSkipReasonNoCompressorSpace; + break; + case FREEZER_ERROR_NO_SWAP_SPACE: memorystatus_freezer_stats.mfs_error_no_swap_space_count++; - strlcpy(buffer, "no swap space", len); - } else { - strlcpy(buffer, "unknown error", len); + reason = "no swap space"; + skip_reason = kMemorystatusFreezeSkipReasonNoSwapSpace; + break; + default: + reason = "unknown error"; + skip_reason = kMemorystatusFreezeSkipReasonOther; } + + p->p_memstat_freeze_skip_reason = (uint8_t) skip_reason; + + os_log_with_startup_serial(OS_LOG_DEFAULT, "%s: %sfreezing (%s) pid %d [%s]...skipped (%s)\n", + log_prefix, was_refreeze ? "re" : "", + (coalition == NULL ? "general" : "coalition-driven"), pid, + ((p && *p->p_name) ? p->p_name : "unknown"), reason); } /* @@ -2152,6 +2225,7 @@ static void memorystatus_freeze_start_normal_throttle_interval(uint32_t new_budget, mach_timespec_t start_ts) { LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED); normal_throttle_window->max_pageouts = new_budget; normal_throttle_window->ts.tv_sec = normal_throttle_window->mins * 60; @@ -2190,6 +2264,54 @@ SYSCTL_PROC(_vm, OID_AUTO, memorystatus_freeze_calculate_new_budget, CTLTYPE_INT #endif /* DEVELOPMENT || DEBUG */ +/* + * Called when we first run out of budget in an interval. + * Marks idle processes as not frozen due to lack of budget. + * NB: It might be worth having a CA event here. 
+ */ +static void +memorystatus_freeze_out_of_budget(const struct throttle_interval_t *interval) +{ + LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_NOTOWNED); + + mach_timespec_t time_left = {0, 0}; + mach_timespec_t now_ts; + clock_sec_t sec; + clock_nsec_t nsec; + + time_left.tv_sec = interval->ts.tv_sec; + time_left.tv_nsec = 0; + clock_get_system_nanotime(&sec, &nsec); + now_ts.tv_sec = (unsigned int)(MIN(sec, UINT32_MAX)); + now_ts.tv_nsec = nsec; + + SUB_MACH_TIMESPEC(&time_left, &now_ts); + os_log(OS_LOG_DEFAULT, + "memorystatus_freeze: Out of NAND write budget with %u minutes left in the current freezer interval. %u procs are frozen.\n", + time_left.tv_sec / 60, memorystatus_frozen_count); + + memorystatus_freeze_mark_eligible_processes_with_skip_reason(kMemorystatusFreezeSkipReasonOutOfBudget, false); +} + +/* + * Called when we cross over the threshold of maximum frozen processes allowed. + * Marks remaining idle processes as not frozen due to lack of slots. + */ +static void +memorystatus_freeze_out_of_slots(void) +{ + LCK_MTX_ASSERT(&freezer_mutex, LCK_MTX_ASSERT_OWNED); + LCK_MTX_ASSERT(proc_list_mlock, LCK_MTX_ASSERT_OWNED); + assert(memorystatus_frozen_count == memorystatus_frozen_processes_max); + + os_log(OS_LOG_DEFAULT, + "memorystatus_freeze: Out of slots in the freezer. 
%u procs are frozen.\n", + memorystatus_frozen_count); + + memorystatus_freeze_mark_eligible_processes_with_skip_reason(kMemorystatusFreezeSkipReasonOutOfSlots, true); +} + /* * This function will do 4 things: * @@ -2220,6 +2342,7 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) unsigned int freeze_daily_pageouts_max = 0; uint32_t budget_rollover = 0; + bool started_with_budget = (*budget_pages_allowed > 0); #if DEVELOPMENT || DEBUG if (!memorystatus_freeze_throttle_enabled) { @@ -2278,6 +2401,9 @@ memorystatus_freeze_update_throttle(uint64_t *budget_pages_allowed) if (memorystatus_freeze_degradation == FALSE) { if (interval->pageouts >= interval->max_pageouts) { *budget_pages_allowed = 0; + if (started_with_budget) { + memorystatus_freeze_out_of_budget(interval); + } } else { int budget_left = interval->max_pageouts - interval->pageouts; int budget_threshold = (freeze_daily_pageouts_max * FREEZE_DEGRADATION_BUDGET_THRESHOLD) / 100; @@ -2351,10 +2477,8 @@ memorystatus_freeze_thread(void *param __unused, wait_result_t wr __unused) } /* - * We use memorystatus_apps_idle_delay_time because if/when we adopt aging for applications, - * it'll tie neatly into running the freezer once we age an application. - * - * Till then, it serves as a good interval that can be tuned via a sysctl too. + * Give applications currently in the aging band a chance to age out into the idle band before + * running the freezer again. */ memorystatus_freezer_thread_next_run_ts = mach_absolute_time() + memorystatus_apps_idle_delay_time; @@ -2443,6 +2567,31 @@ memorystatus_get_process_is_freezable(pid_t pid, int *is_freezable) return 0; } +errno_t +memorystatus_get_process_is_frozen(pid_t pid, int *is_frozen) +{ + proc_t p = PROC_NULL; + + if (pid == 0) { + return EINVAL; + } + + /* + * Only allow this on the current proc for now. + * We can check for privileges and allow targeting another process in the future. 
+ */ + p = current_proc(); + if (p->p_pid != pid) { + return EPERM; + } + + proc_list_lock(); + *is_frozen = (p->p_memstat_state & P_MEMSTAT_FROZEN) != 0; + proc_list_unlock(); + + return 0; +} + int memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable) { @@ -2495,6 +2644,23 @@ memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable) return 0; } +/* + * Called when process is created before it is added to a memorystatus bucket. + */ +void +memorystatus_freeze_init_proc(proc_t p) +{ + /* NB: Process is not on the memorystatus lists yet so it's safe to modify the skip reason without the freezer mutex. */ + if (memorystatus_freeze_budget_pages_remaining == 0) { + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOutOfBudget; + } else if ((memorystatus_frozen_count >= memorystatus_frozen_processes_max)) { + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonOutOfSlots; + } else { + p->p_memstat_freeze_skip_reason = kMemorystatusFreezeSkipReasonNone; + } +} + + static int sysctl_memorystatus_do_fastwake_warmup_all SYSCTL_HANDLER_ARGS { diff --git a/bsd/kern/proc_info.c b/bsd/kern/proc_info.c index bcdc1c18d..b34ca8058 100644 --- a/bsd/kern/proc_info.c +++ b/bsd/kern/proc_info.c @@ -2716,7 +2716,7 @@ proc_pidfdinfo(int pid, int flavor, int fd, user_addr_t buffer, uint32_t buffers break; case PROC_PIDFDPSEMINFO: { - if ((error = fp_get_ftype(p, fd, DTYPE_PSXSHM, EBADF, &fp)) != 0) { + if ((error = fp_get_ftype(p, fd, DTYPE_PSXSEM, EBADF, &fp)) != 0) { goto out1; } error = pid_pseminfo(fp->fp_glob->fg_data, fp, p, fd, buffer, buffersize, retval); diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index 7968e0491..8304c009d 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -2957,11 +2957,6 @@ ubc_cs_blob_deallocate( vm_offset_t blob_addr, vm_size_t blob_size) { -#if PMAP_CS - if (blob_size > pmap_cs_blob_limit) { - kmem_free(kernel_map, blob_addr, blob_size); - } else -#endif { kfree(blob_addr, 
blob_size); } @@ -3560,40 +3555,6 @@ ubc_cs_blob_add( blob->csb_entitlements_blob = new_entitlements; blob->csb_reconstituted = true; } -#elif PMAP_CS - /* - * When pmap_cs is enabled, there's an expectation that large blobs are - * relocated to their own page. Above, this happens under - * ubc_cs_reconstitute_code_signature() but that discards parts of the - * signatures that are necessary on some platforms (eg, requirements). - * So in this case, just copy everything. - */ - if (pmap_cs && (blob->csb_mem_size > pmap_cs_blob_limit)) { - vm_offset_t cd_offset, ent_offset; - vm_size_t new_mem_size = round_page(blob->csb_mem_size); - vm_address_t new_mem_kaddr = 0; - - kr = kmem_alloc_kobject(kernel_map, &new_mem_kaddr, new_mem_size, VM_KERN_MEMORY_SECURITY); - if (kr != KERN_SUCCESS) { - printf("failed to allocate %lu bytes to relocate blob: %d\n", new_mem_size, kr); - error = ENOMEM; - goto out; - } - - cd_offset = (vm_address_t) blob->csb_cd - blob->csb_mem_kaddr; - ent_offset = (vm_address_t) blob->csb_entitlements_blob - blob->csb_mem_kaddr; - - memcpy((void *) new_mem_kaddr, (const void *) blob->csb_mem_kaddr, blob->csb_mem_size); - ubc_cs_blob_deallocate(blob->csb_mem_kaddr, blob->csb_mem_size); - blob->csb_cd = (const CS_CodeDirectory *) (new_mem_kaddr + cd_offset); - /* Only update the entitlements blob pointer if it is non-NULL. If it is NULL, then - * the blob has no entitlements and ent_offset is garbage. 
*/ - if (blob->csb_entitlements_blob != NULL) { - blob->csb_entitlements_blob = (const CS_GenericBlob *) (new_mem_kaddr + ent_offset); - } - blob->csb_mem_kaddr = new_mem_kaddr; - blob->csb_mem_size = new_mem_size; - } #endif @@ -4906,66 +4867,3 @@ ubc_cs_validation_bitmap_deallocate(__unused vnode_t vp) } #endif /* CHECK_CS_VALIDATION_BITMAP */ -#if PMAP_CS -kern_return_t -cs_associate_blob_with_mapping( - void *pmap, - vm_map_offset_t start, - vm_map_size_t size, - vm_object_offset_t offset, - void *blobs_p) -{ - off_t blob_start_offset, blob_end_offset; - kern_return_t kr; - struct cs_blob *blobs, *blob; - vm_offset_t kaddr; - struct pmap_cs_code_directory *cd_entry = NULL; - - if (!pmap_cs) { - return KERN_NOT_SUPPORTED; - } - - blobs = (struct cs_blob *)blobs_p; - - for (blob = blobs; - blob != NULL; - blob = blob->csb_next) { - blob_start_offset = (blob->csb_base_offset + - blob->csb_start_offset); - blob_end_offset = (blob->csb_base_offset + - blob->csb_end_offset); - if ((off_t) offset < blob_start_offset || - (off_t) offset >= blob_end_offset || - (off_t) (offset + size) <= blob_start_offset || - (off_t) (offset + size) > blob_end_offset) { - continue; - } - kaddr = blob->csb_mem_kaddr; - if (kaddr == 0) { - /* blob data has been released */ - continue; - } - cd_entry = blob->csb_pmap_cs_entry; - if (cd_entry == NULL) { - continue; - } - - break; - } - - if (cd_entry != NULL) { - kr = pmap_cs_associate(pmap, - cd_entry, - start, - size, - offset - blob_start_offset); - } else { - kr = KERN_CODESIGN_ERROR; - } -#if 00 - printf("FBDP %d[%s] pmap_cs_associate(%p,%p,0x%llx,0x%llx) -> kr=0x%x\n", proc_selfpid(), &(current_proc()->p_comm[0]), pmap, cd_entry, (uint64_t)start, (uint64_t)size, kr); - kr = KERN_SUCCESS; -#endif - return kr; -} -#endif /* PMAP_CS */ diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c index 1a0e04ac9..ab5dbd324 100644 --- a/bsd/kern/uipc_mbuf.c +++ b/bsd/kern/uipc_mbuf.c @@ -719,6 +719,7 @@ static unsigned int mb_drain_maxint = 
60; #else /* XNU_TARGET_OS_OSX */ static unsigned int mb_drain_maxint = 0; #endif /* XNU_TARGET_OS_OSX */ +static unsigned int mb_memory_pressure_percentage = 80; uintptr_t mb_obscure_extfree __attribute__((visibility("hidden"))); uintptr_t mb_obscure_extref __attribute__((visibility("hidden"))); @@ -1431,6 +1432,52 @@ mbuf_table_init(void) mbstat.m_bigmclbytes = m_maxsize(MC_BIGCL); } +int +mbuf_get_class(struct mbuf *m) +{ + if (m->m_flags & M_EXT) { + uint32_t composite = (MEXT_FLAGS(m) & EXTF_COMPOSITE); + m_ext_free_func_t m_free_func = m_get_ext_free(m); + + if (m_free_func == NULL) { + if (composite) { + return MC_MBUF_CL; + } else { + return MC_CL; + } + } else if (m_free_func == m_bigfree) { + if (composite) { + return MC_MBUF_BIGCL; + } else { + return MC_BIGCL; + } + } else if (m_free_func == m_16kfree) { + if (composite) { + return MC_MBUF_16KCL; + } else { + return MC_16KCL; + } + } + } + + return MC_MBUF; +} + +bool +mbuf_class_under_pressure(struct mbuf *m) +{ + int mclass = mbuf_get_class(m); // TODO - how can we get the class easily??? 
+ + if (m_total(mclass) >= (m_maxlimit(mclass) * mb_memory_pressure_percentage) / 100) { + os_log(OS_LOG_DEFAULT, + "%s memory-pressure on mbuf due to class %u, total %u max %u", + __func__, mclass, m_total(mclass), m_maxlimit(mclass)); + return true; + } + + return false; +} + #if defined(__LP64__) typedef struct ncl_tbl { uint64_t nt_maxmem; /* memory (sane) size */ @@ -3770,8 +3817,8 @@ m_free(struct mbuf *m) } if (m->m_flags & M_EXT) { - u_int16_t refcnt; - u_int32_t composite; + uint16_t refcnt; + uint32_t composite; m_ext_free_func_t m_free_func; if (MBUF_IS_PAIRED(m) && m_free_paired(m)) { @@ -4168,6 +4215,12 @@ m_copy_pftag(struct mbuf *to, struct mbuf *from) #endif /* PF_ECN */ } +void +m_copy_necptag(struct mbuf *to, struct mbuf *from) +{ + memcpy(m_necptag(to), m_necptag(from), sizeof(struct necp_mtag_)); +} + void m_classifier_init(struct mbuf *m, uint32_t pktf_mask) { @@ -8811,3 +8864,6 @@ SYSCTL_PROC(_kern_ipc, OID_AUTO, mb_drain_force, SYSCTL_INT(_kern_ipc, OID_AUTO, mb_drain_maxint, CTLFLAG_RW | CTLFLAG_LOCKED, &mb_drain_maxint, 0, "Minimum time interval between garbage collection"); +SYSCTL_INT(_kern_ipc, OID_AUTO, mb_memory_pressure_percentage, + CTLFLAG_RW | CTLFLAG_LOCKED, &mb_memory_pressure_percentage, 0, + "Percentage of when we trigger memory-pressure for an mbuf-class"); diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c index 607af6d3c..e1a5241a2 100644 --- a/bsd/kern/uipc_socket.c +++ b/bsd/kern/uipc_socket.c @@ -2497,9 +2497,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, if (error) { if (error == EJUSTRETURN) { error = 0; - clen = 0; - control = NULL; - top = NULL; + goto packet_consumed; } goto out_locked; } @@ -2523,6 +2521,7 @@ sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, error = (*so->so_proto->pr_usrreqs->pru_send) (so, sendflags, top, addr, control, p); +packet_consumed: if (dontroute) { so->so_options &= ~SO_DONTROUTE; } diff --git a/bsd/kern/uipc_usrreq.c 
b/bsd/kern/uipc_usrreq.c index 91eb43b4a..2bffce231 100644 --- a/bsd/kern/uipc_usrreq.c +++ b/bsd/kern/uipc_usrreq.c @@ -2031,7 +2031,7 @@ fg_insertuipc_mark(struct fileglob * fg) msleep(&fg->fg_lflags, &fg->fg_lock, 0, "fg_insertuipc", NULL); } - os_ref_retain_locked_raw(&fg->fg_count, &f_refgrp); + os_ref_retain_raw(&fg->fg_count, &f_refgrp); fg->fg_msgcount++; if (fg->fg_msgcount == 1) { fg->fg_lflags |= FG_INSMSGQ; diff --git a/bsd/miscfs/bindfs/bind_subr.c b/bsd/miscfs/bindfs/bind_subr.c index 2c82cbcc8..58b67705e 100644 --- a/bsd/miscfs/bindfs/bind_subr.c +++ b/bsd/miscfs/bindfs/bind_subr.c @@ -196,6 +196,7 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) struct bind_node_hashhead * hd; struct bind_node * a; struct vnode * vp = NULL; + uint32_t vp_vid = 0; int error = ENOENT; /* @@ -214,6 +215,8 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) /*lowervp has reved */ error = EIO; vp = NULL; + } else { + vp_vid = a->bind_myvid; } break; } @@ -221,7 +224,7 @@ bind_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) lck_mtx_unlock(&bind_hashmtx); if (vp != NULL) { - error = vnode_getwithvid(vp, a->bind_myvid); + error = vnode_getwithvid(vp, vp_vid); if (error == 0) { *vpp = vp; } @@ -243,6 +246,7 @@ bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp) struct bind_node_hashhead * hd; struct bind_node * oxp; struct vnode * ovp = NULL; + uint32_t oxp_vid = 0; int error = 0; hd = BIND_NHASH(xp->bind_lowervp); @@ -259,6 +263,8 @@ bind_hashins(struct mount * mp, struct bind_node * xp, struct vnode ** vpp) * don't add it.*/ error = EIO; ovp = NULL; + } else { + oxp_vid = oxp->bind_myvid; } goto end; } @@ -271,7 +277,7 @@ end: lck_mtx_unlock(&bind_hashmtx); if (ovp != NULL) { /* if we found something in the hash map then grab an iocount */ - error = vnode_getwithvid(ovp, oxp->bind_myvid); + error = vnode_getwithvid(ovp, oxp_vid); if (error == 0) { *vpp = ovp; } diff 
--git a/bsd/miscfs/nullfs/null_subr.c b/bsd/miscfs/nullfs/null_subr.c index 746f09e6a..caffb546a 100644 --- a/bsd/miscfs/nullfs/null_subr.c +++ b/bsd/miscfs/nullfs/null_subr.c @@ -195,6 +195,7 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) struct null_node_hashhead * hd = NULL; struct null_node * a = NULL; struct vnode * vp = NULL; + uint32_t vp_vid = 0; int error = ENOENT; /* @@ -214,6 +215,8 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) /*lowervp has reved */ error = EIO; vp = NULL; + } else { + vp_vid = a->null_myvid; } // In the case of a succesful look-up we should consider moving the object to the top of the head break; @@ -221,7 +224,7 @@ null_hashget(struct mount * mp, struct vnode * lowervp, struct vnode ** vpp) } lck_mtx_unlock(&null_hashmtx); if (vp != NULL) { - error = vnode_getwithvid(vp, a->null_myvid); + error = vnode_getwithvid(vp, vp_vid); if (error == 0) { *vpp = vp; } @@ -239,6 +242,7 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) struct null_node_hashhead * hd = NULL; struct null_node * oxp = NULL; struct vnode * ovp = NULL; + uint32_t oxp_vid = 0; int error = 0; hd = NULL_NHASH(xp->null_lowervp); @@ -259,6 +263,8 @@ null_hashins(struct mount * mp, struct null_node * xp, struct vnode ** vpp) * don't add it.*/ error = EIO; ovp = NULL; + } else { + oxp_vid = oxp->null_myvid; } goto end; } @@ -271,7 +277,7 @@ end: lck_mtx_unlock(&null_hashmtx); if (ovp != NULL) { /* if we found something in the hash map then grab an iocount */ - error = vnode_getwithvid(ovp, oxp->null_myvid); + error = vnode_getwithvid(ovp, oxp_vid); if (error == 0) { *vpp = ovp; } diff --git a/bsd/net/content_filter.c b/bsd/net/content_filter.c index d0f3b06b3..ee3a5b630 100644 --- a/bsd/net/content_filter.c +++ b/bsd/net/content_filter.c @@ -1618,6 +1618,38 @@ cfil_socket_safe_lock(struct inpcb *inp) return false; } +/* + * cfil_socket_safe_lock_rip - + * This routine attempts to 
lock the rip socket safely. + * The passed in ripcbinfo is assumed to be locked and must be unlocked (regardless + * of success/failure) before calling socket_unlock(). This is to avoid double + * locking since rip_unlock() will lock ripcbinfo if it needs to dispose inpcb when + * so_usecount is 0. + */ +static bool +cfil_socket_safe_lock_rip(struct inpcb *inp, struct inpcbinfo *pcbinfo) +{ + struct socket *so = NULL; + + VERIFY(pcbinfo != NULL); + + if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { + so = inp->inp_socket; + socket_lock(so, 1); + if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) { + lck_rw_done(pcbinfo->ipi_lock); + return true; + } + } + + lck_rw_done(pcbinfo->ipi_lock); + + if (so) { + socket_unlock(so, 1); + } + return false; +} + static struct socket * cfil_socket_from_sock_id(cfil_sock_id_t cfil_sock_id, bool udp_only) { @@ -1670,6 +1702,9 @@ find_udp: } } lck_rw_done(pcbinfo->ipi_lock); + if (so != NULL) { + goto done; + } pcbinfo = &ripcbinfo; lck_rw_lock_shared(pcbinfo->ipi_lock); @@ -1678,10 +1713,11 @@ find_udp: inp->inp_socket != NULL && inp->inp_socket->so_cfil_db != NULL && (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) { - if (cfil_socket_safe_lock(inp)) { + if (cfil_socket_safe_lock_rip(inp, pcbinfo)) { so = inp->inp_socket; } - break; + /* pcbinfo is already unlocked, we are done. 
*/ + goto done; } } lck_rw_done(pcbinfo->ipi_lock); @@ -2836,6 +2872,7 @@ cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *rem if (so->so_cfil != NULL) { OSIncrementAtomic(&cfil_stats.cfs_sock_attach_already); CFIL_LOG(LOG_ERR, "already attached"); + goto done; } else { cfil_info_alloc(so, NULL); if (so->so_cfil == NULL) { @@ -4738,7 +4775,9 @@ cfil_update_entry_offsets(struct socket *so, struct cfil_info *cfil_info, int ou } entrybuf->cfe_ctl_q.q_start += datalen; - entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start; + if (entrybuf->cfe_pass_offset < entrybuf->cfe_ctl_q.q_start) { + entrybuf->cfe_pass_offset = entrybuf->cfe_ctl_q.q_start; + } entrybuf->cfe_peeked = entrybuf->cfe_ctl_q.q_start; if (entrybuf->cfe_peek_offset < entrybuf->cfe_pass_offset) { entrybuf->cfe_peek_offset = entrybuf->cfe_pass_offset; @@ -4780,6 +4819,11 @@ cfil_data_common(struct socket *so, struct cfil_info *cfil_info, int outgoing, s datalen = cfil_data_length(data, &mbcnt, &mbnum); + if (datalen == 0) { + error = 0; + goto done; + } + if (outgoing) { cfi_buf = &cfil_info->cfi_snd; cfil_info->cfi_byte_outbound_count += datalen; diff --git a/bsd/net/ether_inet6_pr_module.c b/bsd/net/ether_inet6_pr_module.c index 2b077473a..c332fd523 100644 --- a/bsd/net/ether_inet6_pr_module.c +++ b/bsd/net/ether_inet6_pr_module.c @@ -161,7 +161,7 @@ ether_inet6_pre_output(ifnet_t ifp, protocol_family_t protocol_family, { #pragma unused(protocol_family) errno_t result; - struct sockaddr_dl sdl; + struct sockaddr_dl sdl = {}; struct mbuf *m = *m0; /* diff --git a/bsd/net/ether_inet_pr_module.c b/bsd/net/ether_inet_pr_module.c index 9830039be..e00447754 100644 --- a/bsd/net/ether_inet_pr_module.c +++ b/bsd/net/ether_inet_pr_module.c @@ -246,7 +246,7 @@ ether_inet_pre_output(ifnet_t ifp, protocol_family_t protocol_family, switch (dst_netaddr->sa_family) { case AF_INET: { - struct sockaddr_dl ll_dest; + struct sockaddr_dl ll_dest = {}; result = arp_lookup_ip(ifp, (const struct 
sockaddr_in *)(uintptr_t)(size_t)dst_netaddr, diff --git a/bsd/net/if_6lowpan.c b/bsd/net/if_6lowpan.c index 2d6246d31..8337acaaa 100644 --- a/bsd/net/if_6lowpan.c +++ b/bsd/net/if_6lowpan.c @@ -918,7 +918,7 @@ sixlowpan_proto_pre_output(ifnet_t ifp, { #pragma unused(protocol_family) errno_t result = 0; - struct sockaddr_dl sdl; + struct sockaddr_dl sdl = {}; struct sockaddr_in6 *dest6 = (struct sockaddr_in6 *)(uintptr_t)(size_t)dest; if (!IN6_IS_ADDR_MULTICAST(&dest6->sin6_addr)) { diff --git a/bsd/net/multicast_list.c b/bsd/net/multicast_list.c index 5169de4b9..2bf7c2d23 100644 --- a/bsd/net/multicast_list.c +++ b/bsd/net/multicast_list.c @@ -101,7 +101,7 @@ multicast_list_program(struct multicast_list * mc_list, int i; struct multicast_entry * mc = NULL; struct multicast_list new_mc_list; - struct sockaddr_dl source_sdl; + struct sockaddr_dl source_sdl = {}; ifmultiaddr_t * source_multicast_list; struct sockaddr_dl target_sdl; diff --git a/bsd/net/ndrv.c b/bsd/net/ndrv.c index 2075bbec1..df0845a76 100644 --- a/bsd/net/ndrv.c +++ b/bsd/net/ndrv.c @@ -170,7 +170,7 @@ ndrv_input( char *frame_header) { struct socket *so; - struct sockaddr_dl ndrvsrc; + struct sockaddr_dl ndrvsrc = {}; struct ndrv_cb *np; int error = 0; diff --git a/bsd/net/necp.c b/bsd/net/necp.c index ac3b6fbb3..a06dc3914 100644 --- a/bsd/net/necp.c +++ b/bsd/net/necp.c @@ -141,7 +141,16 @@ u_int32_t necp_drop_all_order = 0; u_int32_t necp_drop_all_level = 0; -u_int32_t necp_pass_loopback = 1; // 0=Off, 1=On +#define NECP_LOOPBACK_PASS_ALL 1 // Pass all loopback traffic +#define NECP_LOOPBACK_PASS_WITH_FILTER 2 // Pass all loopback traffic, but activate content filter and/or flow divert if applicable + +#if defined(XNU_TARGET_OS_OSX) +#define NECP_LOOPBACK_PASS_DEFAULT NECP_LOOPBACK_PASS_WITH_FILTER +#else +#define NECP_LOOPBACK_PASS_DEFAULT NECP_LOOPBACK_PASS_ALL +#endif + +u_int32_t necp_pass_loopback = NECP_LOOPBACK_PASS_DEFAULT; u_int32_t necp_pass_keepalives = 1; // 0=Off, 1=On u_int32_t 
necp_pass_interpose = 1; // 0=Off, 1=On u_int32_t necp_restrict_multicast = 1; // 0=Off, 1=On @@ -241,12 +250,19 @@ ZONE_DECLARE(necp_ip_policy_zone, "necp_ip_policy", #define NECP_KERNEL_CONDITION_SDK_VERSION 0x8000000 #define NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER 0x10000000 #define NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS 0x20000000 +#define NECP_KERNEL_CONDITION_IS_LOOPBACK 0x40000000 #define NECP_MAX_POLICY_RESULT_SIZE 512 #define NECP_MAX_ROUTE_RULES_ARRAY_SIZE 1024 #define NECP_MAX_CONDITIONS_ARRAY_SIZE 4096 #define NECP_MAX_POLICY_LIST_COUNT 1024 +typedef enum { + NECP_BYPASS_TYPE_NONE = 0, + NECP_BYPASS_TYPE_INTCOPROC = 1, + NECP_BYPASS_TYPE_LOOPBACK = 2, +} necp_socket_bypass_type_t; + // Cap the policy size at the max result + conditions size, with room for extra TLVs #define NECP_MAX_POLICY_SIZE (1024 + NECP_MAX_POLICY_RESULT_SIZE + NECP_MAX_CONDITIONS_ARRAY_SIZE) @@ -301,7 +317,8 @@ struct necp_socket_info { unsigned has_client : 1; unsigned is_platform_binary : 1; unsigned used_responsible_pid : 1; - unsigned __pad_bits : 5; + unsigned is_loopback : 1; + unsigned __pad_bits : 4; }; static lck_grp_attr_t *necp_kernel_policy_grp_attr = NULL; @@ -2076,6 +2093,10 @@ necp_policy_condition_is_valid(u_int8_t *buffer, u_int32_t length, u_int8_t poli } break; } + case NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK: { + validated = TRUE; + break; + } default: { validated = FALSE; break; @@ -2733,6 +2754,9 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) condition_tlv_length += sizeof(u_int16_t); num_conditions++; } + if (condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) { + num_conditions++; + } } condition_tlv_length += num_conditions * (sizeof(u_int8_t) + sizeof(u_int32_t)); // These are for the condition TLVs. The space for "value" is already accounted for above. 
@@ -2892,6 +2916,9 @@ necp_handle_policy_dump_all(user_addr_t out_buffer, size_t out_buffer_length) if (condition_mask & NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) { cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_PACKET_FILTER_TAGS, sizeof(policy->cond_packet_filter_tags), &policy->cond_packet_filter_tags, cond_buf, condition_tlv_length); } + if (condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) { + cond_buf_cursor = necp_buffer_write_tlv(cond_buf_cursor, NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK, 0, "", cond_buf, condition_tlv_length); + } } cursor = necp_buffer_write_tlv(cursor, NECP_TLV_POLICY_CONDITION, cond_buf_cursor - cond_buf, cond_buf, tlv_buffer, total_allocated_bytes); @@ -3596,6 +3623,14 @@ necp_policy_apply(struct necp_session *session, struct necp_session_policy *poli } break; } + case NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK: { + master_condition_mask |= NECP_KERNEL_CONDITION_IS_LOOPBACK; + if (condition_is_negative) { + master_condition_negated_mask |= NECP_KERNEL_CONDITION_IS_LOOPBACK; + } + socket_only_conditions = TRUE; + break; + } default: { break; } @@ -3951,7 +3986,7 @@ necp_kernel_policy_get_new_id(bool socket_level) return newid; } -#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | 
NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SDK_VERSION | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS) +#define NECP_KERNEL_VALID_SOCKET_CONDITIONS (NECP_KERNEL_CONDITION_APP_ID | NECP_KERNEL_CONDITION_REAL_APP_ID | NECP_KERNEL_CONDITION_DOMAIN | NECP_KERNEL_CONDITION_ACCOUNT_ID | NECP_KERNEL_CONDITION_PID | NECP_KERNEL_CONDITION_UID | NECP_KERNEL_CONDITION_ALL_INTERFACES | NECP_KERNEL_CONDITION_BOUND_INTERFACE | NECP_KERNEL_CONDITION_TRAFFIC_CLASS | NECP_KERNEL_CONDITION_PROTOCOL | NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_ENTITLEMENT | NECP_KERNEL_CONDITION_CUSTOM_ENTITLEMENT | NECP_KERNEL_CONDITION_AGENT_TYPE | NECP_KERNEL_CONDITION_HAS_CLIENT | NECP_KERNEL_CONDITION_LOCAL_NETWORKS | NECP_KERNEL_CONDITION_CLIENT_FLAGS | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_PLATFORM_BINARY | NECP_KERNEL_CONDITION_SDK_VERSION | NECP_KERNEL_CONDITION_SIGNING_IDENTIFIER | NECP_KERNEL_CONDITION_PACKET_FILTER_TAGS | NECP_KERNEL_CONDITION_IS_LOOPBACK) static necp_kernel_policy_id necp_kernel_socket_policy_add(necp_policy_order order, u_int32_t session_order, int session_pid, u_int32_t condition_mask, u_int32_t condition_negated_mask, necp_app_id cond_app_id, necp_app_id cond_real_app_id, char *cond_custom_entitlement, u_int32_t cond_account_id, char *cond_domain, pid_t cond_pid, uid_t cond_uid, ifnet_t cond_bound_interface, struct necp_policy_condition_tc_range cond_traffic_class, u_int16_t cond_protocol, union necp_sockaddr_union *cond_local_start, union necp_sockaddr_union *cond_local_end, u_int8_t cond_local_prefix, union necp_sockaddr_union *cond_remote_start, union 
necp_sockaddr_union *cond_remote_end, u_int8_t cond_remote_prefix, struct necp_policy_condition_agent_type *cond_agent_type, struct necp_policy_condition_sdk_version *cond_sdk_version, u_int32_t cond_client_flags, char *cond_signing_identifier, u_int16_t cond_packet_filter_tags, necp_kernel_policy_result result, necp_kernel_policy_result_parameter result_parameter) @@ -6134,7 +6169,7 @@ necp_check_restricted_multicast_drop(proc_t proc, struct necp_socket_info *info, #define NECP_KERNEL_ADDRESS_TYPE_CONDITIONS (NECP_KERNEL_CONDITION_LOCAL_START | NECP_KERNEL_CONDITION_LOCAL_END | NECP_KERNEL_CONDITION_LOCAL_PREFIX | NECP_KERNEL_CONDITION_REMOTE_START | NECP_KERNEL_CONDITION_REMOTE_END | NECP_KERNEL_CONDITION_REMOTE_PREFIX | NECP_KERNEL_CONDITION_LOCAL_EMPTY | NECP_KERNEL_CONDITION_REMOTE_EMPTY | NECP_KERNEL_CONDITION_LOCAL_NETWORKS) static void -necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, uuid_t responsible_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, proc_t responsible_proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info) +necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_application_uuid, uuid_t responsible_application_uuid, char *account, char *domain, pid_t pid, uid_t uid, u_int16_t protocol, u_int32_t bound_interface_index, u_int32_t traffic_class, union necp_sockaddr_union *local_addr, union necp_sockaddr_union *remote_addr, u_int16_t local_port, u_int16_t remote_port, bool has_client, proc_t proc, proc_t responsible_proc, u_int32_t drop_order, u_int32_t client_flags, struct necp_socket_info *info, bool is_loopback) { memset(info, 0, sizeof(struct necp_socket_info)); @@ -6146,6 +6181,7 @@ 
necp_application_fillout_info_locked(uuid_t application_uuid, uuid_t real_applic info->has_client = has_client; info->drop_order = drop_order; info->client_flags = client_flags; + info->is_loopback = is_loopback; if (necp_kernel_application_policies_condition_mask & NECP_KERNEL_CONDITION_APP_ID && !uuid_is_null(application_uuid)) { struct necp_uuid_id_mapping *existing_mapping = necp_uuid_lookup_app_id_locked(application_uuid); @@ -6348,6 +6384,7 @@ necp_application_find_policy_match_internal(proc_t proc, proc_t responsible_proc = PROC_NULL; proc_t effective_proc = proc; bool release_eproc = false; + necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE; u_int32_t flow_divert_aggregate_unit = 0; @@ -6571,6 +6608,10 @@ necp_application_find_policy_match_internal(proc_t proc, // Check for loopback exception if (necp_pass_loopback > 0 && necp_is_loopback(&local_addr.sa, &remote_addr.sa, NULL, NULL, bound_interface_index)) { + bypass_type = NECP_BYPASS_TYPE_LOOPBACK; + } + + if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && necp_pass_loopback == NECP_LOOPBACK_PASS_ALL) { returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS; returned_result->routed_interface_index = lo_ifp->if_index; @@ -6599,8 +6640,31 @@ necp_application_find_policy_match_internal(proc_t proc, u_int32_t route_rule_id_array[MAX_AGGREGATE_ROUTE_RULES]; size_t route_rule_id_array_count = 0; - necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, remote_port, has_client, effective_proc, responsible_proc, drop_order, client_flags, &info); + necp_application_fillout_info_locked(application_uuid, real_application_uuid, responsible_application_uuid, account, domain, pid, uid, protocol, bound_interface_index, traffic_class, &local_addr, &remote_addr, local_port, 
remote_port, has_client, effective_proc, responsible_proc, drop_order, client_flags, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK)); matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_app_layer_map, &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, netagent_use_flags, NECP_MAX_NETAGENTS, required_agent_types, num_required_agent_types, info.used_responsible_pid ? responsible_proc : effective_proc, 0, NULL, NULL, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); + + // Check for loopback exception again after the policy match + if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && + necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER && + (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) { + if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) { + returned_result->filter_control_unit = 0; + } else { + returned_result->filter_control_unit = filter_control_unit; + } + + if (flow_divert_aggregate_unit > 0) { + returned_result->flow_divert_aggregate_unit = flow_divert_aggregate_unit; + } + + returned_result->policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + returned_result->routing_result = NECP_KERNEL_POLICY_RESULT_PASS; + returned_result->routed_interface_index = lo_ifp->if_index; + *flags |= (NECP_CLIENT_RESULT_FLAG_IS_LOCAL | NECP_CLIENT_RESULT_FLAG_IS_DIRECT); + error = 0; + goto done; + } + if (matched_policy) { returned_result->policy_id = matched_policy->id; returned_result->routing_result = matched_policy->result; @@ -7036,6 +7100,8 @@ necp_application_find_policy_match_internal(proc_t proc, } rt = NULL; } + +done: // Unlock lck_rw_done(&necp_kernel_policy_lock); @@ -7103,7 +7169,7 @@ done: } static bool -necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t 
account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, u_int16_t pf_tag, struct rtentry *rt) +necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_app_id app_id, necp_app_id real_app_id, errno_t cred_result, u_int32_t account_id, struct substring domain, u_int8_t domain_dot_count, pid_t pid, uid_t uid, u_int32_t bound_interface_index, u_int32_t traffic_class, u_int16_t protocol, union necp_sockaddr_union *local, union necp_sockaddr_union *remote, struct necp_client_parameter_netagent_type *required_agent_types, u_int32_t num_required_agent_types, bool has_client, uint32_t client_flags, int is_platform_binary, proc_t proc, u_int16_t pf_tag, struct rtentry *rt, bool is_loopback) { if (!(kernel_policy->condition_mask & NECP_KERNEL_CONDITION_ALL_INTERFACES)) { if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_BOUND_INTERFACE) { @@ -7482,6 +7548,18 @@ necp_socket_check_policy(struct necp_kernel_socket_policy *kernel_policy, necp_a } } + if (kernel_policy->condition_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) { + if (kernel_policy->condition_negated_mask & NECP_KERNEL_CONDITION_IS_LOOPBACK) { + if (is_loopback) { + return FALSE; + } + } else { + if (!is_loopback) { + return FALSE; + } + } + } + return TRUE; } @@ -7492,7 +7570,7 @@ necp_socket_calc_flowhash_locked(struct necp_socket_info *info) } static void -necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, proc_t *socket_proc, struct necp_socket_info *info) +necp_socket_fillout_info_locked(struct inpcb 
*inp, struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, u_int32_t override_bound_interface, u_int32_t drop_order, proc_t *socket_proc, struct necp_socket_info *info, bool is_loopback) { struct socket *so = NULL; proc_t sock_proc = NULL; @@ -7503,6 +7581,7 @@ necp_socket_fillout_info_locked(struct inpcb *inp, struct sockaddr *override_loc so = inp->inp_socket; info->drop_order = drop_order; + info->is_loopback = is_loopback; if (necp_kernel_socket_policies_condition_mask & NECP_KERNEL_CONDITION_PID) { info->pid = ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid); @@ -7771,7 +7850,7 @@ necp_socket_find_policy_match_with_info_locked(struct necp_kernel_socket_policy continue; } - if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, pf_tag, rt)) { + if (necp_socket_check_policy(policy_search_array[i], info->application_id, info->real_application_id, info->cred_result, info->account_id, domain_substring, domain_dot_count, info->pid, info->uid, info->bound_interface_index, info->traffic_class, info->protocol, &info->local_addr, &info->remote_addr, required_agent_types, num_required_agent_types, info->has_client, info->client_flags, info->is_platform_binary, proc, pf_tag, rt, info->is_loopback)) { if (policy_search_array[i]->result == NECP_KERNEL_POLICY_RESULT_SOCKET_FILTER) { if (return_filter && *return_filter != NECP_FILTER_UNIT_NO_FILTER) { necp_kernel_policy_filter control_unit = policy_search_array[i]->result_parameter.filter_control_unit; @@ -7924,16 +8003,16 @@ necp_socket_is_connected(struct inpcb *inp) return inp->inp_socket->so_state & (SS_ISCONNECTING | 
SS_ISCONNECTED | SS_ISDISCONNECTING); } -static inline bool +static inline necp_socket_bypass_type_t necp_socket_bypass(struct sockaddr *override_local_addr, struct sockaddr *override_remote_addr, struct inpcb *inp) { if (necp_pass_loopback > 0 && necp_is_loopback(override_local_addr, override_remote_addr, inp, NULL, IFSCOPE_NONE)) { - return true; + return NECP_BYPASS_TYPE_LOOPBACK; } else if (necp_is_intcoproc(inp, NULL)) { - return true; + return NECP_BYPASS_TYPE_INTCOPROC; } - return false; + return NECP_BYPASS_TYPE_NONE; } static inline void @@ -7963,6 +8042,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local u_int32_t drop_dest_policy_result = NECP_KERNEL_POLICY_RESULT_NONE; necp_drop_all_bypass_check_result_t drop_all_bypass = NECP_DROP_ALL_BYPASS_CHECK_RESULT_NONE; proc_t socket_proc = NULL; + necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE; u_int32_t netagent_ids[NECP_MAX_NETAGENTS]; memset(&netagent_ids, 0, sizeof(netagent_ids)); @@ -8002,7 +8082,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local inp->inp_policyresult.results.filter_control_unit = 0; inp->inp_policyresult.results.flow_divert_aggregate_unit = 0; inp->inp_policyresult.results.route_rule_id = 0; - if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { + if (necp_socket_bypass(override_local_addr, override_remote_addr, inp) != NECP_BYPASS_TYPE_NONE) { inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS; } else { inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_DROP; @@ -8012,7 +8092,8 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local } // Check for loopback exception - if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { + bypass_type = necp_socket_bypass(override_local_addr, override_remote_addr, inp); + if (bypass_type == NECP_BYPASS_TYPE_INTCOPROC || (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && 
necp_pass_loopback == NECP_LOOPBACK_PASS_ALL)) { if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) { // If the previous policy result was "socket scoped", un-scope the socket. inp->inp_flags &= ~INP_BOUND_IF; @@ -8033,7 +8114,7 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local // Lock lck_rw_lock_shared(&necp_kernel_policy_lock); - necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &socket_proc, &info); + necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, override_bound_interface, drop_order, &socket_proc, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK)); // Check info u_int32_t flowhash = necp_socket_calc_flowhash_locked(&info); @@ -8060,6 +8141,36 @@ necp_socket_find_policy_match(struct inpcb *inp, struct sockaddr *override_local size_t route_rule_id_array_count = 0; matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? socket_proc : current_proc(), 0, &skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); + // Check for loopback exception again after the policy match + if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && + necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER && + (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) { + if (inp->inp_policyresult.results.result == NECP_KERNEL_POLICY_RESULT_SOCKET_SCOPED) { + // If the previous policy result was "socket scoped", un-scope the socket. 
+ inp->inp_flags &= ~INP_BOUND_IF; + inp->inp_boundifp = NULL; + } + // Mark socket as a pass + inp->inp_policyresult.policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + inp->inp_policyresult.skip_policy_id = NECP_KERNEL_POLICY_ID_NO_MATCH; + inp->inp_policyresult.policy_gencount = 0; + inp->inp_policyresult.app_id = 0; + inp->inp_policyresult.flowhash = 0; + inp->inp_policyresult.results.filter_control_unit = filter_control_unit; + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; + inp->inp_policyresult.results.route_rule_id = 0; + inp->inp_policyresult.results.result = NECP_KERNEL_POLICY_RESULT_PASS; + + // Unlock + lck_rw_done(&necp_kernel_policy_lock); + + if (socket_proc) { + proc_rele(socket_proc); + } + + return NECP_KERNEL_POLICY_ID_NONE; + } + // If the socket matched a scoped service policy, mark as Drop if not registered. // This covers the cases in which a service is required (on demand) but hasn't started yet. if ((service_action == NECP_KERNEL_POLICY_RESULT_TRIGGER_SCOPED || @@ -9511,6 +9622,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr necp_kernel_policy_filter filter_control_unit = 0; u_int32_t pass_flags = 0; u_int32_t flow_divert_aggregate_unit = 0; + necp_socket_bypass_type_t bypass_type = NECP_BYPASS_TYPE_NONE; memset(&netagent_ids, 0, sizeof(netagent_ids)); @@ -9541,7 +9653,7 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr if (necp_kernel_socket_policies_count == 0 || (!(inp->inp_flags2 & INP2_WANT_APP_POLICY) && necp_kernel_socket_policies_non_app_count == 0)) { if (necp_drop_all_order > 0 || drop_order > 0) { - if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { + if (necp_socket_bypass(override_local_addr, override_remote_addr, inp) != NECP_BYPASS_TYPE_NONE) { allowed_to_receive = TRUE; } else { allowed_to_receive = FALSE; @@ -9593,14 +9705,15 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct 
sockaddr } // Check for loopback exception - if (necp_socket_bypass(override_local_addr, override_remote_addr, inp)) { + bypass_type = necp_socket_bypass(override_local_addr, override_remote_addr, inp); + if (bypass_type == NECP_BYPASS_TYPE_INTCOPROC || (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && necp_pass_loopback == NECP_LOOPBACK_PASS_ALL)) { allowed_to_receive = TRUE; goto done; } // Actually calculate policy result lck_rw_lock_shared(&necp_kernel_policy_lock); - necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &socket_proc, &info); + necp_socket_fillout_info_locked(inp, override_local_addr, override_remote_addr, 0, drop_order, &socket_proc, &info, (bypass_type == NECP_BYPASS_TYPE_LOOPBACK)); flowhash = necp_socket_calc_flowhash_locked(&info); if (inp->inp_policyresult.policy_id != NECP_KERNEL_POLICY_ID_NONE && @@ -9635,6 +9748,22 @@ necp_socket_is_allowed_to_send_recv_internal(struct inpcb *inp, struct sockaddr size_t route_rule_id_array_count = 0; struct necp_kernel_socket_policy *matched_policy = necp_socket_find_policy_match_with_info_locked(necp_kernel_socket_policies_map[NECP_SOCKET_MAP_APP_ID_TO_BUCKET(info.application_id)], &info, &filter_control_unit, route_rule_id_array, &route_rule_id_array_count, MAX_AGGREGATE_ROUTE_RULES, &service_action, &service, netagent_ids, NULL, NECP_MAX_NETAGENTS, NULL, 0, socket_proc ? 
socket_proc : current_proc(), pf_tag, return_skip_policy_id, inp->inp_route.ro_rt, &drop_dest_policy_result, &drop_all_bypass, &flow_divert_aggregate_unit); + // Check for loopback exception again after the policy match + if (bypass_type == NECP_BYPASS_TYPE_LOOPBACK && + necp_pass_loopback == NECP_LOOPBACK_PASS_WITH_FILTER && + (matched_policy == NULL || matched_policy->result != NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT)) { + // Policies have changed since last evaluation, update inp result with new filter state + if (inp->inp_policyresult.results.filter_control_unit != filter_control_unit) { + inp->inp_policyresult.results.filter_control_unit = filter_control_unit; + } + if (inp->inp_policyresult.results.flow_divert_aggregate_unit != flow_divert_aggregate_unit) { + inp->inp_policyresult.results.flow_divert_aggregate_unit = flow_divert_aggregate_unit; + } + allowed_to_receive = TRUE; + lck_rw_done(&necp_kernel_policy_lock); + goto done; + } + if (route_rule_id_array_count == 1) { route_rule_id = route_rule_id_array[0]; } else if (route_rule_id_array_count > 1) { diff --git a/bsd/net/necp.h b/bsd/net/necp.h index 041682c5b..c2f39c6af 100644 --- a/bsd/net/necp.h +++ b/bsd/net/necp.h @@ -144,6 +144,7 @@ struct necp_packet_header { #define NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR 21 // necp_policy_condition_addr #define NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_RANGE 22 // necp_policy_condition_addr_range #define NECP_POLICY_CONDITION_FLOW_REMOTE_ADDR_RANGE 23 // necp_policy_condition_addr_range +#define NECP_POLICY_CONDITION_FLOW_IS_LOOPBACK 31 // N/A // Socket/Application conditions, continued #define NECP_POLICY_CONDITION_CLIENT_FLAGS 24 // u_int32_t, values from NECP_CLIENT_PARAMETER_FLAG_* #define NECP_POLICY_CONDITION_FLOW_LOCAL_ADDR_EMPTY 25 // N/A diff --git a/bsd/netinet/flow_divert.c b/bsd/netinet/flow_divert.c index 8a2b4c4f8..6a68dac81 100644 --- a/bsd/netinet/flow_divert.c +++ b/bsd/netinet/flow_divert.c @@ -1176,6 +1176,7 @@ 
flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr size_t cfil_id_size = 0; struct inpcb *inp = sotoinpcb(so); struct ifnet *ifp = NULL; + uint32_t flags = 0; error = flow_divert_packet_init(fd_cb, FLOW_DIVERT_PKT_CONNECT, &connect_packet); if (error) { @@ -1268,7 +1269,16 @@ flow_divert_create_connect_packet(struct flow_divert_pcb *fd_cb, struct sockaddr } if (so->so_flags1 & SOF1_DATA_IDEMPOTENT) { - uint32_t flags = FLOW_DIVERT_TOKEN_FLAG_TFO; + flags |= FLOW_DIVERT_TOKEN_FLAG_TFO; + } + + if ((inp->inp_flags & INP_BOUND_IF) || + ((inp->inp_vflag & INP_IPV6) && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) || + ((inp->inp_vflag & INP_IPV4) && inp->inp_laddr.s_addr != INADDR_ANY)) { + flags |= FLOW_DIVERT_TOKEN_FLAG_BOUND; + } + + if (flags != 0) { error = flow_divert_packet_append_tlv(connect_packet, FLOW_DIVERT_TLV_FLAGS, sizeof(flags), &flags); if (error) { goto done; diff --git a/bsd/netinet/flow_divert_proto.h b/bsd/netinet/flow_divert_proto.h index 424a5bbde..ecc07f4a2 100644 --- a/bsd/netinet/flow_divert_proto.h +++ b/bsd/netinet/flow_divert_proto.h @@ -85,6 +85,7 @@ #define FLOW_DIVERT_TOKEN_FLAG_VALIDATED 0x0000001 #define FLOW_DIVERT_TOKEN_FLAG_TFO 0x0000002 #define FLOW_DIVERT_TOKEN_FLAG_MPTCP 0x0000004 +#define FLOW_DIVERT_TOKEN_FLAG_BOUND 0x0000008 #define FLOW_DIVERT_GROUP_FLAG_NO_APP_MAP 0x0000001 diff --git a/bsd/netinet/in_arp.c b/bsd/netinet/in_arp.c index 01d60970a..2b583d3af 100644 --- a/bsd/netinet/in_arp.c +++ b/bsd/netinet/in_arp.c @@ -1254,7 +1254,7 @@ arp_lookup_ip(ifnet_t ifp, const struct sockaddr_in *net_dest, struct ifaddr *rt_ifa; struct sockaddr *sa; uint32_t rtflags; - struct sockaddr_dl sdl; + struct sockaddr_dl sdl = {}; boolean_t send_probe_notif = FALSE; boolean_t enqueued = FALSE; @@ -1632,7 +1632,7 @@ arp_ip_handle_input(ifnet_t ifp, u_short arpop, const struct sockaddr_in *target_ip) { char ipv4str[MAX_IPv4_STR_LEN]; - struct sockaddr_dl proxied; + struct sockaddr_dl proxied = {}; struct 
sockaddr_dl *gateway, *target_hw = NULL; struct ifaddr *ifa; struct in_ifaddr *ia; diff --git a/bsd/netinet/ip_output.c b/bsd/netinet/ip_output.c index ea79bcea4..ec6a8ecb4 100644 --- a/bsd/netinet/ip_output.c +++ b/bsd/netinet/ip_output.c @@ -1865,6 +1865,7 @@ ip_fragment(struct mbuf *m, struct ifnet *ifp, uint32_t mtu, int sw_csum) M_COPY_CLASSIFIER(m, m0); M_COPY_PFTAG(m, m0); + M_COPY_NECPTAG(m, m0); #if BYTE_ORDER != BIG_ENDIAN HTONS(mhip->ip_off); diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c index c253fc4f8..f00002616 100644 --- a/bsd/netinet/mptcp_subr.c +++ b/bsd/netinet/mptcp_subr.c @@ -1928,7 +1928,11 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, return 0; } - if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { + if (!(m->m_flags & M_PKTHDR)) { + return 0; + } + + if (m->m_pkthdr.pkt_flags & PKTF_MPTCP) { if (off && (dsn != m->m_pkthdr.mp_dsn || rseq != m->m_pkthdr.mp_rseq || dlen != m->m_pkthdr.mp_rlen)) { @@ -1941,34 +1945,38 @@ mptcp_adj_rmap(struct socket *so, struct mbuf *m, int off, uint64_t dsn, soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); return -1; } - m->m_pkthdr.mp_dsn += off; - m->m_pkthdr.mp_rseq += off; + } - VERIFY(m_pktlen(m) < UINT16_MAX); - m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); - } else { - if (!(mpts->mpts_flags & MPTSF_FULLY_ESTABLISHED)) { - /* data arrived without an DSS option mapping */ + /* If mbuf is beyond right edge of the mapping, we need to split */ + if (m_pktlen(m) > dlen - off) { + struct mbuf *new = m_split(m, dlen - off, M_DONTWAIT); + if (new == NULL) { + os_log_error(mptcp_log_handle, "%s - %lx: m_split failed dlen %u off %d pktlen %d, killing subflow %d", + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpts->mpts_mpte), + dlen, off, m_pktlen(m), + mpts->mpts_connid); - /* initial subflow can fallback right after SYN handshake */ - if (mpts->mpts_flags & MPTSF_INITIAL_SUB) { - mptcp_notify_mpfail(so); - } else { - 
soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); + soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); + return -1; + } - return -1; - } - } else if (m->m_flags & M_PKTHDR) { - /* We need to fake the DATA-mapping */ - m->m_pkthdr.pkt_flags |= PKTF_MPTCP; - m->m_pkthdr.mp_dsn = dsn + off; - m->m_pkthdr.mp_rseq = rseq + off; + m->m_next = new; + sballoc(&so->so_rcv, new); + /* Undo, as sballoc will add to it as well */ + so->so_rcv.sb_cc -= new->m_len; - VERIFY(m_pktlen(m) < UINT16_MAX); - m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); + if (so->so_rcv.sb_mbtail == m) { + so->so_rcv.sb_mbtail = new; } } + m->m_pkthdr.pkt_flags |= PKTF_MPTCP; + m->m_pkthdr.mp_dsn = dsn + off; + m->m_pkthdr.mp_rseq = rseq + off; + + VERIFY(m_pktlen(m) < UINT16_MAX); + m->m_pkthdr.mp_rlen = (uint16_t)m_pktlen(m); + mpts->mpts_flags |= MPTSF_FULLY_ESTABLISHED; return 0; @@ -1982,11 +1990,15 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio, struct mbuf **mp0, struct mbuf **controlp, int *flagsp) { #pragma unused(uio) - struct socket *mp_so = mptetoso(tptomptp(sototcpcb(so))->mpt_mpte); + struct socket *mp_so; + struct mptses *mpte; + struct mptcb *mp_tp; int flags, error = 0; - struct proc *p = current_proc(); struct mbuf *m, **mp = mp0; - boolean_t proc_held = FALSE; + + mpte = tptomptp(sototcpcb(so))->mpt_mpte; + mp_so = mptetoso(mpte); + mp_tp = mpte->mpte_mptcb; VERIFY(so->so_proto->pr_flags & PR_CONNREQUIRED); @@ -2107,16 +2119,6 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, mptcp_update_last_owner(so, mp_so); - if (mp_so->last_pid != proc_pid(p)) { - p = proc_find(mp_so->last_pid); - if (p == PROC_NULL) { - p = current_proc(); - } else { - proc_held = TRUE; - } - } - - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgrcv); SBLASTRECORDCHK(&so->so_rcv, "mptcp_subflow_soreceive 1"); SBLASTMBUFCHK(&so->so_rcv, "mptcp_subflow_soreceive 1"); @@ -2130,18 +2132,9 @@ 
mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, VERIFY(m->m_nextpkt == NULL); - if ((m->m_flags & M_PKTHDR) && (m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { - orig_dlen = dlen = m->m_pkthdr.mp_rlen; - dsn = m->m_pkthdr.mp_dsn; - sseq = m->m_pkthdr.mp_rseq; - csum = m->m_pkthdr.mp_csum; - } else { - /* We did fallback */ - if (mptcp_adj_rmap(so, m, 0, 0, 0, 0)) { - error = EIO; - *mp0 = NULL; - goto release; - } + if (mp_tp->mpt_flags & MPTCPF_FALLBACK_TO_TCP) { +fallback: + /* Just move mbuf to MPTCP-level */ sbfree(&so->so_rcv, m); @@ -2159,20 +2152,93 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, } continue; - } + } else if (!(m->m_flags & M_PKTHDR) || !(m->m_pkthdr.pkt_flags & PKTF_MPTCP)) { + struct mptsub *mpts = sototcpcb(so)->t_mpsub; + boolean_t found_mapping = false; + int parsed_length = 0; + struct mbuf *m_iter; + + /* + * No MPTCP-option in the header. Either fallback or + * wait for additional mappings. + */ + if (!(mpts->mpts_flags & MPTSF_FULLY_ESTABLISHED)) { + /* data arrived without a DSS option mapping */ + + /* initial subflow can fallback right after SYN handshake */ + if (mpts->mpts_flags & MPTSF_INITIAL_SUB) { + mptcp_notify_mpfail(so); + + goto fallback; + } else { + os_log_error(mptcp_log_handle, "%s - %lx: No DSS on secondary subflow. 
Killing %d\n", + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), + mpts->mpts_connid); + soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); + + error = EIO; + *mp0 = NULL; + goto release; + } + } + + /* Thus, let's look for an mbuf with the mapping */ + m_iter = m->m_next; + parsed_length = m->m_len; + while (m_iter != NULL && parsed_length < UINT16_MAX) { + if (!(m_iter->m_flags & M_PKTHDR) || !(m_iter->m_pkthdr.pkt_flags & PKTF_MPTCP)) { + parsed_length += m_iter->m_len; + m_iter = m_iter->m_next; + continue; + } + + found_mapping = true; + + /* Found an mbuf with a DSS-mapping */ + orig_dlen = dlen = m_iter->m_pkthdr.mp_rlen; + dsn = m_iter->m_pkthdr.mp_dsn; + sseq = m_iter->m_pkthdr.mp_rseq; + csum = m_iter->m_pkthdr.mp_csum; + + if (m_iter->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) { + dfin = 1; + } + + break; + } + + if (!found_mapping && parsed_length < UINT16_MAX) { + /* Mapping not yet present, we can wait! */ + if (*mp0 == NULL) { + error = EWOULDBLOCK; + } + goto release; + } else if (!found_mapping && parsed_length >= UINT16_MAX) { + os_log_error(mptcp_log_handle, "%s - %lx: Received more than 64KB without DSS mapping. Killing %d\n", + __func__, (unsigned long)VM_KERNEL_ADDRPERM(mpte), + mpts->mpts_connid); + /* Received 64KB without DSS-mapping. 
We should kill the subflow */ + soevent(mpts->mpts_socket, SO_FILT_HINT_LOCKED | SO_FILT_HINT_MUSTRST); + + error = EIO; + *mp0 = NULL; + goto release; + } + } else { + orig_dlen = dlen = m->m_pkthdr.mp_rlen; + dsn = m->m_pkthdr.mp_dsn; + sseq = m->m_pkthdr.mp_rseq; + csum = m->m_pkthdr.mp_csum; - if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) { - dfin = 1; + if (m->m_pkthdr.pkt_flags & PKTF_MPTCP_DFIN) { + dfin = 1; + } } /* * Check if the full mapping is now present */ if ((int)so->so_rcv.sb_cc < dlen - dfin) { - mptcplog((LOG_INFO, "%s not enough data (%u) need %u for dsn %u\n", - __func__, so->so_rcv.sb_cc, dlen, (uint32_t)dsn), - MPTCP_RECEIVER_DBG, MPTCP_LOGLVL_LOG); - if (*mp0 == NULL) { error = EWOULDBLOCK; } @@ -2238,10 +2304,6 @@ mptcp_subflow_soreceive(struct socket *so, struct sockaddr **psa, release: sbunlock(&so->so_rcv, TRUE); - if (proc_held) { - proc_rele(p); - } - return error; } @@ -2253,8 +2315,8 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, struct mbuf *top, struct mbuf *control, int flags) { struct socket *mp_so = mptetoso(tptomptp(sototcpcb(so))->mpt_mpte); - struct proc *p = current_proc(); boolean_t en_tracing = FALSE, proc_held = FALSE; + struct proc *p = current_proc(); int en_tracing_val; int sblocked = 1; /* Pretend as if it is already locked, so we won't relock it */ int error; @@ -2301,8 +2363,6 @@ mptcp_subflow_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio, inp_update_necp_policy(sotoinpcb(so), NULL, NULL, 0); #endif /* NECP */ - OSIncrementAtomicLong(&p->p_stats->p_ru.ru_msgsnd); - error = sosendcheck(so, NULL, top->m_pkthdr.len, 0, 1, 0, &sblocked); if (error) { goto out; diff --git a/bsd/netinet/tcp_input.c b/bsd/netinet/tcp_input.c index 901c0338d..078745a24 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -579,6 +579,21 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m, } #endif /* TRAFFIC_MGT */ + if (th->th_seq != tp->rcv_nxt) 
{ + struct mbuf *tmp = m; + while (tmp != NULL) { + if (mbuf_class_under_pressure(tmp)) { + m_freem(m); + tcp_reass_overflows++; + tcpstat.tcps_rcvmemdrop++; + *tlenp = 0; + return 0; + } + + tmp = tmp->m_next; + } + } + /* * Limit the number of segments in the reassembly queue to prevent * holding on to too many segments (and thus running out of mbufs). diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c index c2389347c..2799b8fea 100644 --- a/bsd/netinet/tcp_usrreq.c +++ b/bsd/netinet/tcp_usrreq.c @@ -371,7 +371,7 @@ tcp_usr_listen(struct socket *so, struct proc *p) struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp; - COMMON_START(); + COMMON_START_ALLOW_FLOW_DIVERT(true); if (inp->inp_lport == 0) { error = in_pcbbind(inp, NULL, p); } @@ -389,7 +389,7 @@ tcp6_usr_listen(struct socket *so, struct proc *p) struct inpcb *inp = sotoinpcb(so); struct tcpcb *tp; - COMMON_START(); + COMMON_START_ALLOW_FLOW_DIVERT(true); if (inp->inp_lport == 0) { inp->inp_vflag &= ~INP_IPV4; if ((inp->inp_flags & IN6P_IPV6_V6ONLY) == 0) { diff --git a/bsd/netinet6/ip6_output.c b/bsd/netinet6/ip6_output.c index 60508e3a4..92ee660ed 100644 --- a/bsd/netinet6/ip6_output.c +++ b/bsd/netinet6/ip6_output.c @@ -1910,6 +1910,7 @@ ip6_do_fragmentation(struct mbuf **mptr, uint32_t optlen, struct ifnet *ifp, M_COPY_CLASSIFIER(new_m, morig); M_COPY_PFTAG(new_m, morig); + M_COPY_NECPTAG(new_m, morig); ip6f->ip6f_reserved = 0; ip6f->ip6f_ident = id; diff --git a/bsd/netinet6/ipsec.c b/bsd/netinet6/ipsec.c index 2e11f4515..061e6f45c 100644 --- a/bsd/netinet6/ipsec.c +++ b/bsd/netinet6/ipsec.c @@ -4415,13 +4415,21 @@ ipsec6_tunnel_validate( panic("too short mbuf on ipsec6_tunnel_validate"); } #endif - if (nxt != IPPROTO_IPV4 && nxt != IPPROTO_IPV6) { + if (nxt == IPPROTO_IPV4) { + if (m->m_pkthdr.len < off + sizeof(struct ip)) { + ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr))); + return 0; + } + } else if (nxt == 
IPPROTO_IPV6) { + if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) { + ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate pkthdr %d off %d ip6hdr %zu", m->m_pkthdr.len, off, sizeof(struct ip6_hdr))); + return 0; + } + } else { + ipseclog((LOG_NOTICE, "ipsec6_tunnel_validate invalid nxt(%u) protocol", nxt)); return 0; } - if (m->m_pkthdr.len < off + sizeof(struct ip6_hdr)) { - return 0; - } /* do not decapsulate if the SA is for transport mode only */ if (sav->sah->saidx.mode == IPSEC_MODE_TRANSPORT) { return 0; diff --git a/bsd/nfs/nfs_bio.c b/bsd/nfs/nfs_bio.c index 0e7c29ea9..b9c2b5ac1 100644 --- a/bsd/nfs/nfs_bio.c +++ b/bsd/nfs/nfs_bio.c @@ -2987,6 +2987,17 @@ nfs_buf_write_rpc(struct nfsbuf *bp, int iomode, thread_t thd, kauth_cred_t cred return error; } + if (length == 0) { + /* We should never get here */ +#if DEVELOPMENT + printf("nfs_buf_write_rpc: Got request with zero length. np %p, bp %p, offset %lld\n", np, bp, offset); +#else + printf("nfs_buf_write_rpc: Got request with zero length.\n"); +#endif /* DEVELOPMENT */ + nfs_buf_iodone(bp); + return 0; + } + auio = uio_createwithbuffer(1, NBOFF(bp) + offset, UIO_SYSSPACE, UIO_WRITE, &uio_buf, sizeof(uio_buf)); NFS_UIO_ADDIOV(auio, CAST_USER_ADDR_T(bp->nb_data + offset), length); @@ -3204,7 +3215,7 @@ finish: bp->nb_verf = wverf; } - if ((rlen > 0) && (bp->nb_offio < (offset + (int)rlen))) { + if (!ISSET(bp->nb_flags, NB_STALEWVERF) && rlen > 0 && (bp->nb_offio < (offset + (int)rlen))) { bp->nb_offio = offset + rlen; } diff --git a/bsd/sys/file_internal.h b/bsd/sys/file_internal.h index 90175d11c..1bebabf82 100644 --- a/bsd/sys/file_internal.h +++ b/bsd/sys/file_internal.h @@ -206,11 +206,30 @@ os_refgrp_decl_extern(f_refgrp); /* os_refgrp_t for file refcounts */ * @brief * Acquire a file reference on the specified file. * + * @description + * The @c proc must be locked while this operation is being performed + * to avoid races with setting the FG_CONFINED flag. 
+ * + * @param proc + * The proc this file reference is taken on behalf of. + * * @param fg * The specified file */ void -fg_ref(struct fileglob *fg); +fg_ref(proc_t proc, struct fileglob *fg); + +/*! + * @function fg_drop_live + * + * @brief + * Drops a file reference on the specified file that isn't the last one. + * + * @param fg + * The file whose reference is being dropped. + */ +void +fg_drop_live(struct fileglob *fg); /*! * @function fg_drop diff --git a/bsd/sys/kern_memorystatus.h b/bsd/sys/kern_memorystatus.h index 48b03e0de..4cbfd6b8f 100644 --- a/bsd/sys/kern_memorystatus.h +++ b/bsd/sys/kern_memorystatus.h @@ -161,6 +161,20 @@ typedef struct memorystatus_kernel_stats { char largest_zone_name[MACH_ZONE_NAME_MAX_LEN]; } memorystatus_kernel_stats_t; +typedef enum memorystatus_freeze_skip_reason { + kMemorystatusFreezeSkipReasonNone = 0, + kMemorystatusFreezeSkipReasonExcessSharedMemory = 1, + kMemorystatusFreezeSkipReasonLowPrivateSharedRatio = 2, + kMemorystatusFreezeSkipReasonNoCompressorSpace = 3, + kMemorystatusFreezeSkipReasonNoSwapSpace = 4, + kMemorystatusFreezeSkipReasonBelowMinPages = 5, + kMemorystatusFreezeSkipReasonLowProbOfUse = 6, + kMemorystatusFreezeSkipReasonOther = 7, + kMemorystatusFreezeSkipReasonOutOfBudget = 8, + kMemorystatusFreezeSkipReasonOutOfSlots = 9, + kMemorystatusFreezeSkipReasonDisabled = 10, + _kMemorystatusFreezeSkipReasonMax +} memorystatus_freeze_skip_reason_t; /* ** This is a variable-length struct. ** Allocate a buffer of the size returned by the sysctl, cast to a memorystatus_snapshot_t * @@ -172,6 +186,7 @@ typedef struct jetsam_snapshot_entry { int32_t priority; uint32_t state; uint32_t fds; + memorystatus_freeze_skip_reason_t jse_freeze_skip_reason; /* why wasn't this process frozen? 
*/ uint8_t uuid[16]; uint64_t user_data; uint64_t killed; @@ -352,6 +367,8 @@ int memorystatus_control(uint32_t command, int32_t pid, uint32_t flags, void *bu #define MEMORYSTATUS_CMD_SET_JETSAM_SNAPSHOT_OWNERSHIP 23 /* Used by unit tests in the development kernel only. */ #endif /* PRIVATE */ +#define MEMORYSTATUS_CMD_GET_PROCESS_IS_FROZEN 24 /* Check if the process is frozen. */ + /* Commands that act on a group of processes */ #define MEMORYSTATUS_CMD_GRP_SET_PROPERTIES 100 @@ -505,7 +522,6 @@ typedef struct memorystatus_memlimit_properties2 { #define P_MEMSTAT_PRIORITY_ASSERTION 0x00020000 /* jetsam priority is being driven by an assertion */ #define P_MEMSTAT_FREEZE_CONSIDERED 0x00040000 /* This process has been considered for the freezer. */ - /* * p_memstat_relaunch_flags holds * - relaunch behavior when jetsammed diff --git a/bsd/sys/kern_memorystatus_freeze.h b/bsd/sys/kern_memorystatus_freeze.h index c56ba0b4e..dd01e09ce 100644 --- a/bsd/sys/kern_memorystatus_freeze.h +++ b/bsd/sys/kern_memorystatus_freeze.h @@ -112,6 +112,8 @@ boolean_t memorystatus_freeze_thread_should_run(void); int memorystatus_set_process_is_freezable(pid_t pid, boolean_t is_freezable); int memorystatus_get_process_is_freezable(pid_t pid, int *is_freezable); int memorystatus_freezer_control(int32_t flags, user_addr_t buffer, size_t buffer_size, int32_t *retval); +void memorystatus_freeze_init_proc(proc_t p); +errno_t memorystatus_get_process_is_frozen(pid_t pid, int *is_freezable); #endif /* CONFIG_FREEZE */ diff --git a/bsd/sys/mbuf.h b/bsd/sys/mbuf.h index 75369750d..0e8be447e 100644 --- a/bsd/sys/mbuf.h +++ b/bsd/sys/mbuf.h @@ -615,6 +615,7 @@ struct mbuf { #define m_dat M_dat.M_databuf #define m_pktlen(_m) ((_m)->m_pkthdr.len) #define m_pftag(_m) (&(_m)->m_pkthdr.builtin_mtag._net_mtag._pf_mtag) +#define m_necptag(_m) (&(_m)->m_pkthdr.builtin_mtag._net_mtag._necp_mtag) /* mbuf flags (private) */ #define M_EXT 0x0001 /* has associated external storage */ @@ -814,6 +815,8 @@ union 
m16kcluster { #define M_COPY_PFTAG(to, from) m_copy_pftag(to, from) +#define M_COPY_NECPTAG(to, from) m_copy_necptag(to, from) + #define M_COPY_CLASSIFIER(to, from) m_copy_classifier(to, from) /* @@ -1276,6 +1279,8 @@ extern struct mbuf *m_prepend_2(struct mbuf *, int, int, int); extern struct mbuf *m_pullup(struct mbuf *, int); extern struct mbuf *m_split(struct mbuf *, int, int); extern void m_mclfree(caddr_t p); +extern int mbuf_get_class(struct mbuf *m); +extern bool mbuf_class_under_pressure(struct mbuf *m); /* * On platforms which require strict alignment (currently for anything but @@ -1434,6 +1439,7 @@ __private_extern__ caddr_t m_mclalloc(int); __private_extern__ int m_mclhasreference(struct mbuf *); __private_extern__ void m_copy_pkthdr(struct mbuf *, struct mbuf *); __private_extern__ void m_copy_pftag(struct mbuf *, struct mbuf *); +__private_extern__ void m_copy_necptag(struct mbuf *, struct mbuf *); __private_extern__ void m_copy_classifier(struct mbuf *, struct mbuf *); __private_extern__ struct mbuf *m_dtom(void *); diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h index 2bf615a71..46a610413 100644 --- a/bsd/sys/proc_internal.h +++ b/bsd/sys/proc_internal.h @@ -389,6 +389,9 @@ struct proc { int32_t p_memstat_requestedpriority; /* active priority */ int32_t p_memstat_assertionpriority; /* assertion driven priority */ uint32_t p_memstat_dirty; /* dirty state */ +#if CONFIG_FREEZE + uint8_t p_memstat_freeze_skip_reason; /* memorystaus_freeze_skipped_reason_t. Protected by the freezer mutex. 
*/ +#endif uint64_t p_memstat_userdata; /* user state */ uint64_t p_memstat_idledeadline; /* time at which process became clean */ uint64_t p_memstat_idle_start; /* abstime process transitions into the idle band */ diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c index 9d8c38209..9421ee0f9 100644 --- a/bsd/vm/vm_unix.c +++ b/bsd/vm/vm_unix.c @@ -2052,8 +2052,9 @@ shared_region_map_and_slide_setup( uint32_t mappings_count, struct shared_file_mapping_slide_np *mappings, struct _sr_file_mappings **sr_file_mappings, - struct vm_shared_region **shared_region, - struct vnode **scdir_vp) + struct vm_shared_region **shared_region_ptr, + struct vnode **scdir_vp, + struct vnode *rdir_vp) { int error = 0; struct _sr_file_mappings *srfmp; @@ -2064,6 +2065,7 @@ shared_region_map_and_slide_setup( vm_prot_t maxprot = VM_PROT_ALL; #endif uint32_t i; + struct vm_shared_region *shared_region; SHARED_REGION_TRACE_DEBUG( ("shared_region: %p [%d(%s)] -> map\n", @@ -2113,8 +2115,9 @@ shared_region_map_and_slide_setup( } /* get the process's shared region (setup in vm_map_exec()) */ - *shared_region = vm_shared_region_trim_and_get(current_task()); - if (*shared_region == NULL) { + shared_region = vm_shared_region_trim_and_get(current_task()); + *shared_region_ptr = shared_region; + if (shared_region == NULL) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(): " "no shared region\n", @@ -2124,6 +2127,22 @@ shared_region_map_and_slide_setup( goto done; } + /* + * Check the shared region matches the current root + * directory of this process. Deny the mapping to + * avoid tainting the shared region with something that + * doesn't quite belong into it. + */ + struct vnode *sr_vnode = vm_shared_region_root_dir(shared_region); + if (sr_vnode != NULL ? 
rdir_vp != sr_vnode : rdir_vp != rootvnode) { + SHARED_REGION_TRACE_ERROR( + ("shared_region: map(%p) root_dir mismatch\n", + (void *)VM_KERNEL_ADDRPERM(current_thread()))); + error = EPERM; + goto done; + } + + for (srfmp = &(*sr_file_mappings)[0]; srfmp < &(*sr_file_mappings)[files_count]; srfmp++) { @@ -2311,11 +2330,8 @@ after_root_check: #else /* CONFIG_CSR */ /* Devices without SIP/ROSP need to make sure that the shared cache is on the root volume. */ - struct vnode *root_vp = p->p_fd->fd_rdir; - if (root_vp == NULL) { - root_vp = rootvnode; - } - if (srfmp->vp->v_mount != root_vp->v_mount) { + assert(rdir_vp != NULL); + if (srfmp->vp->v_mount != rdir_vp->v_mount) { SHARED_REGION_TRACE_ERROR( ("shared_region: %p [%d(%s)] map(%p:'%s'): " "not on process's root volume\n", @@ -2409,9 +2425,9 @@ after_root_check: } done: if (error != 0) { - shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, *shared_region, *scdir_vp); + shared_region_map_and_slide_cleanup(p, files_count, *sr_file_mappings, shared_region, *scdir_vp); *sr_file_mappings = NULL; - *shared_region = NULL; + *shared_region_ptr = NULL; *scdir_vp = NULL; } return error; @@ -2439,23 +2455,35 @@ _shared_region_map_and_slide( kern_return_t kr = KERN_SUCCESS; struct _sr_file_mappings *sr_file_mappings = NULL; struct vnode *scdir_vp = NULL; + struct vnode *rdir_vp = NULL; struct vm_shared_region *shared_region = NULL; + /* + * Get a reference to the current proc's root dir. + * Need this to prevent racing with chroot. + */ + proc_fdlock(p); + rdir_vp = p->p_fd->fd_rdir; + if (rdir_vp == NULL) { + rdir_vp = rootvnode; + } + assert(rdir_vp != NULL); + vnode_get(rdir_vp); + proc_fdunlock(p); + /* * Turn files, mappings into sr_file_mappings and other setup. 
*/ error = shared_region_map_and_slide_setup(p, files_count, files, mappings_count, mappings, - &sr_file_mappings, &shared_region, &scdir_vp); + &sr_file_mappings, &shared_region, &scdir_vp, rdir_vp); if (error != 0) { + vnode_put(rdir_vp); return error; } /* map the file(s) into that shared region's submap */ - kr = vm_shared_region_map_file(shared_region, - (void *) p->p_fd->fd_rdir, - files_count, - sr_file_mappings); + kr = vm_shared_region_map_file(shared_region, files_count, sr_file_mappings); if (kr != KERN_SUCCESS) { SHARED_REGION_TRACE_ERROR(("shared_region: %p [%d(%s)] map(): " "vm_shared_region_map_file() failed kr=0x%x\n", @@ -2491,6 +2519,7 @@ _shared_region_map_and_slide( OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag); } + vnode_put(rdir_vp); shared_region_map_and_slide_cleanup(p, files_count, sr_file_mappings, shared_region, scdir_vp); SHARED_REGION_TRACE_DEBUG( @@ -3293,14 +3322,6 @@ SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_full, SYSCTL_QUAD(_vm, OID_AUTO, corpse_footprint_no_buf, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_corpse_footprint_no_buf, ""); -#if PMAP_CS -extern uint64_t vm_cs_defer_to_pmap_cs; -extern uint64_t vm_cs_defer_to_pmap_cs_not; -SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs, ""); -SYSCTL_QUAD(_vm, OID_AUTO, cs_defer_to_pmap_cs_not, - CTLFLAG_RD | CTLFLAG_LOCKED, &vm_cs_defer_to_pmap_cs_not, ""); -#endif /* PMAP_CS */ extern uint64_t shared_region_pager_copied; extern uint64_t shared_region_pager_slid; diff --git a/config/MASTER b/config/MASTER index d429f82a2..3e8381e78 100644 --- a/config/MASTER +++ b/config/MASTER @@ -326,6 +326,9 @@ options CONFIG_EMBEDDED # options CONFIG_ARROW # +options NOS_ARM_ASM # +options NOS_ARM_PMAP # + # support dynamic signing of code # options CONFIG_DYNAMIC_CODE_SIGNING # @@ -567,8 +570,6 @@ options CONFIG_SECURE_BSD_ROOT # secure BSD root # options CONFIG_KAS_INFO # kas_info support # -options CONFIG_ZALLOC_SEQUESTER # Sequester VA for 
zones # - # # MACH configuration options. # diff --git a/config/MASTER.arm b/config/MASTER.arm index 2b7602e1f..0dbf52e8f 100644 --- a/config/MASTER.arm +++ b/config/MASTER.arm @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm xsmall msgb_small config_embedded config_enforce_signed_code config_zcache config_darkboot ] +# KERNEL_BASE = [ arm xsmall msgb_small config_embedded config_enforce_signed_code config_zcache config_darkboot ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] diff --git a/config/MASTER.arm64 b/config/MASTER.arm64 index bfeb9956f..15846736a 100644 --- a/config/MASTER.arm64 +++ b/config/MASTER.arm64 @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] +# KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -64,7 +64,7 @@ # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root ] # 
SECURITY_RELEASE = [ SECURITY_BASE ] # SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] # SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] diff --git a/config/MASTER.arm64.BridgeOS b/config/MASTER.arm64.BridgeOS index c4bae4b76..3fd4f903c 100644 --- a/config/MASTER.arm64.BridgeOS +++ b/config/MASTER.arm64.BridgeOS @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] +# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -64,7 +64,7 @@ # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root ] # SECURITY_RELEASE = [ SECURITY_BASE ] # SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] # SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] diff --git a/config/MASTER.arm64.MacOSX b/config/MASTER.arm64.MacOSX index fd3ab5e2e..509472214 100644 --- a/config/MASTER.arm64.MacOSX +++ b/config/MASTER.arm64.MacOSX @@ -18,8 +18,8 @@ # # KERNEL_BASE = [ arm64 medium msgb_large config_arrow config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] -# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost 
proc_ref_debug os_reason_debug pgtrace config_zalloc_sequester ] -# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace config_zalloc_sequester ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] # BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_personas ] # BSD_RELEASE = [ BSD_BASE ] # BSD_DEV = [ BSD_BASE config_vnguard ] @@ -40,11 +40,7 @@ # VPN = [ ipsec flow_divert necp content_filter ] # PF = [ pf pflog ] # MULTIPATH = [ multipath mptcp ] -#ifdef SOC_CONFIG_t8020 # HIBERNATION = [ ] -#else /*!SOC_CONFIG_t8020*/ -# HIBERNATION = [ hibernation ] -#endif /*!SOC_CONFIG_t8020*/ # IOKIT_BASE = [ iokit iokitcpp no_kernel_hid config_sleep iokitstats HIBERNATION ] # IOKIT_RELEASE = [ IOKIT_BASE ] # IOKIT_DEV = [ IOKIT_BASE iotracking ] diff --git a/config/MASTER.arm64.bcm2837 b/config/MASTER.arm64.bcm2837 index d1c4ef467..e1d6bfd92 100644 --- a/config/MASTER.arm64.bcm2837 +++ b/config/MASTER.arm64.bcm2837 @@ -16,7 +16,8 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_requires_u32_munging config_zcache ] +# ARM_EXTRAS_BASE = [ nos_arm_pmap nos_arm_asm ] +# KERNEL_BASE = [ arm64 xsmall msgb_small config_embedded config_requires_u32_munging config_zcache ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug 
config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -62,7 +63,7 @@ # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root ] # SECURITY_RELEASE = [ SECURITY_BASE ] # SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] # SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] diff --git a/config/MASTER.arm64.iPhoneOS b/config/MASTER.arm64.iPhoneOS index 506772eb4..98852a7f7 100644 --- a/config/MASTER.arm64.iPhoneOS +++ b/config/MASTER.arm64.iPhoneOS @@ -16,7 +16,7 @@ # Standard Apple OS Configurations: # -------- ----- -- --------------- # -# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ] +# KERNEL_BASE = [ arm64 xsmall msgb_large config_embedded config_enforce_signed_code config_requires_u32_munging config_zcache config_darkboot ARM_EXTRAS_BASE ] # KERNEL_RELEASE = [ KERNEL_BASE ] # KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug pgtrace ] # KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug pgtrace ] @@ -64,7 +64,7 @@ # VM_RELEASE = [ VM_BASE ] # VM_DEV = [ VM_BASE dynamic_codesigning ] # VM_DEBUG = [ VM_BASE dynamic_codesigning ] -# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root config_zalloc_sequester ] +# SECURITY_BASE = [ config_macf kernel_integrity config_secure_bsd_root ] # SECURITY_RELEASE = [ SECURITY_BASE ] # SECURITY_DEV = [ SECURITY_BASE config_setuid config_kas_info ] # SECURITY_DEBUG = [ SECURITY_BASE config_setuid config_kas_info ] diff --git a/config/MASTER.x86_64 b/config/MASTER.x86_64 index 
5350cb839..31d87fd6f 100644 --- a/config/MASTER.x86_64 +++ b/config/MASTER.x86_64 @@ -18,8 +18,8 @@ # # KERNEL_BASE = [ intel medium msgb_large config_requires_u32_munging config_zcache config_delay_idle_sleep config_proc_udata_storage vsprintf ] # KERNEL_RELEASE = [ KERNEL_BASE ] -# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug config_zalloc_sequester ] -# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug config_zalloc_sequester ] +# KERNEL_DEV = [ KERNEL_BASE development mach_assert config_xnupost proc_ref_debug os_reason_debug ] +# KERNEL_DEBUG = [ KERNEL_BASE debug mach_assert config_xnupost config_ltable_stats config_ltable_debug config_waitq_stats config_workloop_debug config_waitq_debug ] # BSD_BASE = [ mach_bsd sysv_sem sysv_msg sysv_shm config_netboot config_imageboot config_imageboot_chunklist config_workqueue psynch config_proc_uuid_policy config_coredump pgo config_32bit_telemetry config_personas ] # BSD_RELEASE = [ BSD_BASE ] # BSD_DEV = [ BSD_BASE config_vnguard ] diff --git a/config/MasterVersion b/config/MasterVersion index 09a96f00e..3f2bda89b 100644 --- a/config/MasterVersion +++ b/config/MasterVersion @@ -1,4 +1,4 @@ -20.1.0 +20.2.0 # The first line of this file contains the master version number for the kernel. # All other instances of the kernel version in xnu are derived from this file. 
diff --git a/config/Private.arm64.exports b/config/Private.arm64.exports index f64ac3509..1c0e13e39 100644 --- a/config/Private.arm64.exports +++ b/config/Private.arm64.exports @@ -64,9 +64,40 @@ _pgtrace_clear_probe _mach_bridge_recv_timestamps _mach_bridge_init_timestamp _mach_bridge_set_params +_pmap_iommu_init +_pmap_iommu_iovmalloc +_pmap_iommu_map +_pmap_iommu_unmap +_pmap_iommu_iovmfree +_pmap_iommu_ioctl +_pmap_iommu_grant_page +_pmap_iommu_alloc_contiguous_pages +_nvme_ppl_get_desc +_sart_get_desc +_t8020dart_get_desc +_t8020dart_vo_tte +_uat_get_desc +_set_invalidate_hmac_function _PE_panic_debugging_enabled _register_additional_panic_data_buffer _apply_func_phys _Switch_context _gT1Sz +__ZN26IOUnifiedAddressTranslator10gMetaClassE +__ZN26IOUnifiedAddressTranslator10superClassE +__ZN26IOUnifiedAddressTranslator17getPageTableEntryEy +__ZN26IOUnifiedAddressTranslator18setClientContextIDEjb +__ZN26IOUnifiedAddressTranslator21removeClientContextIDEv +__ZN26IOUnifiedAddressTranslator19isPageFaultExpectedEyj +__ZN26IOUnifiedAddressTranslator22registerTaskForServiceEP4taskP9IOService +__ZN26IOUnifiedAddressTranslator23createMappingInApertureEjP18IOMemoryDescriptorjym +__ZN26IOUnifiedAddressTranslator23getTotalPageTableMemoryEv +__ZN26IOUnifiedAddressTranslator3mapEP11IOMemoryMapj +__ZN26IOUnifiedAddressTranslator5doMapEP18IOMemoryDescriptoryyj +__ZN26IOUnifiedAddressTranslator5unmapEP11IOMemoryMap +__ZN26IOUnifiedAddressTranslator7doUnmapEP18IOMemoryDescriptoryy +__ZN26IOUnifiedAddressTranslator8taskDiedEv +__ZN26IOUnifiedAddressTranslator12commitUnmapsEv +__ZN26IOUnifiedAddressTranslator14prepareFWUnmapEyy +__ZTV26IOUnifiedAddressTranslator diff --git a/iokit/IOKit/IOHibernatePrivate.h b/iokit/IOKit/IOHibernatePrivate.h index dd1dd039b..af2fb5ac2 100644 --- a/iokit/IOKit/IOHibernatePrivate.h +++ b/iokit/IOKit/IOHibernatePrivate.h @@ -33,7 +33,6 @@ #if defined(__arm64__) -#define HIBERNATE_HMAC_IMAGE 1 #define HIBERNATE_HAVE_MACHINE_HEADER 1 // enable the 
hibernation exception handler on DEBUG and DEVELOPMENT kernels diff --git a/iokit/IOKit/IONVRAM.h b/iokit/IOKit/IONVRAM.h index 91336d2a7..17f91c66a 100644 --- a/iokit/IOKit/IONVRAM.h +++ b/iokit/IOKit/IONVRAM.h @@ -83,6 +83,7 @@ private: OSPtr _registryPropertiesKey; UInt8 *_nvramImage; IOLock *_variableLock; + IOLock *_controllerLock; UInt32 _commonPartitionOffset; UInt32 _commonPartitionSize; UInt8 *_commonImage; @@ -145,7 +146,7 @@ private: UInt32 getNVRAMSize(void); void initNVRAMImage(void); void initProxyData(void); - IOReturn syncVariables(void); + IOReturn serializeVariables(void); IOReturn setPropertyInternal(const OSSymbol *aKey, OSObject *anObject); IOReturn removePropertyInternal(const OSSymbol *aKey); IOReturn chooseDictionary(IONVRAMOperation operation, const uuid_t *varGuid, diff --git a/iokit/Kernel/IOHibernateIO.cpp b/iokit/Kernel/IOHibernateIO.cpp index bb6e1e85c..1320d8d0c 100644 --- a/iokit/Kernel/IOHibernateIO.cpp +++ b/iokit/Kernel/IOHibernateIO.cpp @@ -181,9 +181,6 @@ #endif /* defined(__i386__) || defined(__x86_64__) */ #include -#if HIBERNATE_HMAC_IMAGE -#include -#endif /* HIBERNATE_HMAC_IMAGE */ extern "C" addr64_t kvtophys(vm_offset_t va); extern "C" ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va); @@ -258,7 +255,6 @@ enum { kVideoMapSize = 80 * 1024 * 1024 }; // copy from phys addr to MD -#if !HIBERNATE_HMAC_IMAGE static IOReturn IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md, IOByteCount offset, addr64_t bytes, IOByteCount length) @@ -296,7 +292,6 @@ IOMemoryDescriptorWriteFromPhysical(IOMemoryDescriptor * md, return remaining ? 
kIOReturnUnderrun : kIOReturnSuccess; } -#endif /* !HIBERNATE_HMAC_IMAGE */ // copy from MD to phys addr @@ -631,10 +626,6 @@ IOHibernateSystemSleep(void) gIOHibernateCurrentHeader->options |= kIOHibernateOptionProgress; } -#if HIBERNATE_HMAC_IMAGE - // inform HMAC driver that we're going to hibernate - ppl_hmac_hibernate_begin(); -#endif /* HIBERNATE_HMAC_IMAGE */ #if defined(__i386__) || defined(__x86_64__) if (vars->volumeCryptKeySize && @@ -1144,26 +1135,40 @@ IOHibernateSystemHasSlept(void) /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ -#if defined(__i386__) || defined(__x86_64__) -static DeviceTreeNode * -MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry, vm_offset_t region_start, vm_size_t region_size) +static const DeviceTreeNode * +MergeDeviceTree(const DeviceTreeNode * entry, IORegistryEntry * regEntry, OSSet * entriesToUpdate, vm_offset_t region_start, vm_size_t region_size) { DeviceTreeNodeProperty * prop; - DeviceTreeNode * child; + const DeviceTreeNode * child; IORegistryEntry * childRegEntry; const char * nameProp; unsigned int propLen, idx; + bool updateEntry = true; + if (!regEntry) { + updateEntry = false; + } else if (entriesToUpdate && !entriesToUpdate->containsObject(regEntry)) { + updateEntry = false; + } + prop = (DeviceTreeNodeProperty *) (entry + 1); for (idx = 0; idx < entry->nProperties; idx++) { - if (regEntry && (0 != strcmp("name", prop->name))) { + if (updateEntry && (0 != strcmp("name", prop->name))) { regEntry->setProperty((const char *) prop->name, (void *) (prop + 1), prop->length); // HIBPRINT("%s: %s, %d\n", regEntry->getName(), prop->name, prop->length); } prop = (DeviceTreeNodeProperty *) (((uintptr_t)(prop + 1)) + ((prop->length + 3) & ~3)); } - child = (DeviceTreeNode *) prop; + if (entriesToUpdate) { + entriesToUpdate->removeObject(regEntry); + if (entriesToUpdate->getCount() == 0) { + // we've updated all the entries we care about so we can stop + return NULL; + } + } + + 
child = (const DeviceTreeNode *) prop; for (idx = 0; idx < entry->nChildren; idx++) { if (kSuccess != SecureDTGetPropertyRegion(child, "name", (void const **) &nameProp, &propLen, region_start, region_size)) { @@ -1171,12 +1176,14 @@ MergeDeviceTree(DeviceTreeNode * entry, IORegistryEntry * regEntry, vm_offset_t } childRegEntry = regEntry ? regEntry->childFromPath(nameProp, gIODTPlane) : NULL; // HIBPRINT("%s == %p\n", nameProp, childRegEntry); - child = MergeDeviceTree(child, childRegEntry, region_start, region_size); + child = MergeDeviceTree(child, childRegEntry, entriesToUpdate, region_start, region_size); + if (!child) { + // the recursive call updated the last entry we cared about, so we can stop + break; + } } return child; } -#endif - IOReturn IOHibernateSystemWake(void) @@ -1276,10 +1283,6 @@ IOHibernateDone(IOHibernateVars * vars) vars->srcBuffer->release(); } -#if HIBERNATE_HMAC_IMAGE - // inform HMAC driver that we're done hibernating - ppl_hmac_hibernate_end(); -#endif /* HIBERNATE_HMAC_IMAGE */ bzero(&gIOHibernateHandoffPages[0], gIOHibernateHandoffPageCount * sizeof(gIOHibernateHandoffPages[0])); if (vars->handoffBuffer) { @@ -1297,14 +1300,32 @@ IOHibernateDone(IOHibernateVars * vars) break; case kIOHibernateHandoffTypeDeviceTree: + { #if defined(__i386__) || defined(__x86_64__) - MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot(), - (vm_offset_t)data, (vm_size_t)handoff->bytecount); -#else - // On ARM, the device tree is confined to its region covered by CTRR, so effectively immutable. 
- panic("kIOHibernateHandoffTypeDeviceTree not supported on this platform."); + // On Intel, process the entirety of the passed in device tree + OSSet * entriesToUpdate = NULL; +#elif defined(__arm64__) + // On ARM, only allow hibernation to update specific entries + const char *mergePaths[] = { + kIODeviceTreePlane ":/chosen/boot-object-manifests", + kIODeviceTreePlane ":/chosen/secure-boot-hashes", + }; + const size_t mergePathCount = sizeof(mergePaths) / sizeof(mergePaths[0]); + OSSet * entriesToUpdate = OSSet::withCapacity(mergePathCount); + for (size_t i = 0; i < mergePathCount; i++) { + IORegistryEntry *entry = IORegistryEntry::fromPath(mergePaths[i]); + if (!entry) { + panic("failed to find %s in IORegistry", mergePaths[i]); + } + entriesToUpdate->setObject(entry); + OSSafeReleaseNULL(entry); + } #endif + MergeDeviceTree((DeviceTreeNode *) data, IOService::getServiceRoot(), entriesToUpdate, + (vm_offset_t)data, (vm_size_t)handoff->bytecount); + OSSafeReleaseNULL(entriesToUpdate); break; + } case kIOHibernateHandoffTypeKeyStore: #if defined(__i386__) || defined(__x86_64__) @@ -1562,38 +1583,12 @@ IOHibernatePolledFileWrite(IOHibernateVars * vars, { IOReturn err; -#if HIBERNATE_HMAC_IMAGE - uint64_t originalPosition = 0; - if (!bytes && !size) { - originalPosition = vars->fileVars->position; - } -#endif /* HIBERNATE_HMAC_IMAGE */ err = IOPolledFileWrite(vars->fileVars, bytes, size, cryptvars); if ((kIOReturnSuccess == err) && hibernate_should_abort()) { err = kIOReturnAborted; } -#if HIBERNATE_HMAC_IMAGE - if ((kIOReturnSuccess == err) && (vars->imageShaCtx)) { - if (!bytes && !size) { - // determine how many bytes were written - size = vars->fileVars->position - originalPosition; - } - if (bytes) { - SHA256_Update(vars->imageShaCtx, bytes, size); - } else { - // update with zeroes - uint8_t zeroes[512] = {}; - size_t len = size; - while (len) { - IOByteCount toHash = min(len, sizeof(zeroes)); - SHA256_Update(vars->imageShaCtx, zeroes, toHash); - len -= 
toHash; - } - } - } -#endif /* HIBERNATE_HMAC_IMAGE */ return err; } @@ -1630,11 +1625,9 @@ hibernate_write_image(void) uint32_t pageAndCount[2]; addr64_t phys64; IOByteCount segLen; -#if !HIBERNATE_HMAC_IMAGE uint32_t restore1Sum = 0, sum = 0, sum1 = 0, sum2 = 0; uintptr_t hibernateBase; uintptr_t hibernateEnd; -#endif /* HIBERNATE_HMAC_IMAGE */ AbsoluteTime startTime, endTime; AbsoluteTime allTime, compTime; @@ -1665,13 +1658,6 @@ hibernate_write_image(void) return kIOHibernatePostWriteSleep; } -#if HIBERNATE_HMAC_IMAGE - // set up SHA and HMAC context to hash image1 (wired pages) - SHA256_CTX imageShaCtx; - vars->imageShaCtx = &imageShaCtx; - SHA256_Init(vars->imageShaCtx); - ppl_hmac_reset(true); -#endif /* HIBERNATE_HMAC_IMAGE */ #if !defined(__arm64__) if (kIOHibernateModeSleep & gIOHibernateMode) { @@ -1795,61 +1781,6 @@ hibernate_write_image(void) } } -#if HIBERNATE_HMAC_IMAGE - if (vars->fileVars->position > UINT32_MAX) { - err = kIOReturnNoSpace; - break; - } - header->segmentsFileOffset = (uint32_t)vars->fileVars->position; - - // fetch the IOHibernateHibSegInfo and the actual pages to write - // we use srcBuffer as scratch space - IOHibernateHibSegInfo *segInfo = &header->hibSegInfo; - void *segInfoScratch = vars->srcBuffer->getBytesNoCopy(); - - // This call also enables PMAP hibernation asserts which will prevent modification - // of PMAP data structures. This needs to occur before pages start getting written - // into the image. 
- ppl_hmac_fetch_hibseg_and_info(segInfoScratch, vars->srcBuffer->getCapacity(), segInfo); - - // write each segment to the file - size_t segInfoScratchPos = 0; - int hibSectIdx = -1; - uint32_t hibSegPageCount = 0; - for (int i = 0; i < NUM_HIBSEGINFO_SEGMENTS; i++) { - hibSegPageCount += segInfo->segments[i].pageCount; - size_t size = ptoa_64(segInfo->segments[i].pageCount); - if (size) { - err = IOHibernatePolledFileWrite(vars, - (uint8_t*)segInfoScratch + segInfoScratchPos, size, cryptvars); - if (kIOReturnSuccess != err) { - break; - } - segInfoScratchPos += size; - - // is this sectHIBTEXTB? - if (ptoa_64(segInfo->segments[i].physPage) == trunc_page(kvtophys(sectHIBTEXTB))) { - // remember which segment is sectHIBTEXTB because we'll need it later - hibSectIdx = i; - } - } - } - - if (hibSectIdx == -1) { - panic("couldn't find sectHIBTEXTB in segInfo"); - } - - // set the header fields corresponding to the HIB segments - header->restore1CodePhysPage = segInfo->segments[hibSectIdx].physPage; - header->restore1CodeVirt = trunc_page(sectHIBTEXTB); - header->restore1PageCount = hibSegPageCount; - header->restore1CodeOffset = (uint32_t)(((uintptr_t) &hibernate_machine_entrypoint) - header->restore1CodeVirt); - - // set restore1StackOffset to the physical page of the top of the stack to simplify the restore code - vm_offset_t stackFirstPage, stackPageSize; - pal_hib_get_stack_pages(&stackFirstPage, &stackPageSize); - header->restore1StackOffset = (uint32_t)(stackFirstPage + stackPageSize); -#else /* !HIBERNATE_HMAC_IMAGE */ hibernateBase = HIB_BASE; /* Defined in PAL headers */ hibernateEnd = (segHIBB + segSizeHIB); @@ -1907,7 +1838,6 @@ hibernate_write_image(void) break; } } -#endif /* !HIBERNATE_HMAC_IMAGE */ if (!vars->hwEncrypt && (kIOHibernateModeEncrypt & gIOHibernateMode)) { vars->fileVars->encryptStart = (vars->fileVars->position & ~(AES_BLOCK_SIZE - 1)); @@ -1948,14 +1878,7 @@ hibernate_write_image(void) for (page = 0; page < count; page += page_size) { 
phys64 = vars->previewBuffer->getPhysicalSegment(page, NULL, kIOMemoryMapperNone); -#if HIBERNATE_HMAC_IMAGE - err = ppl_hmac_update_and_compress_page(atop_64_ppnum(phys64), NULL, NULL); - if (kIOReturnSuccess != err) { - break; - } -#else /* !HIBERNATE_HMAC_IMAGE */ sum1 += hibernate_sum_page(src + page, atop_64_ppnum(phys64)); -#endif /* !HIBERNATE_HMAC_IMAGE */ } if (kIOReturnSuccess != err) { break; @@ -2042,12 +1965,6 @@ hibernate_write_image(void) bitmap_size, header->previewSize, pageCount, vars->fileVars->position); -#if HIBERNATE_HMAC_IMAGE - // we don't need to sign the page data into imageHeaderHMAC because it's - // already signed into image1PagesHMAC/image2PagesHMAC - vars->imageShaCtx = NULL; - header->imageHeaderHMACSize = (uint32_t)vars->fileVars->position; -#endif /* HIBERNATE_HMAC_IMAGE */ enum // pageType @@ -2126,9 +2043,6 @@ hibernate_write_image(void) } for (page = ppnum; page < (ppnum + count); page++) { -#if HIBERNATE_HMAC_IMAGE - wkresult = ppl_hmac_update_and_compress_page(page, (void **)&src, compressed); -#else /* !HIBERNATE_HMAC_IMAGE */ err = IOMemoryDescriptorWriteFromPhysical(vars->srcBuffer, 0, ptoa_64(page), page_size); if (err) { HIBLOG("IOMemoryDescriptorWriteFromPhysical %d [%ld] %x\n", __LINE__, (long)page, err); @@ -2147,7 +2061,6 @@ hibernate_write_image(void) (WK_word*) compressed, (WK_word*) scratch, (uint32_t) (page_size - 4)); -#endif /* !HIBERNATE_HMAC_IMAGE */ clock_get_uptime(&endTime); ADD_ABSOLUTETIME(&compTime, &endTime); @@ -2239,12 +2152,6 @@ hibernate_write_image(void) image1Size = vars->fileVars->position; HIBLOG("image1Size 0x%qx, encryptStart1 0x%qx, End1 0x%qx\n", image1Size, header->encryptStart, header->encryptEnd); -#if HIBERNATE_HMAC_IMAGE - // compute the image1 HMAC - ppl_hmac_final(header->image1PagesHMAC, sizeof(header->image1PagesHMAC)); - // reset the PPL context so we can compute the image2 (non-wired pages) HMAC - ppl_hmac_reset(false); -#endif /* HIBERNATE_HMAC_IMAGE */ } } if (kIOReturnSuccess 
!= err) { @@ -2258,10 +2165,6 @@ hibernate_write_image(void) break; } -#if HIBERNATE_HMAC_IMAGE - // compute the image2 HMAC - ppl_hmac_final(header->image2PagesHMAC, sizeof(header->image2PagesHMAC)); -#endif /* HIBERNATE_HMAC_IMAGE */ // Header: @@ -2270,11 +2173,9 @@ hibernate_write_image(void) header->bitmapSize = bitmap_size; header->pageCount = pageCount; -#if !HIBERNATE_HMAC_IMAGE header->restore1Sum = restore1Sum; header->image1Sum = sum1; header->image2Sum = sum2; -#endif /* !HIBERNATE_HMAC_IMAGE */ header->sleepTime = gIOLastSleepTime.tv_sec; header->compression = ((uint32_t)((compressedSize << 8) / uncompressedSize)); @@ -2294,17 +2195,6 @@ hibernate_write_image(void) header->lastHibAbsTime = mach_absolute_time(); header->lastHibContTime = mach_continuous_time(); -#if HIBERNATE_HMAC_IMAGE - // include the headers in the SHA calculation - SHA256_Update(&imageShaCtx, header, sizeof(*header)); - - // finalize the image header SHA - uint8_t imageHash[CCSHA256_OUTPUT_SIZE]; - SHA256_Final(imageHash, &imageShaCtx); - - // compute the header HMAC - ppl_hmac_finalize_image(imageHash, sizeof(imageHash), header->imageHeaderHMAC, sizeof(header->imageHeaderHMAC)); -#endif /* HIBERNATE_HMAC_IMAGE */ IOPolledFileSeek(vars->fileVars, 0); err = IOHibernatePolledFileWrite(vars, @@ -2343,9 +2233,7 @@ hibernate_write_image(void) uncompressedSize, atop_32(uncompressedSize), compressedSize, uncompressedSize ? ((int) ((compressedSize * 100ULL) / uncompressedSize)) : 0); -#if !HIBERNATE_HMAC_IMAGE HIBLOG("\nsum1 %x, sum2 %x\n", sum1, sum2); -#endif /* !HIBERNATE_HMAC_IMAGE */ HIBLOG("svPageCount %d, zvPageCount %d, wiredPagesEncrypted %d, wiredPagesClear %d, dirtyPagesEncrypted %d\n", svPageCount, zvPageCount, wiredPagesEncrypted, wiredPagesClear, dirtyPagesEncrypted); @@ -2643,10 +2531,6 @@ hibernate_machine_init(void) HIBLOG("hibernate_machine_init reading\n"); -#if HIBERNATE_HMAC_IMAGE - // Reset SHA context to verify image2 hash (non-wired pages). 
- ppl_hmac_reset(false); -#endif /* HIBERNATE_HMAC_IMAGE */ uint32_t * header = (uint32_t *) src; sum = 0; @@ -2721,12 +2605,6 @@ hibernate_machine_init(void) panic("Hibernate restore error %x", err); } -#if HIBERNATE_HMAC_IMAGE - err = ppl_hmac_update_and_compress_page(ppnum, NULL, NULL); - if (err) { - panic("Hibernate restore error %x", err); - } -#endif /* HIBERNATE_HMAC_IMAGE */ ppnum++; pagesDone++; @@ -2753,13 +2631,6 @@ hibernate_machine_init(void) panic("Hibernate restore error %x", err); } -#if HIBERNATE_HMAC_IMAGE - uint8_t image2PagesHMAC[HIBERNATE_HMAC_SIZE]; - ppl_hmac_final(image2PagesHMAC, sizeof(image2PagesHMAC)); - if (memcmp(image2PagesHMAC, gIOHibernateCurrentHeader->image2PagesHMAC, sizeof(image2PagesHMAC)) != 0) { - panic("image2 pages corrupted"); - } -#endif /* HIBERNATE_HMAC_IMAGE */ gIOHibernateCurrentHeader->actualImage2Sum = sum; gIOHibernateCompression = gIOHibernateCurrentHeader->compression; diff --git a/iokit/Kernel/IOHibernateInternal.h b/iokit/Kernel/IOHibernateInternal.h index b28a270a3..cbb8cc2eb 100644 --- a/iokit/Kernel/IOHibernateInternal.h +++ b/iokit/Kernel/IOHibernateInternal.h @@ -31,22 +31,12 @@ #ifdef __cplusplus -#if HIBERNATE_HMAC_IMAGE -#include -#endif /* HIBERNATE_HMAC_IMAGE */ enum { kIOHibernateAESKeySize = 16 }; /* bytes */ -#if HIBERNATE_HMAC_IMAGE -// when we call out to PPL to compute IOHibernateHibSegInfo, we use -// srcBuffer as a temporary buffer, to copy out all of the required -// HIB segments, so it should be big enough to contain those segments -#define HIBERNATION_SRC_BUFFER_SIZE (16 * 1024 * 1024) -#else // srcBuffer has to be big enough for a source page, the WKDM // compressed output, and a scratch page needed by WKDM #define HIBERNATION_SRC_BUFFER_SIZE (2 * page_size + WKdm_SCRATCH_BUF_SIZE_INTERNAL) -#endif struct IOHibernateVars { hibernate_page_list_t * page_list; @@ -73,9 +63,6 @@ struct IOHibernateVars { uint8_t cryptKey[kIOHibernateAESKeySize]; size_t volumeCryptKeySize; uint8_t 
volumeCryptKey[64]; -#if HIBERNATE_HMAC_IMAGE - SHA256_CTX * imageShaCtx; -#endif /* HIBERNATE_HMAC_IMAGE */ }; typedef struct IOHibernateVars IOHibernateVars; diff --git a/iokit/Kernel/IOHibernateRestoreKernel.c b/iokit/Kernel/IOHibernateRestoreKernel.c index f0ec6fc8d..9ab456022 100644 --- a/iokit/Kernel/IOHibernateRestoreKernel.c +++ b/iokit/Kernel/IOHibernateRestoreKernel.c @@ -1369,7 +1369,6 @@ __attribute__((optnone)) debug_code(' sp', context->ss.ss_64.sp); debug_code(' pc', context->ss.ss_64.pc); debug_code('cpsr', context->ss.ss_64.cpsr); - debug_code('asps', context->ss.ss_64.aspsr); debug_code(' far', context->ss.ss_64.far); debug_code(' esr', context->ss.ss_64.esr); diff --git a/iokit/Kernel/IONVRAM.cpp b/iokit/Kernel/IONVRAM.cpp index daa6cf7cb..69725d403 100644 --- a/iokit/Kernel/IONVRAM.cpp +++ b/iokit/Kernel/IONVRAM.cpp @@ -100,6 +100,18 @@ OSDefineMetaClassAndStructors(IODTNVRAM, IOService); #define DEBUG_ERROR DEBUG_ALWAYS +#define CONTROLLERLOCK() \ +({ \ + if (preemption_enabled() && !panic_active()) \ + IOLockLock(_controllerLock); \ +}) + +#define CONTROLLERUNLOCK() \ +({ \ + if (preemption_enabled() && !panic_active()) \ + IOLockUnlock(_controllerLock); \ +}) + #define NVRAMLOCK() \ ({ \ if (preemption_enabled() && !panic_active()) \ @@ -660,6 +672,11 @@ IODTNVRAM::init(IORegistryEntry *old, const IORegistryPlane *plane) return false; } + _controllerLock = IOLockAlloc(); + if (!_controllerLock) { + return false; + } + PE_parse_boot_argn("nvram-log", &gNVRAMLogging, sizeof(gNVRAMLogging)); dict = OSDictionary::withCapacity(1); @@ -763,6 +780,8 @@ IODTNVRAM::getNVRAMSize(void) void IODTNVRAM::registerNVRAMController(IONVRAMController *nvram) { + IOReturn ret; + if (_nvramController != nullptr) { DEBUG_ERROR("Duplicate controller set\n"); return; @@ -823,16 +842,15 @@ no_system: if (!_commonService->start(this)) { DEBUG_ERROR("Unable to start the common service!\n"); - _systemService->detach(this); + _commonService->detach(this); 
OSSafeReleaseNULL(_commonService); goto no_common; } } no_common: - NVRAMLOCK(); - (void) syncVariables(); - NVRAMUNLOCK(); + ret = serializeVariables(); + DEBUG_INFO("serializeVariables ret=0x%08x\n", ret); } void @@ -927,9 +945,10 @@ IODTNVRAM::syncInternal(bool rateLimit) } DEBUG_INFO("Calling sync()\n"); - NVRAMLOCK(); + + CONTROLLERLOCK(); _nvramController->sync(); - NVRAMUNLOCK(); + CONTROLLERUNLOCK(); } void @@ -942,49 +961,53 @@ bool IODTNVRAM::serializeProperties(OSSerialize *s) const { const OSSymbol *key; - OSSharedPtr dict; + OSSharedPtr systemDict, commonDict, dict; OSSharedPtr iter; bool result = false; unsigned int totalCapacity = 0; NVRAMLOCK(); if (_commonDict) { - totalCapacity += _commonDict->getCapacity(); + commonDict = OSDictionary::withDictionary(_commonDict.get()); } if (_systemDict) { - totalCapacity += _systemDict->getCapacity(); + systemDict = OSDictionary::withDictionary(_systemDict.get()); } + NVRAMUNLOCK(); + + totalCapacity += (commonDict != nullptr) ? commonDict->getCapacity() : 0; + totalCapacity += (systemDict != nullptr) ? 
systemDict->getCapacity() : 0; dict = OSDictionary::withCapacity(totalCapacity); if (dict == nullptr) { DEBUG_ERROR("No dictionary\n"); - goto unlock; + goto exit; } // Copy system entries first if present then copy unique common entries - if (_systemDict != nullptr) { - iter = OSCollectionIterator::withCollection(_systemDict.get()); + if (systemDict != nullptr) { + iter = OSCollectionIterator::withCollection(systemDict.get()); if (iter == nullptr) { DEBUG_ERROR("failed to create iterator\n"); - goto unlock; + goto exit; } while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { if (verifyPermission(kIONVRAMOperationRead, &gAppleSystemVariableGuid, key)) { - dict->setObject(key, _systemDict->getObject(key)); + dict->setObject(key, systemDict->getObject(key)); } } iter.reset(); } - if (_commonDict != nullptr) { - iter = OSCollectionIterator::withCollection(_commonDict.get()); + if (commonDict != nullptr) { + iter = OSCollectionIterator::withCollection(commonDict.get()); if (iter == nullptr) { DEBUG_ERROR("failed to create common iterator\n"); - goto unlock; + goto exit; } while ((key = OSDynamicCast(OSSymbol, iter->getNextObject()))) { @@ -993,16 +1016,14 @@ IODTNVRAM::serializeProperties(OSSerialize *s) const continue; } if (verifyPermission(kIONVRAMOperationRead, &gAppleNVRAMGuid, key)) { - dict->setObject(key, _commonDict->getObject(key)); + dict->setObject(key, commonDict->getObject(key)); } } } result = dict->serialize(s); -unlock: - NVRAMUNLOCK(); - +exit: DEBUG_INFO("result=%d\n", result); return result; @@ -1053,8 +1074,6 @@ IODTNVRAM::handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj, _commonDict->flushCollection(); DEBUG_INFO("system & common dictionary flushed\n"); - - err = syncVariables(); } special = true; @@ -1112,7 +1131,6 @@ IODTNVRAM::handleSpecialVariables(const char *name, uuid_t *guid, OSObject *obj, } special = true; - err = syncVariables(); } exit: @@ -1132,6 +1150,15 @@ IODTNVRAM::copyProperty(const OSSymbol *aKey) 
const OSDictionary *dict; OSSharedPtr theObject = nullptr; + if (aKey->isEqualTo(kIOBSDNameKey) || + aKey->isEqualTo(kIOBSDNamesKey) || + aKey->isEqualTo(kIOBSDMajorKey) || + aKey->isEqualTo(kIOBSDMinorKey) || + aKey->isEqualTo(kIOBSDUnitKey)) { + // These will never match. + // Check here and exit to avoid logging spam + return nullptr; + } DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy()); parseVariableName(aKey->getCStringNoCopy(), &varGuid, &variableName); @@ -1204,6 +1231,7 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) uuid_t varGuid; OSDictionary *dict; bool deletePropertyKey, syncNowPropertyKey, forceSyncNowPropertyKey; + bool ok; size_t propDataSize = 0; DEBUG_INFO("aKey=%s\n", aKey->getCStringNoCopy()); @@ -1308,11 +1336,15 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) } NVRAMLOCK(); + ok = handleSpecialVariables(variableName, &varGuid, propObject.get(), &result); + NVRAMUNLOCK(); - if (handleSpecialVariables(variableName, &varGuid, propObject.get(), &result)) { - goto unlock; + if (ok) { + serializeVariables(); + goto exit; } + NVRAMLOCK(); oldObject.reset(dict->getObject(variableName), OSRetain); if (remove == false) { DEBUG_INFO("Adding object\n"); @@ -1328,17 +1360,22 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) result = kIOReturnNotFound; } } + NVRAMUNLOCK(); if (result == kIOReturnSuccess) { - result = syncVariables(); + result = serializeVariables(); if (result != kIOReturnSuccess) { - DEBUG_ERROR("syncVariables failed, result=0x%08x\n", result); + DEBUG_ERROR("serializeVariables failed, result=0x%08x\n", result); + + NVRAMLOCK(); if (oldObject) { dict->setObject(variableName, oldObject.get()); } else { dict->removeObject(variableName); } - (void) syncVariables(); + NVRAMUNLOCK(); + + (void) serializeVariables(); result = kIOReturnNoMemory; } } @@ -1350,9 +1387,6 @@ IODTNVRAM::setPropertyInternal(const OSSymbol *aKey, OSObject *anObject) propObject.reset(); } 
-unlock: - NVRAMUNLOCK(); - exit: DEBUG_INFO("result=0x%08x\n", result); @@ -1371,12 +1405,12 @@ IODTNVRAM::removeProperty(const OSSymbol *aKey) IOReturn ret; NVRAMLOCK(); - ret = removePropertyInternal(aKey); - NVRAMUNLOCK(); - if (ret != kIOReturnSuccess) { + if (ret == kIOReturnSuccess) { + serializeVariables(); + } else { DEBUG_INFO("removePropertyInternal failed, ret=0x%08x\n", ret); } } @@ -1409,7 +1443,6 @@ IODTNVRAM::removePropertyInternal(const OSSymbol *aKey) // If the object exists, remove it from the dictionary. if (dict->getObject(variableName) != nullptr) { dict->removeObject(variableName); - result = syncVariables(); } exit: @@ -1601,7 +1634,6 @@ IODTNVRAM::initVariables(void) OSSharedPtr propSymbol; OSSharedPtr propObject; NVRAMRegionInfo *currentRegion; - NVRAMRegionInfo variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage}, { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} }; @@ -1682,20 +1714,22 @@ IODTNVRAM::syncOFVariables(void) } IOReturn -IODTNVRAM::syncVariables(void) +IODTNVRAM::serializeVariables(void) { + IOReturn ret; bool ok; UInt32 length, maxLength, regionIndex; UInt8 *buffer, *tmpBuffer; const OSSymbol *tmpSymbol; OSObject *tmpObject; OSSharedPtr iter; + OSSharedPtr sizeUsed; + UInt32 systemUsed = 0; + UInt32 commonUsed = 0; + OSSharedPtr nvramImage; NVRAMRegionInfo *currentRegion; - - NVRAMRegionInfo variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage}, - { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, _systemDict, _systemImage} }; - - NVRAMLOCKASSERT(); + NVRAMRegionInfo variableRegions[] = { { NVRAM_CHRP_PARTITION_NAME_COMMON, _commonPartitionOffset, _commonPartitionSize, _commonDict, _commonImage}, + { NVRAM_CHRP_PARTITION_NAME_SYSTEM, _systemPartitionOffset, _systemPartitionSize, 
_systemDict, _systemImage} }; if (_systemPanicked) { return kIOReturnNotReady; @@ -1708,8 +1742,9 @@ IODTNVRAM::syncVariables(void) DEBUG_INFO("...\n"); + NVRAMLOCK(); + for (regionIndex = 0; regionIndex < ARRAY_SIZE(variableRegions); regionIndex++) { - OSSharedPtr sizeUsed; currentRegion = &variableRegions[regionIndex]; if (currentRegion->size == 0) { @@ -1755,16 +1790,14 @@ IODTNVRAM::syncVariables(void) IODelete(buffer, UInt8, currentRegion->size); - sizeUsed = OSNumber::withNumber(maxLength, 32); - _nvramController->setProperty(currentRegion->name, sizeUsed.get()); - sizeUsed.reset(); - if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_SYSTEM, strlen(NVRAM_CHRP_PARTITION_NAME_SYSTEM)) == 0) && (_systemService != nullptr)) { _systemService->setProperties(_systemDict.get()); + systemUsed = maxLength; } else if ((strncmp(currentRegion->name, NVRAM_CHRP_PARTITION_NAME_COMMON, strlen(NVRAM_CHRP_PARTITION_NAME_COMMON)) == 0) && (_commonService != nullptr)) { _commonService->setProperties(_commonDict.get()); + commonUsed = maxLength; } if (!ok) { @@ -1772,9 +1805,31 @@ IODTNVRAM::syncVariables(void) } } + nvramImage = OSData::withBytes(_nvramImage, _nvramSize); + + NVRAMUNLOCK(); + DEBUG_INFO("ok=%d\n", ok); - return _nvramController->write(0, _nvramImage, _nvramSize); + CONTROLLERLOCK(); + + if (_systemService) { + sizeUsed = OSNumber::withNumber(systemUsed, 32); + _nvramController->setProperty("SystemUsed", sizeUsed.get()); + sizeUsed.reset(); + } + + if (_commonService) { + sizeUsed = OSNumber::withNumber(commonUsed, 32); + _nvramController->setProperty("CommonUsed", sizeUsed.get()); + sizeUsed.reset(); + } + + ret = _nvramController->write(0, (uint8_t *)nvramImage->getBytesNoCopy(), nvramImage->getLength()); + + CONTROLLERUNLOCK(); + + return ret; } UInt32 @@ -2344,22 +2399,25 @@ IODTNVRAM::writeNVRAMPropertyType1(IORegistryEntry *entry, ok = _commonDict->setObject(_registryPropertiesKey.get(), data.get()); } + NVRAMUNLOCK(); + if (ok) { - if 
(syncVariables() != kIOReturnSuccess) { + if (serializeVariables() != kIOReturnSuccess) { + NVRAMLOCK(); if (oldData) { _commonDict->setObject(_registryPropertiesKey.get(), oldData.get()); } else { _commonDict->removeObject(_registryPropertiesKey.get()); } - (void) syncVariables(); + NVRAMUNLOCK(); + + (void) serializeVariables(); ok = false; } } oldData.reset(); - NVRAMUNLOCK(); - return ok ? kIOReturnSuccess : kIOReturnNoMemory; } diff --git a/iokit/Kernel/IOPMrootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp index b4faa6330..2aec3e5c3 100644 --- a/iokit/Kernel/IOPMrootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -57,9 +57,6 @@ #include "IOKitKernelInternal.h" #if HIBERNATION #include -#if __arm64__ -#include -#endif /* __arm64__ */ #endif /* HIBERNATION */ #include #include @@ -578,7 +575,6 @@ defaultSleepPolicyHandler(void *ctx, const IOPMSystemSleepPolicyVariables *vars, // Hibernation enabled and either user forced hibernate or low battery sleep if ((vars->hibernateMode & kIOHibernateModeOn) && - ppl_hib_hibernation_supported() && (((vars->hibernateMode & kIOHibernateModeSleep) == 0) || (vars->sleepFactors & kIOPMSleepFactorBatteryLow))) { sleepType = kIOPMSleepTypeHibernate; @@ -1773,9 +1769,6 @@ IOPMrootDomain::start( IOService * nub ) #if HIBERNATION #if defined(__arm64__) - if (ppl_hib_hibernation_supported()) { - publishFeature(kIOHibernateFeatureKey); - } #endif /* defined(__arm64__) */ IOHibernateSystemInit(this); #endif diff --git a/iokit/Kernel/IOPolledInterface.cpp b/iokit/Kernel/IOPolledInterface.cpp index ccbea3d90..9ff9f8043 100644 --- a/iokit/Kernel/IOPolledInterface.cpp +++ b/iokit/Kernel/IOPolledInterface.cpp @@ -591,19 +591,7 @@ IOGetHibernationCryptKey(uint8_t * hibernationKey, uint32_t *swSeed ) { -#if XNU_MONITOR_PPL_HIB - SEPHibernator *hibernator = SEPHibernator::sepHibernator(); - sephib_wrapped_key_t wrappedKey = {}; - sephib_seprom_hib_payload_t sepromPayload = {}; - hibernator->prepareToHibernate(&wrappedKey, &sepromPayload); - 
*swSeed = sepromPayload.sw_seed; - assert(*keySize >= sizeof(wrappedKey.data)); - *keySize = sizeof(wrappedKey.data); - memcpy(hibernationKey, wrappedKey.data, *keySize); - return kIOReturnSuccess; -#else return kIOReturnNotFound; -#endif } #endif /* defined(__arm64__) */ diff --git a/iokit/conf/Makefile.arm64 b/iokit/conf/Makefile.arm64 index f40ee0aaf..b0684974c 100644 --- a/iokit/conf/Makefile.arm64 +++ b/iokit/conf/Makefile.arm64 @@ -19,8 +19,6 @@ HIB_FILES=$(filter $(UNCONFIGURED_HIB_FILES),$(OBJS)) IOHibernateRestoreKernel.o_CFLAGS_ADD += $(CFLAGS_NOLTO_FLAG) -fno-sanitize=address -UKASAN # Stack protector and stack check must be disabled because the stack protector runtime isn't available IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-stack-protector -fno-stack-check -# signing keys aren't set up yet, so ptrauth must be disabled -IOHibernateRestoreKernel.o_CFLAGS_ADD += -fno-ptrauth-calls IOHibernateIO.cpo_CFLAGS_ADD += -I$(SRCROOT)/osfmk IOHibernateRestoreKernel.o_CFLAGS_ADD += -I$(SRCROOT)/osfmk diff --git a/libkern/c++/OSKext.cpp b/libkern/c++/OSKext.cpp index aa1c83d5b..8b7c090e3 100644 --- a/libkern/c++/OSKext.cpp +++ b/libkern/c++/OSKext.cpp @@ -3828,11 +3828,13 @@ OSKext::lookupKextWithAddress(vm_address_t address) OSSharedPtr foundKext; // returned uint32_t count, i; kmod_info_t *kmod_info; + vm_address_t originalAddress; #if defined(__arm64__) uint64_t textExecBase; size_t textExecSize; #endif /* defined(__arm64__) */ + originalAddress = address; #if __has_feature(ptrauth_calls) address = (vm_address_t)VM_KERNEL_STRIP_PTR(address); #endif /* __has_feature(ptrauth_calls) */ @@ -3868,7 +3870,8 @@ OSKext::lookupKextWithAddress(vm_address_t address) } /* * DriverKit userspace executables do not have a kernel linkedExecutable, - * so we "fake" their address range with the LoadTag. + * so we "fake" their address range with the LoadTag. We cannot use the ptrauth-stripped address + * here, so use the original address passed to this method. 
* * This is supposed to be used for logging reasons only. When logd * calls this function it ors the address with FIREHOSE_TRACEPOINT_PC_KERNEL_MASK, so we @@ -3876,7 +3879,7 @@ OSKext::lookupKextWithAddress(vm_address_t address) * Also we need to remove FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT set when emitting the log line. */ - address = address & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT); + address = originalAddress & ~(FIREHOSE_TRACEPOINT_PC_KERNEL_MASK | FIREHOSE_TRACEPOINT_PC_DYNAMIC_BIT); count = sLoadedDriverKitKexts->getCount(); for (i = 0; i < count; i++) { OSKext * thisKext = OSDynamicCast(OSKext, sLoadedDriverKitKexts->getObject(i)); @@ -3901,14 +3904,6 @@ OSKext::copyKextUUIDForAddress(OSNumber *address) return NULL; } - uintptr_t addr = ml_static_slide((uintptr_t)address->unsigned64BitValue()); - if (addr == 0) { - return NULL; - } -#if __has_feature(ptrauth_calls) - addr = (uintptr_t)VM_KERNEL_STRIP_PTR(addr); -#endif /* __has_feature(ptrauth_calls) */ - #if CONFIG_MACF /* Is the calling process allowed to query kext info? */ if (current_task() != kernel_task) { @@ -3928,10 +3923,28 @@ OSKext::copyKextUUIDForAddress(OSNumber *address) } } #endif - kext = lookupKextWithAddress(addr); - if (kext) { - uuid = kext->copyTextUUID(); + + uintptr_t slidAddress = ml_static_slide((uintptr_t)address->unsigned64BitValue()); + if (slidAddress != 0) { + kext = lookupKextWithAddress(slidAddress); + if (kext) { + uuid = kext->copyTextUUID(); + } + } + + if (!uuid) { + /* + * If we still don't have a UUID, then we failed to match the slid + stripped address with + * a kext. This might have happened because the log message came from a dext. + * + * Try again with the original address. 
+ */ + kext = lookupKextWithAddress((vm_address_t)address->unsigned64BitValue()); + if (kext && kext->isDriverKit()) { + uuid = kext->copyTextUUID(); + } } + return uuid; } @@ -5392,10 +5405,8 @@ OSKext::loadCodelessKext(OSString *kextIdentifier, OSDictionary *requestDict) OSKext::recordIdentifierRequest(OSDynamicCast(OSString, newKext->getIdentifier())); result = kOSReturnSuccess; - /* send the kext's personalities to the IOCatalog */ - if (!newKext->flags.requireExplicitLoad) { - result = newKext->sendPersonalitiesToCatalog(true, NULL); - } + /* Send the kext's personalities to the IOCatalog. This is an explicit load. */ + result = newKext->sendPersonalitiesToCatalog(true, NULL); } finish: @@ -11843,6 +11854,13 @@ OSKext::loadFileSetKexts(OSDictionary * requestDict __unused) IORecursiveLockLock(sKextLock); + if (!sLoadEnabled) { + OSKextLog(NULL, kOSKextLogErrorLevel | kOSKextLogIPCFlag, + "KextLog: Kext loading is disabled (attempt to load KCs)."); + IORecursiveLockUnlock(sKextLock); + return ret; + } + pageable_filepath = OSDynamicCast(OSString, requestArgs->getObject(kKextRequestArgumentPageableKCFilename)); diff --git a/libkern/libkern/section_keywords.h b/libkern/libkern/section_keywords.h index 90382aa4d..9763726b6 100644 --- a/libkern/libkern/section_keywords.h +++ b/libkern/libkern/section_keywords.h @@ -38,6 +38,22 @@ #define __SECTION_START_SYM(seg, sect) asm("section$start$" seg "$" sect) #define __SECTION_END_SYM(seg, sect) asm("section$end$" seg "$" sect) +#if defined(__arm64__) || defined (__x86_64__) + +#define SECURITY_SEGMENT_NAME "__DATA" +#define SECURITY_SECTION_NAME "__const" +#define SECURITY_SEGMENT_SECTION_NAME "__DATA,__const" + +#define __security_const_early const +#define __security_const_late __attribute__((section(SECURITY_SEGMENT_SECTION_NAME))) +#define __security_read_write + +#if HIBERNATION +#define MARK_AS_HIBERNATE_TEXT __attribute__((section("__HIB, __text, regular, pure_instructions"))) +#define MARK_AS_HIBERNATE_DATA 
__attribute__((section("__HIB, __data"))) +#define MARK_AS_HIBERNATE_DATA_CONST_LATE __attribute__((section("__HIB, __const"))) +#endif /* HIBERNATION */ +#endif /* __arm64__ || __x86_64__ */ #ifndef __security_const_early #define __security_const_early const diff --git a/makedefs/MakeInc.cmd b/makedefs/MakeInc.cmd index 2bbd75c38..8f654850a 100644 --- a/makedefs/MakeInc.cmd +++ b/makedefs/MakeInc.cmd @@ -54,6 +54,7 @@ _LOG_HOST_LINK = $(call LOG,$1,$(ColorH),$(ColorLF),$(LOG_PFX_LEN)) LOG_LDFILELIST = $(call LOG,LDFILELIST,$(ColorL),$(ColorLF),$(LOG_PFX_LEN_ADJ)) LOG_MIG = $(call LOG,MIG,$(ColorM),$(ColorF),$(LOG_PFX_LEN_ADJ)) LOG_LD = $(call LOG,LD,$(ColorL),$(ColorF),$(LOG_PFX_LEN_ADJ)) +LOG_ALIGN = $(call LOG,--------->,$(Color0),$(Color0),$(LOG_PFX_LEN)) # Compiling/machine-specific operations. LOG_CC = $(call _LOG_COMP,CC) @@ -81,6 +82,7 @@ LOG_ALIAS = $(call _LOG_HOST,ALIAS) LOG_STRIP = $(call _LOG_HOST,STRIP) LOG_DSYMUTIL = $(call _LOG_HOST,DSYMUTIL) LOG_LIBTOOL = $(call _LOG_HOST,LIBTOOL) +LOG_FILEPREP = $(call _LOG_HOST,FILEPREP) # Host-side linking operations. LOG_GENASSYM = $(call _LOG_HOST_LINK,GENASSYM) diff --git a/makedefs/MakeInc.color b/makedefs/MakeInc.color new file mode 100644 index 000000000..ba3cfd7dd --- /dev/null +++ b/makedefs/MakeInc.color @@ -0,0 +1,55 @@ +# -*- mode: makefile;-*- +# +# Copyright (C) 2020 Apple Inc. All rights reserved. +# +# MakeInc.color defines macros used to enable +# colored output of the build log. 
+# + +define _setup_build_log_colors +ifeq ($${XNU_LOGCOLORS},y) + LOGCOLORS ?= y +endif +ifeq ($$(LOGCOLORS),y) + # Get a nice list of device code names associated with the build platform + ifndef CDevs + #ifdef EMBEDDED_DEVICE_MAP + # export CDevs := $$(shell $$(EMBEDDED_DEVICE_MAP) -db $$(EDM_DBPATH) -query "SELECT DISTINCT TargetType FROM Targets WHERE KernelPlatform = '$$(CURRENT_MACHINE_CONFIG_LC)'" | tr '[\r\n]' ':' | sed 's,:$$$$,,') + #endif + endif + ifndef MACHINE_PFX + export _MACHINE := $$(CURRENT_MACHINE_CONFIG_LC) + ifeq ($$(CURRENT_MACHINE_CONFIG),NONE) + export _MACHINE := $$(subst OSX,,$$(PLATFORM)) + endif + export MACHINE_PFX := $$(shell __A="$$(CURRENT_ARCH_CONFIG_LC)"; \ + __As=$$$$((6-$$$${\#__A})); \ + printf "%-.6s%*.*s %9.9s" \ + "$$$${__A}" \ + $$$${__As} $$$${__As} " " \ + "$$(_MACHINE)") + endif + override LOG_PFX_LEN := 30 + override LOG_PFX_LEN_ADJ := $$(shell __TMP="$$(MACHINE_PFX)"; \ + printf "%d" $$$$(($$(LOG_PFX_LEN) - $$$${\#__TMP} - 3))) + MACHINE_PFX_COL = $$(shell printf "\\033[1m%s\\033[m" "$$(MACHINE_PFX)") + # Turn off colored output + Color0:=$$(shell printf "\\033[m") + # Start a host command: bold text + ColorH:=$$(shell printf "\\033[1m") + # Start a compilation-related command: blue text + ColorC:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;34m") + # Start a MIG command: green text + ColorM:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;32m") + # Start a linking command: purple text + ColorL:=$$(shell printf "[$$(MACHINE_PFX_COL)] \\033[1;35m") + # Start a filename + ColorF:=$$(shell printf "") + # Start a linked file name: italic text + ColorLF:=$$(shell printf "\\033[3m") + # Error strings: red text + ColorErr:=$$(shell printf "\033[31m") +endif +endef + +# vim: set ft=make: diff --git a/makedefs/MakeInc.def b/makedefs/MakeInc.def index d53f3e1a7..508db7f96 100644 --- a/makedefs/MakeInc.def +++ b/makedefs/MakeInc.def @@ -32,7 +32,7 @@ SUPPORTED_ARM_MACHINE_CONFIGS = NONE SUPPORTED_ARM64_MACHINE_CONFIGS = NONE 
else SUPPORTED_ARM_MACHINE_CONFIGS = T8002 T8004 -SUPPORTED_ARM64_MACHINE_CONFIGS = T7000 T7001 S8000 S8001 T8010 T8011 BCM2837 +SUPPORTED_ARM64_MACHINE_CONFIGS = T7000 T7001 S8000 S8001 T8010 T8011 BCM2837 T8020 T8101 T8103 endif @@ -68,6 +68,9 @@ MACHINE_FLAGS_ARM_T8004 = -DARM_BOARD_CONFIG_T8004 MACHINE_FLAGS_ARM64_T8010 = -DARM64_BOARD_CONFIG_T8010 -mcpu=hurricane MACHINE_FLAGS_ARM64_T8011 = -DARM64_BOARD_CONFIG_T8011 -mcpu=hurricane MACHINE_FLAGS_ARM64_BCM2837 = -DARM64_BOARD_CONFIG_BCM2837 +MACHINE_FLAGS_ARM64_T8020 = -DARM64_BOARD_CONFIG_T8020 -mcpu=vortex +MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -mcpu=firestorm +MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -mcpu=firestorm # @@ -240,6 +243,9 @@ export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell $(EMBEDDED_DEVICE_MAP) else # Without embdedded device map, use a default arch string export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := $(shell echo $(CURRENT_ARCH_CONFIG) | tr A-Z a-z) +ifneq ($(filter ARM64,$(CURRENT_ARCH_CONFIG)),) +export ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG := arm64e +endif endif endif @@ -275,7 +281,7 @@ endif # Default CFLAGS # ifdef RC_NONARCH_CFLAGS -OTHER_CFLAGS = $(RC_NONARCH_CLFAGS) +OTHER_CFLAGS = $(RC_NONARCH_CFLAGS) endif # @@ -680,9 +686,10 @@ LDFLAGS_KERNEL_GENARM64 = \ -Wl,-sectcreate,"__PLK_LLVM_COV",__llvm_covmap,/dev/null \ -Wl,-sectcreate,"__PLK_LINKEDIT",__data,/dev/null - -LDFLAGS_KERNEL_SEGARM64 ?= \ - -Wl,-segment_order,__TEXT:__DATA_CONST:__LINKEDIT:__TEXT_EXEC:__LAST:__KLD:__DATA:__BOOTDATA +LDFLAGS_KERNEL_SEGARM64 = \ + -Wl,-rename_section,__PPLDATA,__const,__PPLDATA_CONST,__const \ + -Wl,-segment_order,__TEXT:__DATA_CONST:__LINKEDIT:__TEXT_EXEC:__PPLTEXT:__PPLTRAMP:__PPLDATA_CONST:__LASTDATA_CONST:__LAST:__PPLDATA:__KLD:__DATA:__HIBDATA:__BOOTDATA \ + -Wl,-segprot,__PPLTEXT,r-x,r-x -Wl,-segprot,__PPLTRAMP,r-x,r-x -Wl,-segprot,__PPLDATA_CONST,r--,r-- -Wl,-segprot,__LASTDATA_CONST,r--,r-- -Wl,-segprot,__LAST,r-x,r-x 
LDFLAGS_KERNEL_RELEASEARM64 = \ $(LDFLAGS_KERNEL_GENARM64) \ @@ -726,7 +733,7 @@ LDFLAGS_KERNEL = $(LDFLAGS_KERNEL_GEN) \ $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_))) \ $(DEPLOYMENT_TARGET_FLAGS) -LDFLAGS_KERNEL_ONLY = \ +LDFLAGS_KERNEL_ONLY += \ $($(addsuffix $(CURRENT_ARCH_CONFIG), $(addsuffix $(CURRENT_KERNEL_CONFIG),LDFLAGS_KERNEL_ONLY_CONFIG_))) \ $($(addsuffix $(ARCH_STRING_FOR_CURRENT_MACHINE_CONFIG),LDFLAGS_KERNEL_ONLY_SUBARCH_)) \ -Wl,-alias_list,$(TARGET)/all-alias.exp \ @@ -738,6 +745,11 @@ LDFLAGS_KERNEL_ONLY = \ LD_KERNEL_LIBS = -lcc_kext LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel +# Link opensource binary library +ifneq ($(filter T8020 T8020 T8101 T8101,$(CURRENT_MACHINE_CONFIG)),) +LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a +endif + # # DTrace support # diff --git a/makedefs/MakeInc.kernel b/makedefs/MakeInc.kernel index f630a4ba0..222b355d1 100644 --- a/makedefs/MakeInc.kernel +++ b/makedefs/MakeInc.kernel @@ -1,6 +1,6 @@ # -*- mode: makefile;-*- # -# Copyright (C) 1999-2016 Apple Inc. All rights reserved. +# Copyright (C) 1999-2020 Apple Inc. All rights reserved. # # MakeInc.kernel augments the single-architecture # recursive build system with rules specific @@ -416,3 +416,5 @@ install_alias: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(ALIAS_FILE_NAME) print_exports: $(_v)printenv | sort + +# vim: set ft=make: diff --git a/makedefs/MakeInc.rule b/makedefs/MakeInc.rule index ad66233b6..d95e8186c 100644 --- a/makedefs/MakeInc.rule +++ b/makedefs/MakeInc.rule @@ -1,6 +1,6 @@ # -*- mode: makefile;-*- # -# Copyright (C) 1999-2016 Apple Inc. All rights reserved. +# Copyright (C) 1999-2020 Apple Inc. All rights reserved. 
# # MakeInc.rule defines the targets and rules for # leaf directories once MakeInc.dir has recursed @@ -8,6 +8,7 @@ # to allow the Makefile in the source directory # to augment the actions that will be performed. # +include $(SRCROOT)/makedefs/MakeInc.color # # Generic Install rules @@ -45,50 +46,7 @@ ifndef INSTALL_KF_MD_GEN_LIST INSTALL_KF_MD_GEN_LIST = $(EXPORT_MD_GEN_LIST) endif -ifeq (${XNU_LOGCOLORS},y) - LOGCOLORS ?= y -endif - -ifeq ($(LOGCOLORS),y) - # Get a nice list of device code names associated with the build platform - ifndef CDevs - #ifdef EMBEDDED_DEVICE_MAP - # export CDevs := $(shell $(EMBEDDED_DEVICE_MAP) -db $(EDM_DBPATH) -query "SELECT DISTINCT TargetType FROM Targets WHERE KernelPlatform = '$(CURRENT_MACHINE_CONFIG_LC)'" | tr '[\r\n]' ':' | sed 's,:$$,,') - #endif - endif - ifndef MACHINE_PFX - export _MACHINE := $(CURRENT_MACHINE_CONFIG_LC) - ifeq ($(CURRENT_MACHINE_CONFIG),NONE) - export _MACHINE := $(subst OSX,,$(PLATFORM)) - endif - export MACHINE_PFX := $(shell __A="$(CURRENT_ARCH_CONFIG_LC)"; \ - __As=$$((6-$${\#__A})); \ - printf "%-.6s%*.*s %9.9s" \ - "$${__A}" \ - $${__As} $${__As} " " \ - "$(_MACHINE)") - endif - override LOG_PFX_LEN := 30 - override LOG_PFX_LEN_ADJ := $(shell __TMP="$(MACHINE_PFX)"; \ - printf "%d" $$(($(LOG_PFX_LEN) - $${\#__TMP} - 3))) - MACHINE_PFX_COL = $(shell printf "\\033[1m%s\\033[m" "$(MACHINE_PFX)") - # Turn off colored output - Color0:=$(shell printf "\\033[m") - # Start a host command: bold text - ColorH:=$(shell printf "\\033[1m") - # Start a compilation-related command: blue text - ColorC:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;34m") - # Start a MIG command: green text - ColorM:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;32m") - # Start a linking command: purple text - ColorL:=$(shell printf "[$(MACHINE_PFX_COL)] \\033[1;35m") - # Start a filename - ColorF:=$(shell printf "") - # Start a linked file name: italic text - ColorLF:=$(shell printf "\\033[3m") - # Error strings: red text - 
ColorErr:=$(shell printf "\033[31m") -endif +$(eval $(call _setup_build_log_colors)) .PHONY: ALWAYS diff --git a/makedefs/MakeInc.top b/makedefs/MakeInc.top index 213d0cec1..e1ab49bc3 100644 --- a/makedefs/MakeInc.top +++ b/makedefs/MakeInc.top @@ -1,6 +1,6 @@ # -*- mode: makefile;-*- # -# Copyright (C) 2010-2016 Apple Inc. All rights reserved. +# Copyright (C) 2010-2020 Apple Inc. All rights reserved. # # MakeInc.top is the top-level makefile for the xnu # build system. All the main XBS targets @@ -758,3 +758,5 @@ $(eval $(generated_top_level_print_exports)) .PHONY: print_exports_first_build_config print_exports_first_build_config: print_exports_bootstrap + +# vim: set ft=make: diff --git a/osfmk/arm/arm_init.c b/osfmk/arm/arm_init.c index 9c11427c2..80a448ecb 100644 --- a/osfmk/arm/arm_init.c +++ b/osfmk/arm/arm_init.c @@ -111,7 +111,6 @@ int debug_task; bool need_wa_rdar_55577508 = false; SECURITY_READ_ONLY_LATE(bool) static_kernelcache = false; - #if HAS_BP_RET /* Enable both branch target retention (0x2) and branch direction retention (0x1) across sleep */ uint32_t bp_ret = 3; @@ -154,6 +153,9 @@ void arm_init(boot_args * args); #if __arm64__ unsigned int page_shift_user32; /* for page_size as seen by a 32-bit task */ + +extern void configure_misc_apple_boot_args(void); +extern void configure_misc_apple_regs(void); #endif /* __arm64__ */ @@ -282,33 +284,6 @@ arm_auxkc_init(void *mh, void *base) #endif /* defined(HAS_APPLE_PAC) */ } -#if HAS_IC_INVAL_FILTERS -static void -configure_misc_apple_regs(void) -{ - uint64_t actlr, __unused acfg, __unused ahcr; - - actlr = get_aux_control(); - -#if HAS_IC_INVAL_FILTERS - ahcr = __builtin_arm_rsr64(ARM64_REG_AHCR_EL2); - ahcr |= AHCR_IC_IVAU_EnRegime; - ahcr |= AHCR_IC_IVAU_EnVMID; - ahcr |= AHCR_IC_IALLU_EnRegime; - ahcr |= AHCR_IC_IALLU_EnVMID; - __builtin_arm_wsr64(ARM64_REG_AHCR_EL2, ahcr); -#endif /* HAS_IC_INVAL_FILTERS */ - - -#if HAS_IC_INVAL_FILTERS - actlr |= ACTLR_EL1_IC_IVAU_EnASID; -#endif /* 
HAS_IC_INVAL_FILTERS */ - - set_aux_control(actlr); - -} -#endif /* HAS_IC_INVAL_FILTERS */ - /* * Routine: arm_init * Function: Runs on the boot CPU, once, on entry from iBoot. @@ -341,25 +316,10 @@ arm_init( #if __arm64__ wfe_timeout_configure(); -#if HAS_IC_INVAL_FILTERS + + configure_misc_apple_boot_args(); configure_misc_apple_regs(); -#endif /* HAS_IC_INVAL_FILTERS */ -#if defined(HAS_APPLE_PAC) -#if DEVELOPMENT || DEBUG - boolean_t user_jop = TRUE; - PE_parse_boot_argn("user_jop", &user_jop, sizeof(user_jop)); - if (!user_jop) { - args->bootFlags |= kBootFlagsDisableUserJOP; - } -#endif /* DEVELOPMENT || DEBUG */ - boolean_t user_ts_jop = TRUE; - PE_parse_boot_argn("user_ts_jop", &user_ts_jop, sizeof(user_ts_jop)); - if (!user_ts_jop) { - args->bootFlags |= kBootFlagsDisableUserThreadStateJOP; - } - PE_parse_boot_argn("diversify_user_jop", &diversify_user_jop, sizeof(diversify_user_jop)); -#endif /* defined(HAS_APPLE_PAC) */ { /* @@ -507,6 +467,9 @@ arm_init( */ #if __arm64__ need_wa_rdar_55577508 = cpuid_get_cpufamily() == CPUFAMILY_ARM_LIGHTNING_THUNDER; +#ifndef RC_HIDE_XNU_FIRESTORM + need_wa_rdar_55577508 |= (cpuid_get_cpufamily() == CPUFAMILY_ARM_FIRESTORM_ICESTORM && get_arm_cpu_version() == CPU_VERSION_A0); +#endif #endif /* setup debugging output if one has been chosen */ @@ -623,9 +586,9 @@ arm_init_cpu( __builtin_arm_wsr("pan", 1); #endif -#if HAS_IC_INVAL_FILTERS +#ifdef __arm64__ configure_misc_apple_regs(); -#endif /* HAS_IC_INVAL_FILTERS */ +#endif cpu_data_ptr->cpu_flags &= ~SleepState; #if defined(ARMA7) diff --git a/osfmk/arm/caches_asm.s b/osfmk/arm/caches_asm.s index 5556a00ae..d30f034a0 100644 --- a/osfmk/arm/caches_asm.s +++ b/osfmk/arm/caches_asm.s @@ -31,7 +31,6 @@ #include #include #include "assym.s" -#include "caches_macros.s" /* @@ -144,37 +143,31 @@ fmir_loop: LEXT(CleanPoC_Dcache) LEXT(clean_mmu_dcache) #if !defined(__ARM_L1_WT_CACHE__) - mov r0, #0 - GET_CACHE_CONFIG r0, r1, r2, r3 mov r0, #0 dsb clean_dcacheway: 
clean_dcacheline: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, r1 // increment set index - tst r0, r2 // look for overflow + add r0, r0, #1 << MMU_I7SET // increment set index + tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow beq clean_dcacheline - bic r0, r0, r2 // clear set overflow - adds r0, r0, r3 // increment way + bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow + adds r0, r0, #1 << MMU_I7WAY // increment way bcc clean_dcacheway // loop #endif - HAS_L2_CACHE r0 - cmp r0, #0 - beq clean_skipl2dcache - mov r0, #1 - GET_CACHE_CONFIG r0, r1, r2, r3 +#if __ARM_L2CACHE__ dsb mov r0, #2 clean_l2dcacheway: clean_l2dcacheline: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, r1 // increment set index - tst r0, r2 // look for overflow + add r0, r0, #1 << L2_I7SET // increment set index + tst r0, #1 << (L2_NSET + L2_I7SET) // look for overflow beq clean_l2dcacheline - bic r0, r0, r2 // clear set overflow - adds r0, r0, r3 // increment way + bic r0, r0, #1 << (L2_NSET + L2_I7SET) // clear set overflow + adds r0, r0, #1 << L2_I7WAY // increment way bcc clean_l2dcacheway // loop -clean_skipl2dcache: +#endif dsb bx lr @@ -188,18 +181,16 @@ clean_skipl2dcache: .globl EXT(CleanPoU_Dcache) LEXT(CleanPoU_Dcache) #if !defined(__ARM_PoU_WT_CACHE__) - mov r0, #0 - GET_CACHE_CONFIG r0, r1, r2, r3 mov r0, #0 dsb clean_dcacheway_idle: clean_dcacheline_idle: mcr p15, 0, r0, c7, c10, 2 // clean dcache line by way/set - add r0, r0, r1 // increment set index - tst r0, r2 // look for overflow + add r0, r0, #1 << MMU_I7SET // increment set index + tst r0, #1 << (MMU_NSET + MMU_I7SET) // look for overflow beq clean_dcacheline_idle - bic r0, r0, r2 // clear set overflow - adds r0, r0, r3 // increment way + bic r0, r0, #1 << (MMU_NSET + MMU_I7SET) // clear set overflow + adds r0, r0, #1 << MMU_I7WAY // increment way bcc clean_dcacheway_idle // loop #endif dsb @@ -248,6 +239,7 @@ LEXT(CleanPoC_DcacheRegion_Force) add 
r1, r1, r2 sub r1, r1, #1 mov r1, r1, LSR #MMU_CLINE // Set cache line counter + dsb ccdr_loop: mcr p15, 0, r0, c7, c10, 1 // Clean dcache line to PoC add r0, r0, #1<