From: Apple Date: Thu, 15 Dec 2016 17:53:06 +0000 (+0000) Subject: xnu-3789.31.2.tar.gz X-Git-Tag: macos-10122^0 X-Git-Url: https://git.saurik.com/apple/xnu.git/commitdiff_plain/d190cdc3f5544636abb56dc1874be391d3e1b148 xnu-3789.31.2.tar.gz --- diff --git a/EXTERNAL_HEADERS/corecrypto/cc_config.h b/EXTERNAL_HEADERS/corecrypto/cc_config.h index 464f32b18..807d58cea 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_config.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_config.h @@ -213,7 +213,7 @@ #endif #if !defined(CC_USE_HEAP_FOR_WORKSPACE) - #if CC_USE_L4 || CC_IBOOT || defined(_MSC_VER) + #if CC_USE_L4 || CC_IBOOT || CC_BASEBAND || defined(_MSC_VER) /* For L4, stack is too short, need to use HEAP for some computations */ /* CC_USE_HEAP_FOR_WORKSPACE not supported for KERNEL! */ #define CC_USE_HEAP_FOR_WORKSPACE 1 diff --git a/EXTERNAL_HEADERS/corecrypto/cc_debug.h b/EXTERNAL_HEADERS/corecrypto/cc_debug.h index 5c8ebbdc7..77aa6d0d0 100644 --- a/EXTERNAL_HEADERS/corecrypto/cc_debug.h +++ b/EXTERNAL_HEADERS/corecrypto/cc_debug.h @@ -24,7 +24,7 @@ #include #define cc_printf(x...) kprintf(x) extern int printf(const char *format, ...) __printflike(1,2); -#elif CC_USE_S3 +#elif CC_USE_S3 || CC_IBOOT #include #define cc_printf(x...) printf(x) #else diff --git a/EXTERNAL_HEADERS/corecrypto/cccmac.h b/EXTERNAL_HEADERS/corecrypto/cccmac.h index f4262d5bb..5d4ca87c2 100644 --- a/EXTERNAL_HEADERS/corecrypto/cccmac.h +++ b/EXTERNAL_HEADERS/corecrypto/cccmac.h @@ -15,7 +15,7 @@ #include #include -#define CMAC_BLOCKSIZE 16 +#define CMAC_BLOCKSIZE 16 #if CORECRYPTO_USE_TRANSPARENT_UNION struct cccmac_ctx { @@ -23,8 +23,12 @@ struct cccmac_ctx { } CC_ALIGNED(8); typedef struct cccmac_ctx_hdr { - uint8_t k1[16]; - uint8_t k2[16]; + uint8_t k1[CMAC_BLOCKSIZE]; + uint8_t k2[CMAC_BLOCKSIZE]; + uint8_t block[CMAC_BLOCKSIZE]; + size_t block_nbytes; // Number of byte occupied in block buf + size_t cumulated_nbytes; // Total size processed + const struct ccmode_cbc *cbc; uint8_t ctx[8]; } CC_ALIGNED(8) cccmac_ctx_hdr; @@ -38,8 +42,12 @@ typedef union { #else struct cccmac_ctx { - uint8_t k1[16]; - uint8_t k2[16]; + uint8_t k1[CMAC_BLOCKSIZE]; + uint8_t k2[CMAC_BLOCKSIZE]; + uint8_t block[CMAC_BLOCKSIZE]; + size_t block_nbytes; // Number of byte occupied in block + size_t cumulated_nbytes; // Total size processed + const struct ccmode_cbc *cbc; uint8_t ctx[8]; } CC_ALIGNED(8);// cccmac_ctx_hdr; @@ -73,20 +81,231 @@ typedef struct cccmac_ctx* cccmac_ctx_t; #define cccmac_mode_iv(_mode_, HC) (cccbc_iv *)(cccmac_mode_ctx_start(_mode_, HC)+cccmac_cbc_size(_mode_)) #define cccmac_k1(HC) (CCCMAC_HDR(HC)->k1) #define cccmac_k2(HC) (CCCMAC_HDR(HC)->k2) +#define cccmac_block(HC) (CCCMAC_HDR(HC)->block) +#define cccmac_cbc(HC) (CCCMAC_HDR(HC)->cbc) +#define cccmac_block_nbytes(HC) (CCCMAC_HDR(HC)->block_nbytes) +#define cccmac_cumulated_nbytes(HC) (CCCMAC_HDR(HC)->cumulated_nbytes) -void cccmac_init(const struct ccmode_cbc *cbc, cccmac_ctx_t ctx, const void *key); +/* CMAC as defined in NIST SP800-38B - 2005 */ -void cccmac_block_update(const struct ccmode_cbc *cbc, cccmac_ctx_t cmac, - size_t nblocks, const void *data); +/* HACK: + To change the prototype of cccmac_init (and preserve the name) we need to + proceed in steps: + 1) Make corecrypto change (23557380) + 2) Have all clients define "CC_CHANGEFUNCTION_28544056_cccmac_init" + 3) Remove CC_CHANGEFUNCTION_28544056_cccmac_init logic and old functions of corecrypto + 4) Clients can remove CC_CHANGEFUNCTION_28544056_cccmac_init at their leisure + + */ + +/* 
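+   For example, a client opting in at step 2 defines the macro before
+   including this header, after which the 4-argument cccmac_init below is
+   called directly (a minimal sketch; the include path follows the
+   deprecation messages in this file):
+
+   @code
+   #define CC_CHANGEFUNCTION_28544056_cccmac_init
+   #include <corecrypto/cccmac.h>
+   @endcode
+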
=============================================================================
+
+ ONE SHOT
+
+ ==============================================================================*/
+/*!
+ @function cccmac_one_shot_generate
+ @abstract CMAC generation in one call
+
+ @param cbc CBC and block cipher specification
+ @param key_nbytes Length of the key in bytes
+ @param key Pointer to the key of length key_nbytes
+ @param data_nbytes Length of the data in bytes
+ @param data Pointer to the data
+ @param mac_nbytes Length in bytes of the MAC, > 0
+ @param mac Output of length cbc->block_size
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_one_shot_generate(const struct ccmode_cbc *cbc,
+                             size_t key_nbytes, const void *key,
+                             size_t data_nbytes, const void *data,
+                             size_t mac_nbytes, void *mac);
+
+/*!
+ @function cccmac_one_shot_verify
+ @abstract CMAC verification in one call
+
+ @param cbc CBC and block cipher specification
+ @param key_nbytes Length of the key in bytes
+ @param key Pointer to the key of length key_nbytes
+ @param data_nbytes Length of the data in bytes
+ @param data Pointer to the data
+ @param expected_mac_nbytes Length in bytes of the expected MAC, > 0
+ @param expected_mac Expected MAC value
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_one_shot_verify(const struct ccmode_cbc *cbc,
+                           size_t key_nbytes, const void *key,
+                           size_t data_nbytes, const void *data,
+                           size_t expected_mac_nbytes, const void *expected_mac);
+
+/* =============================================================================
+
+ STREAMING
+
+ Init - Update - Final
+
+==============================================================================*/
+
+/*!
+ @function cccmac_init
+ @abstract Initialize a CMAC context with CBC mode and key
+
+ @param cbc CBC and block cipher specification
+ @param ctx Context used to store internal state
+ @param key_nbytes Length of the key in bytes
+ @param key Full key
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+
+
+
+#ifndef CC_CHANGEFUNCTION_28544056_cccmac_init
+int cccmac_init(const struct ccmode_cbc *cbc,
+                cccmac_ctx_t ctx,
+                size_t key_nbytes, const void *key)
+// This is the correct prototype! The deprecation warning is only for clients using the old function (now defined as a macro).
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "define 'CC_CHANGEFUNCTION_28544056_cccmac_init' and use new cccmac_init with parameter key_nbytes")));
+#else
+int cccmac_init(const struct ccmode_cbc *cbc,
+                cccmac_ctx_t ctx,
+                size_t key_nbytes, const void *key);
+#endif
+
+/*!
+ @function cccmac_update
+ @abstract Process data
+
+ @param ctx Context used to store internal state
+ @param data_nbytes Length of the data in bytes
+ @param data Data to process
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+
+int cccmac_update(cccmac_ctx_t ctx,
+                  size_t data_nbytes, const void *data);
+/*!
+ @function cccmac_final_generate
+ @abstract Final step for generation
+
+ @param ctx Context used to store internal state
+ @param mac_nbytes Length in bytes of the MAC, > 0
+ @param mac Output of length mac_nbytes
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_final_generate(cccmac_ctx_t ctx,
+                          size_t mac_nbytes, void *mac);
+
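+
+/* A minimal usage sketch for the streaming interface (a sketch, not part of
+   the original header: it assumes CC_CHANGEFUNCTION_28544056_cccmac_init is
+   defined as described above, an AES-CBC implementation such as
+   ccaes_cbc_encrypt_mode() from ccaes.h, and the cccmac_mode_decl() and
+   cccmac_mode_clear() macros earlier in this header; msg and msg_nbytes are
+   caller-supplied, error handling elided):
+
+   @code
+   const struct ccmode_cbc *cbc = ccaes_cbc_encrypt_mode();  // assumed AES-CBC mode
+   uint8_t key[16];                                          // 128-bit AES key
+   uint8_t mac[CMAC_BLOCKSIZE];
+   cccmac_mode_decl(cbc, ctx);
+   int rc = cccmac_init(cbc, ctx, sizeof(key), key);
+   if (rc == 0) rc = cccmac_update(ctx, msg_nbytes, msg);
+   if (rc == 0) rc = cccmac_final_generate(ctx, sizeof(mac), mac);
+   cccmac_mode_clear(cbc, ctx);
+   @endcode
+ */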
+/*!
+ @function cccmac_final_verify
+ @abstract Final step and verification
+
+ @param ctx Context used to store internal state
+ @param expected_mac_nbytes Length in bytes of the expected MAC, > 0
+ @param expected_mac Expected MAC value
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_final_verify(cccmac_ctx_t ctx,
+                        size_t expected_mac_nbytes, const void *expected_mac);
+
+
+/* =============================================================================
+
+ Legacy - Please migrate to the new functions above
+
+ ==============================================================================*/
+
+#ifndef CC_CHANGEFUNCTION_28544056_cccmac_init
+
+/*
+ Guidelines for switching to the new CMAC functions
+
+ Legacy New functions
+ cccmac_init -> cccmac_init w/ key length in bytes
+ cccmac_block_update -> cccmac_update w/ size in bytes instead of blocks
+ cccmac_final -> cccmac_final_generate or cccmac_final_verify
+ depending on the use case, preceded
+ by cccmac_update if there are any leftover bytes.
+ cccmac -> cccmac_one_shot_generate or cccmac_one_shot_verify
+ depending on the use case
+
+ */
+
+/*!
+ @function cccmac_init
+ @abstract Initialize a CMAC context with a 128-bit key
+
+ Define CC_CHANGEFUNCTION_28544056_cccmac_init and use "cccmac_init(...,...,16,...)"
+
+ */
+#define cccmac_init(cbc,ctx,key) cccmac_init(cbc,ctx,16,key)
+
+#endif /* CC_CHANGEFUNCTION_28544056_cccmac_init - TO BE REMOVED WITH 28544056 */
+
+/*!
+ @function cccmac_block_update
+ @abstract Process data
+ */
+
+CC_INLINE void cccmac_block_update(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+                                   size_t nblocks, const void *data)
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_update")));
+
+CC_INLINE void cccmac_block_update(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+                                   size_t nblocks, const void *data) {
+    cccmac_update(ctx,(nblocks)*CMAC_BLOCKSIZE,data);
+}
+
+/*!
+ @function cccmac_final
+ @abstract Finalize CMAC generation
+ */
+CC_INLINE void cccmac_final(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+                            size_t nbytes, const void *in, void *out)
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_final_generate or cccmac_final_verify")));
+
+CC_INLINE void cccmac_final(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+                            size_t nbytes, const void *in, void *out) {
+    cccmac_update(ctx, nbytes, in);
+    cccmac_final_generate(ctx,CMAC_BLOCKSIZE,out);
+}
+
+/*!
+ @function cccmac + @abstract One shot CMAC generation with 128bit key + */ +CC_INLINE void cccmac(const struct ccmode_cbc *cbc, + const void *key, + size_t data_len, const void *data, void *mac) +__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_one_shot_generate or cccmac_one_shot_verify"))); -void cccmac_final(const struct ccmode_cbc *cbc, cccmac_ctx_t ctx, - size_t nbytes, const void *in, void *out); +CC_INLINE void cccmac(const struct ccmode_cbc *cbc, + const void *key, + size_t data_len, const void *data, void *mac) { + cccmac_one_shot_generate(cbc,16,key,data_len,data,16,mac); +} -void cccmac(const struct ccmode_cbc *cbc, const void *key, - size_t data_len, const void *data, - void *mac); #endif /* _CORECRYPTO_cccmac_H_ */ diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode.h b/EXTERNAL_HEADERS/corecrypto/ccmode.h index 98057cce4..eda253b4d 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode.h @@ -32,26 +32,28 @@ CC_INLINE size_t ccecb_block_size(const struct ccmode_ecb *mode) return mode->block_size; } -CC_INLINE void ccecb_init(const struct ccmode_ecb *mode, ccecb_ctx *ctx, - size_t key_len, const void *key) +CC_INLINE int ccecb_init(const struct ccmode_ecb *mode, ccecb_ctx *ctx, + size_t key_len, const void *key) { - mode->init(mode, ctx, key_len, key); + return mode->init(mode, ctx, key_len, key); } -CC_INLINE void ccecb_update(const struct ccmode_ecb *mode, const ccecb_ctx *ctx, - size_t nblocks, const void *in, void *out) +CC_INLINE int ccecb_update(const struct ccmode_ecb *mode, const ccecb_ctx *ctx, + size_t nblocks, const void *in, void *out) { - mode->ecb(ctx, nblocks, in, out); + return mode->ecb(ctx, nblocks, in, out); } -CC_INLINE void ccecb_one_shot(const struct ccmode_ecb *mode, - size_t key_len, const void *key, - size_t nblocks, const void *in, void *out) +CC_INLINE int ccecb_one_shot(const struct ccmode_ecb *mode, + size_t key_len, const void *key, + size_t nblocks, const void *in, void *out) { + int rc; ccecb_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, key_len, key); + rc = mode->init(mode, ctx, key_len, key); mode->ecb(ctx, nblocks, in, out); ccecb_ctx_clear(mode->size, ctx); + return rc; } /* CBC mode. 
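
   A minimal sketch of the one-shot CBC interface below (a sketch assuming an
   AES implementation such as ccaes_cbc_encrypt_mode() from ccaes.h; buffers
   are caller-supplied and error handling is elided):

   @code
   const struct ccmode_cbc *cbc = ccaes_cbc_encrypt_mode();  // assumed AES-CBC mode
   uint8_t key[16];            // 128-bit AES key
   uint8_t iv[16];             // one block
   uint8_t pt[32], ct[32];     // two blocks of plaintext/ciphertext
   int rc = cccbc_one_shot(cbc, sizeof(key), key, iv,
                           2, pt, ct);                       // nblocks = 2
   @endcode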
*/ @@ -90,42 +92,45 @@ CC_INLINE size_t cccbc_block_size(const struct ccmode_cbc *mode) return mode->block_size; } -CC_INLINE void cccbc_init(const struct ccmode_cbc *mode, cccbc_ctx *ctx, - size_t key_len, const void *key) +CC_INLINE int cccbc_init(const struct ccmode_cbc *mode, cccbc_ctx *ctx, + size_t key_len, const void *key) { - mode->init(mode, ctx, key_len, key); + return mode->init(mode, ctx, key_len, key); } -CC_INLINE void cccbc_set_iv(const struct ccmode_cbc *mode, cccbc_iv *iv_ctx, - const void *iv) +CC_INLINE int cccbc_set_iv(const struct ccmode_cbc *mode, cccbc_iv *iv_ctx, + const void *iv) { if (iv) cc_copy(mode->block_size, iv_ctx, iv); else cc_zero(mode->block_size, iv_ctx); + return 0; } -CC_INLINE void cccbc_update(const struct ccmode_cbc *mode, cccbc_ctx *ctx, - cccbc_iv *iv, size_t nblocks, - const void *in, void *out) +CC_INLINE int cccbc_update(const struct ccmode_cbc *mode, cccbc_ctx *ctx, + cccbc_iv *iv, size_t nblocks, + const void *in, void *out) { - mode->cbc(ctx, iv, nblocks, in, out); + return mode->cbc(ctx, iv, nblocks, in, out); } -CC_INLINE void cccbc_one_shot(const struct ccmode_cbc *mode, - size_t key_len, const void *key, - const void *iv, size_t nblocks, - const void *in, void *out) +CC_INLINE int cccbc_one_shot(const struct ccmode_cbc *mode, + size_t key_len, const void *key, + const void *iv, size_t nblocks, + const void *in, void *out) { + int rc; cccbc_ctx_decl(mode->size, ctx); cccbc_iv_decl(mode->block_size, iv_ctx); - mode->init(mode, ctx, key_len, key); + rc = mode->init(mode, ctx, key_len, key); if (iv) cccbc_set_iv(mode, iv_ctx, iv); else cc_zero(mode->block_size, iv_ctx); mode->cbc(ctx, iv_ctx, nblocks, in, out); cccbc_ctx_clear(mode->size, ctx); + return rc; } /* CFB mode. */ @@ -145,27 +150,29 @@ CC_INLINE size_t cccfb_block_size(const struct ccmode_cfb *mode) return mode->block_size; } -CC_INLINE void cccfb_init(const struct ccmode_cfb *mode, cccfb_ctx *ctx, - size_t key_len, const void *key, - const void *iv) +CC_INLINE int cccfb_init(const struct ccmode_cfb *mode, cccfb_ctx *ctx, + size_t key_len, const void *key, + const void *iv) { - mode->init(mode, ctx, key_len, key, iv); + return mode->init(mode, ctx, key_len, key, iv); } -CC_INLINE void cccfb_update(const struct ccmode_cfb *mode, cccfb_ctx *ctx, - size_t nbytes, const void *in, void *out) +CC_INLINE int cccfb_update(const struct ccmode_cfb *mode, cccfb_ctx *ctx, + size_t nbytes, const void *in, void *out) { - mode->cfb(ctx, nbytes, in, out); + return mode->cfb(ctx, nbytes, in, out); } -CC_INLINE void cccfb_one_shot(const struct ccmode_cfb *mode, - size_t key_len, const void *key, const void *iv, - size_t nbytes, const void *in, void *out) +CC_INLINE int cccfb_one_shot(const struct ccmode_cfb *mode, + size_t key_len, const void *key, const void *iv, + size_t nbytes, const void *in, void *out) { + int rc; cccfb_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, key_len, key, iv); + rc = mode->init(mode, ctx, key_len, key, iv); mode->cfb(ctx, nbytes, in, out); cccfb_ctx_clear(mode->size, ctx); + return rc; } /* CFB8 mode. 
*/ @@ -185,26 +192,28 @@ CC_INLINE size_t cccfb8_block_size(const struct ccmode_cfb8 *mode) return mode->block_size; } -CC_INLINE void cccfb8_init(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx, - size_t key_len, const void *key, const void *iv) +CC_INLINE int cccfb8_init(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx, + size_t key_len, const void *key, const void *iv) { - mode->init(mode, ctx, key_len, key, iv); + return mode->init(mode, ctx, key_len, key, iv); } -CC_INLINE void cccfb8_update(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx, - size_t nbytes, const void *in, void *out) +CC_INLINE int cccfb8_update(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx, + size_t nbytes, const void *in, void *out) { - mode->cfb8(ctx, nbytes, in, out); + return mode->cfb8(ctx, nbytes, in, out); } -CC_INLINE void cccfb8_one_shot(const struct ccmode_cfb8 *mode, - size_t key_len, const void *key, const void *iv, - size_t nbytes, const void *in, void *out) +CC_INLINE int cccfb8_one_shot(const struct ccmode_cfb8 *mode, + size_t key_len, const void *key, const void *iv, + size_t nbytes, const void *in, void *out) { + int rc; cccfb8_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, key_len, key, iv); + rc = mode->init(mode, ctx, key_len, key, iv); mode->cfb8(ctx, nbytes, in, out); cccfb8_ctx_clear(mode->size, ctx); + return rc; } /* CTR mode. */ @@ -228,26 +237,28 @@ CC_INLINE size_t ccctr_block_size(const struct ccmode_ctr *mode) return mode->block_size; } -CC_INLINE void ccctr_init(const struct ccmode_ctr *mode, ccctr_ctx *ctx, - size_t key_len, const void *key, const void *iv) +CC_INLINE int ccctr_init(const struct ccmode_ctr *mode, ccctr_ctx *ctx, + size_t key_len, const void *key, const void *iv) { - mode->init(mode, ctx, key_len, key, iv); + return mode->init(mode, ctx, key_len, key, iv); } -CC_INLINE void ccctr_update(const struct ccmode_ctr *mode, ccctr_ctx *ctx, - size_t nbytes, const void *in, void *out) +CC_INLINE int ccctr_update(const struct ccmode_ctr *mode, ccctr_ctx *ctx, + size_t nbytes, const void *in, void *out) { - mode->ctr(ctx, nbytes, in, out); + return mode->ctr(ctx, nbytes, in, out); } -CC_INLINE void ccctr_one_shot(const struct ccmode_ctr *mode, - size_t key_len, const void *key, const void *iv, - size_t nbytes, const void *in, void *out) +CC_INLINE int ccctr_one_shot(const struct ccmode_ctr *mode, + size_t key_len, const void *key, const void *iv, + size_t nbytes, const void *in, void *out) { + int rc; ccctr_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, key_len, key, iv); + rc = mode->init(mode, ctx, key_len, key, iv); mode->ctr(ctx, nbytes, in, out); ccctr_ctx_clear(mode->size, ctx); + return rc; } @@ -268,30 +279,30 @@ CC_INLINE size_t ccofb_block_size(const struct ccmode_ofb *mode) return mode->block_size; } -CC_INLINE void ccofb_init(const struct ccmode_ofb *mode, ccofb_ctx *ctx, - size_t key_len, const void *key, const void *iv) +CC_INLINE int ccofb_init(const struct ccmode_ofb *mode, ccofb_ctx *ctx, + size_t key_len, const void *key, const void *iv) { - mode->init(mode, ctx, key_len, key, iv); + return mode->init(mode, ctx, key_len, key, iv); } -CC_INLINE void ccofb_update(const struct ccmode_ofb *mode, ccofb_ctx *ctx, - size_t nbytes, const void *in, void *out) +CC_INLINE int ccofb_update(const struct ccmode_ofb *mode, ccofb_ctx *ctx, + size_t nbytes, const void *in, void *out) { - mode->ofb(ctx, nbytes, in, out); + return mode->ofb(ctx, nbytes, in, out); } -CC_INLINE void ccofb_one_shot(const struct ccmode_ofb *mode, - size_t key_len, const void *key, const void 
*iv, - size_t nbytes, const void *in, void *out) +CC_INLINE int ccofb_one_shot(const struct ccmode_ofb *mode, + size_t key_len, const void *key, const void *iv, + size_t nbytes, const void *in, void *out) { + int rc; ccofb_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, key_len, key, iv); + rc = mode->init(mode, ctx, key_len, key, iv); mode->ofb(ctx, nbytes, in, out); ccofb_ctx_clear(mode->size, ctx); + return rc; } -/* Authenticated cipher modes. */ - /* XTS mode. */ /* Declare a xts key named _name_. Pass the size field of a struct ccmode_xts @@ -327,38 +338,86 @@ CC_INLINE size_t ccxts_block_size(const struct ccmode_xts *mode) return mode->block_size; } -CC_INLINE void ccxts_init(const struct ccmode_xts *mode, ccxts_ctx *ctx, - size_t key_len, const void *key, +/*! + @function ccxts_init + @abstract Initialize an XTS context. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param key_nbytes Length of the key arguments in bytes + @param data_key Key for data encryption + @param tweak_key Key for tweak generation + + @result 0 iff successful. + + @discussion For security reasons, the two keys must be different. + */ +CC_INLINE int ccxts_init(const struct ccmode_xts *mode, ccxts_ctx *ctx, + size_t key_nbytes, const void *data_key, const void *tweak_key) { - mode->init(mode, ctx, key_len, key, tweak_key); -} - -CC_INLINE void ccxts_set_tweak(const struct ccmode_xts *mode, ccxts_ctx *ctx, - ccxts_tweak *tweak, const void *iv) -{ - mode->set_tweak(ctx, tweak, iv); + return mode->init(mode, ctx, key_nbytes, data_key, tweak_key); } +/*! + @function ccxts_set_tweak + @abstract Initialize the tweak for a sector. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param tweak Context for the tweak for this sector + @param iv Data used to generate the tweak + + @discussion The IV must be exactly one block in length. + */ +CC_INLINE int ccxts_set_tweak(const struct ccmode_xts *mode, ccxts_ctx *ctx, + ccxts_tweak *tweak, const void *iv) +{ + return mode->set_tweak(ctx, tweak, iv); +} + +/*! + @function ccxts_update + @abstract Encrypt or decrypt data. + + @param mode Descriptor for the mode + @param ctx Context for an instance + @param tweak Context for the tweak for this sector + @param nblocks Length of the data in blocks + @param in Input data + @param out Output buffer + + @result The updated internal buffer of the tweak context. May be ignored. + */ CC_INLINE void *ccxts_update(const struct ccmode_xts *mode, ccxts_ctx *ctx, - ccxts_tweak *tweak, size_t nblocks, const void *in, void *out) + ccxts_tweak *tweak, size_t nblocks, const void *in, void *out) { return mode->xts(ctx, tweak, nblocks, in, out); } -CC_INLINE void ccxts_one_shot(const struct ccmode_xts *mode, - size_t key_len, const void *key, - const void *tweak_key, const void *iv, - size_t nblocks, const void *in, void *out) -{ - ccxts_ctx_decl(mode->size, ctx); - ccxts_tweak_decl(mode->tweak_size, tweak); - mode->init(mode, ctx, key_len, key, tweak_key); - mode->set_tweak(ctx, tweak, iv); - mode->xts(ctx, tweak, nblocks, in, out); - ccxts_ctx_clear(mode->size, ctx); - ccxts_tweak_clear(mode->tweak_size, tweak); -} +/*! + @function ccxts_one_shot + @abstract Encrypt or decrypt data in XTS mode. 
+ + @param mode Descriptor for the mode + @param key_nbytes Length of the key arguments in bytes + @param data_key Key for data encryption + @param tweak_key Key for tweak generation + @param iv Data used to generate the tweak + @param nblocks Length of the data in blocks + @param in Input data + @param out Output buffer + + @result 0 iff successful. + + @discussion For security reasons, the two keys must be different. + */ +int ccxts_one_shot(const struct ccmode_xts *mode, + size_t key_nbytes, const void *data_key, + const void *tweak_key, const void *iv, + size_t nblocks, const void *in, void *out); + +/* Authenticated cipher modes. */ /* GCM mode. */ @@ -367,6 +426,9 @@ CC_INLINE void ccxts_one_shot(const struct ccmode_xts *mode, #define ccgcm_ctx_decl(_size_, _name_) cc_ctx_decl(ccgcm_ctx, _size_, _name_) #define ccgcm_ctx_clear(_size_, _name_) cc_clear(_size_, _name_) +#define CCGCM_IV_NBYTES 12 +#define CCGCM_BLOCK_NBYTES 16 + CC_INLINE size_t ccgcm_context_size(const struct ccmode_gcm *mode) { return mode->size; @@ -377,64 +439,321 @@ CC_INLINE size_t ccgcm_block_size(const struct ccmode_gcm *mode) return mode->block_size; } -CC_INLINE int ccgcm_init(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, - size_t key_len, const void *key) -{ - return mode->init(mode, ctx, key_len, key); -} +/*! + @function ccgcm_init + @abstract Initialize a GCM context. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param key_nbytes Length of the key in bytes + @param key Key for the underlying blockcipher (AES) + + @result 0 iff successful. + + @discussion The correct sequence of calls is: + + @code ccgcm_init(...) + ccgcm_set_iv(...) + ccgcm_aad(...) (may be called zero or more times) + ccgcm_update(...) (may be called zero or more times) + ccgcm_finalize(...) + + To reuse the context for additional encryptions, follow this sequence: + + @code ccgcm_reset(...) + ccgcm_set_iv(...) + ccgcm_aad(...) (may be called zero or more times) + ccgcm_update(...) (may be called zero or more times) + ccgcm_finalize(...) + + @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length. + + @warning It is not permitted to call @p ccgcm_inc_iv after initializing the cipher via the @p ccgcm_init interface. Nonzero is returned in the event of an improper call sequence. + */ +CC_INLINE int ccgcm_init(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, + size_t key_nbytes, const void *key) +{ + return mode->init(mode, ctx, key_nbytes, key); +} + +/*! + @function ccgcm_init_with_iv + @abstract Initialize a GCM context to manage IVs internally. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param key_nbytes Length of the key in bytes + @param key Key for the underlying blockcipher (AES) + @param iv IV for the first encryption + + @result 0 iff successful. + + @discussion The correct sequence of calls is: + + @code ccgcm_init_with_iv(...) + ccgcm_aad(...) (may be called zero or more times) + ccgcm_update(...) (may be called zero or more times) + ccgcm_finalize(...) + + To reuse the context for additional encryptions, follow this sequence: + + @code ccgcm_reset(...) + ccgcm_inc_iv(...) + ccgcm_aad(...) (may be called zero or more times) + ccgcm_update(...) (may be called zero or more times) + ccgcm_finalize(...) + + The IV must be exactly 12 bytes in length. + + Internally, the IV is treated as a four-byte salt followed by an eight-byte counter. This is to match the behavior of certain protocols (e.g. TLS). 
In the call to @p ccgcm_inc_iv, the counter component will be interpreted as a big-endian, unsigned value and incremented in place. + + @warning It is not permitted to call @p ccgcm_set_iv after initializing the cipher via the @p ccgcm_init_with_iv interface. Nonzero is returned in the event of an improper call sequence. + + @warning The security of GCM depends on the uniqueness of key-IV pairs. To avoid key-IV repetition, callers should not initialize multiple contexts with the same key material via the @p ccgcm_init_with_iv interface. + */ +int ccgcm_init_with_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, + size_t key_nbytes, const void *key, + const void *iv); + +/*! + @function ccgcm_set_iv + @abstract Set the IV for encryption. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param iv_nbytes Length of the IV in bytes + @param iv Initialization vector + + @result 0 iff successful. + + @discussion Set the initialization vector for encryption. + + @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length. + + In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV. + + In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng). + + @warning This function may not be used after initializing the cipher via @p ccgcm_init_with_iv. Nonzero is returned in the event of an improper call sequence. + */ CC_INLINE int ccgcm_set_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, - size_t iv_size, const void *iv) -{ - return mode->set_iv(ctx, iv_size, iv); -} - -// add Additional authenticated data (AAD) + size_t iv_nbytes, const void *iv) +{ + return mode->set_iv(ctx, iv_nbytes, iv); +} + +/*! + @function ccgcm_set_iv_legacy + @abstract Set the IV for encryption. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param iv_nbytes Length of the IV in bytes + @param iv Initialization vector + + @result 0 iff successful. + + @discussion Identical to @p ccgcm_set_iv except that it allows zero-length IVs. + + @warning Zero-length IVs nullify the authenticity guarantees of GCM. + + @warning Do not use this function in new applications. + */ +int ccgcm_set_iv_legacy(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, + size_t iv_nbytes, const void *iv); + +/*! + @function ccgcm_inc_iv + @abstract Increment the IV for another encryption. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param iv Updated initialization vector + + @result 0 iff successful. + + @discussion Updates the IV internally for another encryption. + + Internally, the IV is treated as a four-byte salt followed by an eight-byte counter. This is to match the behavior of certain protocols (e.g. TLS). The counter component is interpreted as a big-endian, unsigned value and incremented in place. + + The updated IV is copied to @p iv. This is to support protocols that require part of the IV to be specified explicitly in each packet (e.g. TLS). + + @warning This function may be used only after initializing the cipher via @p ccgcm_init_with_iv. + */ +int ccgcm_inc_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, void *iv); + + +/*! + @function ccgcm_aad + @abstract Authenticate additional data. 
+ + @param mode Descriptor for the mode + @param ctx Context for this instance + @param nbytes Length of the additional data in bytes + @param additional_data Additional data to authenticate + + @result 0 iff successful. + + @discussion This is typically used to authenticate data that cannot be encrypted (e.g. packet headers). + + This function may be called zero or more times. + */ CC_INLINE int ccgcm_aad(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, size_t nbytes, const void *additional_data) { return mode->gmac(ctx, nbytes, additional_data); } +/*! + @function ccgcm_gmac + + @discussion See @p ccgcm_aad. + */ CC_INLINE int ccgcm_gmac(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, size_t nbytes, const void *in) { return mode->gmac(ctx, nbytes, in); } -// encrypt or decrypt +/*! + @function ccgcm_update + @abstract Encrypt or decrypt data. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param nbytes Length of the data in bytes + @param in Input plaintext or ciphertext + @param out Output ciphertext or plaintext + + @result 0 iff successful. + + @discussion In-place processing is supported. + + This function may be called zero or more times. + */ CC_INLINE int ccgcm_update(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out) { return mode->gcm(ctx, nbytes, in, out); } +/*! + @function ccgcm_finalize + @abstract Finish processing and authenticate. + + @param mode Descriptor for the mode + @param ctx Context for this instance + @param tag_nbytes Length of the tag in bytes + @param tag Authentication tag + + @result 0 iff successful. + + @discussion Finish processing a packet and generate the authentication tag. + + On encryption, @p tag is purely an output parameter. The generated tag is written to @p tag. + + On decryption, @p tag is primarily an input parameter. The caller should provide the authentication tag generated during encryption. The function will return nonzero if the input tag does not match the generated tag. + + @warning To support legacy applications, @p tag is also an output parameter during decryption. The generated tag is written to @p tag. Legacy callers may choose to compare this to the tag generated during encryption. Do not follow this usage pattern in new applications. + */ CC_INLINE int ccgcm_finalize(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, - size_t tag_size, void *tag) + size_t tag_nbytes, void *tag) { - return mode->finalize(ctx, tag_size, tag); + return mode->finalize(ctx, tag_nbytes, tag); } +/*! + @function ccgcm_reset + @abstract Reset the context for another encryption. + + @param mode Descriptor for the mode + @param ctx Context for this instance + + @result 0 iff successful. + + @discussion Refer to @p ccgcm_init for correct usage. + */ CC_INLINE int ccgcm_reset(const struct ccmode_gcm *mode, ccgcm_ctx *ctx) { return mode->reset(ctx); } +/*! + @function ccgcm_one_shot + @abstract Encrypt or decrypt with GCM. + + @param mode Descriptor for the mode + @param key_nbytes Length of the key in bytes + @param key Key for the underlying blockcipher (AES) + @param iv_nbytes Length of the IV in bytes + @param iv Initialization vector + @param adata_nbytes Length of the additional data in bytes + @param adata Additional data to authenticate + @param nbytes Length of the data in bytes + @param in Input plaintext or ciphertext + @param out Output ciphertext or plaintext + @param tag_nbytes Length of the tag in bytes + @param tag Authentication tag + + @result 0 iff successful. 
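+
+ A minimal sketch (assuming an AES-GCM implementation such as
+ ccaes_gcm_encrypt_mode() from ccaes.h; buffers are caller-supplied and
+ error handling is elided):
+
+ @code
+ const struct ccmode_gcm *gcm = ccaes_gcm_encrypt_mode();   // assumed AES-GCM mode
+ uint8_t key[16], iv[CCGCM_IV_NBYTES], tag[CCGCM_BLOCK_NBYTES];
+ uint8_t pt[64], ct[64];
+ int rc = ccgcm_one_shot(gcm, sizeof(key), key,
+                         sizeof(iv), iv,
+                         0, NULL,                           // no AAD
+                         sizeof(pt), pt, ct,
+                         sizeof(tag), tag);
+ @endcode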
+ + @discussion Perform GCM encryption or decryption. + + @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length. + + In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV. + + In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng). + + In-place processing is supported. + + On encryption, @p tag is purely an output parameter. The generated tag is written to @p tag. + + On decryption, @p tag is primarily an input parameter. The caller should provide the authentication tag generated during encryption. The function will return nonzero if the input tag does not match the generated tag. + + @warning To support legacy applications, @p tag is also an output parameter during decryption. The generated tag is written to @p tag. Legacy callers may choose to compare this to the tag generated during encryption. Do not follow this usage pattern in new applications. + */ int ccgcm_one_shot(const struct ccmode_gcm *mode, - size_t key_len, const void *key, - size_t iv_len, const void *iv, - size_t adata_len, const void *adata, - size_t nbytes, const void *in, void *out, - size_t tag_len, void *tag); - -//do not call ccgcm_one_shot_legacy() in any new application + size_t key_nbytes, const void *key, + size_t iv_nbytes, const void *iv, + size_t adata_nbytes, const void *adata, + size_t nbytes, const void *in, void *out, + size_t tag_nbytes, void *tag); + + +/*! + @function ccgcm_one_shot_legacy + @abstract Encrypt or decrypt with GCM. + + @param mode Descriptor for the mode + @param key_nbytes Length of the key in bytes + @param key Key for the underlying blockcipher (AES) + @param iv_nbytes Length of the IV in bytes + @param iv Initialization vector + @param adata_nbytes Length of the additional data in bytes + @param adata Additional data to authenticate + @param nbytes Length of the data in bytes + @param in Input plaintext or ciphertext + @param out Output ciphertext or plaintext + @param tag_nbytes Length of the tag in bytes + @param tag Authentication tag + + @result 0 iff successful. + + @discussion Identical to @p ccgcm_one_shot except that it allows zero-length IVs. + + @warning Zero-length IVs nullify the authenticity guarantees of GCM. + + @warning Do not use this function in new applications. 
+ */ int ccgcm_one_shot_legacy(const struct ccmode_gcm *mode, - size_t key_len, const void *key, - size_t iv_len, const void *iv, - size_t adata_len, const void *adata, - size_t nbytes, const void *in, void *out, - size_t tag_len, void *tag); + size_t key_nbytes, const void *key, + size_t iv_nbytes, const void *iv, + size_t adata_nbytes, const void *adata, + size_t nbytes, const void *in, void *out, + size_t tag_nbytes, void *tag); /* CCM */ @@ -534,10 +853,10 @@ CC_INLINE size_t ccomac_block_size(const struct ccmode_omac *mode) return mode->block_size; } -CC_INLINE void ccomac_init(const struct ccmode_omac *mode, ccomac_ctx *ctx, - size_t tweak_len, size_t key_len, const void *key) +CC_INLINE int ccomac_init(const struct ccmode_omac *mode, ccomac_ctx *ctx, + size_t tweak_len, size_t key_len, const void *key) { - mode->init(mode, ctx, tweak_len, key_len, key); + return mode->init(mode, ctx, tweak_len, key_len, key); } CC_INLINE int ccomac_update(const struct ccmode_omac *mode, ccomac_ctx *ctx, @@ -550,11 +869,12 @@ CC_INLINE int ccomac_one_shot(const struct ccmode_omac *mode, size_t tweak_len, size_t key_len, const void *key, const void *tweak, size_t nblocks, const void *in, void *out) { + int rc; ccomac_ctx_decl(mode->size, ctx); - mode->init(mode, ctx, tweak_len, key_len, key); - int result = mode->omac(ctx, nblocks, tweak, in, out); + rc = mode->init(mode, ctx, tweak_len, key_len, key); + if (rc == 0) rc = mode->omac(ctx, nblocks, tweak, in, out); ccomac_ctx_clear(mode->size, ctx); - return result; + return rc; } diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h b/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h index 482c6ce92..c05518e27 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_factory.h @@ -115,12 +115,12 @@ const struct ccmode_cbc *cc3des_cbc_encrypt_mode(void) { -void ccmode_cbc_init(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, - size_t rawkey_len, const void *rawkey); -void ccmode_cbc_decrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, - const void *in, void *out); -void ccmode_cbc_encrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, - const void *in, void *out); +int ccmode_cbc_init(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, + size_t rawkey_len, const void *rawkey); +int ccmode_cbc_decrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, + const void *in, void *out); +int ccmode_cbc_encrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks, + const void *in, void *out); struct _ccmode_cbc_key { const struct ccmode_ecb *ecb; @@ -160,13 +160,13 @@ void ccmode_factory_cbc_encrypt(struct ccmode_cbc *cbc, const struct ccmode_ecb *ecb); -void ccmode_cfb_init(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, - size_t rawkey_len, const void *rawkey, - const void *iv); -void ccmode_cfb_decrypt(cccfb_ctx *ctx, size_t nbytes, - const void *in, void *out); -void ccmode_cfb_encrypt(cccfb_ctx *ctx, size_t nbytes, - const void *in, void *out); +int ccmode_cfb_init(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, + size_t rawkey_len, const void *rawkey, + const void *iv); +int ccmode_cfb_decrypt(cccfb_ctx *ctx, size_t nbytes, + const void *in, void *out); +int ccmode_cfb_encrypt(cccfb_ctx *ctx, size_t nbytes, + const void *in, void *out); struct _ccmode_cfb_key { const struct ccmode_ecb *ecb; size_t pad_len; @@ -205,12 +205,12 @@ void ccmode_factory_cfb_decrypt(struct ccmode_cfb *cfb, void ccmode_factory_cfb_encrypt(struct ccmode_cfb *cfb, const struct ccmode_ecb *ecb); -void ccmode_cfb8_init(const struct ccmode_cfb8 
*cfb8, cccfb8_ctx *ctx, - size_t rawkey_len, const void *rawkey, const void *iv); -void ccmode_cfb8_decrypt(cccfb8_ctx *ctx, size_t nbytes, - const void *in, void *out); -void ccmode_cfb8_encrypt(cccfb8_ctx *ctx, size_t nbytes, - const void *in, void *out); +int ccmode_cfb8_init(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, + size_t rawkey_len, const void *rawkey, const void *iv); +int ccmode_cfb8_decrypt(cccfb8_ctx *ctx, size_t nbytes, + const void *in, void *out); +int ccmode_cfb8_encrypt(cccfb8_ctx *ctx, size_t nbytes, + const void *in, void *out); struct _ccmode_cfb8_key { const struct ccmode_ecb *ecb; @@ -249,10 +249,10 @@ void ccmode_factory_cfb8_decrypt(struct ccmode_cfb8 *cfb8, void ccmode_factory_cfb8_encrypt(struct ccmode_cfb8 *cfb8, const struct ccmode_ecb *ecb); -void ccmode_ctr_init(const struct ccmode_ctr *ctr, ccctr_ctx *ctx, - size_t rawkey_len, const void *rawkey, const void *iv); -void ccmode_ctr_crypt(ccctr_ctx *ctx, size_t nbytes, - const void *in, void *out); +int ccmode_ctr_init(const struct ccmode_ctr *ctr, ccctr_ctx *ctx, + size_t rawkey_len, const void *rawkey, const void *iv); +int ccmode_ctr_crypt(ccctr_ctx *ctx, size_t nbytes, + const void *in, void *out); struct _ccmode_ctr_key { const struct ccmode_ecb *ecb; @@ -282,7 +282,7 @@ void ccmode_factory_ctr_crypt(struct ccmode_ctr *ctr, storage. */ int ccmode_gcm_init(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx, size_t rawkey_len, const void *rawkey); -int ccmode_gcm_set_iv(ccgcm_ctx *ctx, size_t iv_size, const void *iv); +int ccmode_gcm_set_iv(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv); int ccmode_gcm_aad(ccgcm_ctx *ctx, size_t nbytes, const void *in); int ccmode_gcm_decrypt(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out); @@ -301,10 +301,13 @@ int ccmode_gcm_encrypt(ccgcm_ctx *ctx, size_t nbytes, const void *in, int ccmode_gcm_finalize(ccgcm_ctx *key, size_t tag_size, void *tag); int ccmode_gcm_reset(ccgcm_ctx *key); +#define CCGCM_FLAGS_INIT_WITH_IV 1 // Here is what the structure looks like in memory // [ temp space | length | *ecb | *ecb_key | table | ecb_key ] // size of table depends on the implementation (VNG vs factory) +// currently, VNG and factory share the same "header" described here +// VNG may add additional data after the header struct _ccmode_gcm_key { // 5 blocks of temp space. unsigned char H[16]; /* multiplier */ @@ -314,12 +317,12 @@ struct _ccmode_gcm_key { unsigned char buf[16]; /* buffer for stuff */ // State and length - uint32_t ivmode; /* Which mode is the IV in? 
*/ - uint32_t state; /* state the GCM code is in */ - uint32_t buflen; /* length of data in buf */ + uint16_t state; /* state the GCM code is in */ + uint16_t flags; /* flags (persistent across reset) */ + uint32_t buf_nbytes; /* length of data in buf */ - uint64_t totlen; /* 64-bit counter used for IV and AAD */ - uint64_t pttotlen; /* 64-bit counter for the plaintext PT */ + uint64_t aad_nbytes; /* 64-bit counter used for IV and AAD */ + uint64_t text_nbytes; /* 64-bit counter for the plaintext PT */ // ECB const struct ccmode_ecb *ecb; // ecb mode @@ -431,11 +434,11 @@ void ccmode_factory_ccm_encrypt(struct ccmode_ccm *ccm, const struct ccmode_ecb *ecb_encrypt); -void ccmode_ofb_init(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, - size_t rawkey_len, const void *rawkey, - const void *iv); -void ccmode_ofb_crypt(ccofb_ctx *ctx, size_t nbytes, - const void *in, void *out); +int ccmode_ofb_init(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, + size_t rawkey_len, const void *rawkey, + const void *iv); +int ccmode_ofb_crypt(ccofb_ctx *ctx, size_t nbytes, + const void *in, void *out); struct _ccmode_ofb_key { const struct ccmode_ecb *ecb; @@ -469,9 +472,9 @@ int ccmode_omac_encrypt(ccomac_ctx *ctx, size_t nblocks, ccmode_omac->omac(). key must point to at least sizeof(CCMODE_OMAC_KEY(ecb)) bytes of free storage. */ -void ccmode_omac_init(const struct ccmode_omac *omac, ccomac_ctx *ctx, - size_t tweak_len, size_t rawkey_len, - const void *rawkey); +int ccmode_omac_init(const struct ccmode_omac *omac, ccomac_ctx *ctx, + size_t tweak_len, size_t rawkey_len, + const void *rawkey); struct _ccmode_omac_key { const struct ccmode_ecb *ecb; @@ -513,13 +516,16 @@ void ccmode_factory_omac_encrypt(struct ccmode_omac *omac, /* Function prototypes used by the macros below, do not call directly. */ -void ccmode_xts_init(const struct ccmode_xts *xts, ccxts_ctx *ctx, - size_t key_len, const void *data_key, - const void *tweak_key); +int ccmode_xts_init(const struct ccmode_xts *xts, ccxts_ctx *ctx, + size_t key_nbytes, const void *data_key, + const void *tweak_key); +void ccmode_xts_key_sched(const struct ccmode_xts *xts, ccxts_ctx *ctx, + size_t key_nbytes, const void *data_key, + const void *tweak_key); void *ccmode_xts_crypt(const ccxts_ctx *ctx, ccxts_tweak *tweak, size_t nblocks, const void *in, void *out); -void ccmode_xts_set_tweak(const ccxts_ctx *ctx, ccxts_tweak *tweak, - const void *iv); +int ccmode_xts_set_tweak(const ccxts_ctx *ctx, ccxts_tweak *tweak, + const void *iv); struct _ccmode_xts_key { @@ -544,6 +550,7 @@ struct _ccmode_xts_tweak { .tweak_size = ccn_sizeof_size(sizeof(struct _ccmode_xts_tweak)) + ccn_sizeof_size(ecb->block_size), \ .block_size = ecb->block_size, \ .init = ccmode_xts_init, \ +.key_sched = ccmode_xts_key_sched, \ .set_tweak = ccmode_xts_set_tweak, \ .xts = ccmode_xts_crypt, \ .custom = (ECB), \ @@ -556,6 +563,7 @@ struct _ccmode_xts_tweak { .tweak_size = ccn_sizeof_size(sizeof(struct _ccmode_xts_tweak)) + ccn_sizeof_size(ecb->block_size), \ .block_size = ecb->block_size, \ .init = ccmode_xts_init, \ +.key_sched = ccmode_xts_key_sched, \ .set_tweak = ccmode_xts_set_tweak, \ .xts = ccmode_xts_crypt, \ .custom = (ECB), \ diff --git a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h index 817d45070..1337e1467 100644 --- a/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h +++ b/EXTERNAL_HEADERS/corecrypto/ccmode_impl.h @@ -21,10 +21,10 @@ cc_aligned_struct(16) ccecb_ctx; struct ccmode_ecb { size_t size; /* first argument to ccecb_ctx_decl(). 
*/ size_t block_size; - void (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx, - size_t key_len, const void *key); - void (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in, - void *out); + int (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx, + size_t key_nbytes, const void *key); + int (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in, + void *out); }; /*! @@ -64,11 +64,11 @@ cc_aligned_struct(16) cccbc_iv; struct ccmode_cbc { size_t size; /* first argument to cccbc_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, - size_t key_len, const void *key); + int (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx, + size_t key_len, const void *key); /* cbc encrypt or decrypt nblocks from in to out, iv will be used and updated. */ - void (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv, - size_t nblocks, const void *in, void *out); + int (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv, + size_t nblocks, const void *in, void *out); const void *custom; }; @@ -78,9 +78,9 @@ cc_aligned_struct(16) cccfb_ctx; struct ccmode_cfb { size_t size; /* first argument to cccfb_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, - size_t key_len, const void *key, const void *iv); - void (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx, + size_t key_len, const void *key, const void *iv); + int (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -90,9 +90,9 @@ cc_aligned_struct(16) cccfb8_ctx; struct ccmode_cfb8 { size_t size; /* first argument to cccfb8_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, - size_t key_len, const void *key, const void *iv); - void (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx, + size_t key_len, const void *key, const void *iv); + int (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -102,9 +102,9 @@ cc_aligned_struct(16) ccctr_ctx; struct ccmode_ctr { size_t size; /* first argument to ccctr_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_ctr *ctr, ccctr_ctx *ctx, - size_t key_len, const void *key, const void *iv); - void (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*init)(const struct ccmode_ctr *ctr, ccctr_ctx *ctx, + size_t key_len, const void *key, const void *iv); + int (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -114,9 +114,9 @@ cc_aligned_struct(16) ccofb_ctx; struct ccmode_ofb { size_t size; /* first argument to ccofb_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, - size_t key_len, const void *key, const void *iv); - void (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out); + int (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx, + size_t key_len, const void *key, const void *iv); + int (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out); const void *custom; }; @@ -133,12 +133,18 @@ struct ccmode_xts { determines how long the tweak is in bytes, for each subsequent call to ccmode_xts->xts(). key must point to at least 'size' cc_units of free storage. - tweak_key must point to at least 'tweak_size' cc_units of free storage. 
*/ - void (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx, - size_t key_len, const void *key, const void *tweak_key); + tweak_key must point to at least 'tweak_size' cc_units of free storage. + key and tweak_key must differ. + Returns nonzero on failure. + */ + int (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx, + size_t key_nbytes, const void *data_key, const void *tweak_key); + + void (*key_sched)(const struct ccmode_xts *xts, ccxts_ctx *ctx, + size_t key_nbytes, const void *data_key, const void *tweak_key); /* Set the tweak (sector number), the block within the sector zero. */ - void (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv); + int (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv); /* Encrypt blocks for a sector, clients must call set_tweak before calling this function. Return a pointer to the tweak buffer */ @@ -159,11 +165,11 @@ struct ccmode_gcm { int encdec; //is it encrypt or decrypt object size_t block_size; int (*init)(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx, - size_t key_len, const void *key); - int (*set_iv)(ccgcm_ctx *ctx, size_t iv_size, const void *iv); + size_t key_nbytes, const void *key); + int (*set_iv)(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv); int (*gmac)(ccgcm_ctx *ctx, size_t nbytes, const void *in); // could just be gcm with NULL out int (*gcm)(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out); - int (*finalize)(ccgcm_ctx *key, size_t tag_size, void *tag); + int (*finalize)(ccgcm_ctx *key, size_t tag_nbytes, void *tag); int (*reset)(ccgcm_ctx *ctx); const void *custom; }; @@ -194,8 +200,8 @@ cc_aligned_struct(16) ccomac_ctx; struct ccmode_omac { size_t size; /* first argument to ccomac_ctx_decl(). */ size_t block_size; - void (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx, - size_t tweak_len, size_t key_len, const void *key); + int (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx, + size_t tweak_len, size_t key_len, const void *key); int (*omac)(ccomac_ctx *ctx, size_t nblocks, const void *tweak, const void *in, void *out); const void *custom; diff --git a/EXTERNAL_HEADERS/corecrypto/ccrsa.h b/EXTERNAL_HEADERS/corecrypto/ccrsa.h new file mode 100644 index 000000000..97a88529a --- /dev/null +++ b/EXTERNAL_HEADERS/corecrypto/ccrsa.h @@ -0,0 +1,609 @@ +/* + * ccrsa.h + * corecrypto + * + * Created on 11/16/2010 + * + * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved. 
+ * + */ + +#ifndef _CORECRYPTO_CCRSA_H_ +#define _CORECRYPTO_CCRSA_H_ + +#include +#include +#include +#include +#include + +// Apple does not generate keys of greater than 4096 bits +// This limit is relaxed to accommodate potential third-party consumers +#define CCRSA_KEYGEN_MAX_NBITS 8192 + +// Program error: buffer too small or encrypted message is too small +#define CCRSA_INVALID_INPUT -1 +// Invalid crypto configuration: Hash length versus RSA key size +#define CCRSA_INVALID_CONFIG -2 +// The data is invalid (we won't say more for security +#define CCRSA_DECRYPTION_ERROR -3 + +#define CCRSA_ENCODING_ERROR -4 +#define CCRSA_DECODING_ERROR -5 +#define CCRSA_SIGNATURE_GEN_ERROR -6 + +struct ccrsa_full_ctx { + __CCZP_ELEMENTS_DEFINITIONS(pb_) +} CC_ALIGNED(CCN_UNIT_SIZE); + +struct ccrsa_pub_ctx { + __CCZP_ELEMENTS_DEFINITIONS(pb_) +} CC_ALIGNED(CCN_UNIT_SIZE); + +struct ccrsa_priv_ctx { + __CCZP_ELEMENTS_DEFINITIONS(pv_) +} CC_ALIGNED(CCN_UNIT_SIZE); + + +#if CORECRYPTO_USE_TRANSPARENT_UNION + typedef union { + cczp_t zp; + struct ccrsa_pub_ctx* pub; + struct ccrsa_full_ctx *full; + } ccrsa_full_ctx_t __attribute__((transparent_union)); + typedef struct ccrsa_full_ctx ccrsa_full_ctx; + typedef struct ccrsa_priv_ctx ccrsa_priv_ctx; + + typedef union { + cczp_t zp; + ccrsa_priv_ctx *priv; + } ccrsa_priv_ctx_t __attribute__((transparent_union)); + + +typedef ccrsa_full_ctx_t ccrsa_pub_ctx_t; +typedef struct ccrsa_pub_ctx ccrsa_pub_ctx; + +#else + typedef struct ccrsa_full_ctx* ccrsa_full_ctx_t; + typedef struct ccrsa_pub_ctx* ccrsa_pub_ctx_t; + typedef struct ccrsa_priv_ctx* ccrsa_priv_ctx_t; +#endif + + + +/* + public key cczp d=e^-1 mod phi(m) priv key cczp priv key cczq dp, dq, qinv + | | | | | + | | | | | + +-------+------+-------+------++------++-------+------+---------++-------+------+---------++-------+-------+---------+ + | zm_hd | m[n] |mr[n+1]| e[n] || d[n] || zp_hd |p[n/2]|pr[n/2+1]|| zq_hd |q[n/2]|qr[n/2+1]||dp[n/2]|dq[n/2]|qinv[n/2]| + +-------+------+-------+------++------++-------+------+---------++-------+------+---------++-------+-------+---------+ + */ + + /* Return the size of an ccec_full_ctx where each ccn is _size_ bytes. Get _size_ through ccn_sizeof(nbits) */ + +/* Return the size of an ccec_full_ctx where each ccn is _size_ bytes. */ + +#define ccrsa_pub_ctx_size(_size_) (sizeof(struct cczp) + CCN_UNIT_SIZE + 3 * (_size_)) +#define ccrsa_priv_ctx_size(_size_) ((sizeof(struct cczp) + CCN_UNIT_SIZE) * 2 + 7 * ccn_sizeof(ccn_bitsof_size(_size_)/2 + 1)) +#define ccrsa_full_ctx_size(_size_) (ccrsa_pub_ctx_size(_size_) + _size_ + ccrsa_priv_ctx_size(_size_)) + +/* Declare a fully scheduled rsa key. Size is the size in bytes each ccn in + the key. For example to declare (on the stack or in a struct) a 1021 bit + rsa public key named foo use ccrsa_pub_ctx_decl(ccn_sizeof(1021), foo). + */ +#define ccrsa_full_ctx_decl(_size_, _name_) cc_ctx_decl(struct ccrsa_full_ctx, ccrsa_full_ctx_size(_size_), _name_) +#define ccrsa_full_ctx_clear(_size_, _name_) cc_clear(ccrsa_full_ctx_size(_size_), _name_) +#define ccrsa_pub_ctx_decl(_size_, _name_) cc_ctx_decl(struct ccrsa_pub_ctx, ccrsa_pub_ctx_size(_size_), _name_) +#define ccrsa_pub_ctx_clear(_size_, _name_) cc_clear(ccrsa_pub_ctx_size(_size_), _name_) + +// accessors to ccrsa full and public key fields. */ +// The offsets are computed using pb_ccn. If any object other than ccrsa_full_ctx_t +// or ccrsa_pub_ctx_t is passed to the macros, compiler error is generated. 
+
+
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+//#define ccrsa_ctx_zm(_ctx_) (((ccrsa_pub_ctx_t)(_ctx_)).zp)
+
+    CC_CONST CC_INLINE cczp_t ccrsa_ctx_zm(ccrsa_full_ctx_t _ctx_) { return ((cczp_t)(struct cczp *)((_ctx_).full)); }
+    CC_CONST CC_INLINE cc_unit *ccrsa_ctx_m(ccrsa_full_ctx_t _ctx_){ return ((_ctx_).full->pb_ccn);}
+    #define ccrsa_ctx_n(_ctx_) (ccrsa_ctx_zm(_ctx_).zp->n)
+#else
+    #define ccrsa_ctx_zm(_ctx_) ((cczp_t)(_ctx_))
+    #define ccrsa_ctx_n(_ctx_) (ccrsa_ctx_zm(_ctx_)->n)
+    #define ccrsa_ctx_m(_ctx_) ((_ctx_)->pb_ccn)
+#endif
+
+#define ccrsa_ctx_e(_ctx_) (ccrsa_ctx_m(_ctx_) + 2 * ccrsa_ctx_n(_ctx_) + 1)
+#define ccrsa_ctx_d(_ctx_) (ccrsa_ctx_m(_ctx_) + 3 * ccrsa_ctx_n(_ctx_) + 1)
+
+// Accessors to ccrsa private key fields.
+// The offsets are computed using pv_ccn. If any object other than ccrsa_priv_ctx_t
+// is passed to the macros, a compiler error is generated.
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+
+/* rvalue accessors to ccrsa key fields. */
+CC_CONST CC_INLINE
+ccrsa_priv_ctx_t ccrsa_get_private_ctx_ptr(ccrsa_full_ctx_t fk) {
+    cc_unit *p = (cc_unit *)fk.full;
+    cc_size p_size = ccrsa_ctx_n(fk);
+    p += ccn_nof_size(ccrsa_pub_ctx_size(ccn_sizeof_n(p_size))) + p_size;
+    ccrsa_priv_ctx *priv = (ccrsa_priv_ctx *)p;
+    return (ccrsa_priv_ctx_t)priv;
+}
+
+CC_CONST CC_INLINE
+ccrsa_pub_ctx_t ccrsa_ctx_public(ccrsa_full_ctx_t fk) {
+    return (ccrsa_pub_ctx_t) fk.full;
+}
+
+#define ccrsa_ctx_private_zp(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp)
+#define ccrsa_ctx_private_zq(FK) ((cczp_t)((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 2 * ccrsa_ctx_private_zp(FK).zp->n + 1))
+#define ccrsa_ctx_private_dp(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 4 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_dq(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 5 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_qinv(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 6 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+
+#else
+#define ccrsa_ctx_private_zp(FK) ((cczp_t)ccrsa_get_private_ctx_ptr(FK))
+#define ccrsa_ctx_private_zq(FK) ((cczp_t)((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 2 * ccrsa_ctx_private_zp(FK)->n + 1))
+#define ccrsa_ctx_private_dp(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 4 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_dq(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 5 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_qinv(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 6 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+
+CC_CONST CC_INLINE
+ccrsa_priv_ctx_t ccrsa_get_private_ctx_ptr(ccrsa_full_ctx_t fk) {
+    ccrsa_priv_ctx_t priv = (ccrsa_priv_ctx_t)(ccrsa_ctx_d(fk)+ccrsa_ctx_n(fk));
+    return priv;
+}
+
+/*!
+ @function ccrsa_ctx_public
+ @abstract Gets the public key from a full key
+ @param fk RSA full key
+ @result Returns the RSA public key
+ */
+CC_CONST CC_INLINE
+ccrsa_pub_ctx_t ccrsa_ctx_public(ccrsa_full_ctx_t fk) {
+    return (ccrsa_pub_ctx_t) fk;
+}
+
+#endif
+
+/* Return exact key bit size */
+static inline size_t
+ccrsa_pubkeylength(ccrsa_pub_ctx_t pubk) {
+    return cczp_bitlen(ccrsa_ctx_zm(pubk));
+}
+
+/* PKCS1 pad_markers */
+#define CCRSA_PKCS1_PAD_SIGN 1
+#define CCRSA_PKCS1_PAD_ENCRYPT 2
+
+/* Initialize key based on modulus and e as cc_unit. key->zp.n must already be set.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void ccrsa_init_pub(ccrsa_pub_ctx_t key, const cc_unit *modulus,
+                    const cc_unit *e);
+
+/* Initialize key based on modulus and e as big endian byte array.
+   key->zp.n must already be set. */
+CC_NONNULL_TU((1)) CC_NONNULL((3, 5))
+int ccrsa_make_pub(ccrsa_pub_ctx_t pubk,
+                   size_t exp_nbytes, const uint8_t *exp,
+                   size_t mod_nbytes, const uint8_t *mod);
+
+/* Do a public key crypto operation (typically verify or encrypt) on in and put
+   the result in out. Both in and out should be cc_unit aligned and
+   ccrsa_ctx_n(key) units long. Clients should use ccn_read_uint() to
+   convert bytes to a cc_unit to use for this API. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int ccrsa_pub_crypt(ccrsa_pub_ctx_t key, cc_unit *out, const cc_unit *in);
+
+/* Generate an nbit rsa key pair in key, which should be allocated using
+   ccrsa_full_ctx_decl(ccn_sizeof(1024), rsa_ctx). The unsigned big endian
+   byte array exponent e of length e_size is used as the exponent. It's an
+   error to call this function with an exponent larger than nbits. rng
+   must be a pointer to an initialized struct ccrng_state. */
+CC_NONNULL_TU((2)) CC_NONNULL((4, 5))
+int ccrsa_generate_key(size_t nbits, ccrsa_full_ctx_t rsa_ctx,
+                       size_t e_size, const void *e, struct ccrng_state *rng) CC_WARN_RESULT;
+
+/* Generate an RSA key in conformance with the FIPS 186-4 standard */
+CC_NONNULL_TU((2)) CC_NONNULL((4, 5, 6))
+int
+ccrsa_generate_fips186_key(size_t nbits, ccrsa_full_ctx_t fk,
+                           size_t e_size, const void *eBytes,
+                           struct ccrng_state *rng1, struct ccrng_state *rng2) CC_WARN_RESULT;
+
+/* Construct an RSA key from fixed inputs in conformance with the FIPS 186-4 standard */
+CC_NONNULL_TU((16)) CC_NONNULL((3, 5, 7, 9, 11, 13, 15))
+int
+ccrsa_make_fips186_key(size_t nbits,
+                       const cc_size e_n, const cc_unit *e,
+                       const cc_size xp1Len, const cc_unit *xp1, const cc_size xp2Len, const cc_unit *xp2,
+                       const cc_size xpLen, const cc_unit *xp,
+                       const cc_size xq1Len, const cc_unit *xq1, const cc_size xq2Len, const cc_unit *xq2,
+                       const cc_size xqLen, const cc_unit *xq,
+                       ccrsa_full_ctx_t fk,
+                       cc_size *np, cc_unit *r_p,
+                       cc_size *nq, cc_unit *r_q,
+                       cc_size *nm, cc_unit *r_m,
+                       cc_size *nd, cc_unit *r_d);
+
+/*!
+ * @brief ccrsa_sign_pss() generates an RSASSA-PSS signature in PKCS#1 v2 format
+ *
+ * Note that in RSASSA-PSS, the salt length is part of the signature, as specified in ASN.1:
+ *     RSASSA-PSS-params ::= SEQUENCE {
+ *         hashAlgorithm      [0] HashAlgorithm      DEFAULT sha1,
+ *         maskGenAlgorithm   [1] MaskGenAlgorithm   DEFAULT mgf1SHA1,
+ *         saltLength         [2] INTEGER            DEFAULT 20,
+ *         trailerField       [3] TrailerField       DEFAULT trailerFieldBC
+ *     }
+ *
+ * FIPS 186-4 for RSASSA-PSS:
+ * .... Both signature schemes are approved for use, but additional constraints are imposed beyond those specified in PKCS #1 v2.1.....
+ *
+ * • If nlen = 1024 bits (i.e., 128 bytes), and the output length of the approved hash function output block is 512 bits (i.e., 64 bytes), then the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen – 2,
+ * • Otherwise, the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen, where hLen is the length of the hash function output block (in bytes).
+ *
+ * • CAVS test vectors are not very useful in the case of RSA-PSS, because they only validate the exponentiation part of the signature. See: http://csrc.nist.gov/groups/STM/cavp/documents/components/RSA2SP1VS.pdf
+ *
+ * @param key              The RSA key
+ * @param hashAlgorithm    The hash algorithm used to generate mHash from the original message.
+ *                         It is also used inside the PSS encoding function. This is also the hash function to be used in the mask generation function (MGF).
+ * @param MgfHashAlgorithm The hash algorithm for the mask generation function
+ * @param rng              Random number generator used to generate the salt in PSS encoding
+ * @param saltSize         Intended length of the salt
+ * @param hSize            Length of the message hash. Must be equal to hashAlgorithm->output_size
+ * @param mHash            The input that needs to be signed. This is the hash of message M with length of hLen
+ *
+ * @param sig              The signature output
+ * @param sigSize          The length of the generated signature in bytes, which equals the size of the RSA modulus.
+ * @return 0:ok, non-zero:error
+ */
+CC_NONNULL((2,3,5,7,8,9))
+int ccrsa_sign_pss(ccrsa_full_ctx_t key,
+                   const struct ccdigest_info* hashAlgorithm, const struct ccdigest_info* MgfHashAlgorithm,
+                   size_t saltSize, struct ccrng_state *rng,
+                   size_t hSize, const uint8_t *mHash,
+                   size_t *sigSize, uint8_t *sig);
+
+CC_NONNULL((2,3,5,7,9))
+int ccrsa_verify_pss(ccrsa_pub_ctx_t key,
+                     const struct ccdigest_info* di, const struct ccdigest_info* MgfDi,
+                     size_t digestSize, const uint8_t *digest,
+                     size_t sigSize, const uint8_t *sig,
+                     size_t saltSize, bool *valid);
+
+/*!
+ @function   ccrsa_sign_pkcs1v15
+ @abstract   RSA signature with PKCS#1 v1.5 format per PKCS#1 v2.2
+
+ @param      key        Full key
+ @param      oid        OID describing the type of digest passed in
+ @param      digest_len Byte length of the digest
+ @param      digest     Byte array of digest_len bytes containing the digest
+ @param      sig_len    Pointer to the number of bytes allocated for sig.
+                        On output, the exact size of the signature.
+ @param      sig        Pointer to the allocated buffer of size *sig_len
+                        for the output signature
+
+ @result     0 iff successful.
+
+ @discussion A NULL OID is a special case, required to support RFC 4346 where the padding
+ is based on SHA1+MD5. In general it is not recommended to use a NULL OID,
+ except when strictly required for interoperability.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((4, 5, 6))
+int ccrsa_sign_pkcs1v15(ccrsa_full_ctx_t key, const uint8_t *oid,
+                        size_t digest_len, const uint8_t *digest,
+                        size_t *sig_len, uint8_t *sig);
+
+
+/*!
+ @function   ccrsa_verify_pkcs1v15
+ @abstract   RSA signature verification with PKCS#1 v1.5 format per PKCS#1 v2.2
+
+ @param      key        Public key
+ @param      oid        OID describing the type of digest passed in
+ @param      digest_len Byte length of the digest
+ @param      digest     Byte array of digest_len bytes containing the digest
+ @param      sig_len    Number of bytes of the signature sig.
+ @param      sig        Pointer to the signature buffer of sig_len bytes
+ @param      valid      Output boolean, true if the signature is valid.
+
+ @result     0 iff successful.
+
+ @discussion A NULL OID is a special case, required to support RFC 4346 where the padding
+ is based on SHA1+MD5. In general it is not recommended to use a NULL OID,
+ except when strictly required for interoperability.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((4, 6, 7))
+int ccrsa_verify_pkcs1v15(ccrsa_pub_ctx_t key, const uint8_t *oid,
+                          size_t digest_len, const uint8_t *digest,
+                          size_t sig_len, const uint8_t *sig,
+                          bool *valid);
+
+/*!
+ @function   ccder_encode_rsa_pub_size
+ @abstract   Calculate size of public key export format data package.
+
+ @param      key        Public key
+
+ @result     Returns size required for encoding.
+ */
+
+CC_NONNULL_TU((1))
+size_t ccder_encode_rsa_pub_size(const ccrsa_pub_ctx_t key);
+
+/*!
+ @function   ccder_encode_rsa_pub
+ @abstract   Export a public key.
+
+ @param      key        Public key
+ @param      der        Beginning of output DER buffer
+ @param      der_end    End of output DER buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+uint8_t *ccder_encode_rsa_pub(const ccrsa_pub_ctx_t key, uint8_t *der, uint8_t *der_end);
+
+
+/*!
+ @function   ccder_encode_rsa_priv_size
+ @abstract   Calculate size of full key exported in PKCS#1 format.
+
+ @param      key        Full key
+
+ @result     Returns size required for encoding.
+ */
+
+CC_NONNULL_TU((1))
+size_t ccder_encode_rsa_priv_size(const ccrsa_full_ctx_t key);
+
+/*!
+ @function   ccder_encode_rsa_priv
+ @abstract   Export a full key in PKCS#1 format.
+
+ @param      key        Full key
+ @param      der        Beginning of output DER buffer
+ @param      der_end    End of output DER buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+uint8_t *ccder_encode_rsa_priv(const ccrsa_full_ctx_t key, const uint8_t *der, uint8_t *der_end);
+
+/*!
+ @function   ccder_decode_rsa_pub_n
+ @abstract   Calculate "n" for a public key imported from a data package.
+             PKCS #1 format
+
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     The "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_pub_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function   ccder_decode_rsa_pub
+ @abstract   Import a public RSA key from a package in public key format.
+             PKCS #1 format
+
+ @param      key        Public key (n must be set)
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_pub(const ccrsa_pub_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function   ccder_decode_rsa_pub_x509_n
+ @abstract   Calculate "n" for a public key imported from a data package in x509 format.
+
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     The "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_pub_x509_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function   ccder_decode_rsa_pub_x509
+ @abstract   Import a public RSA key from a package in x509 format.
+
+ @param      key        Public key (n must be set)
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_pub_x509(const ccrsa_pub_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+
+/*!
+ @function   ccder_decode_rsa_priv_n
+ @abstract   Calculate "n" for a private key imported from a data package.
+
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     The "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_priv_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function   ccder_decode_rsa_priv
+ @abstract   Import a private RSA key from a package in PKCS#1 format.
+
+ @param      key        Full key (n must be set)
+ @param      der        Beginning of input DER buffer
+ @param      der_end    End of input DER buffer
+
+ @result     Key is initialized using the data in the private key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_priv(const ccrsa_full_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function   ccrsa_export_pub_size
+ @abstract   Calculate size of public key exported data package.
+
+ @param      key        Public key
+
+ @result     Returns size required for encoding.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1))
+size_t ccrsa_export_pub_size(const ccrsa_pub_ctx_t key) {
+    return ccder_encode_rsa_pub_size(key);
+}
+
+/*!
+ @function   ccrsa_export_pub
+ @abstract   Export a public key in public key format.
+
+ @param      key        Public key
+ @param      out_len    Allocated size
+ @param      out        Output buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_export_pub(const ccrsa_pub_ctx_t key, size_t out_len, uint8_t *out);
+/*!
+ @function   ccrsa_import_pub_n
+ @abstract   Calculate "n" for a public key imported from a data package.
+
+ @param      inlen      Length of public key package data
+ @param      der        Pointer to public key package data
+
+ @result     The "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL((2))
+cc_size ccrsa_import_pub_n(size_t inlen, const uint8_t *der) {
+    cc_size size = ccder_decode_rsa_pub_x509_n(der, der + inlen);
+    if(size == 0) {
+        size = ccder_decode_rsa_pub_n(der, der + inlen);
+    }
+    return size;
+}
+
+/*!
+ @function   ccrsa_import_pub
+ @abstract   Import a public RSA key from a package in public key format.
+
+ @param      key        Public key (n must be set)
+ @param      inlen      Length of public key package data
+ @param      der        Pointer to public key package data
+
+ @result     Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_import_pub(ccrsa_pub_ctx_t key, size_t inlen, const uint8_t *der);
+
+/*!
+ @function   ccrsa_export_priv_size
+ @abstract   Calculate size of full key exported in PKCS#1 format.
+
+ @param      key        Full key
+
+ @result     Returns size required for encoding.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1))
+size_t ccrsa_export_priv_size(const ccrsa_full_ctx_t key) {
+    return ccder_encode_rsa_priv_size(key);
+}
+
+/*!
+ @function   ccrsa_export_priv
+ @abstract   Export a full key in PKCS#1 format.
+
+ @param      key        Full key
+ @param      out_len    Allocated size
+ @param      out        Output buffer
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_export_priv(const ccrsa_full_ctx_t key, size_t out_len, uint8_t *out) {
+    return (ccder_encode_rsa_priv(key, out, out+out_len) != out);
+}
+
+/*!
+ @function   ccrsa_import_priv_n
+ @abstract   Calculate "n" for a full key imported from PKCS#1 package data.
+
+ @param      inlen      Length of PKCS#1 package data
+ @param      der        Pointer to PKCS#1 package data
+
+ @result     The "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL((2))
+cc_size ccrsa_import_priv_n(size_t inlen, const uint8_t *der) {
+    return ccder_decode_rsa_priv_n(der, der + inlen);
+}
+
+/*!
+ @function   ccrsa_import_priv
+ @abstract   Import a full RSA key from a package in PKCS#1 format.
+
+ @param      key        Full key (n must be set)
+ @param      inlen      Length of PKCS#1 package data
+ @param      der        Pointer to PKCS#1 package data
+
+ @result     Key is initialized using the data in the PKCS#1 message.
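+
+   Typical use (an illustrative sketch, not from the original header; der and
+   inlen are assumed to hold a PKCS#1 private key blob):
+
+       cc_size n = ccrsa_import_priv_n(inlen, der);
+       ccrsa_full_ctx_decl(ccn_sizeof_n(n), key);
+       ccrsa_ctx_n(key) = n;
+       int rc = ccrsa_import_priv(key, inlen, der);
+       // a non-zero rc means the import failed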
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_import_priv(ccrsa_full_ctx_t key, size_t inlen, const uint8_t *der) {
+    return (ccder_decode_rsa_priv(key, der, der+inlen) == NULL);
+}
+
+
+CC_NONNULL_TU((1)) CC_NONNULL2
+int ccrsa_get_pubkey_components(const ccrsa_pub_ctx_t pubkey, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength);
+
+CC_NONNULL_TU((1)) CC_NONNULL2
+int ccrsa_get_fullkey_components(const ccrsa_full_ctx_t key, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength,
+                                 uint8_t *p, size_t *pLength, uint8_t *q, size_t *qLength);
+
+
+/*!
+ @function   ccrsa_dump_public_key
+ @abstract   Print an RSA public key to the console (printf)
+
+ @param      key        Public key
+ */
+void ccrsa_dump_public_key(ccrsa_pub_ctx_t key);
+
+/*!
+ @function   ccrsa_dump_full_key
+ @abstract   Print an RSA private key to the console (printf)
+
+ @param      key        Full key
+ */
+void ccrsa_dump_full_key(ccrsa_full_ctx_t key);
+
+#endif /* _CORECRYPTO_CCRSA_H_ */
diff --git a/EXTERNAL_HEADERS/corecrypto/ccsha2.h b/EXTERNAL_HEADERS/corecrypto/ccsha2.h
index 1efca569d..37a646ec6 100644
--- a/EXTERNAL_HEADERS/corecrypto/ccsha2.h
+++ b/EXTERNAL_HEADERS/corecrypto/ccsha2.h
@@ -52,9 +52,11 @@ extern const struct ccdigest_info ccsha512_vng_intel_AVX2_di;
 extern const struct ccdigest_info ccsha512_vng_intel_AVX1_di;
 extern const struct ccdigest_info ccsha512_vng_intel_SupplementalSSE3_di;
 #endif
+extern const struct ccdigest_info ccsha224_vng_intel_SupplementalSSE3_di;
 extern const struct ccdigest_info ccsha256_vng_intel_SupplementalSSE3_di;
 #endif
 #if CCSHA2_VNG_ARMV7NEON
+extern const struct ccdigest_info ccsha224_vng_armv7neon_di;
 extern const struct ccdigest_info ccsha256_vng_armv7neon_di;
 extern const struct ccdigest_info ccsha384_vng_arm64_di;
 extern const struct ccdigest_info ccsha384_vng_armv7neon_di;
@@ -67,12 +69,6 @@ extern const uint64_t ccsha512_K[80];
 /* SHA224 */
 #define CCSHA224_OUTPUT_SIZE 28
 extern const struct ccdigest_info ccsha224_ltc_di;
-#if CCSHA2_VNG_INTEL
-extern const struct ccdigest_info ccsha224_vng_intel_SupplementalSSE3_di;
-#endif
-#if CCSHA2_VNG_ARMV7NEON
-extern const struct ccdigest_info ccsha224_vng_armv7neon_di;
-#endif
 
 /* SHA512 */
 #define CCSHA512_BLOCK_SIZE 128
diff --git a/EXTERNAL_HEADERS/corecrypto/cczp.h b/EXTERNAL_HEADERS/corecrypto/cczp.h
new file mode 100644
index 000000000..f19891bd8
--- /dev/null
+++ b/EXTERNAL_HEADERS/corecrypto/cczp.h
@@ -0,0 +1,339 @@
+/*
+ * cczp.h
+ * corecrypto
+ *
+ * Created on 11/16/2010
+ *
+ * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved.
+ *
+ */
+
+#ifndef _CORECRYPTO_CCZP_H_
+#define _CORECRYPTO_CCZP_H_
+
+#include
+#include
+
+/*
+ Don't use the cczp_hd struct directly, except in static tables such as elliptic curve parameter definitions.
+
+ Declare cczp objects using cczp_decl_n(). It allocates cc_unit arrays of the length returned by either cczp_nof_n() or cczp_short_nof_n().
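+
+ For instance (an illustrative sketch, not from the original header; p is
+ assumed to be a prime of n units, e.g. read with ccn_read_uint()):
+
+     cczp_decl_n(n, zp);
+     CCZP_N(zp) = n;
+     ccn_set(n, CCZP_PRIME(zp), p);
+     cczp_init(zp);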
+*/
+
+struct cczp;
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+
+typedef union {
+    cc_unit *u;
+    struct cczp *zp;
+    //cczp_const_t czp; //for automatic type cast
+    //struct cczp_prime *prime;
+} cczp_t __attribute__((transparent_union));
+
+typedef union {
+    const cc_unit *u;
+    const struct cczp *zp;
+    //const struct cczp_prime *prime;
+    cczp_t _nczp;
+} cczp_const_t __attribute__((transparent_union));
+
+#else
+    typedef struct cczp* cczp_t;
+    typedef const struct cczp* cczp_const_t;
+#endif
+typedef void (*ccmod_func_t)(cczp_const_t zp, cc_unit *r, const cc_unit *s, cc_ws_t ws);
+
+// Keep the cczp_hd and cczp structures consistent.
+// cczp_hd is typecast to cczp to read EC curve params.
+// The options field specifies Montgomery arithmetic, bit field, etc.
+// Make sure n is the first element; see the ccrsa_ctx_n macro.
+#define __CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \
+cc_size pre ## n;\
+cc_unit pre ## options;\
+ccmod_func_t pre ## mod_prime;
+
+#define __CCZP_ELEMENTS_DEFINITIONS(pre) \
+__CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \
+cc_unit pre ## ccn[];
+
+// cczp_hd must be defined separately, without the variable length array ccn[],
+// because it is used in structures such as ccdh_gp_decl_n.
+struct cczp_hd{
+    __CCZP_HEADER_ELEMENTS_DEFINITIONS()
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+struct cczp {
+    __CCZP_ELEMENTS_DEFINITIONS()
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+
+/* Return the size of a cczp where each ccn is _size_ bytes. */
+#define cczp_size(_size_) (sizeof(struct cczp) + ccn_sizeof_n(1) + 2 * (_size_))
+
+/* Return the number of units that a struct cczp needs for a prime
+   of N units. This is large enough for all operations. */
+#define cczp_nof_n(_n_) (ccn_nof_size(sizeof(struct cczp)) + 1 + 2 * (_n_))
+
+/* Return the number of units that a struct cczp needs for a prime
+   of _n_ units. The _short variant does not have room for CCZP_RECIP,
+   so it cannot be used with cczp_mod, cczp_mul, cczp_sqr. It can be used
+   with cczp_add, cczp_sub, cczp_div2, cczp_mod_inv. */
+#define cczp_short_nof_n(_n_) (ccn_nof_size(sizeof(struct cczp)) + (_n_))
+
+#define cczp_decl_n(_n_, _name_)  cc_ctx_decl(struct cczp, ccn_sizeof_n(cczp_nof_n(_n_)), _name_)
+#define cczp_short_decl_n(_n_, _name_) cc_ctx_decl(struct cczp_short, ccn_sizeof_n(cczp_short_nof_n(_n_)), _name_)
+
+#define cczp_clear_n(_n_, _name_) cc_clear(ccn_sizeof_n(cczp_nof_n(_n_)), _name_)
+#define cczp_short_clear_n(_n_, _name_) cc_clear(ccn_sizeof_n(cczp_short_nof_n(_n_)), _name_)
+
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+    #define CCZP_N(ZP) (((cczp_t)(ZP)).zp->n)
+    #define CCZP_MOD(ZP) (((cczp_t)(ZP)).zp->mod_prime)
+    #define CCZP_PRIME(ZP) (((cczp_t)(ZP)).zp->ccn)
+    #define CCZP_RECIP(ZP) (((cczp_t)(ZP)).zp->ccn + cczp_n(ZP))
+    #define CCZP_OPS(ZP) ((ZP).zp->options)
+    #define CCZP_MOD_PRIME(ZP) CCZP_MOD(ZP)
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_size cczp_n(cczp_const_t zp) {
+    return zp.zp->n;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_unit cczp_options(cczp_const_t zp) {
+    return zp.zp->options;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline ccmod_func_t cczp_mod_prime(cczp_const_t zp) {
+    return zp.zp->mod_prime;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline const cc_unit *cczp_prime(cczp_const_t zp) {
+    return zp.zp->ccn;
+}
+
+/* Return a pointer to the Reciprocal or Montgomery constant of zp, which is
+   allocated cczp_n(zp) + 1 units long.
+ */
+CC_CONST CC_NONNULL_TU((1))
+
+static inline const cc_unit *cczp_recip(cczp_const_t zp) {
+    return zp.zp->ccn + zp.zp->n;
+}
+
+#else
+    #define CCZP_N(ZP) ((ZP)->n)
+    #define CCZP_MOD(ZP) ((ZP)->mod_prime)
+    #define CCZP_MOD_PRIME(ZP) CCZP_MOD(ZP)
+    #define CCZP_PRIME(ZP) ((ZP)->ccn)
+    #define CCZP_RECIP(ZP) ((ZP)->ccn + CCZP_N(ZP))
+    #define CCZP_OPS(ZP) ((ZP)->options)
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_size cczp_n(cczp_const_t zp) {
+    return zp->n;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_unit cczp_options(cczp_const_t zp) {
+    return zp->options;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline ccmod_func_t cczp_mod_prime(cczp_const_t zp) {
+    return zp->mod_prime;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline const cc_unit *cczp_prime(cczp_const_t zp) {
+    return zp->ccn;
+}
+
+/* Return a pointer to the Reciprocal or Montgomery constant of zp, which is
+   allocated cczp_n(zp) + 1 units long. */
+CC_CONST CC_NONNULL_TU((1))
+
+static inline const cc_unit *cczp_recip(cczp_const_t zp) {
+    return zp->ccn + zp->n;
+}
+
+#endif
+
+
+CC_CONST CC_NONNULL_TU((1))
+CC_INLINE size_t cczp_bitlen(cczp_const_t zp) {
+    return ccn_bitlen(cczp_n(zp), cczp_prime(zp));
+}
+
+
+/* Ensure both cczp_mod_prime(zp) and cczp_recip(zp) are valid. cczp_n and
+   cczp_prime must have been previously initialized. */
+CC_NONNULL_TU((1))
+void cczp_init(cczp_t zp);
+
+/* Compute r = s2n mod cczp_prime(zp). Will write cczp_n(zp)
+   units to r and reads 2 * cczp_n(zp) units from s2n. If r and s2n are not
+   identical they must not overlap. Before calling this function either
+   cczp_init(zp) must have been called or both CCZP_MOD_PRIME((cc_unit *)zp)
+   and CCZP_RECIP((cc_unit *)zp) must be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_mod(cczp_const_t zp, cc_unit *r, const cc_unit *s2n, cc_ws_t ws);
+
+/* Compute r = s mod cczp_prime(zp). Will write cczp_n(zp)
+   units to r and reads ns units from s. If r and s are not
+   identical they must not overlap. Before calling this function either
+   cczp_init(zp) must have been called or both CCZP_MOD_PRIME((cc_unit *)zp)
+   and CCZP_RECIP((cc_unit *)zp) must be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 4))
+
+int cczp_modn(cczp_const_t zp, cc_unit *r, cc_size ns, const cc_unit *s);
+
+/* Compute r = x * y mod cczp_prime(zp). Will write cczp_n(zp) units to r
+   and reads cczp_n(zp) units from both x and y. If r and x are not
+   identical they must not overlap. The same holds for r and y. Before
+   calling this function either cczp_init(zp) must have been called or both
+   CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must be
+   initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_mul(cczp_const_t zp, cc_unit *t, const cc_unit *x, const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_mul_ws(cczp_const_t zp, cc_unit *t, const cc_unit *x, const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x * x mod cczp_prime(zp). Will write cczp_n(zp) units to r
+   and reads cczp_n(zp) units from x. If r and x are not identical they must
+   not overlap. Before calling this function either cczp_init(zp) must have
+   been called or both CCZP_MOD_PRIME((cc_unit *)zp) and
+   CCZP_RECIP((cc_unit *)zp) must be initialized some other way.
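+
+   For instance (an illustrative sketch, not from the original header; r and
+   x are assumed to be cczp_n(zp) units wide):
+
+       cczp_sqr(zp, r, x);   // r = (x * x) mod cczp_prime(zp)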
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_sqr(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_sqr_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x, cc_ws_t ws);
+
+/* Compute r = x^(1/2) mod cczp_prime(zp). Will write cczp_n(zp) units to r
+   and reads cczp_n(zp) units from x. If r and x are not identical they must
+   not overlap. Before calling this function either cczp_init(zp) must have
+   been called or both CCZP_MOD_PRIME((cc_unit *)zp) and
+   CCZP_RECIP((cc_unit *)zp) must be initialized some other way.
+   Only supports primes p = 3 mod 4. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int cczp_sqrt(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/* Compute r = m ^ e mod cczp_prime(zp), using the Montgomery ladder.
+   - writes cczp_n(zp) units to r
+   - reads cczp_n(zp) units from m and e
+   - if r and m are not identical they must not overlap.
+   - r and e must not overlap nor be identical.
+   - before calling this function either cczp_init(zp) must have been called
+     or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+     be initialized some other way.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_power(cczp_const_t zp, cc_unit *r, const cc_unit *m,
+                const cc_unit *e);
+
+/* Compute r = m ^ e mod cczp_prime(zp), using Square Square Multiply Always.
+   - writes cczp_n(zp) units to r
+   - reads cczp_n(zp) units from m and e
+   - if r and m are not identical they must not overlap.
+   - r and e must not overlap nor be identical.
+   - before calling this function either cczp_init(zp) must have been called
+     or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+     be initialized some other way.
+
+   Important: This function is intended to be constant time, but is more likely
+   to leak information through memory caching. Use it only with randomized input.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+int cczp_power_ssma(cczp_const_t zp, cc_unit *r, const cc_unit *m,
+                    const cc_unit *e);
+
+int cczp_power_ssma_ws(cc_ws_t ws, cczp_const_t zp, cc_unit *r, const cc_unit *s, const cc_unit *e);
+
+/* Compute r = m ^ e mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+   reads cczp_n(zp) units from m. Reads ebitlen bits from e.
+   m must be <= cczp_prime(zp). If r and m are not identical they must not
+   overlap. r and e must not overlap nor be identical.
+   Before calling this function either cczp_init(zp) must have been called
+   or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+   be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 5))
+void cczp_powern(cczp_const_t zp, cc_unit *r, const cc_unit *s,
+                 size_t ebitlen, const cc_unit *e);
+
+/* Compute r = x + y mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+   reads cczp_n(zp) units from x and y. If r and x are not identical
+   they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+   Can be used with a cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_add(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+              const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_add_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+                 const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x - y mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+   reads cczp_n(zp) units from x and y. If r and x are not identical
+   they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+   Can be used with a cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_sub(cczp_const_t zp, cc_unit *r, const cc_unit *x, const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_sub_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+                 const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x / 2 mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+   reads cczp_n(zp) units from x. If r and x are not identical
+   they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+   Can be used with a cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_div2(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/* Compute q = a_2n / cczp_prime(zd) and r = a_2n mod cczp_prime(zd). Will
+   write cczp_n(zd) units to q and r. Will read 2 * cczp_n(zd) units from a.
+   If r and a are not identical they must not overlap. Before calling this
+   function either cczp_init(zp) must have been called or both
+   CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must be
+   initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_div(cczp_const_t zd, cc_unit *q, cc_unit *r, const cc_unit *a_2n);
+
+
+/*!
+ @brief cczp_inv(zp, r, x) computes r = x^-1 (mod p), where p = cczp_prime(zp).
+ @discussion It is a general function and works for any p. It validates the inputs. r and x can overlap. It writes n = cczp_n(zp) units to r, and reads n units from x and p. The output r is overwritten only if the inverse is correctly computed. This function is not constant time in the absolute sense, but it does not have data-dependent 'if' statements in the code.
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking cczp_inv().
+ @param x input big integer
+ @param r output big integer
+ @return 0 if the inverse exists and was correctly computed.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+
+int cczp_inv(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/*!
+ @brief cczp_inv_odd(zp, r, x) computes r = x^-1 (mod p), where p = cczp_prime(zp) is an odd number.
+ @discussion r and x can overlap.
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking.
+ @param x input big integer
+ @param r output big integer
+ @return 0 if successful
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int cczp_inv_odd(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/*!
+ @brief cczp_inv_field(zp, r, x) computes r = x^-1 (mod p), where p = cczp_prime(zp) is a prime number.
+ @discussion r and x must NOT overlap. The execution time of the function is independent of the value of the input x. It works only if p is a field, that is, when p is a prime. It supports both the Montgomery and non-Montgomery forms of zp. It leaks the value of the prime and should only be used for public (not secret) primes (e.g. Elliptic Curves).
+
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking cczp_inv_field().
+ @param x input big integer
+ @param r output big integer
+ @return 0 if the inverse exists and was correctly computed.
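+
+ For instance (an illustrative sketch, not from the original header; r and
+ x are assumed to be distinct buffers of cczp_n(zp) units):
+
+     int rc = cczp_inv_field(zp, r, x);
+     // a non-zero rc means no inverse was computed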
+ */ +CC_NONNULL_TU((1)) CC_NONNULL((2, 3)) +int cczp_inv_field(cczp_const_t zp, cc_unit *r, const cc_unit *x); + +#endif /* _CORECRYPTO_CCZP_H_ */ diff --git a/README.md b/README.md index 5a0601b8e..2d3ba49a8 100644 --- a/README.md +++ b/README.md @@ -268,7 +268,8 @@ want to export a function only to kernel level but not user level. header files installed in all the paths described above in (1) will not have code enclosed within this macro. - b. `KERNEL_PRIVATE` : Same as PRIVATE + b. `KERNEL_PRIVATE` : If true, code is available to all of the xnu kernel and Apple + internal kernel extensions. c. `BSD_KERNEL_PRIVATE` : If true, code is available to the xnu/bsd part of the kernel and is not available to rest of the kernel, kernel extensions diff --git a/bsd/dev/dtrace/dtrace.c b/bsd/dev/dtrace/dtrace.c index c90a465a9..30d41336c 100644 --- a/bsd/dev/dtrace/dtrace.c +++ b/bsd/dev/dtrace/dtrace.c @@ -580,7 +580,7 @@ static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, dtrace_state_t *, dtrace_mstate_t *); static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, dtrace_optval_t); -static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); +static int dtrace_ecb_create_enable(dtrace_probe_t *, void *, void *); static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); static int dtrace_canload_remains(uint64_t, size_t, size_t *, dtrace_mstate_t *, dtrace_vstate_t *); @@ -7464,7 +7464,7 @@ dtrace_match_nonzero(const char *s, const char *p, int depth) static int dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, - zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) + zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *, void *), void *arg1, void *arg2) { dtrace_probe_t template, *probe; dtrace_hash_t *hash = NULL; @@ -7480,7 +7480,7 @@ dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, if (pkp->dtpk_id != DTRACE_IDNONE) { if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { - if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL) + if ((*matched)(probe, arg1, arg2) == DTRACE_MATCH_FAIL) return (DTRACE_MATCH_FAIL); nmatched++; } @@ -7528,7 +7528,7 @@ dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, nmatched++; - if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { + if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) { if (rc == DTRACE_MATCH_FAIL) return (DTRACE_MATCH_FAIL); break; @@ -7551,7 +7551,7 @@ dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, nmatched++; - if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { + if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) { if (rc == DTRACE_MATCH_FAIL) return (DTRACE_MATCH_FAIL); break; @@ -8117,9 +8117,10 @@ dtrace_probe_lookup_id(dtrace_id_t id) } static int -dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) +dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg1, void *arg2) { - *((dtrace_id_t *)arg) = probe->dtpr_id; +#pragma unused(arg2) + *((dtrace_id_t *)arg1) = probe->dtpr_id; return (DTRACE_MATCH_DONE); } @@ -8148,7 +8149,7 @@ dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod, lck_mtx_lock(&dtrace_lock); match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, - dtrace_probe_lookup_match, &id); + dtrace_probe_lookup_match, &id, NULL); lck_mtx_unlock(&dtrace_lock); ASSERT(match == 1 || match == 0); @@ -8287,7 +8288,7 @@ dtrace_probe_foreach(uintptr_t offs) } static int 
-dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) +dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab, dtrace_ecbdesc_t *ep) { dtrace_probekey_t pkey; uint32_t priv; @@ -8303,7 +8304,7 @@ dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) * If we're passed a NULL description, we're being asked to * create an ECB with a NULL probe. */ - (void) dtrace_ecb_create_enable(NULL, enab); + (void) dtrace_ecb_create_enable(NULL, enab, ep); return (0); } @@ -8312,7 +8313,7 @@ dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab) &priv, &uid, &zoneid); return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, - enab)); + enab, ep)); } /* @@ -8344,7 +8345,7 @@ dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, } static void -dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) +dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; @@ -8393,7 +8394,7 @@ dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) */ dtrace_dofprov2hprov(&dhpv, provider, strtab); - if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) + if ((parg = mops->dtms_provide_proc(meta->dtm_arg, &dhpv, p)) == NULL) return; meta->dtm_count++; @@ -8444,7 +8445,7 @@ dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) } static void -dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) +dtrace_helper_provide(dof_helper_t *dhp, proc_t *p) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; @@ -8459,12 +8460,12 @@ dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) if (sec->dofs_type != DOF_SECT_PROVIDER) continue; - dtrace_helper_provide_one(dhp, sec, pid); + dtrace_helper_provide_one(dhp, sec, p); } } static void -dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) +dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; @@ -8486,13 +8487,13 @@ dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) */ dtrace_dofprov2hprov(&dhpv, provider, strtab); - mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); + mops->dtms_remove_proc(meta->dtm_arg, &dhpv, p); meta->dtm_count--; } static void -dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) +dtrace_helper_provider_remove(dof_helper_t *dhp, proc_t *p) { uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; dof_hdr_t *dof = (dof_hdr_t *)daddr; @@ -8507,7 +8508,7 @@ dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) if (sec->dofs_type != DOF_SECT_PROVIDER) continue; - dtrace_helper_provider_remove_one(dhp, sec, pid); + dtrace_helper_provider_remove_one(dhp, sec, p); } } @@ -8539,8 +8540,8 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, if (mops == NULL || mops->dtms_create_probe == NULL || - mops->dtms_provide_pid == NULL || - mops->dtms_remove_pid == NULL) { + mops->dtms_provide_proc == NULL || + mops->dtms_remove_proc == NULL) { cmn_err(CE_WARN, "failed to register meta-register %s: " "invalid ops", name); return (EINVAL); @@ -8586,8 +8587,12 @@ dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, while (help != NULL) { for (i = 0; i < help->dthps_nprovs; i++) { + proc_t *p = proc_find(help->dthps_pid); + if (p == PROC_NULL) + continue; 
dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, - help->dthps_pid); + p); + proc_rele(p); } next = help->dthps_next; @@ -10824,15 +10829,16 @@ dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, } static int -dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) +dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg1, void *arg2) { dtrace_ecb_t *ecb; - dtrace_enabling_t *enab = arg; + dtrace_enabling_t *enab = arg1; + dtrace_ecbdesc_t *ep = arg2; dtrace_state_t *state = enab->dten_vstate->dtvs_state; ASSERT(state != NULL); - if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { + if (probe != NULL && ep != NULL && probe->dtpr_gen < ep->dted_probegen) { /* * This probe was created in a generation for which this * enabling has previously created ECBs; we don't want to @@ -11730,7 +11736,7 @@ dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched, dtrace_match_cond_ * If a provider failed to enable a probe then get out and * let the consumer know we failed. */ - if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0) + if ((matched = dtrace_probe_enable(&ep->dted_probe, enab, ep)) < 0) return (EBUSY); total_matched += matched; @@ -11757,9 +11763,10 @@ dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched, dtrace_match_cond_ return (enab->dten_error); } + + ep->dted_probegen = dtrace_probegen; } - enab->dten_probegen = dtrace_probegen; if (nmatched != NULL) *nmatched = total_matched; @@ -11840,7 +11847,7 @@ dtrace_enabling_prime(dtrace_state_t *state) for (i = 0; i < enab->dten_ndesc; i++) { enab->dten_current = enab->dten_desc[i]; - (void) dtrace_probe_enable(NULL, enab); + (void) dtrace_probe_enable(NULL, enab, NULL); } enab->dten_primed = 1; @@ -14274,7 +14281,7 @@ dtrace_helper_destroygen(proc_t* p, int gen) if (dtrace_meta_pid != NULL) { ASSERT(dtrace_deferred_pid == NULL); dtrace_helper_provider_remove(&prov->dthp_prov, - p->p_pid); + p); } lck_mtx_unlock(&dtrace_meta_lock); @@ -14417,7 +14424,7 @@ dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, lck_mtx_unlock(&dtrace_lock); - dtrace_helper_provide(dofhp, p->p_pid); + dtrace_helper_provide(dofhp, p); } else { /* @@ -14430,7 +14437,7 @@ dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, - p->p_pid); + p); } } @@ -15301,7 +15308,7 @@ dtrace_helpers_destroy(proc_t* p) for (i = 0; i < help->dthps_nprovs; i++) { dtrace_helper_provider_remove( - &help->dthps_provs[i]->dthp_prov, p->p_pid); + &help->dthps_provs[i]->dthp_prov, p); } } else { lck_mtx_lock(&dtrace_lock); diff --git a/bsd/dev/dtrace/fasttrap.c b/bsd/dev/dtrace/fasttrap.c index 359949319..d25f82dba 100644 --- a/bsd/dev/dtrace/fasttrap.c +++ b/bsd/dev/dtrace/fasttrap.c @@ -176,9 +176,9 @@ static lck_mtx_t fasttrap_count_mtx; /* lock on ref count */ static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t); static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t); -static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *, +static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *, const dtrace_pattr_t *); -static void fasttrap_provider_retire(pid_t, const char *, int); +static void fasttrap_provider_retire(proc_t*, const char *, int); static void fasttrap_provider_free(fasttrap_provider_t *); static fasttrap_proc_t *fasttrap_proc_lookup(pid_t); @@ -547,15 +547,15 @@ 
fasttrap_exec_exit(proc_t *p) * We clean up the pid provider for this process here; user-land * static probes are handled by the meta-provider remove entry point. */ - fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0); + fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0); /* * APPLE NOTE: We also need to remove any aliased providers. * XXX optimization: track which provider types are instantiated * and only retire as needed. */ - fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0); - fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0); + fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0); + fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0); /* * This should be called after it is no longer possible for a user @@ -1387,19 +1387,20 @@ fasttrap_proc_release(fasttrap_proc_t *proc) } /* - * Lookup a fasttrap-managed provider based on its name and associated pid. + * Lookup a fasttrap-managed provider based on its name and associated proc. + * A reference to the proc must be held for the duration of the call. * If the pattr argument is non-NULL, this function instantiates the provider * if it doesn't exist otherwise it returns NULL. The provider is returned * with its lock held. */ static fasttrap_provider_t * -fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name, +fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name, const dtrace_pattr_t *pattr) { + pid_t pid = p->p_pid; fasttrap_provider_t *fp, *new_fp = NULL; fasttrap_bucket_t *bucket; char provname[DTRACE_PROVNAMELEN]; - proc_t *p; cred_t *cred; ASSERT(strlen(name) < sizeof (fp->ftp_name)); @@ -1429,16 +1430,12 @@ fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, cons lck_mtx_unlock(&bucket->ftb_mtx); /* - * Make sure the process exists, isn't a child created as the result + * Make sure the process isn't a child created as the result * of a vfork(2), and isn't a zombie (but may be in fork). 
*/ - if ((p = proc_find(pid)) == NULL) { - return NULL; - } proc_lock(p); if (p->p_lflag & (P_LINVFORK | P_LEXIT)) { proc_unlock(p); - proc_rele(p); return (NULL); } @@ -1460,11 +1457,10 @@ fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, cons cred = p->p_ucred; // lck_mtx_unlock(&p->p_crlock); proc_unlock(p); - proc_rele(p); new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP); ASSERT(new_fp != NULL); - new_fp->ftp_pid = pid; + new_fp->ftp_pid = p->p_pid; new_fp->ftp_proc = fasttrap_proc_lookup(pid); new_fp->ftp_provider_type = provider_type; @@ -1578,7 +1574,7 @@ fasttrap_provider_free(fasttrap_provider_t *provider) } static void -fasttrap_provider_retire(pid_t pid, const char *name, int mprov) +fasttrap_provider_retire(proc_t *p, const char *name, int mprov) { fasttrap_provider_t *fp; fasttrap_bucket_t *bucket; @@ -1586,11 +1582,11 @@ fasttrap_provider_retire(pid_t pid, const char *name, int mprov) ASSERT(strlen(name) < sizeof (fp->ftp_name)); - bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)]; + bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)]; lck_mtx_lock(&bucket->ftb_mtx); for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) { - if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 && + if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 && !fp->ftp_retired) break; } @@ -1633,7 +1629,7 @@ fasttrap_provider_retire(pid_t pid, const char *name, int mprov) /* * We don't have to worry about invalidating the same provider twice - * since fasttrap_provider_lookup() will ignore provider that have + * since fasttrap_provider_lookup() will ignore providers that have * been marked as retired. */ dtrace_invalidate(provid); @@ -1658,6 +1654,7 @@ fasttrap_uint64_cmp(const void *ap, const void *bp) static int fasttrap_add_probe(fasttrap_probe_spec_t *pdata) { + proc_t *p; fasttrap_provider_t *provider; fasttrap_probe_t *pp; fasttrap_tracepoint_t *tp; @@ -1702,10 +1699,15 @@ fasttrap_add_probe(fasttrap_probe_spec_t *pdata) return (EINVAL); } - if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type, + p = proc_find(pdata->ftps_pid); + if (p == PROC_NULL) + return (ESRCH); + + if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type, provider_name, &pid_attr)) == NULL) return (ESRCH); + proc_rele(p); /* * Increment this reference count to indicate that a consumer is * actively adding a new probe associated with this provider. 
This @@ -1859,7 +1861,7 @@ no_mem: /*ARGSUSED*/ static void * -fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) +fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p) { #pragma unused(arg) fasttrap_provider_t *provider; @@ -1917,10 +1919,10 @@ fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA) dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA; - if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname, + if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname, &dhpv->dthpv_pattr)) == NULL) { cmn_err(CE_WARN, "failed to instantiate provider %s for " - "process %u", dhpv->dthpv_provname, (uint_t)pid); + "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid); return (NULL); } @@ -2120,7 +2122,7 @@ fasttrap_meta_create_probe(void *arg, void *parg, /*ARGSUSED*/ static void -fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) +fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p) { #pragma unused(arg) /* @@ -2129,7 +2131,7 @@ fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid) * provider until that count has dropped to zero. This just puts * the provider on death row. */ - fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1); + fasttrap_provider_retire(p, dhpv->dthpv_provname, 1); } static char* @@ -2559,7 +2561,7 @@ fasttrap_init( void ) return; } - gFasttrapInited = 1; + gFasttrapInited = 1; } } diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c index 6d72e54a3..66cd6e2a5 100644 --- a/bsd/kern/kern_event.c +++ b/bsd/kern/kern_event.c @@ -5167,9 +5167,10 @@ event_unlock(struct socket *so, int refcount, void *lr) else lr_saved = lr; - if (refcount) + if (refcount) { + VERIFY(so->so_usecount > 0); so->so_usecount--; - + } if (so->so_usecount < 0) { panic("%s: so=%p usecount=%d lrh= %s\n", __func__, so, so->so_usecount, solockhistory_nr(so)); diff --git a/bsd/kern/kern_exec.c b/bsd/kern/kern_exec.c index dc4c83eae..eb042349b 100644 --- a/bsd/kern/kern_exec.c +++ b/bsd/kern/kern_exec.c @@ -620,7 +620,8 @@ exec_fat_imgact(struct image_params *imgp) if (imgp->ip_origcputype != 0) { /* Fat header previously matched, don't allow another fat file inside */ - return (-1); + error = -1; /* not claimed */ + goto bad; } /* Make sure it's a fat binary */ @@ -1012,12 +1013,18 @@ grade: /* * Commit to new map. * - * Swap the new map for the old, which consumes our new map reference but - * each leaves us responsible for the old_map reference. That lets us get - * off the pmap associated with it, and then we can release it. + * Swap the new map for the old for target task, which consumes + * our new map reference but each leaves us responsible for the + * old_map reference. That lets us get off the pmap associated + * with it, and then we can release it. + * + * The map needs to be set on the target task which is different + * than current task, thus swap_task_map is used instead of + * vm_map_switch. 
*/ - old_map = swap_task_map(task, thread, map, !spawn); + old_map = swap_task_map(task, thread, map); vm_map_deallocate(old_map); + old_map = NULL; lret = activate_exec_state(task, p, thread, &load_result); if (lret != KERN_SUCCESS) { @@ -1059,6 +1066,7 @@ grade: goto badtoolate; } + /* Switch to target task's map to copy out strings */ old_map = vm_map_switch(get_task_map(task)); if (load_result.unixproc) { @@ -2874,6 +2882,10 @@ bad: } } + /* Inherit task role from old task to new task for exec */ + if (error == 0 && !spawn_no_exec) { + proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task()); + } /* * Apply the spawnattr policy, apptype (which primes the task for importance donation), @@ -3443,6 +3455,11 @@ __mac_execve(proc_t p, struct __mac_execve_args *uap, int32_t *retval) /* Sever any extant thread affinity */ thread_affinity_exec(current_thread()); + /* Inherit task role from old task to new task for exec */ + if (!in_vfexec) { + proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task()); + } + thread_t main_thread = imgp->ip_new_thread; task_set_main_thread_qos(new_task, main_thread); diff --git a/bsd/kern/kern_exit.c b/bsd/kern/kern_exit.c index 6cf36945d..a03d9b87a 100644 --- a/bsd/kern/kern_exit.c +++ b/bsd/kern/kern_exit.c @@ -763,10 +763,21 @@ proc_prepareexit(proc_t p, int rv, boolean_t perf_notify) /* NOTREACHED */ } - /* If a core should be generated, notify crash reporter */ - if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) || - (p->p_exit_reason != OS_REASON_NULL && (p->p_exit_reason->osr_flags & - OS_REASON_FLAG_GENERATE_CRASH_REPORT))) { + /* + * Generate a corefile/crashlog if: + * The process doesn't have an exit reason that indicates no crash report should be created + * AND any of the following are true: + * - The process was terminated due to a fatal signal that generates a core + * - The process was killed due to a code signing violation + * - The process has an exit reason that indicates we should generate a crash report + * + * The first condition is necessary because abort_with_reason()/payload() use SIGABRT + * (which normally triggers a core) but may indicate that no crash report should be created. + */ + if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) && + (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) || + (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & + OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) { /* * Workaround for processes checking up on PT_DENY_ATTACH: * should be backed out post-Leopard (details in 5431025). 
@@ -803,7 +814,7 @@ skipcheck: /* stash the usage into corpse data if making_corpse == true */ if (create_corpse == TRUE) { - kr = task_mark_corpse(current_task()); + kr = task_mark_corpse(p->task); if (kr != KERN_SUCCESS) { if (kr == KERN_NO_SPACE) { printf("Process[%d] has no vm space for corpse info.\n", p->p_pid); @@ -854,7 +865,7 @@ skipcheck: /* Update the code, subcode based on exit reason */ proc_update_corpse_exception_codes(p, &code, &subcode); - populate_corpse_crashinfo(p, task_get_corpseinfo(current_task()), rup, code, subcode, buffer, num_knotes); + populate_corpse_crashinfo(p, task_get_corpseinfo(p->task), rup, code, subcode, buffer, num_knotes); if (buffer != NULL) { kfree(buffer, buf_size); } diff --git a/bsd/kern/kern_memorystatus.c b/bsd/kern/kern_memorystatus.c index b9f736ce0..0745a0039 100644 --- a/bsd/kern/kern_memorystatus.c +++ b/bsd/kern/kern_memorystatus.c @@ -1091,6 +1091,7 @@ SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_min_processes, CTLFLAG_RW|CTLFL boolean_t memorystatus_freeze_throttle_enabled = TRUE; SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_throttle_enabled, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_throttle_enabled, 0, ""); +#define VM_PAGES_FOR_ALL_PROCS (2) /* * Manual trigger of freeze and thaw for dev / debug kernels only. */ @@ -1109,7 +1110,7 @@ sysctl_memorystatus_freeze SYSCTL_HANDLER_ARGS if (error || !req->newptr) return (error); - if (pid == 2) { + if (pid == VM_PAGES_FOR_ALL_PROCS) { vm_pageout_anonymous_pages(); return 0; @@ -1175,14 +1176,19 @@ sysctl_memorystatus_available_pages_thaw SYSCTL_HANDLER_ARGS if (error || !req->newptr) return (error); - p = proc_find(pid); - if (p != NULL) { - error = task_thaw(p->task); - proc_rele(p); - - if (error) - error = EIO; - return error; + if (pid == VM_PAGES_FOR_ALL_PROCS) { + do_fastwake_warmup_all(); + return 0; + } else { + p = proc_find(pid); + if (p != NULL) { + error = task_thaw(p->task); + proc_rele(p); + + if (error) + error = EIO; + return error; + } } return EINVAL; @@ -3601,8 +3607,8 @@ done: uint64_t timestamp_now = mach_absolute_time(); memorystatus_jetsam_snapshot->notification_time = timestamp_now; memorystatus_jetsam_snapshot->js_gencount++; - if (memorystatus_jetsam_snapshot_last_timestamp == 0 || - timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout) { + if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || + timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { proc_list_unlock(); int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); if (!ret) { @@ -3851,8 +3857,8 @@ memorystatus_kill_process_sync(pid_t victim_pid, uint32_t cause, os_reason_t jet sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count; uint64_t timestamp_now = mach_absolute_time(); memorystatus_jetsam_snapshot->notification_time = timestamp_now; - if (memorystatus_jetsam_snapshot_last_timestamp == 0 || - timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout) { + if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 || + timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) { proc_list_unlock(); int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size)); if (!ret) { @@ -5816,6 +5822,28 @@ memorystatus_freeze_thread(void 
*param __unused, wait_result_t wr __unused) thread_block((thread_continue_t) memorystatus_freeze_thread); } +static int +sysctl_memorystatus_do_fastwake_warmup_all SYSCTL_HANDLER_ARGS +{ +#pragma unused(oidp, req, arg1, arg2) + + /* Need to be root or have entitlement */ + if (!kauth_cred_issuser(kauth_cred_get()) && !IOTaskHasEntitlement(current_task(), MEMORYSTATUS_ENTITLEMENT)) { + return EPERM; + } + + if (memorystatus_freeze_enabled == FALSE) { + return ENOTSUP; + } + + do_fastwake_warmup_all(); + + return 0; +} + +SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED, + 0, 0, &sysctl_memorystatus_do_fastwake_warmup_all, "I", ""); + #endif /* CONFIG_FREEZE */ #if VM_PRESSURE_EVENTS diff --git a/bsd/kern/kern_sig.c b/bsd/kern/kern_sig.c index 82dc644e5..5d38f292e 100644 --- a/bsd/kern/kern_sig.c +++ b/bsd/kern/kern_sig.c @@ -1643,18 +1643,11 @@ terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t { proc_t target_proc = PROC_NULL; kauth_cred_t cur_cred = kauth_cred_get(); - int signum = SIGKILL; os_reason_t signal_reason = OS_REASON_NULL; AUDIT_ARG(pid, target_pid); - if ((target_pid <= 0) || (cur_proc->p_pid == target_pid)) { - return EINVAL; - } - - if (reason_namespace == OS_REASON_INVALID || - reason_namespace > OS_REASON_MAX_VALID_NAMESPACE) { - + if ((target_pid <= 0)) { return EINVAL; } @@ -1665,7 +1658,7 @@ terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t AUDIT_ARG(process, target_proc); - if (!cansignal(cur_proc, cur_cred, target_proc, signum, 0)) { + if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL, 0)) { proc_rele(target_proc); return EPERM; } @@ -1677,7 +1670,17 @@ terminate_with_payload_internal(struct proc *cur_proc, int target_pid, uint32_t signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size, reason_string, reason_flags); - psignal_with_reason(target_proc, signum, signal_reason); + if (target_pid == cur_proc->p_pid) { + /* + * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or + * return if the thread and/or task are already terminating. Either way, the + * current thread won't return to userspace. + */ + psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason); + } else { + psignal_with_reason(target_proc, SIGKILL, signal_reason); + } + proc_rele(target_proc); return 0; @@ -2038,7 +2041,7 @@ build_signal_reason(int signum, const char *procname) reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) + sizeof(sender_proc->p_pid)); - ret = os_reason_alloc_buffer(signal_reason, reason_buffer_size_estimate); + ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate); if (ret != 0) { printf("build_signal_reason: unable to allocate signal reason buffer.\n"); return signal_reason; @@ -2656,6 +2659,12 @@ psignal_try_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_ psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason); } +void +psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason) +{ + psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason); +} + /* * If the current process has received a signal (should be caught or cause * termination, should interrupt current syscall), return the signal number. 
diff --git a/bsd/kern/kern_time.c b/bsd/kern/kern_time.c index af55a09ed..7f94f9b50 100644 --- a/bsd/kern/kern_time.c +++ b/bsd/kern/kern_time.c @@ -395,6 +395,7 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) if (IS_64BIT_PROCESS(p)) { struct user64_itimerval user_itv; + bzero(&user_itv, sizeof (user_itv)); user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec; user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec; user_itv.it_value.tv_sec = aitv.it_value.tv_sec; @@ -402,6 +403,7 @@ getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval) return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv))); } else { struct user32_itimerval user_itv; + bzero(&user_itv, sizeof (user_itv)); user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec; user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec; user_itv.it_value.tv_sec = aitv.it_value.tv_sec; diff --git a/bsd/kern/kpi_socket.c b/bsd/kern/kpi_socket.c index 2251c3f6d..560b79bf2 100644 --- a/bsd/kern/kpi_socket.c +++ b/bsd/kern/kpi_socket.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -108,6 +109,8 @@ sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags, new_so = TAILQ_FIRST(&sock->so_comp); TAILQ_REMOVE(&sock->so_comp, new_so, so_list); + new_so->so_state &= ~SS_COMP; + new_so->so_head = NULL; sock->so_qlen--; /* @@ -122,7 +125,7 @@ sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags, * again once we're done with the filter(s). */ socket_unlock(sock, 0); - if ((error = soacceptfilter(new_so)) != 0) { + if ((error = soacceptfilter(new_so, sock)) != 0) { /* Drop reference on listening socket */ sodereference(sock); return (error); @@ -136,8 +139,6 @@ sock_accept(socket_t sock, struct sockaddr *from, int fromlen, int flags, socket_lock(new_so, 1); } - new_so->so_state &= ~SS_COMP; - new_so->so_head = NULL; (void) soacceptlock(new_so, &sa, 0); socket_unlock(sock, 1); /* release the head */ @@ -961,6 +962,7 @@ sock_release(socket_t sock) soclose_locked(sock); } else { /* remove extra reference holding the socket */ + VERIFY(sock->so_usecount > 1); sock->so_usecount--; } socket_unlock(sock, 1); diff --git a/bsd/kern/process_policy.c b/bsd/kern/process_policy.c index ffbd70e98..393e1e1e1 100644 --- a/bsd/kern/process_policy.c +++ b/bsd/kern/process_policy.c @@ -243,6 +243,12 @@ handle_cpuuse(int action, user_addr_t attrp, proc_t proc, __unused uint64_t targ } #endif + // on macOS tasks can only set and clear their own CPU limits + if ((action == PROC_POLICY_ACTION_APPLY || action == PROC_POLICY_ACTION_RESTORE) + && proc != current_proc()) { + return (EPERM); + } + switch (action) { case PROC_POLICY_ACTION_GET: error = proc_get_task_ruse_cpu(proc->task, &cpuattr.ppattr_cpu_attr, diff --git a/bsd/kern/sys_generic.c b/bsd/kern/sys_generic.c index 8692d514d..bd8fd7c22 100644 --- a/bsd/kern/sys_generic.c +++ b/bsd/kern/sys_generic.c @@ -1778,8 +1778,15 @@ poll_nocancel(struct proc *p, struct poll_nocancel_args *uap, int32_t *retval) fds[i].revents = 0; } - /* Did we have any trouble registering? */ - if (rfds == nfds) + /* + * Did we have any trouble registering? + * If user space passed 0 FDs, then respect any timeout value passed. + * This is an extremely inefficient sleep. If user space passed one or + * more FDs, and we had trouble registering _all_ of them, then bail + * out. If a subset of the provided FDs failed to register, then we + * will still call the kqueue_scan function. 
+ */ + if (nfds && (rfds == nfds)) goto done; /* scan for, and possibly wait for, the kevents to trigger */ diff --git a/bsd/kern/sys_reason.c b/bsd/kern/sys_reason.c index 3404d199b..16ea4cfe4 100644 --- a/bsd/kern/sys_reason.c +++ b/bsd/kern/sys_reason.c @@ -54,6 +54,8 @@ lck_attr_t *os_reason_lock_attr; #define OS_REASON_MAX_COUNT (maxproc + 100) static struct zone *os_reason_zone; +static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, + boolean_t can_block); void os_reason_init() @@ -151,6 +153,26 @@ os_reason_dealloc_buffer(os_reason_t cur_reason) return; } +/* + * Allocates and initializes a buffer of specified size for the reason. This function + * may block and should not be called from extremely performance sensitive contexts + * (i.e. jetsam). Also initializes the kcdata descriptor accordingly. If there is an + * existing buffer, we dealloc the buffer before allocating a new one and + * clear the associated kcdata descriptor. If osr_bufsize is passed as 0, + * we deallocate the existing buffer and then return. + * + * Returns: + * 0 on success + * EINVAL if the passed reason pointer is invalid or the requested size is + * larger than REASON_BUFFER_MAX_SIZE + * EIO if we fail to initialize the kcdata buffer + */ +int +os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) +{ + return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, TRUE); +} + /* * Allocates and initializes a buffer of specified size for the reason. Also * initializes the kcdata descriptor accordingly. If there is an existing @@ -166,7 +188,14 @@ os_reason_dealloc_buffer(os_reason_t cur_reason) * EIO if we fail to initialize the kcdata buffer */ int -os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) +os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize) +{ + return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, FALSE); +} + +static int +os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize, + boolean_t can_block) { if (cur_reason == OS_REASON_NULL) { return EINVAL; @@ -185,14 +214,15 @@ os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize) return 0; } - /* - * We don't want to block trying to acquire a reason buffer and hold - * up important things trying to clean up the system (i.e. jetsam). 
- */ - cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON); - if (cur_reason->osr_kcd_buf == NULL) { - lck_mtx_unlock(&cur_reason->osr_lock); - return ENOMEM; + if (can_block) { + cur_reason->osr_kcd_buf = kalloc_tag(osr_bufsize, VM_KERN_MEMORY_REASON); + assert(cur_reason->osr_kcd_buf != NULL); + } else { + cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON); + if (cur_reason->osr_kcd_buf == NULL) { + lck_mtx_unlock(&cur_reason->osr_lock); + return ENOMEM; + } } bzero(cur_reason->osr_kcd_buf, osr_bufsize); diff --git a/bsd/kern/ubc_subr.c b/bsd/kern/ubc_subr.c index 27a8cc511..300f91b0b 100644 --- a/bsd/kern/ubc_subr.c +++ b/bsd/kern/ubc_subr.c @@ -158,7 +158,7 @@ struct cs_hash { cs_md_final cs_final; }; -static struct cs_hash cs_hash_sha1 = { +static const struct cs_hash cs_hash_sha1 = { .cs_type = CS_HASHTYPE_SHA1, .cs_size = CS_SHA1_LEN, .cs_digest_size = SHA_DIGEST_LENGTH, @@ -167,7 +167,7 @@ static struct cs_hash cs_hash_sha1 = { .cs_final = (cs_md_final)SHA1Final, }; #if CRYPTO_SHA2 -static struct cs_hash cs_hash_sha256 = { +static const struct cs_hash cs_hash_sha256 = { .cs_type = CS_HASHTYPE_SHA256, .cs_size = SHA256_DIGEST_LENGTH, .cs_digest_size = SHA256_DIGEST_LENGTH, @@ -175,7 +175,7 @@ static struct cs_hash cs_hash_sha256 = { .cs_update = (cs_md_update)SHA256_Update, .cs_final = (cs_md_final)SHA256_Final, }; -static struct cs_hash cs_hash_sha256_truncate = { +static const struct cs_hash cs_hash_sha256_truncate = { .cs_type = CS_HASHTYPE_SHA256_TRUNCATED, .cs_size = CS_SHA256_TRUNCATED_LEN, .cs_digest_size = SHA256_DIGEST_LENGTH, @@ -183,7 +183,7 @@ static struct cs_hash cs_hash_sha256_truncate = { .cs_update = (cs_md_update)SHA256_Update, .cs_final = (cs_md_final)SHA256_Final, }; -static struct cs_hash cs_hash_sha384 = { +static const struct cs_hash cs_hash_sha384 = { .cs_type = CS_HASHTYPE_SHA384, .cs_size = SHA384_DIGEST_LENGTH, .cs_digest_size = SHA384_DIGEST_LENGTH, @@ -193,7 +193,7 @@ static struct cs_hash cs_hash_sha384 = { }; #endif -static struct cs_hash * +static struct cs_hash const * cs_find_md(uint8_t type) { if (type == CS_HASHTYPE_SHA1) { @@ -221,7 +221,7 @@ union cs_hash_union { * Choose among different hash algorithms. * Higher is better, 0 => don't use at all. 
*/ -static uint32_t hashPriorities[] = { +static const uint32_t hashPriorities[] = { CS_HASHTYPE_SHA1, CS_HASHTYPE_SHA256_TRUNCATED, CS_HASHTYPE_SHA256, @@ -354,7 +354,7 @@ hashes( static int cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length) { - struct cs_hash *hashtype; + struct cs_hash const *hashtype; if (length < sizeof(*cd)) return EBADEXEC; @@ -3612,7 +3612,7 @@ cs_validate_hash( unsigned *tainted) { union cs_hash_union mdctx; - struct cs_hash *hashtype = NULL; + struct cs_hash const *hashtype = NULL; unsigned char actual_hash[CS_HASH_MAX_SIZE]; unsigned char expected_hash[CS_HASH_MAX_SIZE]; boolean_t found_hash; diff --git a/bsd/kern/uipc_socket.c b/bsd/kern/uipc_socket.c index 1f694df67..c552c4175 100644 --- a/bsd/kern/uipc_socket.c +++ b/bsd/kern/uipc_socket.c @@ -753,6 +753,7 @@ socreate_internal(int dom, struct socket **aso, int type, int proto, * so protocol attachment handler must be coded carefuly */ so->so_state |= SS_NOFDREF; + VERIFY(so->so_usecount > 0); so->so_usecount--; sofreelastref(so, 1); /* will deallocate the socket */ return (error); @@ -1068,10 +1069,19 @@ sofreelastref(struct socket *so, int dealloc) return; } if (head != NULL) { - socket_lock(head, 1); + /* + * Need to lock the listener when the protocol has + * per socket locks + */ + if (head->so_proto->pr_getlock != NULL) + socket_lock(head, 1); + if (so->so_state & SS_INCOMP) { + so->so_state &= ~SS_INCOMP; TAILQ_REMOVE(&head->so_incomp, so, so_list); head->so_incqlen--; + head->so_qlen--; + so->so_head = NULL; } else if (so->so_state & SS_COMP) { /* * We must not decommission a socket that's @@ -1084,15 +1094,14 @@ sofreelastref(struct socket *so, int dealloc) so->so_rcv.sb_flags &= ~(SB_SEL|SB_UPCALL); so->so_snd.sb_flags &= ~(SB_SEL|SB_UPCALL); so->so_event = sonullevent; - socket_unlock(head, 1); + if (head->so_proto->pr_getlock != NULL) + socket_unlock(head, 1); return; } else { panic("sofree: not queued"); } - head->so_qlen--; - so->so_state &= ~SS_INCOMP; - so->so_head = NULL; - socket_unlock(head, 1); + if (head->so_proto->pr_getlock != NULL) + socket_unlock(head, 1); } sowflush(so); sorflush(so); @@ -1177,8 +1186,7 @@ soclose_locked(struct socket *so) } if ((so->so_options & SO_ACCEPTCONN)) { - struct socket *sp, *sonext; - int socklock = 0; + struct socket *sp; /* * We do not want new connection to be added @@ -1186,9 +1194,8 @@ soclose_locked(struct socket *so) */ so->so_options &= ~SO_ACCEPTCONN; - for (sp = TAILQ_FIRST(&so->so_incomp); - sp != NULL; sp = sonext) { - sonext = TAILQ_NEXT(sp, so_list); + while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) { + int socklock = 0; /* * Radar 5350314 @@ -1204,7 +1211,7 @@ soclose_locked(struct socket *so) /* * Lock ordering for consistency with the * rest of the stack, we lock the socket - * first and then grabb the head. + * first and then grab the head. 
*/ socket_unlock(so, 0); socket_lock(sp, 1); @@ -1212,43 +1219,55 @@ soclose_locked(struct socket *so) socklock = 1; } - TAILQ_REMOVE(&so->so_incomp, sp, so_list); - so->so_incqlen--; - + /* + * Radar 27945981 + * The extra reference for the list insure the + * validity of the socket pointer when we perform the + * unlock of the head above + */ if (sp->so_state & SS_INCOMP) { sp->so_state &= ~SS_INCOMP; sp->so_head = NULL; + TAILQ_REMOVE(&so->so_incomp, sp, so_list); + so->so_incqlen--; + so->so_qlen--; (void) soabort(sp); } - if (socklock) + if (socklock != 0) socket_unlock(sp, 1); } while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) { + int socklock = 0; + /* Dequeue from so_comp since sofree() won't do it */ - TAILQ_REMOVE(&so->so_comp, sp, so_list); - so->so_qlen--; - if (so->so_proto->pr_getlock != NULL) { + /* + * Lock ordering for consistency with the + * rest of the stack, we lock the socket + * first and then grab the head. + */ socket_unlock(so, 0); socket_lock(sp, 1); + socket_lock(so, 0); + socklock = 1; } if (sp->so_state & SS_COMP) { sp->so_state &= ~SS_COMP; sp->so_head = NULL; + TAILQ_REMOVE(&so->so_comp, sp, so_list); + so->so_qlen--; (void) soabort(sp); } - if (so->so_proto->pr_getlock != NULL) { + if (socklock) socket_unlock(sp, 1); - socket_lock(so, 0); } } - } if (so->so_pcb == NULL) { /* 3915887: mark the socket as ready for dealloc */ so->so_flags |= SOF_PCBCLEARING; @@ -1317,6 +1336,7 @@ discard: atomic_add_32(&so->so_proto->pr_domain->dom_refs, -1); evsofree(so); + VERIFY(so->so_usecount > 0); so->so_usecount--; sofree(so); return (error); @@ -1405,11 +1425,10 @@ soaccept(struct socket *so, struct sockaddr **nam) } int -soacceptfilter(struct socket *so) +soacceptfilter(struct socket *so, struct socket *head) { struct sockaddr *local = NULL, *remote = NULL; int error = 0; - struct socket *head = so->so_head; /* * Hold the lock even if this socket has not been made visible @@ -1419,8 +1438,7 @@ soacceptfilter(struct socket *so) socket_lock(so, 1); if (sogetaddr_locked(so, &remote, 1) != 0 || sogetaddr_locked(so, &local, 0) != 0) { - so->so_state &= ~(SS_NOFDREF | SS_COMP); - so->so_head = NULL; + so->so_state &= ~SS_NOFDREF; socket_unlock(so, 1); soclose(so); /* Out of resources; try it again next time */ @@ -1448,8 +1466,7 @@ soacceptfilter(struct socket *so) * the following is done while holding the lock since * the socket has been exposed to the filter(s) earlier. 
*/ - so->so_state &= ~(SS_NOFDREF | SS_COMP); - so->so_head = NULL; + so->so_state &= ~SS_COMP; socket_unlock(so, 1); soclose(so); /* Propagate socket filter's error code to the caller */ diff --git a/bsd/kern/uipc_socket2.c b/bsd/kern/uipc_socket2.c index 264246cb2..0817894df 100644 --- a/bsd/kern/uipc_socket2.c +++ b/bsd/kern/uipc_socket2.c @@ -193,7 +193,6 @@ soisconnecting(struct socket *so) void soisconnected(struct socket *so) { - struct socket *head = so->so_head; so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING); so->so_state |= SS_ISCONNECTED; @@ -202,23 +201,38 @@ soisconnected(struct socket *so) sflt_notify(so, sock_evt_connected, NULL); - if (head && (so->so_state & SS_INCOMP)) { - so->so_state &= ~SS_INCOMP; - so->so_state |= SS_COMP; + if (so->so_head != NULL && (so->so_state & SS_INCOMP)) { + struct socket *head = so->so_head; + int locked = 0; + + /* + * Enforce lock order when the protocol has per socket locks + */ if (head->so_proto->pr_getlock != NULL) { socket_unlock(so, 0); socket_lock(head, 1); + socket_lock(so, 0); + locked = 1; } - postevent(head, 0, EV_RCONN); + if (so->so_head == head && (so->so_state & SS_INCOMP)) { + so->so_state &= ~SS_INCOMP; + so->so_state |= SS_COMP; TAILQ_REMOVE(&head->so_incomp, so, so_list); + TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); head->so_incqlen--; - TAILQ_INSERT_TAIL(&head->so_comp, so, so_list); + + if (locked != 0) + socket_unlock(so, 0); + + postevent(head, 0, EV_RCONN); sorwakeup(head); wakeup_one((caddr_t)&head->so_timeo); - if (head->so_proto->pr_getlock != NULL) { - socket_unlock(head, 1); + + if (locked != 0) socket_lock(so, 0); } + if (locked != 0) + socket_unlock(head, 1); } else { postevent(so, 0, EV_WCONN); wakeup((caddr_t)&so->so_timeo); @@ -235,7 +249,6 @@ socanwrite(struct socket *so) return ((so->so_state & SS_ISCONNECTED) || !(so->so_proto->pr_flags & PR_CONNREQUIRED) || (so->so_flags1 & SOF1_PRECONNECT_DATA)); - } void @@ -679,7 +692,6 @@ sowakeup(struct socket *so, struct sockbuf *sb) int soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc) { - if (sbreserve(&so->so_snd, sndcc) == 0) goto bad; else @@ -2644,7 +2656,7 @@ sbunlock(struct sockbuf *sb, boolean_t keeplocked) lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); - VERIFY(so->so_usecount != 0); + VERIFY(so->so_usecount > 0); so->so_usecount--; so->unlock_lr[so->next_unlock_lr] = lr_saved; so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; diff --git a/bsd/kern/uipc_syscalls.c b/bsd/kern/uipc_syscalls.c index beee6c37f..603f8b34d 100644 --- a/bsd/kern/uipc_syscalls.c +++ b/bsd/kern/uipc_syscalls.c @@ -491,7 +491,6 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, goto out; } - /* * At this point we know that there is at least one connection * ready to be accepted. 
Remove it from the queue prior to @@ -502,6 +501,8 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); so = TAILQ_FIRST(&head->so_comp); TAILQ_REMOVE(&head->so_comp, so, so_list); + so->so_head = NULL; + so->so_state &= ~SS_COMP; head->so_qlen--; /* unlock head to avoid deadlock with select, keep a ref on head */ socket_unlock(head, 0); @@ -515,8 +516,7 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, */ if ((error = mac_socket_check_accepted(kauth_cred_get(), so)) != 0) { socket_lock(so, 1); - so->so_state &= ~(SS_NOFDREF | SS_COMP); - so->so_head = NULL; + so->so_state &= ~SS_NOFDREF; socket_unlock(so, 1); soclose(so); /* Drop reference on listening socket */ @@ -529,7 +529,7 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, * Pass the pre-accepted socket to any interested socket filter(s). * Upon failure, the socket would have been closed by the callee. */ - if (so->so_filt != NULL && (error = soacceptfilter(so)) != 0) { + if (so->so_filt != NULL && (error = soacceptfilter(so, head)) != 0) { /* Drop reference on listening socket */ sodereference(head); /* Propagate socket filter's error code to the caller */ @@ -547,8 +547,7 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, * just causes the client to spin. Drop the socket. */ socket_lock(so, 1); - so->so_state &= ~(SS_NOFDREF | SS_COMP); - so->so_head = NULL; + so->so_state &= ~SS_NOFDREF; socket_unlock(so, 1); soclose(so); sodereference(head); @@ -563,9 +562,6 @@ accept_nocancel(struct proc *p, struct accept_nocancel_args *uap, if (dosocklock) socket_lock(so, 1); - so->so_state &= ~SS_COMP; - so->so_head = NULL; - /* Sync socket non-blocking/async state with file flags */ if (fp->f_flag & FNONBLOCK) { so->so_state |= SS_NBIO; diff --git a/bsd/kern/uipc_usrreq.c b/bsd/kern/uipc_usrreq.c index 8ae71f6e7..995f96a13 100644 --- a/bsd/kern/uipc_usrreq.c +++ b/bsd/kern/uipc_usrreq.c @@ -1165,6 +1165,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) socket_lock(so, 0); } else { /* Release the reference held for the listen socket */ + VERIFY(so2->so_usecount > 0); so2->so_usecount--; } goto out; @@ -1207,6 +1208,7 @@ unp_connect(struct socket *so, struct sockaddr *nam, __unused proc_t p) /* Release the reference held for * listen socket. */ + VERIFY(so2->so_usecount > 0); so2->so_usecount--; } goto out; @@ -1298,6 +1300,7 @@ decref_out: * This is possible only for SOCK_DGRAM sockets. We refuse * connecting to the same socket for SOCK_STREAM sockets. 
*/ + VERIFY(so2->so_usecount > 0); so2->so_usecount--; } } @@ -1352,6 +1355,7 @@ unp_connect2(struct socket *so, struct socket *so2) socket_unlock(so2, 0); soisconnected(so); unp_get_locks_in_order(so, so2); + VERIFY(so2->so_usecount > 0); so2->so_usecount--; } else { soisconnected(so); @@ -1386,6 +1390,7 @@ unp_connect2(struct socket *so, struct socket *so2) unp_get_locks_in_order(so, so2); /* Decrement the extra reference left before */ + VERIFY(so2->so_usecount > 0); so2->so_usecount--; break; @@ -1478,6 +1483,7 @@ try_again: } unp->unp_conn = NULL; + VERIFY(so2->so_usecount > 0); so2->so_usecount--; if (unp->unp_flags & UNP_TRACE_MDNS) @@ -1494,6 +1500,7 @@ try_again: case SOCK_STREAM: unp2->unp_conn = NULL; + VERIFY(so2->so_usecount > 0); so->so_usecount--; /* Set the socket state correctly but do a wakeup later when @@ -2411,9 +2418,10 @@ unp_lock(struct socket *so, int refcount, void * lr) panic("unp_lock: so=%p so_pcb=%p lr=%p ref=0x%x\n", so, so->so_pcb, lr_saved, so->so_usecount); - if (refcount) - so->so_usecount++; - + if (refcount) { + VERIFY(so->so_usecount > 0); + so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; return (0); diff --git a/bsd/miscfs/devfs/devfs_vnops.c b/bsd/miscfs/devfs/devfs_vnops.c index ebf0f0c14..fbd3246f7 100644 --- a/bsd/miscfs/devfs/devfs_vnops.c +++ b/bsd/miscfs/devfs/devfs_vnops.c @@ -636,7 +636,8 @@ devfs_close(struct vnop_close_args *ap) if (vnode_isinuse(vp, 1)) { DEVFS_LOCK(); dnp = VTODN(vp); - dn_times_now(dnp, 0); + if (dnp) + dn_times_now(dnp, 0); DEVFS_UNLOCK(); } return (0); @@ -656,7 +657,8 @@ devfsspec_close(struct vnop_close_args *ap) if (vnode_isinuse(vp, 0)) { DEVFS_LOCK(); dnp = VTODN(vp); - dn_times_now(dnp, 0); + if (dnp) + dn_times_now(dnp, 0); DEVFS_UNLOCK(); } diff --git a/bsd/miscfs/specfs/spec_vnops.c b/bsd/miscfs/specfs/spec_vnops.c index adddc10d8..f698e5d68 100644 --- a/bsd/miscfs/specfs/spec_vnops.c +++ b/bsd/miscfs/specfs/spec_vnops.c @@ -90,6 +90,7 @@ #include #include #include +#include #include @@ -237,6 +238,7 @@ int lowpri_throttle_enabled = 1; static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level); static int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap); static int throttle_get_thread_throttle_level(uthread_t ut); +static int throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier); /* * Trivial lookup routine that always fails. @@ -941,7 +943,7 @@ throttle_timer_start(struct _throttle_io_info_t *info, boolean_t update_io_count if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) { - if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[level]) { + if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) { /* * we had an I/O occur at a higher priority tier within * this tier's throttle window @@ -1556,17 +1558,40 @@ throttle_get_passive_io_policy(uthread_t *ut) static int throttle_get_thread_throttle_level(uthread_t ut) { - int thread_throttle_level; + uthread_t *ut_p = (ut == NULL) ? 
&ut : NULL; + int io_tier = throttle_get_io_policy(ut_p); - if (ut == NULL) - ut = get_bsdthread_info(current_thread()); + return throttle_get_thread_throttle_level_internal(ut, io_tier); +} + +/* + * Return a throttle level given an existing I/O tier (such as returned by throttle_get_io_policy) + */ +static int +throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) { + int thread_throttle_level = io_tier; + int user_idle_level; - thread_throttle_level = proc_get_effective_thread_policy(ut->uu_thread, TASK_POLICY_IO); + assert(ut != NULL); /* Bootcache misses should always be throttled */ if (ut->uu_throttle_bc == TRUE) thread_throttle_level = THROTTLE_LEVEL_TIER3; + /* + * Issue tier3 I/O as tier2 when the user is idle + * to allow maintenance tasks to make more progress. + * + * Assume any positive idle level is enough... for now it's + * only ever 0 or 128 but this is not defined anywhere. + */ + if (thread_throttle_level >= THROTTLE_LEVEL_TIER3) { + user_idle_level = timer_get_user_idle_level(); + if (user_idle_level > 0) { + thread_throttle_level--; + } + } + return (thread_throttle_level); } @@ -1899,6 +1924,7 @@ void throttle_info_end_io(buf_t bp) { mount_t mp; struct bufattr *bap; struct _throttle_io_info_t *info; + int io_tier; bap = &bp->b_attr; if (!ISSET(bap->ba_flags, BA_STRATEGY_TRACKED_IO)) { @@ -1913,7 +1939,12 @@ void throttle_info_end_io(buf_t bp) { info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1]; } - throttle_info_end_io_internal(info, GET_BUFATTR_IO_TIER(bap)); + io_tier = GET_BUFATTR_IO_TIER(bap); + if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) { + io_tier--; + } + + throttle_info_end_io_internal(info, io_tier); } /* @@ -1947,6 +1978,9 @@ int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut if (bap && inflight && !ut->uu_throttle_bc) { thread_throttle_level = GET_BUFATTR_IO_TIER(bap); + if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) { + thread_throttle_level--; + } } else { thread_throttle_level = throttle_get_thread_throttle_level(ut); } @@ -2139,6 +2173,7 @@ spec_strategy(struct vnop_strategy_args *ap) struct _throttle_io_info_t *throttle_info; boolean_t isssd = FALSE; boolean_t inflight = FALSE; + boolean_t upgrade = FALSE; int code = 0; proc_t curproc = current_proc(); @@ -2151,6 +2186,21 @@ spec_strategy(struct vnop_strategy_args *ap) io_tier = throttle_get_io_policy(&ut); passive = throttle_get_passive_io_policy(&ut); + /* + * Mark if the I/O was upgraded by throttle_get_thread_throttle_level + * while preserving the original issued tier (throttle_get_io_policy + * does not return upgraded tiers) + */ + if (mp && io_tier > throttle_get_thread_throttle_level_internal(ut, io_tier)) { +#if CONFIG_IOSCHED + if (!(mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) { + upgrade = TRUE; + } +#else /* CONFIG_IOSCHED */ + upgrade = TRUE; +#endif /* CONFIG_IOSCHED */ + } + if (bp->b_flags & B_META) bap->ba_flags |= BA_META; @@ -2212,6 +2262,11 @@ spec_strategy(struct vnop_strategy_args *ap) if (bap->ba_flags & BA_NOCACHE) code |= DKIO_NOCACHE; + if (upgrade) { + code |= DKIO_TIER_UPGRADE; + SET(bap->ba_flags, BA_IO_TIER_UPGRADE); + } + if (kdebug_enable) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), bdev, (int)buf_blkno(bp), buf_count(bp), 0); diff --git a/bsd/net/content_filter.c b/bsd/net/content_filter.c index c01c3d078..a36d7137c 100644 --- a/bsd/net/content_filter.c +++ b/bsd/net/content_filter.c @@ -1840,6 +1840,7 @@ cfil_info_free(struct socket *so, 
struct cfil_info *cfil_info) if (so->so_flags & SOF_CONTENT_FILTER) { so->so_flags &= ~SOF_CONTENT_FILTER; + VERIFY(so->so_usecount > 0); so->so_usecount--; } if (cfil_info == NULL) diff --git a/bsd/net/ntstat.c b/bsd/net/ntstat.c index 14f523535..1135bc55e 100644 --- a/bsd/net/ntstat.c +++ b/bsd/net/ntstat.c @@ -65,8 +65,11 @@ #include __private_extern__ int nstat_collect = 1; + +#if (DEBUG || DEVELOPMENT) SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_collect, 0, "Collect detailed statistics"); +#endif /* (DEBUG || DEVELOPMENT) */ static int nstat_privcheck = 0; SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED, @@ -1272,9 +1275,13 @@ nstat_pcb_detach(struct inpcb *inp) for (prevsrc = NULL, src = state->ncs_srcs; src; prevsrc = src, src = src->next) { - tucookie = (struct nstat_tucookie *)src->cookie; - if (tucookie->inp == inp) - break; + nstat_provider_id_t provider_id = src->provider->nstat_provider_id; + if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) + { + tucookie = (struct nstat_tucookie *)src->cookie; + if (tucookie->inp == inp) + break; + } } if (src) @@ -4220,6 +4227,7 @@ nstat_control_source_add( src->provider = provider; src->cookie = cookie; src->filter = src_filter; + src->seq = 0; if (msg) { @@ -4591,16 +4599,6 @@ nstat_control_begin_query( state->ncs_seq++; } } - else if (state->ncs_context != 0) - { - /* - * A continuation of a paced-query was in progress. Send that - * context an error and reset the state. If the same context - * has changed its mind, just send the full query results. - */ - if (state->ncs_context != hdrp->context) - nstat_send_error(state, state->ncs_context, EAGAIN); - } return partial; } diff --git a/bsd/net/route.c b/bsd/net/route.c index 4e5dd5af7..94750b9e8 100644 --- a/bsd/net/route.c +++ b/bsd/net/route.c @@ -770,6 +770,17 @@ rtm_scrub(int type, int idx, struct sockaddr *hint, struct sockaddr *sa, } break; } + case RTAX_GATEWAY: { + /* + * Break if the gateway is not AF_LINK type (indirect routes) + * + * Else, if is, check if it is resolved. If not yet resolved + * simply break else scrub the link layer address. + */ + if ((sa->sa_family != AF_LINK) || (SDL(sa)->sdl_alen == 0)) + break; + /* fallthrough */ + } case RTAX_IFP: { if (sa->sa_family == AF_LINK && credp) { struct sockaddr_dl *sdl = SDL(buf); diff --git a/bsd/net/rtsock.c b/bsd/net/rtsock.c index a7a9f16db..3af0a71d2 100644 --- a/bsd/net/rtsock.c +++ b/bsd/net/rtsock.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2015 Apple Inc. All rights reserved. + * Copyright (c) 2000-2016 Apple Inc. All rights reserved. 
* * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -1150,7 +1150,7 @@ again: sa = rtm_scrub(type, i, hint, sa, &ssbuf, sizeof (ssbuf), NULL); break; - + case RTAX_GATEWAY: case RTAX_IFP: sa = rtm_scrub(type, i, NULL, sa, &ssbuf, sizeof (ssbuf), credp); diff --git a/bsd/netinet/mptcp_opt.c b/bsd/netinet/mptcp_opt.c index f8611236b..d2aec1750 100644 --- a/bsd/netinet/mptcp_opt.c +++ b/bsd/netinet/mptcp_opt.c @@ -426,8 +426,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, boolean_t send_64bit_dsn = FALSE; boolean_t send_64bit_ack = FALSE; u_int32_t old_mpt_flags = tp->t_mpflags & - (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL | - TMPF_MPCAP_RETRANSMIT); + (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL); if ((mptcp_enable == 0) || (mp_tp == NULL) || @@ -474,8 +473,7 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, if (((tp->t_mpflags & TMPF_PREESTABLISHED) && (!(tp->t_mpflags & TMPF_SENT_KEYS)) && - (!(tp->t_mpflags & TMPF_JOINED_FLOW))) || - (tp->t_mpflags & TMPF_MPCAP_RETRANSMIT)) { + (!(tp->t_mpflags & TMPF_JOINED_FLOW)))) { struct mptcp_mpcapable_opt_rsp1 mptcp_opt; if ((MAX_TCPOPTLEN - optlen) < sizeof (struct mptcp_mpcapable_opt_rsp1)) @@ -499,7 +497,6 @@ mptcp_setup_opts(struct tcpcb *tp, int32_t off, u_char *opt, tp->t_mpflags |= TMPF_SENT_KEYS | TMPF_MPTCP_TRUE; so->so_flags |= SOF_MPTCP_TRUE; tp->t_mpflags &= ~TMPF_PREESTABLISHED; - tp->t_mpflags &= ~TMPF_MPCAP_RETRANSMIT; if (!tp->t_mpuna) { tp->t_mpuna = tp->snd_una; @@ -952,8 +949,7 @@ ret_optlen: if (TRUE == *p_mptcp_acknow ) { VERIFY(old_mpt_flags != 0); u_int32_t new_mpt_flags = tp->t_mpflags & - (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL | - TMPF_MPCAP_RETRANSMIT); + (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL); /* * If none of the above mpflags were acted on by @@ -971,8 +967,7 @@ ret_optlen: */ if ((old_mpt_flags == new_mpt_flags) || (new_mpt_flags == 0)) { tp->t_mpflags &= ~(TMPF_SND_MPPRIO - | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL | - TMPF_MPCAP_RETRANSMIT); + | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL); *p_mptcp_acknow = FALSE; mptcplog((LOG_DEBUG, "MPTCP Sender: %s: no action \n", __func__), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG); @@ -1063,15 +1058,9 @@ mptcp_do_mpcapable_opt(struct tcpcb *tp, u_char *cp, struct tcphdr *th, return; } - /* Handle old duplicate SYN/ACK retransmission */ - if (SEQ_GT(tp->rcv_nxt, (tp->irs + 1))) - return; - /* handle SYN/ACK retransmission by acknowledging with ACK */ - if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) { - tp->t_mpflags |= TMPF_MPCAP_RETRANSMIT; + if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) return; - } /* A SYN/ACK contains peer's key and flags */ if (optlen != sizeof (struct mptcp_mpcapable_opt_rsp)) { diff --git a/bsd/netinet/mptcp_subr.c b/bsd/netinet/mptcp_subr.c index a2ecbf4c0..2e737e0cb 100644 --- a/bsd/netinet/mptcp_subr.c +++ b/bsd/netinet/mptcp_subr.c @@ -1561,7 +1561,7 @@ mptcp_subflow_del(struct mptses *mpte, struct mptsub *mpts, boolean_t close) if (close) (void) mptcp_subflow_soclose(mpts, so); - VERIFY(mp_so->so_usecount != 0); + VERIFY(mp_so->so_usecount > 0); mp_so->so_usecount--; /* for subflow socket */ mpts->mpts_mpte = NULL; mpts->mpts_socket = NULL; @@ -3943,7 +3943,7 @@ mptcp_thread_destroy(struct mptses *mpte) mp_so = mpte->mpte_mppcb->mpp_socket; VERIFY(mp_so != NULL); - VERIFY(mp_so->so_usecount != 0); + VERIFY(mp_so->so_usecount > 0); mp_so->so_usecount--; /* for thread */ mpte->mpte_mppcb->mpp_flags |= MPP_DEFUNCT; MPTE_UNLOCK(mpte); diff --git a/bsd/netinet/tcp_input.c 
b/bsd/netinet/tcp_input.c index 72f5f611c..2a48e2046 100644 --- a/bsd/netinet/tcp_input.c +++ b/bsd/netinet/tcp_input.c @@ -5399,8 +5399,8 @@ tcp_compute_rtt(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th) } /* - * Collect new round-trip time estimate - * and update averages and current timeout. + * Collect new round-trip time estimate and update averages and + * current timeout. */ static void tcp_xmit_timer(struct tcpcb *tp, int rtt, @@ -5408,6 +5408,17 @@ tcp_xmit_timer(struct tcpcb *tp, int rtt, { int delta; + /* + * On AWDL interface, the initial RTT measurement on SYN + * can be wrong due to peer caching. Avoid the first RTT + * measurement as it might skew up the RTO. + * + */ + if (tp->t_inpcb->inp_last_outifp != NULL && + (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) && + th_ack == tp->iss + 1) + return; + if (tp->t_flagsext & TF_RECOMPUTE_RTT) { if (SEQ_GT(th_ack, tp->snd_una) && SEQ_LEQ(th_ack, tp->snd_max) && @@ -5903,7 +5914,6 @@ tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) else tp->snd_cwnd = 0; tp->snd_cwnd += tp->t_maxseg; - } /* @@ -5970,7 +5980,6 @@ tcp_dropdropablreq(struct socket *head) cur_cnt = 0; } - qlen = head->so_incqlen; if (rnd == 0) rnd = RandomULong(); @@ -6022,7 +6031,6 @@ tcp_dropdropablreq(struct socket *head) } } so = sonext; - } if (so == NULL) { return (0); @@ -6043,12 +6051,15 @@ found_victim: } TAILQ_REMOVE(&head->so_incomp, so, so_list); + head->so_incqlen--; + head->so_qlen--; + so->so_state &= ~SS_INCOMP; + so->so_flags |= SOF_OVERFLOW; + so->so_head = NULL; tcp_unlock(head, 0, 0); lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED); tp = sototcpcb(so); - so->so_flags |= SOF_OVERFLOW; - so->so_head = NULL; tcp_close(tp); if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) { @@ -6059,6 +6070,7 @@ found_victim: * be garbage collected later. 
* Release the reference held for so_incomp queue */ + VERIFY(so->so_usecount > 0); so->so_usecount--; tcp_unlock(so, 1, 0); } else { @@ -6073,6 +6085,7 @@ found_victim: tcp_lock(so, 0, 0); /* Release the reference held for so_incomp queue */ + VERIFY(so->so_usecount > 0); so->so_usecount--; if (so->so_usecount != 1 || @@ -6086,8 +6099,8 @@ found_victim: */ tcp_unlock(so, 1, 0); } else { - /* Drop the reference held for this function */ + VERIFY(so->so_usecount > 0); so->so_usecount--; in_pcbdispose(inp); @@ -6097,8 +6110,6 @@ found_victim: tcpstat.tcps_drops++; tcp_lock(head, 0, 0); - head->so_incqlen--; - head->so_qlen--; return(1); } diff --git a/bsd/netinet/tcp_output.c b/bsd/netinet/tcp_output.c index 5c29ff3a6..75bd67029 100644 --- a/bsd/netinet/tcp_output.c +++ b/bsd/netinet/tcp_output.c @@ -357,6 +357,10 @@ static int32_t tcp_tfo_check(struct tcpcb *tp, int32_t len) /* No cookie, so we request one */ return (0); + /* There is not enough space for the cookie, so we cannot do TFO */ + if (MAX_TCPOPTLEN - optlen < cookie_len) + goto fallback; + /* Do not send SYN+data if there is more in the queue than MSS */ if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN)) goto fallback; @@ -1156,8 +1160,7 @@ after_sack_rexmit: if ((tp->t_state >= TCPS_ESTABLISHED) && ((tp->t_mpflags & TMPF_SND_MPPRIO) || (tp->t_mpflags & TMPF_SND_REM_ADDR) || - (tp->t_mpflags & TMPF_SND_MPFAIL) || - (tp->t_mpflags & TMPF_MPCAP_RETRANSMIT))) { + (tp->t_mpflags & TMPF_SND_MPFAIL))) { if (len > 0) { len = 0; } @@ -1244,16 +1247,19 @@ after_sack_rexmit: #if TRAFFIC_MGT if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) { - if (tcp_recv_throttle(tp)) { - uint32_t min_iaj_win = - tcp_min_iaj_win * tp->t_maxseg; + if (recwin > 0 && tcp_recv_throttle(tp)) { + uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg; if (tp->iaj_rwintop == 0 || - SEQ_LT(tp->iaj_rwintop, tp->rcv_adv)) + SEQ_LT(tp->iaj_rwintop, tp->rcv_adv)) tp->iaj_rwintop = tp->rcv_adv; if (SEQ_LT(tp->iaj_rwintop, - tp->rcv_nxt + min_iaj_win)) - tp->iaj_rwintop = tp->rcv_nxt + min_iaj_win; - recwin = min(tp->iaj_rwintop - tp->rcv_nxt, recwin); + tp->rcv_nxt + min_iaj_win)) + tp->iaj_rwintop = tp->rcv_nxt + + min_iaj_win; + recwin = imin((int32_t)(tp->iaj_rwintop - + tp->rcv_nxt), recwin); + if (recwin < 0) + recwin = 0; } } #endif /* TRAFFIC_MGT */ @@ -2625,7 +2631,8 @@ out: if (error == ENOBUFS) { if (!tp->t_timer[TCPT_REXMT] && - !tp->t_timer[TCPT_PERSIST]) + !tp->t_timer[TCPT_PERSIST] && + SEQ_GT(tp->snd_max, tp->snd_una)) tp->t_timer[TCPT_REXMT] = OFFSET_FROM_START(tp, tp->t_rxtcur); tp->snd_cwnd = tp->t_maxseg; diff --git a/bsd/netinet/tcp_subr.c b/bsd/netinet/tcp_subr.c index d7289624e..b8c06d6b0 100644 --- a/bsd/netinet/tcp_subr.c +++ b/bsd/netinet/tcp_subr.c @@ -2991,10 +2991,15 @@ tcp_sbspace(struct tcpcb *tp) { struct socket *so = tp->t_inpcb->inp_socket; struct sockbuf *sb = &so->so_rcv; - u_int32_t rcvbuf = sb->sb_hiwat; + u_int32_t rcvbuf; int32_t space; int32_t pending = 0; + tcp_sbrcv_grow_rwin(tp, sb); + + /* hiwat might have changed */ + rcvbuf = sb->sb_hiwat; + /* * If message delivery is enabled, do not count * unordered bytes in receive buffer towards hiwat mark. 
@@ -3005,8 +3010,6 @@ tcp_sbspace(struct tcpcb *tp) if (so->so_flags & SOF_ENABLE_MSGS) rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes; - tcp_sbrcv_grow_rwin(tp, sb); - space = ((int32_t) imin((rcvbuf - sb->sb_cc), (sb->sb_mbmax - sb->sb_mbcnt))); if (space < 0) diff --git a/bsd/netinet/tcp_timer.c b/bsd/netinet/tcp_timer.c index e50bab301..e6ed974d2 100644 --- a/bsd/netinet/tcp_timer.c +++ b/bsd/netinet/tcp_timer.c @@ -539,6 +539,7 @@ tcp_garbage_collect(struct inpcb *inp, int istimewait) #endif /* INET6 */ in_pcbdetach(inp); } + VERIFY(so->so_usecount > 0); so->so_usecount--; if (inp->inp_wantcnt == WNT_STOPUSING) active = TRUE; diff --git a/bsd/netinet/tcp_usrreq.c b/bsd/netinet/tcp_usrreq.c index 67306d65d..f50103ea0 100644 --- a/bsd/netinet/tcp_usrreq.c +++ b/bsd/netinet/tcp_usrreq.c @@ -1174,6 +1174,7 @@ tcp_usr_abort(struct socket *so) if (tp == NULL) goto out; tp = tcp_drop(tp, ECONNABORTED); + VERIFY(so->so_usecount > 0); so->so_usecount--; COMMON_END(PRU_ABORT); } diff --git a/bsd/netinet/tcp_var.h b/bsd/netinet/tcp_var.h index afbcb41d5..42118d413 100644 --- a/bsd/netinet/tcp_var.h +++ b/bsd/netinet/tcp_var.h @@ -539,7 +539,6 @@ struct tcpcb { #define TMPF_SND_MPFAIL 0x00200000 /* Received mapping csum failure */ #define TMPF_FASTJOIN_SEND 0x00400000 /* Fast join early data send */ #define TMPF_FASTJOINBY2_SEND 0x00800000 /* Fast join send after 3 WHS */ -#define TMPF_MPCAP_RETRANSMIT 0x01000000 /* Retransmission of 3rd ACK */ #define TMPF_TFO_REQUEST 0x02000000 /* TFO Requested */ tcp_seq t_mpuna; /* unacknowledged sequence */ diff --git a/bsd/netinet/udp_usrreq.c b/bsd/netinet/udp_usrreq.c index 765cd6826..b1caebcdf 100644 --- a/bsd/netinet/udp_usrreq.c +++ b/bsd/netinet/udp_usrreq.c @@ -2249,9 +2249,10 @@ udp_unlock(struct socket *so, int refcount, void *debug) else lr_saved = debug; - if (refcount) + if (refcount) { + VERIFY(so->so_usecount > 0); so->so_usecount--; - + } if (so->so_pcb == NULL) { panic("%s: so=%p NO PCB! 
lr=%p lrh= %s\n", __func__, so, lr_saved, solockhistory_nr(so)); diff --git a/bsd/netinet6/esp_input.c b/bsd/netinet6/esp_input.c index 23e5aa560..8056438bb 100644 --- a/bsd/netinet6/esp_input.c +++ b/bsd/netinet6/esp_input.c @@ -428,19 +428,12 @@ noreplaycheck: if (algo->finalizedecrypt) { - unsigned char tag[algo->icvlen]; - if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) { + if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { ipseclog((LOG_ERR, "packet decryption ICV failure\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); goto bad; } - if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) { - ipseclog((LOG_ERR, "packet decryption ICV mismatch\n")); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); - goto bad; - } } /* @@ -1021,19 +1014,12 @@ noreplaycheck: if (algo->finalizedecrypt) { - unsigned char tag[algo->icvlen]; - if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) { + if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) { ipseclog((LOG_ERR, "packet decryption ICV failure\n")); IPSEC_STAT_INCREMENT(ipsecstat.in_inval); KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); goto bad; } - if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) { - ipseclog((LOG_ERR, "packet decryption ICV mismatch\n")); - IPSEC_STAT_INCREMENT(ipsecstat.in_inval); - KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0); - goto bad; - } } /* diff --git a/bsd/netinet6/esp_rijndael.c b/bsd/netinet6/esp_rijndael.c index cdd86bff3..56b560263 100644 --- a/bsd/netinet6/esp_rijndael.c +++ b/bsd/netinet6/esp_rijndael.c @@ -558,6 +558,8 @@ esp_gcm_schedule( __unused const struct esp_algorithm *algo, { lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED); aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); + u_int ivlen = sav->ivlen; + unsigned char nonce[ESP_GCM_SALT_LEN+ivlen]; int rc; ctx->decrypt = &ctx->ctxt[0]; @@ -568,10 +570,20 @@ esp_gcm_schedule( __unused const struct esp_algorithm *algo, return (rc); } - rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->encrypt); + bzero(nonce, ESP_GCM_SALT_LEN + ivlen); + memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); + memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen); + + rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, nonce, ctx->encrypt); if (rc) { return (rc); } + + rc = aes_encrypt_reset_gcm(ctx->encrypt); + if (rc) { + return (rc); + } + return (rc); } @@ -611,7 +623,7 @@ esp_gcm_encrypt_aes( int scutoff; int i, len; unsigned char nonce[ESP_GCM_SALT_LEN+ivlen]; - + if (ivlen != ESP_GCM_IVLEN) { ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen)); m_freem(m); @@ -627,27 +639,40 @@ esp_gcm_encrypt_aes( bodyoff = off + sizeof(struct newesp) + ivlen; } - m_copyback(m, ivoff, ivlen, sav->iv); + bzero(nonce, ESP_GCM_SALT_LEN+ivlen); + /* generate new iv */ + ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); - if (m->m_pkthdr.len < bodyoff) { - ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__, - m->m_pkthdr.len, (u_int32_t)bodyoff)); + if (aes_encrypt_reset_gcm(ctx->encrypt)) { + ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__)); m_freem(m); return EINVAL; } - /* Set IV */ - memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN); - memcpy(nonce+ESP_GCM_SALT_LEN, 
sav->iv, ivlen); + if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) { + ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__)); + m_freem(m); + return EINVAL; + } - ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN); - if (aes_encrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->encrypt)) { - ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__)); + /* + * The IV is now generated within corecrypto and + * is provided to ESP using aes_encrypt_inc_iv_gcm(). + * This makes the sav->iv redundant and is no longer + * used in GCM operations. But we still copy the IV + * back to sav->iv to ensure that any future code reading + * this value will get the latest IV. + */ + memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen); + m_copyback(m, ivoff, ivlen, sav->iv); + bzero(nonce, ESP_GCM_SALT_LEN+ivlen); + + if (m->m_pkthdr.len < bodyoff) { + ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__, + m->m_pkthdr.len, (u_int32_t)bodyoff)); m_freem(m); - bzero(nonce, sizeof(nonce)); return EINVAL; } - bzero(nonce, sizeof(nonce)); /* Set Additional Authentication Data */ if (!(sav->flags & SADB_X_EXT_OLD)) { @@ -784,9 +809,6 @@ esp_gcm_encrypt_aes( sp_aligned = NULL; } - /* generate new iv */ - key_sa_stir_iv(sav); - return 0; } diff --git a/bsd/netinet6/nd6_rtr.c b/bsd/netinet6/nd6_rtr.c index 4dc3b3b3c..0d6c9f044 100644 --- a/bsd/netinet6/nd6_rtr.c +++ b/bsd/netinet6/nd6_rtr.c @@ -1461,6 +1461,7 @@ defrouter_select(struct ifnet *ifp) rtaddr = installed_dr->rtaddr_mapped; else rtaddr = installed_dr->rtaddr; + NDDR_UNLOCK(dr); lck_mtx_unlock(nd6_mutex); /* Callee returns a locked route upon success */ if ((rt = nd6_lookup(&rtaddr, 0, ifp, 0)) != NULL) { @@ -1479,9 +1480,11 @@ defrouter_select(struct ifnet *ifp) nd6log((LOG_ERR, "defrouter_select: more than one " "default router is installed for interface :%s.\n", if_name(ifp))); + NDDR_UNLOCK(dr); } - } - NDDR_UNLOCK(dr); + } else + NDDR_UNLOCK(dr); + NDDR_REMREF(dr); /* for this for loop */ if (drrele != NULL) NDDR_REMREF(drrele); @@ -1497,7 +1500,7 @@ defrouter_select(struct ifnet *ifp) } if (installed_dr) { - NDDR_REMREF(selected_dr); + NDDR_REMREF(installed_dr); installed_dr = NULL; } @@ -2159,8 +2162,6 @@ prelist_update( NDPR_REMREF(pr); lck_mtx_unlock(nd6_mutex); } else { - struct nd_prefix *newpr = NULL; - newprefix = 1; if (new->ndpr_vltime == 0) @@ -2170,33 +2171,16 @@ prelist_update( bzero(&new->ndpr_addr, sizeof (struct in6_addr)); - error = nd6_prelist_add(new, dr, &newpr, FALSE); - if (error != 0 || newpr == NULL) { + error = nd6_prelist_add(new, dr, &pr, FALSE); + if (error != 0 || pr == NULL) { nd6log((LOG_NOTICE, "prelist_update: " "nd6_prelist_add failed for %s/%d on %s " "errno=%d, returnpr=0x%llx\n", ip6_sprintf(&new->ndpr_prefix.sin6_addr), new->ndpr_plen, if_name(new->ndpr_ifp), - error, (uint64_t)VM_KERNEL_ADDRPERM(newpr))); + error, (uint64_t)VM_KERNEL_ADDRPERM(pr))); goto end; /* we should just give up in this case. */ } - - /* - * XXX: from the ND point of view, we can ignore a prefix - * with the on-link bit being zero. However, we need a - * prefix structure for references from autoconfigured - * addresses. Thus, we explicitly make sure that the prefix - * itself expires now. - */ - NDPR_LOCK(newpr); - if (newpr->ndpr_raf_onlink == 0) { - newpr->ndpr_vltime = 0; - newpr->ndpr_pltime = 0; - in6_init_prefix_ltimes(newpr); - } - - pr = newpr; - NDPR_UNLOCK(newpr); } /* @@ -2206,7 +2190,7 @@ prelist_update( /* 5.5.3 (a). Ignore the prefix without the A bit set. 
*/ if (!new->ndpr_raf_auto) - goto afteraddrconf; + goto end; /* * 5.5.3 (b). the link-local prefix should have been ignored in @@ -2419,8 +2403,6 @@ prelist_update( } } -afteraddrconf: - end: if (pr != NULL) NDPR_REMREF(pr); diff --git a/bsd/netkey/key.c b/bsd/netkey/key.c index 1b4e6420b..7e1a6061f 100644 --- a/bsd/netkey/key.c +++ b/bsd/netkey/key.c @@ -103,7 +103,7 @@ #include #include #include - +#include #include #if INET6 @@ -4245,7 +4245,11 @@ key_setsaval( } /* initialize */ - key_randomfill(sav->iv, sav->ivlen); + if (sav->alg_enc == SADB_X_EALG_AES_GCM) { + bzero(sav->iv, sav->ivlen); + } else { + key_randomfill(sav->iv, sav->ivlen); + } #endif break; case SADB_SATYPE_AH: @@ -4495,7 +4499,11 @@ key_setsaval2(struct secasvar *sav, } } /* initialize */ - key_randomfill(sav->iv, sav->ivlen); + if (sav->alg_enc == SADB_X_EALG_AES_GCM) { + bzero(sav->iv, sav->ivlen); + } else { + key_randomfill(sav->iv, sav->ivlen); + } } #endif } @@ -6315,8 +6323,7 @@ key_randomfill( size_t l) { #ifdef __APPLE__ - - read_random(p, (u_int)l); + cc_rand_generate(p, l); #else size_t n; u_int32_t v; diff --git a/bsd/sys/buf_internal.h b/bsd/sys/buf_internal.h index 18d0e1119..3b007d99e 100644 --- a/bsd/sys/buf_internal.h +++ b/bsd/sys/buf_internal.h @@ -270,6 +270,7 @@ extern vm_offset_t buf_kernel_addrperm; #define BA_ISOCHRONOUS 0x00001000 /* device specific isochronous throughput to media */ #define BA_STRATEGY_TRACKED_IO 0x00002000 /* tracked by spec_strategy */ +#define BA_IO_TIER_UPGRADE 0x00004000 /* effective I/O tier is higher than BA_IO_TIER */ #define GET_BUFATTR_IO_TIER(bap) ((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT) diff --git a/bsd/sys/disk.h b/bsd/sys/disk.h index c75d1dae1..a365b5030 100644 --- a/bsd/sys/disk.h +++ b/bsd/sys/disk.h @@ -76,6 +76,8 @@ * * DKIOCGETPROVISIONSTATUS get device's block provision status * DKIOCGETIOMINSATURATIONBYTECOUNT get minimum byte count to saturate storage bandwidth + * + * DKIOCGETERRORDESCRIPTION get description of any drive error */ #define DK_FEATURE_BARRIER 0x00000002 @@ -174,6 +176,15 @@ typedef struct dk_provision_extent_t * extents; /* output: provision extents */ } dk_provision_status_t; +typedef struct +{ + uint64_t options; /* reserved, clear to zero */ + uint64_t reserved; /* reserved, clear to zero */ + uint64_t description_size; + char * description; +} dk_error_description_t; + + #ifdef KERNEL #ifdef PRIVATE @@ -219,6 +230,8 @@ typedef struct #define DKIOCGETPROVISIONSTATUS _IOWR('d', 79, dk_provision_status_t) +#define DKIOCGETERRORDESCRIPTION _IOR('d', 80, dk_error_description_t) + #define DKIOCSYNCHRONIZECACHE _IO('d', 22) #ifdef KERNEL diff --git a/bsd/sys/dtrace.h b/bsd/sys/dtrace.h index ede1f7ac3..b44140030 100644 --- a/bsd/sys/dtrace.h +++ b/bsd/sys/dtrace.h @@ -1042,14 +1042,21 @@ typedef struct dtrace_actdesc { int dtad_refcnt; /* reference count */ } dtrace_actdesc_t; + typedef struct dtrace_ecbdesc { dtrace_actdesc_t *dted_action; /* action description(s) */ dtrace_preddesc_t dted_pred; /* predicate description */ dtrace_probedesc_t dted_probe; /* probe description */ uint64_t dted_uarg; /* library argument */ int dted_refcnt; /* reference count */ + uint64_t dted_probegen; /* matched probe generation */ } dtrace_ecbdesc_t; +/* + * APPLE NOTE: The kernel always rebuild dtrace_ecbdesc structures + * coming from userspace, so there is no dted_probegen manipulation risk + */ + /* * DTrace Metadata Description Structures * @@ -2326,8 +2333,8 @@ extern void dtrace_probe(dtrace_id_t, uint64_t arg0, uint64_t arg1, * a meta 
provider. This structure consists of the following members: * * dtms_create_probe() <-- Add a new probe to a created provider - * dtms_provide_pid() <-- Create a new provider for a given process - * dtms_remove_pid() <-- Remove a previously created provider + * dtms_provide_proc() <-- Create a new provider for a given process + * dtms_remove_proc() <-- Remove a previously created provider * * 1.2 void dtms_create_probe(void *arg, void *parg, * dtrace_helper_probedesc_t *probedesc); @@ -2341,7 +2348,7 @@ extern void dtrace_probe(dtrace_id_t, uint64_t arg0, uint64_t arg1, * * The first argument is the cookie as passed to dtrace_meta_register(). * The second argument is the provider cookie for the associated provider; - * this is obtained from the return value of dtms_provide_pid(). The third + * this is obtained from the return value of dtms_provide_proc(). The third * argument is the helper probe description. * * 1.2.3 Return value @@ -2357,8 +2364,8 @@ extern void dtrace_probe(dtrace_id_t, uint64_t arg0, uint64_t arg1, * such that the provider may (and is expected to) call provider-related * DTrace provider APIs including dtrace_probe_create(). * - * 1.3 void *dtms_provide_pid(void *arg, dtrace_meta_provider_t *mprov, - * pid_t pid) + * 1.3 void *dtms_provide_proc(void *arg, dtrace_meta_provider_t *mprov, + * proc_t *proc) * * 1.3.1 Overview * @@ -2384,15 +2391,15 @@ extern void dtrace_probe(dtrace_id_t, uint64_t arg0, uint64_t arg1, * * 1.3.4 Caller's context * - * dtms_provide_pid() is called from either ioctl() or module load context. + * dtms_provide_proc() is called from either ioctl() or module load context. * The DTrace framework is locked in such a way that meta providers may not * register or unregister. This means that the meta provider cannot call * dtrace_meta_register() or dtrace_meta_unregister(). However, the context * is such that the provider may -- and is expected to -- call * provider-related DTrace provider APIs including dtrace_register(). * - * 1.4 void dtms_remove_pid(void *arg, dtrace_meta_provider_t *mprov, - * pid_t pid) + * 1.4 void dtms_remove_proc(void *arg, dtrace_meta_provider_t *mprov, + * proc_t proc) * * 1.4.1 Overview * @@ -2415,7 +2422,7 @@ extern void dtrace_probe(dtrace_id_t, uint64_t arg0, uint64_t arg1, * * 1.4.4 Caller's context * - * dtms_remove_pid() is called from either ioctl() or exit() context. + * dtms_remove_proc() is called from either ioctl() or exit() context. * The DTrace framework is locked in such a way that meta providers may not * register or unregister. This means that the meta provider cannot call * dtrace_meta_register() or dtrace_meta_unregister(). However, the context @@ -2448,10 +2455,18 @@ typedef struct dtrace_helper_provdesc { dtrace_pattr_t dthpv_pattr; /* stability attributes */ } dtrace_helper_provdesc_t; +/* + * APPLE NOTE: dtms_provide_pid and dtms_remove_pid are replaced with + * dtms_provide_proc on Darwin, and a proc reference need to be held + * for the duration of the call. + * + * This is due to the fact that proc_find is not re-entrant on Darwin. 
+ */ + typedef struct dtrace_mops { void (*dtms_create_probe)(void *, void *, dtrace_helper_probedesc_t *); - void *(*dtms_provide_pid)(void *, dtrace_helper_provdesc_t *, pid_t); - void (*dtms_remove_pid)(void *, dtrace_helper_provdesc_t *, pid_t); + void *(*dtms_provide_proc)(void *, dtrace_helper_provdesc_t *, proc_t*); + void (*dtms_remove_proc)(void *, dtrace_helper_provdesc_t *, proc_t*); char* (*dtms_provider_name)(void *); } dtrace_mops_t; diff --git a/bsd/sys/kdebug.h b/bsd/sys/kdebug.h index 42b8c92f1..d29e0cd17 100644 --- a/bsd/sys/kdebug.h +++ b/bsd/sys/kdebug.h @@ -766,6 +766,7 @@ extern void kdebug_reset(void); #define DKIO_NOCACHE 0x80 #define DKIO_TIER_MASK 0xF00 #define DKIO_TIER_SHIFT 8 +#define DKIO_TIER_UPGRADE 0x1000 /* Kernel Debug Sub Classes for Applications (DBG_APPS) */ #define DBG_APP_LOGINWINDOW 0x03 diff --git a/bsd/sys/proc_internal.h b/bsd/sys/proc_internal.h index 4048a73a7..a6bfe6bc3 100644 --- a/bsd/sys/proc_internal.h +++ b/bsd/sys/proc_internal.h @@ -487,6 +487,10 @@ struct proc { #define PROC_SETACTION_STATE(p) (p->p_pcaction = (PROC_CONTROL_STATE(p) | (PROC_CONTROL_STATE(p) << 16))) #define PROC_RESETACTION_STATE(p) (p->p_pcaction = PROC_CONTROL_STATE(p)) +/* Process exit reason macros */ +#define PROC_HAS_EXITREASON(p) (p->p_exit_reason != OS_REASON_NULL) +#define PROC_EXITREASON_FLAGS(p) p->p_exit_reason->osr_flags + /* additional process flags */ #define P_LADVLOCK 0x01 #define P_LXBKIDLEINPROG 0x02 diff --git a/bsd/sys/reason.h b/bsd/sys/reason.h index 13a49e3e6..0952e5e96 100644 --- a/bsd/sys/reason.h +++ b/bsd/sys/reason.h @@ -33,7 +33,7 @@ __BEGIN_DECLS -#ifdef KERNEL +#ifdef KERNEL_PRIVATE #include @@ -62,6 +62,9 @@ os_reason_t build_userspace_exit_reason(uint32_t reason_namespace, uint64_t reas user_addr_t reason_string, uint64_t reason_flags); char *launchd_exit_reason_get_string_desc(os_reason_t exit_reason); +/* The blocking allocation is currently not exported to KEXTs */ +int os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize); + #else /* XNU_KERNEL_PRIVATE */ typedef void * os_reason_t; @@ -69,12 +72,12 @@ typedef void * os_reason_t; #endif /* XNU_KERNEL_PRIVATE */ os_reason_t os_reason_create(uint32_t osr_namespace, uint64_t osr_code); -int os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize); +int os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize); struct kcdata_descriptor * os_reason_get_kcdata_descriptor(os_reason_t cur_reason); void os_reason_ref(os_reason_t cur_reason); void os_reason_free(os_reason_t cur_reason); -#endif /* KERNEL */ +#endif /* KERNEL_PRIVATE */ /* * Reason namespaces. 
diff --git a/bsd/sys/signalvar.h b/bsd/sys/signalvar.h index f427f7215..b280c686f 100644 --- a/bsd/sys/signalvar.h +++ b/bsd/sys/signalvar.h @@ -223,6 +223,7 @@ void psignal_with_reason(struct proc *p, int sig, struct os_reason *signal_reaso void psignal_locked(struct proc *, int); void psignal_try_thread(proc_t, thread_t, int signum); void psignal_try_thread_with_reason(proc_t, thread_t, int, struct os_reason*); +void psignal_thread_with_reason(proc_t, thread_t, int, struct os_reason*); void psignal_uthread(thread_t, int); void pgsignal(struct pgrp *pgrp, int sig, int checkctty); void tty_pgsignal(struct tty * tp, int sig, int checkctty); diff --git a/bsd/sys/socketvar.h b/bsd/sys/socketvar.h index e3d1dfdf3..c2c47cab2 100644 --- a/bsd/sys/socketvar.h +++ b/bsd/sys/socketvar.h @@ -835,7 +835,7 @@ extern void sbunlock(struct sockbuf *sb, boolean_t keeplocked); extern int soaccept(struct socket *so, struct sockaddr **nam); extern int soacceptlock(struct socket *so, struct sockaddr **nam, int dolock); -extern int soacceptfilter(struct socket *so); +extern int soacceptfilter(struct socket *so, struct socket *head); extern struct socket *soalloc(int waitok, int dom, int type); extern int sobindlock(struct socket *so, struct sockaddr *nam, int dolock); extern int soclose(struct socket *so); diff --git a/bsd/sys/ubc_internal.h b/bsd/sys/ubc_internal.h index 6ea151d09..ba3d848c1 100644 --- a/bsd/sys/ubc_internal.h +++ b/bsd/sys/ubc_internal.h @@ -107,7 +107,7 @@ struct cs_blob { vm_offset_t csb_mem_offset; vm_address_t csb_mem_kaddr; unsigned char csb_cdhash[CS_CDHASH_LEN]; - struct cs_hash *csb_hashtype; + const struct cs_hash *csb_hashtype; vm_size_t csb_hash_pagesize; /* each hash entry represent this many bytes in the file */ vm_size_t csb_hash_pagemask; vm_size_t csb_hash_pageshift; diff --git a/bsd/vfs/vfs_bio.c b/bsd/vfs/vfs_bio.c index 495a5f3df..f54c98779 100644 --- a/bsd/vfs/vfs_bio.c +++ b/bsd/vfs/vfs_bio.c @@ -95,6 +95,7 @@ #include #include /* fslog_io_error() */ +#include /* dk_error_description_t */ #include #include @@ -2945,6 +2946,8 @@ start: break; } } else { + int clear_bdone; + /* * buffer in core and not busy */ @@ -2963,8 +2966,41 @@ start: if ( (bp->b_upl) ) panic("buffer has UPL, but not marked BUSY: %p", bp); - if ( !ret_only_valid && bp->b_bufsize != size) - allocbuf(bp, size); + clear_bdone = FALSE; + if (!ret_only_valid) { + /* + * If the number bytes that are valid is going + * to increase (even if we end up not doing a + * reallocation through allocbuf) we have to read + * the new size first. + * + * This is required in cases where we doing a read + * modify write of a already valid data on disk but + * in cases where the data on disk beyond (blkno + b_bcount) + * is invalid, we may end up doing extra I/O. + */ + if (operation == BLK_META && bp->b_bcount < size) { + /* + * Since we are going to read in the whole size first + * we first have to ensure that any pending delayed write + * is flushed to disk first. + */ + if (ISSET(bp->b_flags, B_DELWRI)) { + CLR(bp->b_flags, B_CACHE); + buf_bwrite(bp); + goto start; + } + /* + * clear B_DONE before returning from + * this function so that the caller can + * can issue a read for the new size. 
+ */ + clear_bdone = TRUE; + } + + if (bp->b_bufsize != size) + allocbuf(bp, size); + } upl_flags = 0; switch (operation) { @@ -3016,6 +3052,9 @@ start: /*NOTREACHED*/ break; } + + if (clear_bdone) + CLR(bp->b_flags, B_DONE); } } else { /* not incore() */ int queue = BQ_EMPTY; /* Start with no preference */ @@ -3924,6 +3963,16 @@ buf_biodone(buf_t bp) mp = NULL; } + if (ISSET(bp->b_flags, B_ERROR)) { + if (mp && (MNT_ROOTFS & mp->mnt_flag)) { + dk_error_description_t desc; + bzero(&desc, sizeof(desc)); + desc.description = panic_disk_error_description; + desc.description_size = panic_disk_error_description_size; + VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel()); + } + } + if (mp && (bp->b_flags & B_READ) == 0) { update_last_io_time(mp); INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size); @@ -3958,6 +4007,10 @@ buf_biodone(buf_t bp) if (bap->ba_flags & BA_NOCACHE) code |= DKIO_NOCACHE; + if (bap->ba_flags & BA_IO_TIER_UPGRADE) { + code |= DKIO_TIER_UPGRADE; + } + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, buf_kernel_addrperm_addr(bp), (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, bp->b_error, 0); } @@ -3969,7 +4022,7 @@ buf_biodone(buf_t bp) * indicators */ CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE)); - CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP)); + CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE)); SET_BUFATTR_IO_TIER(bap, 0); diff --git a/config/MasterVersion b/config/MasterVersion index 4f558e748..5c5f71e02 100644 --- a/config/MasterVersion +++ b/config/MasterVersion @@ -1,4 +1,4 @@ -16.1.0 +16.3.0 # The first line of this file contains the master version number for the kernel. # All other instances of the kernel version in xnu are derived from this file. diff --git a/config/Private.exports b/config/Private.exports index 3df0d077a..7f9615ea7 100644 --- a/config/Private.exports +++ b/config/Private.exports @@ -293,7 +293,7 @@ _net_del_domain:_net_del_domain_old _net_del_proto:_net_del_proto_old _netboot_root _os_reason_create -_os_reason_alloc_buffer +_os_reason_alloc_buffer_noblock _os_reason_get_kcdata_descriptor _os_reason_ref _os_reason_free diff --git a/iokit/IOKit/IOHibernatePrivate.h b/iokit/IOKit/IOHibernatePrivate.h index ee9139154..778b6aa08 100644 --- a/iokit/IOKit/IOHibernatePrivate.h +++ b/iokit/IOKit/IOHibernatePrivate.h @@ -309,7 +309,7 @@ IOReturn IOHibernateIOKitSleep(void); IOReturn IOHibernateSystemHasSlept(void); IOReturn IOHibernateSystemWake(void); IOReturn IOHibernateSystemPostWake(void); -bool IOHibernateWasScreenLocked(void); +uint32_t IOHibernateWasScreenLocked(void); void IOHibernateSetScreenLocked(uint32_t lockState); void IOHibernateSetWakeCapabilities(uint32_t capability); void IOHibernateSystemRestart(void); @@ -540,7 +540,8 @@ enum kIOScreenLockFileVaultDialog = 4, }; -#define kIOScreenLockStateKey "IOScreenLockState" +#define kIOScreenLockStateKey "IOScreenLockState" +#define kIOBooterScreenLockStateKey "IOBooterScreenLockState" #endif /* ! 
__IOKIT_IOHIBERNATEPRIVATE_H */ diff --git a/iokit/IOKit/IOKitKeysPrivate.h b/iokit/IOKit/IOKitKeysPrivate.h index 3c294ff22..146968d7e 100644 --- a/iokit/IOKit/IOKitKeysPrivate.h +++ b/iokit/IOKit/IOKitKeysPrivate.h @@ -46,6 +46,7 @@ #define kIOConsoleSessionLoginDoneKey "kCGSessionLoginDoneKey" /* value is OSBoolean */ #define kIOConsoleSessionSecureInputPIDKey "kCGSSessionSecureInputPID" /* value is OSNumber */ #define kIOConsoleSessionScreenLockedTimeKey "CGSSessionScreenLockedTime" /* value is OSNumber, secs - 1970 */ +#define kIOConsoleSessionScreenIsLockedKey "CGSSessionScreenIsLocked" /* value is OSBoolean */ // IOResources property #define kIOConsoleUsersSeedKey "IOConsoleUsersSeed" /* value is OSNumber */ diff --git a/iokit/IOKit/IOMemoryDescriptor.h b/iokit/IOKit/IOMemoryDescriptor.h index fb5f5ce26..c284aaa12 100644 --- a/iokit/IOKit/IOMemoryDescriptor.h +++ b/iokit/IOKit/IOMemoryDescriptor.h @@ -122,7 +122,7 @@ enum { #endif kIOMemoryPersistent = 0x00010000, #ifdef XNU_KERNEL_PRIVATE - kIOMemoryReserved6156215 = 0x00020000, + kIOMemoryMapCopyOnWrite = 0x00020000, #endif kIOMemoryThreadSafe = 0x00100000, // Shared with Buffer MD kIOMemoryClearEncrypt = 0x00200000, // Shared with Buffer MD diff --git a/iokit/IOKit/IOWorkLoop.h b/iokit/IOKit/IOWorkLoop.h index 73c868876..afc4979b0 100644 --- a/iokit/IOKit/IOWorkLoop.h +++ b/iokit/IOKit/IOWorkLoop.h @@ -143,9 +143,6 @@ protected: struct ExpansionData { IOOptionBits options; IOEventSource *passiveEventChain; -#if DEBUG - void * allocationBacktrace[16]; -#endif /* DEBUG */ #if IOKITSTATS struct IOWorkLoopCounter *counter; #else diff --git a/iokit/Kernel/IODeviceTreeSupport.cpp b/iokit/Kernel/IODeviceTreeSupport.cpp index c66265be1..e115584ce 100644 --- a/iokit/Kernel/IODeviceTreeSupport.cpp +++ b/iokit/Kernel/IODeviceTreeSupport.cpp @@ -257,23 +257,29 @@ int IODTGetLoaderInfo( const char *key, void **infoAddr, int *infoSize ) OSData *propObj; dtptr_t *propPtr; unsigned int propSize; + int ret = -1; chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); if ( chosen == 0 ) return -1; propObj = OSDynamicCast( OSData, chosen->getProperty(key) ); - if ( propObj == 0 ) return -1; + if ( propObj == 0 ) goto cleanup; propSize = propObj->getLength(); - if ( propSize != (2 * sizeof(dtptr_t)) ) return -1; + if ( propSize != (2 * sizeof(dtptr_t)) ) goto cleanup; propPtr = (dtptr_t *)propObj->getBytesNoCopy(); - if ( propPtr == 0 ) return -1; + if ( propPtr == 0 ) goto cleanup; *infoAddr = (void *)(uintptr_t) (propPtr[0]); *infoSize = (int) (propPtr[1]); - return 0; + ret = 0; + +cleanup: + chosen->release(); + + return ret; } void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize ) @@ -289,6 +295,7 @@ void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize ) chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane ); if ( chosen != 0 ) { chosen->removeProperty(key); + chosen->release(); } } } @@ -337,6 +344,7 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) char *name; char location[ 32 ]; bool noLocation = true; + bool kernelOnly; regEntry = new IOService; @@ -348,6 +356,7 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) if( regEntry && (kSuccess == DTCreatePropertyIterator( dtEntry, &dtIter))) { + kernelOnly = (kSuccess == DTGetProperty(dtEntry, "kernel-only", &prop, &propSize)); propTable = regEntry->getPropertyTable(); while( kSuccess == DTIterateProperties( dtIter, &name)) { @@ -364,6 +373,9 @@ MakeReferenceTable( DTEntry dtEntry, bool copy ) } assert( nameKey && data ); + 
if (kernelOnly) + data->setSerializable(false); + propTable->setObject( nameKey, data); data->release(); nameKey->release(); diff --git a/iokit/Kernel/IOHibernateIO.cpp b/iokit/Kernel/IOHibernateIO.cpp index 56a77f89d..2ae11096b 100644 --- a/iokit/Kernel/IOHibernateIO.cpp +++ b/iokit/Kernel/IOHibernateIO.cpp @@ -1295,26 +1295,21 @@ IOHibernateSystemPostWake(void) return (kIOReturnSuccess); } -bool IOHibernateWasScreenLocked(void) +uint32_t IOHibernateWasScreenLocked(void) { - bool ret = false; + uint32_t ret = 0; if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) && gIOChosenEntry) { OSData * data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOScreenLockStateKey)); - if (data) switch (*((uint32_t *)data->getBytesNoCopy())) + if (data) { - case kIOScreenLockLocked: - case kIOScreenLockFileVaultDialog: - ret = true; - break; - case kIOScreenLockNoLock: - case kIOScreenLockUnlocked: - default: - ret = false; - break; - } + ret = ((uint32_t *)data->getBytesNoCopy())[0]; + gIOChosenEntry->setProperty(kIOBooterScreenLockStateKey, data); + } } + else gIOChosenEntry->removeProperty(kIOBooterScreenLockStateKey); + return (ret); } diff --git a/iokit/Kernel/IOLib.cpp b/iokit/Kernel/IOLib.cpp index 01f751a86..73a0c67a8 100644 --- a/iokit/Kernel/IOLib.cpp +++ b/iokit/Kernel/IOLib.cpp @@ -295,15 +295,25 @@ void * IOMalloc(vm_size_t size) return address; } -void IOFree(void * address, vm_size_t size) +void IOFree(void * inAddress, vm_size_t size) { - if (address) { + void * address; + if ((address = inAddress)) + { address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader); #if IOTRACKING - if (TRACK_ALLOC) { + if (TRACK_ALLOC) + { IOLibMallocHeader * hdr; + struct ptr_reference{ void * ptr; }; + volatile struct ptr_reference ptr; + + // we're about to block in IOTrackingRemove(), make sure the original pointer + // exists in memory or a register for leak scanning to find + ptr.ptr = inAddress; + hdr = (typeof(hdr)) address; if (size != hdr->tracking.size) { @@ -311,6 +321,7 @@ void IOFree(void * address, vm_size_t size) size = hdr->tracking.size; } IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size); + ptr.ptr = NULL; } #endif @@ -746,7 +757,7 @@ kern_return_t IOIteratePageableMaps(vm_size_t size, else index = gIOKitPageableSpace.count - 1; } - if( KERN_SUCCESS == kr) + if (KERN_NO_SPACE != kr) break; lck_mtx_lock( gIOKitPageableSpace.lock ); diff --git a/iokit/Kernel/IOMemoryDescriptor.cpp b/iokit/Kernel/IOMemoryDescriptor.cpp index b2bde45a8..fa735f3e4 100644 --- a/iokit/Kernel/IOMemoryDescriptor.cpp +++ b/iokit/Kernel/IOMemoryDescriptor.cpp @@ -383,17 +383,19 @@ struct IOMemoryEntry struct IOMemoryReference { - volatile SInt32 refCount; - vm_prot_t prot; - uint32_t capacity; - uint32_t count; - IOMemoryEntry entries[0]; + volatile SInt32 refCount; + vm_prot_t prot; + uint32_t capacity; + uint32_t count; + struct IOMemoryReference * mapRef; + IOMemoryEntry entries[0]; }; enum { kIOMemoryReferenceReuse = 0x00000001, kIOMemoryReferenceWrite = 0x00000002, + kIOMemoryReferenceCOW = 0x00000004, }; SInt32 gIOMemoryReferenceCount; @@ -435,6 +437,12 @@ IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref) IOMemoryEntry * entries; size_t size; + if (ref->mapRef) + { + memoryReferenceFree(ref->mapRef); + ref->mapRef = 0; + } + entries = ref->entries + ref->count; while (entries > &ref->entries[0]) { @@ -489,10 +497,14 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( tag = getVMTag(kernel_map); entries = &ref->entries[0]; count = 0; + 
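/*
 * A sketch of the keep-alive idiom the IOFree() hunk above introduces: before
 * a call that may block, park the caller's pointer in a volatile stack slot so
 * a conservative leak scanner still sees a live reference to the allocation.
 * blocking_untrack() is a hypothetical stand-in for IOTrackingRemove().
 */
extern void blocking_untrack(void *p);  /* hypothetical blocking bookkeeping call */

static void
release_tracked(void *p)
{
	struct ptr_reference { void *ptr; };
	volatile struct ptr_reference keep;

	keep.ptr = p;       /* keep the pointer scannable while we block */
	blocking_untrack(p);
	keep.ptr = NULL;    /* done: the allocation may now be reported as freed */
}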
err = KERN_SUCCESS; offset = 0; rangeIdx = 0; - if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + if (_task) + { + getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx); + } else { nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); @@ -526,6 +538,7 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE; if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE; if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE; + if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY; if ((kIOMemoryReferenceReuse & options) && _memRef) { @@ -650,6 +663,13 @@ IOGeneralMemoryDescriptor::memoryReferenceCreate( ref->count = count; ref->prot = prot; + if (_task && (KERN_SUCCESS == err) + && (kIOMemoryMapCopyOnWrite & _flags) + && !(kIOMemoryReferenceCOW & options)) + { + err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef); + } + if (KERN_SUCCESS == err) { if (MAP_MEM_NAMED_REUSE & prot) @@ -723,14 +743,19 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( IOOptionBits type; IOOptionBits cacheMode; vm_tag_t tag; + // for the kIOMapPrefault option. + upl_page_info_t * pageList = NULL; + UInt currentPageIndex = 0; + bool didAlloc; - /* - * For the kIOMapPrefault option. - */ - upl_page_info_t *pageList = NULL; - UInt currentPageIndex = 0; + if (ref->mapRef) + { + err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr); + return (err); + } type = _flags & kIOMemoryTypeMask; + prot = VM_PROT_READ; if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE; prot &= ref->prot; @@ -769,7 +794,9 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( nextAddr += remain; nextLen -= remain; pageOffset = (page_mask & nextAddr); - addr = 0; + addr = 0; + didAlloc = false; + if (!(options & kIOMapAnywhere)) { addr = *inaddr; @@ -813,8 +840,9 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( err = IOMemoryDescriptorMapAlloc(ref.map, &ref); if (KERN_SUCCESS == err) { - addr = ref.mapped; - map = ref.map; + addr = ref.mapped; + map = ref.map; + didAlloc = true; } } @@ -956,7 +984,7 @@ IOGeneralMemoryDescriptor::memoryReferenceMap( } } - if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options)) + if ((KERN_SUCCESS != err) && didAlloc) { (void) mach_vm_deallocate(map, trunc_page_64(addr), size); addr = 0; @@ -1449,12 +1477,6 @@ IOGeneralMemoryDescriptor::initWithOptions(void * buffers, gIOSystemMapper = mapper = IOMapper::gSystem; } - // Temp binary compatibility for kIOMemoryThreadSafe - if (kIOMemoryReserved6156215 & options) - { - options &= ~kIOMemoryReserved6156215; - options |= kIOMemoryThreadSafe; - } // Remove the dynamic internal use flags from the initial setting options &= ~(kIOMemoryPreparedReadOnly); _flags = options; @@ -3503,7 +3525,7 @@ IOReturn IOGeneralMemoryDescriptor::doMap( // upl_transpose> // else { - err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); + err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress); #if IOTRACKING if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) { diff --git a/iokit/Kernel/IOPMrootDomain.cpp b/iokit/Kernel/IOPMrootDomain.cpp index 4ae1c904c..e967cb33f 100644 --- a/iokit/Kernel/IOPMrootDomain.cpp +++ b/iokit/Kernel/IOPMrootDomain.cpp @@ -6841,8 +6841,11 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) userWasActive = true; // Stay awake after dropping demand for display 
power on - if (kFullWakeReasonDisplayOn == fullWakeReason) + if (kFullWakeReasonDisplayOn == fullWakeReason) { fullWakeReason = fFullWakeReasonDisplayOnAndLocalUser; + DLOG("User activity while in notification wake\n"); + changePowerStateWithOverrideTo( ON_STATE, 0); + } kdebugTrace(kPMLogUserActiveState, 0, 1, 0); setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue); @@ -7056,6 +7059,7 @@ void IOPMrootDomain::evaluatePolicy( int stimulus, uint32_t arg ) (kFullWakeReasonDisplayOn == fullWakeReason)) { // kIOPMSleepReasonMaintenance? + DLOG("Display sleep while in notification wake\n"); changePowerStateWithOverrideTo( SLEEP_STATE, kIOPMSleepReasonMaintenance ); } diff --git a/iokit/Kernel/IOPlatformExpert.cpp b/iokit/Kernel/IOPlatformExpert.cpp index 7dff66444..c83a71fd5 100644 --- a/iokit/Kernel/IOPlatformExpert.cpp +++ b/iokit/Kernel/IOPlatformExpert.cpp @@ -822,8 +822,13 @@ int PEHaltRestart(unsigned int type) } else if(type == kPEPanicRestartCPU || type == kPEPanicSync) { - IOCPURunPlatformPanicActions(type); - PE_sync_panic_buffers(); + // Do an initial sync to flush as much panic data as possible, + // in case we have a problem in one of the platform panic handlers. + // After running the platform handlers, do a final sync w/ + // platform hardware quiesced for the panic. + PE_sync_panic_buffers(); + IOCPURunPlatformPanicActions(type); + PE_sync_panic_buffers(); } if (gIOPlatform) return gIOPlatform->haltRestart(type); diff --git a/iokit/Kernel/IOService.cpp b/iokit/Kernel/IOService.cpp index bbb8781d1..440b5c589 100644 --- a/iokit/Kernel/IOService.cpp +++ b/iokit/Kernel/IOService.cpp @@ -131,10 +131,11 @@ const OSSymbol * gIOConsoleSessionOnConsoleKey; const OSSymbol * gIOConsoleSessionLoginDoneKey; const OSSymbol * gIOConsoleSessionSecureInputPIDKey; const OSSymbol * gIOConsoleSessionScreenLockedTimeKey; - +const OSSymbol * gIOConsoleSessionScreenIsLockedKey; clock_sec_t gIOConsoleLockTime; static bool gIOConsoleLoggedIn; #if HIBERNATION +static OSBoolean * gIOConsoleBooterLockState; static uint32_t gIOScreenLockState; #endif static IORegistryEntry * gIOChosenEntry; @@ -367,6 +368,7 @@ void IOService::initialize( void ) gIOConsoleSessionLoginDoneKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionLoginDoneKey); gIOConsoleSessionSecureInputPIDKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionSecureInputPIDKey); gIOConsoleSessionScreenLockedTimeKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenLockedTimeKey); + gIOConsoleSessionScreenIsLockedKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenIsLockedKey); gIOConsoleUsersSeedValue = OSData::withBytesNoCopy(&gIOConsoleUsersSeed, sizeof(gIOConsoleUsersSeed)); @@ -4994,9 +4996,25 @@ void IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessa { sSystemPower = systemMessage; #if HIBERNATION - if ((kIOMessageSystemHasPoweredOn == systemMessage) && IOHibernateWasScreenLocked()) + if (kIOMessageSystemHasPoweredOn == systemMessage) { - locked = kOSBooleanTrue; + uint32_t lockState = IOHibernateWasScreenLocked(); + switch (lockState) + { + case 0: + break; + case kIOScreenLockLocked: + case kIOScreenLockFileVaultDialog: + gIOConsoleBooterLockState = kOSBooleanTrue; + break; + case kIOScreenLockNoLock: + gIOConsoleBooterLockState = 0; + break; + case kIOScreenLockUnlocked: + default: + gIOConsoleBooterLockState = kOSBooleanFalse; + break; + } } #endif /* HIBERNATION */ } @@ -5004,6 +5022,8 @@ void IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessa if (consoleUsers) { OSNumber * num = 0; +
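/*
 * A sketch of the mapping updateConsoleUsers() applies to the raw value now
 * returned by IOHibernateWasScreenLocked() (see the switch above): locked and
 * FileVault-dialog states force the console locked, kIOScreenLockNoLock and a
 * missing value leave the booter with no opinion, and everything else reads
 * as unlocked. The tri-state return convention here is illustrative, not from
 * the source.
 */
#include <IOKit/IOHibernatePrivate.h>  /* private header; declares the kIOScreenLock* enum */

static int
booter_lock_tristate(uint32_t lockState)
{
	switch (lockState) {
	case 0:
		return -1;  /* booter recorded nothing */
	case kIOScreenLockLocked:
	case kIOScreenLockFileVaultDialog:
		return 1;   /* console should come up locked */
	case kIOScreenLockNoLock:
		return -1;  /* booter explicitly recorded no lock policy */
	case kIOScreenLockUnlocked:
	default:
		return 0;   /* unlocked */
	}
}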
bool loginLocked = true; + gIOConsoleLoggedIn = false; for (idx = 0; (user = OSDynamicCast(OSDictionary, consoleUsers->getObject(idx))); @@ -5011,11 +5031,16 @@ void IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessa { gIOConsoleLoggedIn |= ((kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey)) && (kOSBooleanTrue == user->getObject(gIOConsoleSessionLoginDoneKey))); + + loginLocked &= (kOSBooleanTrue == user->getObject(gIOConsoleSessionScreenIsLockedKey)); if (!num) { num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionScreenLockedTimeKey)); } } +#if HIBERNATION + if (!loginLocked) gIOConsoleBooterLockState = 0; +#endif /* HIBERNATION */ gIOConsoleLockTime = num ? num->unsigned32BitValue() : 0; } @@ -5025,6 +5050,12 @@ void IOService::updateConsoleUsers(OSArray * consoleUsers, IOMessage systemMessa { locked = kOSBooleanTrue; } +#if HIBERNATION + else if (gIOConsoleBooterLockState) + { + locked = gIOConsoleBooterLockState; + } +#endif /* HIBERNATION */ else if (gIOConsoleLockTime) { clock_sec_t now; diff --git a/iokit/Kernel/IOUserClient.cpp b/iokit/Kernel/IOUserClient.cpp index e2b671727..d568aaca2 100644 --- a/iokit/Kernel/IOUserClient.cpp +++ b/iokit/Kernel/IOUserClient.cpp @@ -2920,6 +2920,7 @@ kern_return_t is_io_registry_entry_get_property_bytes( if( (data = OSDynamicCast( OSData, obj ))) { len = data->getLength(); bytes = data->getBytesNoCopy(); + if (!data->isSerializable()) len = 0; } else if( (str = OSDynamicCast( OSString, obj ))) { len = str->getLength() + 1; @@ -3384,7 +3385,8 @@ kern_return_t is_io_service_open_extended( do { - if (properties) + if (properties) return (kIOReturnUnsupported); +#if 0 { OSObject * obj; vm_offset_t data; @@ -3412,7 +3414,7 @@ kern_return_t is_io_service_open_extended( if (kIOReturnSuccess != res) break; } - +#endif crossEndian = (ndr.int_rep != NDR_record.int_rep); if (crossEndian) { @@ -3793,7 +3795,8 @@ kern_return_t is_io_connect_method_var_output if (ool_input) inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut, current_task()); + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); args.structureInputDescriptor = inputMD; @@ -3887,7 +3890,8 @@ kern_return_t is_io_connect_method if (ool_input) inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut, current_task()); + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); args.structureInputDescriptor = inputMD; @@ -3971,7 +3975,8 @@ kern_return_t is_io_connect_async_method if (ool_input) inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size, - kIODirectionOut, current_task()); + kIODirectionOut | kIOMemoryMapCopyOnWrite, + current_task()); args.structureInputDescriptor = inputMD; diff --git a/iokit/Kernel/IOWorkLoop.cpp b/iokit/Kernel/IOWorkLoop.cpp index 157bdd976..e3896d38b 100644 --- a/iokit/Kernel/IOWorkLoop.cpp +++ b/iokit/Kernel/IOWorkLoop.cpp @@ -129,11 +129,7 @@ bool IOWorkLoop::init() bzero(reserved,sizeof(ExpansionData)); } - -#if DEBUG - OSBacktrace ( reserved->allocationBacktrace, sizeof ( reserved->allocationBacktrace ) / sizeof ( reserved->allocationBacktrace[0] ) ); -#endif - + if ( gateLock == NULL ) { if ( !( gateLock = IORecursiveLockAlloc()) ) return false; diff --git a/iokit/Kernel/RootDomainUserClient.cpp b/iokit/Kernel/RootDomainUserClient.cpp index a7836f4c3..ecd0cc1f3 100644 --- a/iokit/Kernel/RootDomainUserClient.cpp +++ b/iokit/Kernel/RootDomainUserClient.cpp @@ -216,14 +216,19 @@ IOReturn 
RootDomainUserClient::secureGetSystemSleepType( IOReturn RootDomainUserClient::clientClose( void ) { - detach(fOwner); + terminate(); + return kIOReturnSuccess; +} + +void RootDomainUserClient::stop( IOService *provider) +{ if(fOwningTask) { task_deallocate(fOwningTask); fOwningTask = 0; } - return kIOReturnSuccess; + super::stop(provider); } IOReturn RootDomainUserClient::externalMethod( diff --git a/iokit/Kernel/RootDomainUserClient.h b/iokit/Kernel/RootDomainUserClient.h index ac09ffbf0..b37f71029 100644 --- a/iokit/Kernel/RootDomainUserClient.h +++ b/iokit/Kernel/RootDomainUserClient.h @@ -89,6 +89,7 @@ public: // Unused - retained for symbol compatibility virtual IOExternalMethod * getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ) APPLE_KEXT_OVERRIDE; + virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE; }; diff --git a/iokit/Tests/TestIOMemoryDescriptor.cpp b/iokit/Tests/TestIOMemoryDescriptor.cpp index 4cad3c34c..59ce35546 100644 --- a/iokit/Tests/TestIOMemoryDescriptor.cpp +++ b/iokit/Tests/TestIOMemoryDescriptor.cpp @@ -65,6 +65,8 @@ __END_DECLS #if DEVELOPMENT || DEBUG +extern SInt32 gIOMemoryReferenceCount; + static int IOMultMemoryDescriptorTest(int newValue) { IOMemoryDescriptor * mds[3]; @@ -123,6 +125,23 @@ ZeroLengthTest(int newValue) return (0); } +// +static IOReturn +BadFixedAllocTest(int newValue) +{ + IOBufferMemoryDescriptor * bmd; + IOMemoryMap * map; + + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL, + kIODirectionIn | kIOMemoryPageable, ptoa(1)); + assert(bmd); + map = bmd->createMappingInTask(kernel_task, 0x2000, 0); + assert(!map); + + bmd->release(); + return (0); +} + // static IOReturn IODirectionPrepareNoZeroFillTest(int newValue) @@ -140,10 +159,89 @@ IODirectionPrepareNoZeroFillTest(int newValue) return (0); } + +// +static IOReturn +IOMemoryMapTest(uint32_t options) +{ + IOBufferMemoryDescriptor * bmd; + IOMemoryDescriptor * md; + IOMemoryMap * map; + uint32_t data; + user_addr_t p; + uint8_t * p2; + int r; + uint64_t time, nano; + + bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(), + kIODirectionOutIn | kIOMemoryPageable, 0x4018+0x800); + assert(bmd); + p = (typeof(p)) bmd->getBytesNoCopy(); + p += 0x800; + data = 0x11111111; + r = copyout(&data, p, sizeof(data)); + assert(r == 0); + data = 0x22222222; + r = copyout(&data, p + 0x1000, sizeof(data)); + assert(r == 0); + data = 0x33333333; + r = copyout(&data, p + 0x2000, sizeof(data)); + assert(r == 0); + data = 0x44444444; + r = copyout(&data, p + 0x3000, sizeof(data)); + assert(r == 0); + + md = IOMemoryDescriptor::withAddressRange(p, 0x4018, + kIODirectionOut | options, + current_task()); + assert(md); + time = mach_absolute_time(); + map = md->map(kIOMapReadOnly); + time = mach_absolute_time() - time; + assert(map); + absolutetime_to_nanoseconds(time, &nano); + + p2 = (typeof(p2)) map->getVirtualAddress(); + assert(0x11 == p2[0]); + assert(0x22 == p2[0x1000]); + assert(0x33 == p2[0x2000]); + assert(0x44 == p2[0x3000]); + + data = 0x99999999; + r = copyout(&data, p + 0x2000, sizeof(data)); + assert(r == 0); + + assert(0x11 == p2[0]); + assert(0x22 == p2[0x1000]); + assert(0x44 == p2[0x3000]); + if (kIOMemoryMapCopyOnWrite & options) assert(0x33 == p2[0x2000]); + else assert(0x99 == p2[0x2000]); + + IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n", + kIOMemoryMapCopyOnWrite & options ? 
"kIOMemoryMapCopyOnWrite" : "", + nano); + + map->release(); + md->release(); + bmd->release(); + + return (kIOReturnSuccess); +} + +static int +IOMemoryMapCopyOnWriteTest(int newValue) +{ + IOMemoryMapTest(0); + IOMemoryMapTest(kIOMemoryMapCopyOnWrite); + return (0); +} + int IOMemoryDescriptorTest(int newValue) { int result; + IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount); + #if 0 if (6 == newValue) { @@ -368,6 +466,9 @@ int IOMemoryDescriptorTest(int newValue) } #endif + result = IOMemoryMapCopyOnWriteTest(newValue); + if (result) return (result); + result = IOMultMemoryDescriptorTest(newValue); if (result) return (result); @@ -377,13 +478,19 @@ int IOMemoryDescriptorTest(int newValue) result = IODirectionPrepareNoZeroFillTest(newValue); if (result) return (result); + result = BadFixedAllocTest(newValue); + if (result) return (result); + IOGeneralMemoryDescriptor * md; vm_offset_t data[2]; vm_size_t bsize = 16*1024*1024; vm_size_t srcsize, srcoffset, mapoffset, size; kern_return_t kr; + data[0] = data[1] = 0; kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE); + assert(KERN_SUCCESS == kr); + vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE); vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE); @@ -405,6 +512,7 @@ int IOMemoryDescriptorTest(int newValue) bzero(&ranges[0], sizeof(ranges)); ranges[0].address = data[0] + srcoffset; ranges[0].length = srcsize; + ranges[1].address = ranges[2].address = data[0]; if (srcsize > ptoa(5)) { @@ -431,7 +539,7 @@ int IOMemoryDescriptorTest(int newValue) assert(md); IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n", - (long) srcsize, (long) srcoffset, + (long) srcsize, (long) srcoffset, (long long) ranges[0].address - data[0], (long long) ranges[0].length, (long long) ranges[1].address - data[0], (long long) ranges[1].length, (long long) ranges[2].address - data[0], (long long) ranges[2].length); @@ -506,6 +614,8 @@ int IOMemoryDescriptorTest(int newValue) vm_deallocate(kernel_map, data[0], bsize); // vm_deallocate(kernel_map, data[1], size); + IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount); + return (0); } diff --git a/libkern/c++/OSData.cpp b/libkern/c++/OSData.cpp index 6e17f6c41..a542ee603 100644 --- a/libkern/c++/OSData.cpp +++ b/libkern/c++/OSData.cpp @@ -496,3 +496,8 @@ void OSData::setSerializable(bool serializable) } reserved->disableSerialization = (!serializable); } + +bool OSData::isSerializable(void) +{ + return (!reserved || !reserved->disableSerialization); +} diff --git a/libkern/c++/OSKext.cpp b/libkern/c++/OSKext.cpp index 5fc12ffc8..b7fcbf77c 100644 --- a/libkern/c++/OSKext.cpp +++ b/libkern/c++/OSKext.cpp @@ -4421,6 +4421,7 @@ OSKext::recordIdentifierRequest( goto finish; } + IORecursiveLockLock(sKextLock); if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) { if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) { fail = true; @@ -4434,6 +4435,8 @@ OSKext::recordIdentifierRequest( kextIdentifier->getCStringNoCopy()); } } + IORecursiveLockUnlock(sKextLock); + finish: if (fail) { diff --git a/libkern/c++/OSSerializeBinary.cpp b/libkern/c++/OSSerializeBinary.cpp index 51bd067dc..08cf1654c 100644 --- a/libkern/c++/OSSerializeBinary.cpp +++ b/libkern/c++/OSSerializeBinary.cpp @@ -69,8 +69,9 @@ bool OSSerialize::addBinary(const void * bits, size_t size) unsigned int newCapacity; size_t alignSize; - alignSize = ((size + 3) & ~3L); - newCapacity = 
length + alignSize; + if (os_add_overflow(size, 3, &alignSize)) return (false); + alignSize &= ~3L; + if (os_add_overflow(length, alignSize, &newCapacity)) return (false); if (newCapacity >= capacity) { newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; @@ -92,8 +93,9 @@ bool OSSerialize::addBinaryObject(const OSMetaClassBase * o, uint32_t key, // add to tag array tags->setObject(o); - alignSize = ((size + sizeof(key) + 3) & ~3L); - newCapacity = length + alignSize; + if (os_add3_overflow(size, sizeof(key), 3, &alignSize)) return (false); + alignSize &= ~3L; + if (os_add_overflow(length, alignSize, &newCapacity)) return (false); if (newCapacity >= capacity) { newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement; @@ -267,7 +269,7 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin OSObject ** stackArray; uint32_t stackCapacity; - enum { stackCapacityMax = 64*1024 }; + enum { stackCapacityMax = 64 }; uint32_t stackIdx; OSObject * result; @@ -430,14 +432,12 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin if (!ok) break; + if (end) parent = 0; if (newCollect) { - if (!end) - { - stackIdx++; - setAtIndex(stack, stackIdx, parent); - if (!ok) break; - } + stackIdx++; + setAtIndex(stack, stackIdx, parent); + if (!ok) break; DEBG("++stack[%d] %p\n", stackIdx, parent); parent = o; dict = newDict; @@ -448,11 +448,15 @@ OSUnserializeBinary(const char *buffer, size_t bufferSize, OSString **errorStrin if (end) { - if (!stackIdx) break; - parent = stackArray[stackIdx]; - DEBG("--stack[%d] %p\n", stackIdx, parent); - stackIdx--; - set = 0; + while (stackIdx) + { + parent = stackArray[stackIdx]; + DEBG("--stack[%d] %p\n", stackIdx, parent); + stackIdx--; + if (parent) break; + } + if (!parent) break; + set = 0; dict = 0; array = 0; if (!(dict = OSDynamicCast(OSDictionary, parent))) diff --git a/libkern/c++/OSSymbol.cpp b/libkern/c++/OSSymbol.cpp index a521f5cea..25c16b1d7 100644 --- a/libkern/c++/OSSymbol.cpp +++ b/libkern/c++/OSSymbol.cpp @@ -501,7 +501,7 @@ const OSSymbol *OSSymbol::withCString(const char *cString) newSymb->OSString::free(); } - oldSymb->retain(); // Retain the old symbol before releasing the lock. + if (oldSymb) oldSymb->retain(); // Retain the old symbol before releasing the lock. pool->openGate(); return oldSymb; diff --git a/libkern/c++/OSUnserializeXML.cpp b/libkern/c++/OSUnserializeXML.cpp index 37c40da4d..7df203dc7 100644 --- a/libkern/c++/OSUnserializeXML.cpp +++ b/libkern/c++/OSUnserializeXML.cpp @@ -553,10 +553,10 @@ static const yytype_int8 yyrhs[] = /* YYRLINE[YYN] -- source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { - 0, 149, 149, 152, 157, 162, 170, 178, 186, 194, - 202, 210, 218, 237, 240, 243, 246, 247, 262, 271, - 283, 286, 289, 292, 295, 298, 301, 304, 311, 314, - 317, 320, 323 + 0, 149, 149, 152, 157, 162, 174, 186, 198, 210, + 222, 234, 246, 265, 268, 271, 274, 275, 290, 299, + 311, 314, 317, 320, 323, 326, 329, 332, 339, 342, + 345, 348, 351 }; #endif @@ -935,7 +935,7 @@ int yydebug; /* YYINITDEPTH -- initial size of the parser's stacks. 
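/*
 * A sketch of the overflow-checked capacity math the OSSerialize hunks above
 * switch to: round a size up to a 4-byte boundary and add it to the current
 * length, failing cleanly instead of wrapping. The helper name and size_t
 * parameter types are illustrative assumptions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <os/overflow.h>

static bool
grow_capacity_4aligned(size_t length, size_t size, size_t *newCapacity)
{
	size_t alignSize;

	if (os_add_overflow(size, 3, &alignSize)) {
		return false;            /* size + 3 wrapped around */
	}
	alignSize &= ~(size_t)3;         /* mask back down: net effect is round-up */
	return !os_add_overflow(length, alignSize, newCapacity);
}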
*/ #ifndef YYINITDEPTH -# define YYINITDEPTH 200 +# define YYINITDEPTH 64 #endif /* YYMAXDEPTH -- maximum size the stacks can grow to (effective only @@ -1520,6 +1520,10 @@ yyreduce: #line 162 "OSUnserializeXML.y" { (yyval) = buildDictionary(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildDictionary"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1529,9 +1533,13 @@ yyreduce: break; case 6: -#line 170 "OSUnserializeXML.y" +#line 174 "OSUnserializeXML.y" { (yyval) = buildArray(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildArray"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1541,9 +1549,13 @@ yyreduce: break; case 7: -#line 178 "OSUnserializeXML.y" +#line 186 "OSUnserializeXML.y" { (yyval) = buildSet(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildSet"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1553,9 +1565,13 @@ yyreduce: break; case 8: -#line 186 "OSUnserializeXML.y" +#line 198 "OSUnserializeXML.y" { (yyval) = buildString(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildString"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1565,9 +1581,13 @@ yyreduce: break; case 9: -#line 194 "OSUnserializeXML.y" +#line 210 "OSUnserializeXML.y" { (yyval) = buildData(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildData"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1577,9 +1597,13 @@ yyreduce: break; case 10: -#line 202 "OSUnserializeXML.y" +#line 222 "OSUnserializeXML.y" { (yyval) = buildNumber(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildNumber"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1589,9 +1613,13 @@ yyreduce: break; case 11: -#line 210 "OSUnserializeXML.y" +#line 234 "OSUnserializeXML.y" { (yyval) = buildBoolean(STATE, (yyvsp[(1) - (1)])); + if (!yyval->object) { + yyerror("buildBoolean"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -1601,7 +1629,7 @@ yyreduce: break; case 12: -#line 218 "OSUnserializeXML.y" +#line 246 "OSUnserializeXML.y" { (yyval) = retrieveObject(STATE, (yyvsp[(1) - (1)])->idref); if ((yyval)) { (yyval)->object->retain(); @@ -1620,21 +1648,21 @@ yyreduce: break; case 13: -#line 237 "OSUnserializeXML.y" +#line 265 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 14: -#line 240 "OSUnserializeXML.y" +#line 268 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 17: -#line 247 "OSUnserializeXML.y" +#line 275 "OSUnserializeXML.y" { (yyval) = (yyvsp[(2) - (2)]); (yyval)->next = (yyvsp[(1) - (2)]); @@ -1651,7 +1679,7 @@ yyreduce: break; case 18: -#line 262 "OSUnserializeXML.y" +#line 290 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->key = (OSSymbol *)(yyval)->object; (yyval)->object = (yyvsp[(2) - (2)])->object; @@ -1662,7 +1690,7 @@ yyreduce: break; case 19: -#line 271 "OSUnserializeXML.y" +#line 299 "OSUnserializeXML.y" { (yyval) = buildSymbol(STATE, (yyvsp[(1) - (1)])); // 
STATE->parsedObjectCount++; @@ -1674,42 +1702,42 @@ yyreduce: break; case 20: -#line 283 "OSUnserializeXML.y" +#line 311 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 21: -#line 286 "OSUnserializeXML.y" +#line 314 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 23: -#line 292 "OSUnserializeXML.y" +#line 320 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (2)]); (yyval)->elements = NULL; ;} break; case 24: -#line 295 "OSUnserializeXML.y" +#line 323 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (3)]); (yyval)->elements = (yyvsp[(2) - (3)]); ;} break; case 26: -#line 301 "OSUnserializeXML.y" +#line 329 "OSUnserializeXML.y" { (yyval) = (yyvsp[(1) - (1)]); (yyval)->next = NULL; ;} break; case 27: -#line 304 "OSUnserializeXML.y" +#line 332 "OSUnserializeXML.y" { (yyval) = (yyvsp[(2) - (2)]); (yyval)->next = (yyvsp[(1) - (2)]); ;} @@ -1717,7 +1745,7 @@ yyreduce: /* Line 1267 of yacc.c. */ -#line 1671 "OSUnserializeXML.tab.c" +#line 1699 "OSUnserializeXML.tab.c" default: break; } YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc); @@ -1931,7 +1959,7 @@ yyreturn: } -#line 326 "OSUnserializeXML.y" +#line 354 "OSUnserializeXML.y" int diff --git a/libkern/c++/OSUnserializeXML.y b/libkern/c++/OSUnserializeXML.y index 10465a9aa..23e64137c 100644 --- a/libkern/c++/OSUnserializeXML.y +++ b/libkern/c++/OSUnserializeXML.y @@ -161,6 +161,10 @@ input: /* empty */ { yyerror("unexpected end of buffer"); object: dict { $$ = buildDictionary(STATE, $1); + if (!yyval->object) { + yyerror("buildDictionary"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -169,6 +173,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | array { $$ = buildArray(STATE, $1); + if (!yyval->object) { + yyerror("buildArray"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -177,6 +185,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | set { $$ = buildSet(STATE, $1); + if (!yyval->object) { + yyerror("buildSet"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -185,6 +197,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | string { $$ = buildString(STATE, $1); + if (!yyval->object) { + yyerror("buildString"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -193,6 +209,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | data { $$ = buildData(STATE, $1); + if (!yyval->object) { + yyerror("buildData"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -201,6 +221,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | number { $$ = buildNumber(STATE, $1); + if (!yyval->object) { + yyerror("buildNumber"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -209,6 +233,10 @@ object: dict { $$ = buildDictionary(STATE, $1); } | boolean { $$ = buildBoolean(STATE, $1); + if (!yyval->object) { + yyerror("buildBoolean"); + YYERROR; + } STATE->parsedObjectCount++; if (STATE->parsedObjectCount > MAX_OBJECTS) { yyerror("maximum object count"); @@ -405,6 +433,7 @@ getTag(parser_state_t *state, if (c == '\n') state->lineNumber++; if (c != '?') continue; c = nextChar(); + if 
(!c) return TAG_IGNORE; if (c == '>') { (void)nextChar(); return TAG_IGNORE; @@ -459,6 +488,7 @@ getTag(parser_state_t *state, values[*attributeCount][length++] = c; if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD; c = nextChar(); + if (!c) return TAG_BAD; } values[*attributeCount][length] = 0; @@ -1179,9 +1209,10 @@ OSObject* OSUnserializeXML(const char *buffer, OSString **errorString) { OSObject *object; - parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t)); - if ((!state) || (!buffer)) return 0; + if (!buffer) return 0; + parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t)); + if (!state) return 0; // just in case if (errorString) *errorString = NULL; @@ -1207,13 +1238,18 @@ OSUnserializeXML(const char *buffer, OSString **errorString) return object; } +#include + OSObject* OSUnserializeXML(const char *buffer, size_t bufferSize, OSString **errorString) { - if ((!buffer) || (!bufferSize)) return 0; + if (!buffer) return (0); + if (bufferSize < sizeof(kOSSerializeBinarySignature)) return (0); + + if (!strcmp(kOSSerializeBinarySignature, buffer)) return OSUnserializeBinary(buffer, bufferSize, errorString); // XML must be null terminated - if (buffer[bufferSize - 1] || strnlen(buffer, bufferSize) == bufferSize) return 0; + if (buffer[bufferSize - 1]) return 0; return OSUnserializeXML(buffer, errorString); } diff --git a/libkern/conf/files b/libkern/conf/files index 6f8e2f998..6f4f78550 100644 --- a/libkern/conf/files +++ b/libkern/conf/files @@ -77,6 +77,8 @@ libkern/crypto/corecrypto_md5.c optional crypto libkern/crypto/corecrypto_des.c optional crypto libkern/crypto/corecrypto_aes.c optional crypto libkern/crypto/corecrypto_aesxts.c optional crypto +libkern/crypto/corecrypto_rand.c optional crypto +libkern/crypto/corecrypto_rsa.c optional crypto libkern/stack_protector.c standard diff --git a/libkern/crypto/corecrypto_aes.c b/libkern/crypto/corecrypto_aes.c index b73ff2f34..3fa1ad333 100644 --- a/libkern/crypto/corecrypto_aes.c +++ b/libkern/crypto/corecrypto_aes.c @@ -126,8 +126,17 @@ aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *c return aes_error; } - ccgcm_init(gcm, ctx, key_len, key); - return aes_good; + return ccgcm_init(gcm, ctx, key_len, key); +} + +aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; + if (!gcm) { + return aes_error; + } + + return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv); } aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) @@ -137,8 +146,27 @@ aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, cc return aes_error; } - ccgcm_set_iv(gcm, ctx, len, in_iv); - return aes_good; + return ccgcm_set_iv(gcm, ctx, len, in_iv); +} + +aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; + if (!gcm) { + return aes_error; + } + + return ccgcm_reset(gcm, ctx); +} + +aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; + if (!gcm) { + return aes_error; + } + + return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv); } aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) @@ -148,8 +176,7 @@ aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, c return 
aes_error; } - ccgcm_gmac(gcm, ctx, aad_bytes, aad); - return aes_good; + return ccgcm_gmac(gcm, ctx, aad_bytes, aad); } aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, @@ -160,20 +187,20 @@ aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, return aes_error; } - ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt. - return aes_good; + return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt. } aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) { + int rc; const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt; if (!gcm) { return aes_error; } - ccgcm_finalize(gcm, ctx, tag_bytes, tag); - ccgcm_reset(gcm, ctx); - return aes_good; + rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag); + rc |= ccgcm_reset(gcm, ctx); + return rc; } aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx) @@ -183,19 +210,51 @@ aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *c return aes_error; } - ccgcm_init(gcm, ctx, key_len, key); - return aes_good; + return ccgcm_init(gcm, ctx, key_len, key); +} + +aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; + if (!gcm) { + return aes_error; + } + + return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv); } aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx) { + int rc; + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { return aes_error; } - ccgcm_set_iv(gcm, ctx, len, in_iv); - return aes_good; + rc = ccgcm_reset(gcm, ctx); + rc |= ccgcm_set_iv(gcm, ctx, len, in_iv); + return rc; +} + +aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; + if (!gcm) { + return aes_error; + } + + return ccgcm_reset(gcm, ctx); +} + +aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx) +{ + const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; + if (!gcm) { + return aes_error; + } + + return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv); } aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx) @@ -205,8 +264,7 @@ aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, c return aes_error; } - ccgcm_gmac(gcm, ctx, aad_bytes, aad); - return aes_good; + return ccgcm_gmac(gcm, ctx, aad_bytes, aad); } aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, @@ -217,20 +275,20 @@ aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, return aes_error; } - ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt. - return aes_good; + return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt. 
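/*
 * A sketch of driving the reworked GCM wrappers: each call now propagates the
 * corecrypto status instead of unconditionally returning aes_good, so callers
 * should check every step. Function names match the aes.h hunk later in this
 * diff; the 12-byte IV and 16-byte tag sizes are illustrative assumptions.
 */
#include <libkern/crypto/aes.h>

static aes_rval
sketch_gcm_seal(const unsigned char *key, int key_len,
    const unsigned char *iv12, const unsigned char *in, unsigned int len,
    unsigned char *out, unsigned char tag[16], ccgcm_ctx *ctx)
{
	aes_rval rc;

	rc = aes_encrypt_key_with_iv_gcm(key, key_len, iv12, ctx);
	if (rc) {
		return rc;
	}
	rc = aes_encrypt_gcm(in, len, out, ctx);
	if (rc) {
		return rc;
	}
	/* finalize also resets the context for the next message */
	return aes_encrypt_finalize_gcm(tag, 16, ctx);
}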
} aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx) { + int rc; const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt; if (!gcm) { return aes_error; } - ccgcm_finalize(gcm, ctx, tag_bytes, tag); - ccgcm_reset(gcm, ctx); - return aes_good; + rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag); + rc |= ccgcm_reset(gcm, ctx); + return rc; } unsigned aes_encrypt_get_ctx_size_gcm(void) diff --git a/libkern/crypto/corecrypto_des.c b/libkern/crypto/corecrypto_des.c index 888ed87a4..e916b520b 100644 --- a/libkern/crypto/corecrypto_des.c +++ b/libkern/crypto/corecrypto_des.c @@ -64,6 +64,7 @@ void des_ecb_encrypt(des_cblock *in, des_cblock *out, des_ecb_key_schedule *ks, /* Triple DES ECB - used by ipv6 (esp_core.c) */ int des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks) { + int rc; const struct ccmode_ecb *enc = g_crypto_funcs->cctdes_ecb_encrypt; const struct ccmode_ecb *dec = g_crypto_funcs->cctdes_ecb_decrypt; @@ -71,12 +72,10 @@ int des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks) if((enc->size>sizeof(ks->enc)) || (dec->size>sizeof(ks->dec))) panic("%s: inconsistent size for 3DES-ECB context", __FUNCTION__); - enc->init(enc, ks->enc, CCDES_KEY_SIZE*3, key); - dec->init(dec, ks->dec, CCDES_KEY_SIZE*3, key); + rc = enc->init(enc, ks->enc, CCDES_KEY_SIZE*3, key); + rc |= dec->init(dec, ks->dec, CCDES_KEY_SIZE*3, key); - /* The old DES interface could return -1 or -2 for weak keys and wrong parity, - but this was disabled all the time, so we never fail here */ - return 0; + return rc; } /* Simple des - 1 block */ diff --git a/libkern/crypto/corecrypto_rand.c b/libkern/crypto/corecrypto_rand.c new file mode 100644 index 000000000..332b48d7d --- /dev/null +++ b/libkern/crypto/corecrypto_rand.c @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2016 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + +int +cc_rand_generate(void *out, size_t outlen) +{ + struct ccrng_state *rng_state = NULL; + int error = -1; + + if (g_crypto_funcs) { + rng_state = g_crypto_funcs->ccrng_fn(&error); + if (rng_state != NULL) { + error = ccrng_generate(rng_state, outlen, out); + } + } + + return error; +} diff --git a/libkern/crypto/corecrypto_rsa.c b/libkern/crypto/corecrypto_rsa.c new file mode 100644 index 000000000..8bf03214a --- /dev/null +++ b/libkern/crypto/corecrypto_rsa.c @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2016 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
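/*
 * A sketch of calling the new cc_rand_generate() shown above: it returns 0 on
 * success and nonzero otherwise (the implementation seeds its error to -1 for
 * the case where no crypto functions are registered yet), so the output
 * buffer should be trusted only on 0. The 16-byte nonce is illustrative.
 */
#include <stdint.h>
#include <libkern/crypto/rand.h>

static int
sketch_make_nonce(uint8_t nonce[16])
{
	int err = cc_rand_generate(nonce, 16);
	if (err != 0) {
		return err;     /* leave the caller's buffer unused on failure */
	}
	return 0;
}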
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#include +#include +#include + + +int rsa_make_pub(rsa_pub_ctx *pub, + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod) { + if ((exp_nbytes>RSA_MAX_KEY_BITSIZE/8) + || (mod_nbytes>RSA_MAX_KEY_BITSIZE/8)) { + return -1; // Too big + } + ccrsa_ctx_n(pub->key) = ccn_nof(RSA_MAX_KEY_BITSIZE); + return g_crypto_funcs->ccrsa_make_pub_fn(pub->key, + exp_nbytes, exp, + mod_nbytes, mod); +} + +int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + bool *valid) { + return g_crypto_funcs->ccrsa_verify_pkcs1v15_fn(pub->key,oid, + digest_len,digest, + sig_len,sig,valid); +} + + diff --git a/libkern/libkern/c++/OSData.h b/libkern/libkern/c++/OSData.h index 488087dc6..e25079f80 100644 --- a/libkern/libkern/c++/OSData.h +++ b/libkern/libkern/c++/OSData.h @@ -751,6 +751,7 @@ private: #endif virtual void setDeallocFunction(DeallocFunction func); OSMetaClassDeclareReservedUsed(OSData, 0); + bool isSerializable(void); private: OSMetaClassDeclareReservedUnused(OSData, 1); diff --git a/libkern/libkern/crypto/Makefile b/libkern/libkern/crypto/Makefile index 0274f4bf1..e701850eb 100644 --- a/libkern/libkern/crypto/Makefile +++ b/libkern/libkern/crypto/Makefile @@ -8,7 +8,7 @@ include $(MakeInc_def) DATAFILES = md5.h sha1.h -PRIVATE_DATAFILES = register_crypto.h sha2.h des.h aes.h aesxts.h +PRIVATE_DATAFILES = register_crypto.h sha2.h des.h aes.h aesxts.h rand.h rsa.h INSTALL_KF_MI_LIST = ${DATAFILES} diff --git a/libkern/libkern/crypto/aes.h b/libkern/libkern/crypto/aes.h index 827150c6d..bcb704d20 100644 --- a/libkern/libkern/crypto/aes.h +++ b/libkern/libkern/crypto/aes.h @@ -97,14 +97,20 @@ aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv unsigned char *out_blk, aes_decrypt_ctx cx[1]); aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx); +aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx); aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx); +aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx); +aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx); aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx); aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx); aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx); unsigned aes_encrypt_get_ctx_size_gcm(void); aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx); +aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx); aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx); +aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx); +aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx); aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx); aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx); aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx); diff --git a/libkern/libkern/crypto/rand.h b/libkern/libkern/crypto/rand.h new file mode 100644 index 000000000..08778f4cd --- /dev/null 
+++ b/libkern/libkern/crypto/rand.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2016 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _RAND_H +#define _RAND_H + +#if defined(__cplusplus) +extern "C" +{ +#endif + +int cc_rand_generate(void *out, size_t outlen); + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/libkern/libkern/crypto/register_crypto.h b/libkern/libkern/crypto/register_crypto.h index 428d2faa5..6041ebb31 100644 --- a/libkern/libkern/crypto/register_crypto.h +++ b/libkern/libkern/crypto/register_crypto.h @@ -37,6 +37,8 @@ extern "C" { #include #include #include +#include +#include /* Function types */ @@ -61,6 +63,13 @@ typedef void (*cchmac_fn_t)(const struct ccdigest_info *di, unsigned long key_le const void *key, unsigned long data_len, const void *data, unsigned char *mac); +/* gcm */ +typedef int (*ccgcm_init_with_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, + size_t key_nbytes, const void *key, + const void *iv); +typedef int (*ccgcm_inc_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, void *iv); + + /* pbkdf2 */ typedef void (*ccpbkdf2_hmac_fn_t)(const struct ccdigest_info *di, unsigned long passwordLen, const void *password, @@ -83,6 +92,19 @@ typedef void (*ccpad_xts_encrypt_fn_t)(const struct ccmode_xts *xts, ccxts_ctx * typedef size_t (*ccpad_cts3_crypt_fn_t)(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key, cccbc_iv *iv, size_t nbytes, const void *in, void *out); +/* rng */ +typedef struct ccrng_state *(*ccrng_fn_t)(int *error); + +/* rsa */ +typedef int (*ccrsa_make_pub_fn_t)(ccrsa_pub_ctx_t pubk, + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod); + +typedef int (*ccrsa_verify_pkcs1v15_fn_t)(ccrsa_pub_ctx_t key, const uint8_t *oid, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + bool *valid); + typedef struct crypto_functions { /* digests common functions */ ccdigest_init_fn_t ccdigest_init_fn; @@ -112,6 +134,10 @@ typedef struct crypto_functions { const struct ccmode_xts *ccaes_xts_decrypt; const struct ccmode_gcm *ccaes_gcm_encrypt; const struct ccmode_gcm *ccaes_gcm_decrypt; + + ccgcm_init_with_iv_fn_t ccgcm_init_with_iv_fn; + ccgcm_inc_iv_fn_t ccgcm_inc_iv_fn; + /* DES, ecb and cbc */ const struct 
ccmode_ecb *ccdes_ecb_encrypt; const struct ccmode_ecb *ccdes_ecb_decrypt; @@ -138,7 +164,14 @@ typedef struct crypto_functions { ccpad_xts_decrypt_fn_t ccpad_xts_decrypt_fn; /* CTS3 padding+encrypt functions */ ccpad_cts3_crypt_fn_t ccpad_cts3_encrypt_fn; - ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn; + ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn; + + /* rng */ + ccrng_fn_t ccrng_fn; + + /* rsa */ + ccrsa_make_pub_fn_t ccrsa_make_pub_fn; + ccrsa_verify_pkcs1v15_fn_t ccrsa_verify_pkcs1v15_fn; } *crypto_functions_t; int register_crypto_functions(const crypto_functions_t funcs); diff --git a/libkern/libkern/crypto/rsa.h b/libkern/libkern/crypto/rsa.h new file mode 100644 index 000000000..2084dfdea --- /dev/null +++ b/libkern/libkern/crypto/rsa.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016 Apple Computer, Inc. All rights reserved. + * + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. + * + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ + */ + +#ifndef _RSA_H +#define _RSA_H + +#if defined(__cplusplus) +extern "C" +{ +#endif + +#include +#define RSA_MAX_KEY_BITSIZE 4096 + +typedef struct{ + ccrsa_pub_ctx_decl(ccn_sizeof(RSA_MAX_KEY_BITSIZE),key); +} rsa_pub_ctx; + +int rsa_make_pub(rsa_pub_ctx *pub, + size_t exp_nbytes, const uint8_t *exp, + size_t mod_nbytes, const uint8_t *mod); + +int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid, + size_t digest_len, const uint8_t *digest, + size_t sig_len, const uint8_t *sig, + bool *valid); + +#if defined(__cplusplus) +} +#endif + +#endif diff --git a/libsyscall/wrappers/libproc/libproc.c b/libsyscall/wrappers/libproc/libproc.c index e46dbe0c5..cc0321e2d 100644 --- a/libsyscall/wrappers/libproc/libproc.c +++ b/libsyscall/wrappers/libproc/libproc.c @@ -633,6 +633,29 @@ proc_list_uptrs(int pid, uint64_t *buf, uint32_t bufsz) return count; } +int +proc_setcpu_percentage(pid_t pid, int action, int percentage) +{ + proc_policy_cpuusage_attr_t attr; + + bzero(&attr, sizeof(proc_policy_cpuusage_attr_t)); + attr.ppattr_cpu_attr = action; + attr.ppattr_cpu_percentage = percentage; + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1) + return(0); + else + return(errno); +} + +int +proc_clear_cpulimits(pid_t pid) +{ + if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, NULL, pid, (uint64_t)0) != -1) + return(0); + else + return(errno); +} + /* Donate importance to adaptive processes from this process */ diff --git a/libsyscall/wrappers/libproc/libproc_internal.h b/libsyscall/wrappers/libproc/libproc_internal.h index d8fc8f1f9..7169b7eb4 100644 --- a/libsyscall/wrappers/libproc/libproc_internal.h +++ b/libsyscall/wrappers/libproc/libproc_internal.h @@ -31,6 +31,16 @@ __BEGIN_DECLS +/* CPU monitor action */ +#define PROC_SETCPU_ACTION_NONE 0 +#define PROC_SETCPU_ACTION_THROTTLE 1 + +int proc_setcpu_percentage(pid_t pid, int action, int percentage) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0); +int proc_clear_cpulimits(pid_t pid) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0); + +/* CPU limits, applies to current thread only. 
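/*
 * [Editor's sketch, not part of the patch] Userspace view of the two libproc
 * wrappers declared just above. Both return 0 on success and an errno value
 * (not -1) on failure, because they forward __process_policy()'s errno. The
 * 50% figure and the helper name are illustrative only.
 */
#include <sys/types.h>
#include <stdio.h>
#include <string.h>

/* Private interfaces from libproc_internal.h (shown above). */
#define PROC_SETCPU_ACTION_THROTTLE 1
int proc_setcpu_percentage(pid_t pid, int action, int percentage);
int proc_clear_cpulimits(pid_t pid);

static int
throttle_then_release(pid_t pid)
{
	/* Apply a 50% CPU usage limit with the throttle action. */
	int err = proc_setcpu_percentage(pid, PROC_SETCPU_ACTION_THROTTLE, 50);
	if (err != 0) {
		fprintf(stderr, "proc_setcpu_percentage: %s\n", strerror(err));
		return err;
	}

	/* PROC_POLICY_ACTION_RESTORE underneath: drop the limit again. */
	return proc_clear_cpulimits(pid);
}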
0% unsets limit */ +int proc_setthread_cpupercent(uint8_t percentage, uint32_t ms_refill) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_5_0); + /* resume the process suspend due to low VM resource */ int proc_clear_vmpressure(pid_t pid); diff --git a/libsyscall/wrappers/terminate_with_reason.c b/libsyscall/wrappers/terminate_with_reason.c index 05fdb7848..3bb8a6683 100644 --- a/libsyscall/wrappers/terminate_with_reason.c +++ b/libsyscall/wrappers/terminate_with_reason.c @@ -71,17 +71,11 @@ static void abort_with_payload_wrapper_internal(uint32_t reason_namespace, uint6 __abort_with_payload(reason_namespace, reason_code, payload, payload_size, reason_string, reason_flags); - /* If sending a SIGABRT failed, we try to fall back to SIGKILL */ + /* If sending a SIGABRT failed, we fall back to SIGKILL */ terminate_with_payload(getpid(), reason_namespace, reason_code, payload, payload_size, reason_string, reason_flags); - /* Last resort, let's use SIGTRAP (SIGILL on i386) */ - sigemptyset(&unmask_signal); - sigaddset(&unmask_signal, SIGTRAP); - sigaddset(&unmask_signal, SIGILL); - sigprocmask(SIG_UNBLOCK, &unmask_signal, NULL); - - __builtin_trap(); + __builtin_unreachable(); } void diff --git a/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c b/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c index 2a9209f39..10c6210a0 100644 --- a/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c +++ b/osfmk/corecrypto/ccsha1/src/ccsha1_eay.c @@ -1,13 +1,3 @@ -/* - * ccsha1_eay.c - * corecrypto - * - * Created on 12/06/2010 - * - * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved. - * - */ - /* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) * All rights reserved. * diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c index 63daa8019..27dd4ccc6 100644 --- a/osfmk/i386/AT386/model_dep.c +++ b/osfmk/i386/AT386/model_dep.c @@ -1269,7 +1269,7 @@ out: if (PC != 0) kmod_panic_dump(&PC, 1); - panic_display_system_configuration(); + panic_display_system_configuration(FALSE); doprnt_hide_pointers = old_doprnt_hide_pointers; @@ -1467,10 +1467,8 @@ void print_launchd_info(void) print_thread_num_that_crashed(task); print_threads_registers(thread); print_tasks_user_threads(task); - kdb_printf("Mac OS version: %s\n", (osversion[0] != 0) ? osversion : "Not yet set"); - kdb_printf("Kernel version: %s\n", version); - panic_display_kernel_uuid(); - panic_display_model_name(); + + panic_display_system_configuration(TRUE); /* Release print backtrace lock, to permit other callers in the * event of panics on multiple processors. diff --git a/osfmk/i386/pmCPU.c b/osfmk/i386/pmCPU.c index a08f86a01..5791823d7 100644 --- a/osfmk/i386/pmCPU.c +++ b/osfmk/i386/pmCPU.c @@ -787,12 +787,13 @@ thread_tell_urgency(int urgency, void machine_thread_going_on_core(__unused thread_t new_thread, __unused int urgency, - __unused uint64_t sched_latency) + __unused uint64_t sched_latency, + __unused uint64_t dispatch_time) { } void -machine_thread_going_off_core(__unused thread_t old_thread, __unused boolean_t thread_terminating) +machine_thread_going_off_core(__unused thread_t old_thread, __unused boolean_t thread_terminating, __unused uint64_t last_dispatch) { } diff --git a/osfmk/i386/pmap.h b/osfmk/i386/pmap.h index 6bf67c0d9..ccad03ea7 100644 --- a/osfmk/i386/pmap.h +++ b/osfmk/i386/pmap.h @@ -751,9 +751,7 @@ extern void pmap_pagetable_corruption_msg_log(int (*)(const char * fmt, ...)__pr #if defined(__x86_64__) #define PMAP_DEACTIVATE_MAP(map, thread, ccpu) \ -/* pmap_assert2((pmap_pcid_ncpus ? 
(pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu) == (get_cr3_raw() & 0xFFF)) : TRUE),"PCIDs: 0x%x, active PCID: 0x%x, CR3: 0x%lx, pmap_cr3: 0x%llx, kernel_cr3: 0x%llx, kernel pmap cr3: 0x%llx, CPU active PCID: 0x%x, CPU kernel PCID: 0x%x, specflags: 0x%x, pagezero: 0x%x", pmap_pcid_ncpus, pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu), get_cr3_raw(), map->pmap->pm_cr3, cpu_datap(ccpu)->cpu_kernel_cr3, kernel_pmap->pm_cr3, cpu_datap(ccpu)->cpu_active_pcid, cpu_datap(ccpu)->cpu_kernel_pcid, thread->machine.specFlags, map->pmap->pagezero_accessible); -*/ #else #define PMAP_DEACTIVATE_MAP(map, thread) #endif diff --git a/osfmk/i386/pmap_internal.h b/osfmk/i386/pmap_internal.h index 3c8909968..b2ea44959 100644 --- a/osfmk/i386/pmap_internal.h +++ b/osfmk/i386/pmap_internal.h @@ -385,9 +385,9 @@ static inline void pmap_pv_throttle(__unused pmap_t p) { (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL)) #define IS_REUSABLE_PAGE(x) \ (IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE)) -#define IS_ALTACCT_PAGE(x) \ +#define IS_ALTACCT_PAGE(x,pve) \ (IS_MANAGED_PAGE((x)) && \ - (PVE_IS_ALTACCT_PAGE(&pv_head_table[(x)]))) + (PVE_IS_ALTACCT_PAGE((pve)))) /* * Physical page attributes. Copy bits from PTE definition. @@ -661,7 +661,7 @@ pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t * uint32_t bitdex; pmap_t pvpmap = pv_h->pmap; vm_map_offset_t pvva = PVE_VA(pv_h); - vm_map_offset_t pve_flags = PVE_FLAGS(pv_h); + vm_map_offset_t pve_flags; boolean_t ppcd = FALSE; boolean_t is_ept; @@ -684,12 +684,9 @@ pmap_classify_pagetable_corruption(pmap_t pmap, vm_map_offset_t vaddr, ppnum_t * do { if ((popcnt1((uintptr_t)pv_e->pmap ^ (uintptr_t)pmap) && PVE_VA(pv_e) == vaddr) || (pv_e->pmap == pmap && popcnt1(PVE_VA(pv_e) ^ vaddr))) { + pve_flags = PVE_FLAGS(pv_e); pv_e->pmap = pmap; - if (pv_e == pv_h) { - pv_h->va_and_flags = vaddr | pve_flags; - } else { - pv_e->va_and_flags = vaddr; - } + pv_h->va_and_flags = vaddr | pve_flags; suppress_reason = PV_BITFLIP; action = PMAP_ACTION_RETRY; goto pmap_cpc_exit; @@ -763,8 +760,9 @@ pmap_cpc_exit: static inline __attribute__((always_inline)) pv_hashed_entry_t pmap_pv_remove(pmap_t pmap, vm_map_offset_t vaddr, - ppnum_t *ppnp, - pt_entry_t *pte) + ppnum_t *ppnp, + pt_entry_t *pte, + boolean_t *was_altacct) { pv_hashed_entry_t pvh_e; pv_rooted_entry_t pv_h; @@ -773,6 +771,7 @@ pmap_pv_remove(pmap_t pmap, uint32_t pv_cnt; ppnum_t ppn; + *was_altacct = FALSE; pmap_pv_remove_retry: ppn = *ppnp; pvh_e = PV_HASHED_ENTRY_NULL; @@ -794,6 +793,7 @@ pmap_pv_remove_retry: } if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) { + *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pv_h); /* * Header is the pv_rooted_entry. * We can't free that. If there is a queued @@ -803,8 +803,6 @@ pmap_pv_remove_retry: */ pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink); if (pv_h != (pv_rooted_entry_t) pvh_e) { - vm_map_offset_t pve_flags; - /* * Entry queued to root, remove this from hash * and install as new root. 
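/*
 * [Editor's note and sketch, not part of the patch] IS_ALTACCT_PAGE() used to
 * key off the pv_head_table root entry; the ALTACCT bit is really a property
 * of one specific mapping, so the macro now takes that pv entry explicitly.
 * That is also why pmap_pv_remove() grows the was_altacct out-parameter: the
 * entry is unlinked inside the call, so the flag must be sampled while the
 * entry still exists. Expected caller shape (condensed, names illustrative):
 */
	boolean_t was_altacct;
	pv_hashed_entry_t pvh_e;

	/* the flag is captured inside, before the pv entry goes away */
	pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *)&pai, pte, &was_altacct);

	/* ...settle task ledgers from was_altacct, not IS_ALTACCT_PAGE()... */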
@@ -822,8 +820,7 @@ pmap_pv_remove_retry: pmap_pvh_unlink(pvh_e); UNLOCK_PV_HASH(pvhash_idx); pv_h->pmap = pvh_e->pmap; - pve_flags = PVE_FLAGS(pv_h); - pv_h->va_and_flags = PVE_VA(pvh_e) | pve_flags; + pv_h->va_and_flags = pvh_e->va_and_flags; /* dispose of pvh_e */ } else { /* none queued after rooted */ @@ -877,6 +874,8 @@ pmap_pv_remove_retry: } } + *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pvh_e); + pmap_pv_hashlist_cnts += pv_cnt; if (pmap_pv_hashlist_max < pv_cnt) pmap_pv_hashlist_max = pv_cnt; @@ -888,6 +887,55 @@ pmap_pv_remove_exit: return pvh_e; } +static inline __attribute__((always_inline)) boolean_t +pmap_pv_is_altacct( + pmap_t pmap, + vm_map_offset_t vaddr, + ppnum_t ppn) +{ + pv_hashed_entry_t pvh_e; + pv_rooted_entry_t pv_h; + int pvhash_idx; + boolean_t is_altacct; + + pvh_e = PV_HASHED_ENTRY_NULL; + pv_h = pai_to_pvh(ppn_to_pai(ppn)); + + if (__improbable(pv_h->pmap == PMAP_NULL)) { + return FALSE; + } + + if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) { + /* + * Header is the pv_rooted_entry. + */ + return IS_ALTACCT_PAGE(ppn, pv_h); + } + + CHK_NPVHASH(); + pvhash_idx = pvhashidx(pmap, vaddr); + LOCK_PV_HASH(pvhash_idx); + pvh_e = *(pvhash(pvhash_idx)); + if (PV_HASHED_ENTRY_NULL == pvh_e) { + panic("Possible memory corruption: pmap_pv_is_altacct(%p,0x%llx,0x%x): empty hash", + pmap, vaddr, ppn); + } + while (PV_HASHED_ENTRY_NULL != pvh_e) { + if (pvh_e->pmap == pmap && + PVE_VA(pvh_e) == vaddr && + pvh_e->ppn == ppn) + break; + pvh_e = pvh_e->nexth; + } + if (PV_HASHED_ENTRY_NULL == pvh_e) { + is_altacct = FALSE; + } else { + is_altacct = IS_ALTACCT_PAGE(ppn, pvh_e); + } + UNLOCK_PV_HASH(pvhash_idx); + + return is_altacct; +} extern int pt_fake_zone_index; static inline void diff --git a/osfmk/i386/pmap_x86_common.c b/osfmk/i386/pmap_x86_common.c index 1ee68dafb..1bfecd7cb 100644 --- a/osfmk/i386/pmap_x86_common.c +++ b/osfmk/i386/pmap_x86_common.c @@ -497,6 +497,7 @@ pmap_enter_options( pt_entry_t old_pte; kern_return_t kr_expand; boolean_t is_ept; + boolean_t is_altacct; pmap_intr_assert(); @@ -751,6 +752,7 @@ dont_update_pte: */ if (old_pa != (pmap_paddr_t) 0) { + boolean_t was_altacct; /* * Don't do anything to pages outside valid memory here. @@ -768,12 +770,23 @@ dont_update_pte: /* completely invalidate the PTE */ pmap_store_pte(pte, 0); + if (IS_MANAGED_PAGE(pai)) { + /* + * Remove the mapping from the pvlist for + * this physical page. 
+ * We'll end up with either a rooted pv or a + * hashed pv + */ + pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte, &was_altacct); + } + if (IS_MANAGED_PAGE(pai)) { pmap_assert(old_pa_locked == TRUE); pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE); assert(pmap->stats.resident_count >= 1); OSAddAtomic(-1, &pmap->stats.resident_count); if (pmap != kernel_pmap) { + /* update pmap stats */ if (IS_REUSABLE_PAGE(pai)) { PMAP_STATS_ASSERTF( (pmap->stats.reusable > 0, @@ -786,12 +799,6 @@ dont_update_pte: "internal %d", pmap->stats.internal)); OSAddAtomic(-1, &pmap->stats.internal); - pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); - if (IS_ALTACCT_PAGE(pai)) { - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); - } else { - pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); - } } else { PMAP_STATS_ASSERTF( (pmap->stats.external > 0, @@ -799,6 +806,24 @@ dont_update_pte: pmap->stats.external)); OSAddAtomic(-1, &pmap->stats.external); } + + /* update ledgers */ + if (was_altacct) { + assert(IS_INTERNAL_PAGE(pai)); + pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); + } else if (IS_REUSABLE_PAGE(pai)) { + assert(!was_altacct); + assert(IS_INTERNAL_PAGE(pai)); + /* was already not in phys_footprint */ + } else if (IS_INTERNAL_PAGE(pai)) { + assert(!was_altacct); + assert(!IS_REUSABLE_PAGE(pai)); + pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + } else { + /* not an internal page */ + } } if (iswired(*pte)) { assert(pmap->stats.wired_count >= 1); @@ -813,14 +838,6 @@ dont_update_pte: pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr); } - /* - * Remove the mapping from the pvlist for - * this physical page. 
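/*
 * [Editor's summary; the helper name is invented, the arms mirror the code
 * above] The remove-side ledger ladder applied to a managed page. Exactly
 * one arm runs: alternate-accounted pages trade phys_footprint for
 * alternate_accounting, and reusable pages are internal yet deliberately
 * absent from phys_footprint, so removing them debits nothing.
 */
static void
debit_ledgers_for_removed_page(pmap_t pmap, int pai, boolean_t was_altacct)
{
	if (was_altacct) {
		assert(IS_INTERNAL_PAGE(pai));
		pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
		pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
	} else if (IS_REUSABLE_PAGE(pai)) {
		/* internal but reusable: was never in phys_footprint */
	} else if (IS_INTERNAL_PAGE(pai)) {
		pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
		pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
	} else {
		/* external page: no footprint ledgers involved */
	}
}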
- * We'll end up with either a rooted pv or a - * hashed pv - */ - pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte); - } else { /* @@ -881,10 +898,11 @@ dont_update_pte: } if ((options & PMAP_OPTIONS_ALT_ACCT) && IS_INTERNAL_PAGE(pai)) { - assert(!IS_REUSABLE_PAGE(pai)); pv_h->va_and_flags |= PVE_IS_ALTACCT; + is_altacct = TRUE; } else { pv_h->va_and_flags &= ~PVE_IS_ALTACCT; + is_altacct = FALSE; } } else { /* @@ -923,6 +941,14 @@ dont_update_pte: pvh_e->va_and_flags = vaddr; pvh_e->pmap = pmap; pvh_e->ppn = pn; + if ((options & PMAP_OPTIONS_ALT_ACCT) && + IS_INTERNAL_PAGE(pai)) { + pvh_e->va_and_flags |= PVE_IS_ALTACCT; + is_altacct = TRUE; + } else { + pvh_e->va_and_flags &= ~PVE_IS_ALTACCT; + is_altacct = FALSE; + } pv_hash_add(pvh_e, pv_h); /* @@ -941,22 +967,38 @@ dont_update_pte: pmap->stats.resident_max = pmap->stats.resident_count; } if (pmap != kernel_pmap) { + /* update pmap stats */ if (IS_REUSABLE_PAGE(pai)) { OSAddAtomic(+1, &pmap->stats.reusable); PMAP_STATS_PEAK(pmap->stats.reusable); } else if (IS_INTERNAL_PAGE(pai)) { OSAddAtomic(+1, &pmap->stats.internal); PMAP_STATS_PEAK(pmap->stats.internal); - pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE); - if (IS_ALTACCT_PAGE(pai)) { - pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); - } else { - pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); - } } else { OSAddAtomic(+1, &pmap->stats.external); PMAP_STATS_PEAK(pmap->stats.external); } + + /* update ledgers */ + if (is_altacct) { + /* internal but also alternate accounting */ + assert(IS_INTERNAL_PAGE(pai)); + pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); + /* alternate accounting, so not in footprint */ + } else if (IS_REUSABLE_PAGE(pai)) { + assert(!is_altacct); + assert(IS_INTERNAL_PAGE(pai)); + /* internal but reusable: not in footprint */ + } else if (IS_INTERNAL_PAGE(pai)) { + assert(!is_altacct); + assert(!IS_REUSABLE_PAGE(pai)); + /* internal: add to footprint */ + pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); + } else { + /* not internal: not in footprint */ + } } } else if (last_managed_page == 0) { /* Account for early mappings created before "managed pages" @@ -1114,24 +1156,28 @@ pmap_remove_range_options( pv_hashed_entry_t pvh_e; int pvh_cnt = 0; int num_removed, num_unwired, num_found, num_invalid; - int num_external, num_reusable; - int num_internal, num_alt_internal; - uint64_t num_compressed, num_alt_compressed; + int stats_external, stats_internal, stats_reusable; + uint64_t stats_compressed; + int ledgers_internal, ledgers_alt_internal; + uint64_t ledgers_compressed, ledgers_alt_compressed; ppnum_t pai; pmap_paddr_t pa; vm_map_offset_t vaddr; boolean_t is_ept = is_ept_pmap(pmap); + boolean_t was_altacct; num_removed = 0; num_unwired = 0; num_found = 0; num_invalid = 0; - num_external = 0; - num_internal = 0; - num_reusable = 0; - num_compressed = 0; - num_alt_internal = 0; - num_alt_compressed = 0; + stats_external = 0; + stats_internal = 0; + stats_reusable = 0; + stats_compressed = 0; + ledgers_internal = 0; + ledgers_compressed = 0; + ledgers_alt_internal = 0; + ledgers_alt_compressed = 0; /* invalidate the PTEs first to "freeze" them */ for (cpte = spte, vaddr = start_vaddr; cpte < epte; @@ -1140,14 +1186,15 @@ pmap_remove_range_options( pa = pte_to_pa(p); if (pa == 0) { - if (pmap != kernel_pmap && - (options & 
PMAP_OPTIONS_REMOVE) && + if ((options & PMAP_OPTIONS_REMOVE) && (PTE_IS_COMPRESSED(p))) { + assert(pmap != kernel_pmap); /* one less "compressed"... */ - num_compressed++; + stats_compressed++; + ledgers_compressed++; if (p & PTE_COMPRESSED_ALT) { /* ... but it used to be "ALTACCT" */ - num_alt_compressed++; + ledgers_alt_compressed++; } /* clear marker(s) */ /* XXX probably does not need to be atomic! */ @@ -1199,14 +1246,15 @@ pmap_remove_range_options( * "compressed" marker after our first "freeze" * loop above, so check again. */ - if (pmap != kernel_pmap && - (options & PMAP_OPTIONS_REMOVE) && + if ((options & PMAP_OPTIONS_REMOVE) && (PTE_IS_COMPRESSED(*cpte))) { + assert(pmap != kernel_pmap); /* one less "compressed"... */ - num_compressed++; + stats_compressed++; + ledgers_compressed++; if (*cpte & PTE_COMPRESSED_ALT) { /* ... but it used to be "ALTACCT" */ - num_alt_compressed++; + ledgers_alt_compressed++; } pmap_store_pte(cpte, 0); } @@ -1222,17 +1270,38 @@ pmap_remove_range_options( UNLOCK_PVH(pai); goto check_pte_for_compressed_marker; } + + /* + * Remove the mapping from the pvlist for this physical page. + */ + pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte, &was_altacct); + num_removed++; + /* update pmap stats */ if (IS_REUSABLE_PAGE(pai)) { - assert(!IS_ALTACCT_PAGE(pai)); - num_reusable++; + stats_reusable++; } else if (IS_INTERNAL_PAGE(pai)) { - num_internal++; - if (IS_ALTACCT_PAGE(pai)) { - num_alt_internal++; - } + stats_internal++; + } else { + stats_external++; + } + /* update ledgers */ + if (was_altacct) { + /* internal and alternate accounting */ + assert(IS_INTERNAL_PAGE(pai)); + ledgers_internal++; + ledgers_alt_internal++; + } else if (IS_REUSABLE_PAGE(pai)) { + /* internal but reusable */ + assert(!was_altacct); + assert(IS_INTERNAL_PAGE(pai)); + } else if (IS_INTERNAL_PAGE(pai)) { + /* internal */ + assert(!was_altacct); + assert(!IS_REUSABLE_PAGE(pai)); + ledgers_internal++; } else { - num_external++; + /* not internal */ } /* @@ -1248,11 +1317,6 @@ pmap_remove_range_options( ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED); } - /* - * Remove the mapping from the pvlist for this physical page. 
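/*
 * [Editor's sketch, helper name invented] The "compressed" PTE marker above
 * doubles as an accounting record: PTE_COMPRESSED_ALT remembers that the
 * mapping was alternate-accounted when it went to the compressor, so the
 * remove path can split its ledger tallies long after the pv entry is gone.
 */
static void
tally_compressed_marker(pt_entry_t p,
    uint64_t *ledgers_compressed, uint64_t *ledgers_alt_compressed)
{
	if (PTE_IS_COMPRESSED(p)) {
		(*ledgers_compressed)++;             /* one less "compressed"... */
		if (p & PTE_COMPRESSED_ALT)
			(*ledgers_alt_compressed)++; /* ...that used to be ALTACCT */
	}
}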
- */ - pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte); - /* completely invalidate the PTE */ pmap_store_pte(cpte, 0); @@ -1287,52 +1351,57 @@ update_counts: OSAddAtomic(-num_removed, &pmap->stats.resident_count); if (pmap != kernel_pmap) { - PMAP_STATS_ASSERTF((pmap->stats.external >= num_external, - "pmap=%p num_external=%d stats.external=%d", - pmap, num_external, pmap->stats.external)); - PMAP_STATS_ASSERTF((pmap->stats.internal >= num_internal, - "pmap=%p num_internal=%d stats.internal=%d", - pmap, num_internal, pmap->stats.internal)); - PMAP_STATS_ASSERTF((pmap->stats.reusable >= num_reusable, - "pmap=%p num_reusable=%d stats.reusable=%d", - pmap, num_reusable, pmap->stats.reusable)); - PMAP_STATS_ASSERTF((pmap->stats.compressed >= num_compressed, - "pmap=%p num_compressed=%lld, stats.compressed=%lld", - pmap, num_compressed, pmap->stats.compressed)); - - if (num_external) { - OSAddAtomic(-num_external, &pmap->stats.external); + PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external, + "pmap=%p stats_external=%d stats.external=%d", + pmap, stats_external, pmap->stats.external)); + PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal, + "pmap=%p stats_internal=%d stats.internal=%d", + pmap, stats_internal, pmap->stats.internal)); + PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable, + "pmap=%p stats_reusable=%d stats.reusable=%d", + pmap, stats_reusable, pmap->stats.reusable)); + PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed, + "pmap=%p stats_compressed=%lld, stats.compressed=%lld", + pmap, stats_compressed, pmap->stats.compressed)); + + /* update pmap stats */ + if (stats_external) { + OSAddAtomic(-stats_external, &pmap->stats.external); } - if (num_internal) { - OSAddAtomic(-num_internal, &pmap->stats.internal); + if (stats_internal) { + OSAddAtomic(-stats_internal, &pmap->stats.internal); + } + if (stats_reusable) + OSAddAtomic(-stats_reusable, &pmap->stats.reusable); + if (stats_compressed) + OSAddAtomic64(-stats_compressed, &pmap->stats.compressed); + /* update ledgers */ + if (ledgers_internal) { pmap_ledger_debit(pmap, task_ledgers.internal, - machine_ptob(num_internal)); + machine_ptob(ledgers_internal)); } - if (num_alt_internal) { + if (ledgers_compressed) { pmap_ledger_debit(pmap, - task_ledgers.alternate_accounting, - machine_ptob(num_alt_internal)); + task_ledgers.internal_compressed, + machine_ptob(ledgers_compressed)); } - if (num_alt_compressed) { + if (ledgers_alt_internal) { pmap_ledger_debit(pmap, - task_ledgers.alternate_accounting_compressed, - machine_ptob(num_alt_compressed)); + task_ledgers.alternate_accounting, + machine_ptob(ledgers_alt_internal)); } - if (num_reusable) - OSAddAtomic(-num_reusable, &pmap->stats.reusable); - if (num_compressed) { - OSAddAtomic64(-num_compressed, &pmap->stats.compressed); + if (ledgers_alt_compressed) { pmap_ledger_debit(pmap, - task_ledgers.internal_compressed, - machine_ptob(num_compressed)); + task_ledgers.alternate_accounting_compressed, + machine_ptob(ledgers_alt_compressed)); } pmap_ledger_debit(pmap, task_ledgers.phys_footprint, - machine_ptob((num_internal - - num_alt_internal) + - (num_compressed - - num_alt_compressed))); + machine_ptob((ledgers_internal - + ledgers_alt_internal) + + (ledgers_compressed - + ledgers_alt_compressed))); } #if TESTING @@ -1598,7 +1667,7 @@ pmap_page_protect_options( assert(!PTE_IS_COMPRESSED(*pte)); /* mark this PTE as having been "compressed" */ new_pte_value = PTE_COMPRESSED; - if (IS_ALTACCT_PAGE(pai)) { + if (IS_ALTACCT_PAGE(pai, pv_e)) { 
new_pte_value |= PTE_COMPRESSED_ALT; } } else { @@ -1640,9 +1709,11 @@ pmap_page_protect_options( options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED; options |= PMAP_OPTIONS_COMPRESSOR; assert(new_pte_value == 0); - new_pte_value = PTE_COMPRESSED; - if (IS_ALTACCT_PAGE(pai)) { - new_pte_value |= PTE_COMPRESSED_ALT; + if (pmap != kernel_pmap) { + new_pte_value = PTE_COMPRESSED; + if (IS_ALTACCT_PAGE(pai, pv_e)) { + new_pte_value |= PTE_COMPRESSED_ALT; + } } } pmap_store_pte(pte, new_pte_value); @@ -1663,6 +1734,7 @@ pmap_page_protect_options( assert(IS_INTERNAL_PAGE(pai)); } if (pmap != kernel_pmap) { + /* update pmap stats */ if (IS_REUSABLE_PAGE(pai)) { assert(pmap->stats.reusable > 0); OSAddAtomic(-1, &pmap->stats.reusable); @@ -1680,14 +1752,27 @@ pmap_page_protect_options( PMAP_STATS_PEAK(pmap->stats.compressed); pmap->stats.compressed_lifetime++; } - if (IS_REUSABLE_PAGE(pai)) { - assert(!IS_ALTACCT_PAGE(pai)); + + /* update ledgers */ + if (IS_ALTACCT_PAGE(pai, pv_e)) { + assert(IS_INTERNAL_PAGE(pai)); + pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); + pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); + if (options & PMAP_OPTIONS_COMPRESSOR) { + pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); + pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE); + } + } else if (IS_REUSABLE_PAGE(pai)) { + assert(!IS_ALTACCT_PAGE(pai, pv_e)); + assert(IS_INTERNAL_PAGE(pai)); if (options & PMAP_OPTIONS_COMPRESSOR) { pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); /* was not in footprint, but is now */ pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); } } else if (IS_INTERNAL_PAGE(pai)) { + assert(!IS_ALTACCT_PAGE(pai, pv_e)); + assert(!IS_REUSABLE_PAGE(pai)); pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE); /* * Update all stats related to physical @@ -1702,19 +1787,6 @@ pmap_page_protect_options( * it mustn't affect total task * footprint. */ - if (IS_ALTACCT_PAGE(pai)) { - /* - * We've already debited - * internal, above. - * Debit - * alternate_accounting - * here, which means the - * net change on - * phys_footprint is 0. - */ - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); - pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE); - } pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE); } else { /* @@ -1723,20 +1795,7 @@ pmap_page_protect_options( * so adjust stats to keep * phys_footprint up to date. */ - if (IS_ALTACCT_PAGE(pai)) { - /* - * We've already debited - * internal, above. - * Debit - * alternate_accounting - * here, which means - * the net change on - * phys_footprint is 0. 
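/*
 * [Editor's sketch, not part of the patch] The new pmap_pv_is_altacct()
 * helper (added in pmap_internal.h earlier in this diff) lets
 * pmap_query_page_info() below report PMAP_QUERY_PAGE_ALTACCT without
 * touching any ledger. A hypothetical diagnostic consumer; the function
 * name and the assumed pmap_query_page_info() prototype are illustrative.
 */
static void
log_page_disposition(pmap_t pmap, vm_map_offset_t va)
{
	int disp = 0;

	if (pmap_query_page_info(pmap, va, &disp) != KERN_SUCCESS)
		return;

	if (disp & PMAP_QUERY_PAGE_ALTACCT) {
		/* internal, charged to alternate_accounting, not footprint */
		kprintf("va 0x%llx: internal + altacct\n", (uint64_t)va);
	} else if (disp & PMAP_QUERY_PAGE_REUSABLE) {
		kprintf("va 0x%llx: reusable\n", (uint64_t)va);
	} else if (disp & PMAP_QUERY_PAGE_INTERNAL) {
		kprintf("va 0x%llx: internal, in phys_footprint\n", (uint64_t)va);
	}
}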
- */ - pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE); - } else { - pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); - } + pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE); } } } @@ -1791,12 +1850,9 @@ pmap_page_protect_options( pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink); if (pvh_e != (pv_hashed_entry_t) pv_h) { - vm_map_offset_t pve_flags; - pv_hash_remove(pvh_e); pv_h->pmap = pvh_e->pmap; - pve_flags = pv_h->va_and_flags & PAGE_MASK; - pv_h->va_and_flags = PVE_VA(pvh_e) | pve_flags; + pv_h->va_and_flags = pvh_e->va_and_flags; pvh_e->qlink.next = (queue_entry_t) pvh_eh; pvh_eh = pvh_e; @@ -1832,7 +1888,7 @@ phys_attribute_clear( int pai; pmap_t pmap; char attributes = 0; - boolean_t is_internal, is_reusable, is_ept; + boolean_t is_internal, is_reusable, is_altacct, is_ept; int ept_bits_to_clear; boolean_t ept_keep_global_mod = FALSE; @@ -1892,6 +1948,7 @@ phys_attribute_clear( pmap = pv_e->pmap; is_ept = is_ept_pmap(pmap); + is_altacct = IS_ALTACCT_PAGE(pai, pv_e); va = PVE_VA(pv_e); pte_bits = 0; @@ -1957,12 +2014,12 @@ phys_attribute_clear( OSAddAtomic(+1, &pmap->stats.internal); PMAP_STATS_PEAK(pmap->stats.internal); assert(pmap->stats.internal > 0); - pmap_ledger_credit(pmap, - task_ledgers.internal, - PAGE_SIZE); - if (IS_ALTACCT_PAGE(pai)) { - /* no impact on footprint */ + if (is_altacct) { + /* no impact on ledgers */ } else { + pmap_ledger_credit(pmap, + task_ledgers.internal, + PAGE_SIZE); pmap_ledger_credit( pmap, task_ledgers.phys_footprint, @@ -1985,12 +2042,12 @@ phys_attribute_clear( /* one less "internal" */ assert(pmap->stats.internal > 0); OSAddAtomic(-1, &pmap->stats.internal); - pmap_ledger_debit(pmap, - task_ledgers.internal, - PAGE_SIZE); - if (IS_ALTACCT_PAGE(pai)) { + if (is_altacct) { /* no impact on footprint */ } else { + pmap_ledger_debit(pmap, + task_ledgers.internal, + PAGE_SIZE); pmap_ledger_debit( pmap, task_ledgers.phys_footprint, @@ -2352,13 +2409,14 @@ pmap_query_page_info( disp |= PMAP_QUERY_PAGE_PRESENT; pai = pa_index(pa); if (!IS_MANAGED_PAGE(pai)) { + } else if (pmap_pv_is_altacct(pmap, va, pai)) { + assert(IS_INTERNAL_PAGE(pai)); + disp |= PMAP_QUERY_PAGE_INTERNAL; + disp |= PMAP_QUERY_PAGE_ALTACCT; } else if (IS_REUSABLE_PAGE(pai)) { disp |= PMAP_QUERY_PAGE_REUSABLE; } else if (IS_INTERNAL_PAGE(pai)) { disp |= PMAP_QUERY_PAGE_INTERNAL; - if (IS_ALTACCT_PAGE(pai)) { - disp |= PMAP_QUERY_PAGE_ALTACCT; - } } } diff --git a/osfmk/i386/trap.c b/osfmk/i386/trap.c index ace505bfa..5cdae79ad 100644 --- a/osfmk/i386/trap.c +++ b/osfmk/i386/trap.c @@ -116,7 +116,7 @@ extern void kprint_state(x86_saved_state64_t *saved_state); * Forward declarations */ static void user_page_fault_continue(kern_return_t kret); -static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl); +static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result); static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip); volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */ @@ -504,6 +504,7 @@ kernel_trap( int type; vm_map_t map = 0; /* protected by T_PAGE_FAULT */ kern_return_t result = KERN_FAILURE; + kern_return_t fault_result = KERN_SUCCESS; thread_t thread; ast_t *myast; boolean_t intr; @@ -719,7 +720,7 @@ kernel_trap( if (code & T_PF_EXECUTE) prot |= VM_PROT_EXECUTE; - result = vm_fault(map, + fault_result = result = vm_fault(map, vaddr, prot, FALSE, @@ -791,7 +792,7 @@ debugger_entry: #endif } pal_cli(); - 
panic_trap(saved_state, trap_pl); + panic_trap(saved_state, trap_pl, fault_result); /* * NO RETURN */ @@ -805,7 +806,7 @@ set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip) } static void -panic_trap(x86_saved_state64_t *regs, uint32_t pl) +panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result) { const char *trapname = "Unknown"; pal_cr_t cr0, cr2, cr3, cr4; @@ -851,7 +852,7 @@ panic_trap(x86_saved_state64_t *regs, uint32_t pl) "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n" "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n" "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n" - "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d\n", + "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n", regs->isf.rip, regs->isf.trapno, trapname, cr0, cr2, cr3, cr4, regs->rax, regs->rbx, regs->rcx, regs->rdx, @@ -863,7 +864,9 @@ panic_trap(x86_saved_state64_t *regs, uint32_t pl) virtualized ? " VMM" : "", potential_kernel_NX_fault ? " Kernel NX fault" : "", potential_smep_fault ? " SMEP/User NX fault" : "", - potential_smap_fault ? " SMAP fault" : "", pl); + potential_smap_fault ? " SMAP fault" : "", + pl, + fault_result); /* * This next statement is not executed, * but it's needed to stop the compiler using tail call optimization diff --git a/osfmk/ipc/ipc_kmsg.c b/osfmk/ipc/ipc_kmsg.c index 92363485c..cd5b37f5e 100644 --- a/osfmk/ipc/ipc_kmsg.c +++ b/osfmk/ipc/ipc_kmsg.c @@ -117,6 +117,8 @@ #include #endif +#include + #include #include @@ -2406,18 +2408,37 @@ ipc_kmsg_copyin_header( } } - /* the entry(s) might need to be deallocated */ + /* + * The entries might need to be deallocated. + * + * Each entry should be deallocated only once, + * even if it was specified in more than one slot in the header. + * Note that dest can be the same entry as reply or voucher, + * but reply and voucher must be distinct entries. 
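/*
 * [Editor's sketch, not part of the patch] The "VF: %d" field added to the
 * panic string above is the raw kern_return_t from the failed vm_fault().
 * The kernel prints only the integer; a hypothetical decoder for the common
 * values:
 */
static const char *
vm_fault_result_name(kern_return_t vf)
{
	switch (vf) {
	case KERN_SUCCESS:            return "none (not a VM fault panic)";
	case KERN_INVALID_ADDRESS:    return "invalid address";
	case KERN_PROTECTION_FAILURE: return "protection failure";
	case KERN_MEMORY_ERROR:       return "memory error";
	default:                      return "other";
	}
}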
+ */ assert(IE_NULL != dest_entry); + if (IE_NULL != reply_entry) + assert(reply_entry != voucher_entry); + if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, dest_name, dest_entry); + + if (dest_entry == reply_entry) { + reply_entry = IE_NULL; + } + + if (dest_entry == voucher_entry) { + voucher_entry = IE_NULL; + } + dest_entry = IE_NULL; } - if (dest_entry != reply_entry && IE_NULL != reply_entry && + if (IE_NULL != reply_entry && IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, reply_name, reply_entry); reply_entry = IE_NULL; } - if (dest_entry != voucher_entry && IE_NULL != voucher_entry && + if (IE_NULL != voucher_entry && IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) { ipc_entry_dealloc(space, voucher_name, voucher_entry); voucher_entry = IE_NULL; @@ -2776,14 +2797,24 @@ ipc_kmsg_copyin_ool_ports_descriptor( result_disp = ipc_object_copyin_type(user_disp); dsc->disposition = result_disp; - if (count > (INT_MAX / sizeof(mach_port_t))) { - *mr = MACH_SEND_TOO_LARGE; + /* We always do a 'physical copy', but you have to specify something valid */ + if (copy_option != MACH_MSG_PHYSICAL_COPY && + copy_option != MACH_MSG_VIRTUAL_COPY) { + *mr = MACH_SEND_INVALID_TYPE; return NULL; } /* calculate length of data in bytes, rounding up */ - ports_length = count * sizeof(mach_port_t); - names_length = count * sizeof(mach_port_name_t); + + if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) { + *mr = MACH_SEND_TOO_LARGE; + return NULL; + } + + if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) { + *mr = MACH_SEND_TOO_LARGE; + return NULL; + } if (ports_length == 0) { return user_dsc; @@ -2895,6 +2926,8 @@ ipc_kmsg_copyin_body( vm_size_t descriptor_size = 0; + mach_msg_type_number_t total_ool_port_count = 0; + /* * Determine if the target is a kernel port. 
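/*
 * [Editor's sketch, not part of the patch] The os/overflow.h idiom adopted
 * above: derive attacker-influenced sizes only through os_mul_overflow() /
 * os_add_overflow(), which write the result and return true on wrap.
 * Standalone illustration; the vm_size_t result type is an assumption.
 */
#include <os/overflow.h>

static mach_msg_return_t
ool_ports_lengths(mach_msg_type_number_t count,
    vm_size_t *ports_length, vm_size_t *names_length)
{
	/* kernel-side array of mach_port_t pointers */
	if (os_mul_overflow(count, sizeof(mach_port_t), ports_length))
		return MACH_SEND_TOO_LARGE;

	/* user-side array of mach_port_name_t */
	if (os_mul_overflow(count, sizeof(mach_port_name_t), names_length))
		return MACH_SEND_TOO_LARGE;

	return MACH_MSG_SUCCESS;
}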
*/ @@ -2914,6 +2947,7 @@ ipc_kmsg_copyin_body( daddr = NULL; for (i = 0; i < dsc_count; i++) { mach_msg_size_t size; + mach_msg_type_number_t ool_port_count = 0; daddr = naddr; @@ -2938,9 +2972,8 @@ ipc_kmsg_copyin_body( if (naddr > (mach_msg_descriptor_t *) ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size)) { - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); - mr = MACH_SEND_MSG_TOO_SMALL; - goto out; + mr = MACH_SEND_MSG_TOO_SMALL; + goto clean_message; } switch (daddr->type.type) { @@ -2955,11 +2988,10 @@ ipc_kmsg_copyin_body( /* * Invalid copy option */ - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); mr = MACH_SEND_INVALID_TYPE; - goto out; + goto clean_message; } - + if ((size >= MSG_OOL_SIZE_SMALL) && (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) && !(daddr->out_of_line.deallocate)) { @@ -2969,26 +3001,52 @@ ipc_kmsg_copyin_body( * memory requirements */ if (space_needed + round_page(size) <= space_needed) { - /* Overflow dectected */ - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); - mr = MACH_MSG_VM_KERNEL; - goto out; - } - + /* Overflow dectected */ + mr = MACH_MSG_VM_KERNEL; + goto clean_message; + } + space_needed += round_page(size); if (space_needed > ipc_kmsg_max_vm_space) { - - /* - * Per message kernel memory limit exceeded - */ - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); + /* Per message kernel memory limit exceeded */ mr = MACH_MSG_VM_KERNEL; - goto out; + goto clean_message; } } + break; + case MACH_MSG_PORT_DESCRIPTOR: + if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) { + /* Overflow detected */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + break; + case MACH_MSG_OOL_PORTS_DESCRIPTOR: + ool_port_count = (is_task_64bit) ? + ((mach_msg_ool_ports_descriptor64_t *)daddr)->count : + daddr->ool_ports.count; + + if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) { + /* Overflow detected */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + + if (ool_port_count > (ipc_kmsg_max_vm_space/sizeof(mach_port_t))) { + /* Per message kernel memory limit exceeded */ + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + break; } } + /* Sending more than 16383 rights in one message seems crazy */ + if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) { + mr = MACH_SEND_TOO_LARGE; + goto clean_message; + } + /* * Allocate space in the pageable kernel ipc copy map for all the * ool data that is to be physically copied. Map is marked wait for @@ -2997,9 +3055,8 @@ ipc_kmsg_copyin_body( if (space_needed) { if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)) != KERN_SUCCESS) { - ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); mr = MACH_MSG_VM_KERNEL; - goto out; + goto clean_message; } } @@ -3063,6 +3120,11 @@ ipc_kmsg_copyin_body( } out: return mr; + +clean_message: + /* no descriptors have been copied in yet */ + ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0); + return mr; } diff --git a/osfmk/ipc/ipc_object.h b/osfmk/ipc/ipc_object.h index 62e8bc253..63f533dfa 100644 --- a/osfmk/ipc/ipc_object.h +++ b/osfmk/ipc/ipc_object.h @@ -179,9 +179,12 @@ extern void io_free( #define _VOLATILE_ volatile /* Sanity check the ref count. If it is 0, we may be doubly zfreeing. - * If it is larger than max int, it has been corrupted, probably by being - * modified into an address (this is architecture dependent, but it's - * safe to assume there cannot really be max int references). 
+ * If it is larger than max int, it has been corrupted or leaked, + * probably by being modified into an address (this is architecture + * dependent, but it's safe to assume there cannot really be max int + * references unless some code is leaking the io_reference without leaking + * object). Saturate the io_reference on release kernel if it reaches + * max int to avoid use after free. * * NOTE: The 0 test alone will not catch double zfreeing of ipc_port * structs, because the io_references field is the first word of the struct, @@ -192,18 +195,42 @@ extern void io_free( static inline void io_reference(ipc_object_t io) { + ipc_object_refs_t new_io_references; + ipc_object_refs_t old_io_references; + assert((io)->io_references > 0 && (io)->io_references < IO_MAX_REFERENCES); - OSIncrementAtomic(&((io)->io_references)); + + do { + old_io_references = (io)->io_references; + new_io_references = old_io_references + 1; + if (old_io_references == IO_MAX_REFERENCES) { + break; + } + } while (OSCompareAndSwap(old_io_references, new_io_references, + &((io)->io_references)) == FALSE); } static inline void io_release(ipc_object_t io) { + ipc_object_refs_t new_io_references; + ipc_object_refs_t old_io_references; + assert((io)->io_references > 0 && (io)->io_references < IO_MAX_REFERENCES); + + do { + old_io_references = (io)->io_references; + new_io_references = old_io_references - 1; + if (old_io_references == IO_MAX_REFERENCES) { + break; + } + } while (OSCompareAndSwap(old_io_references, new_io_references, + &((io)->io_references)) == FALSE); + /* If we just removed the last reference count */ - if ( 1 == OSDecrementAtomic(&((io)->io_references))) { + if (1 == old_io_references) { /* Free the object */ io_free(io_otype((io)), (io)); } diff --git a/osfmk/ipc/ipc_right.c b/osfmk/ipc/ipc_right.c index eb9c04544..c5f5cec0c 100644 --- a/osfmk/ipc/ipc_right.c +++ b/osfmk/ipc/ipc_right.c @@ -261,8 +261,6 @@ ipc_right_reverse( * KERN_INVALID_RIGHT Name doesn't denote port/dead rights. * KERN_INVALID_ARGUMENT Name denotes dead name, but * immediate is FALSE or notify is IP_NULL. - * KERN_UREFS_OVERFLOW Name denotes dead name, but - * generating immediate notif. would overflow urefs. * KERN_RESOURCE_SHORTAGE Couldn't allocate memory. 
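/*
 * [Editor's model, not part of the patch] A standalone, userspace C11 model
 * of the saturating io_reference()/io_release() above: once the count hits
 * the ceiling it is pinned there forever. The object then leaks by design,
 * because a count that wrapped (or was bit-flipped into a huge value) can no
 * longer be trusted to decide when freeing is safe.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define SAT_MAX UINT32_MAX

static void
sat_retain(_Atomic uint32_t *refs)
{
	uint32_t old = atomic_load(refs);
	do {
		if (old == SAT_MAX)
			return;                 /* pinned: never moves again */
	} while (!atomic_compare_exchange_weak(refs, &old, old + 1));
}

static bool                             /* true => caller must free */
sat_release(_Atomic uint32_t *refs)
{
	uint32_t old = atomic_load(refs);
	do {
		if (old == SAT_MAX)
			return false;           /* pinned: object is immortal */
	} while (!atomic_compare_exchange_weak(refs, &old, old - 1));
	return (old == 1);                  /* old is the pre-decrement value */
}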
*/ @@ -397,15 +395,12 @@ ipc_right_request_alloc( assert(urefs > 0); - if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) { - is_write_unlock(space); - if (port != IP_NULL) - ip_release(port); - return KERN_UREFS_OVERFLOW; - } + /* leave urefs pegged to maximum if it overflowed */ + if (urefs < MACH_PORT_UREFS_MAX) + (entry->ie_bits)++; /* increment urefs */ - (entry->ie_bits)++; /* increment urefs */ ipc_entry_modified(space, name, entry); + is_write_unlock(space); if (port != IP_NULL) @@ -563,8 +558,9 @@ ipc_right_check( */ if (entry->ie_request != IE_REQ_NONE) { if (ipc_port_request_type(port, name, entry->ie_request) != 0) { - assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); - bits++; + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + bits++; /* increment urefs */ } entry->ie_request = IE_REQ_NONE; } @@ -878,7 +874,9 @@ ipc_right_dealloc( if (IE_BITS_UREFS(bits) == 1) { ipc_entry_dealloc(space, name, entry); } else { - entry->ie_bits = bits-1; /* decrement urefs */ + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits-1; /* decrement urefs */ ipc_entry_modified(space, name, entry); } is_write_unlock(space); @@ -963,12 +961,13 @@ ipc_right_dealloc( ip_release(port); } else { - ip_unlock(port); - entry->ie_bits = bits-1; /* decrement urefs */ + ip_unlock(port); + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits-1; /* decrement urefs */ ipc_entry_modified(space, name, entry); is_write_unlock(space); } - if (nsrequest != IP_NULL) ipc_notify_no_senders(nsrequest, mscount); @@ -1004,9 +1003,12 @@ ipc_right_dealloc( entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND); - } else - entry->ie_bits = bits-1; /* decrement urefs */ - + } else { + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) { + entry->ie_bits = bits-1; /* decrement urefs */ + } + } ip_unlock(port); ipc_entry_modified(space, name, entry); @@ -1037,7 +1039,6 @@ ipc_right_dealloc( * KERN_SUCCESS Count was modified. * KERN_INVALID_RIGHT Entry has wrong type. * KERN_INVALID_VALUE Bad delta for the right. - * KERN_UREFS_OVERFLOW OK delta, except would overflow. 
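/*
 * [Editor's summary, helper names invented] The rule now applied throughout
 * ipc_right.c: MACH_PORT_UREFS_MAX is a sticky "pegged" value rather than a
 * hard error. Increments on a pegged entry are dropped, overflowing deltas
 * are clamped so the count lands exactly on the peg, and a pegged count only
 * changes for a delta that removes every reference at once. (Underflow is
 * still rejected separately via MACH_PORT_UREFS_UNDERFLOW.)
 */
static inline void
urefs_increment(ipc_entry_bits_t *bits)
{
	if (IE_BITS_UREFS(*bits) < MACH_PORT_UREFS_MAX)
		(*bits)++;                      /* pegged entries never move */
}

static inline mach_port_delta_t
urefs_clamp_delta(mach_port_urefs_t urefs, mach_port_delta_t delta)
{
	if (urefs == MACH_PORT_UREFS_MAX) {
		if (delta != -((mach_port_delta_t)MACH_PORT_UREFS_MAX))
			return 0;               /* ignore all but "drop everything" */
	} else if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
		return (mach_port_delta_t)(MACH_PORT_UREFS_MAX - urefs); /* peg */
	}
	return delta;
}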
*/ kern_return_t @@ -1138,7 +1139,6 @@ ipc_right_delta( assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE); assert(IE_BITS_UREFS(bits) > 0); - assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); assert(port->ip_srights > 0); if (port->ip_pdrequest != NULL) { @@ -1169,7 +1169,9 @@ ipc_right_delta( bits |= MACH_PORT_TYPE_DEAD_NAME; if (entry->ie_request) { entry->ie_request = IE_REQ_NONE; - bits++; + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + bits++; /* increment urefs */ } entry->ie_bits = bits; entry->ie_object = IO_NULL; @@ -1256,26 +1258,46 @@ ipc_right_delta( bits = entry->ie_bits; relport = port; port = IP_NULL; - } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) + } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) { goto invalid_right; + } assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME); assert(IE_BITS_UREFS(bits) > 0); assert(entry->ie_object == IO_NULL); assert(entry->ie_request == IE_REQ_NONE); - urefs = IE_BITS_UREFS(bits); - if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) + if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || + delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { goto invalid_value; - if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) - goto urefs_overflow; + } + + urefs = IE_BITS_UREFS(bits); + + if (urefs == MACH_PORT_UREFS_MAX) { + /* + * urefs are pegged due to an overflow + * only a delta removing all refs at once can change it + */ + + if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + delta = 0; + } else { + if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) + goto invalid_value; + if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) { + /* leave urefs pegged to maximum if it overflowed */ + delta = MACH_PORT_UREFS_MAX - urefs; + } + } if ((urefs + delta) == 0) { ipc_entry_dealloc(space, name, entry); - } else { + } else if (delta != 0) { entry->ie_bits = bits + delta; ipc_entry_modified(space, name, entry); } + is_write_unlock(space); if (relport != IP_NULL) @@ -1293,7 +1315,7 @@ ipc_right_delta( if ((bits & MACH_PORT_TYPE_SEND) == 0) goto invalid_right; - /* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */ + /* maximum urefs for send is MACH_PORT_UREFS_MAX */ port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); @@ -1306,14 +1328,31 @@ ipc_right_delta( assert(port->ip_srights > 0); - urefs = IE_BITS_UREFS(bits); - if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) { + if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) || + delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) { ip_unlock(port); goto invalid_value; } - if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta)) { - ip_unlock(port); - goto urefs_overflow; + + urefs = IE_BITS_UREFS(bits); + + if (urefs == MACH_PORT_UREFS_MAX) { + /* + * urefs are pegged due to an overflow + * only a delta removing all refs at once can change it + */ + + if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + delta = 0; + } else { + if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) { + ip_unlock(port); + goto invalid_value; + } + if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) { + /* leave urefs pegged to maximum if it overflowed */ + delta = MACH_PORT_UREFS_MAX - urefs; + } } if ((urefs + delta) == 0) { @@ -1328,7 +1367,7 @@ ipc_right_delta( if (bits & MACH_PORT_TYPE_RECEIVE) { assert(port->ip_receiver_name == name); assert(port->ip_receiver == space); - ip_unlock(port); + ip_unlock(port); assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE); @@ -1350,10 +1389,12 @@ ipc_right_delta( entry->ie_object = IO_NULL; ipc_entry_dealloc(space, name, 
entry); } - } else { + } else if (delta != 0) { ip_unlock(port); entry->ie_bits = bits + delta; ipc_entry_modified(space, name, entry); + } else { + ip_unlock(port); } is_write_unlock(space); @@ -1386,12 +1427,8 @@ ipc_right_delta( is_write_unlock(space); return KERN_INVALID_VALUE; - urefs_overflow: - is_write_unlock(space); - return KERN_UREFS_OVERFLOW; - guard_failure: - return KERN_INVALID_RIGHT; + return KERN_INVALID_RIGHT; } /* @@ -1439,7 +1476,7 @@ ipc_right_destruct( port = (ipc_port_t) entry->ie_object; assert(port != IP_NULL); - + ip_lock(port); assert(ip_active(port)); assert(port->ip_receiver_name == name); @@ -1462,10 +1499,11 @@ ipc_right_destruct( */ if (srdelta) { - + assert(port->ip_srights > 0); urefs = IE_BITS_UREFS(bits); + /* * Since we made sure that srdelta is negative, * the check for urefs overflow is not required. @@ -1474,6 +1512,16 @@ ipc_right_destruct( ip_unlock(port); goto invalid_value; } + + if (urefs == MACH_PORT_UREFS_MAX) { + /* + * urefs are pegged due to an overflow + * only a delta removing all refs at once can change it + */ + if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) + srdelta = 0; + } + if ((urefs + srdelta) == 0) { if (--port->ip_srights == 0) { nsrequest = port->ip_nsrequest; @@ -1498,7 +1546,7 @@ ipc_right_destruct( bits = entry->ie_bits; if (bits & MACH_PORT_TYPE_SEND) { assert(IE_BITS_UREFS(bits) > 0); - assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); + assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX); if (port->ip_pdrequest != NULL) { /* @@ -1528,7 +1576,8 @@ ipc_right_destruct( bits |= MACH_PORT_TYPE_DEAD_NAME; if (entry->ie_request) { entry->ie_request = IE_REQ_NONE; - bits++; + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + bits++; /* increment urefs */ } entry->ie_bits = bits; entry->ie_object = IO_NULL; @@ -1953,14 +2002,18 @@ ipc_right_copyin( ipc_hash_delete(space, (ipc_object_t) port, name, entry); entry->ie_object = IO_NULL; + /* transfer entry's reference to caller */ } entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND); } else { port->ip_srights++; ip_reference(port); - entry->ie_bits = bits-1; /* decrement urefs */ + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits-1; /* decrement urefs */ } + ipc_entry_modified(space, name, entry); ip_unlock(port); @@ -2048,7 +2101,10 @@ ipc_right_copyin( if (IE_BITS_UREFS(bits) == 1) { bits &= ~MACH_PORT_TYPE_DEAD_NAME; } - entry->ie_bits = bits-1; /* decrement urefs */ + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits-1; /* decrement urefs */ + ipc_entry_modified(space, name, entry); *objectp = IO_DEAD; *sorightp = IP_NULL; @@ -2105,8 +2161,10 @@ ipc_right_copyin_undo( assert(IE_BITS_UREFS(bits) > 0); if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { - assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX); - entry->ie_bits = bits+1; /* increment urefs */ + assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX); + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits+1; /* increment urefs */ } } else { assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) || @@ -2117,8 +2175,10 @@ ipc_right_copyin_undo( assert(IE_BITS_UREFS(bits) > 0); if (msgt_name != MACH_MSG_TYPE_COPY_SEND) { - assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1); - entry->ie_bits = bits+1; /* increment urefs */ + assert(IE_BITS_UREFS(bits) <= 
MACH_PORT_UREFS_MAX); + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits+1; /* increment urefs */ } /* @@ -2216,7 +2276,9 @@ ipc_right_copyin_two_move_sends( port->ip_srights += 2; ip_reference(port); ip_reference(port); - entry->ie_bits = bits-2; /* decrement urefs */ + /* if urefs are pegged due to overflow, leave them pegged */ + if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) + entry->ie_bits = bits-2; /* decrement urefs */ } ipc_entry_modified(space, name, entry); @@ -2401,9 +2463,6 @@ ipc_right_copyin_two( * The object is unlocked; the space isn't. * Returns: * KERN_SUCCESS Copied out capability. - * KERN_UREFS_OVERFLOW User-refs would overflow; - * guaranteed not to happen with a fresh entry - * or if overflow=TRUE was specified. */ kern_return_t @@ -2412,7 +2471,7 @@ ipc_right_copyout( mach_port_name_t name, ipc_entry_t entry, mach_msg_type_name_t msgt_name, - boolean_t overflow, + __unused boolean_t overflow, ipc_object_t object) { ipc_entry_bits_t bits; @@ -2429,14 +2488,15 @@ ipc_right_copyout( switch (msgt_name) { case MACH_MSG_TYPE_PORT_SEND_ONCE: - + assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); + assert(IE_BITS_UREFS(bits) == 0); assert(port->ip_sorights > 0); /* transfer send-once right and ref to entry */ ip_unlock(port); - entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); + entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */ ipc_entry_modified(space, name, entry); break; @@ -2448,33 +2508,33 @@ ipc_right_copyout( assert(port->ip_srights > 1); assert(urefs > 0); - assert(urefs < MACH_PORT_UREFS_MAX); - - if (urefs+1 == MACH_PORT_UREFS_MAX) { - if (overflow) { - /* leave urefs pegged to maximum */ + assert(urefs <= MACH_PORT_UREFS_MAX); - port->ip_srights--; - ip_unlock(port); - ip_release(port); - return KERN_SUCCESS; - } + if (urefs == MACH_PORT_UREFS_MAX) { + /* + * leave urefs pegged to maximum, + * consume send right and ref + */ + port->ip_srights--; ip_unlock(port); - return KERN_UREFS_OVERFLOW; + ip_release(port); + return KERN_SUCCESS; } + + /* consume send right and ref */ port->ip_srights--; ip_unlock(port); ip_release(port); - + } else if (bits & MACH_PORT_TYPE_RECEIVE) { assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE); assert(IE_BITS_UREFS(bits) == 0); - /* transfer send right to entry */ + /* transfer send right to entry, consume ref */ ip_unlock(port); ip_release(port); - + } else { assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE); assert(IE_BITS_UREFS(bits) == 0); @@ -2488,7 +2548,7 @@ ipc_right_copyout( name, entry); } - entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; + entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */ ipc_entry_modified(space, name, entry); break; diff --git a/osfmk/ipc/mach_kernelrpc.c b/osfmk/ipc/mach_kernelrpc.c index af9223254..1b50efbb7 100644 --- a/osfmk/ipc/mach_kernelrpc.c +++ b/osfmk/ipc/mach_kernelrpc.c @@ -256,6 +256,11 @@ _kernelrpc_mach_port_insert_right_trap(struct _kernelrpc_mach_port_insert_right_ disp = ipc_object_copyin_type(args->polyPoly); rv = mach_port_insert_right(task->itk_space, args->name, port, disp); + if (rv != KERN_SUCCESS) { + if (IO_VALID((ipc_object_t)port)) { + ipc_object_destroy((ipc_object_t)port, disp); + } + } done: if (task) diff --git a/osfmk/ipc/port.h b/osfmk/ipc/port.h index 6e0b8e2d1..26dd9bcb1 100644 --- a/osfmk/ipc/port.h +++ b/osfmk/ipc/port.h @@ -81,7 +81,7 @@ #define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \ (((delta) > 0) && \ ((((urefs) + 
(delta)) <= (urefs)) || \ - (((urefs) + (delta)) > MACH_PORT_UREFS_MAX))) + (((urefs) + (delta)) >= MACH_PORT_UREFS_MAX))) #define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \ (((delta) < 0) && (((mach_port_urefs_t)-(delta)) > (urefs))) diff --git a/osfmk/kdp/kdp_udp.c b/osfmk/kdp/kdp_udp.c index ec0c072de..336dbfb6b 100644 --- a/osfmk/kdp/kdp_udp.c +++ b/osfmk/kdp/kdp_udp.c @@ -2166,16 +2166,19 @@ kdp_init(void) struct kdp_in_addr ipaddr; struct kdp_ether_addr macaddr; + boolean_t kdp_match_name_found = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)); + boolean_t kdp_not_serial = kdp_match_name_found ? (strncmp(kdpname, "serial", sizeof(kdpname))) : TRUE; + // serial must be explicitly requested - if(!PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)) || strncmp(kdpname, "serial", sizeof(kdpname)) != 0) + if(!kdp_match_name_found || kdp_not_serial) return; #if WITH_CONSISTENT_DBG - if (PE_consistent_debug_enabled() && debug_boot_arg) { + if (kdp_not_serial && PE_consistent_debug_enabled() && debug_boot_arg) { current_debugger = HW_SHM_CUR_DB; return; } else { - printf("Consistent debug disabled or debug boot arg not present, falling through to serial for debugger\n"); + printf("Serial requested, consistent debug disabled or debug boot arg not present, configuring debugging over serial\n"); } #endif /* WITH_CONSISTENT_DBG */ diff --git a/osfmk/kdp/ml/x86_64/kdp_machdep.c b/osfmk/kdp/ml/x86_64/kdp_machdep.c index 1b8a111d0..0bea2c52b 100644 --- a/osfmk/kdp/ml/x86_64/kdp_machdep.c +++ b/osfmk/kdp/ml/x86_64/kdp_machdep.c @@ -531,11 +531,6 @@ machine_trace_thread(thread_t thread, nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; - if (thread->machine.iss == NULL) { - // no register states to backtrace, probably thread is terminating - return 0; - } - if (user_p) { x86_saved_state32_t *iss32; @@ -633,11 +628,6 @@ machine_trace_thread64(thread_t thread, vm_offset_t kern_virt_addr = 0; vm_map_t bt_vm_map = VM_MAP_NULL; - if (thread->machine.iss == NULL) { - // no register states to backtrace, probably thread is terminating - return 0; - } - nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0; if (user_p) { diff --git a/osfmk/kern/bsd_kern.c b/osfmk/kern/bsd_kern.c index 69aaa1ceb..ac79a2345 100644 --- a/osfmk/kern/bsd_kern.c +++ b/osfmk/kern/bsd_kern.c @@ -308,12 +308,13 @@ int is_64signalregset(void) /* * Swap in a new map for the task/thread pair; the old map reference is - * returned. + * returned. Also does a pmap switch if thread provided is current thread. */ vm_map_t -swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch) +swap_task_map(task_t task, thread_t thread, vm_map_t map) { vm_map_t old_map; + boolean_t doswitch = (thread == current_thread()) ? 
TRUE : FALSE; if (task != thread->task) panic("swap_task_map"); diff --git a/osfmk/kern/debug.c b/osfmk/kern/debug.c index e031b95fd..5e74a85d1 100644 --- a/osfmk/kern/debug.c +++ b/osfmk/kern/debug.c @@ -141,6 +141,8 @@ char *debug_buf_stackshot_end; static char model_name[64]; unsigned char *kernel_uuid; /* uuid_string_t */ char kernel_uuid_string[37]; +char panic_disk_error_description[512]; +size_t panic_disk_error_description_size = sizeof(panic_disk_error_description); static spl_t panic_prologue(const char *str); static void panic_epilogue(spl_t s); @@ -657,32 +659,45 @@ static void panic_display_uptime(void) { kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime); } +static void panic_display_disk_errors(void) { + + if (panic_disk_error_description[0]) { + panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0'; + kdb_printf("Root disk errors: \"%s\"\n", panic_disk_error_description); + } +}; + extern const char version[]; extern char osversion[]; static volatile uint32_t config_displayed = 0; -__private_extern__ void panic_display_system_configuration(void) { +__private_extern__ void panic_display_system_configuration(boolean_t launchd_exit) { - panic_display_process_name(); + if (!launchd_exit) panic_display_process_name(); if (OSCompareAndSwap(0, 1, &config_displayed)) { char buf[256]; - if (strlcpy(buf, PE_boot_args(), sizeof(buf))) + if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf))) kdb_printf("Boot args: %s\n", buf); kdb_printf("\nMac OS version:\n%s\n", (osversion[0] != 0) ? osversion : "Not yet set"); kdb_printf("\nKernel version:\n%s\n",version); panic_display_kernel_uuid(); - panic_display_kernel_aslr(); - panic_display_hibb(); - panic_display_pal_info(); + if (!launchd_exit) { + panic_display_kernel_aslr(); + panic_display_hibb(); + panic_display_pal_info(); + } panic_display_model_name(); - panic_display_uptime(); - panic_display_zprint(); + panic_display_disk_errors(); + if (!launchd_exit) { + panic_display_uptime(); + panic_display_zprint(); #if CONFIG_ZLEAKS - panic_display_ztrace(); + panic_display_ztrace(); #endif /* CONFIG_ZLEAKS */ - kext_dump_panic_lists(&kdb_log); + kext_dump_panic_lists(&kdb_log); + } } } diff --git a/osfmk/kern/debug.h b/osfmk/kern/debug.h index dccba1842..fc25d65ab 100644 --- a/osfmk/kern/debug.h +++ b/osfmk/kern/debug.h @@ -259,6 +259,8 @@ extern char *debug_buf_stackshot_end; extern unsigned int debug_boot_arg; extern unsigned char *kernel_uuid; extern char kernel_uuid_string[]; +extern char panic_disk_error_description[]; +extern size_t panic_disk_error_description_size; #ifdef MACH_KERNEL_PRIVATE @@ -310,7 +312,7 @@ extern void panic_init(void); int packA(char *inbuf, uint32_t length, uint32_t buflen); void unpackA(char *inbuf, uint32_t length); -void panic_display_system_configuration(void); +void panic_display_system_configuration(boolean_t launchd_exit); void panic_display_zprint(void); void panic_display_kernel_aslr(void); void panic_display_hibb(void); diff --git a/osfmk/kern/kern_stackshot.c b/osfmk/kern/kern_stackshot.c index 41272304c..dde91f100 100644 --- a/osfmk/kern/kern_stackshot.c +++ b/osfmk/kern/kern_stackshot.c @@ -493,15 +493,12 @@ kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_confi } } - /* - * We only support the KDP fault path and delta snapshots and tailspin mode with the kcdata format - */ - if (!(flags & STACKSHOT_KCDATA_FORMAT)) { + if (!((flags & STACKSHOT_KCDATA_FORMAT) || (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER))) { return 
KERN_NOT_SUPPORTED; } /* - * If we're not saving the buffer in the kernel pointer, we need places to copy into. + * If we're not saving the buffer in the kernel pointer, we need a place to copy into. */ if ((!out_buffer_addr || !out_size_addr) && !(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) { return KERN_INVALID_ARGUMENT; diff --git a/osfmk/kern/machine.h b/osfmk/kern/machine.h index 18c48dd2b..37d69bf88 100644 --- a/osfmk/kern/machine.h +++ b/osfmk/kern/machine.h @@ -130,9 +130,10 @@ extern void consider_machine_collect(void); extern void machine_thread_going_on_core(thread_t new_thread, int urgency, - uint64_t sched_latency); + uint64_t sched_latency, + uint64_t dispatch_time); -extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating); +extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch); extern void machine_max_runnable_latency(uint64_t bg_max_latency, uint64_t default_max_latency, diff --git a/osfmk/kern/policy_internal.h b/osfmk/kern/policy_internal.h index 59a7b9a79..42cf146c2 100644 --- a/osfmk/kern/policy_internal.h +++ b/osfmk/kern/policy_internal.h @@ -133,6 +133,7 @@ extern int proc_task_role_to_darwin_role(int task_role); extern void task_set_main_thread_qos(task_t task, thread_t main_thread); extern void proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, ipc_port_t * portwatch_ports, int portwatch_count); +extern void proc_inherit_task_role(task_t new_task, task_t old_task); /* IO Throttle tiers */ #define THROTTLE_LEVEL_NONE -1 diff --git a/osfmk/kern/priority.c b/osfmk/kern/priority.c index f4f5b1cc8..dcf7715bd 100644 --- a/osfmk/kern/priority.c +++ b/osfmk/kern/priority.c @@ -181,7 +181,7 @@ thread_quantum_expire( /* Tell platform layer that we are still running this thread */ urgency = thread_get_urgency(thread, &ignore1, &ignore2); - machine_thread_going_on_core(thread, urgency, 0); + machine_thread_going_on_core(thread, urgency, 0, 0); /* * This quantum is up, give this thread another. diff --git a/osfmk/kern/sched_prim.c b/osfmk/kern/sched_prim.c index 2b2a98d68..d633e7ef9 100644 --- a/osfmk/kern/sched_prim.c +++ b/osfmk/kern/sched_prim.c @@ -2568,7 +2568,7 @@ thread_dispatch( */ thread->last_made_runnable_time = mach_approximate_time(); - machine_thread_going_off_core(thread, FALSE); + machine_thread_going_off_core(thread, FALSE, processor->last_dispatch); if (thread->reason & AST_QUANTUM) thread_setrun(thread, SCHED_TAILQ); @@ -2622,7 +2622,7 @@ thread_dispatch( } #endif - machine_thread_going_off_core(thread, should_terminate); + machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE, @@ -2672,7 +2672,7 @@ thread_dispatch( thread_tell_urgency(urgency, arg1, arg2, latency, self); - machine_thread_going_on_core(self, urgency, latency); + machine_thread_going_on_core(self, urgency, latency, processor->last_dispatch); /* * Get a new quantum if none remaining. 
@@ -2693,7 +2693,7 @@ thread_dispatch( processor->first_timeslice = FALSE; thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self); - machine_thread_going_on_core(self, THREAD_URGENCY_NONE, 0); + machine_thread_going_on_core(self, THREAD_URGENCY_NONE, 0, processor->last_dispatch); } self->computation_epoch = processor->last_dispatch; @@ -3313,8 +3313,7 @@ processor_setrun( if (processor->state == PROCESSOR_SHUTDOWN && thread->sched_pri >= processor->current_pri ) { ipi_action = eInterruptRunning; - } else if ( processor->state == PROCESSOR_IDLE && - processor != current_processor() ) { + } else if (processor->state == PROCESSOR_IDLE) { re_queue_tail(&pset->active_queue, &processor->processor_queue); processor->next_thread = THREAD_NULL; @@ -3996,7 +3995,7 @@ set_sched_pri( */ if (nurgency != curgency) { thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread); - machine_thread_going_on_core(thread, nurgency, 0); + machine_thread_going_on_core(thread, nurgency, 0, 0); } } diff --git a/osfmk/kern/startup.c b/osfmk/kern/startup.c index 8c3b99f09..34ebd7481 100644 --- a/osfmk/kern/startup.c +++ b/osfmk/kern/startup.c @@ -370,6 +370,8 @@ kernel_bootstrap(void) /* initialize the corpse config based on boot-args */ corpses_init(); + vm_user_init(); + /* * Create a kernel thread to execute the kernel bootstrap. */ diff --git a/osfmk/kern/task.c b/osfmk/kern/task.c index 4be2588be..25ea7354c 100644 --- a/osfmk/kern/task.c +++ b/osfmk/kern/task.c @@ -351,7 +351,7 @@ task_set_64bit( */ thread_lock(thread); urgency = thread_get_urgency(thread, &arg1, &arg2); - machine_thread_going_on_core(thread, urgency, 0); + machine_thread_going_on_core(thread, urgency, 0, 0); thread_unlock(thread); splx(spl); } @@ -5449,6 +5449,13 @@ kdebug_trace_dyld(task_t task, uint32_t base_code, vm_map_offset_t map_data; vm_offset_t data; + if (!kdebug_enable || + !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0))) + { + vm_map_copy_discard(infos_copy); + return KERN_SUCCESS; + } + assert(infos_copy != NULL); if (task == NULL || task != current_task()) { diff --git a/osfmk/kern/task.h b/osfmk/kern/task.h index a3c5edb6b..67eeac795 100644 --- a/osfmk/kern/task.h +++ b/osfmk/kern/task.h @@ -605,7 +605,7 @@ void task_wait_till_threads_terminate_locked(task_t task); /* JMM - should just be temporary (implementation in bsd_kern still) */ extern void set_bsdtask_info(task_t,void *); extern vm_map_t get_task_map_reference(task_t); -extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t, boolean_t); +extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t); extern pmap_t get_task_pmap(task_t); extern uint64_t get_task_resident_size(task_t); extern uint64_t get_task_compressed(task_t); diff --git a/osfmk/kern/task_policy.c b/osfmk/kern/task_policy.c index dcd6fc472..a89ca7458 100644 --- a/osfmk/kern/task_policy.c +++ b/osfmk/kern/task_policy.c @@ -1835,6 +1835,20 @@ proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role, task_is_importance_receiver(task), 0); } +/* + * Inherit task role across exec + */ +void +proc_inherit_task_role(task_t new_task, + task_t old_task) +{ + int role; + + /* inherit the role from old task to new task */ + role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE); + proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role); +} + extern task_t bsd_init_task; /* diff --git a/osfmk/mach/mach_time.h b/osfmk/mach/mach_time.h index e4c703174..e24219e75 100644 --- a/osfmk/mach/mach_time.h +++ b/osfmk/mach/mach_time.h 
@@ -60,33 +60,25 @@ uint64_t mach_approximate_time(void); /* * like mach_absolute_time, but advances during sleep */ -__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) -__TVOS_AVAILABLE(__TVOS_10_0) -__WATCHOS_AVAILABLE(__WATCHOS_3_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) uint64_t mach_continuous_time(void); /* * like mach_approximate_time, but advances during sleep */ -__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) -__TVOS_AVAILABLE(__TVOS_10_0) -__WATCHOS_AVAILABLE(__WATCHOS_3_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) uint64_t mach_continuous_approximate_time(void); #if !defined(KERNEL) && defined(PRIVATE) // Forward definition because this is a BSD value struct timespec; -__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) -__TVOS_AVAILABLE(__TVOS_10_0) -__WATCHOS_AVAILABLE(__WATCHOS_3_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) kern_return_t mach_get_times(uint64_t* absolute_time, uint64_t* continuous_time, struct timespec *tp); -__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0) -__TVOS_AVAILABLE(__TVOS_10_0) -__WATCHOS_AVAILABLE(__WATCHOS_3_0) +__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) uint64_t mach_boottime_usec(void); #endif /* KERNEL */ diff --git a/osfmk/mach/vm_statistics.h b/osfmk/mach/vm_statistics.h index 0c4623d15..51c99d7cc 100644 --- a/osfmk/mach/vm_statistics.h +++ b/osfmk/mach/vm_statistics.h @@ -494,10 +494,6 @@ typedef struct pmap_statistics *pmap_statistics_t; /* DHMM data */ #define VM_MEMORY_DHMM 84 -#if !(defined(RC_HIDE_XNU_J79) || defined(RC_HIDE_XNU_J80)) -/* memory needed for DFR related actions */ -#define VM_MEMORY_DFR 85 -#endif // !(defined(RC_HIDE_XNU_J79) || defined(RC_HIDE_XNU_J80)) /* memory allocated by SceneKit.framework */ #define VM_MEMORY_SCENEKIT 86 diff --git a/osfmk/vm/vm_compressor.c b/osfmk/vm/vm_compressor.c index 206046a2f..ac885bbd9 100644 --- a/osfmk/vm/vm_compressor.c +++ b/osfmk/vm/vm_compressor.c @@ -225,7 +225,7 @@ uint32_t swapout_target_age = 0; uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE]; uint32_t overage_decompressions_during_sample_period = 0; -void do_fastwake_warmup(void); +void do_fastwake_warmup(queue_head_t *, boolean_t); boolean_t fastwake_warmup = FALSE; boolean_t fastwake_recording_in_progress = FALSE; clock_sec_t dont_trim_until_ts = 0; @@ -2212,9 +2212,32 @@ vm_compressor_do_warmup(void) lck_mtx_unlock_always(c_list_lock); } +void +do_fastwake_warmup_all(void) +{ + + lck_mtx_lock_spin_always(c_list_lock); + + if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) { + + lck_mtx_unlock_always(c_list_lock); + return; + } + + fastwake_warmup = TRUE; + + do_fastwake_warmup(&c_swappedout_list_head, TRUE); + + do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE); + + fastwake_warmup = FALSE; + + lck_mtx_unlock_always(c_list_lock); + +} void -do_fastwake_warmup(void) +do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg) { c_segment_t c_seg = NULL; AbsoluteTime startTime, endTime; @@ -2234,16 +2257,18 @@ do_fastwake_warmup(void) lck_mtx_lock_spin_always(c_list_lock); - while (!queue_empty(&c_swappedout_list_head) && fastwake_warmup == TRUE) { + while (!queue_empty(c_queue) && fastwake_warmup == TRUE) { - c_seg = (c_segment_t) queue_first(&c_swappedout_list_head); + c_seg = (c_segment_t) queue_first(c_queue); - if 
(c_seg->c_generation_id < first_c_segment_to_warm_generation_id || - c_seg->c_generation_id > last_c_segment_to_warm_generation_id) - break; + if (consider_all_cseg == FALSE) { + if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id || + c_seg->c_generation_id > last_c_segment_to_warm_generation_id) + break; - if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) - break; + if (vm_page_free_count < (AVAILABLE_MEMORY / 4)) + break; + } lck_mtx_lock_spin_always(&c_seg->c_lock); lck_mtx_unlock_always(c_list_lock); @@ -2278,7 +2303,9 @@ do_fastwake_warmup(void) lck_mtx_lock_spin_always(c_list_lock); - first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0; + if (consider_all_cseg == FALSE) { + first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0; + } } @@ -2298,7 +2325,7 @@ vm_compressor_compact_and_swap(boolean_t flush_all) KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count, first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0); - do_fastwake_warmup(); + do_fastwake_warmup(&c_swappedout_list_head, FALSE); KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0); fastwake_warmup = FALSE; diff --git a/osfmk/vm/vm_fault.c b/osfmk/vm/vm_fault.c index b7cd1bacc..21d449cde 100644 --- a/osfmk/vm/vm_fault.c +++ b/osfmk/vm/vm_fault.c @@ -2851,7 +2851,7 @@ vm_fault_enter(vm_page_t m, struct codesigning_exit_reason_info *ceri = NULL; uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri)); - if (os_reason_alloc_buffer(codesigning_exit_reason, reason_buffer_size_estimate)) { + if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) { printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n"); } else { if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor, @@ -2889,7 +2889,7 @@ vm_fault_enter(vm_page_t m, printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n"); #endif /* DEBUG || DEVELOPMENT */ /* Free the buffer */ - os_reason_alloc_buffer(codesigning_exit_reason, 0); + os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0); } } } @@ -3440,16 +3440,28 @@ vm_fault_internal( vm_object_t top_object = VM_OBJECT_NULL; int throttle_delay; int compressed_count_delta; - vm_map_offset_t real_vaddr; int grab_options; + vm_map_offset_t trace_vaddr; + vm_map_offset_t trace_real_vaddr; +#if DEVELOPMENT || DEBUG + vm_map_offset_t real_vaddr; real_vaddr = vaddr; +#endif /* DEVELOPMENT || DEBUG */ + trace_real_vaddr = vaddr; vaddr = vm_map_trunc_page(vaddr, PAGE_MASK); + if (map == kernel_map) { + trace_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(vaddr); + trace_real_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(trace_real_vaddr); + } else { + trace_vaddr = vaddr; + } + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START, - ((uint64_t)vaddr >> 32), - vaddr, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, (map == kernel_map), 0, 0); @@ -3457,8 +3469,8 @@ vm_fault_internal( if (get_preemption_level() != 0) { KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)vaddr >> 32), - vaddr, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, KERN_FAILURE, 0, 0); @@ -4014,7 +4026,7 @@ FastPmapEnter: else event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); - 
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); } @@ -4871,7 +4883,7 @@ handle_copy_delay: else event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL)); - KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); + KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0); DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag); } @@ -5064,8 +5076,8 @@ done: } KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END, - ((uint64_t)vaddr >> 32), - vaddr, + ((uint64_t)trace_vaddr >> 32), + trace_vaddr, kr, type_of_fault, 0); diff --git a/osfmk/vm/vm_init.h b/osfmk/vm/vm_init.h index 8e23b580b..5901f9e2e 100644 --- a/osfmk/vm/vm_init.h +++ b/osfmk/vm/vm_init.h @@ -35,5 +35,6 @@ extern void vm_mem_bootstrap(void); extern void vm_mem_init(void); extern void vm_map_steal_memory(void);; +extern void vm_user_init(void); #endif /* VM_INIT_H */ diff --git a/osfmk/vm/vm_map.c b/osfmk/vm/vm_map.c index b834d295b..80f88aae4 100644 --- a/osfmk/vm/vm_map.c +++ b/osfmk/vm/vm_map.c @@ -3146,6 +3146,7 @@ vm_map_enter_mem_object_helper( if (flags & ~(VM_FLAGS_FIXED | VM_FLAGS_ANYWHERE | VM_FLAGS_OVERWRITE | + VM_FLAGS_IOKIT_ACCT | VM_FLAGS_RETURN_4K_DATA_ADDR | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_ALIAS_MASK)) { @@ -3182,6 +3183,7 @@ vm_map_enter_mem_object_helper( mask, flags & (VM_FLAGS_ANYWHERE | VM_FLAGS_OVERWRITE | + VM_FLAGS_IOKIT_ACCT | VM_FLAGS_RETURN_4K_DATA_ADDR | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_ALIAS_MASK), @@ -5430,7 +5432,17 @@ vm_map_wire_nested( goto done; } + if ((entry != vm_map_to_entry(map)) && /* we still have entries in the map */ + (tmp_entry.vme_end != end) && /* AND, we are not at the end of the requested range */ + (entry->vme_start != tmp_entry.vme_end)) { /* AND, the next entry is not contiguous. 
*/ + /* found a "new" hole */ + s = tmp_entry.vme_end; + rc = KERN_INVALID_ADDRESS; + goto done; + } + s = entry->vme_start; + } /* end while loop through map entries */ done: diff --git a/osfmk/vm/vm_object.c b/osfmk/vm/vm_object.c index 882c3c077..72b525fdd 100644 --- a/osfmk/vm/vm_object.c +++ b/osfmk/vm/vm_object.c @@ -8764,11 +8764,13 @@ vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m) for(i=0; i < num_pages; i++) { if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) { if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) { - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0); + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), + VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0); vm_decmp_upl_reprioritize(upl, cur_tier); break; } - KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0); + KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m), + upl->upl_reprio_info[i], upl->upl_priority, 0); if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier); break; @@ -8787,7 +8789,7 @@ vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible) { wait_result_t ret; - KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); + KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0); if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) { /* diff --git a/osfmk/vm/vm_pageout.c b/osfmk/vm/vm_pageout.c index 49f7e2527..01a9f4506 100644 --- a/osfmk/vm/vm_pageout.c +++ b/osfmk/vm/vm_pageout.c @@ -1835,26 +1835,19 @@ Restart: assert((vm_page_secluded_count_free + vm_page_secluded_count_inuse) == vm_page_secluded_count); - vm_page_queue_remove_first(&vm_page_queue_secluded, - secluded_page, - vm_page_t, - pageq); + secluded_page = vm_page_queue_first(&vm_page_queue_secluded); assert(secluded_page->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q); - VM_PAGE_ZERO_PAGEQ_ENTRY(secluded_page); - secluded_page->vm_page_q_state = VM_PAGE_NOT_ON_Q; - vm_page_secluded_count--; + vm_page_queues_remove(secluded_page, FALSE); assert(!secluded_page->fictitious); assert(!VM_PAGE_WIRED(secluded_page)); if (secluded_page->vm_page_object == 0) { /* transfer to free queue */ assert(secluded_page->busy); - vm_page_secluded_count_free--; secluded_page->snext = local_freeq; local_freeq = secluded_page; local_freed++; } else { - vm_page_secluded_count_inuse--; /* transfer to head of inactive queue */ pmap_clear_refmod_options( VM_PAGE_GET_PHYS_PAGE(secluded_page), @@ -1927,26 +1920,19 @@ Restart: assert((vm_page_secluded_count_free + vm_page_secluded_count_inuse) == vm_page_secluded_count); - vm_page_queue_remove_first(&vm_page_queue_secluded, - secluded_page, - vm_page_t, - pageq); + secluded_page = vm_page_queue_first(&vm_page_queue_secluded); assert(secluded_page->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q); - VM_PAGE_ZERO_PAGEQ_ENTRY(secluded_page); - secluded_page->vm_page_q_state = VM_PAGE_NOT_ON_Q; - vm_page_secluded_count--; + vm_page_queues_remove(secluded_page, FALSE); 
assert(!secluded_page->fictitious); assert(!VM_PAGE_WIRED(secluded_page)); if (secluded_page->vm_page_object == 0) { /* transfer to free queue */ assert(secluded_page->busy); - vm_page_secluded_count_free--; secluded_page->snext = local_freeq; local_freeq = secluded_page; local_freed++; } else { - vm_page_secluded_count_inuse--; /* transfer to head of active queue */ vm_page_enqueue_active(secluded_page, FALSE); @@ -2745,7 +2731,6 @@ consider_inactive: else vm_pageout_considered_bq_external++; - assert(VM_PAGE_PAGEABLE(m)); break; } } @@ -2883,6 +2868,7 @@ consider_inactive: /* NOTREACHED */ } + assert(VM_PAGE_PAGEABLE(m)); m_object = VM_PAGE_OBJECT(m); force_anonymous = FALSE; diff --git a/osfmk/vm/vm_protos.h b/osfmk/vm/vm_protos.h index 8acd5f072..ae4933209 100644 --- a/osfmk/vm/vm_protos.h +++ b/osfmk/vm/vm_protos.h @@ -479,7 +479,7 @@ extern kern_return_t compressor_memory_object_create( extern boolean_t vm_compressor_low_on_space(void); extern int vm_swap_low_on_space(void); - +void do_fastwake_warmup_all(void); #if CONFIG_JETSAM extern int proc_get_memstat_priority(struct proc*, boolean_t); #endif /* CONFIG_JETSAM */ diff --git a/osfmk/vm/vm_purgeable.c b/osfmk/vm/vm_purgeable.c index b1d7aeba5..d15e25f27 100644 --- a/osfmk/vm/vm_purgeable.c +++ b/osfmk/vm/vm_purgeable.c @@ -978,7 +978,7 @@ purge_now: vm_page_lock_queues(); KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)), - object, /* purged object */ + VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */ 0, available_for_purge, 0, diff --git a/osfmk/vm/vm_resident.c b/osfmk/vm/vm_resident.c index b378ebaaa..160d30607 100644 --- a/osfmk/vm/vm_resident.c +++ b/osfmk/vm/vm_resident.c @@ -1598,6 +1598,7 @@ vm_page_replace( */ *mp = m->next_m; m->hashed = FALSE; + m->next_m = VM_PAGE_PACK_PTR(NULL); found_m = m; break; @@ -1698,6 +1699,7 @@ vm_page_remove( bucket->cur_count--; #endif /* MACH_PAGE_HASH_STATS */ mem->hashed = FALSE; + this->next_m = VM_PAGE_PACK_PTR(NULL); lck_spin_unlock(bucket_lock); } /* @@ -2799,15 +2801,9 @@ vm_page_grab_secluded(void) } assert(!vm_page_queue_empty(&vm_page_queue_secluded)); LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED); - vm_page_queue_remove_first(&vm_page_queue_secluded, - mem, - vm_page_t, - pageq); + mem = vm_page_queue_first(&vm_page_queue_secluded); assert(mem->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q); - - VM_PAGE_ZERO_PAGEQ_ENTRY(mem); - mem->vm_page_q_state = VM_PAGE_NOT_ON_Q; - vm_page_secluded_count--; + vm_page_queues_remove(mem, TRUE); object = VM_PAGE_OBJECT(mem); @@ -2815,14 +2811,24 @@ vm_page_grab_secluded(void) assert(!VM_PAGE_WIRED(mem)); if (object == VM_OBJECT_NULL) { /* free for grab! */ - assert(mem->busy); - vm_page_secluded_count_free--; vm_page_unlock_queues(); vm_page_secluded.grab_success_free++; + + assert(mem->busy); + assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q); + assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL); + assert(mem->pageq.next == 0); + assert(mem->pageq.prev == 0); + assert(mem->listq.next == 0); + assert(mem->listq.prev == 0); +#if CONFIG_BACKGROUND_QUEUE + assert(mem->vm_page_on_backgroundq == 0); + assert(mem->vm_page_backgroundq.next == 0); + assert(mem->vm_page_backgroundq.prev == 0); +#endif /* CONFIG_BACKGROUND_QUEUE */ return mem; } - vm_page_secluded_count_inuse--; assert(!object->internal); // vm_page_pageable_external_count--; @@ -2862,8 +2868,6 @@ vm_page_grab_secluded(void) if (mem->reference) { /* it's been used but we do need to grab a page... */ } - /* page could still be on vm_page_queue_background... 
*/ - vm_page_free_prepare_queues(mem); vm_page_unlock_queues(); @@ -2875,9 +2879,21 @@ vm_page_grab_secluded(void) assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem))); } pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)); - assert(mem->busy); vm_page_secluded.grab_success_other++; + assert(mem->busy); + assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q); + assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL); + assert(mem->pageq.next == 0); + assert(mem->pageq.prev == 0); + assert(mem->listq.next == 0); + assert(mem->listq.prev == 0); +#if CONFIG_BACKGROUND_QUEUE + assert(mem->vm_page_on_backgroundq == 0); + assert(mem->vm_page_backgroundq.next == 0); + assert(mem->vm_page_backgroundq.prev == 0); +#endif /* CONFIG_BACKGROUND_QUEUE */ + return mem; } #endif /* CONFIG_SECLUDED_MEMORY */ @@ -3347,6 +3363,15 @@ vm_page_free_prepare_object( VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr); } if ( !mem->fictitious) { + assert(mem->pageq.next == 0); + assert(mem->pageq.prev == 0); + assert(mem->listq.next == 0); + assert(mem->listq.prev == 0); +#if CONFIG_BACKGROUND_QUEUE + assert(mem->vm_page_backgroundq.next == 0); + assert(mem->vm_page_backgroundq.prev == 0); +#endif /* CONFIG_BACKGROUND_QUEUE */ + assert(mem->next_m == 0); vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->lopage); } } @@ -3399,6 +3424,9 @@ vm_page_free_unlocked( * as blocked up by vm_pageout_scan(). * The big win is not having to take the free list lock once * per page. + * + * The VM page queues lock (vm_page_queue_lock) should NOT be held. + * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held. */ void vm_page_free_list( @@ -3410,6 +3438,9 @@ vm_page_free_list( vm_page_t local_freeq; int pg_count; + LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED); + LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED); + while (freeq) { pg_count = 0; @@ -5431,12 +5462,6 @@ did_consider: } if (abort_run == TRUE) { - if (m != VM_PAGE_NULL) { - vm_page_free_list(m, FALSE); - } - - dumped_run++; - /* * want the index of the last * page in this run that was @@ -5446,8 +5471,16 @@ did_consider: */ page_idx = tmp_start_idx + 2; if (page_idx >= vm_pages_count) { - if (wrapped) + if (wrapped) { + if (m != VM_PAGE_NULL) { + vm_page_unlock_queues(); + vm_page_free_list(m, FALSE); + vm_page_lock_queues(); + m = VM_PAGE_NULL; + } + dumped_run++; goto done_scanning; + } page_idx = last_idx = 0; wrapped = TRUE; } @@ -5467,6 +5500,14 @@ did_consider: last_idx = page_idx; + if (m != VM_PAGE_NULL) { + vm_page_unlock_queues(); + vm_page_free_list(m, FALSE); + vm_page_lock_queues(); + m = VM_PAGE_NULL; + } + dumped_run++; + lck_mtx_lock(&vm_page_queue_free_lock); /* * reset our free page limit since we @@ -7559,6 +7600,7 @@ vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq) #endif /* CONFIG_BACKGROUND_QUEUE */ return; } + if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR) { assert(mem->pageq.next == 0 && mem->pageq.prev == 0); diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c index 37d3cbd0c..886dbb6ff 100644 --- a/osfmk/vm/vm_user.c +++ b/osfmk/vm/vm_user.c @@ -116,6 +116,7 @@ #include #include #include +#include vm_size_t upl_offset_to_pagelist = 0; @@ -123,6 +124,8 @@ vm_size_t upl_offset_to_pagelist = 0; #include #endif /* VM_CPM */ +lck_grp_t dynamic_pager_control_port_lock_group; +decl_lck_mtx_data(, dynamic_pager_control_port_lock); ipc_port_t dynamic_pager_control_port=NULL; /* @@ -3327,19 +3330,30 @@ mach_memory_entry_range_op( return kr; } +static void 
dp_control_port_init(void) +{ + lck_grp_init(&dynamic_pager_control_port_lock_group,"dp_control_port", LCK_GRP_ATTR_NULL); + lck_mtx_init(&dynamic_pager_control_port_lock, &dynamic_pager_control_port_lock_group, LCK_ATTR_NULL); +} kern_return_t set_dp_control_port( host_priv_t host_priv, ipc_port_t control_port) { - if (host_priv == HOST_PRIV_NULL) - return (KERN_INVALID_HOST); + ipc_port_t old_port; - if (IP_VALID(dynamic_pager_control_port)) - ipc_port_release_send(dynamic_pager_control_port); + if (host_priv == HOST_PRIV_NULL) + return (KERN_INVALID_HOST); + lck_mtx_lock(&dynamic_pager_control_port_lock); + old_port = dynamic_pager_control_port; dynamic_pager_control_port = control_port; + lck_mtx_unlock(&dynamic_pager_control_port_lock); + + if (IP_VALID(old_port)) + ipc_port_release_send(old_port); + return KERN_SUCCESS; } @@ -3348,10 +3362,13 @@ get_dp_control_port( host_priv_t host_priv, ipc_port_t *control_port) { - if (host_priv == HOST_PRIV_NULL) + if (host_priv == HOST_PRIV_NULL) return (KERN_INVALID_HOST); + lck_mtx_lock(&dynamic_pager_control_port_lock); *control_port = ipc_port_copy_send(dynamic_pager_control_port); + lck_mtx_unlock(&dynamic_pager_control_port_lock); + return KERN_SUCCESS; } @@ -3612,6 +3629,11 @@ vm_map_get_phys_page( return phys_page; } +void +vm_user_init(void) +{ + dp_control_port_init(); +} #if 0 kern_return_t kernel_object_iopl_request( /* forward */ diff --git a/security/mac_policy.h b/security/mac_policy.h index 2d5f1b348..de9ab798a 100644 --- a/security/mac_policy.h +++ b/security/mac_policy.h @@ -6591,7 +6591,7 @@ typedef unsigned int mac_policy_handle_t; struct mac_policy_conf { const char *mpc_name; /** policy name */ const char *mpc_fullname; /** full name */ - const char **mpc_labelnames; /** managed label namespaces */ + char const * const *mpc_labelnames; /** managed label namespaces */ unsigned int mpc_labelname_count; /** number of managed label namespaces */ struct mac_policy_ops *mpc_ops; /** operation vector */ int mpc_loadtime_flags; /** load time flags */ diff --git a/security/mac_vfs.c b/security/mac_vfs.c index 1f88f57a3..be3830804 100644 --- a/security/mac_vfs.c +++ b/security/mac_vfs.c @@ -1199,7 +1199,7 @@ mac_vnode_check_signature(struct vnode *vp, struct cs_blob *cs_blob, int reason_error = 0; int kcdata_error = 0; - if ((reason_error = os_reason_alloc_buffer(reason, kcdata_estimate_required_buffer_size + if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size (1, fatal_failure_desc_len))) == 0 && (kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor, EXIT_REASON_USER_DESC, fatal_failure_desc_len, diff --git a/tools/lldbmacros/README.md b/tools/lldbmacros/README.md index ed75ee9c7..3446cbd90 100644 --- a/tools/lldbmacros/README.md +++ b/tools/lldbmacros/README.md @@ -185,6 +185,10 @@ To easily reload your changes in lldb please follow the below example. memory is reloaded from ./memory.py (lldb) + * Alternatively, you can use lldb's command for script loading as: + (lldb) command script import /path/to/memory.py + You can re-run the same command every time you update the code in the file. + It is very important that you do reload using xnudebug command as it does the plumbing of commands and types for your change in the module. Otherwise you could easily get confused why your changes are not reflected in the command. 
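A note on the set_dp_control_port / get_dp_control_port change in vm_user.c above: the old code tested and released the global dynamic_pager_control_port send right with no serialization, so two racing setters could release the same right twice, and a getter could copy a right that was mid-teardown. The new code confines the swap to the dynamic_pager_control_port_lock critical section and performs the release only after unlocking. Below is a minimal standalone sketch of that pattern; pthread_mutex stands in for the lck_mtx, and port_t, port_release, and set_control_port are illustrative names, not xnu's:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct port { int refs; } port_t;

static pthread_mutex_t control_port_lock = PTHREAD_MUTEX_INITIALIZER;
static port_t *control_port = NULL;

/* stand-in for ipc_port_release_send(); may block or take other locks */
static void port_release(port_t *p)
{
	if (p != NULL && --p->refs == 0)
		free(p);
}

static void set_control_port(port_t *new_port)
{
	port_t *old_port;

	pthread_mutex_lock(&control_port_lock);
	old_port = control_port;          /* remember the old right... */
	control_port = new_port;          /* ...and swap under the lock */
	pthread_mutex_unlock(&control_port_lock);

	if (old_port != NULL)
		port_release(old_port);   /* release after unlock, as in the hunk above */
}

int main(void)
{
	port_t *a = calloc(1, sizeof(*a));
	port_t *b = calloc(1, sizeof(*b));
	a->refs = b->refs = 1;

	set_control_port(a);
	set_control_port(b);              /* releases a */
	set_control_port(NULL);           /* releases b */
	puts("old rights released outside the critical section");
	return 0;
}

Keeping the critical section down to the pointer swap means a release path that blocks or takes further locks can never deadlock against concurrent set_dp_control_port / get_dp_control_port callers.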
diff --git a/tools/lldbmacros/plugins/speedtracer.py b/tools/lldbmacros/plugins/speedtracer.py index 8d9d9e2e1..085223291 100644 --- a/tools/lldbmacros/plugins/speedtracer.py +++ b/tools/lldbmacros/plugins/speedtracer.py @@ -1,5 +1,5 @@ -# A basic Plugin that creates performance reports from zprint output -import urllib, urllib2 +import json, urllib, urllib2 +from urllib2 import Request, urlopen, HTTPError kern_version = None def plugin_init(kernel_target, config, lldb_obj, isConnected): @@ -19,25 +19,27 @@ def plugin_execute(command_name, result_output): outstr = '' further_cmds = [] submitvars = {} - submitvars['type']="text" - submitvars['log']=result_output + submitvars['log_content']=result_output - submiturl = "http://speedtracer.apple.com/trace/analyze?format=xml" + submiturl = "https://speedtracer.apple.com/api/v2/trace" encoded_data = urllib.urlencode(submitvars) - request = urllib2.Request(submiturl, encoded_data, {"Accept":"application/xml"}) - response = urllib2.urlopen(request) - - status = response.info()['status'] - if status == 201 or status == '201': - outstr += "CrashTracer data found at " + response.info()['location'] - newurl = response.info()['location'] - import webbrowser - webbrowser.open(newurl) - status = True - else: - outstr += "unknown response from server \n" + str(response.info()) + request = urllib2.Request(submiturl, encoded_data) + request.add_header("Accept", "application/json") + request.add_header("X-ST-GroupName", "core-os") + try: + response = urllib2.urlopen(request) + response_str = response.read() + j = json.loads(response_str) + outstr += "\nspeedtracer output:\n\n" + stacks = j.get("symbolicated_log") + if stacks: + outstr += stacks + else: + outstr += json.dumps(j) + except HTTPError as e: + outstr += "speedtracer replied with\n" + str(e.info()) status = False - + return (status, outstr, further_cmds) def plugin_cleanup(): diff --git a/tools/lldbmacros/process.py b/tools/lldbmacros/process.py index 71108c100..5420349e4 100644 --- a/tools/lldbmacros/process.py +++ b/tools/lldbmacros/process.py @@ -1138,6 +1138,52 @@ def ShowTaskStacksCmdHelper(cmd_args=None, cmd_options={}): # EndMacro: showtaskstacks +def CheckTaskProcRefs(task, proc): + for thread in IterateQueue(task.threads, 'thread *', 'task_threads'): + if int(thread.uthread) == 0: + continue + uthread = Cast(thread.uthread, 'uthread *') + refcount = int(uthread.uu_proc_refcount) + uu_ref_index = int(uthread.uu_pindex) + if refcount == 0: + continue + for ref in range(0, uu_ref_index): + if unsigned(uthread.uu_proc_ps[ref]) == unsigned(proc): + print GetTaskSummary.header + " " + GetProcSummary.header + pval = Cast(task.bsd_info, 'proc *') + print GetTaskSummary(task) + " " + GetProcSummary(pval) + print "\t" + GetThreadSummary.header + print "\t" + GetThreadSummary(thread) + "\n" + + for frame in range (0, 10): + trace_addr = unsigned(uthread.uu_proc_pcs[ref][frame]) + symbol_arr = kern.SymbolicateFromAddress(unsigned(trace_addr)) + if symbol_arr: + symbol_str = str(symbol_arr[0].addr) + else: + symbol_str = '' + print '{0: <#x} {1: <s}'.format(trace_addr, symbol_str) + return + +@lldb_command('showprocrefs') +def ShowProcRefs(cmd_args = None): + """ Display information on threads/BTs that could be holding a reference on the specified proc + usage: showprocrefs <proc> + """ + if cmd_args == None or len(cmd_args) < 1: + raise ArgumentError("No arguments passed") + + proc = kern.GetValueFromAddress(cmd_args[0], 'proc *') + + for t in kern.tasks: + CheckTaskProcRefs(t, proc) + for t in kern.terminated_tasks: + CheckTaskProcRefs(t, proc) + + return + @lldb_command('showallthreads') def ShowAllThreads(cmd_args = None): """ Display info about all threads in the system diff --git a/tools/lldbmacros/userspace.py 
b/tools/lldbmacros/userspace.py index 4b4dd7ae5..4450c248f 100644 --- a/tools/lldbmacros/userspace.py +++ b/tools/lldbmacros/userspace.py @@ -247,31 +247,17 @@ def ShowTaskUserArgs(cmd_args=None, cmd_options={}): return True - -@lldb_command('showtaskuserstacks') -def ShowTaskUserStacks(cmd_args=None): - """ Print out the user stack for each thread in a task, followed by the user libraries. - Syntax: (lldb) showtaskuserstacks <task_t> - The format is compatible with CrashTracer. You can also use the speedtracer plugin as follows - (lldb) showtaskuserstacks -p speedtracer - - Note: the address ranges are approximations. Also the list may not be completely accurate. This command expects memory read failures - and hence will skip a library if unable to read information. Please use your good judgement and not take the output as accurate - """ - if not cmd_args: - raise ArgumentError("Insufficient arguments") - - task = kern.GetValueFromAddress(cmd_args[0], 'task *') +def ShowTaskUserStacks(task): #print GetTaskSummary.header + " " + GetProcSummary.header pval = Cast(task.bsd_info, 'proc *') #print GetTaskSummary(task) + " " + GetProcSummary(pval) + "\n \n" crash_report_format_string = """\ -Process: {pid: <10d} +Process: {pname:s} [{pid:d}] Path: {path: <50s} Identifier: {pname: <30s} Version: ??? (???) Code Type: {parch: <20s} -Parent Process: {ppname: >20s}[{ppid:d}] +Parent Process: {ppname:s} [{ppid:d}] Date/Time: {timest:s}.000 -0800 OS Version: {osversion: <20s} @@ -344,6 +330,36 @@ Synthetic crash log generated from Kernel userstacks print "Enable debugging ('(lldb) xnudebug debug') to see detailed trace." return +@lldb_command('showtaskuserstacks', "P:F:") +def ShowTaskUserStacksCmdHelper(cmd_args=None, cmd_options={}): + """ Print out the user stack for each thread in a task, followed by the user libraries. + Syntax: (lldb) showtaskuserstacks <task_t> + or: (lldb) showtaskuserstacks -P <pid> + or: (lldb) showtaskuserstacks -F <name> + The format is compatible with CrashTracer. You can also use the speedtracer plugin as follows + (lldb) showtaskuserstacks -p speedtracer + + Note: the address ranges are approximations. Also the list may not be completely accurate. This command expects memory read failures + and hence will skip a library if unable to read information. Please use your good judgement and do not take the output as accurate + """ + task_list = [] + if "-F" in cmd_options: + task_list = FindTasksByName(cmd_options["-F"]) + elif "-P" in cmd_options: + pidval = ArgumentStringToInt(cmd_options["-P"]) + for t in kern.tasks: + pval = Cast(t.bsd_info, 'proc *') + if pval and pval.p_pid == pidval: + task_list.append(t) + break + elif cmd_args: + t = kern.GetValueFromAddress(cmd_args[0], 'task *') + task_list.append(t) + else: + raise ArgumentError("Insufficient arguments") + + for task in task_list: + ShowTaskUserStacks(task) def GetUserDataAsString(task, addr, size): """ Get data from task's address space as a string of bytes diff --git a/tools/tests/darwintests/voucher_entry_18826844.c b/tools/tests/darwintests/voucher_entry_18826844.c new file mode 100644 index 000000000..600aae235 --- /dev/null +++ b/tools/tests/darwintests/voucher_entry_18826844.c @@ -0,0 +1,78 @@ +/* + * Test that sending a message to a voucher, with that same voucher as the voucher port + * and only one send right, with the move send before the copy send, doesn't panic. 
+ * + * clang -o voucherentry voucherentry.c -ldarwintest -Weverything -Wno-gnu-flexible-array-initializer + * + * + */ + +#include <darwintest.h> +#include <mach/mach.h> + +T_DECL(voucher_entry, "voucher_entry", T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES)) +{ + kern_return_t kr = KERN_SUCCESS; + mach_voucher_t voucher = MACH_VOUCHER_NULL; + + /* + * The bank voucher already exists in this process, so using it doesn't + * actually test the problem. Use an importance voucher instead. + */ + mach_voucher_attr_recipe_data_t recipe = { + .key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE, + .command = MACH_VOUCHER_ATTR_IMPORTANCE_SELF, + .previous_voucher = MACH_VOUCHER_NULL, + .content_size = 0, + }; + + kr = host_create_mach_voucher(mach_host_self(), + (mach_voucher_attr_raw_recipe_array_t)&recipe, + sizeof(recipe), &voucher); + + T_ASSERT_MACH_SUCCESS(kr, "host_create_mach_voucher"); + + T_ASSERT_NOTNULL(voucher, "voucher must not be null"); + + mach_port_urefs_t refs = 0; + + kr = mach_port_get_refs(mach_task_self(), voucher, MACH_PORT_RIGHT_SEND, &refs); + + T_ASSERT_MACH_SUCCESS(kr, "mach_port_get_refs"); + + T_ASSERT_EQ(refs, (mach_port_urefs_t)1, "voucher must have only one ref"); + + /* First, try with two moves (must fail because there's only one ref) */ + mach_msg_header_t request_msg_1 = { + .msgh_remote_port = voucher, + .msgh_local_port = MACH_PORT_NULL, + .msgh_voucher_port = voucher, + .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_MOVE_SEND, 0, MACH_MSG_TYPE_MOVE_SEND, 0), + .msgh_id = 0xDEAD, + .msgh_size = sizeof(request_msg_1), + }; + + kr = mach_msg_send(&request_msg_1); + + T_ASSERT_MACH_ERROR(MACH_SEND_INVALID_DEST, kr, "send with two moves should fail with invalid dest"); + + /* Next, try with a move and a copy (will succeed and destroy the last ref) */ + mach_msg_header_t request_msg_2 = { + .msgh_remote_port = voucher, + .msgh_local_port = MACH_PORT_NULL, + .msgh_voucher_port = voucher, + .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_MOVE_SEND, 0, MACH_MSG_TYPE_COPY_SEND, 0), + .msgh_id = 0xDEAD, + .msgh_size = sizeof(request_msg_2), + }; + + /* panic happens here */ + kr = mach_msg_send(&request_msg_2); + + T_ASSERT_MACH_SUCCESS(kr, "send with move and copy succeeds"); + + kr = mach_port_get_refs(mach_task_self(), voucher, MACH_PORT_RIGHT_SEND, &refs); + + T_ASSERT_MACH_ERROR(KERN_INVALID_NAME, kr, "voucher should now be invalid name"); +}
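A closing note on the MACH_PORT_UREFS_OVERFLOW hunk at the top of this section: the macro now flags a positive delta that lands exactly on MACH_PORT_UREFS_MAX ('>=' where the old check used '>'), so a user-reference count can never legitimately sit at the cap. A minimal standalone sketch of the tightened check follows; the UREFS_MAX value and the helper name are illustrative assumptions, not xnu's actual definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t mach_port_urefs_t;
typedef int32_t  mach_port_delta_t;

#define UREFS_MAX ((mach_port_urefs_t)((1u << 16) - 1))   /* illustrative cap */

static bool urefs_overflow(mach_port_urefs_t urefs, mach_port_delta_t delta)
{
	return (delta > 0) &&
	    (((urefs + (mach_port_urefs_t)delta) <= urefs) ||      /* wraparound */
	     ((urefs + (mach_port_urefs_t)delta) >= UREFS_MAX));   /* was '>' */
}

int main(void)
{
	assert(!urefs_overflow(UREFS_MAX - 2, 1));   /* stays strictly below the cap */
	assert(urefs_overflow(UREFS_MAX - 1, 1));    /* reaching the cap now overflows */
	assert(!urefs_overflow(1, -2));              /* negative deltas are UNDERFLOW's job */
	return 0;
}

Under the old comparison a count could reach MACH_PORT_UREFS_MAX itself; rejecting that last increment keeps the count strictly below the maximum and closes the off-by-one at the boundary.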