#endif
#if !defined(CC_USE_HEAP_FOR_WORKSPACE)
- #if CC_USE_L4 || CC_IBOOT || defined(_MSC_VER)
+ #if CC_USE_L4 || CC_IBOOT || CC_BASEBAND || defined(_MSC_VER)
/* For L4, stack is too short, need to use HEAP for some computations */
/* CC_USE_HEAP_FOR_WORKSPACE not supported for KERNEL! */
#define CC_USE_HEAP_FOR_WORKSPACE 1
#include <pexpert/pexpert.h>
#define cc_printf(x...) kprintf(x)
extern int printf(const char *format, ...) __printflike(1,2);
-#elif CC_USE_S3
+#elif CC_USE_S3 || CC_IBOOT
#include <stdio.h>
#define cc_printf(x...) printf(x)
#else
#include <corecrypto/ccmode.h>
#include <corecrypto/ccaes.h>
-#define CMAC_BLOCKSIZE 16
+#define CMAC_BLOCKSIZE 16
#if CORECRYPTO_USE_TRANSPARENT_UNION
struct cccmac_ctx {
} CC_ALIGNED(8);
typedef struct cccmac_ctx_hdr {
- uint8_t k1[16];
- uint8_t k2[16];
+ uint8_t k1[CMAC_BLOCKSIZE];
+ uint8_t k2[CMAC_BLOCKSIZE];
+ uint8_t block[CMAC_BLOCKSIZE];
+ size_t block_nbytes; // Number of bytes occupied in block buf
+ size_t cumulated_nbytes; // Total size processed
+ const struct ccmode_cbc *cbc;
uint8_t ctx[8];
} CC_ALIGNED(8) cccmac_ctx_hdr;
#else
struct cccmac_ctx {
- uint8_t k1[16];
- uint8_t k2[16];
+ uint8_t k1[CMAC_BLOCKSIZE];
+ uint8_t k2[CMAC_BLOCKSIZE];
+ uint8_t block[CMAC_BLOCKSIZE];
+ size_t block_nbytes; // Number of bytes occupied in block
+ size_t cumulated_nbytes; // Total size processed
+ const struct ccmode_cbc *cbc;
uint8_t ctx[8];
} CC_ALIGNED(8);// cccmac_ctx_hdr;
#define cccmac_mode_iv(_mode_, HC) (cccbc_iv *)(cccmac_mode_ctx_start(_mode_, HC)+cccmac_cbc_size(_mode_))
#define cccmac_k1(HC) (CCCMAC_HDR(HC)->k1)
#define cccmac_k2(HC) (CCCMAC_HDR(HC)->k2)
+#define cccmac_block(HC) (CCCMAC_HDR(HC)->block)
+#define cccmac_cbc(HC) (CCCMAC_HDR(HC)->cbc)
+#define cccmac_block_nbytes(HC) (CCCMAC_HDR(HC)->block_nbytes)
+#define cccmac_cumulated_nbytes(HC) (CCCMAC_HDR(HC)->cumulated_nbytes)
-void cccmac_init(const struct ccmode_cbc *cbc, cccmac_ctx_t ctx, const void *key);
+/* CMAC as defined in NIST SP800-38B - 2005 */
-void cccmac_block_update(const struct ccmode_cbc *cbc, cccmac_ctx_t cmac,
- size_t nblocks, const void *data);
+/* HACK:
+ To change the prototype of cccmac_init (and preserve the name) we need to
+ proceed in steps:
+ 1) Make corecrypto change (23557380)
+ 2) Have all clients define "CC_CHANGEFUNCTION_28544056_cccmac_init"
+ 3) Remove the CC_CHANGEFUNCTION_28544056_cccmac_init logic and old functions from corecrypto
+ 4) Clients can remove CC_CHANGEFUNCTION_28544056_cccmac_init at their leisure
+
+ */
+
+/* =============================================================================
+
+ ONE SHOT
+
+ ==============================================================================*/
+
+/*!
+ @function cccmac_one_shot_generate
+ @abstract CMAC generation in one call
+
+ @param cbc CBC and block cipher specification
+ @param key_nbytes Length of the key in bytes
+ @param key Pointer to the key of length key_nbytes
+ @param data_nbytes Length of the data in bytes
+ @param data Pointer to the data
+ @param mac_nbytes Length in bytes of the mac, > 0
+ @param mac Output of length cbc->block_size
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_one_shot_generate(const struct ccmode_cbc *cbc,
+ size_t key_nbytes, const void *key,
+ size_t data_nbytes, const void *data,
+ size_t mac_nbytes, void *mac);
+
+/*!
+ @function cccmac_one_shot_verify
+ @abstract CMAC verification in one call
+
+ @param cbc CBC and block cipher specification
+ @param key_nbytes Length of the key in bytes
+ @param key Pointer to the key of length key_nbytes
+ @param data_nbytes Length of the data in bytes
+ @param data Pointer to the data
+ @param expected_mac_nbytes Length in bytes of the expected mac, > 0
+ @param expected_mac Expected mac value
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_one_shot_verify(const struct ccmode_cbc *cbc,
+ size_t key_nbytes, const void *key,
+ size_t data_nbytes, const void *data,
+ size_t expected_mac_nbytes, const void *expected_mac);
+
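The two one-shot entry points above pair naturally; the sketch below generates a MAC and then verifies it over the same data. It is illustrative only and not part of this header; ccaes_cbc_encrypt_mode() is assumed to be provided by <corecrypto/ccaes.h>.

#include <corecrypto/ccaes.h>

static int cmac_one_shot_example(size_t key_nbytes, const uint8_t *key,
                                 size_t data_nbytes, const void *data)
{
    const struct ccmode_cbc *cbc = ccaes_cbc_encrypt_mode(); /* assumed accessor */
    uint8_t mac[CMAC_BLOCKSIZE];

    /* Generate the MAC, then verify it against the same data. */
    int rc = cccmac_one_shot_generate(cbc, key_nbytes, key,
                                      data_nbytes, data,
                                      sizeof(mac), mac);
    if (rc != 0) return rc;
    return cccmac_one_shot_verify(cbc, key_nbytes, key,
                                  data_nbytes, data,
                                  sizeof(mac), mac);
}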
+/* =============================================================================
+
+ STREAMING
+
+ Init - Update - Final
+
+==============================================================================*/
+
+/*!
+ @function cccmac_init
+ @abstract Init CMAC context with CBC mode and key
+
+ @param cbc CBC and block cipher specification
+ @param ctx Context used to store internal state
+ @param key_nbytes Length of the key in bytes
+ @param key Full key
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+
+
+
+#ifndef CC_CHANGEFUNCTION_28544056_cccmac_init
+int cccmac_init(const struct ccmode_cbc *cbc,
+ cccmac_ctx_t ctx,
+ size_t key_nbytes, const void *key)
+// This is the correct prototype. The deprecation warning only affects clients using the old function (now defined as a macro below).
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "define 'CC_CHANGEFUNCTION_28544056_cccmac_init' and use new cccmac_init with parameter key_nbytes")));
+#else
+int cccmac_init(const struct ccmode_cbc *cbc,
+ cccmac_ctx_t ctx,
+ size_t key_nbytes, const void *key);
+#endif
+
+/*!
+ @function cccmac_update
+ @abstract Process data
+
+ @param ctx Context used to store internal state
+ @param data_nbytes Length in bytes of the data
+ @param data Data to process
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+
+int cccmac_update(cccmac_ctx_t ctx,
+ size_t data_nbytes, const void *data);
+/*!
+ @function cccmac_final_generate
+ @abstract Final step for generation
+
+ @param ctx Context used to store internal state
+ @param mac_nbytes Length in bytes of the mac, > 0
+ @param mac Output of length mac_nbytes
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_final_generate(cccmac_ctx_t ctx,
+ size_t mac_nbytes, void *mac);
+
+/*!
+ @function cccmac_final_verify
+ @abstract Final step and verification
+
+ @param ctx Context used to store internal state
+ @param expected_mac_nbytes Length in bytes of the expected mac, > 0
+ @param expected_mac Expected mac value
+
+ @result 0 iff successful.
+
+ @discussion Only supports CMAC_BLOCKSIZE block ciphers
+ */
+int cccmac_final_verify(cccmac_ctx_t ctx,
+ size_t expected_mac_nbytes, const void *expected_mac);
+
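An illustrative streaming sequence (not part of this patch): init, two updates, then final_generate. It assumes CC_CHANGEFUNCTION_28544056_cccmac_init is defined so the four-argument cccmac_init prototype is in effect, and that cccmac_mode_decl()/cccmac_mode_clear() are the context declaration macros from this header.

static int cmac_stream_example(const struct ccmode_cbc *cbc,
                               size_t key_nbytes, const void *key,
                               size_t n1, const void *part1,
                               size_t n2, const void *part2,
                               uint8_t mac[CMAC_BLOCKSIZE])
{
    int rc;
    cccmac_mode_decl(cbc, ctx);                    /* assumed declaration macro */
    rc = cccmac_init(cbc, ctx, key_nbytes, key);   /* new prototype, takes key_nbytes */
    if (rc == 0) rc = cccmac_update(ctx, n1, part1);
    if (rc == 0) rc = cccmac_update(ctx, n2, part2);
    if (rc == 0) rc = cccmac_final_generate(ctx, CMAC_BLOCKSIZE, mac);
    cccmac_mode_clear(cbc, ctx);                   /* assumed clear macro */
    return rc;
}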
+
+/* =============================================================================
+
+ Legacy - Please migrate to new functions above
+
+ ==============================================================================*/
+
+#ifndef CC_CHANGEFUNCTION_28544056_cccmac_init
+
+/*
+ Guidelines for switching to new CMAC functions
+
+ Legacy New functions
+ cccmac_init -> cccmac_init w/ key length in bytes
+ cccmac_block_update -> cccmac_update w/ size in bytes instead of blocks
+ cccmac_final -> cccmac_final_generate or cccmac_final_verify
+ depending on the use case, preceded
+ by cccmac_update if there are any leftover bytes.
+ cccmac -> cccmac_one_shot_generate or cccmac_one_shot_verify
+ depending on the use case
+
+ */
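As a concrete (illustrative) before/after for the table above, assuming a 16-byte AES key, whole blocks plus a trailing fragment:

/* Legacy sequence:
 *     cccmac_init(cbc, ctx, key);
 *     cccmac_block_update(cbc, ctx, nblocks, data);
 *     cccmac_final(cbc, ctx, tail_nbytes, tail, mac);
 *
 * Equivalent new sequence:
 *     cccmac_init(cbc, ctx, 16, key);
 *     cccmac_update(ctx, nblocks * CMAC_BLOCKSIZE, data);
 *     cccmac_update(ctx, tail_nbytes, tail);
 *     cccmac_final_generate(ctx, CMAC_BLOCKSIZE, mac);
 */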
+
+/*!
+ @function cccmac_init
+ @abstract Initialize CMAC context with a 128-bit key
+
+ Define CC_CHANGEFUNCTION_28544056_cccmac_init and use "cccmac_init(...,...,16,...)"
+
+ */
+#define cccmac_init(cbc,ctx,key) cccmac_init(cbc,ctx,16,key)
+
+#endif /* CC_CHANGEFUNCTION_28544056_cccmac_init - TO BE REMOVED WITH 28544056 */
+
+/*!
+ @function cccmac_block_update
+ @abstract Process data
+ */
+
+CC_INLINE void cccmac_block_update(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+ size_t nblocks, const void *data)
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_update")));
+
+CC_INLINE void cccmac_block_update(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+ size_t nblocks, const void *data) {
+ cccmac_update(ctx,(nblocks)*CMAC_BLOCKSIZE,data);
+}
+
+/*!
+ @function cccmac_final
+ @abstract Finalize CMAC generation
+ */
+CC_INLINE void cccmac_final(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+ size_t nbytes, const void *in, void *out)
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_final_generate or cccmac_final_verify")));
+
+CC_INLINE void cccmac_final(CC_UNUSED const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
+ size_t nbytes, const void *in, void *out) {
+ cccmac_update(ctx, nbytes, in);
+ cccmac_final_generate(ctx,CMAC_BLOCKSIZE,out);
+}
+
+/*!
+ @function cccmac
+ @abstract One-shot CMAC generation with a 128-bit key
+ */
+CC_INLINE void cccmac(const struct ccmode_cbc *cbc,
+ const void *key,
+ size_t data_len, const void *data, void *mac)
+__attribute__((deprecated("see guidelines in corecrypto/cccmac.h for migration", "cccmac_one_shot_generate or cccmac_one_shot_verify")));
-void cccmac_final(const struct ccmode_cbc *cbc, cccmac_ctx_t ctx,
- size_t nbytes, const void *in, void *out);
+CC_INLINE void cccmac(const struct ccmode_cbc *cbc,
+ const void *key,
+ size_t data_len, const void *data, void *mac) {
+ cccmac_one_shot_generate(cbc,16,key,data_len,data,16,mac);
+}
-void cccmac(const struct ccmode_cbc *cbc, const void *key,
- size_t data_len, const void *data,
- void *mac);
#endif /* _CORECRYPTO_cccmac_H_ */
return mode->block_size;
}
-CC_INLINE void ccecb_init(const struct ccmode_ecb *mode, ccecb_ctx *ctx,
- size_t key_len, const void *key)
+CC_INLINE int ccecb_init(const struct ccmode_ecb *mode, ccecb_ctx *ctx,
+ size_t key_len, const void *key)
{
- mode->init(mode, ctx, key_len, key);
+ return mode->init(mode, ctx, key_len, key);
}
-CC_INLINE void ccecb_update(const struct ccmode_ecb *mode, const ccecb_ctx *ctx,
- size_t nblocks, const void *in, void *out)
+CC_INLINE int ccecb_update(const struct ccmode_ecb *mode, const ccecb_ctx *ctx,
+ size_t nblocks, const void *in, void *out)
{
- mode->ecb(ctx, nblocks, in, out);
+ return mode->ecb(ctx, nblocks, in, out);
}
-CC_INLINE void ccecb_one_shot(const struct ccmode_ecb *mode,
- size_t key_len, const void *key,
- size_t nblocks, const void *in, void *out)
+CC_INLINE int ccecb_one_shot(const struct ccmode_ecb *mode,
+ size_t key_len, const void *key,
+ size_t nblocks, const void *in, void *out)
{
+ int rc;
ccecb_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, key_len, key);
+ rc = mode->init(mode, ctx, key_len, key);
mode->ecb(ctx, nblocks, in, out);
ccecb_ctx_clear(mode->size, ctx);
+ return rc;
}
/* CBC mode. */
return mode->block_size;
}
-CC_INLINE void cccbc_init(const struct ccmode_cbc *mode, cccbc_ctx *ctx,
- size_t key_len, const void *key)
+CC_INLINE int cccbc_init(const struct ccmode_cbc *mode, cccbc_ctx *ctx,
+ size_t key_len, const void *key)
{
- mode->init(mode, ctx, key_len, key);
+ return mode->init(mode, ctx, key_len, key);
}
-CC_INLINE void cccbc_set_iv(const struct ccmode_cbc *mode, cccbc_iv *iv_ctx,
- const void *iv)
+CC_INLINE int cccbc_set_iv(const struct ccmode_cbc *mode, cccbc_iv *iv_ctx,
+ const void *iv)
{
if (iv)
cc_copy(mode->block_size, iv_ctx, iv);
else
cc_zero(mode->block_size, iv_ctx);
+ return 0;
}
-CC_INLINE void cccbc_update(const struct ccmode_cbc *mode, cccbc_ctx *ctx,
- cccbc_iv *iv, size_t nblocks,
- const void *in, void *out)
+CC_INLINE int cccbc_update(const struct ccmode_cbc *mode, cccbc_ctx *ctx,
+ cccbc_iv *iv, size_t nblocks,
+ const void *in, void *out)
{
- mode->cbc(ctx, iv, nblocks, in, out);
+ return mode->cbc(ctx, iv, nblocks, in, out);
}
-CC_INLINE void cccbc_one_shot(const struct ccmode_cbc *mode,
- size_t key_len, const void *key,
- const void *iv, size_t nblocks,
- const void *in, void *out)
+CC_INLINE int cccbc_one_shot(const struct ccmode_cbc *mode,
+ size_t key_len, const void *key,
+ const void *iv, size_t nblocks,
+ const void *in, void *out)
{
+ int rc;
cccbc_ctx_decl(mode->size, ctx);
cccbc_iv_decl(mode->block_size, iv_ctx);
- mode->init(mode, ctx, key_len, key);
+ rc = mode->init(mode, ctx, key_len, key);
if (iv)
cccbc_set_iv(mode, iv_ctx, iv);
else
cc_zero(mode->block_size, iv_ctx);
mode->cbc(ctx, iv_ctx, nblocks, in, out);
cccbc_ctx_clear(mode->size, ctx);
+ return rc;
}
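An illustrative caller of the new int-returning wrappers (not part of this patch); ccaes_cbc_encrypt_mode() is assumed to be provided by <corecrypto/ccaes.h>.

static int cbc_encrypt_example(const uint8_t key[16], const uint8_t iv[16],
                               size_t nblocks, const void *in, void *out)
{
    const struct ccmode_cbc *mode = ccaes_cbc_encrypt_mode(); /* assumed accessor */
    /* The return code now propagates init failures (e.g. a bad key length). */
    return cccbc_one_shot(mode, 16, key, iv, nblocks, in, out);
}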
/* CFB mode. */
return mode->block_size;
}
-CC_INLINE void cccfb_init(const struct ccmode_cfb *mode, cccfb_ctx *ctx,
- size_t key_len, const void *key,
- const void *iv)
+CC_INLINE int cccfb_init(const struct ccmode_cfb *mode, cccfb_ctx *ctx,
+ size_t key_len, const void *key,
+ const void *iv)
{
- mode->init(mode, ctx, key_len, key, iv);
+ return mode->init(mode, ctx, key_len, key, iv);
}
-CC_INLINE void cccfb_update(const struct ccmode_cfb *mode, cccfb_ctx *ctx,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int cccfb_update(const struct ccmode_cfb *mode, cccfb_ctx *ctx,
+ size_t nbytes, const void *in, void *out)
{
- mode->cfb(ctx, nbytes, in, out);
+ return mode->cfb(ctx, nbytes, in, out);
}
-CC_INLINE void cccfb_one_shot(const struct ccmode_cfb *mode,
- size_t key_len, const void *key, const void *iv,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int cccfb_one_shot(const struct ccmode_cfb *mode,
+ size_t key_len, const void *key, const void *iv,
+ size_t nbytes, const void *in, void *out)
{
+ int rc;
cccfb_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, key_len, key, iv);
+ rc = mode->init(mode, ctx, key_len, key, iv);
mode->cfb(ctx, nbytes, in, out);
cccfb_ctx_clear(mode->size, ctx);
+ return rc;
}
/* CFB8 mode. */
return mode->block_size;
}
-CC_INLINE void cccfb8_init(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx,
- size_t key_len, const void *key, const void *iv)
+CC_INLINE int cccfb8_init(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx,
+ size_t key_len, const void *key, const void *iv)
{
- mode->init(mode, ctx, key_len, key, iv);
+ return mode->init(mode, ctx, key_len, key, iv);
}
-CC_INLINE void cccfb8_update(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int cccfb8_update(const struct ccmode_cfb8 *mode, cccfb8_ctx *ctx,
+ size_t nbytes, const void *in, void *out)
{
- mode->cfb8(ctx, nbytes, in, out);
+ return mode->cfb8(ctx, nbytes, in, out);
}
-CC_INLINE void cccfb8_one_shot(const struct ccmode_cfb8 *mode,
- size_t key_len, const void *key, const void *iv,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int cccfb8_one_shot(const struct ccmode_cfb8 *mode,
+ size_t key_len, const void *key, const void *iv,
+ size_t nbytes, const void *in, void *out)
{
+ int rc;
cccfb8_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, key_len, key, iv);
+ rc = mode->init(mode, ctx, key_len, key, iv);
mode->cfb8(ctx, nbytes, in, out);
cccfb8_ctx_clear(mode->size, ctx);
+ return rc;
}
/* CTR mode. */
return mode->block_size;
}
-CC_INLINE void ccctr_init(const struct ccmode_ctr *mode, ccctr_ctx *ctx,
- size_t key_len, const void *key, const void *iv)
+CC_INLINE int ccctr_init(const struct ccmode_ctr *mode, ccctr_ctx *ctx,
+ size_t key_len, const void *key, const void *iv)
{
- mode->init(mode, ctx, key_len, key, iv);
+ return mode->init(mode, ctx, key_len, key, iv);
}
-CC_INLINE void ccctr_update(const struct ccmode_ctr *mode, ccctr_ctx *ctx,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int ccctr_update(const struct ccmode_ctr *mode, ccctr_ctx *ctx,
+ size_t nbytes, const void *in, void *out)
{
- mode->ctr(ctx, nbytes, in, out);
+ return mode->ctr(ctx, nbytes, in, out);
}
-CC_INLINE void ccctr_one_shot(const struct ccmode_ctr *mode,
- size_t key_len, const void *key, const void *iv,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int ccctr_one_shot(const struct ccmode_ctr *mode,
+ size_t key_len, const void *key, const void *iv,
+ size_t nbytes, const void *in, void *out)
{
+ int rc;
ccctr_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, key_len, key, iv);
+ rc = mode->init(mode, ctx, key_len, key, iv);
mode->ctr(ctx, nbytes, in, out);
ccctr_ctx_clear(mode->size, ctx);
+ return rc;
}
return mode->block_size;
}
-CC_INLINE void ccofb_init(const struct ccmode_ofb *mode, ccofb_ctx *ctx,
- size_t key_len, const void *key, const void *iv)
+CC_INLINE int ccofb_init(const struct ccmode_ofb *mode, ccofb_ctx *ctx,
+ size_t key_len, const void *key, const void *iv)
{
- mode->init(mode, ctx, key_len, key, iv);
+ return mode->init(mode, ctx, key_len, key, iv);
}
-CC_INLINE void ccofb_update(const struct ccmode_ofb *mode, ccofb_ctx *ctx,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int ccofb_update(const struct ccmode_ofb *mode, ccofb_ctx *ctx,
+ size_t nbytes, const void *in, void *out)
{
- mode->ofb(ctx, nbytes, in, out);
+ return mode->ofb(ctx, nbytes, in, out);
}
-CC_INLINE void ccofb_one_shot(const struct ccmode_ofb *mode,
- size_t key_len, const void *key, const void *iv,
- size_t nbytes, const void *in, void *out)
+CC_INLINE int ccofb_one_shot(const struct ccmode_ofb *mode,
+ size_t key_len, const void *key, const void *iv,
+ size_t nbytes, const void *in, void *out)
{
+ int rc;
ccofb_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, key_len, key, iv);
+ rc = mode->init(mode, ctx, key_len, key, iv);
mode->ofb(ctx, nbytes, in, out);
ccofb_ctx_clear(mode->size, ctx);
+ return rc;
}
-/* Authenticated cipher modes. */
-
/* XTS mode. */
/* Declare a xts key named _name_. Pass the size field of a struct ccmode_xts
return mode->block_size;
}
-CC_INLINE void ccxts_init(const struct ccmode_xts *mode, ccxts_ctx *ctx,
- size_t key_len, const void *key,
+/*!
+ @function ccxts_init
+ @abstract Initialize an XTS context.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param key_nbytes Length of the key arguments in bytes
+ @param data_key Key for data encryption
+ @param tweak_key Key for tweak generation
+
+ @result 0 iff successful.
+
+ @discussion For security reasons, the two keys must be different.
+ */
+CC_INLINE int ccxts_init(const struct ccmode_xts *mode, ccxts_ctx *ctx,
+ size_t key_nbytes, const void *data_key,
const void *tweak_key)
{
- mode->init(mode, ctx, key_len, key, tweak_key);
-}
-
-CC_INLINE void ccxts_set_tweak(const struct ccmode_xts *mode, ccxts_ctx *ctx,
- ccxts_tweak *tweak, const void *iv)
-{
- mode->set_tweak(ctx, tweak, iv);
+ return mode->init(mode, ctx, key_nbytes, data_key, tweak_key);
}
+/*!
+ @function ccxts_set_tweak
+ @abstract Initialize the tweak for a sector.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param tweak Context for the tweak for this sector
+ @param iv Data used to generate the tweak
+
+ @discussion The IV must be exactly one block in length.
+ */
+CC_INLINE int ccxts_set_tweak(const struct ccmode_xts *mode, ccxts_ctx *ctx,
+ ccxts_tweak *tweak, const void *iv)
+{
+ return mode->set_tweak(ctx, tweak, iv);
+}
+
+/*!
+ @function ccxts_update
+ @abstract Encrypt or decrypt data.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for an instance
+ @param tweak Context for the tweak for this sector
+ @param nblocks Length of the data in blocks
+ @param in Input data
+ @param out Output buffer
+
+ @result The updated internal buffer of the tweak context. May be ignored.
+ */
CC_INLINE void *ccxts_update(const struct ccmode_xts *mode, ccxts_ctx *ctx,
- ccxts_tweak *tweak, size_t nblocks, const void *in, void *out)
+ ccxts_tweak *tweak, size_t nblocks, const void *in, void *out)
{
return mode->xts(ctx, tweak, nblocks, in, out);
}
-CC_INLINE void ccxts_one_shot(const struct ccmode_xts *mode,
- size_t key_len, const void *key,
- const void *tweak_key, const void *iv,
- size_t nblocks, const void *in, void *out)
-{
- ccxts_ctx_decl(mode->size, ctx);
- ccxts_tweak_decl(mode->tweak_size, tweak);
- mode->init(mode, ctx, key_len, key, tweak_key);
- mode->set_tweak(ctx, tweak, iv);
- mode->xts(ctx, tweak, nblocks, in, out);
- ccxts_ctx_clear(mode->size, ctx);
- ccxts_tweak_clear(mode->tweak_size, tweak);
-}
+/*!
+ @function ccxts_one_shot
+ @abstract Encrypt or decrypt data in XTS mode.
+
+ @param mode Descriptor for the mode
+ @param key_nbytes Length of the key arguments in bytes
+ @param data_key Key for data encryption
+ @param tweak_key Key for tweak generation
+ @param iv Data used to generate the tweak
+ @param nblocks Length of the data in blocks
+ @param in Input data
+ @param out Output buffer
+
+ @result 0 iff successful.
+
+ @discussion For security reasons, the two keys must be different.
+ */
+int ccxts_one_shot(const struct ccmode_xts *mode,
+ size_t key_nbytes, const void *data_key,
+ const void *tweak_key, const void *iv,
+ size_t nblocks, const void *in, void *out);
+
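Illustrative per-sector XTS usage (not part of this patch): a single init amortized over two sectors, with one set_tweak per sector. The data and tweak keys must be distinct.

static int xts_two_sectors_example(const struct ccmode_xts *mode,
                                   size_t key_nbytes,
                                   const void *data_key, const void *tweak_key,
                                   const void *iv0, const void *iv1,
                                   size_t nblocks, const uint8_t *in, uint8_t *out)
{
    int rc;
    size_t sector_nbytes = nblocks * mode->block_size;
    ccxts_ctx_decl(mode->size, ctx);
    ccxts_tweak_decl(mode->tweak_size, tweak);
    rc = ccxts_init(mode, ctx, key_nbytes, data_key, tweak_key);
    if (rc == 0) rc = ccxts_set_tweak(mode, ctx, tweak, iv0);        /* sector 0 */
    if (rc == 0) ccxts_update(mode, ctx, tweak, nblocks, in, out);
    if (rc == 0) rc = ccxts_set_tweak(mode, ctx, tweak, iv1);        /* sector 1 */
    if (rc == 0) ccxts_update(mode, ctx, tweak, nblocks,
                              in + sector_nbytes, out + sector_nbytes);
    ccxts_ctx_clear(mode->size, ctx);
    ccxts_tweak_clear(mode->tweak_size, tweak);
    return rc;
}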
+/* Authenticated cipher modes. */
/* GCM mode. */
#define ccgcm_ctx_decl(_size_, _name_) cc_ctx_decl(ccgcm_ctx, _size_, _name_)
#define ccgcm_ctx_clear(_size_, _name_) cc_clear(_size_, _name_)
+#define CCGCM_IV_NBYTES 12
+#define CCGCM_BLOCK_NBYTES 16
+
CC_INLINE size_t ccgcm_context_size(const struct ccmode_gcm *mode)
{
return mode->size;
return mode->block_size;
}
-CC_INLINE int ccgcm_init(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
- size_t key_len, const void *key)
-{
- return mode->init(mode, ctx, key_len, key);
-}
+/*!
+ @function ccgcm_init
+ @abstract Initialize a GCM context.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param key_nbytes Length of the key in bytes
+ @param key Key for the underlying blockcipher (AES)
+
+ @result 0 iff successful.
+
+ @discussion The correct sequence of calls is:
+
+ @code ccgcm_init(...)
+ ccgcm_set_iv(...)
+ ccgcm_aad(...) (may be called zero or more times)
+ ccgcm_update(...) (may be called zero or more times)
+ ccgcm_finalize(...)
+
+ To reuse the context for additional encryptions, follow this sequence:
+
+ @code ccgcm_reset(...)
+ ccgcm_set_iv(...)
+ ccgcm_aad(...) (may be called zero or more times)
+ ccgcm_update(...) (may be called zero or more times)
+ ccgcm_finalize(...)
+
+ @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length.
+
+ @warning It is not permitted to call @p ccgcm_inc_iv after initializing the cipher via the @p ccgcm_init interface. Nonzero is returned in the event of an improper call sequence.
+ */
+CC_INLINE int ccgcm_init(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
+ size_t key_nbytes, const void *key)
+{
+ return mode->init(mode, ctx, key_nbytes, key);
+}
+
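The call sequence described above, sketched as a single encryption. This is illustrative only and not part of this header; ccaes_gcm_encrypt_mode() is assumed to come from <corecrypto/ccaes.h>.

#include <corecrypto/ccaes.h>

static int gcm_encrypt_example(size_t key_nbytes, const void *key,
                               const uint8_t iv[CCGCM_IV_NBYTES],
                               size_t aad_nbytes, const void *aad,
                               size_t nbytes, const void *ptext, void *ctext,
                               uint8_t tag[CCGCM_BLOCK_NBYTES])
{
    const struct ccmode_gcm *mode = ccaes_gcm_encrypt_mode(); /* assumed accessor */
    int rc;
    ccgcm_ctx_decl(mode->size, ctx);
    rc = ccgcm_init(mode, ctx, key_nbytes, key);
    if (rc == 0) rc = ccgcm_set_iv(mode, ctx, CCGCM_IV_NBYTES, iv);
    if (rc == 0) rc = ccgcm_aad(mode, ctx, aad_nbytes, aad);
    if (rc == 0) rc = ccgcm_update(mode, ctx, nbytes, ptext, ctext);
    if (rc == 0) rc = ccgcm_finalize(mode, ctx, CCGCM_BLOCK_NBYTES, tag);
    ccgcm_ctx_clear(mode->size, ctx);
    return rc;
}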
+/*!
+ @function ccgcm_init_with_iv
+ @abstract Initialize a GCM context to manage IVs internally.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param key_nbytes Length of the key in bytes
+ @param key Key for the underlying blockcipher (AES)
+ @param iv IV for the first encryption
+
+ @result 0 iff successful.
+
+ @discussion The correct sequence of calls is:
+
+ @code ccgcm_init_with_iv(...)
+ ccgcm_aad(...) (may be called zero or more times)
+ ccgcm_update(...) (may be called zero or more times)
+ ccgcm_finalize(...)
+
+ To reuse the context for additional encryptions, follow this sequence:
+
+ @code ccgcm_reset(...)
+ ccgcm_inc_iv(...)
+ ccgcm_aad(...) (may be called zero or more times)
+ ccgcm_update(...) (may be called zero or more times)
+ ccgcm_finalize(...)
+
+ The IV must be exactly 12 bytes in length.
+
+ Internally, the IV is treated as a four-byte salt followed by an eight-byte counter. This is to match the behavior of certain protocols (e.g. TLS). In the call to @p ccgcm_inc_iv, the counter component will be interpreted as a big-endian, unsigned value and incremented in place.
+
+ @warning It is not permitted to call @p ccgcm_set_iv after initializing the cipher via the @p ccgcm_init_with_iv interface. Nonzero is returned in the event of an improper call sequence.
+
+ @warning The security of GCM depends on the uniqueness of key-IV pairs. To avoid key-IV repetition, callers should not initialize multiple contexts with the same key material via the @p ccgcm_init_with_iv interface.
+ */
+int ccgcm_init_with_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
+ size_t key_nbytes, const void *key,
+ const void *iv);
+
+/*!
+ @function ccgcm_set_iv
+ @abstract Set the IV for encryption.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param iv_nbytes Length of the IV in bytes
+ @param iv Initialization vector
+
+ @result 0 iff successful.
+
+ @discussion Set the initialization vector for encryption.
+
+ @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length.
+
+ In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV.
+
+ In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng).
+
+ @warning This function may not be used after initializing the cipher via @p ccgcm_init_with_iv. Nonzero is returned in the event of an improper call sequence.
+ */
CC_INLINE int ccgcm_set_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
- size_t iv_size, const void *iv)
-{
- return mode->set_iv(ctx, iv_size, iv);
-}
-
-// add Additional authenticated data (AAD)
+ size_t iv_nbytes, const void *iv)
+{
+ return mode->set_iv(ctx, iv_nbytes, iv);
+}
+
+/*!
+ @function ccgcm_set_iv_legacy
+ @abstract Set the IV for encryption.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param iv_nbytes Length of the IV in bytes
+ @param iv Initialization vector
+
+ @result 0 iff successful.
+
+ @discussion Identical to @p ccgcm_set_iv except that it allows zero-length IVs.
+
+ @warning Zero-length IVs nullify the authenticity guarantees of GCM.
+
+ @warning Do not use this function in new applications.
+ */
+int ccgcm_set_iv_legacy(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
+ size_t iv_nbytes, const void *iv);
+
+/*!
+ @function ccgcm_inc_iv
+ @abstract Increment the IV for another encryption.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param iv Updated initialization vector
+
+ @result 0 iff successful.
+
+ @discussion Updates the IV internally for another encryption.
+
+ Internally, the IV is treated as a four-byte salt followed by an eight-byte counter. This is to match the behavior of certain protocols (e.g. TLS). The counter component is interpreted as a big-endian, unsigned value and incremented in place.
+
+ The updated IV is copied to @p iv. This is to support protocols that require part of the IV to be specified explicitly in each packet (e.g. TLS).
+
+ @warning This function may be used only after initializing the cipher via @p ccgcm_init_with_iv.
+ */
+int ccgcm_inc_iv(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, void *iv);
+
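Illustrative reuse of a context initialized via ccgcm_init_with_iv (not part of this patch): each additional message resets the context and advances the IV counter in place.

static int gcm_next_message_example(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
                                    uint8_t iv_out[CCGCM_IV_NBYTES],
                                    size_t nbytes, const void *ptext, void *ctext,
                                    uint8_t tag[CCGCM_BLOCK_NBYTES])
{
    int rc = ccgcm_reset(mode, ctx);
    if (rc == 0) rc = ccgcm_inc_iv(mode, ctx, iv_out); /* incremented IV is copied out */
    if (rc == 0) rc = ccgcm_update(mode, ctx, nbytes, ptext, ctext);
    if (rc == 0) rc = ccgcm_finalize(mode, ctx, CCGCM_BLOCK_NBYTES, tag);
    return rc;
}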
+
+/*!
+ @function ccgcm_aad
+ @abstract Authenticate additional data.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param nbytes Length of the additional data in bytes
+ @param additional_data Additional data to authenticate
+
+ @result 0 iff successful.
+
+ @discussion This is typically used to authenticate data that cannot be encrypted (e.g. packet headers).
+
+ This function may be called zero or more times.
+ */
CC_INLINE int ccgcm_aad(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
size_t nbytes, const void *additional_data)
{
return mode->gmac(ctx, nbytes, additional_data);
}
+/*!
+ @function ccgcm_gmac
+
+ @discussion See @p ccgcm_aad.
+ */
CC_INLINE int ccgcm_gmac(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
size_t nbytes, const void *in)
{
return mode->gmac(ctx, nbytes, in);
}
-// encrypt or decrypt
+/*!
+ @function ccgcm_update
+ @abstract Encrypt or decrypt data.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param nbytes Length of the data in bytes
+ @param in Input plaintext or ciphertext
+ @param out Output ciphertext or plaintext
+
+ @result 0 iff successful.
+
+ @discussion In-place processing is supported.
+
+ This function may be called zero or more times.
+ */
CC_INLINE int ccgcm_update(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
size_t nbytes, const void *in, void *out)
{
return mode->gcm(ctx, nbytes, in, out);
}
+/*!
+ @function ccgcm_finalize
+ @abstract Finish processing and authenticate.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+ @param tag_nbytes Length of the tag in bytes
+ @param tag Authentication tag
+
+ @result 0 iff successful.
+
+ @discussion Finish processing a packet and generate the authentication tag.
+
+ On encryption, @p tag is purely an output parameter. The generated tag is written to @p tag.
+
+ On decryption, @p tag is primarily an input parameter. The caller should provide the authentication tag generated during encryption. The function will return nonzero if the input tag does not match the generated tag.
+
+ @warning To support legacy applications, @p tag is also an output parameter during decryption. The generated tag is written to @p tag. Legacy callers may choose to compare this to the tag generated during encryption. Do not follow this usage pattern in new applications.
+ */
CC_INLINE int ccgcm_finalize(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
- size_t tag_size, void *tag)
+ size_t tag_nbytes, void *tag)
{
- return mode->finalize(ctx, tag_size, tag);
+ return mode->finalize(ctx, tag_nbytes, tag);
}
+/*!
+ @function ccgcm_reset
+ @abstract Reset the context for another encryption.
+
+ @param mode Descriptor for the mode
+ @param ctx Context for this instance
+
+ @result 0 iff successful.
+
+ @discussion Refer to @p ccgcm_init for correct usage.
+ */
CC_INLINE int ccgcm_reset(const struct ccmode_gcm *mode, ccgcm_ctx *ctx)
{
return mode->reset(ctx);
}
+/*!
+ @function ccgcm_one_shot
+ @abstract Encrypt or decrypt with GCM.
+
+ @param mode Descriptor for the mode
+ @param key_nbytes Length of the key in bytes
+ @param key Key for the underlying blockcipher (AES)
+ @param iv_nbytes Length of the IV in bytes
+ @param iv Initialization vector
+ @param adata_nbytes Length of the additional data in bytes
+ @param adata Additional data to authenticate
+ @param nbytes Length of the data in bytes
+ @param in Input plaintext or ciphertext
+ @param out Output ciphertext or plaintext
+ @param tag_nbytes Length of the tag in bytes
+ @param tag Authentication tag
+
+ @result 0 iff successful.
+
+ @discussion Perform GCM encryption or decryption.
+
+ @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length.
+
+ In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV.
+
+ In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng).
+
+ In-place processing is supported.
+
+ On encryption, @p tag is purely an output parameter. The generated tag is written to @p tag.
+
+ On decryption, @p tag is primarily an input parameter. The caller should provide the authentication tag generated during encryption. The function will return nonzero if the input tag does not match the generated tag.
+
+ @warning To support legacy applications, @p tag is also an output parameter during decryption. The generated tag is written to @p tag. Legacy callers may choose to compare this to the tag generated during encryption. Do not follow this usage pattern in new applications.
+ */
int ccgcm_one_shot(const struct ccmode_gcm *mode,
- size_t key_len, const void *key,
- size_t iv_len, const void *iv,
- size_t adata_len, const void *adata,
- size_t nbytes, const void *in, void *out,
- size_t tag_len, void *tag);
-
-//do not call ccgcm_one_shot_legacy() in any new application
+ size_t key_nbytes, const void *key,
+ size_t iv_nbytes, const void *iv,
+ size_t adata_nbytes, const void *adata,
+ size_t nbytes, const void *in, void *out,
+ size_t tag_nbytes, void *tag);
+
+
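Illustrative one-shot decryption (not part of this patch); ccaes_gcm_decrypt_mode() is assumed to come from <corecrypto/ccaes.h>, and a nonzero return means either a parameter error or a tag mismatch.

static int gcm_decrypt_example(size_t key_nbytes, const void *key,
                               const uint8_t iv[CCGCM_IV_NBYTES],
                               size_t adata_nbytes, const void *adata,
                               size_t nbytes, const void *ctext, void *ptext,
                               uint8_t tag[CCGCM_BLOCK_NBYTES])
{
    return ccgcm_one_shot(ccaes_gcm_decrypt_mode() /* assumed accessor */,
                          key_nbytes, key,
                          CCGCM_IV_NBYTES, iv,
                          adata_nbytes, adata,
                          nbytes, ctext, ptext,
                          CCGCM_BLOCK_NBYTES, tag);
}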
+/*!
+ @function ccgcm_one_shot_legacy
+ @abstract Encrypt or decrypt with GCM.
+
+ @param mode Descriptor for the mode
+ @param key_nbytes Length of the key in bytes
+ @param key Key for the underlying blockcipher (AES)
+ @param iv_nbytes Length of the IV in bytes
+ @param iv Initialization vector
+ @param adata_nbytes Length of the additional data in bytes
+ @param adata Additional data to authenticate
+ @param nbytes Length of the data in bytes
+ @param in Input plaintext or ciphertext
+ @param out Output ciphertext or plaintext
+ @param tag_nbytes Length of the tag in bytes
+ @param tag Authentication tag
+
+ @result 0 iff successful.
+
+ @discussion Identical to @p ccgcm_one_shot except that it allows zero-length IVs.
+
+ @warning Zero-length IVs nullify the authenticity guarantees of GCM.
+
+ @warning Do not use this function in new applications.
+ */
int ccgcm_one_shot_legacy(const struct ccmode_gcm *mode,
- size_t key_len, const void *key,
- size_t iv_len, const void *iv,
- size_t adata_len, const void *adata,
- size_t nbytes, const void *in, void *out,
- size_t tag_len, void *tag);
+ size_t key_nbytes, const void *key,
+ size_t iv_nbytes, const void *iv,
+ size_t adata_nbytes, const void *adata,
+ size_t nbytes, const void *in, void *out,
+ size_t tag_nbytes, void *tag);
/* CCM */
return mode->block_size;
}
-CC_INLINE void ccomac_init(const struct ccmode_omac *mode, ccomac_ctx *ctx,
- size_t tweak_len, size_t key_len, const void *key)
+CC_INLINE int ccomac_init(const struct ccmode_omac *mode, ccomac_ctx *ctx,
+ size_t tweak_len, size_t key_len, const void *key)
{
- mode->init(mode, ctx, tweak_len, key_len, key);
+ return mode->init(mode, ctx, tweak_len, key_len, key);
}
CC_INLINE int ccomac_update(const struct ccmode_omac *mode, ccomac_ctx *ctx,
size_t tweak_len, size_t key_len, const void *key,
const void *tweak, size_t nblocks, const void *in, void *out)
{
+ int rc;
ccomac_ctx_decl(mode->size, ctx);
- mode->init(mode, ctx, tweak_len, key_len, key);
- int result = mode->omac(ctx, nblocks, tweak, in, out);
+ rc = mode->init(mode, ctx, tweak_len, key_len, key);
+ if (rc == 0) rc = mode->omac(ctx, nblocks, tweak, in, out);
ccomac_ctx_clear(mode->size, ctx);
- return result;
+ return rc;
}
-void ccmode_cbc_init(const struct ccmode_cbc *cbc, cccbc_ctx *ctx,
- size_t rawkey_len, const void *rawkey);
-void ccmode_cbc_decrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks,
- const void *in, void *out);
-void ccmode_cbc_encrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks,
- const void *in, void *out);
+int ccmode_cbc_init(const struct ccmode_cbc *cbc, cccbc_ctx *ctx,
+ size_t rawkey_len, const void *rawkey);
+int ccmode_cbc_decrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks,
+ const void *in, void *out);
+int ccmode_cbc_encrypt(const cccbc_ctx *ctx, cccbc_iv *iv, size_t nblocks,
+ const void *in, void *out);
struct _ccmode_cbc_key {
const struct ccmode_ecb *ecb;
const struct ccmode_ecb *ecb);
-void ccmode_cfb_init(const struct ccmode_cfb *cfb, cccfb_ctx *ctx,
- size_t rawkey_len, const void *rawkey,
- const void *iv);
-void ccmode_cfb_decrypt(cccfb_ctx *ctx, size_t nbytes,
- const void *in, void *out);
-void ccmode_cfb_encrypt(cccfb_ctx *ctx, size_t nbytes,
- const void *in, void *out);
+int ccmode_cfb_init(const struct ccmode_cfb *cfb, cccfb_ctx *ctx,
+ size_t rawkey_len, const void *rawkey,
+ const void *iv);
+int ccmode_cfb_decrypt(cccfb_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
+int ccmode_cfb_encrypt(cccfb_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
struct _ccmode_cfb_key {
const struct ccmode_ecb *ecb;
size_t pad_len;
void ccmode_factory_cfb_encrypt(struct ccmode_cfb *cfb,
const struct ccmode_ecb *ecb);
-void ccmode_cfb8_init(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx,
- size_t rawkey_len, const void *rawkey, const void *iv);
-void ccmode_cfb8_decrypt(cccfb8_ctx *ctx, size_t nbytes,
- const void *in, void *out);
-void ccmode_cfb8_encrypt(cccfb8_ctx *ctx, size_t nbytes,
- const void *in, void *out);
+int ccmode_cfb8_init(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx,
+ size_t rawkey_len, const void *rawkey, const void *iv);
+int ccmode_cfb8_decrypt(cccfb8_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
+int ccmode_cfb8_encrypt(cccfb8_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
struct _ccmode_cfb8_key {
const struct ccmode_ecb *ecb;
void ccmode_factory_cfb8_encrypt(struct ccmode_cfb8 *cfb8,
const struct ccmode_ecb *ecb);
-void ccmode_ctr_init(const struct ccmode_ctr *ctr, ccctr_ctx *ctx,
- size_t rawkey_len, const void *rawkey, const void *iv);
-void ccmode_ctr_crypt(ccctr_ctx *ctx, size_t nbytes,
- const void *in, void *out);
+int ccmode_ctr_init(const struct ccmode_ctr *ctr, ccctr_ctx *ctx,
+ size_t rawkey_len, const void *rawkey, const void *iv);
+int ccmode_ctr_crypt(ccctr_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
struct _ccmode_ctr_key {
const struct ccmode_ecb *ecb;
storage. */
int ccmode_gcm_init(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx,
size_t rawkey_len, const void *rawkey);
-int ccmode_gcm_set_iv(ccgcm_ctx *ctx, size_t iv_size, const void *iv);
+int ccmode_gcm_set_iv(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv);
int ccmode_gcm_aad(ccgcm_ctx *ctx, size_t nbytes, const void *in);
int ccmode_gcm_decrypt(ccgcm_ctx *ctx, size_t nbytes, const void *in,
void *out);
int ccmode_gcm_finalize(ccgcm_ctx *key, size_t tag_size, void *tag);
int ccmode_gcm_reset(ccgcm_ctx *key);
+#define CCGCM_FLAGS_INIT_WITH_IV 1
// Here is what the structure looks like in memory
// [ temp space | length | *ecb | *ecb_key | table | ecb_key ]
// size of table depends on the implementation (VNG vs factory)
+// currently, VNG and factory share the same "header" described here
+// VNG may add additional data after the header
struct _ccmode_gcm_key {
// 5 blocks of temp space.
unsigned char H[16]; /* multiplier */
unsigned char buf[16]; /* buffer for stuff */
// State and length
- uint32_t ivmode; /* Which mode is the IV in? */
- uint32_t state; /* state the GCM code is in */
- uint32_t buflen; /* length of data in buf */
+ uint16_t state; /* state the GCM code is in */
+ uint16_t flags; /* flags (persistent across reset) */
+ uint32_t buf_nbytes; /* length of data in buf */
- uint64_t totlen; /* 64-bit counter used for IV and AAD */
- uint64_t pttotlen; /* 64-bit counter for the plaintext PT */
+ uint64_t aad_nbytes; /* 64-bit counter used for IV and AAD */
+ uint64_t text_nbytes; /* 64-bit counter for the plaintext PT */
// ECB
const struct ccmode_ecb *ecb; // ecb mode
const struct ccmode_ecb *ecb_encrypt);
-void ccmode_ofb_init(const struct ccmode_ofb *ofb, ccofb_ctx *ctx,
- size_t rawkey_len, const void *rawkey,
- const void *iv);
-void ccmode_ofb_crypt(ccofb_ctx *ctx, size_t nbytes,
- const void *in, void *out);
+int ccmode_ofb_init(const struct ccmode_ofb *ofb, ccofb_ctx *ctx,
+ size_t rawkey_len, const void *rawkey,
+ const void *iv);
+int ccmode_ofb_crypt(ccofb_ctx *ctx, size_t nbytes,
+ const void *in, void *out);
struct _ccmode_ofb_key {
const struct ccmode_ecb *ecb;
ccmode_omac->omac().
key must point to at least sizeof(CCMODE_OMAC_KEY(ecb)) bytes of free
storage. */
-void ccmode_omac_init(const struct ccmode_omac *omac, ccomac_ctx *ctx,
- size_t tweak_len, size_t rawkey_len,
- const void *rawkey);
+int ccmode_omac_init(const struct ccmode_omac *omac, ccomac_ctx *ctx,
+ size_t tweak_len, size_t rawkey_len,
+ const void *rawkey);
struct _ccmode_omac_key {
const struct ccmode_ecb *ecb;
/* Function prototypes used by the macros below, do not call directly. */
-void ccmode_xts_init(const struct ccmode_xts *xts, ccxts_ctx *ctx,
- size_t key_len, const void *data_key,
- const void *tweak_key);
+int ccmode_xts_init(const struct ccmode_xts *xts, ccxts_ctx *ctx,
+ size_t key_nbytes, const void *data_key,
+ const void *tweak_key);
+void ccmode_xts_key_sched(const struct ccmode_xts *xts, ccxts_ctx *ctx,
+ size_t key_nbytes, const void *data_key,
+ const void *tweak_key);
void *ccmode_xts_crypt(const ccxts_ctx *ctx, ccxts_tweak *tweak,
size_t nblocks, const void *in, void *out);
-void ccmode_xts_set_tweak(const ccxts_ctx *ctx, ccxts_tweak *tweak,
- const void *iv);
+int ccmode_xts_set_tweak(const ccxts_ctx *ctx, ccxts_tweak *tweak,
+ const void *iv);
struct _ccmode_xts_key {
.tweak_size = ccn_sizeof_size(sizeof(struct _ccmode_xts_tweak)) + ccn_sizeof_size(ecb->block_size), \
.block_size = ecb->block_size, \
.init = ccmode_xts_init, \
+.key_sched = ccmode_xts_key_sched, \
.set_tweak = ccmode_xts_set_tweak, \
.xts = ccmode_xts_crypt, \
.custom = (ECB), \
.tweak_size = ccn_sizeof_size(sizeof(struct _ccmode_xts_tweak)) + ccn_sizeof_size(ecb->block_size), \
.block_size = ecb->block_size, \
.init = ccmode_xts_init, \
+.key_sched = ccmode_xts_key_sched, \
.set_tweak = ccmode_xts_set_tweak, \
.xts = ccmode_xts_crypt, \
.custom = (ECB), \
struct ccmode_ecb {
size_t size; /* first argument to ccecb_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx,
- size_t key_len, const void *key);
- void (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in,
- void *out);
+ int (*init)(const struct ccmode_ecb *ecb, ccecb_ctx *ctx,
+ size_t key_nbytes, const void *key);
+ int (*ecb)(const ccecb_ctx *ctx, size_t nblocks, const void *in,
+ void *out);
};
/*!
struct ccmode_cbc {
size_t size; /* first argument to cccbc_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx,
- size_t key_len, const void *key);
+ int (*init)(const struct ccmode_cbc *cbc, cccbc_ctx *ctx,
+ size_t key_len, const void *key);
/* cbc encrypt or decrypt nblocks from in to out, iv will be used and updated. */
- void (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv,
- size_t nblocks, const void *in, void *out);
+ int (*cbc)(const cccbc_ctx *ctx, cccbc_iv *iv,
+ size_t nblocks, const void *in, void *out);
const void *custom;
};
struct ccmode_cfb {
size_t size; /* first argument to cccfb_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx,
- size_t key_len, const void *key, const void *iv);
- void (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out);
+ int (*init)(const struct ccmode_cfb *cfb, cccfb_ctx *ctx,
+ size_t key_len, const void *key, const void *iv);
+ int (*cfb)(cccfb_ctx *ctx, size_t nbytes, const void *in, void *out);
const void *custom;
};
struct ccmode_cfb8 {
size_t size; /* first argument to cccfb8_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx,
- size_t key_len, const void *key, const void *iv);
- void (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out);
+ int (*init)(const struct ccmode_cfb8 *cfb8, cccfb8_ctx *ctx,
+ size_t key_len, const void *key, const void *iv);
+ int (*cfb8)(cccfb8_ctx *ctx, size_t nbytes, const void *in, void *out);
const void *custom;
};
struct ccmode_ctr {
size_t size; /* first argument to ccctr_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_ctr *ctr, ccctr_ctx *ctx,
- size_t key_len, const void *key, const void *iv);
- void (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out);
+ int (*init)(const struct ccmode_ctr *ctr, ccctr_ctx *ctx,
+ size_t key_len, const void *key, const void *iv);
+ int (*ctr)(ccctr_ctx *ctx, size_t nbytes, const void *in, void *out);
const void *custom;
};
struct ccmode_ofb {
size_t size; /* first argument to ccofb_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx,
- size_t key_len, const void *key, const void *iv);
- void (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out);
+ int (*init)(const struct ccmode_ofb *ofb, ccofb_ctx *ctx,
+ size_t key_len, const void *key, const void *iv);
+ int (*ofb)(ccofb_ctx *ctx, size_t nbytes, const void *in, void *out);
const void *custom;
};
determines how long the tweak is in bytes, for each subsequent call to
ccmode_xts->xts().
key must point to at least 'size' cc_units of free storage.
- tweak_key must point to at least 'tweak_size' cc_units of free storage. */
- void (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx,
- size_t key_len, const void *key, const void *tweak_key);
+ tweak_key must point to at least 'tweak_size' cc_units of free storage.
+ key and tweak_key must differ.
+ Returns nonzero on failure.
+ */
+ int (*init)(const struct ccmode_xts *xts, ccxts_ctx *ctx,
+ size_t key_nbytes, const void *data_key, const void *tweak_key);
+
+ void (*key_sched)(const struct ccmode_xts *xts, ccxts_ctx *ctx,
+ size_t key_nbytes, const void *data_key, const void *tweak_key);
/* Set the tweak (sector number), the block within the sector zero. */
- void (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv);
+ int (*set_tweak)(const ccxts_ctx *ctx, ccxts_tweak *tweak, const void *iv);
/* Encrypt blocks for a sector, clients must call set_tweak before calling
this function. Return a pointer to the tweak buffer */
int encdec; //is it encrypt or decrypt object
size_t block_size;
int (*init)(const struct ccmode_gcm *gcm, ccgcm_ctx *ctx,
- size_t key_len, const void *key);
- int (*set_iv)(ccgcm_ctx *ctx, size_t iv_size, const void *iv);
+ size_t key_nbytes, const void *key);
+ int (*set_iv)(ccgcm_ctx *ctx, size_t iv_nbytes, const void *iv);
int (*gmac)(ccgcm_ctx *ctx, size_t nbytes, const void *in); // could just be gcm with NULL out
int (*gcm)(ccgcm_ctx *ctx, size_t nbytes, const void *in, void *out);
- int (*finalize)(ccgcm_ctx *key, size_t tag_size, void *tag);
+ int (*finalize)(ccgcm_ctx *key, size_t tag_nbytes, void *tag);
int (*reset)(ccgcm_ctx *ctx);
const void *custom;
};
struct ccmode_omac {
size_t size; /* first argument to ccomac_ctx_decl(). */
size_t block_size;
- void (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx,
- size_t tweak_len, size_t key_len, const void *key);
+ int (*init)(const struct ccmode_omac *omac, ccomac_ctx *ctx,
+ size_t tweak_len, size_t key_len, const void *key);
int (*omac)(ccomac_ctx *ctx, size_t nblocks,
const void *tweak, const void *in, void *out);
const void *custom;
--- /dev/null
+/*
+ * ccrsa.h
+ * corecrypto
+ *
+ * Created on 11/16/2010
+ *
+ * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved.
+ *
+ */
+
+#ifndef _CORECRYPTO_CCRSA_H_
+#define _CORECRYPTO_CCRSA_H_
+
+#include <corecrypto/cc.h>
+#include <corecrypto/ccdigest.h>
+#include <corecrypto/ccrng.h>
+#include <corecrypto/cczp.h>
+#include <stdbool.h>
+
+// Apple does not generate keys of greater than 4096 bits
+// This limit is relaxed to accommodate potential third-party consumers
+#define CCRSA_KEYGEN_MAX_NBITS 8192
+
+// Program error: buffer too small or encrypted message is too small
+#define CCRSA_INVALID_INPUT -1
+// Invalid crypto configuration: Hash length versus RSA key size
+#define CCRSA_INVALID_CONFIG -2
+// The data is invalid (we won't say more for security reasons)
+#define CCRSA_DECRYPTION_ERROR -3
+
+#define CCRSA_ENCODING_ERROR -4
+#define CCRSA_DECODING_ERROR -5
+#define CCRSA_SIGNATURE_GEN_ERROR -6
+
+struct ccrsa_full_ctx {
+ __CCZP_ELEMENTS_DEFINITIONS(pb_)
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+struct ccrsa_pub_ctx {
+ __CCZP_ELEMENTS_DEFINITIONS(pb_)
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+struct ccrsa_priv_ctx {
+ __CCZP_ELEMENTS_DEFINITIONS(pv_)
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+ typedef union {
+ cczp_t zp;
+ struct ccrsa_pub_ctx* pub;
+ struct ccrsa_full_ctx *full;
+ } ccrsa_full_ctx_t __attribute__((transparent_union));
+ typedef struct ccrsa_full_ctx ccrsa_full_ctx;
+ typedef struct ccrsa_priv_ctx ccrsa_priv_ctx;
+
+ typedef union {
+ cczp_t zp;
+ ccrsa_priv_ctx *priv;
+ } ccrsa_priv_ctx_t __attribute__((transparent_union));
+
+
+typedef ccrsa_full_ctx_t ccrsa_pub_ctx_t;
+typedef struct ccrsa_pub_ctx ccrsa_pub_ctx;
+
+#else
+ typedef struct ccrsa_full_ctx* ccrsa_full_ctx_t;
+ typedef struct ccrsa_pub_ctx* ccrsa_pub_ctx_t;
+ typedef struct ccrsa_priv_ctx* ccrsa_priv_ctx_t;
+#endif
+
+
+
+/*
+ public key cczp d=e^-1 mod phi(m) priv key cczp priv key cczq dp, dq, qinv
+ | | | | |
+ | | | | |
+ +-------+------+-------+------++------++-------+------+---------++-------+------+---------++-------+-------+---------+
+ | zm_hd | m[n] |mr[n+1]| e[n] || d[n] || zp_hd |p[n/2]|pr[n/2+1]|| zq_hd |q[n/2]|qr[n/2+1]||dp[n/2]|dq[n/2]|qinv[n/2]|
+ +-------+------+-------+------++------++-------+------+---------++-------+------+---------++-------+-------+---------+
+ */
+
+/* Return the size of a ccrsa_full_ctx where each ccn is _size_ bytes.
+ Get _size_ through ccn_sizeof(nbits). */
+
+#define ccrsa_pub_ctx_size(_size_) (sizeof(struct cczp) + CCN_UNIT_SIZE + 3 * (_size_))
+#define ccrsa_priv_ctx_size(_size_) ((sizeof(struct cczp) + CCN_UNIT_SIZE) * 2 + 7 * ccn_sizeof(ccn_bitsof_size(_size_)/2 + 1))
+#define ccrsa_full_ctx_size(_size_) (ccrsa_pub_ctx_size(_size_) + _size_ + ccrsa_priv_ctx_size(_size_))
+
+/* Declare a fully scheduled rsa key. Size is the size in bytes of each ccn in
+ the key. For example, to declare (on the stack or in a struct) a 1021-bit
+ rsa public key named foo, use ccrsa_pub_ctx_decl(ccn_sizeof(1021), foo).
+ */
+#define ccrsa_full_ctx_decl(_size_, _name_) cc_ctx_decl(struct ccrsa_full_ctx, ccrsa_full_ctx_size(_size_), _name_)
+#define ccrsa_full_ctx_clear(_size_, _name_) cc_clear(ccrsa_full_ctx_size(_size_), _name_)
+#define ccrsa_pub_ctx_decl(_size_, _name_) cc_ctx_decl(struct ccrsa_pub_ctx, ccrsa_pub_ctx_size(_size_), _name_)
+#define ccrsa_pub_ctx_clear(_size_, _name_) cc_clear(ccrsa_pub_ctx_size(_size_), _name_)
+
+// Accessors to ccrsa full and public key fields.
+// The offsets are computed using pb_ccn. If any object other than ccrsa_full_ctx_t
+// or ccrsa_pub_ctx_t is passed to the macros, a compiler error is generated.
+
+
+
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+//#define ccrsa_ctx_zm(_ctx_) (((ccrsa_pub_ctx_t)(_ctx_)).zp)
+
+ CC_CONST CC_INLINE cczp_t ccrsa_ctx_zm(ccrsa_full_ctx_t _ctx_) { return ((cczp_t)(struct cczp *)((_ctx_).full)); }
+ CC_CONST CC_INLINE cc_unit *ccrsa_ctx_m(ccrsa_full_ctx_t _ctx_){ return ((_ctx_).full->pb_ccn);}
+ #define ccrsa_ctx_n(_ctx_) (ccrsa_ctx_zm(_ctx_).zp->n)
+#else
+ #define ccrsa_ctx_zm(_ctx_) ((cczp_t)(_ctx_))
+ #define ccrsa_ctx_n(_ctx_) (ccrsa_ctx_zm(_ctx_)->n)
+ #define ccrsa_ctx_m(_ctx_) ((_ctx_)->pb_ccn)
+#endif
+
+#define ccrsa_ctx_e(_ctx_) (ccrsa_ctx_m(_ctx_) + 2 * ccrsa_ctx_n(_ctx_) + 1)
+#define ccrsa_ctx_d(_ctx_) (ccrsa_ctx_m(_ctx_) + 3 * ccrsa_ctx_n(_ctx_) + 1)
+
+// accessors to ccrsa private key fields
+// The offsets are computed using pv_ccn. If any object other than ccrsa_priv_ctx_t
+// is passed to the macros, a compiler error is generated.
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+
+/* rvalue accessors to ccrsa key fields. */
+CC_CONST CC_INLINE
+ccrsa_priv_ctx_t ccrsa_get_private_ctx_ptr(ccrsa_full_ctx_t fk) {
+ cc_unit *p = (cc_unit *)fk.full;
+ cc_size p_size = ccrsa_ctx_n(fk);
+ p += ccn_nof_size(ccrsa_pub_ctx_size(ccn_sizeof_n(p_size))) + p_size;
+ ccrsa_priv_ctx *priv = (ccrsa_priv_ctx *)p;
+ return (ccrsa_priv_ctx_t)priv;
+}
+
+CC_CONST CC_INLINE
+ccrsa_pub_ctx_t ccrsa_ctx_public(ccrsa_full_ctx_t fk) {
+ return (ccrsa_pub_ctx_t) fk.full;
+}
+
+#define ccrsa_ctx_private_zp(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp)
+#define ccrsa_ctx_private_zq(FK) ((cczp_t)((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 2 * ccrsa_ctx_private_zp(FK).zp->n + 1))
+#define ccrsa_ctx_private_dp(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 4 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_dq(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 5 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_qinv(FK) ((ccrsa_get_private_ctx_ptr(FK)).zp.zp->ccn + 6 * ccrsa_ctx_private_zp(FK).zp->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+
+#else
+#define ccrsa_ctx_private_zp(FK) ((cczp_t)ccrsa_get_private_ctx_ptr(FK))
+#define ccrsa_ctx_private_zq(FK) ((cczp_t)((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 2 * ccrsa_ctx_private_zp(FK)->n + 1))
+#define ccrsa_ctx_private_dp(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 4 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_dq(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 5 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+#define ccrsa_ctx_private_qinv(FK) ((ccrsa_get_private_ctx_ptr(FK))->pv_ccn + 6 * ccrsa_ctx_private_zp(FK)->n + 2 + ccn_nof_size(sizeof(struct cczp)))
+
+CC_CONST CC_INLINE
+ccrsa_priv_ctx_t ccrsa_get_private_ctx_ptr(ccrsa_full_ctx_t fk) {
+ ccrsa_priv_ctx_t priv = (ccrsa_priv_ctx_t)(ccrsa_ctx_d(fk)+ccrsa_ctx_n(fk));
+ return priv;
+}
+
+/*!
+ @function ccrsa_ctx_public
+ @abstract Gets the public key from a full key
+ @param fk RSA full key
+ @result Returns the RSA public key
+ */
+CC_CONST CC_INLINE
+ccrsa_pub_ctx_t ccrsa_ctx_public(ccrsa_full_ctx_t fk) {
+ return (ccrsa_pub_ctx_t) fk;
+}
+
+#endif
+
+/* Return exact key bit size */
+static inline size_t
+ccrsa_pubkeylength(ccrsa_pub_ctx_t pubk) {
+ return cczp_bitlen(ccrsa_ctx_zm(pubk));
+}
+
+/* PKCS1 pad_markers */
+#define CCRSA_PKCS1_PAD_SIGN 1
+#define CCRSA_PKCS1_PAD_ENCRYPT 2
+
+/* Initialize key based on modulus and e as cc_unit. key->zp.n must already be set. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void ccrsa_init_pub(ccrsa_pub_ctx_t key, const cc_unit *modulus,
+ const cc_unit *e);
+
+/* Initialize key based on modulus and e as big-endian byte arrays.
+ key->zp.n must already be set. */
+CC_NONNULL_TU((1)) CC_NONNULL((3 ,5))
+int ccrsa_make_pub(ccrsa_pub_ctx_t pubk,
+ size_t exp_nbytes, const uint8_t *exp,
+ size_t mod_nbytes, const uint8_t *mod);
+
+/* Do a public key crypto operation (typically verify or encrypt) on in and put
+ the result in out. Both in and out should be cc_unit aligned and
+ ccrsa_ctx_n(key) units long. Clients should use ccn_read_uint() to
+ convert bytes to cc_units to use for this API. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int ccrsa_pub_crypt(ccrsa_pub_ctx_t key, cc_unit *out, const cc_unit *in);
+
+/* Generate an nbits-bit rsa key pair in key, which should be allocated using
+ ccrsa_full_ctx_decl(ccn_sizeof(1024), rsa_ctx). The unsigned big endian
+ byte array exponent e of length e_size is used as the exponent. It's an
+ error to call this function with an exponent larger than nbits. rng
+ must be a pointer to an initialized struct ccrng_state. */
+CC_NONNULL_TU((2)) CC_NONNULL((4, 5))
+int ccrsa_generate_key(size_t nbits, ccrsa_full_ctx_t rsa_ctx,
+ size_t e_size, const void *e, struct ccrng_state *rng) CC_WARN_RESULT;
+
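An illustrative key generation (not part of this header): a 2048-bit key with public exponent 65537. ccrng() is assumed to be the default RNG accessor from <corecrypto/ccrng.h>.

static int rsa_keygen_example(void)
{
    static const uint8_t e[] = { 0x01, 0x00, 0x01 };   /* 65537, big-endian */
    int err = 0;
    struct ccrng_state *rng = ccrng(&err);              /* assumed accessor */
    if (rng == NULL) return err;

    ccrsa_full_ctx_decl(ccn_sizeof(2048), key);
    int rc = ccrsa_generate_key(2048, key, sizeof(e), e, rng);
    /* ... use the key (e.g. ccrsa_sign_pkcs1v15) before clearing it ... */
    ccrsa_full_ctx_clear(ccn_sizeof(2048), key);
    return rc;
}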
+/* Generate an RSA key in conformance with the FIPS 186-4 standard */
+CC_NONNULL_TU((2)) CC_NONNULL((4, 5, 6))
+int
+ccrsa_generate_fips186_key(size_t nbits, ccrsa_full_ctx_t fk,
+ size_t e_size, const void *eBytes,
+ struct ccrng_state *rng1, struct ccrng_state *rng2) CC_WARN_RESULT;
+
+/* Construct an RSA key from fixed input in conformance with the FIPS 186-4 standard */
+CC_NONNULL_TU((16)) CC_NONNULL((3, 5, 7, 9, 11, 13, 15))
+int
+ccrsa_make_fips186_key(size_t nbits,
+ const cc_size e_n, const cc_unit *e,
+ const cc_size xp1Len, const cc_unit *xp1, const cc_size xp2Len, const cc_unit *xp2,
+ const cc_size xpLen, const cc_unit *xp,
+ const cc_size xq1Len, const cc_unit *xq1, const cc_size xq2Len, const cc_unit *xq2,
+ const cc_size xqLen, const cc_unit *xq,
+ ccrsa_full_ctx_t fk,
+ cc_size *np, cc_unit *r_p,
+ cc_size *nq, cc_unit *r_q,
+ cc_size *nm, cc_unit *r_m,
+ cc_size *nd, cc_unit *r_d);
+
+/*!
+ * @brief ccrsa_sign_pss() generates an RSASSA-PSS signature in the PKCS#1 v2 format
+ *
+ * Note that in RSASSA-PSS, the salt length is part of the signature, as specified in ASN.1:
+ * RSASSA-PSS-params ::= SEQUENCE {
+ * hashAlgorithm [0] HashAlgorithm DEFAULT sha1,
+ * maskGenAlgorithm [1] MaskGenAlgorithm DEFAULT mgf1SHA1,
+ * saltLength [2] INTEGER DEFAULT 20,
+ * trailerField [3] TrailerField DEFAULT trailerFieldBC
+ *
+ *
+ * FIPS 186-4 for RSASSA-PSS:
+ * .... Both signature schemes are approved for use, but additional constraints are imposed beyond those specified in PKCS #1 v2.1.....
+ *
+ * • If nlen = 1024 bits (i.e., 128 bytes), and the output length of the approved hash function output block is 512 bits (i.e., 64 bytes), then the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen – 2,
+ * • Otherwise, the length (in bytes) of the salt (sLen) shall satisfy 0 ≤ sLen ≤ hLen, where hLen is the length of the hash function output block (in bytes).
+ *
+ *
+ * • CAVS test vectors are not very useful in the case of RSA-PSS, because they only validate the exponentiation part of the signature. See: http://csrc.nist.gov/groups/STM/cavp/documents/components/RSA2SP1VS.pdf
+ *
+ * @param key The RSA key
+ * @param hashAlgorithm The hash algorithm used to generate mHash from the original message. It is also used inside the PSS encoding function. This is also the hash function to be used in the mask generation function (MGF)
+ * @param MgfHashAlgorithm The hash algorithm for the mask generation function
+ * @param rng Random number generator used to generate the salt in PSS encoding
+ * @param saltSize Intended length of the salt
+ * @param hSize Length of the message hash. Must be equal to hashAlgorithm->output_size
+ * @param mHash The input that needs to be signed. This is the hash of message M, of length hLen
+ *
+ * @param sig The signature output
+ * @param sigSize The length of generated signature in bytes, which equals the size of the RSA modulus.
+ * @return 0:ok, non-zero:error
+ */
+CC_NONNULL((2,3,5,7,8,9))
+int ccrsa_sign_pss(ccrsa_full_ctx_t key,
+ const struct ccdigest_info* hashAlgorithm, const struct ccdigest_info* MgfHashAlgorithm,
+ size_t saltSize, struct ccrng_state *rng,
+ size_t hSize, const uint8_t *mHash,
+ size_t *sigSize, uint8_t *sig);
+
+CC_NONNULL((2,3,5,7,9))
+int ccrsa_verify_pss(ccrsa_pub_ctx_t key,
+ const struct ccdigest_info* di, const struct ccdigest_info* MgfDi,
+ size_t digestSize, const uint8_t *digest,
+ size_t sigSize, const uint8_t *sig,
+ size_t saltSize, bool *valid);
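+
+/* Example (sketch): sign a SHA-256 digest with RSASSA-PSS and verify it.
+ Assumes key holds a full RSA key with a 2048-bit modulus, rng is an
+ initialized ccrng, and that ccsha256_di() and the ccrsa_ctx_public()
+ accessor are available from the usual corecrypto headers.
+
+     const struct ccdigest_info *di = ccsha256_di();
+     uint8_t mHash[32];                     // SHA-256 of the message, computed elsewhere
+     uint8_t sig[256];                      // modulus size for a 2048-bit key
+     size_t sig_nbytes = sizeof(sig);
+     bool valid = false;
+     int rc = ccrsa_sign_pss(key, di, di,   // same hash for message and MGF1
+                             di->output_size, rng,
+                             sizeof(mHash), mHash,
+                             &sig_nbytes, sig);
+     if (rc == 0)
+         rc = ccrsa_verify_pss(ccrsa_ctx_public(key), di, di,
+                               sizeof(mHash), mHash,
+                               sig_nbytes, sig,
+                               di->output_size, &valid);
+*/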
+
+/*!
+ @function ccrsa_sign_pkcs1v15
+ @abstract RSA signature with PKCS#1 v1.5 format per PKCS#1 v2.2
+
+ @param key Full key
+ @param oid OID describing the type of digest passed in
+ @param digest_len Byte length of the digest
+ @param digest Byte array of digest_len bytes containing the digest
+ @param sig_len Pointer to the number of bytes allocated for sig.
+ Output the exact size of the signature.
+ @param sig Pointer to the allocated buffer of size *sig_len
+ for the output signature
+
+ @result 0 iff successful.
+
+ @discussion Null OID is a special case, required to support RFC 4346 where the padding
+ is based on SHA1+MD5. In general it is not recommended to use a NULL OID,
+ except when strictly required for interoperability
+
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((4, 5, 6))
+int ccrsa_sign_pkcs1v15(ccrsa_full_ctx_t key, const uint8_t *oid,
+ size_t digest_len, const uint8_t *digest,
+ size_t *sig_len, uint8_t *sig);
+
+
+/*!
+ @function ccrsa_verify_pkcs1v15
+ @abstract RSA signature verification with PKCS#1 v1.5 format per PKCS#1 v2.2
+
+ @param key Public key
+ @param oid OID describing the type of digest passed in
+ @param digest_len Byte length of the digest
+ @param digest Byte array of digest_len bytes containing the digest
+ @param sig_len Number of bytes of the signature sig.
+ @param sig Pointer to the signature buffer of sig_len
+ @param valid Output boolean, true if the signature is valid.
+
+ @result 0 iff successful.
+
+ @discussion Null OID is a special case, required to support RFC 4346 where the padding
+ is based on SHA1+MD5. In general it is not recommended to use a NULL OID,
+ except when strictly required for interoperability
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((4, 6, 7))
+int ccrsa_verify_pkcs1v15(ccrsa_pub_ctx_t key, const uint8_t *oid,
+ size_t digest_len, const uint8_t *digest,
+ size_t sig_len, const uint8_t *sig,
+ bool *valid);
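+
+/* Example (sketch): PKCS#1 v1.5 signature over a SHA-256 digest and its
+ verification. sha256_oid stands for the DER-encoded OID of SHA-256 and key
+ for a full RSA key with a 2048-bit modulus; both are assumptions of this
+ sketch, as is the ccrsa_ctx_public() accessor.
+
+     uint8_t digest[32];                    // SHA-256 of the message
+     uint8_t sig[256];
+     size_t sig_len = sizeof(sig);
+     bool valid = false;
+     int rc = ccrsa_sign_pkcs1v15(key, sha256_oid,
+                                  sizeof(digest), digest, &sig_len, sig);
+     if (rc == 0)
+         rc = ccrsa_verify_pkcs1v15(ccrsa_ctx_public(key), sha256_oid,
+                                    sizeof(digest), digest, sig_len, sig, &valid);
+*/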
+
+/*!
+ @function ccder_encode_rsa_pub_size
+ @abstract Calculate size of public key export format data package.
+
+ @param key Public key
+
+ @result Returns size required for encoding.
+ */
+
+CC_NONNULL_TU((1))
+size_t ccder_encode_rsa_pub_size(const ccrsa_pub_ctx_t key);
+
+/*!
+ @function ccder_encode_rsa_pub
+ @abstract Export a public key in DER public key format.
+
+ @param key Public key
+ @param der Beginning of output DER buffer
+ @param der_end End of output DER buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+uint8_t *ccder_encode_rsa_pub(const ccrsa_pub_ctx_t key, uint8_t *der, uint8_t *der_end);
+
+
+/*!
+ @function ccder_encode_rsa_priv_size
+ @abstract Calculate size of full key exported in PKCS#1 format.
+
+ @param key Full key
+
+ @result Returns size required for encoding.
+ */
+
+CC_NONNULL_TU((1))
+size_t ccder_encode_rsa_priv_size(const ccrsa_full_ctx_t key);
+
+/*!
+ @function ccder_encode_rsa_priv
+ @abstract Export a full key in PKCS#1 format.
+
+ @param key Full key
+ @param der Beginning of output DER buffer
+ @param der_end End of output DER buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+uint8_t *ccder_encode_rsa_priv(const ccrsa_full_ctx_t key, const uint8_t *der, uint8_t *der_end);
+
+/*!
+ @function ccder_decode_rsa_pub_n
+ @abstract Calculate "n" for a public key imported from a data package.
+ PKCS #1 format
+
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result the "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_pub_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function ccder_decode_rsa_pub
+ @abstract Import a public RSA key from a package in public key format.
+ PKCS #1 format
+
+ @param key Public key (n must be set)
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_pub(const ccrsa_pub_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function ccder_decode_rsa_pub_x509_n
+ @abstract Calculate "n" for a public key imported from a data package in x509 format
+
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result the "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_pub_x509_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function ccder_decode_rsa_pub_x509
+ @abstract Import a public RSA key from a package in x509 format.
+
+ @param key Public key (n must be set)
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_pub_x509(const ccrsa_pub_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+
+/*!
+ @function ccder_decode_rsa_priv_n
+ @abstract Calculate "n" for a private key imported from a data package.
+
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result the "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_NONNULL((1)) CC_NONNULL((2))
+cc_size ccder_decode_rsa_priv_n(const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function ccder_decode_rsa_priv
+ @abstract Import a private RSA key from a package in PKCS#1 format.
+
+ @param key Full key (n must be set)
+ @param der Beginning of input DER buffer
+ @param der_end End of input DER buffer
+
+ @result Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((2)) CC_NONNULL((3))
+const uint8_t *ccder_decode_rsa_priv(const ccrsa_full_ctx_t key, const uint8_t *der, const uint8_t *der_end);
+
+/*!
+ @function ccrsa_export_pub_size
+ @abstract Calculate size of public key exported data package.
+
+ @param key Public key
+
+ @result Returns size required for encoding.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1))
+size_t ccrsa_export_pub_size(const ccrsa_pub_ctx_t key) {
+ return ccder_encode_rsa_pub_size(key);
+}
+
+/*!
+ @function ccrsa_export_pub
+ @abstract Export a public key in public key format.
+
+ @param key Public key
+ @param out_len Allocated size
+ @param out Output buffer
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_export_pub(const ccrsa_pub_ctx_t key, size_t out_len, uint8_t *out);
+/*!
+ @function ccrsa_import_pub_n
+ @abstract Calculate "n" for a public key imported from a data package.
+
+ @param inlen Length of public key package data
+ @param der pointer to public key package data
+
+ @result the "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL((2))
+cc_size ccrsa_import_pub_n(size_t inlen, const uint8_t *der) {
+ cc_size size = ccder_decode_rsa_pub_x509_n(der, der + inlen);
+ if(size == 0) {
+ size = ccder_decode_rsa_pub_n(der, der + inlen);
+ }
+ return size;
+}
+
+/*!
+ @function ccrsa_import_pub
+ @abstract Import a public RSA key from a package in public key format.
+
+ @param key Public key (n must be set)
+ @param inlen Length of public key package data
+ @param der pointer to public key package data
+
+ @result Key is initialized using the data in the public key message.
+ */
+
+CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_import_pub(ccrsa_pub_ctx_t key, size_t inlen, const uint8_t *der);
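+
+/* Example (sketch): round-trip a public key through the DER public key format.
+ pub is an existing initialized public key; ccrsa_pub_ctx_decl() and the
+ ccrsa_ctx_n() accessor are assumed to be available, mirroring the full-key
+ declaration macro referenced elsewhere in this header.
+
+     size_t der_len = ccrsa_export_pub_size(pub);
+     uint8_t der[der_len];
+     if (ccrsa_export_pub(pub, der_len, der) == 0) {
+         cc_size n = ccrsa_import_pub_n(der_len, der);  // "n" for the new key
+         ccrsa_pub_ctx_decl(ccn_sizeof_n(n), pub2);
+         ccrsa_ctx_n(pub2) = n;                         // n must be set before import
+         int rc = ccrsa_import_pub(pub2, der_len, der);
+     }
+*/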
+
+/*!
+ @function ccrsa_export_priv_size
+ @abstract Calculate size of full key exported in PKCS#1 format.
+
+ @param key Full key
+
+ @result Returns size required for encoding.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1))
+size_t ccrsa_export_priv_size(const ccrsa_full_ctx_t key) {
+ return ccder_encode_rsa_priv_size(key);
+}
+
+/*!
+ @function ccrsa_export_priv
+ @abstract Export a full key in PKCS#1 format.
+
+ @param key Full key
+ @param out_len Allocated size
+ @param out Output buffer
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_export_priv(const ccrsa_full_ctx_t key, size_t out_len, uint8_t *out) {
+ return (ccder_encode_rsa_priv(key, out, out+out_len) != out);
+}
+
+/*!
+ @function ccrsa_import_priv_n
+ @abstract Calculate "n" for a full key imported from data in PKCS#1 format.
+
+ @param inlen Length of PKCS#1 package data
+ @param der pointer to PKCS#1 package data
+
+ @result the "n" of the RSA key that would result from the import. This can be used
+ to declare the key itself.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL((2))
+cc_size ccrsa_import_priv_n(size_t inlen, const uint8_t *der) {
+ return ccder_decode_rsa_priv_n(der, der + inlen);
+}
+
+/*!
+ @function ccrsa_import_priv
+ @abstract Import a full RSA key from a package in PKCS#1 format.
+
+ @param key Full key (n must be set)
+ @param inlen Length of PKCS#1 package data
+ @param der pointer to PKCS#1 package data
+
+ @result Key is initialized using the data in the PKCS#1 message.
+ */
+
+CC_CONST CC_INLINE CC_NONNULL_TU((1)) CC_NONNULL((3))
+int ccrsa_import_priv(ccrsa_full_ctx_t key, size_t inlen, const uint8_t *der) {
+ return (ccder_decode_rsa_priv(key, der, der+inlen) == NULL);
+}
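+
+/* Example (sketch): PKCS#1 round trip of a full key. fk is an existing full
+ key; ccrsa_full_ctx_decl() follows the pattern described above, and the
+ ccrsa_ctx_n() accessor is an assumption of this sketch.
+
+     size_t plen = ccrsa_export_priv_size(fk);
+     uint8_t pkcs1[plen];
+     if (ccrsa_export_priv(fk, plen, pkcs1) == 0) {
+         cc_size n = ccrsa_import_priv_n(plen, pkcs1);
+         ccrsa_full_ctx_decl(ccn_sizeof_n(n), fk2);
+         ccrsa_ctx_n(fk2) = n;                          // n must be set before import
+         int rc = ccrsa_import_priv(fk2, plen, pkcs1);
+     }
+*/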
+
+
+CC_NONNULL_TU((1)) CC_NONNULL2
+int ccrsa_get_pubkey_components(const ccrsa_pub_ctx_t pubkey, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength);
+
+CC_NONNULL_TU((1)) CC_NONNULL2
+int ccrsa_get_fullkey_components(const ccrsa_full_ctx_t key, uint8_t *modulus, size_t *modulusLength, uint8_t *exponent, size_t *exponentLength,
+ uint8_t *p, size_t *pLength, uint8_t *q, size_t *qLength);
+
+
+/*!
+ @function ccrsa_dump_public_key
+ @abstract Print an RSA public key to the console (printf)
+
+ @param key Public key
+ */
+void ccrsa_dump_public_key(ccrsa_pub_ctx_t key);
+
+/*!
+ @function ccrsa_dump_full_key
+ @abstract Print an RSA private key to the console (printf)
+
+ @param key Full key
+ */
+void ccrsa_dump_full_key(ccrsa_full_ctx_t key);
+
+#endif /* _CORECRYPTO_CCRSA_H_ */
extern const struct ccdigest_info ccsha512_vng_intel_AVX1_di;
extern const struct ccdigest_info ccsha512_vng_intel_SupplementalSSE3_di;
#endif
+extern const struct ccdigest_info ccsha224_vng_intel_SupplementalSSE3_di;
extern const struct ccdigest_info ccsha256_vng_intel_SupplementalSSE3_di;
#endif
#if CCSHA2_VNG_ARMV7NEON
+extern const struct ccdigest_info ccsha224_vng_armv7neon_di;
extern const struct ccdigest_info ccsha256_vng_armv7neon_di;
extern const struct ccdigest_info ccsha384_vng_arm64_di;
extern const struct ccdigest_info ccsha384_vng_armv7neon_di;
/* SHA224 */
#define CCSHA224_OUTPUT_SIZE 28
extern const struct ccdigest_info ccsha224_ltc_di;
-#if CCSHA2_VNG_INTEL
-extern const struct ccdigest_info ccsha224_vng_intel_SupplementalSSE3_di;
-#endif
-#if CCSHA2_VNG_ARMV7NEON
-extern const struct ccdigest_info ccsha224_vng_armv7neon_di;
-#endif
/* SHA512 */
#define CCSHA512_BLOCK_SIZE 128
--- /dev/null
+/*
+ * cczp.h
+ * corecrypto
+ *
+ * Created on 11/16/2010
+ *
+ * Copyright (c) 2010,2011,2012,2013,2014,2015 Apple Inc. All rights reserved.
+ *
+ */
+
+#ifndef _CORECRYPTO_CCZP_H_
+#define _CORECRYPTO_CCZP_H_
+
+#include <corecrypto/ccn.h>
+#include <corecrypto/ccrng.h>
+
+/*
+ Don't use cczp_hd struct directly, except in static tables such as elliptic curve parameter definitions.
+
+ Declare cczp objects using cczp_decl_n(). It allocates cc_unit arrays of the length returned by either cczp_nof_n() or cczp_short_nof_n().
+*/
+
+struct cczp;
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+
+typedef union {
+ cc_unit *u;
+ struct cczp *zp;
+ //cczp_const_t czp; //for automatic type cast
+ //struct cczp_prime *prime;
+} cczp_t __attribute__((transparent_union));
+
+typedef union {
+ const cc_unit *u;
+ const struct cczp *zp;
+ //const struct cczp_prime *prime;
+ cczp_t _nczp;
+} cczp_const_t __attribute__((transparent_union));
+
+#else
+ typedef struct cczp* cczp_t;
+ typedef const struct cczp* cczp_const_t;
+#endif
+typedef void (*ccmod_func_t)(cczp_const_t zp, cc_unit *r, const cc_unit *s, cc_ws_t ws);
+
+// keep cczp_hd and cczp structures consistent
+// cczp_hd is typecasted to cczp to read EC curve params
+// options field is to specify Montgomery arithmetic, bit field, etc
+// make sure n is the first element; see the ccrsa_ctx_n macro
+#define __CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \
+cc_size pre ## n;\
+cc_unit pre ## options;\
+ccmod_func_t pre ## mod_prime;
+
+#define __CCZP_ELEMENTS_DEFINITIONS(pre) \
+__CCZP_HEADER_ELEMENTS_DEFINITIONS(pre) \
+cc_unit pre ## ccn[];
+
+//cczp_hd must be defined separately, without the variable length array ccn[], because it is used in structures such as ccdh_gp_decl_n
+struct cczp_hd{
+ __CCZP_HEADER_ELEMENTS_DEFINITIONS()
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+struct cczp {
+ __CCZP_ELEMENTS_DEFINITIONS()
+} CC_ALIGNED(CCN_UNIT_SIZE);
+
+
+/* Return the size of a cczp where each ccn is _size_ bytes. */
+#define cczp_size(_size_) (sizeof(struct cczp) + ccn_sizeof_n(1) + 2 * (_size_))
+
+/* Return number of units that a struct cczp needs to be in units for a prime
+ size of N units. This is large enough for all operations. */
+#define cczp_nof_n(_n_) (ccn_nof_size(sizeof(struct cczp)) + 1 + 2 * (_n_))
+
+/* Return number of units that a struct cczp needs to be in units for a prime
+ size of _n_ units. The _short variant does not have room for CCZP_RECIP,
+ so it can not be used with cczp_mod, cczp_mul, cczp_sqr. It can be used
+ with cczp_add, cczp_sub, cczp_div2, cczp_mod_inv. */
+#define cczp_short_nof_n(_n_) (ccn_nof_size(sizeof(struct cczp)) + (_n_))
+
+#define cczp_decl_n(_n_, _name_) cc_ctx_decl(struct cczp, ccn_sizeof_n(cczp_nof_n(_n_)), _name_)
+#define cczp_short_decl_n(_n_, _name_) cc_ctx_decl(struct cczp_short, ccn_sizeof_n(cczp_short_nof_n(_n_)), _name_)
+
+#define cczp_clear_n(_n_, _name_) cc_clear(ccn_sizeof_n(cczp_nof_n(_n_)), _name_)
+#define cczp_short_clear_n(_n_, _name_) cc_clear(ccn_sizeof_n(cczp_short_nof_n(_n_)), _name_)
+
+#if CORECRYPTO_USE_TRANSPARENT_UNION
+ #define CCZP_N(ZP) (((cczp_t)(ZP)).zp->n)
+ #define CCZP_MOD(ZP) (((cczp_t)(ZP)).zp->mod_prime)
+ #define CCZP_PRIME(ZP) (((cczp_t)(ZP)).zp->ccn)
+ #define CCZP_RECIP(ZP) (((cczp_t)(ZP)).zp->ccn + cczp_n(ZP))
+ #define CCZP_OPS(ZP) ((ZP).zp->options)
+ #define CCZP_MOD_PRIME(ZP) CCZP_MOD(ZP)
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_size cczp_n(cczp_const_t zp) {
+ return zp.zp->n;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_unit cczp_options(cczp_const_t zp) {
+ return zp.zp->options;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline ccmod_func_t cczp_mod_prime(cczp_const_t zp) {
+ return zp.zp->mod_prime;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline const cc_unit *cczp_prime(cczp_const_t zp) {
+ return zp.zp->ccn;
+}
+
+/* Return a pointer to the Reciprocal or Montgomery constant of zp, which is
+ allocated cczp_n(zp) + 1 units long. */
+CC_CONST CC_NONNULL_TU((1))
+
+static inline const cc_unit *cczp_recip(cczp_const_t zp) {
+ return zp.zp->ccn + zp.zp->n;
+}
+
+#else
+ #define CCZP_N(ZP) ((ZP)->n)
+ #define CCZP_MOD(ZP) ((ZP)->mod_prime)
+ #define CCZP_MOD_PRIME(ZP) CCZP_MOD(ZP)
+ #define CCZP_PRIME(ZP) ((ZP)->ccn)
+ #define CCZP_RECIP(ZP) ((ZP)->ccn + CCZP_N(ZP))
+ #define CCZP_OPS(ZP) ((ZP)->options)
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_size cczp_n(cczp_const_t zp) {
+ return zp->n;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline cc_unit cczp_options(cczp_const_t zp) {
+ return zp->options;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline ccmod_func_t cczp_mod_prime(cczp_const_t zp) {
+ return zp->mod_prime;
+}
+
+CC_CONST CC_NONNULL_TU((1))
+static inline const cc_unit *cczp_prime(cczp_const_t zp) {
+ return zp->ccn;
+}
+
+/* Return a pointer to the Reciprocal or Montgomery constant of zp, which is
+ allocated cczp_n(zp) + 1 units long. */
+CC_CONST CC_NONNULL_TU((1))
+
+static inline const cc_unit *cczp_recip(cczp_const_t zp) {
+ return zp->ccn + zp->n;
+}
+
+#endif
+
+
+CC_CONST CC_NONNULL_TU((1))
+CC_INLINE size_t cczp_bitlen(cczp_const_t zp) {
+ return ccn_bitlen(cczp_n(zp), cczp_prime(zp));
+}
+
+
+/* Ensure both cczp_mod_prime(zp) and cczp_recip(zp) are valid. cczp_n and
+ cczp_prime must have been previously initialized. */
+CC_NONNULL_TU((1))
+void cczp_init(cczp_t zp);
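+
+/* Example (sketch): set up a cczp for a prime given as a big-endian byte
+ string. p_bytes/p_nbytes are illustrative names for the caller's input.
+ Only n and the prime need to be valid before calling cczp_init().
+
+     cc_size n = ccn_nof_size(p_nbytes);
+     cczp_decl_n(n, zp);
+     CCZP_N(zp) = n;
+     ccn_read_uint(n, CCZP_PRIME(zp), p_nbytes, p_bytes);
+     cczp_init(zp);            // fills in mod_prime and the reciprocal
+*/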
+
+/* Compute r = s2n mod cczp_prime(zp). Will write cczp_n(zp)
+ units to r and reads 2 * cczp_n(zp) units from s2n. If r and s2n are not
+ identical they must not overlap. Before calling this function either
+ cczp_init(zp) must have been called or both CCZP_MOD_PRIME((cc_unit *)zp)
+ and CCZP_RECIP((cc_unit *)zp) must be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_mod(cczp_const_t zp, cc_unit *r, const cc_unit *s2n, cc_ws_t ws);
+
+/* Compute r = s mod cczp_prime(zp). Will write cczp_n(zp)
+ units to r and reads ns units from s. If r and s are not
+ identical they must not overlap. Before calling this function either
+ cczp_init(zp) must have been called or both CCZP_MOD_PRIME((cc_unit *)zp)
+ and CCZP_RECIP((cc_unit *)zp) must be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 4))
+
+int cczp_modn(cczp_const_t zp, cc_unit *r, cc_size ns, const cc_unit *s);
+
+/* Compute r = x * y mod cczp_prime(zp). Will write cczp_n(zp) units to r
+ and reads cczp_n(zp) units from both x and y. If r and x are not
+ identical they must not overlap, The same holds for r and y. Before
+ calling this function either cczp_init(zp) must have been called or both
+ CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must be
+ initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_mul(cczp_const_t zp, cc_unit *t, const cc_unit *x, const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_mul_ws(cczp_const_t zp, cc_unit *t, const cc_unit *x, const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x * x mod cczp_prime(zp). Will write cczp_n(zp) units to r
+ and reads cczp_n(zp) units from x. If r and x are not identical they must
+ not overlap. Before calling this function either cczp_init(zp) must have
+ been called or both CCZP_MOD_PRIME((cc_unit *)zp) and
+ CCZP_RECIP((cc_unit *)zp) must be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_sqr(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_sqr_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x, cc_ws_t ws);
+
+/* Compute r = x^(1/2) mod cczp_prime(zp). Will write cczp_n(zp) units to r
+ and reads cczp_n(zp) units from x. If r and x are not identical they must
+ not overlap. Before calling this function either cczp_init(zp) must have
+ been called or both CCZP_MOD_PRIME((cc_unit *)zp) and
+ CCZP_RECIP((cc_unit *)zp) must be initialized some other way.
+ Only support prime = 3 mod 4 */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int cczp_sqrt(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/* Compute r = m ^ e mod cczp_prime(zp), using Montgomery ladder.
+ - writes cczp_n(zp) units to r
+ - reads cczp_n(zp) units from m and e
+ - if r and m are not identical they must not overlap.
+ - r and e must not overlap nor be identical.
+ - before calling this function either cczp_init(zp) must have been called
+ or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+ be initialized some other way.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_power(cczp_const_t zp, cc_unit *r, const cc_unit *m,
+ const cc_unit *e);
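+
+/* Example (sketch): modular exponentiation r = m^e mod p using the
+ Montgomery-ladder variant. Assumes zp was prepared with cczp_init() and
+ that m has already been reduced mod the prime.
+
+     cc_size n = cczp_n(zp);
+     cc_unit r[n], m[n], e[n];
+     // ... fill m and e, e.g. with ccn_read_uint() ...
+     cczp_power(zp, r, m, e);  // r and e must not overlap
+*/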
+
+/* Compute r = m ^ e mod cczp_prime(zp), using Square Square Multiply Always.
+ - writes cczp_n(zp) units to r
+ - reads cczp_n(zp) units from m and e
+ - if r and m are not identical they must not overlap.
+ - r and e must not overlap nor be identical.
+ - before calling this function either cczp_init(zp) must have been called
+ or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+ be initialized some other way.
+
+ Important: This function is intended to be constant time but is more likely
+ to leak information through the memory cache. Only use it with randomized inputs.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+int cczp_power_ssma(cczp_const_t zp, cc_unit *r, const cc_unit *m,
+ const cc_unit *e);
+
+int cczp_power_ssma_ws(cc_ws_t ws, cczp_const_t zp, cc_unit *r, const cc_unit *s, const cc_unit *e);
+
+/* Compute r = m ^ e mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+ reads cczp_n(zp) units from m. Reads ebitlen bits from e.
+ m must be <= cczp_prime(zp). If r and m are not identical they must not
+ overlap. r and e must not overlap nor be identical.
+ Before calling this function either cczp_init(zp) must have been called
+ or both CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must
+ be initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 5))
+void cczp_powern(cczp_const_t zp, cc_unit *r, const cc_unit *s,
+ size_t ebitlen, const cc_unit *e);
+
+/* Compute r = x + y mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+ reads cczp_n(zp) units from x and y. If r and x are not identical
+ they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+ Can be used with cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_add(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+ const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_add_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+ const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x - y mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+ reads cczp_n(zp) units from x and y. If r and x are not identical
+ they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+ Can be used with cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_sub(cczp_const_t zp, cc_unit *r, const cc_unit *x, const cc_unit *y);
+
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4, 5))
+void cczp_sub_ws(cczp_const_t zp, cc_unit *r, const cc_unit *x,
+ const cc_unit *y, cc_ws_t ws);
+
+/* Compute r = x / 2 mod cczp_prime(zp). Will write cczp_n(zp) units to r and
+ reads cczp_n(zp) units from x. If r and x are not identical
+ they must not overlap. Only cczp_n(zp) and cczp_prime(zp) need to be valid.
+ Can be used with cczp_short_nof_n sized cc_unit array zp. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+void cczp_div2(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/* Compute q = a_2n / cczp_prime(zd) and r = a_2n mod cczp_prime(zd). Will write cczp_n(zd)
+ units to q and r. Will read 2 * cczp_n(zd) units from a_2n. If r and a_2n
+ are not identical they must not overlap. Before calling this function
+ either cczp_init(zp) must have been called or both
+ CCZP_MOD_PRIME((cc_unit *)zp) and CCZP_RECIP((cc_unit *)zp) must be
+ initialized some other way. */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3, 4))
+void cczp_div(cczp_const_t zd, cc_unit *q, cc_unit *r, const cc_unit *a_2n);
+
+
+/*!
+ @brief cczp_inv(zp, r, x) computes r = x^-1 (mod p) , where p=cczp_prime(zp).
+ @discussion It is a general function and works for any p. It validates the inputs. r and x can overlap. It writes n = cczp_n(zp) units to r, and reads n units from x and p. The output r is overwritten only if the inverse is correctly computed. This function is not constant time in an absolute sense, but it does not have data dependent 'if' statements in the code.
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking cczp_inv().
+ @param x input big integer
+ @param r output big integer
+ @return 0 if inverse exists and correctly computed.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+
+int cczp_inv(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/*!
+ @brief cczp_inv_odd(zp, r, x) computes r = x^-1 (mod p) , where p=cczp_prime(zp) is an odd number.
+ @discussion r and x can overlap.
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking it.
+ @param x input big integer
+ @param r output big integer
+ @return 0 if successful
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int cczp_inv_odd(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+/*!
+ @brief cczp_inv_field(zp, r, x) computes r = x^-1 (mod p) , where p=cczp_prime(zp) is a prime number.
+ @discussion r and x must NOT overlap. The execution time of the function is independent of the value of the input x. It works only if p is a field, that is, when p is a prime. It supports Montgomery and non-Montgomery forms of zp. It leaks the value of the prime and should only be used for public (not secret) primes (ex. Elliptic Curves)
+
+ @param zp The input zp. cczp_n(zp) and cczp_prime(zp) need to be valid. cczp_init(zp) need not be called before invoking cczp_inv_field().
+ @param x input big integer
+ @param r output big integer
+ @return 0 if inverse exists and correctly computed.
+ */
+CC_NONNULL_TU((1)) CC_NONNULL((2, 3))
+int cczp_inv_field(cczp_const_t zp, cc_unit *r, const cc_unit *x);
+
+#endif /* _CORECRYPTO_CCZP_H_ */
header files installed in all the paths described above in (1) will not
have code enclosed within this macro.
- b. `KERNEL_PRIVATE` : Same as PRIVATE
+ b. `KERNEL_PRIVATE` : If true, code is available to all of the xnu kernel and Apple
+ internal kernel extensions.
c. `BSD_KERNEL_PRIVATE` : If true, code is available to the xnu/bsd part of
the kernel and is not available to rest of the kernel, kernel extensions
dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
dtrace_optval_t);
-static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
+static int dtrace_ecb_create_enable(dtrace_probe_t *, void *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
static int dtrace_canload_remains(uint64_t, size_t, size_t *,
dtrace_mstate_t *, dtrace_vstate_t *);
static int
dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
- zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
+ zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *, void *), void *arg1, void *arg2)
{
dtrace_probe_t template, *probe;
dtrace_hash_t *hash = NULL;
if (pkp->dtpk_id != DTRACE_IDNONE) {
if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
- if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
+ if ((*matched)(probe, arg1, arg2) == DTRACE_MATCH_FAIL)
return (DTRACE_MATCH_FAIL);
nmatched++;
}
nmatched++;
- if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
+ if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
if (rc == DTRACE_MATCH_FAIL)
return (DTRACE_MATCH_FAIL);
break;
nmatched++;
- if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
+ if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
if (rc == DTRACE_MATCH_FAIL)
return (DTRACE_MATCH_FAIL);
break;
}
static int
-dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
+dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg1, void *arg2)
{
- *((dtrace_id_t *)arg) = probe->dtpr_id;
+#pragma unused(arg2)
+ *((dtrace_id_t *)arg1) = probe->dtpr_id;
return (DTRACE_MATCH_DONE);
}
lck_mtx_lock(&dtrace_lock);
match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
- dtrace_probe_lookup_match, &id);
+ dtrace_probe_lookup_match, &id, NULL);
lck_mtx_unlock(&dtrace_lock);
ASSERT(match == 1 || match == 0);
}
static int
-dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
+dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab, dtrace_ecbdesc_t *ep)
{
dtrace_probekey_t pkey;
uint32_t priv;
* If we're passed a NULL description, we're being asked to
* create an ECB with a NULL probe.
*/
- (void) dtrace_ecb_create_enable(NULL, enab);
+ (void) dtrace_ecb_create_enable(NULL, enab, ep);
return (0);
}
&priv, &uid, &zoneid);
return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
- enab));
+ enab, ep));
}
/*
}
static void
-dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
+dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
{
uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
dof_hdr_t *dof = (dof_hdr_t *)daddr;
*/
dtrace_dofprov2hprov(&dhpv, provider, strtab);
- if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
+ if ((parg = mops->dtms_provide_proc(meta->dtm_arg, &dhpv, p)) == NULL)
return;
meta->dtm_count++;
}
static void
-dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
+dtrace_helper_provide(dof_helper_t *dhp, proc_t *p)
{
uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
dof_hdr_t *dof = (dof_hdr_t *)daddr;
if (sec->dofs_type != DOF_SECT_PROVIDER)
continue;
- dtrace_helper_provide_one(dhp, sec, pid);
+ dtrace_helper_provide_one(dhp, sec, p);
}
}
static void
-dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
+dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
{
uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
dof_hdr_t *dof = (dof_hdr_t *)daddr;
*/
dtrace_dofprov2hprov(&dhpv, provider, strtab);
- mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
+ mops->dtms_remove_proc(meta->dtm_arg, &dhpv, p);
meta->dtm_count--;
}
static void
-dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
+dtrace_helper_provider_remove(dof_helper_t *dhp, proc_t *p)
{
uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
dof_hdr_t *dof = (dof_hdr_t *)daddr;
if (sec->dofs_type != DOF_SECT_PROVIDER)
continue;
- dtrace_helper_provider_remove_one(dhp, sec, pid);
+ dtrace_helper_provider_remove_one(dhp, sec, p);
}
}
if (mops == NULL ||
mops->dtms_create_probe == NULL ||
- mops->dtms_provide_pid == NULL ||
- mops->dtms_remove_pid == NULL) {
+ mops->dtms_provide_proc == NULL ||
+ mops->dtms_remove_proc == NULL) {
cmn_err(CE_WARN, "failed to register meta-register %s: "
"invalid ops", name);
return (EINVAL);
while (help != NULL) {
for (i = 0; i < help->dthps_nprovs; i++) {
+ proc_t *p = proc_find(help->dthps_pid);
+ if (p == PROC_NULL)
+ continue;
dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
- help->dthps_pid);
+ p);
+ proc_rele(p);
}
next = help->dthps_next;
}
static int
-dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
+dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg1, void *arg2)
{
dtrace_ecb_t *ecb;
- dtrace_enabling_t *enab = arg;
+ dtrace_enabling_t *enab = arg1;
+ dtrace_ecbdesc_t *ep = arg2;
dtrace_state_t *state = enab->dten_vstate->dtvs_state;
ASSERT(state != NULL);
- if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
+ if (probe != NULL && ep != NULL && probe->dtpr_gen < ep->dted_probegen) {
/*
* This probe was created in a generation for which this
* enabling has previously created ECBs; we don't want to
* If a provider failed to enable a probe then get out and
* let the consumer know we failed.
*/
- if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
+ if ((matched = dtrace_probe_enable(&ep->dted_probe, enab, ep)) < 0)
return (EBUSY);
total_matched += matched;
return (enab->dten_error);
}
+
+ ep->dted_probegen = dtrace_probegen;
}
- enab->dten_probegen = dtrace_probegen;
if (nmatched != NULL)
*nmatched = total_matched;
for (i = 0; i < enab->dten_ndesc; i++) {
enab->dten_current = enab->dten_desc[i];
- (void) dtrace_probe_enable(NULL, enab);
+ (void) dtrace_probe_enable(NULL, enab, NULL);
}
enab->dten_primed = 1;
if (dtrace_meta_pid != NULL) {
ASSERT(dtrace_deferred_pid == NULL);
dtrace_helper_provider_remove(&prov->dthp_prov,
- p->p_pid);
+ p);
}
lck_mtx_unlock(&dtrace_meta_lock);
lck_mtx_unlock(&dtrace_lock);
- dtrace_helper_provide(dofhp, p->p_pid);
+ dtrace_helper_provide(dofhp, p);
} else {
/*
for (i = 0; i < help->dthps_nprovs; i++) {
dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
- p->p_pid);
+ p);
}
}
for (i = 0; i < help->dthps_nprovs; i++) {
dtrace_helper_provider_remove(
- &help->dthps_provs[i]->dthp_prov, p->p_pid);
+ &help->dthps_provs[i]->dthp_prov, p);
}
} else {
lck_mtx_lock(&dtrace_lock);
static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
-static fasttrap_provider_t *fasttrap_provider_lookup(pid_t, fasttrap_provider_type_t, const char *,
+static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
const dtrace_pattr_t *);
-static void fasttrap_provider_retire(pid_t, const char *, int);
+static void fasttrap_provider_retire(proc_t*, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);
static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
* We clean up the pid provider for this process here; user-land
* static probes are handled by the meta-provider remove entry point.
*/
- fasttrap_provider_retire(p->p_pid, FASTTRAP_PID_NAME, 0);
+ fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);
/*
* APPLE NOTE: We also need to remove any aliased providers.
* XXX optimization: track which provider types are instantiated
* and only retire as needed.
*/
- fasttrap_provider_retire(p->p_pid, FASTTRAP_OBJC_NAME, 0);
- fasttrap_provider_retire(p->p_pid, FASTTRAP_ONESHOT_NAME, 0);
+ fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
+ fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);
/*
* This should be called after it is no longer possible for a user
}
/*
- * Lookup a fasttrap-managed provider based on its name and associated pid.
+ * Lookup a fasttrap-managed provider based on its name and associated proc.
+ * A reference to the proc must be held for the duration of the call.
* If the pattr argument is non-NULL, this function instantiates the provider
* if it doesn't exist otherwise it returns NULL. The provider is returned
* with its lock held.
*/
static fasttrap_provider_t *
-fasttrap_provider_lookup(pid_t pid, fasttrap_provider_type_t provider_type, const char *name,
+fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
const dtrace_pattr_t *pattr)
{
+ pid_t pid = p->p_pid;
fasttrap_provider_t *fp, *new_fp = NULL;
fasttrap_bucket_t *bucket;
char provname[DTRACE_PROVNAMELEN];
- proc_t *p;
cred_t *cred;
ASSERT(strlen(name) < sizeof (fp->ftp_name));
lck_mtx_unlock(&bucket->ftb_mtx);
/*
- * Make sure the process exists, isn't a child created as the result
+ * Make sure the process isn't a child created as the result
* of a vfork(2), and isn't a zombie (but may be in fork).
*/
- if ((p = proc_find(pid)) == NULL) {
- return NULL;
- }
proc_lock(p);
if (p->p_lflag & (P_LINVFORK | P_LEXIT)) {
proc_unlock(p);
- proc_rele(p);
return (NULL);
}
cred = p->p_ucred;
// lck_mtx_unlock(&p->p_crlock);
proc_unlock(p);
- proc_rele(p);
new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
ASSERT(new_fp != NULL);
- new_fp->ftp_pid = pid;
+ new_fp->ftp_pid = p->p_pid;
new_fp->ftp_proc = fasttrap_proc_lookup(pid);
new_fp->ftp_provider_type = provider_type;
}
static void
-fasttrap_provider_retire(pid_t pid, const char *name, int mprov)
+fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
fasttrap_provider_t *fp;
fasttrap_bucket_t *bucket;
ASSERT(strlen(name) < sizeof (fp->ftp_name));
- bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
+ bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(p->p_pid, name)];
lck_mtx_lock(&bucket->ftb_mtx);
for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
- if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
+ if (fp->ftp_pid == p->p_pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
!fp->ftp_retired)
break;
}
/*
* We don't have to worry about invalidating the same provider twice
- * since fasttrap_provider_lookup() will ignore provider that have
+ * since fasttrap_provider_lookup() will ignore providers that have
* been marked as retired.
*/
dtrace_invalidate(provid);
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
+ proc_t *p;
fasttrap_provider_t *provider;
fasttrap_probe_t *pp;
fasttrap_tracepoint_t *tp;
return (EINVAL);
}
- if ((provider = fasttrap_provider_lookup(pdata->ftps_pid, pdata->ftps_provider_type,
+ p = proc_find(pdata->ftps_pid);
+ if (p == PROC_NULL)
+ return (ESRCH);
+
+ if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
provider_name, &pid_attr)) == NULL)
return (ESRCH);
+ proc_rele(p);
/*
* Increment this reference count to indicate that a consumer is
* actively adding a new probe associated with this provider. This
/*ARGSUSED*/
static void *
-fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
+fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
fasttrap_provider_t *provider;
if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
- if ((provider = fasttrap_provider_lookup(pid, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
+ if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
&dhpv->dthpv_pattr)) == NULL) {
cmn_err(CE_WARN, "failed to instantiate provider %s for "
- "process %u", dhpv->dthpv_provname, (uint_t)pid);
+ "process %u", dhpv->dthpv_provname, (uint_t)p->p_pid);
return (NULL);
}
/*ARGSUSED*/
static void
-fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, pid_t pid)
+fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
/*
* provider until that count has dropped to zero. This just puts
* the provider on death row.
*/
- fasttrap_provider_retire(pid, dhpv->dthpv_provname, 1);
+ fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}
static char*
return;
}
- gFasttrapInited = 1;
+ gFasttrapInited = 1;
}
}
else
lr_saved = lr;
- if (refcount)
+ if (refcount) {
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
-
+ }
if (so->so_usecount < 0) {
panic("%s: so=%p usecount=%d lrh= %s\n", __func__,
so, so->so_usecount, solockhistory_nr(so));
if (imgp->ip_origcputype != 0) {
/* Fat header previously matched, don't allow another fat file inside */
- return (-1);
+ error = -1; /* not claimed */
+ goto bad;
}
/* Make sure it's a fat binary */
/*
* Commit to new map.
*
- * Swap the new map for the old, which consumes our new map reference but
- * each leaves us responsible for the old_map reference. That lets us get
- * off the pmap associated with it, and then we can release it.
+ * Swap the new map for the old for target task, which consumes
+ * our new map reference but each leaves us responsible for the
+ * old_map reference. That lets us get off the pmap associated
+ * with it, and then we can release it.
+ *
+ * The map needs to be set on the target task which is different
+ * than current task, thus swap_task_map is used instead of
+ * vm_map_switch.
*/
- old_map = swap_task_map(task, thread, map, !spawn);
+ old_map = swap_task_map(task, thread, map);
vm_map_deallocate(old_map);
+ old_map = NULL;
lret = activate_exec_state(task, p, thread, &load_result);
if (lret != KERN_SUCCESS) {
goto badtoolate;
}
+ /* Switch to target task's map to copy out strings */
old_map = vm_map_switch(get_task_map(task));
if (load_result.unixproc) {
}
}
+ /* Inherit task role from old task to new task for exec */
+ if (error == 0 && !spawn_no_exec) {
+ proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task());
+ }
/*
* Apply the spawnattr policy, apptype (which primes the task for importance donation),
/* Sever any extant thread affinity */
thread_affinity_exec(current_thread());
+ /* Inherit task role from old task to new task for exec */
+ if (!in_vfexec) {
+ proc_inherit_task_role(get_threadtask(imgp->ip_new_thread), current_task());
+ }
+
thread_t main_thread = imgp->ip_new_thread;
task_set_main_thread_qos(new_task, main_thread);
/* NOTREACHED */
}
- /* If a core should be generated, notify crash reporter */
- if (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) ||
- (p->p_exit_reason != OS_REASON_NULL && (p->p_exit_reason->osr_flags &
- OS_REASON_FLAG_GENERATE_CRASH_REPORT))) {
+ /*
+ * Generate a corefile/crashlog if:
+ * The process doesn't have an exit reason that indicates no crash report should be created
+ * AND any of the following are true:
+ * - The process was terminated due to a fatal signal that generates a core
+ * - The process was killed due to a code signing violation
+ * - The process has an exit reason that indicates we should generate a crash report
+ *
+ * The first condition is necessary because abort_with_reason()/payload() use SIGABRT
+ * (which normally triggers a core) but may indicate that no crash report should be created.
+ */
+ if (!(PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) & OS_REASON_FLAG_NO_CRASH_REPORT)) &&
+ (hassigprop(WTERMSIG(rv), SA_CORE) || ((p->p_csflags & CS_KILLED) != 0) ||
+ (PROC_HAS_EXITREASON(p) && (PROC_EXITREASON_FLAGS(p) &
+ OS_REASON_FLAG_GENERATE_CRASH_REPORT)))) {
/*
* Workaround for processes checking up on PT_DENY_ATTACH:
* should be backed out post-Leopard (details in 5431025).
/* stash the usage into corpse data if making_corpse == true */
if (create_corpse == TRUE) {
- kr = task_mark_corpse(current_task());
+ kr = task_mark_corpse(p->task);
if (kr != KERN_SUCCESS) {
if (kr == KERN_NO_SPACE) {
printf("Process[%d] has no vm space for corpse info.\n", p->p_pid);
/* Update the code, subcode based on exit reason */
proc_update_corpse_exception_codes(p, &code, &subcode);
- populate_corpse_crashinfo(p, task_get_corpseinfo(current_task()), rup, code, subcode, buffer, num_knotes);
+ populate_corpse_crashinfo(p, task_get_corpseinfo(p->task), rup, code, subcode, buffer, num_knotes);
if (buffer != NULL) {
kfree(buffer, buf_size);
}
boolean_t memorystatus_freeze_throttle_enabled = TRUE;
SYSCTL_UINT(_kern, OID_AUTO, memorystatus_freeze_throttle_enabled, CTLFLAG_RW|CTLFLAG_LOCKED, &memorystatus_freeze_throttle_enabled, 0, "");
+#define VM_PAGES_FOR_ALL_PROCS (2)
/*
* Manual trigger of freeze and thaw for dev / debug kernels only.
*/
if (error || !req->newptr)
return (error);
- if (pid == 2) {
+ if (pid == VM_PAGES_FOR_ALL_PROCS) {
vm_pageout_anonymous_pages();
return 0;
if (error || !req->newptr)
return (error);
- p = proc_find(pid);
- if (p != NULL) {
- error = task_thaw(p->task);
- proc_rele(p);
-
- if (error)
- error = EIO;
- return error;
+ if (pid == VM_PAGES_FOR_ALL_PROCS) {
+ do_fastwake_warmup_all();
+ return 0;
+ } else {
+ p = proc_find(pid);
+ if (p != NULL) {
+ error = task_thaw(p->task);
+ proc_rele(p);
+
+ if (error)
+ error = EIO;
+ return error;
+ }
}
return EINVAL;
uint64_t timestamp_now = mach_absolute_time();
memorystatus_jetsam_snapshot->notification_time = timestamp_now;
memorystatus_jetsam_snapshot->js_gencount++;
- if (memorystatus_jetsam_snapshot_last_timestamp == 0 ||
- timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout) {
+ if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 ||
+ timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) {
proc_list_unlock();
int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
if (!ret) {
sizeof(memorystatus_jetsam_snapshot_entry_t) * memorystatus_jetsam_snapshot_count;
uint64_t timestamp_now = mach_absolute_time();
memorystatus_jetsam_snapshot->notification_time = timestamp_now;
- if (memorystatus_jetsam_snapshot_last_timestamp == 0 ||
- timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout) {
+ if (memorystatus_jetsam_snapshot_count > 0 && (memorystatus_jetsam_snapshot_last_timestamp == 0 ||
+ timestamp_now > memorystatus_jetsam_snapshot_last_timestamp + memorystatus_jetsam_snapshot_timeout)) {
proc_list_unlock();
int ret = memorystatus_send_note(kMemorystatusSnapshotNote, &snapshot_size, sizeof(snapshot_size));
if (!ret) {
thread_block((thread_continue_t) memorystatus_freeze_thread);
}
+static int
+sysctl_memorystatus_do_fastwake_warmup_all SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, req, arg1, arg2)
+
+ /* Need to be root or have entitlement */
+ if (!kauth_cred_issuser(kauth_cred_get()) && !IOTaskHasEntitlement(current_task(), MEMORYSTATUS_ENTITLEMENT)) {
+ return EPERM;
+ }
+
+ if (memorystatus_freeze_enabled == FALSE) {
+ return ENOTSUP;
+ }
+
+ do_fastwake_warmup_all();
+
+ return 0;
+}
+
+SYSCTL_PROC(_kern, OID_AUTO, memorystatus_do_fastwake_warmup_all, CTLTYPE_INT|CTLFLAG_WR|CTLFLAG_LOCKED|CTLFLAG_MASKED,
+ 0, 0, &sysctl_memorystatus_do_fastwake_warmup_all, "I", "");
+
#endif /* CONFIG_FREEZE */
#if VM_PRESSURE_EVENTS
{
proc_t target_proc = PROC_NULL;
kauth_cred_t cur_cred = kauth_cred_get();
- int signum = SIGKILL;
os_reason_t signal_reason = OS_REASON_NULL;
AUDIT_ARG(pid, target_pid);
- if ((target_pid <= 0) || (cur_proc->p_pid == target_pid)) {
- return EINVAL;
- }
-
- if (reason_namespace == OS_REASON_INVALID ||
- reason_namespace > OS_REASON_MAX_VALID_NAMESPACE) {
-
+ if ((target_pid <= 0)) {
return EINVAL;
}
AUDIT_ARG(process, target_proc);
- if (!cansignal(cur_proc, cur_cred, target_proc, signum, 0)) {
+ if (!cansignal(cur_proc, cur_cred, target_proc, SIGKILL, 0)) {
proc_rele(target_proc);
return EPERM;
}
signal_reason = build_userspace_exit_reason(reason_namespace, reason_code, payload, payload_size,
reason_string, reason_flags);
- psignal_with_reason(target_proc, signum, signal_reason);
+ if (target_pid == cur_proc->p_pid) {
+ /*
+ * psignal_thread_with_reason() will pend a SIGKILL on the specified thread or
+ * return if the thread and/or task are already terminating. Either way, the
+ * current thread won't return to userspace.
+ */
+ psignal_thread_with_reason(target_proc, current_thread(), SIGKILL, signal_reason);
+ } else {
+ psignal_with_reason(target_proc, SIGKILL, signal_reason);
+ }
+
proc_rele(target_proc);
return 0;
reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(2, sizeof(sender_proc->p_name) +
sizeof(sender_proc->p_pid));
- ret = os_reason_alloc_buffer(signal_reason, reason_buffer_size_estimate);
+ ret = os_reason_alloc_buffer_noblock(signal_reason, reason_buffer_size_estimate);
if (ret != 0) {
printf("build_signal_reason: unable to allocate signal reason buffer.\n");
return signal_reason;
psignal_internal(p, TASK_NULL, thread, PSIG_TRY_THREAD, signum, signal_reason);
}
+void
+psignal_thread_with_reason(proc_t p, thread_t thread, int signum, struct os_reason *signal_reason)
+{
+ psignal_internal(p, TASK_NULL, thread, PSIG_THREAD, signum, signal_reason);
+}
+
/*
* If the current process has received a signal (should be caught or cause
* termination, should interrupt current syscall), return the signal number.
if (IS_64BIT_PROCESS(p)) {
struct user64_itimerval user_itv;
+ bzero(&user_itv, sizeof (user_itv));
user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
} else {
struct user32_itimerval user_itv;
+ bzero(&user_itv, sizeof (user_itv));
user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
+#include <sys/mcache.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/uio_internal.h>
new_so = TAILQ_FIRST(&sock->so_comp);
TAILQ_REMOVE(&sock->so_comp, new_so, so_list);
+ new_so->so_state &= ~SS_COMP;
+ new_so->so_head = NULL;
sock->so_qlen--;
/*
* again once we're done with the filter(s).
*/
socket_unlock(sock, 0);
- if ((error = soacceptfilter(new_so)) != 0) {
+ if ((error = soacceptfilter(new_so, sock)) != 0) {
/* Drop reference on listening socket */
sodereference(sock);
return (error);
socket_lock(new_so, 1);
}
- new_so->so_state &= ~SS_COMP;
- new_so->so_head = NULL;
(void) soacceptlock(new_so, &sa, 0);
socket_unlock(sock, 1); /* release the head */
soclose_locked(sock);
} else {
/* remove extra reference holding the socket */
+ VERIFY(sock->so_usecount > 1);
sock->so_usecount--;
}
socket_unlock(sock, 1);
}
#endif
+ // on macOS tasks can only set and clear their own CPU limits
+ if ((action == PROC_POLICY_ACTION_APPLY || action == PROC_POLICY_ACTION_RESTORE)
+ && proc != current_proc()) {
+ return (EPERM);
+ }
+
switch (action) {
case PROC_POLICY_ACTION_GET:
error = proc_get_task_ruse_cpu(proc->task, &cpuattr.ppattr_cpu_attr,
fds[i].revents = 0;
}
- /* Did we have any trouble registering? */
- if (rfds == nfds)
+ /*
+ * Did we have any trouble registering?
+ * If user space passed 0 FDs, then respect any timeout value passed.
+ * This is an extremely inefficient sleep. If user space passed one or
+ * more FDs, and we had trouble registering _all_ of them, then bail
+ * out. If a subset of the provided FDs failed to register, then we
+ * will still call the kqueue_scan function.
+ */
+ if (nfds && (rfds == nfds))
goto done;
/* scan for, and possibly wait for, the kevents to trigger */
#define OS_REASON_MAX_COUNT (maxproc + 100)
static struct zone *os_reason_zone;
+static int os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize,
+ boolean_t can_block);
void
os_reason_init()
return;
}
+/*
+ * Allocates and initializes a buffer of specified size for the reason. This function
+ * may block and should not be called from extremely performance sensitive contexts
+ * (i.e. jetsam). Also initializes the kcdata descriptor accordingly. If there is an
+ * existing buffer, we dealloc the buffer before allocating a new one and
+ * clear the associated kcdata descriptor. If osr_bufsize is passed as 0,
+ * we deallocate the existing buffer and then return.
+ *
+ * Returns:
+ * 0 on success
+ * EINVAL if the passed reason pointer is invalid or the requested size is
+ * larger than REASON_BUFFER_MAX_SIZE
+ * EIO if we fail to initialize the kcdata buffer
+ */
+int
+os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize)
+{
+ return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, TRUE);
+}
+
/*
* Allocates and initializes a buffer of specified size for the reason. Also
* initializes the kcdata descriptor accordingly. If there is an existing
* EIO if we fail to initialize the kcdata buffer
*/
int
-os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize)
+os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize)
+{
+ return os_reason_alloc_buffer_internal(cur_reason, osr_bufsize, FALSE);
+}
+
+static int
+os_reason_alloc_buffer_internal(os_reason_t cur_reason, uint32_t osr_bufsize,
+ boolean_t can_block)
{
if (cur_reason == OS_REASON_NULL) {
return EINVAL;
return 0;
}
- /*
- * We don't want to block trying to acquire a reason buffer and hold
- * up important things trying to clean up the system (i.e. jetsam).
- */
- cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
- if (cur_reason->osr_kcd_buf == NULL) {
- lck_mtx_unlock(&cur_reason->osr_lock);
- return ENOMEM;
+ if (can_block) {
+ cur_reason->osr_kcd_buf = kalloc_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
+ assert(cur_reason->osr_kcd_buf != NULL);
+ } else {
+ cur_reason->osr_kcd_buf = kalloc_noblock_tag(osr_bufsize, VM_KERN_MEMORY_REASON);
+ if (cur_reason->osr_kcd_buf == NULL) {
+ lck_mtx_unlock(&cur_reason->osr_lock);
+ return ENOMEM;
+ }
}
bzero(cur_reason->osr_kcd_buf, osr_bufsize);
cs_md_final cs_final;
};
-static struct cs_hash cs_hash_sha1 = {
+static const struct cs_hash cs_hash_sha1 = {
.cs_type = CS_HASHTYPE_SHA1,
.cs_size = CS_SHA1_LEN,
.cs_digest_size = SHA_DIGEST_LENGTH,
.cs_final = (cs_md_final)SHA1Final,
};
#if CRYPTO_SHA2
-static struct cs_hash cs_hash_sha256 = {
+static const struct cs_hash cs_hash_sha256 = {
.cs_type = CS_HASHTYPE_SHA256,
.cs_size = SHA256_DIGEST_LENGTH,
.cs_digest_size = SHA256_DIGEST_LENGTH,
.cs_update = (cs_md_update)SHA256_Update,
.cs_final = (cs_md_final)SHA256_Final,
};
-static struct cs_hash cs_hash_sha256_truncate = {
+static const struct cs_hash cs_hash_sha256_truncate = {
.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
.cs_size = CS_SHA256_TRUNCATED_LEN,
.cs_digest_size = SHA256_DIGEST_LENGTH,
.cs_update = (cs_md_update)SHA256_Update,
.cs_final = (cs_md_final)SHA256_Final,
};
-static struct cs_hash cs_hash_sha384 = {
+static const struct cs_hash cs_hash_sha384 = {
.cs_type = CS_HASHTYPE_SHA384,
.cs_size = SHA384_DIGEST_LENGTH,
.cs_digest_size = SHA384_DIGEST_LENGTH,
};
#endif
-static struct cs_hash *
+static struct cs_hash const *
cs_find_md(uint8_t type)
{
if (type == CS_HASHTYPE_SHA1) {
* Choose among different hash algorithms.
* Higher is better, 0 => don't use at all.
*/
-static uint32_t hashPriorities[] = {
+static const uint32_t hashPriorities[] = {
CS_HASHTYPE_SHA1,
CS_HASHTYPE_SHA256_TRUNCATED,
CS_HASHTYPE_SHA256,
static int
cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
{
- struct cs_hash *hashtype;
+ struct cs_hash const *hashtype;
if (length < sizeof(*cd))
return EBADEXEC;
unsigned *tainted)
{
union cs_hash_union mdctx;
- struct cs_hash *hashtype = NULL;
+ struct cs_hash const *hashtype = NULL;
unsigned char actual_hash[CS_HASH_MAX_SIZE];
unsigned char expected_hash[CS_HASH_MAX_SIZE];
boolean_t found_hash;
* so protocol attachment handler must be coded carefuly
*/
so->so_state |= SS_NOFDREF;
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
sofreelastref(so, 1); /* will deallocate the socket */
return (error);
return;
}
if (head != NULL) {
- socket_lock(head, 1);
+ /*
+ * Need to lock the listener when the protocol has
+ * per socket locks
+ */
+ if (head->so_proto->pr_getlock != NULL)
+ socket_lock(head, 1);
+
if (so->so_state & SS_INCOMP) {
+ so->so_state &= ~SS_INCOMP;
TAILQ_REMOVE(&head->so_incomp, so, so_list);
head->so_incqlen--;
+ head->so_qlen--;
+ so->so_head = NULL;
} else if (so->so_state & SS_COMP) {
/*
* We must not decommission a socket that's
so->so_rcv.sb_flags &= ~(SB_SEL|SB_UPCALL);
so->so_snd.sb_flags &= ~(SB_SEL|SB_UPCALL);
so->so_event = sonullevent;
- socket_unlock(head, 1);
+ if (head->so_proto->pr_getlock != NULL)
+ socket_unlock(head, 1);
return;
} else {
panic("sofree: not queued");
}
- head->so_qlen--;
- so->so_state &= ~SS_INCOMP;
- so->so_head = NULL;
- socket_unlock(head, 1);
+ if (head->so_proto->pr_getlock != NULL)
+ socket_unlock(head, 1);
}
sowflush(so);
sorflush(so);
}
if ((so->so_options & SO_ACCEPTCONN)) {
- struct socket *sp, *sonext;
- int socklock = 0;
+ struct socket *sp;
/*
* We do not want new connection to be added
*/
so->so_options &= ~SO_ACCEPTCONN;
- for (sp = TAILQ_FIRST(&so->so_incomp);
- sp != NULL; sp = sonext) {
- sonext = TAILQ_NEXT(sp, so_list);
+ while ((sp = TAILQ_FIRST(&so->so_incomp)) != NULL) {
+ int socklock = 0;
/*
* Radar 5350314
/*
* Lock ordering for consistency with the
* rest of the stack, we lock the socket
- * first and then grabb the head.
+ * first and then grab the head.
*/
socket_unlock(so, 0);
socket_lock(sp, 1);
socklock = 1;
}
- TAILQ_REMOVE(&so->so_incomp, sp, so_list);
- so->so_incqlen--;
-
+ /*
+ * Radar 27945981
+ * The extra reference for the list ensures the
+ * validity of the socket pointer when we perform the
+ * unlock of the head above
+ */
if (sp->so_state & SS_INCOMP) {
sp->so_state &= ~SS_INCOMP;
sp->so_head = NULL;
+ TAILQ_REMOVE(&so->so_incomp, sp, so_list);
+ so->so_incqlen--;
+ so->so_qlen--;
(void) soabort(sp);
}
- if (socklock)
+ if (socklock != 0)
socket_unlock(sp, 1);
}
while ((sp = TAILQ_FIRST(&so->so_comp)) != NULL) {
+ int socklock = 0;
+
/* Dequeue from so_comp since sofree() won't do it */
- TAILQ_REMOVE(&so->so_comp, sp, so_list);
- so->so_qlen--;
-
if (so->so_proto->pr_getlock != NULL) {
+ /*
+ * Lock ordering for consistency with the
+ * rest of the stack, we lock the socket
+ * first and then grab the head.
+ */
socket_unlock(so, 0);
socket_lock(sp, 1);
+ socket_lock(so, 0);
+ socklock = 1;
}
if (sp->so_state & SS_COMP) {
sp->so_state &= ~SS_COMP;
sp->so_head = NULL;
+ TAILQ_REMOVE(&so->so_comp, sp, so_list);
+ so->so_qlen--;
(void) soabort(sp);
}
- if (so->so_proto->pr_getlock != NULL) {
+ if (socklock)
socket_unlock(sp, 1);
- socket_lock(so, 0);
}
}
- }
if (so->so_pcb == NULL) {
/* 3915887: mark the socket as ready for dealloc */
so->so_flags |= SOF_PCBCLEARING;
atomic_add_32(&so->so_proto->pr_domain->dom_refs, -1);
evsofree(so);
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
sofree(so);
return (error);
}
int
-soacceptfilter(struct socket *so)
+soacceptfilter(struct socket *so, struct socket *head)
{
struct sockaddr *local = NULL, *remote = NULL;
int error = 0;
- struct socket *head = so->so_head;
/*
* Hold the lock even if this socket has not been made visible
socket_lock(so, 1);
if (sogetaddr_locked(so, &remote, 1) != 0 ||
sogetaddr_locked(so, &local, 0) != 0) {
- so->so_state &= ~(SS_NOFDREF | SS_COMP);
- so->so_head = NULL;
+ so->so_state &= ~SS_NOFDREF;
socket_unlock(so, 1);
soclose(so);
/* Out of resources; try it again next time */
* the following is done while holding the lock since
* the socket has been exposed to the filter(s) earlier.
*/
- so->so_state &= ~(SS_NOFDREF | SS_COMP);
- so->so_head = NULL;
+ so->so_state &= ~SS_COMP;
socket_unlock(so, 1);
soclose(so);
/* Propagate socket filter's error code to the caller */
void
soisconnected(struct socket *so)
{
- struct socket *head = so->so_head;
so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
so->so_state |= SS_ISCONNECTED;
sflt_notify(so, sock_evt_connected, NULL);
- if (head && (so->so_state & SS_INCOMP)) {
- so->so_state &= ~SS_INCOMP;
- so->so_state |= SS_COMP;
+ if (so->so_head != NULL && (so->so_state & SS_INCOMP)) {
+ struct socket *head = so->so_head;
+ int locked = 0;
+
+ /*
+ * Enforce lock order when the protocol has per socket locks
+ */
if (head->so_proto->pr_getlock != NULL) {
socket_unlock(so, 0);
socket_lock(head, 1);
+ socket_lock(so, 0);
+ locked = 1;
}
- postevent(head, 0, EV_RCONN);
+ if (so->so_head == head && (so->so_state & SS_INCOMP)) {
+ so->so_state &= ~SS_INCOMP;
+ so->so_state |= SS_COMP;
TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
head->so_incqlen--;
- TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+
+ if (locked != 0)
+ socket_unlock(so, 0);
+
+ postevent(head, 0, EV_RCONN);
sorwakeup(head);
wakeup_one((caddr_t)&head->so_timeo);
- if (head->so_proto->pr_getlock != NULL) {
- socket_unlock(head, 1);
+
+ if (locked != 0)
socket_lock(so, 0);
}
+ if (locked != 0)
+ socket_unlock(head, 1);
} else {
postevent(so, 0, EV_WCONN);
wakeup((caddr_t)&so->so_timeo);
return ((so->so_state & SS_ISCONNECTED) ||
!(so->so_proto->pr_flags & PR_CONNREQUIRED) ||
(so->so_flags1 & SOF1_PRECONNECT_DATA));
-
}
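
The lock-ordering comments in the hunks above all describe the same pattern: to take the listener (head) lock while holding a child socket's lock, the child lock is dropped first, the head is locked, the child is relocked, and the child's queue state is then revalidated, since it may have changed while the child was unlocked (this is why the code above rechecks so->so_head and SS_INCOMP after relocking). A minimal, self-contained sketch of that pattern follows; the struct fields and helper are hypothetical stand-ins for illustration, not the kernel socket API.

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical, simplified stand-ins for the listener and child sockets. */
struct listener {
    pthread_mutex_t lock;
};

struct child {
    pthread_mutex_t  lock;
    struct listener *head;
    bool             on_incomp_queue;
};

/*
 * Drop-and-reacquire pattern: release the child lock before taking the
 * head (listener) lock so the head is always locked first, relock the
 * child, then revalidate its queue membership because it may have
 * changed while it was unlocked.
 */
static void promote_to_complete_queue(struct child *c)
{
    struct listener *head = c->head;

    pthread_mutex_unlock(&c->lock);     /* give up the child lock first   */
    pthread_mutex_lock(&head->lock);    /* head is always taken first     */
    pthread_mutex_lock(&c->lock);

    if (c->head == head && c->on_incomp_queue) {
        /* Still queued on this listener: safe to move incomp -> comp. */
        c->on_incomp_queue = false;
    }

    pthread_mutex_unlock(&c->lock);
    pthread_mutex_unlock(&head->lock);
}
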
void
int
soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
{
-
if (sbreserve(&so->so_snd, sndcc) == 0)
goto bad;
else
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
- VERIFY(so->so_usecount != 0);
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
goto out;
}
-
/*
* At this point we know that there is at least one connection
* ready to be accepted. Remove it from the queue prior to
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
so = TAILQ_FIRST(&head->so_comp);
TAILQ_REMOVE(&head->so_comp, so, so_list);
+ so->so_head = NULL;
+ so->so_state &= ~SS_COMP;
head->so_qlen--;
/* unlock head to avoid deadlock with select, keep a ref on head */
socket_unlock(head, 0);
*/
if ((error = mac_socket_check_accepted(kauth_cred_get(), so)) != 0) {
socket_lock(so, 1);
- so->so_state &= ~(SS_NOFDREF | SS_COMP);
- so->so_head = NULL;
+ so->so_state &= ~SS_NOFDREF;
socket_unlock(so, 1);
soclose(so);
/* Drop reference on listening socket */
* Pass the pre-accepted socket to any interested socket filter(s).
* Upon failure, the socket would have been closed by the callee.
*/
- if (so->so_filt != NULL && (error = soacceptfilter(so)) != 0) {
+ if (so->so_filt != NULL && (error = soacceptfilter(so, head)) != 0) {
/* Drop reference on listening socket */
sodereference(head);
/* Propagate socket filter's error code to the caller */
* just causes the client to spin. Drop the socket.
*/
socket_lock(so, 1);
- so->so_state &= ~(SS_NOFDREF | SS_COMP);
- so->so_head = NULL;
+ so->so_state &= ~SS_NOFDREF;
socket_unlock(so, 1);
soclose(so);
sodereference(head);
if (dosocklock)
socket_lock(so, 1);
- so->so_state &= ~SS_COMP;
- so->so_head = NULL;
-
/* Sync socket non-blocking/async state with file flags */
if (fp->f_flag & FNONBLOCK) {
so->so_state |= SS_NBIO;
socket_lock(so, 0);
} else {
/* Release the reference held for the listen socket */
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
}
goto out;
/* Release the reference held for
* listen socket.
*/
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
}
goto out;
* This is possible only for SOCK_DGRAM sockets. We refuse
* connecting to the same socket for SOCK_STREAM sockets.
*/
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
}
}
socket_unlock(so2, 0);
soisconnected(so);
unp_get_locks_in_order(so, so2);
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
} else {
soisconnected(so);
unp_get_locks_in_order(so, so2);
/* Decrement the extra reference left before */
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
break;
}
unp->unp_conn = NULL;
+ VERIFY(so2->so_usecount > 0);
so2->so_usecount--;
if (unp->unp_flags & UNP_TRACE_MDNS)
case SOCK_STREAM:
unp2->unp_conn = NULL;
+ VERIFY(so2->so_usecount > 0);
so->so_usecount--;
/* Set the socket state correctly but do a wakeup later when
panic("unp_lock: so=%p so_pcb=%p lr=%p ref=0x%x\n",
so, so->so_pcb, lr_saved, so->so_usecount);
- if (refcount)
- so->so_usecount++;
-
+ if (refcount) {
+ VERIFY(so->so_usecount > 0);
+ so->so_usecount++;
+ }
so->lock_lr[so->next_lock_lr] = lr_saved;
so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
if (vnode_isinuse(vp, 1)) {
DEVFS_LOCK();
dnp = VTODN(vp);
- dn_times_now(dnp, 0);
+ if (dnp)
+ dn_times_now(dnp, 0);
DEVFS_UNLOCK();
}
return (0);
if (vnode_isinuse(vp, 0)) {
DEVFS_LOCK();
dnp = VTODN(vp);
- dn_times_now(dnp, 0);
+ if (dnp)
+ dn_times_now(dnp, 0);
DEVFS_UNLOCK();
}
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/policy_internal.h>
+#include <kern/timer_call.h>
#include <pexpert/pexpert.h>
static void throttle_info_end_io_internal(struct _throttle_io_info_t *info, int throttle_level);
static int throttle_info_update_internal(struct _throttle_io_info_t *info, uthread_t ut, int flags, boolean_t isssd, boolean_t inflight, struct bufattr *bap);
static int throttle_get_thread_throttle_level(uthread_t ut);
+static int throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier);
/*
* Trivial lookup routine that always fails.
if (!TAILQ_EMPTY(&info->throttle_uthlist[level])) {
- if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[level]) {
+ if (elapsed_msecs < (uint64_t)throttle_windows_msecs[level] || info->throttle_inflight_count[throttle_level]) {
/*
* we had an I/O occur at a higher priority tier within
* this tier's throttle window
static int
throttle_get_thread_throttle_level(uthread_t ut)
{
- int thread_throttle_level;
+ uthread_t *ut_p = (ut == NULL) ? &ut : NULL;
+ int io_tier = throttle_get_io_policy(ut_p);
- if (ut == NULL)
- ut = get_bsdthread_info(current_thread());
+ return throttle_get_thread_throttle_level_internal(ut, io_tier);
+}
+
+/*
+ * Return a throttle level given an existing I/O tier (such as returned by throttle_get_io_policy)
+ */
+static int
+throttle_get_thread_throttle_level_internal(uthread_t ut, int io_tier) {
+ int thread_throttle_level = io_tier;
+ int user_idle_level;
- thread_throttle_level = proc_get_effective_thread_policy(ut->uu_thread, TASK_POLICY_IO);
+ assert(ut != NULL);
/* Bootcache misses should always be throttled */
if (ut->uu_throttle_bc == TRUE)
thread_throttle_level = THROTTLE_LEVEL_TIER3;
+ /*
+ * Issue tier3 I/O as tier2 when the user is idle
+ * to allow maintenance tasks to make more progress.
+ *
+ * Assume any positive idle level is enough... for now it's
+ * only ever 0 or 128 but this is not defined anywhere.
+ */
+ if (thread_throttle_level >= THROTTLE_LEVEL_TIER3) {
+ user_idle_level = timer_get_user_idle_level();
+ if (user_idle_level > 0) {
+ thread_throttle_level--;
+ }
+ }
+
return (thread_throttle_level);
}
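
The comment above explains the demotion rule: while the user is idle, tier-3 I/O is issued one tier higher (tier 2) so maintenance work can make progress. A minimal sketch of that decision, using hypothetical tier constants and an idle-level variable as a stand-in for timer_get_user_idle_level():

#include <assert.h>

/* Hypothetical tier constants and idle query for illustration only. */
#define TIER2 2
#define TIER3 3

static int user_idle_level;   /* stand-in for timer_get_user_idle_level() */

/* Demote tier-3 I/O by one tier whenever the user is idle. */
static int effective_io_tier(int issued_tier)
{
    int tier = issued_tier;

    if (tier >= TIER3 && user_idle_level > 0)
        tier--;              /* issue tier-3 work as tier 2 */

    return tier;
}

int main(void)
{
    user_idle_level = 0;
    assert(effective_io_tier(TIER3) == TIER3);

    user_idle_level = 128;   /* any positive idle level counts */
    assert(effective_io_tier(TIER3) == TIER2);
    return 0;
}
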
mount_t mp;
struct bufattr *bap;
struct _throttle_io_info_t *info;
+ int io_tier;
bap = &bp->b_attr;
if (!ISSET(bap->ba_flags, BA_STRATEGY_TRACKED_IO)) {
info = &_throttle_io_info[LOWPRI_MAX_NUM_DEV - 1];
}
- throttle_info_end_io_internal(info, GET_BUFATTR_IO_TIER(bap));
+ io_tier = GET_BUFATTR_IO_TIER(bap);
+ if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
+ io_tier--;
+ }
+
+ throttle_info_end_io_internal(info, io_tier);
}
/*
if (bap && inflight && !ut->uu_throttle_bc) {
thread_throttle_level = GET_BUFATTR_IO_TIER(bap);
+ if (ISSET(bap->ba_flags, BA_IO_TIER_UPGRADE)) {
+ thread_throttle_level--;
+ }
} else {
thread_throttle_level = throttle_get_thread_throttle_level(ut);
}
struct _throttle_io_info_t *throttle_info;
boolean_t isssd = FALSE;
boolean_t inflight = FALSE;
+ boolean_t upgrade = FALSE;
int code = 0;
proc_t curproc = current_proc();
io_tier = throttle_get_io_policy(&ut);
passive = throttle_get_passive_io_policy(&ut);
+ /*
+ * Mark if the I/O was upgraded by throttle_get_thread_throttle_level
+ * while preserving the original issued tier (throttle_get_io_policy
+ * does not return upgraded tiers)
+ */
+ if (mp && io_tier > throttle_get_thread_throttle_level_internal(ut, io_tier)) {
+#if CONFIG_IOSCHED
+ if (!(mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED)) {
+ upgrade = TRUE;
+ }
+#else /* CONFIG_IOSCHED */
+ upgrade = TRUE;
+#endif /* CONFIG_IOSCHED */
+ }
+
if (bp->b_flags & B_META)
bap->ba_flags |= BA_META;
if (bap->ba_flags & BA_NOCACHE)
code |= DKIO_NOCACHE;
+ if (upgrade) {
+ code |= DKIO_TIER_UPGRADE;
+ SET(bap->ba_flags, BA_IO_TIER_UPGRADE);
+ }
+
if (kdebug_enable) {
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
buf_kernel_addrperm_addr(bp), bdev, (int)buf_blkno(bp), buf_count(bp), 0);
if (so->so_flags & SOF_CONTENT_FILTER) {
so->so_flags &= ~SOF_CONTENT_FILTER;
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
}
if (cfil_info == NULL)
#include <netinet6/in6_var.h>
__private_extern__ int nstat_collect = 1;
+
+#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
&nstat_collect, 0, "Collect detailed statistics");
+#endif /* (DEBUG || DEVELOPMENT) */
static int nstat_privcheck = 0;
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
for (prevsrc = NULL, src = state->ncs_srcs; src;
prevsrc = src, src = src->next)
{
- tucookie = (struct nstat_tucookie *)src->cookie;
- if (tucookie->inp == inp)
- break;
+ nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
+ if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL)
+ {
+ tucookie = (struct nstat_tucookie *)src->cookie;
+ if (tucookie->inp == inp)
+ break;
+ }
}
if (src)
src->provider = provider;
src->cookie = cookie;
src->filter = src_filter;
+ src->seq = 0;
if (msg)
{
state->ncs_seq++;
}
}
- else if (state->ncs_context != 0)
- {
- /*
- * A continuation of a paced-query was in progress. Send that
- * context an error and reset the state. If the same context
- * has changed its mind, just send the full query results.
- */
- if (state->ncs_context != hdrp->context)
- nstat_send_error(state, state->ncs_context, EAGAIN);
- }
return partial;
}
}
break;
}
+ case RTAX_GATEWAY: {
+ /*
+ * Break if the gateway is not AF_LINK type (indirect routes)
+ *
+ * Else, if it is, check whether it is resolved. If it is not
+ * yet resolved, simply break; else scrub the link-layer address.
+ */
+ if ((sa->sa_family != AF_LINK) || (SDL(sa)->sdl_alen == 0))
+ break;
+ /* fallthrough */
+ }
case RTAX_IFP: {
if (sa->sa_family == AF_LINK && credp) {
struct sockaddr_dl *sdl = SDL(buf);
/*
- * Copyright (c) 2000-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
sa = rtm_scrub(type, i, hint, sa, &ssbuf,
sizeof (ssbuf), NULL);
break;
-
+ case RTAX_GATEWAY:
case RTAX_IFP:
sa = rtm_scrub(type, i, NULL, sa, &ssbuf,
sizeof (ssbuf), credp);
boolean_t send_64bit_dsn = FALSE;
boolean_t send_64bit_ack = FALSE;
u_int32_t old_mpt_flags = tp->t_mpflags &
- (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL |
- TMPF_MPCAP_RETRANSMIT);
+ (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL);
if ((mptcp_enable == 0) ||
(mp_tp == NULL) ||
if (((tp->t_mpflags & TMPF_PREESTABLISHED) &&
(!(tp->t_mpflags & TMPF_SENT_KEYS)) &&
- (!(tp->t_mpflags & TMPF_JOINED_FLOW))) ||
- (tp->t_mpflags & TMPF_MPCAP_RETRANSMIT)) {
+ (!(tp->t_mpflags & TMPF_JOINED_FLOW)))) {
struct mptcp_mpcapable_opt_rsp1 mptcp_opt;
if ((MAX_TCPOPTLEN - optlen) <
sizeof (struct mptcp_mpcapable_opt_rsp1))
tp->t_mpflags |= TMPF_SENT_KEYS | TMPF_MPTCP_TRUE;
so->so_flags |= SOF_MPTCP_TRUE;
tp->t_mpflags &= ~TMPF_PREESTABLISHED;
- tp->t_mpflags &= ~TMPF_MPCAP_RETRANSMIT;
if (!tp->t_mpuna) {
tp->t_mpuna = tp->snd_una;
if (TRUE == *p_mptcp_acknow ) {
VERIFY(old_mpt_flags != 0);
u_int32_t new_mpt_flags = tp->t_mpflags &
- (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL |
- TMPF_MPCAP_RETRANSMIT);
+ (TMPF_SND_MPPRIO | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL);
/*
* If none of the above mpflags were acted on by
*/
if ((old_mpt_flags == new_mpt_flags) || (new_mpt_flags == 0)) {
tp->t_mpflags &= ~(TMPF_SND_MPPRIO
- | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL |
- TMPF_MPCAP_RETRANSMIT);
+ | TMPF_SND_REM_ADDR | TMPF_SND_MPFAIL);
*p_mptcp_acknow = FALSE;
mptcplog((LOG_DEBUG, "MPTCP Sender: %s: no action \n",
__func__), MPTCP_SENDER_DBG, MPTCP_LOGLVL_LOG);
return;
}
- /* Handle old duplicate SYN/ACK retransmission */
- if (SEQ_GT(tp->rcv_nxt, (tp->irs + 1)))
- return;
-
/* handle SYN/ACK retransmission by acknowledging with ACK */
- if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED) {
- tp->t_mpflags |= TMPF_MPCAP_RETRANSMIT;
+ if (mp_tp->mpt_state >= MPTCPS_ESTABLISHED)
return;
- }
/* A SYN/ACK contains peer's key and flags */
if (optlen != sizeof (struct mptcp_mpcapable_opt_rsp)) {
if (close)
(void) mptcp_subflow_soclose(mpts, so);
- VERIFY(mp_so->so_usecount != 0);
+ VERIFY(mp_so->so_usecount > 0);
mp_so->so_usecount--; /* for subflow socket */
mpts->mpts_mpte = NULL;
mpts->mpts_socket = NULL;
mp_so = mpte->mpte_mppcb->mpp_socket;
VERIFY(mp_so != NULL);
- VERIFY(mp_so->so_usecount != 0);
+ VERIFY(mp_so->so_usecount > 0);
mp_so->so_usecount--; /* for thread */
mpte->mpte_mppcb->mpp_flags |= MPP_DEFUNCT;
MPTE_UNLOCK(mpte);
}
/*
- * Collect new round-trip time estimate
- * and update averages and current timeout.
+ * Collect new round-trip time estimate and update averages and
+ * current timeout.
*/
static void
tcp_xmit_timer(struct tcpcb *tp, int rtt,
{
int delta;
+ /*
+ * On AWDL interface, the initial RTT measurement on SYN
+ * can be wrong due to peer caching. Avoid the first RTT
+ * measurement as it might skew the RTO.
+ * <rdar://problem/28739046>
+ */
+ if (tp->t_inpcb->inp_last_outifp != NULL &&
+ (tp->t_inpcb->inp_last_outifp->if_eflags & IFEF_AWDL) &&
+ th_ack == tp->iss + 1)
+ return;
+
if (tp->t_flagsext & TF_RECOMPUTE_RTT) {
if (SEQ_GT(th_ack, tp->snd_una) &&
SEQ_LEQ(th_ack, tp->snd_max) &&
else
tp->snd_cwnd = 0;
tp->snd_cwnd += tp->t_maxseg;
-
}
/*
cur_cnt = 0;
}
-
qlen = head->so_incqlen;
if (rnd == 0)
rnd = RandomULong();
}
}
so = sonext;
-
}
if (so == NULL) {
return (0);
}
TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ head->so_incqlen--;
+ head->so_qlen--;
+ so->so_state &= ~SS_INCOMP;
+ so->so_flags |= SOF_OVERFLOW;
+ so->so_head = NULL;
tcp_unlock(head, 0, 0);
lck_mtx_assert(&inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
tp = sototcpcb(so);
- so->so_flags |= SOF_OVERFLOW;
- so->so_head = NULL;
tcp_close(tp);
if (inp->inp_wantcnt > 0 && inp->inp_wantcnt != WNT_STOPUSING) {
* be garbage collected later.
* Release the reference held for so_incomp queue
*/
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
tcp_unlock(so, 1, 0);
} else {
tcp_lock(so, 0, 0);
/* Release the reference held for so_incomp queue */
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
if (so->so_usecount != 1 ||
*/
tcp_unlock(so, 1, 0);
} else {
-
/* Drop the reference held for this function */
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
in_pcbdispose(inp);
tcpstat.tcps_drops++;
tcp_lock(head, 0, 0);
- head->so_incqlen--;
- head->so_qlen--;
return(1);
}
/* No cookie, so we request one */
return (0);
+ /* If there is not enough space for the cookie, we cannot do TFO */
+ if (MAX_TCPOPTLEN - optlen < cookie_len)
+ goto fallback;
+
/* Do not send SYN+data if there is more in the queue than MSS */
if (so->so_snd.sb_cc > (tp->t_maxopd - MAX_TCPOPTLEN))
goto fallback;
if ((tp->t_state >= TCPS_ESTABLISHED) &&
((tp->t_mpflags & TMPF_SND_MPPRIO) ||
(tp->t_mpflags & TMPF_SND_REM_ADDR) ||
- (tp->t_mpflags & TMPF_SND_MPFAIL) ||
- (tp->t_mpflags & TMPF_MPCAP_RETRANSMIT))) {
+ (tp->t_mpflags & TMPF_SND_MPFAIL))) {
if (len > 0) {
len = 0;
}
#if TRAFFIC_MGT
if (tcp_recv_bg == 1 || IS_TCP_RECV_BG(so)) {
- if (tcp_recv_throttle(tp)) {
- uint32_t min_iaj_win =
- tcp_min_iaj_win * tp->t_maxseg;
+ if (recwin > 0 && tcp_recv_throttle(tp)) {
+ uint32_t min_iaj_win = tcp_min_iaj_win * tp->t_maxseg;
if (tp->iaj_rwintop == 0 ||
- SEQ_LT(tp->iaj_rwintop, tp->rcv_adv))
+ SEQ_LT(tp->iaj_rwintop, tp->rcv_adv))
tp->iaj_rwintop = tp->rcv_adv;
if (SEQ_LT(tp->iaj_rwintop,
- tp->rcv_nxt + min_iaj_win))
- tp->iaj_rwintop = tp->rcv_nxt + min_iaj_win;
- recwin = min(tp->iaj_rwintop - tp->rcv_nxt, recwin);
+ tp->rcv_nxt + min_iaj_win))
+ tp->iaj_rwintop = tp->rcv_nxt +
+ min_iaj_win;
+ recwin = imin((int32_t)(tp->iaj_rwintop -
+ tp->rcv_nxt), recwin);
+ if (recwin < 0)
+ recwin = 0;
}
}
#endif /* TRAFFIC_MGT */
if (error == ENOBUFS) {
if (!tp->t_timer[TCPT_REXMT] &&
- !tp->t_timer[TCPT_PERSIST])
+ !tp->t_timer[TCPT_PERSIST] &&
+ SEQ_GT(tp->snd_max, tp->snd_una))
tp->t_timer[TCPT_REXMT] =
OFFSET_FROM_START(tp, tp->t_rxtcur);
tp->snd_cwnd = tp->t_maxseg;
{
struct socket *so = tp->t_inpcb->inp_socket;
struct sockbuf *sb = &so->so_rcv;
- u_int32_t rcvbuf = sb->sb_hiwat;
+ u_int32_t rcvbuf;
int32_t space;
int32_t pending = 0;
+ tcp_sbrcv_grow_rwin(tp, sb);
+
+ /* hiwat might have changed */
+ rcvbuf = sb->sb_hiwat;
+
/*
* If message delivery is enabled, do not count
* unordered bytes in receive buffer towards hiwat mark.
if (so->so_flags & SOF_ENABLE_MSGS)
rcvbuf = rcvbuf - so->so_msg_state->msg_uno_bytes;
- tcp_sbrcv_grow_rwin(tp, sb);
-
space = ((int32_t) imin((rcvbuf - sb->sb_cc),
(sb->sb_mbmax - sb->sb_mbcnt)));
if (space < 0)
#endif /* INET6 */
in_pcbdetach(inp);
}
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
if (inp->inp_wantcnt == WNT_STOPUSING)
active = TRUE;
if (tp == NULL)
goto out;
tp = tcp_drop(tp, ECONNABORTED);
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
COMMON_END(PRU_ABORT);
}
#define TMPF_SND_MPFAIL 0x00200000 /* Received mapping csum failure */
#define TMPF_FASTJOIN_SEND 0x00400000 /* Fast join early data send */
#define TMPF_FASTJOINBY2_SEND 0x00800000 /* Fast join send after 3 WHS */
-#define TMPF_MPCAP_RETRANSMIT 0x01000000 /* Retransmission of 3rd ACK */
#define TMPF_TFO_REQUEST 0x02000000 /* TFO Requested */
tcp_seq t_mpuna; /* unacknowledged sequence */
else
lr_saved = debug;
- if (refcount)
+ if (refcount) {
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
-
+ }
if (so->so_pcb == NULL) {
panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__,
so, lr_saved, solockhistory_nr(so));
if (algo->finalizedecrypt)
{
- unsigned char tag[algo->icvlen];
- if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
+ if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
goto bad;
}
- if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) {
- ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
- IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
- KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
- goto bad;
- }
}
/*
if (algo->finalizedecrypt)
{
- unsigned char tag[algo->icvlen];
- if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
+ if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
goto bad;
}
- if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) {
- ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
- IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
- KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
- goto bad;
- }
}
/*
{
lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
aes_gcm_ctx *ctx = (aes_gcm_ctx*)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
+ u_int ivlen = sav->ivlen;
+ unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
int rc;
ctx->decrypt = &ctx->ctxt[0];
return (rc);
}
- rc = aes_encrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ctx->encrypt);
+ bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
+ memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
+ memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen);
+
+ rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
if (rc) {
return (rc);
}
+
+ rc = aes_encrypt_reset_gcm(ctx->encrypt);
+ if (rc) {
+ return (rc);
+ }
+
return (rc);
}
int scutoff;
int i, len;
unsigned char nonce[ESP_GCM_SALT_LEN+ivlen];
-
+
if (ivlen != ESP_GCM_IVLEN) {
ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
m_freem(m);
bodyoff = off + sizeof(struct newesp) + ivlen;
}
- m_copyback(m, ivoff, ivlen, sav->iv);
+ bzero(nonce, ESP_GCM_SALT_LEN+ivlen);
+ /* generate new iv */
+ ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
- if (m->m_pkthdr.len < bodyoff) {
- ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
- m->m_pkthdr.len, (u_int32_t)bodyoff));
+ if (aes_encrypt_reset_gcm(ctx->encrypt)) {
+ ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
m_freem(m);
return EINVAL;
}
- /* Set IV */
- memcpy(nonce, _KEYBUF(sav->key_enc)+_KEYLEN(sav->key_enc)-ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
- memcpy(nonce+ESP_GCM_SALT_LEN, sav->iv, ivlen);
+ if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
+ ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
+ m_freem(m);
+ return EINVAL;
+ }
- ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
- if (aes_encrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->encrypt)) {
- ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
+ /*
+ * The IV is now generated within corecrypto and
+ * is provided to ESP using aes_encrypt_inc_iv_gcm().
+ * This makes sav->iv redundant; it is no longer
+ * used in GCM operations. But we still copy the IV
+ * back to sav->iv to ensure that any future code reading
+ * this value will get the latest IV.
+ */
+ memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
+ m_copyback(m, ivoff, ivlen, sav->iv);
+ bzero(nonce, ESP_GCM_SALT_LEN+ivlen);
+
+ if (m->m_pkthdr.len < bodyoff) {
+ ipseclog((LOG_ERR, "%s: bad len %d/%lu\n", __FUNCTION__,
+ m->m_pkthdr.len, (u_int32_t)bodyoff));
m_freem(m);
- bzero(nonce, sizeof(nonce));
return EINVAL;
}
- bzero(nonce, sizeof(nonce));
/* Set Additional Authentication Data */
if (!(sav->flags & SADB_X_EXT_OLD)) {
sp_aligned = NULL;
}
- /* generate new iv */
- key_sa_stir_iv(sav);
-
return 0;
}
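
The IV handling above switches ESP GCM from a per-packet random IV to a counter maintained inside corecrypto (aes_encrypt_inc_iv_gcm / aes_encrypt_reset_gcm); with AES-GCM, reusing a (key, nonce) pair is catastrophic, and a monotonically increasing counter rules that out by construction. A self-contained sketch of a salt-plus-counter nonce follows, using hypothetical names and independent of the corecrypto API:

#include <stdint.h>
#include <string.h>

#define GCM_SALT_LEN 4   /* fixed per-SA salt, chosen at key setup      */
#define GCM_IV_LEN   8   /* per-packet IV carried in the ESP payload    */

/* Hypothetical per-SA IV state: 4-byte salt plus a 64-bit counter. */
struct gcm_iv_state {
    uint8_t  salt[GCM_SALT_LEN];
    uint64_t counter;
};

/*
 * Build the 12-byte GCM nonce (salt || IV) and advance the counter so
 * the same (key, nonce) pair is never used twice under one key.
 */
static void gcm_next_nonce(struct gcm_iv_state *st,
                           uint8_t nonce[GCM_SALT_LEN + GCM_IV_LEN])
{
    uint64_t ctr = ++st->counter;

    memcpy(nonce, st->salt, GCM_SALT_LEN);
    for (int i = GCM_IV_LEN - 1; i >= 0; i--) {   /* big-endian counter */
        nonce[GCM_SALT_LEN + i] = (uint8_t)(ctr & 0xff);
        ctr >>= 8;
    }
}
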
rtaddr = installed_dr->rtaddr_mapped;
else
rtaddr = installed_dr->rtaddr;
+ NDDR_UNLOCK(dr);
lck_mtx_unlock(nd6_mutex);
/* Callee returns a locked route upon success */
if ((rt = nd6_lookup(&rtaddr, 0, ifp, 0)) != NULL) {
nd6log((LOG_ERR, "defrouter_select: more than one "
"default router is installed for interface :%s.\n",
if_name(ifp)));
+ NDDR_UNLOCK(dr);
}
- }
- NDDR_UNLOCK(dr);
+ } else
+ NDDR_UNLOCK(dr);
+
NDDR_REMREF(dr); /* for this for loop */
if (drrele != NULL)
NDDR_REMREF(drrele);
}
if (installed_dr) {
- NDDR_REMREF(selected_dr);
+ NDDR_REMREF(installed_dr);
installed_dr = NULL;
}
NDPR_REMREF(pr);
lck_mtx_unlock(nd6_mutex);
} else {
- struct nd_prefix *newpr = NULL;
-
newprefix = 1;
if (new->ndpr_vltime == 0)
bzero(&new->ndpr_addr, sizeof (struct in6_addr));
- error = nd6_prelist_add(new, dr, &newpr, FALSE);
- if (error != 0 || newpr == NULL) {
+ error = nd6_prelist_add(new, dr, &pr, FALSE);
+ if (error != 0 || pr == NULL) {
nd6log((LOG_NOTICE, "prelist_update: "
"nd6_prelist_add failed for %s/%d on %s "
"errno=%d, returnpr=0x%llx\n",
ip6_sprintf(&new->ndpr_prefix.sin6_addr),
new->ndpr_plen, if_name(new->ndpr_ifp),
- error, (uint64_t)VM_KERNEL_ADDRPERM(newpr)));
+ error, (uint64_t)VM_KERNEL_ADDRPERM(pr)));
goto end; /* we should just give up in this case. */
}
-
- /*
- * XXX: from the ND point of view, we can ignore a prefix
- * with the on-link bit being zero. However, we need a
- * prefix structure for references from autoconfigured
- * addresses. Thus, we explicitly make sure that the prefix
- * itself expires now.
- */
- NDPR_LOCK(newpr);
- if (newpr->ndpr_raf_onlink == 0) {
- newpr->ndpr_vltime = 0;
- newpr->ndpr_pltime = 0;
- in6_init_prefix_ltimes(newpr);
- }
-
- pr = newpr;
- NDPR_UNLOCK(newpr);
}
/*
/* 5.5.3 (a). Ignore the prefix without the A bit set. */
if (!new->ndpr_raf_auto)
- goto afteraddrconf;
+ goto end;
/*
* 5.5.3 (b). the link-local prefix should have been ignored in
}
}
-afteraddrconf:
-
end:
if (pr != NULL)
NDPR_REMREF(pr);
#include <netkey/keysock.h>
#include <netkey/key_debug.h>
#include <stdarg.h>
-
+#include <libkern/crypto/rand.h>
#include <netinet6/ipsec.h>
#if INET6
}
/* initialize */
- key_randomfill(sav->iv, sav->ivlen);
+ if (sav->alg_enc == SADB_X_EALG_AES_GCM) {
+ bzero(sav->iv, sav->ivlen);
+ } else {
+ key_randomfill(sav->iv, sav->ivlen);
+ }
#endif
break;
case SADB_SATYPE_AH:
}
}
/* initialize */
- key_randomfill(sav->iv, sav->ivlen);
+ if (sav->alg_enc == SADB_X_EALG_AES_GCM) {
+ bzero(sav->iv, sav->ivlen);
+ } else {
+ key_randomfill(sav->iv, sav->ivlen);
+ }
}
#endif
}
size_t l)
{
#ifdef __APPLE__
-
- read_random(p, (u_int)l);
+ cc_rand_generate(p, l);
#else
size_t n;
u_int32_t v;
#define BA_ISOCHRONOUS 0x00001000 /* device specific isochronous throughput to media */
#define BA_STRATEGY_TRACKED_IO 0x00002000 /* tracked by spec_strategy */
+#define BA_IO_TIER_UPGRADE 0x00004000 /* effective I/O tier is higher than BA_IO_TIER */
#define GET_BUFATTR_IO_TIER(bap) ((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT)
*
* DKIOCGETPROVISIONSTATUS get device's block provision status
* DKIOCGETIOMINSATURATIONBYTECOUNT get minimum byte count to saturate storage bandwidth
+ *
+ * DKIOCGETERRORDESCRIPTION get description of any drive error
*/
#define DK_FEATURE_BARRIER 0x00000002
dk_provision_extent_t * extents; /* output: provision extents */
} dk_provision_status_t;
+typedef struct
+{
+ uint64_t options; /* reserved, clear to zero */
+ uint64_t reserved; /* reserved, clear to zero */
+ uint64_t description_size;
+ char * description;
+} dk_error_description_t;
+
+
#ifdef KERNEL
#ifdef PRIVATE
#define DKIOCGETPROVISIONSTATUS _IOWR('d', 79, dk_provision_status_t)
+#define DKIOCGETERRORDESCRIPTION _IOR('d', 80, dk_error_description_t)
+
#define DKIOCSYNCHRONIZECACHE _IO('d', 22)
#ifdef KERNEL
int dtad_refcnt; /* reference count */
} dtrace_actdesc_t;
+
typedef struct dtrace_ecbdesc {
dtrace_actdesc_t *dted_action; /* action description(s) */
dtrace_preddesc_t dted_pred; /* predicate description */
dtrace_probedesc_t dted_probe; /* probe description */
uint64_t dted_uarg; /* library argument */
int dted_refcnt; /* reference count */
+ uint64_t dted_probegen; /* matched probe generation */
} dtrace_ecbdesc_t;
+/*
+ * APPLE NOTE: The kernel always rebuilds dtrace_ecbdesc structures
+ * coming from userspace, so there is no dted_probegen manipulation risk
+ */
+
/*
* DTrace Metadata Description Structures
*
* a meta provider. This structure consists of the following members:
*
* dtms_create_probe() <-- Add a new probe to a created provider
- * dtms_provide_pid() <-- Create a new provider for a given process
- * dtms_remove_pid() <-- Remove a previously created provider
+ * dtms_provide_proc() <-- Create a new provider for a given process
+ * dtms_remove_proc() <-- Remove a previously created provider
*
* 1.2 void dtms_create_probe(void *arg, void *parg,
* dtrace_helper_probedesc_t *probedesc);
*
* The first argument is the cookie as passed to dtrace_meta_register().
* The second argument is the provider cookie for the associated provider;
- * this is obtained from the return value of dtms_provide_pid(). The third
+ * this is obtained from the return value of dtms_provide_proc(). The third
* argument is the helper probe description.
*
* 1.2.3 Return value
* such that the provider may (and is expected to) call provider-related
* DTrace provider APIs including dtrace_probe_create().
*
- * 1.3 void *dtms_provide_pid(void *arg, dtrace_meta_provider_t *mprov,
- * pid_t pid)
+ * 1.3 void *dtms_provide_proc(void *arg, dtrace_meta_provider_t *mprov,
+ * proc_t *proc)
*
* 1.3.1 Overview
*
*
* 1.3.4 Caller's context
*
- * dtms_provide_pid() is called from either ioctl() or module load context.
+ * dtms_provide_proc() is called from either ioctl() or module load context.
* The DTrace framework is locked in such a way that meta providers may not
* register or unregister. This means that the meta provider cannot call
* dtrace_meta_register() or dtrace_meta_unregister(). However, the context
* is such that the provider may -- and is expected to -- call
* provider-related DTrace provider APIs including dtrace_register().
*
- * 1.4 void dtms_remove_pid(void *arg, dtrace_meta_provider_t *mprov,
- * pid_t pid)
+ * 1.4 void dtms_remove_proc(void *arg, dtrace_meta_provider_t *mprov,
+ * proc_t *proc)
*
* 1.4.1 Overview
*
*
* 1.4.4 Caller's context
*
- * dtms_remove_pid() is called from either ioctl() or exit() context.
+ * dtms_remove_proc() is called from either ioctl() or exit() context.
* The DTrace framework is locked in such a way that meta providers may not
* register or unregister. This means that the meta provider cannot call
* dtrace_meta_register() or dtrace_meta_unregister(). However, the context
dtrace_pattr_t dthpv_pattr; /* stability attributes */
} dtrace_helper_provdesc_t;
+/*
+ * APPLE NOTE: dtms_provide_pid and dtms_remove_pid are replaced with
+ * dtms_provide_proc and dtms_remove_proc on Darwin, and a proc reference
+ * needs to be held for the duration of the call.
+ *
+ * This is due to the fact that proc_find is not re-entrant on Darwin.
+ */
+
typedef struct dtrace_mops {
void (*dtms_create_probe)(void *, void *, dtrace_helper_probedesc_t *);
- void *(*dtms_provide_pid)(void *, dtrace_helper_provdesc_t *, pid_t);
- void (*dtms_remove_pid)(void *, dtrace_helper_provdesc_t *, pid_t);
+ void *(*dtms_provide_proc)(void *, dtrace_helper_provdesc_t *, proc_t*);
+ void (*dtms_remove_proc)(void *, dtrace_helper_provdesc_t *, proc_t*);
char* (*dtms_provider_name)(void *);
} dtrace_mops_t;
#define DKIO_NOCACHE 0x80
#define DKIO_TIER_MASK 0xF00
#define DKIO_TIER_SHIFT 8
+#define DKIO_TIER_UPGRADE 0x1000
/* Kernel Debug Sub Classes for Applications (DBG_APPS) */
#define DBG_APP_LOGINWINDOW 0x03
#define PROC_SETACTION_STATE(p) (p->p_pcaction = (PROC_CONTROL_STATE(p) | (PROC_CONTROL_STATE(p) << 16)))
#define PROC_RESETACTION_STATE(p) (p->p_pcaction = PROC_CONTROL_STATE(p))
+/* Process exit reason macros */
+#define PROC_HAS_EXITREASON(p) (p->p_exit_reason != OS_REASON_NULL)
+#define PROC_EXITREASON_FLAGS(p) p->p_exit_reason->osr_flags
+
/* additional process flags */
#define P_LADVLOCK 0x01
#define P_LXBKIDLEINPROG 0x02
__BEGIN_DECLS
-#ifdef KERNEL
+#ifdef KERNEL_PRIVATE
#include <kern/kern_cdata.h>
user_addr_t reason_string, uint64_t reason_flags);
char *launchd_exit_reason_get_string_desc(os_reason_t exit_reason);
+/* The blocking allocation is currently not exported to KEXTs */
+int os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize);
+
#else /* XNU_KERNEL_PRIVATE */
typedef void * os_reason_t;
#endif /* XNU_KERNEL_PRIVATE */
os_reason_t os_reason_create(uint32_t osr_namespace, uint64_t osr_code);
-int os_reason_alloc_buffer(os_reason_t cur_reason, uint32_t osr_bufsize);
+int os_reason_alloc_buffer_noblock(os_reason_t cur_reason, uint32_t osr_bufsize);
struct kcdata_descriptor * os_reason_get_kcdata_descriptor(os_reason_t cur_reason);
void os_reason_ref(os_reason_t cur_reason);
void os_reason_free(os_reason_t cur_reason);
-#endif /* KERNEL */
+#endif /* KERNEL_PRIVATE */
/*
* Reason namespaces.
void psignal_locked(struct proc *, int);
void psignal_try_thread(proc_t, thread_t, int signum);
void psignal_try_thread_with_reason(proc_t, thread_t, int, struct os_reason*);
+void psignal_thread_with_reason(proc_t, thread_t, int, struct os_reason*);
void psignal_uthread(thread_t, int);
void pgsignal(struct pgrp *pgrp, int sig, int checkctty);
void tty_pgsignal(struct tty * tp, int sig, int checkctty);
extern int soaccept(struct socket *so, struct sockaddr **nam);
extern int soacceptlock(struct socket *so, struct sockaddr **nam, int dolock);
-extern int soacceptfilter(struct socket *so);
+extern int soacceptfilter(struct socket *so, struct socket *head);
extern struct socket *soalloc(int waitok, int dom, int type);
extern int sobindlock(struct socket *so, struct sockaddr *nam, int dolock);
extern int soclose(struct socket *so);
vm_offset_t csb_mem_offset;
vm_address_t csb_mem_kaddr;
unsigned char csb_cdhash[CS_CDHASH_LEN];
- struct cs_hash *csb_hashtype;
+ const struct cs_hash *csb_hashtype;
vm_size_t csb_hash_pagesize; /* each hash entry represent this many bytes in the file */
vm_size_t csb_hash_pagemask;
vm_size_t csb_hash_pageshift;
#include <kern/thread.h>
#include <sys/fslog.h> /* fslog_io_error() */
+#include <sys/disk.h> /* dk_error_description_t */
#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
break;
}
} else {
+ int clear_bdone;
+
/*
* buffer in core and not busy
*/
if ( (bp->b_upl) )
panic("buffer has UPL, but not marked BUSY: %p", bp);
- if ( !ret_only_valid && bp->b_bufsize != size)
- allocbuf(bp, size);
+ clear_bdone = FALSE;
+ if (!ret_only_valid) {
+ /*
+ * If the number of bytes that are valid is going
+ * to increase (even if we end up not doing a
+ * reallocation through allocbuf) we have to read
+ * the new size first.
+ *
+ * This is required in cases where we are doing a
+ * read-modify-write of already valid data on disk; but
+ * in cases where the data on disk beyond (blkno + b_bcount)
+ * is invalid, we may end up doing extra I/O.
+ */
+ if (operation == BLK_META && bp->b_bcount < size) {
+ /*
+ * Since we are going to read in the whole size first,
+ * we have to ensure that any pending delayed write
+ * is flushed to disk before doing so.
+ */
+ if (ISSET(bp->b_flags, B_DELWRI)) {
+ CLR(bp->b_flags, B_CACHE);
+ buf_bwrite(bp);
+ goto start;
+ }
+ /*
+ * clear B_DONE before returning from
+ * this function so that the caller can
+ * issue a read for the new size.
+ */
+ clear_bdone = TRUE;
+ }
+
+ if (bp->b_bufsize != size)
+ allocbuf(bp, size);
+ }
upl_flags = 0;
switch (operation) {
/*NOTREACHED*/
break;
}
+
+ if (clear_bdone)
+ CLR(bp->b_flags, B_DONE);
}
} else { /* not incore() */
int queue = BQ_EMPTY; /* Start with no preference */
mp = NULL;
}
+ if (ISSET(bp->b_flags, B_ERROR)) {
+ if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
+ dk_error_description_t desc;
+ bzero(&desc, sizeof(desc));
+ desc.description = panic_disk_error_description;
+ desc.description_size = panic_disk_error_description_size;
+ VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel());
+ }
+ }
+
if (mp && (bp->b_flags & B_READ) == 0) {
update_last_io_time(mp);
INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
if (bap->ba_flags & BA_NOCACHE)
code |= DKIO_NOCACHE;
+ if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
+ code |= DKIO_TIER_UPGRADE;
+ }
+
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_COMMON, FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
buf_kernel_addrperm_addr(bp), (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid, bp->b_error, 0);
}
* indicators
*/
CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
- CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP));
+ CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));
SET_BUFATTR_IO_TIER(bap, 0);
-16.1.0
+16.3.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
_net_del_proto:_net_del_proto_old
_netboot_root
_os_reason_create
-_os_reason_alloc_buffer
+_os_reason_alloc_buffer_noblock
_os_reason_get_kcdata_descriptor
_os_reason_ref
_os_reason_free
IOReturn IOHibernateSystemHasSlept(void);
IOReturn IOHibernateSystemWake(void);
IOReturn IOHibernateSystemPostWake(void);
-bool IOHibernateWasScreenLocked(void);
+uint32_t IOHibernateWasScreenLocked(void);
void IOHibernateSetScreenLocked(uint32_t lockState);
void IOHibernateSetWakeCapabilities(uint32_t capability);
void IOHibernateSystemRestart(void);
kIOScreenLockFileVaultDialog = 4,
};
-#define kIOScreenLockStateKey "IOScreenLockState"
+#define kIOScreenLockStateKey "IOScreenLockState"
+#define kIOBooterScreenLockStateKey "IOBooterScreenLockState"
#endif /* ! __IOKIT_IOHIBERNATEPRIVATE_H */
#define kIOConsoleSessionLoginDoneKey "kCGSessionLoginDoneKey" /* value is OSBoolean */
#define kIOConsoleSessionSecureInputPIDKey "kCGSSessionSecureInputPID" /* value is OSNumber */
#define kIOConsoleSessionScreenLockedTimeKey "CGSSessionScreenLockedTime" /* value is OSNumber, secs - 1970 */
+#define kIOConsoleSessionScreenIsLockedKey "CGSSessionScreenIsLocked" /* value is OSBoolean */
// IOResources property
#define kIOConsoleUsersSeedKey "IOConsoleUsersSeed" /* value is OSNumber */
#endif
kIOMemoryPersistent = 0x00010000,
#ifdef XNU_KERNEL_PRIVATE
- kIOMemoryReserved6156215 = 0x00020000,
+ kIOMemoryMapCopyOnWrite = 0x00020000,
#endif
kIOMemoryThreadSafe = 0x00100000, // Shared with Buffer MD
kIOMemoryClearEncrypt = 0x00200000, // Shared with Buffer MD
struct ExpansionData {
IOOptionBits options;
IOEventSource *passiveEventChain;
-#if DEBUG
- void * allocationBacktrace[16];
-#endif /* DEBUG */
#if IOKITSTATS
struct IOWorkLoopCounter *counter;
#else
OSData *propObj;
dtptr_t *propPtr;
unsigned int propSize;
+ int ret = -1;
chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane );
if ( chosen == 0 ) return -1;
propObj = OSDynamicCast( OSData, chosen->getProperty(key) );
- if ( propObj == 0 ) return -1;
+ if ( propObj == 0 ) goto cleanup;
propSize = propObj->getLength();
- if ( propSize != (2 * sizeof(dtptr_t)) ) return -1;
+ if ( propSize != (2 * sizeof(dtptr_t)) ) goto cleanup;
propPtr = (dtptr_t *)propObj->getBytesNoCopy();
- if ( propPtr == 0 ) return -1;
+ if ( propPtr == 0 ) goto cleanup;
*infoAddr = (void *)(uintptr_t) (propPtr[0]);
*infoSize = (int) (propPtr[1]);
- return 0;
+ ret = 0;
+
+cleanup:
+ chosen->release();
+
+ return ret;
}
void IODTFreeLoaderInfo( const char *key, void *infoAddr, int infoSize )
chosen = IORegistryEntry::fromPath( "/chosen/memory-map", gIODTPlane );
if ( chosen != 0 ) {
chosen->removeProperty(key);
+ chosen->release();
}
}
}
char *name;
char location[ 32 ];
bool noLocation = true;
+ bool kernelOnly;
regEntry = new IOService;
if( regEntry &&
(kSuccess == DTCreatePropertyIterator( dtEntry, &dtIter))) {
+ kernelOnly = (kSuccess == DTGetProperty(dtEntry, "kernel-only", &prop, &propSize));
propTable = regEntry->getPropertyTable();
while( kSuccess == DTIterateProperties( dtIter, &name)) {
}
assert( nameKey && data );
+ if (kernelOnly)
+ data->setSerializable(false);
+
propTable->setObject( nameKey, data);
data->release();
nameKey->release();
return (kIOReturnSuccess);
}
-bool IOHibernateWasScreenLocked(void)
+uint32_t IOHibernateWasScreenLocked(void)
{
- bool ret = false;
+ uint32_t ret = 0;
if ((kIOHibernateStateWakingFromHibernate == gIOHibernateState) && gIOChosenEntry)
{
OSData *
data = OSDynamicCast(OSData, gIOChosenEntry->getProperty(kIOScreenLockStateKey));
- if (data) switch (*((uint32_t *)data->getBytesNoCopy()))
+ if (data)
{
- case kIOScreenLockLocked:
- case kIOScreenLockFileVaultDialog:
- ret = true;
- break;
- case kIOScreenLockNoLock:
- case kIOScreenLockUnlocked:
- default:
- ret = false;
- break;
- }
+ ret = ((uint32_t *)data->getBytesNoCopy())[0];
+ gIOChosenEntry->setProperty(kIOBooterScreenLockStateKey, data);
+ }
}
+ else gIOChosenEntry->removeProperty(kIOBooterScreenLockStateKey);
+
return (ret);
}
return address;
}
-void IOFree(void * address, vm_size_t size)
+void IOFree(void * inAddress, vm_size_t size)
{
- if (address) {
+ void * address;
+ if ((address = inAddress))
+ {
address = (typeof(address)) (((uintptr_t) address) - sizeofIOLibMallocHeader);
#if IOTRACKING
- if (TRACK_ALLOC) {
+ if (TRACK_ALLOC)
+ {
IOLibMallocHeader * hdr;
+ struct ptr_reference{ void * ptr; };
+ volatile struct ptr_reference ptr;
+
+ // we're about to block in IOTrackingRemove(), make sure the original pointer
+ // exists in memory or a register for leak scanning to find
+ ptr.ptr = inAddress;
+
hdr = (typeof(hdr)) address;
if (size != hdr->tracking.size)
{
size = hdr->tracking.size;
}
IOTrackingRemove(gIOMallocTracking, &hdr->tracking.tracking, size);
+ ptr.ptr = NULL;
}
#endif
else
index = gIOKitPageableSpace.count - 1;
}
- if( KERN_SUCCESS == kr)
+ if (KERN_NO_SPACE != kr)
break;
lck_mtx_lock( gIOKitPageableSpace.lock );
struct IOMemoryReference
{
- volatile SInt32 refCount;
- vm_prot_t prot;
- uint32_t capacity;
- uint32_t count;
- IOMemoryEntry entries[0];
+ volatile SInt32 refCount;
+ vm_prot_t prot;
+ uint32_t capacity;
+ uint32_t count;
+ struct IOMemoryReference * mapRef;
+ IOMemoryEntry entries[0];
};
enum
{
kIOMemoryReferenceReuse = 0x00000001,
kIOMemoryReferenceWrite = 0x00000002,
+ kIOMemoryReferenceCOW = 0x00000004,
};
SInt32 gIOMemoryReferenceCount;
IOMemoryEntry * entries;
size_t size;
+ if (ref->mapRef)
+ {
+ memoryReferenceFree(ref->mapRef);
+ ref->mapRef = 0;
+ }
+
entries = ref->entries + ref->count;
while (entries > &ref->entries[0])
{
tag = getVMTag(kernel_map);
entries = &ref->entries[0];
count = 0;
+ err = KERN_SUCCESS;
offset = 0;
rangeIdx = 0;
- if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ if (_task)
+ {
+ getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
+ }
else
{
nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
+ if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY;
if ((kIOMemoryReferenceReuse & options) && _memRef)
{
ref->count = count;
ref->prot = prot;
+ if (_task && (KERN_SUCCESS == err)
+ && (kIOMemoryMapCopyOnWrite & _flags)
+ && !(kIOMemoryReferenceCOW & options))
+ {
+ err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
+ }
+
if (KERN_SUCCESS == err)
{
if (MAP_MEM_NAMED_REUSE & prot)
IOOptionBits type;
IOOptionBits cacheMode;
vm_tag_t tag;
+ // for the kIOMapPrefault option.
+ upl_page_info_t * pageList = NULL;
+ UInt currentPageIndex = 0;
+ bool didAlloc;
- /*
- * For the kIOMapPrefault option.
- */
- upl_page_info_t *pageList = NULL;
- UInt currentPageIndex = 0;
+ if (ref->mapRef)
+ {
+ err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
+ return (err);
+ }
type = _flags & kIOMemoryTypeMask;
+
prot = VM_PROT_READ;
if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
prot &= ref->prot;
nextAddr += remain;
nextLen -= remain;
pageOffset = (page_mask & nextAddr);
- addr = 0;
+ addr = 0;
+ didAlloc = false;
+
if (!(options & kIOMapAnywhere))
{
addr = *inaddr;
err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
if (KERN_SUCCESS == err)
{
- addr = ref.mapped;
- map = ref.map;
+ addr = ref.mapped;
+ map = ref.map;
+ didAlloc = true;
}
}
}
}
- if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
+ if ((KERN_SUCCESS != err) && didAlloc)
{
(void) mach_vm_deallocate(map, trunc_page_64(addr), size);
addr = 0;
gIOSystemMapper = mapper = IOMapper::gSystem;
}
- // Temp binary compatibility for kIOMemoryThreadSafe
- if (kIOMemoryReserved6156215 & options)
- {
- options &= ~kIOMemoryReserved6156215;
- options |= kIOMemoryThreadSafe;
- }
// Remove the dynamic internal use flags from the initial setting
options &= ~(kIOMemoryPreparedReadOnly);
_flags = options;
// upl_transpose> //
else
{
- err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
+ err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
#if IOTRACKING
if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
{
userWasActive = true;
// Stay awake after dropping demand for display power on
- if (kFullWakeReasonDisplayOn == fullWakeReason)
+ if (kFullWakeReasonDisplayOn == fullWakeReason) {
fullWakeReason = fFullWakeReasonDisplayOnAndLocalUser;
+ DLOG("User activity while in notification wake\n");
+ changePowerStateWithOverrideTo( ON_STATE, 0);
+ }
kdebugTrace(kPMLogUserActiveState, 0, 1, 0);
setProperty(gIOPMUserIsActiveKey, kOSBooleanTrue);
(kFullWakeReasonDisplayOn == fullWakeReason))
{
// kIOPMSleepReasonMaintenance?
+ DLOG("Display sleep while in notification wake\n");
changePowerStateWithOverrideTo( SLEEP_STATE, kIOPMSleepReasonMaintenance );
}
}
else if(type == kPEPanicRestartCPU || type == kPEPanicSync)
{
- IOCPURunPlatformPanicActions(type);
- PE_sync_panic_buffers();
+ // Do an initial sync to flush as much panic data as possible,
+ // in case we have a problem in one of the platform panic handlers.
+ // After running the platform handlers, do a final sync w/
+ // platform hardware quiesced for the panic.
+ PE_sync_panic_buffers();
+ IOCPURunPlatformPanicActions(type);
+ PE_sync_panic_buffers();
}
if (gIOPlatform) return gIOPlatform->haltRestart(type);
const OSSymbol * gIOConsoleSessionLoginDoneKey;
const OSSymbol * gIOConsoleSessionSecureInputPIDKey;
const OSSymbol * gIOConsoleSessionScreenLockedTimeKey;
-
+const OSSymbol * gIOConsoleSessionScreenIsLockedKey;
clock_sec_t gIOConsoleLockTime;
static bool gIOConsoleLoggedIn;
#if HIBERNATION
+static OSBoolean * gIOConsoleBooterLockState;
static uint32_t gIOScreenLockState;
#endif
static IORegistryEntry * gIOChosenEntry;
gIOConsoleSessionLoginDoneKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionLoginDoneKey);
gIOConsoleSessionSecureInputPIDKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionSecureInputPIDKey);
gIOConsoleSessionScreenLockedTimeKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenLockedTimeKey);
+ gIOConsoleSessionScreenIsLockedKey = OSSymbol::withCStringNoCopy(kIOConsoleSessionScreenIsLockedKey);
gIOConsoleUsersSeedValue = OSData::withBytesNoCopy(&gIOConsoleUsersSeed, sizeof(gIOConsoleUsersSeed));
{
sSystemPower = systemMessage;
#if HIBERNATION
- if ((kIOMessageSystemHasPoweredOn == systemMessage) && IOHibernateWasScreenLocked())
+ if (kIOMessageSystemHasPoweredOn == systemMessage)
{
- locked = kOSBooleanTrue;
+ uint32_t lockState = IOHibernateWasScreenLocked();
+ switch (lockState)
+ {
+ case 0:
+ break;
+ case kIOScreenLockLocked:
+ case kIOScreenLockFileVaultDialog:
+ gIOConsoleBooterLockState = kOSBooleanTrue;
+ break;
+ case kIOScreenLockNoLock:
+ gIOConsoleBooterLockState = 0;
+ break;
+ case kIOScreenLockUnlocked:
+ default:
+ gIOConsoleBooterLockState = kOSBooleanFalse;
+ break;
+ }
}
#endif /* HIBERNATION */
}
if (consoleUsers)
{
OSNumber * num = 0;
+ bool loginLocked = true;
+
gIOConsoleLoggedIn = false;
for (idx = 0;
(user = OSDynamicCast(OSDictionary, consoleUsers->getObject(idx)));
{
gIOConsoleLoggedIn |= ((kOSBooleanTrue == user->getObject(gIOConsoleSessionOnConsoleKey))
&& (kOSBooleanTrue == user->getObject(gIOConsoleSessionLoginDoneKey)));
+
+ loginLocked &= (kOSBooleanTrue == user->getObject(gIOConsoleSessionScreenIsLockedKey));
if (!num)
{
num = OSDynamicCast(OSNumber, user->getObject(gIOConsoleSessionScreenLockedTimeKey));
}
}
+#if HIBERNATION
+ if (!loginLocked) gIOConsoleBooterLockState = 0;
+#endif /* HIBERNATION */
gIOConsoleLockTime = num ? num->unsigned32BitValue() : 0;
}
{
locked = kOSBooleanTrue;
}
+#if HIBERNATION
+ else if (gIOConsoleBooterLockState)
+ {
+ locked = gIOConsoleBooterLockState;
+ }
+#endif /* HIBERNATION */
else if (gIOConsoleLockTime)
{
clock_sec_t now;
if( (data = OSDynamicCast( OSData, obj ))) {
len = data->getLength();
bytes = data->getBytesNoCopy();
+ if (!data->isSerializable()) len = 0;
} else if( (str = OSDynamicCast( OSString, obj ))) {
len = str->getLength() + 1;
do
{
- if (properties)
+ if (properties) return (kIOReturnUnsupported);
+#if 0
{
OSObject * obj;
vm_offset_t data;
if (kIOReturnSuccess != res)
break;
}
-
+#endif
crossEndian = (ndr.int_rep != NDR_record.int_rep);
if (crossEndian)
{
if (ool_input)
inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
- kIODirectionOut, current_task());
+ kIODirectionOut | kIOMemoryMapCopyOnWrite,
+ current_task());
args.structureInputDescriptor = inputMD;
if (ool_input)
inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
- kIODirectionOut, current_task());
+ kIODirectionOut | kIOMemoryMapCopyOnWrite,
+ current_task());
args.structureInputDescriptor = inputMD;
if (ool_input)
inputMD = IOMemoryDescriptor::withAddressRange(ool_input, ool_input_size,
- kIODirectionOut, current_task());
+ kIODirectionOut | kIOMemoryMapCopyOnWrite,
+ current_task());
args.structureInputDescriptor = inputMD;
bzero(reserved,sizeof(ExpansionData));
}
-
-#if DEBUG
- OSBacktrace ( reserved->allocationBacktrace, sizeof ( reserved->allocationBacktrace ) / sizeof ( reserved->allocationBacktrace[0] ) );
-#endif
-
+
if ( gateLock == NULL ) {
if ( !( gateLock = IORecursiveLockAlloc()) )
return false;
IOReturn RootDomainUserClient::clientClose( void )
{
- detach(fOwner);
+ terminate();
+ return kIOReturnSuccess;
+}
+
+void RootDomainUserClient::stop( IOService *provider)
+{
if(fOwningTask) {
task_deallocate(fOwningTask);
fOwningTask = 0;
}
- return kIOReturnSuccess;
+ super::stop(provider);
}
IOReturn RootDomainUserClient::externalMethod(
// Unused - retained for symbol compatibility
virtual IOExternalMethod * getTargetAndMethodForIndex( IOService ** targetP, UInt32 index ) APPLE_KEXT_OVERRIDE;
+ virtual void stop( IOService *provider) APPLE_KEXT_OVERRIDE;
};
#if DEVELOPMENT || DEBUG
+extern SInt32 gIOMemoryReferenceCount;
+
static int IOMultMemoryDescriptorTest(int newValue)
{
IOMemoryDescriptor * mds[3];
return (0);
}
+// <rdar://problem/27002624>
+static IOReturn
+BadFixedAllocTest(int newValue)
+{
+ IOBufferMemoryDescriptor * bmd;
+ IOMemoryMap * map;
+
+ bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
+ kIODirectionIn | kIOMemoryPageable, ptoa(1));
+ assert(bmd);
+ map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
+ assert(!map);
+
+ bmd->release();
+ return (0);
+}
+
// <rdar://problem/26466423>
static IOReturn
IODirectionPrepareNoZeroFillTest(int newValue)
return (0);
}
+
+// <rdar://problem/28190483>
+static IOReturn
+IOMemoryMapTest(uint32_t options)
+{
+ IOBufferMemoryDescriptor * bmd;
+ IOMemoryDescriptor * md;
+ IOMemoryMap * map;
+ uint32_t data;
+ user_addr_t p;
+ uint8_t * p2;
+ int r;
+ uint64_t time, nano;
+
+ bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
+ kIODirectionOutIn | kIOMemoryPageable, 0x4018+0x800);
+ assert(bmd);
+ p = (typeof(p)) bmd->getBytesNoCopy();
+ p += 0x800;
+ data = 0x11111111;
+ r = copyout(&data, p, sizeof(data));
+ assert(r == 0);
+ data = 0x22222222;
+ r = copyout(&data, p + 0x1000, sizeof(data));
+ assert(r == 0);
+ data = 0x33333333;
+ r = copyout(&data, p + 0x2000, sizeof(data));
+ assert(r == 0);
+ data = 0x44444444;
+ r = copyout(&data, p + 0x3000, sizeof(data));
+ assert(r == 0);
+
+ md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
+ kIODirectionOut | options,
+ current_task());
+ assert(md);
+ time = mach_absolute_time();
+ map = md->map(kIOMapReadOnly);
+ time = mach_absolute_time() - time;
+ assert(map);
+ absolutetime_to_nanoseconds(time, &nano);
+
+ p2 = (typeof(p2)) map->getVirtualAddress();
+ assert(0x11 == p2[0]);
+ assert(0x22 == p2[0x1000]);
+ assert(0x33 == p2[0x2000]);
+ assert(0x44 == p2[0x3000]);
+
+ data = 0x99999999;
+ r = copyout(&data, p + 0x2000, sizeof(data));
+ assert(r == 0);
+
+ assert(0x11 == p2[0]);
+ assert(0x22 == p2[0x1000]);
+ assert(0x44 == p2[0x3000]);
+ if (kIOMemoryMapCopyOnWrite & options) assert(0x33 == p2[0x2000]);
+ else assert(0x99 == p2[0x2000]);
+
+ IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
+ kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
+ nano);
+
+ map->release();
+ md->release();
+ bmd->release();
+
+ return (kIOReturnSuccess);
+}
+
+static int
+IOMemoryMapCopyOnWriteTest(int newValue)
+{
+ IOMemoryMapTest(0);
+ IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
+ return (0);
+}
+
int IOMemoryDescriptorTest(int newValue)
{
int result;
+ IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);
+
#if 0
if (6 == newValue)
{
}
#endif
+ result = IOMemoryMapCopyOnWriteTest(newValue);
+ if (result) return (result);
+
result = IOMultMemoryDescriptorTest(newValue);
if (result) return (result);
result = IODirectionPrepareNoZeroFillTest(newValue);
if (result) return (result);
+ result = BadFixedAllocTest(newValue);
+ if (result) return (result);
+
IOGeneralMemoryDescriptor * md;
vm_offset_t data[2];
vm_size_t bsize = 16*1024*1024;
vm_size_t srcsize, srcoffset, mapoffset, size;
kern_return_t kr;
+ data[0] = data[1] = 0;
kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
+ assert(KERN_SUCCESS == kr);
+
vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);
bzero(&ranges[0], sizeof(ranges));
ranges[0].address = data[0] + srcoffset;
ranges[0].length = srcsize;
+ ranges[1].address = ranges[2].address = data[0];
if (srcsize > ptoa(5))
{
assert(md);
IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
- (long) srcsize, (long) srcoffset,
+ (long) srcsize, (long) srcoffset,
(long long) ranges[0].address - data[0], (long long) ranges[0].length,
(long long) ranges[1].address - data[0], (long long) ranges[1].length,
(long long) ranges[2].address - data[0], (long long) ranges[2].length);
vm_deallocate(kernel_map, data[0], bsize);
// vm_deallocate(kernel_map, data[1], size);
+ IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);
+
return (0);
}
}
reserved->disableSerialization = (!serializable);
}
+
+bool OSData::isSerializable(void)
+{
+ return (!reserved || !reserved->disableSerialization);
+}
goto finish;
}
+ IORecursiveLockLock(sKextLock);
if (!sAllKextLoadIdentifiers->containsObject(kextIdentifierSymbol)) {
if (!sAllKextLoadIdentifiers->setObject(kextIdentifierSymbol)) {
fail = true;
kextIdentifier->getCStringNoCopy());
}
}
+ IORecursiveLockUnlock(sKextLock);
+
finish:
if (fail) {
unsigned int newCapacity;
size_t alignSize;
- alignSize = ((size + 3) & ~3L);
- newCapacity = length + alignSize;
+ if (os_add_overflow(size, 3, &alignSize)) return (false);
+ alignSize &= ~3L;
+ if (os_add_overflow(length, alignSize, &newCapacity)) return (false);
if (newCapacity >= capacity)
{
newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement;
// add to tag array
tags->setObject(o);
- alignSize = ((size + sizeof(key) + 3) & ~3L);
- newCapacity = length + alignSize;
+ if (os_add3_overflow(size, sizeof(key), 3, &alignSize)) return (false);
+ alignSize &= ~3L;
+ if (os_add_overflow(length, alignSize, &newCapacity)) return (false);
if (newCapacity >= capacity)
{
newCapacity = (((newCapacity - 1) / capacityIncrement) + 1) * capacityIncrement;
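
The two hunks above replace the unchecked (size + 3) & ~3L rounding with overflow-checked additions, so a huge size cannot wrap alignSize or newCapacity into a small value and defeat the capacity check. A minimal sketch of the same pattern using the compiler builtin that xnu's os_add_overflow wraps (the helper name below is hypothetical):

#include <stdbool.h>
#include <stddef.h>

/*
 * Round size up to a 4-byte boundary and add it to length, failing
 * instead of silently wrapping on overflow.
 */
static bool grow_capacity(size_t length, size_t size, size_t *new_capacity)
{
    size_t align_size;

    if (__builtin_add_overflow(size, 3, &align_size))
        return false;                 /* size + 3 would wrap            */
    align_size &= ~(size_t)3;         /* round up to a 4-byte multiple  */

    if (__builtin_add_overflow(length, align_size, new_capacity))
        return false;                 /* length + align_size would wrap */

    return true;
}
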
OSObject ** stackArray;
uint32_t stackCapacity;
- enum { stackCapacityMax = 64*1024 };
+ enum { stackCapacityMax = 64 };
uint32_t stackIdx;
OSObject * result;
if (!ok) break;
+ if (end) parent = 0;
if (newCollect)
{
- if (!end)
- {
- stackIdx++;
- setAtIndex(stack, stackIdx, parent);
- if (!ok) break;
- }
+ stackIdx++;
+ setAtIndex(stack, stackIdx, parent);
+ if (!ok) break;
DEBG("++stack[%d] %p\n", stackIdx, parent);
parent = o;
dict = newDict;
if (end)
{
- if (!stackIdx) break;
- parent = stackArray[stackIdx];
- DEBG("--stack[%d] %p\n", stackIdx, parent);
- stackIdx--;
- set = 0;
+ while (stackIdx)
+ {
+ parent = stackArray[stackIdx];
+ DEBG("--stack[%d] %p\n", stackIdx, parent);
+ stackIdx--;
+ if (parent) break;
+ }
+ if (!parent) break;
+ set = 0;
dict = 0;
array = 0;
if (!(dict = OSDynamicCast(OSDictionary, parent)))
newSymb->OSString::free();
}
- oldSymb->retain(); // Retain the old symbol before releasing the lock.
+ if (oldSymb) oldSymb->retain(); // Retain the old symbol before releasing the lock.
pool->openGate();
return oldSymb;
/* YYRLINE[YYN] -- source line where rule number YYN was defined. */
static const yytype_uint16 yyrline[] =
{
- 0, 149, 149, 152, 157, 162, 170, 178, 186, 194,
- 202, 210, 218, 237, 240, 243, 246, 247, 262, 271,
- 283, 286, 289, 292, 295, 298, 301, 304, 311, 314,
- 317, 320, 323
+ 0, 149, 149, 152, 157, 162, 174, 186, 198, 210,
+ 222, 234, 246, 265, 268, 271, 274, 275, 290, 299,
+ 311, 314, 317, 320, 323, 326, 329, 332, 339, 342,
+ 345, 348, 351
};
#endif
/* YYINITDEPTH -- initial size of the parser's stacks. */
#ifndef YYINITDEPTH
-# define YYINITDEPTH 200
+# define YYINITDEPTH 64
#endif
/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
#line 162 "OSUnserializeXML.y"
{ (yyval) = buildDictionary(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildDictionary");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 6:
-#line 170 "OSUnserializeXML.y"
+#line 174 "OSUnserializeXML.y"
{ (yyval) = buildArray(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildArray");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 7:
-#line 178 "OSUnserializeXML.y"
+#line 186 "OSUnserializeXML.y"
{ (yyval) = buildSet(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildSet");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 8:
-#line 186 "OSUnserializeXML.y"
+#line 198 "OSUnserializeXML.y"
{ (yyval) = buildString(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildString");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 9:
-#line 194 "OSUnserializeXML.y"
+#line 210 "OSUnserializeXML.y"
{ (yyval) = buildData(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildData");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 10:
-#line 202 "OSUnserializeXML.y"
+#line 222 "OSUnserializeXML.y"
{ (yyval) = buildNumber(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildNumber");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 11:
-#line 210 "OSUnserializeXML.y"
+#line 234 "OSUnserializeXML.y"
{ (yyval) = buildBoolean(STATE, (yyvsp[(1) - (1)]));
+ if (!yyval->object) {
+ yyerror("buildBoolean");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
break;
case 12:
-#line 218 "OSUnserializeXML.y"
+#line 246 "OSUnserializeXML.y"
{ (yyval) = retrieveObject(STATE, (yyvsp[(1) - (1)])->idref);
if ((yyval)) {
(yyval)->object->retain();
break;
case 13:
-#line 237 "OSUnserializeXML.y"
+#line 265 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (2)]);
(yyval)->elements = NULL;
;}
break;
case 14:
-#line 240 "OSUnserializeXML.y"
+#line 268 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (3)]);
(yyval)->elements = (yyvsp[(2) - (3)]);
;}
break;
case 17:
-#line 247 "OSUnserializeXML.y"
+#line 275 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(2) - (2)]);
(yyval)->next = (yyvsp[(1) - (2)]);
break;
case 18:
-#line 262 "OSUnserializeXML.y"
+#line 290 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (2)]);
(yyval)->key = (OSSymbol *)(yyval)->object;
(yyval)->object = (yyvsp[(2) - (2)])->object;
break;
case 19:
-#line 271 "OSUnserializeXML.y"
+#line 299 "OSUnserializeXML.y"
{ (yyval) = buildSymbol(STATE, (yyvsp[(1) - (1)]));
// STATE->parsedObjectCount++;
break;
case 20:
-#line 283 "OSUnserializeXML.y"
+#line 311 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (2)]);
(yyval)->elements = NULL;
;}
break;
case 21:
-#line 286 "OSUnserializeXML.y"
+#line 314 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (3)]);
(yyval)->elements = (yyvsp[(2) - (3)]);
;}
break;
case 23:
-#line 292 "OSUnserializeXML.y"
+#line 320 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (2)]);
(yyval)->elements = NULL;
;}
break;
case 24:
-#line 295 "OSUnserializeXML.y"
+#line 323 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (3)]);
(yyval)->elements = (yyvsp[(2) - (3)]);
;}
break;
case 26:
-#line 301 "OSUnserializeXML.y"
+#line 329 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(1) - (1)]);
(yyval)->next = NULL;
;}
break;
case 27:
-#line 304 "OSUnserializeXML.y"
+#line 332 "OSUnserializeXML.y"
{ (yyval) = (yyvsp[(2) - (2)]);
(yyval)->next = (yyvsp[(1) - (2)]);
;}
/* Line 1267 of yacc.c. */
-#line 1671 "OSUnserializeXML.tab.c"
+#line 1699 "OSUnserializeXML.tab.c"
default: break;
}
YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
}
-#line 326 "OSUnserializeXML.y"
+#line 354 "OSUnserializeXML.y"
int
object: dict { $$ = buildDictionary(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildDictionary");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| array { $$ = buildArray(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildArray");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| set { $$ = buildSet(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildSet");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| string { $$ = buildString(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildString");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| data { $$ = buildData(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildData");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| number { $$ = buildNumber(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildNumber");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
}
| boolean { $$ = buildBoolean(STATE, $1);
+ if (!yyval->object) {
+ yyerror("buildBoolean");
+ YYERROR;
+ }
STATE->parsedObjectCount++;
if (STATE->parsedObjectCount > MAX_OBJECTS) {
yyerror("maximum object count");
if (c == '\n') state->lineNumber++;
if (c != '?') continue;
c = nextChar();
+ if (!c) return TAG_IGNORE;
if (c == '>') {
(void)nextChar();
return TAG_IGNORE;
values[*attributeCount][length++] = c;
if (length >= (TAG_MAX_LENGTH - 1)) return TAG_BAD;
c = nextChar();
+ if (!c) return TAG_BAD;
}
values[*attributeCount][length] = 0;
OSUnserializeXML(const char *buffer, OSString **errorString)
{
OSObject *object;
- parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t));
- if ((!state) || (!buffer)) return 0;
+ if (!buffer) return 0;
+ parser_state_t *state = (parser_state_t *)malloc(sizeof(parser_state_t));
+ if (!state) return 0;
// just in case
if (errorString) *errorString = NULL;
return object;
}
+#include <libkern/OSSerializeBinary.h>
+
OSObject*
OSUnserializeXML(const char *buffer, size_t bufferSize, OSString **errorString)
{
- if ((!buffer) || (!bufferSize)) return 0;
+ if (!buffer) return (0);
+ if (bufferSize < sizeof(kOSSerializeBinarySignature)) return (0);
+
+ if (!strcmp(kOSSerializeBinarySignature, buffer)) return OSUnserializeBinary(buffer, bufferSize, errorString);
// XML must be null terminated
- if (buffer[bufferSize - 1] || strnlen(buffer, bufferSize) == bufferSize) return 0;
+ if (buffer[bufferSize - 1]) return 0;
return OSUnserializeXML(buffer, errorString);
}
libkern/crypto/corecrypto_des.c optional crypto
libkern/crypto/corecrypto_aes.c optional crypto
libkern/crypto/corecrypto_aesxts.c optional crypto
+libkern/crypto/corecrypto_rand.c optional crypto
+libkern/crypto/corecrypto_rsa.c optional crypto
libkern/stack_protector.c standard
return aes_error;
}
- ccgcm_init(gcm, ctx, key_len, key);
- return aes_good;
+ return ccgcm_init(gcm, ctx, key_len, key);
+}
+
+aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv);
}
aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx)
return aes_error;
}
- ccgcm_set_iv(gcm, ctx, len, in_iv);
- return aes_good;
+ return ccgcm_set_iv(gcm, ctx, len, in_iv);
+}
+
+aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return ccgcm_reset(gcm, ctx);
+}
+
+aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv);
}
aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx)
return aes_error;
}
- ccgcm_gmac(gcm, ctx, aad_bytes, aad);
- return aes_good;
+ return ccgcm_gmac(gcm, ctx, aad_bytes, aad);
}
aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes,
return aes_error;
}
- ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt.
- return aes_good;
+ return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm encrypt.
}
aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx)
{
+ int rc;
const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
if (!gcm) {
return aes_error;
}
- ccgcm_finalize(gcm, ctx, tag_bytes, tag);
- ccgcm_reset(gcm, ctx);
- return aes_good;
+ rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag);
+ rc |= ccgcm_reset(gcm, ctx);
+ return rc;
}
aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx)
return aes_error;
}
- ccgcm_init(gcm, ctx, key_len, key);
- return aes_good;
+ return ccgcm_init(gcm, ctx, key_len, key);
+}
+
+aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return g_crypto_funcs->ccgcm_init_with_iv_fn(gcm, ctx, key_len, key, in_iv);
}
aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx)
{
+ int rc;
+
const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
if (!gcm) {
return aes_error;
}
- ccgcm_set_iv(gcm, ctx, len, in_iv);
- return aes_good;
+ rc = ccgcm_reset(gcm, ctx);
+ rc |= ccgcm_set_iv(gcm, ctx, len, in_iv);
+ return rc;
+}
+
+aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return ccgcm_reset(gcm, ctx);
+}
+
+aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ return g_crypto_funcs->ccgcm_inc_iv_fn(gcm, ctx, out_iv);
}
aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx)
return aes_error;
}
- ccgcm_gmac(gcm, ctx, aad_bytes, aad);
- return aes_good;
+ return ccgcm_gmac(gcm, ctx, aad_bytes, aad);
}
aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes,
return aes_error;
}
- ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt.
- return aes_good;
+ return ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); //Actually gcm decrypt.
}
aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx)
{
+ int rc;
const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
if (!gcm) {
return aes_error;
}
- ccgcm_finalize(gcm, ctx, tag_bytes, tag);
- ccgcm_reset(gcm, ctx);
- return aes_good;
+ rc = ccgcm_finalize(gcm, ctx, tag_bytes, tag);
+ rc |= ccgcm_reset(gcm, ctx);
+ return rc;
}
unsigned aes_encrypt_get_ctx_size_gcm(void)
/* Triple DES ECB - used by ipv6 (esp_core.c) */
int des3_ecb_key_sched(des_cblock *key, des3_ecb_key_schedule *ks)
{
+ int rc;
const struct ccmode_ecb *enc = g_crypto_funcs->cctdes_ecb_encrypt;
const struct ccmode_ecb *dec = g_crypto_funcs->cctdes_ecb_decrypt;
if((enc->size>sizeof(ks->enc)) || (dec->size>sizeof(ks->dec)))
panic("%s: inconsistent size for 3DES-ECB context", __FUNCTION__);
- enc->init(enc, ks->enc, CCDES_KEY_SIZE*3, key);
- dec->init(dec, ks->dec, CCDES_KEY_SIZE*3, key);
+ rc = enc->init(enc, ks->enc, CCDES_KEY_SIZE*3, key);
+ rc |= dec->init(dec, ks->dec, CCDES_KEY_SIZE*3, key);
- /* The old DES interface could return -1 or -2 for weak keys and wrong parity,
- but this was disabled all the time, so we never fail here */
- return 0;
+ return rc;
}
/* Simple des - 1 block */
--- /dev/null
+/*
+ * Copyright (c) 2016 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <libkern/crypto/crypto_internal.h>
+#include <corecrypto/ccrng.h>
+#include <libkern/crypto/rand.h>
+
+int
+cc_rand_generate(void *out, size_t outlen)
+{
+ struct ccrng_state *rng_state = NULL;
+ int error = -1;
+
+ if (g_crypto_funcs) {
+ rng_state = g_crypto_funcs->ccrng_fn(&error);
+ if (rng_state != NULL) {
+ error = ccrng_generate(rng_state, outlen, out);
+ }
+ }
+
+ return error;
+}
--- /dev/null
+/*
+ * Copyright (c) 2016 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#include <libkern/crypto/crypto_internal.h>
+#include <libkern/crypto/rsa.h>
+#include <corecrypto/ccrsa.h>
+
+
+int rsa_make_pub(rsa_pub_ctx *pub,
+ size_t exp_nbytes, const uint8_t *exp,
+ size_t mod_nbytes, const uint8_t *mod) {
+ if ((exp_nbytes>RSA_MAX_KEY_BITSIZE/8)
+ || (mod_nbytes>RSA_MAX_KEY_BITSIZE/8)) {
+ return -1; // Too big
+ }
+ ccrsa_ctx_n(pub->key) = ccn_nof(RSA_MAX_KEY_BITSIZE);
+ return g_crypto_funcs->ccrsa_make_pub_fn(pub->key,
+ exp_nbytes, exp,
+ mod_nbytes, mod);
+}
+
+int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid,
+ size_t digest_len, const uint8_t *digest,
+ size_t sig_len, const uint8_t *sig,
+ bool *valid) {
+ return g_crypto_funcs->ccrsa_verify_pkcs1v15_fn(pub->key,oid,
+ digest_len,digest,
+ sig_len,sig,valid);
+}
+
+
#endif
virtual void setDeallocFunction(DeallocFunction func);
OSMetaClassDeclareReservedUsed(OSData, 0);
+ bool isSerializable(void);
private:
OSMetaClassDeclareReservedUnused(OSData, 1);
DATAFILES = md5.h sha1.h
-PRIVATE_DATAFILES = register_crypto.h sha2.h des.h aes.h aesxts.h
+PRIVATE_DATAFILES = register_crypto.h sha2.h des.h aes.h aesxts.h rand.h rsa.h
INSTALL_KF_MI_LIST = ${DATAFILES}
unsigned char *out_blk, aes_decrypt_ctx cx[1]);
aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx);
aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_reset_gcm(ccgcm_ctx *ctx);
+aes_rval aes_encrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx);
aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx);
aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx);
aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx);
unsigned aes_encrypt_get_ctx_size_gcm(void);
aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_key_with_iv_gcm(const unsigned char *key, int key_len, const unsigned char *in_iv, ccgcm_ctx *ctx);
aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_reset_gcm(ccgcm_ctx *ctx);
+aes_rval aes_decrypt_inc_iv_gcm(unsigned char *out_iv, ccgcm_ctx *ctx);
aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx);
aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx);
aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx);
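For reference, a caller sealing a single message would drive the encrypt-side wrappers roughly as in the sketch below. This is illustrative only and not part of the change: example_gcm_seal and its parameters are hypothetical, and ctx_buf is assumed to point at aes_encrypt_get_ctx_size_gcm() bytes of storage.

/* Minimal sketch: one-shot GCM seal through the wrappers declared above.
 * ctx_buf must point at aes_encrypt_get_ctx_size_gcm() bytes of storage. */
static aes_rval
example_gcm_seal(const unsigned char *key, int key_len,
                 const unsigned char *iv, unsigned int iv_len,
                 const unsigned char *aad, unsigned int aad_len,
                 const unsigned char *in, unsigned int in_len,
                 unsigned char *out,
                 unsigned char *tag, unsigned int tag_len,
                 ccgcm_ctx *ctx_buf)
{
    aes_rval rc;

    if ((rc = aes_encrypt_key_gcm(key, key_len, ctx_buf)))            return rc;
    if ((rc = aes_encrypt_set_iv_gcm(iv, iv_len, ctx_buf)))           return rc;
    if (aad_len && (rc = aes_encrypt_aad_gcm(aad, aad_len, ctx_buf))) return rc;
    if ((rc = aes_encrypt_gcm(in, in_len, out, ctx_buf)))             return rc;
    /* finalize emits the tag and, per the implementation above, also resets the context */
    return aes_encrypt_finalize_gcm(tag, tag_len, ctx_buf);
}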
--- /dev/null
+/*
+ * Copyright (c) 2016 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _RAND_H
+#define _RAND_H
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+int cc_rand_generate(void *out, size_t outlen);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
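A caller of the new interface looks roughly like the sketch below (illustrative only; the nonce buffer and its size are hypothetical).

/* Minimal sketch: fill a buffer with random bytes from the registered corecrypto RNG. */
uint8_t nonce[16];
if (cc_rand_generate(nonce, sizeof(nonce)) != 0) {
    /* RNG not registered yet, or generation failed */
}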
#include <corecrypto/cchmac.h>
#include <corecrypto/ccmode.h>
#include <corecrypto/ccrc4.h>
+#include <corecrypto/ccrng.h>
+#include <corecrypto/ccrsa.h>
/* Function types */
const void *key, unsigned long data_len, const void *data,
unsigned char *mac);
+/* gcm */
+typedef int (*ccgcm_init_with_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx,
+ size_t key_nbytes, const void *key,
+ const void *iv);
+typedef int (*ccgcm_inc_iv_fn_t)(const struct ccmode_gcm *mode, ccgcm_ctx *ctx, void *iv);
+
+
/* pbkdf2 */
typedef void (*ccpbkdf2_hmac_fn_t)(const struct ccdigest_info *di,
unsigned long passwordLen, const void *password,
typedef size_t (*ccpad_cts3_crypt_fn_t)(const struct ccmode_cbc *cbc, cccbc_ctx *cbc_key,
cccbc_iv *iv, size_t nbytes, const void *in, void *out);
+/* rng */
+typedef struct ccrng_state *(*ccrng_fn_t)(int *error);
+
+/* rsa */
+typedef int (*ccrsa_make_pub_fn_t)(ccrsa_pub_ctx_t pubk,
+ size_t exp_nbytes, const uint8_t *exp,
+ size_t mod_nbytes, const uint8_t *mod);
+
+typedef int (*ccrsa_verify_pkcs1v15_fn_t)(ccrsa_pub_ctx_t key, const uint8_t *oid,
+ size_t digest_len, const uint8_t *digest,
+ size_t sig_len, const uint8_t *sig,
+ bool *valid);
+
typedef struct crypto_functions {
/* digests common functions */
ccdigest_init_fn_t ccdigest_init_fn;
const struct ccmode_xts *ccaes_xts_decrypt;
const struct ccmode_gcm *ccaes_gcm_encrypt;
const struct ccmode_gcm *ccaes_gcm_decrypt;
+
+ ccgcm_init_with_iv_fn_t ccgcm_init_with_iv_fn;
+ ccgcm_inc_iv_fn_t ccgcm_inc_iv_fn;
+
/* DES, ecb and cbc */
const struct ccmode_ecb *ccdes_ecb_encrypt;
const struct ccmode_ecb *ccdes_ecb_decrypt;
ccpad_xts_decrypt_fn_t ccpad_xts_decrypt_fn;
/* CTS3 padding+encrypt functions */
ccpad_cts3_crypt_fn_t ccpad_cts3_encrypt_fn;
- ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn;
+ ccpad_cts3_crypt_fn_t ccpad_cts3_decrypt_fn;
+
+ /* rng */
+ ccrng_fn_t ccrng_fn;
+
+ /* rsa */
+ ccrsa_make_pub_fn_t ccrsa_make_pub_fn;
+ ccrsa_verify_pkcs1v15_fn_t ccrsa_verify_pkcs1v15_fn;
} *crypto_functions_t;
int register_crypto_functions(const crypto_functions_t funcs);
--- /dev/null
+/*
+ * Copyright (c) 2016 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
+ */
+
+#ifndef _RSA_H
+#define _RSA_H
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+
+#include <corecrypto/ccrsa.h>
+#define RSA_MAX_KEY_BITSIZE 4096
+
+typedef struct{
+ ccrsa_pub_ctx_decl(ccn_sizeof(RSA_MAX_KEY_BITSIZE),key);
+} rsa_pub_ctx;
+
+int rsa_make_pub(rsa_pub_ctx *pub,
+ size_t exp_nbytes, const uint8_t *exp,
+ size_t mod_nbytes, const uint8_t *mod);
+
+int rsa_verify_pkcs1v15(rsa_pub_ctx *pub, const uint8_t *oid,
+ size_t digest_len, const uint8_t *digest,
+ size_t sig_len, const uint8_t *sig,
+ bool *valid);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif
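The intended call sequence for the new RSA interface is sketched below. It is illustrative only: example_verify and all of its parameters are hypothetical, and oid is whichever digest OID the caller's hash requires.

/* Minimal sketch: import a public key, then check a PKCS#1 v1.5 signature. */
static int
example_verify(const uint8_t *exp, size_t exp_nbytes,
               const uint8_t *mod, size_t mod_nbytes,
               const uint8_t *oid,
               const uint8_t *digest, size_t digest_len,
               const uint8_t *sig, size_t sig_len)
{
    rsa_pub_ctx pub;
    bool valid = false;

    if (rsa_make_pub(&pub, exp_nbytes, exp, mod_nbytes, mod) != 0)
        return -1;              /* key exceeds RSA_MAX_KEY_BITSIZE or is malformed */
    if (rsa_verify_pkcs1v15(&pub, oid, digest_len, digest,
                            sig_len, sig, &valid) != 0)
        return -1;              /* verification error */
    return valid ? 0 : -1;      /* 0 only for a good signature */
}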
return count;
}
+int
+proc_setcpu_percentage(pid_t pid, int action, int percentage)
+{
+ proc_policy_cpuusage_attr_t attr;
+
+ bzero(&attr, sizeof(proc_policy_cpuusage_attr_t));
+ attr.ppattr_cpu_attr = action;
+ attr.ppattr_cpu_percentage = percentage;
+ if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_APPLY, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, (proc_policy_attribute_t*)&attr, pid, (uint64_t)0) != -1)
+ return(0);
+ else
+ return(errno);
+}
+
+int
+proc_clear_cpulimits(pid_t pid)
+{
+ if (__process_policy(PROC_POLICY_SCOPE_PROCESS, PROC_POLICY_ACTION_RESTORE, PROC_POLICY_RESOURCE_USAGE, PROC_POLICY_RUSAGE_CPU, NULL, pid, (uint64_t)0) != -1)
+ return(0);
+ else
+ return(errno);
+}
+
/* Donate importance to adaptive processes from this process */
__BEGIN_DECLS
+/* CPU monitor action */
+#define PROC_SETCPU_ACTION_NONE 0
+#define PROC_SETCPU_ACTION_THROTTLE 1
+
+int proc_setcpu_percentage(pid_t pid, int action, int percentage) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0);
+int proc_clear_cpulimits(pid_t pid) __OSX_AVAILABLE_STARTING(__MAC_10_12_2, __IPHONE_5_0);
+
+/* CPU limit, applies to the current thread only. 0% unsets the limit */
+int proc_setthread_cpupercent(uint8_t percentage, uint32_t ms_refill) __OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_5_0);
+
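As a sketch of how the new process-scope calls compose (illustrative only; throttle_then_restore and the 50% figure are hypothetical):

/* Minimal sketch: cap a target process at 50% CPU, then remove the cap. */
static int
throttle_then_restore(pid_t pid)
{
    int err = proc_setcpu_percentage(pid, PROC_SETCPU_ACTION_THROTTLE, 50);
    if (err != 0)
        return err;
    /* ... work runs while the target is throttled ... */
    return proc_clear_cpulimits(pid);
}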
/* resume the process suspend due to low VM resource */
int proc_clear_vmpressure(pid_t pid);
__abort_with_payload(reason_namespace, reason_code, payload, payload_size,
reason_string, reason_flags);
- /* If sending a SIGABRT failed, we try to fall back to SIGKILL */
+ /* If sending a SIGABRT failed, we fall back to SIGKILL */
terminate_with_payload(getpid(), reason_namespace, reason_code, payload, payload_size,
reason_string, reason_flags);
- /* Last resort, let's use SIGTRAP (SIGILL on i386) */
- sigemptyset(&unmask_signal);
- sigaddset(&unmask_signal, SIGTRAP);
- sigaddset(&unmask_signal, SIGILL);
- sigprocmask(SIG_UNBLOCK, &unmask_signal, NULL);
-
- __builtin_trap();
+ __builtin_unreachable();
}
void
-/*
- * ccsha1_eay.c
- * corecrypto
- *
- * Created on 12/06/2010
- *
- * Copyright (c) 2010,2011,2012,2015 Apple Inc. All rights reserved.
- *
- */
-
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
* All rights reserved.
*
if (PC != 0)
kmod_panic_dump(&PC, 1);
- panic_display_system_configuration();
+ panic_display_system_configuration(FALSE);
doprnt_hide_pointers = old_doprnt_hide_pointers;
print_thread_num_that_crashed(task);
print_threads_registers(thread);
print_tasks_user_threads(task);
- kdb_printf("Mac OS version: %s\n", (osversion[0] != 0) ? osversion : "Not yet set");
- kdb_printf("Kernel version: %s\n", version);
- panic_display_kernel_uuid();
- panic_display_model_name();
+
+ panic_display_system_configuration(TRUE);
/* Release print backtrace lock, to permit other callers in the
* event of panics on multiple processors.
void
machine_thread_going_on_core(__unused thread_t new_thread,
__unused int urgency,
- __unused uint64_t sched_latency)
+ __unused uint64_t sched_latency,
+ __unused uint64_t dispatch_time)
{
}
void
-machine_thread_going_off_core(__unused thread_t old_thread, __unused boolean_t thread_terminating)
+machine_thread_going_off_core(__unused thread_t old_thread, __unused boolean_t thread_terminating, __unused uint64_t last_dispatch)
{
}
#if defined(__x86_64__)
#define PMAP_DEACTIVATE_MAP(map, thread, ccpu) \
-/*
pmap_assert2((pmap_pcid_ncpus ? (pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu) == (get_cr3_raw() & 0xFFF)) : TRUE),"PCIDs: 0x%x, active PCID: 0x%x, CR3: 0x%lx, pmap_cr3: 0x%llx, kernel_cr3: 0x%llx, kernel pmap cr3: 0x%llx, CPU active PCID: 0x%x, CPU kernel PCID: 0x%x, specflags: 0x%x, pagezero: 0x%x", pmap_pcid_ncpus, pcid_for_pmap_cpu_tuple(map->pmap, thread, ccpu), get_cr3_raw(), map->pmap->pm_cr3, cpu_datap(ccpu)->cpu_kernel_cr3, kernel_pmap->pm_cr3, cpu_datap(ccpu)->cpu_active_pcid, cpu_datap(ccpu)->cpu_kernel_pcid, thread->machine.specFlags, map->pmap->pagezero_accessible);
-*/
#else
#define PMAP_DEACTIVATE_MAP(map, thread)
#endif
(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_INTERNAL))
#define IS_REUSABLE_PAGE(x) \
(IS_MANAGED_PAGE(x) && (pmap_phys_attributes[x] & PHYS_REUSABLE))
-#define IS_ALTACCT_PAGE(x) \
+#define IS_ALTACCT_PAGE(x,pve) \
(IS_MANAGED_PAGE((x)) && \
- (PVE_IS_ALTACCT_PAGE(&pv_head_table[(x)])))
+ (PVE_IS_ALTACCT_PAGE((pve))))
/*
* Physical page attributes. Copy bits from PTE definition.
uint32_t bitdex;
pmap_t pvpmap = pv_h->pmap;
vm_map_offset_t pvva = PVE_VA(pv_h);
- vm_map_offset_t pve_flags = PVE_FLAGS(pv_h);
+ vm_map_offset_t pve_flags;
boolean_t ppcd = FALSE;
boolean_t is_ept;
do {
if ((popcnt1((uintptr_t)pv_e->pmap ^ (uintptr_t)pmap) && PVE_VA(pv_e) == vaddr) ||
(pv_e->pmap == pmap && popcnt1(PVE_VA(pv_e) ^ vaddr))) {
+ pve_flags = PVE_FLAGS(pv_e);
pv_e->pmap = pmap;
- if (pv_e == pv_h) {
- pv_h->va_and_flags = vaddr | pve_flags;
- } else {
- pv_e->va_and_flags = vaddr;
- }
+ pv_h->va_and_flags = vaddr | pve_flags;
suppress_reason = PV_BITFLIP;
action = PMAP_ACTION_RETRY;
goto pmap_cpc_exit;
static inline __attribute__((always_inline)) pv_hashed_entry_t
pmap_pv_remove(pmap_t pmap,
vm_map_offset_t vaddr,
- ppnum_t *ppnp,
- pt_entry_t *pte)
+ ppnum_t *ppnp,
+ pt_entry_t *pte,
+ boolean_t *was_altacct)
{
pv_hashed_entry_t pvh_e;
pv_rooted_entry_t pv_h;
uint32_t pv_cnt;
ppnum_t ppn;
+ *was_altacct = FALSE;
pmap_pv_remove_retry:
ppn = *ppnp;
pvh_e = PV_HASHED_ENTRY_NULL;
}
if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
+ *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pv_h);
/*
* Header is the pv_rooted_entry.
* We can't free that. If there is a queued
*/
pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
if (pv_h != (pv_rooted_entry_t) pvh_e) {
- vm_map_offset_t pve_flags;
-
/*
* Entry queued to root, remove this from hash
* and install as new root.
pmap_pvh_unlink(pvh_e);
UNLOCK_PV_HASH(pvhash_idx);
pv_h->pmap = pvh_e->pmap;
- pve_flags = PVE_FLAGS(pv_h);
- pv_h->va_and_flags = PVE_VA(pvh_e) | pve_flags;
+ pv_h->va_and_flags = pvh_e->va_and_flags;
/* dispose of pvh_e */
} else {
/* none queued after rooted */
}
}
+ *was_altacct = IS_ALTACCT_PAGE(ppn_to_pai(*ppnp), pvh_e);
+
pmap_pv_hashlist_cnts += pv_cnt;
if (pmap_pv_hashlist_max < pv_cnt)
pmap_pv_hashlist_max = pv_cnt;
return pvh_e;
}
+static inline __attribute__((always_inline)) boolean_t
+pmap_pv_is_altacct(
+ pmap_t pmap,
+ vm_map_offset_t vaddr,
+ ppnum_t ppn)
+{
+ pv_hashed_entry_t pvh_e;
+ pv_rooted_entry_t pv_h;
+ int pvhash_idx;
+ boolean_t is_altacct;
+
+ pvh_e = PV_HASHED_ENTRY_NULL;
+ pv_h = pai_to_pvh(ppn_to_pai(ppn));
+
+ if (__improbable(pv_h->pmap == PMAP_NULL)) {
+ return FALSE;
+ }
+
+ if (PVE_VA(pv_h) == vaddr && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_rooted_entry.
+ */
+ return IS_ALTACCT_PAGE(ppn, pv_h);
+ }
+
+ CHK_NPVHASH();
+ pvhash_idx = pvhashidx(pmap, vaddr);
+ LOCK_PV_HASH(pvhash_idx);
+ pvh_e = *(pvhash(pvhash_idx));
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ panic("Possible memory corruption: pmap_pv_is_altacct(%p,0x%llx,0x%x): empty hash",
+ pmap, vaddr, ppn);
+ }
+ while (PV_HASHED_ENTRY_NULL != pvh_e) {
+ if (pvh_e->pmap == pmap &&
+ PVE_VA(pvh_e) == vaddr &&
+ pvh_e->ppn == ppn)
+ break;
+ pvh_e = pvh_e->nexth;
+ }
+ if (PV_HASHED_ENTRY_NULL == pvh_e) {
+ is_altacct = FALSE;
+ } else {
+ is_altacct = IS_ALTACCT_PAGE(ppn, pvh_e);
+ }
+ UNLOCK_PV_HASH(pvhash_idx);
+
+ return is_altacct;
+}
extern int pt_fake_zone_index;
static inline void
pt_entry_t old_pte;
kern_return_t kr_expand;
boolean_t is_ept;
+ boolean_t is_altacct;
pmap_intr_assert();
*/
if (old_pa != (pmap_paddr_t) 0) {
+ boolean_t was_altacct;
/*
* Don't do anything to pages outside valid memory here.
/* completely invalidate the PTE */
pmap_store_pte(pte, 0);
+ if (IS_MANAGED_PAGE(pai)) {
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ * We'll end up with either a rooted pv or a
+ * hashed pv
+ */
+ pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte, &was_altacct);
+ }
+
if (IS_MANAGED_PAGE(pai)) {
pmap_assert(old_pa_locked == TRUE);
pmap_ledger_debit(pmap, task_ledgers.phys_mem, PAGE_SIZE);
assert(pmap->stats.resident_count >= 1);
OSAddAtomic(-1, &pmap->stats.resident_count);
if (pmap != kernel_pmap) {
+ /* update pmap stats */
if (IS_REUSABLE_PAGE(pai)) {
PMAP_STATS_ASSERTF(
(pmap->stats.reusable > 0,
"internal %d",
pmap->stats.internal));
OSAddAtomic(-1, &pmap->stats.internal);
- pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
- if (IS_ALTACCT_PAGE(pai)) {
- pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
- } else {
- pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
- }
} else {
PMAP_STATS_ASSERTF(
(pmap->stats.external > 0,
pmap->stats.external));
OSAddAtomic(-1, &pmap->stats.external);
}
+
+ /* update ledgers */
+ if (was_altacct) {
+ assert(IS_INTERNAL_PAGE(pai));
+ pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+ pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+ } else if (IS_REUSABLE_PAGE(pai)) {
+ assert(!was_altacct);
+ assert(IS_INTERNAL_PAGE(pai));
+ /* was already not in phys_footprint */
+ } else if (IS_INTERNAL_PAGE(pai)) {
+ assert(!was_altacct);
+ assert(!IS_REUSABLE_PAGE(pai));
+ pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+ } else {
+ /* not an internal page */
+ }
}
if (iswired(*pte)) {
assert(pmap->stats.wired_count >= 1);
pmap_phys_attributes[pai] |= ept_refmod_to_physmap(oattr);
}
- /*
- * Remove the mapping from the pvlist for
- * this physical page.
- * We'll end up with either a rooted pv or a
- * hashed pv
- */
- pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, &old_pte);
-
} else {
/*
}
if ((options & PMAP_OPTIONS_ALT_ACCT) &&
IS_INTERNAL_PAGE(pai)) {
- assert(!IS_REUSABLE_PAGE(pai));
pv_h->va_and_flags |= PVE_IS_ALTACCT;
+ is_altacct = TRUE;
} else {
pv_h->va_and_flags &= ~PVE_IS_ALTACCT;
+ is_altacct = FALSE;
}
} else {
/*
pvh_e->va_and_flags = vaddr;
pvh_e->pmap = pmap;
pvh_e->ppn = pn;
+ if ((options & PMAP_OPTIONS_ALT_ACCT) &&
+ IS_INTERNAL_PAGE(pai)) {
+ pvh_e->va_and_flags |= PVE_IS_ALTACCT;
+ is_altacct = TRUE;
+ } else {
+ pvh_e->va_and_flags &= ~PVE_IS_ALTACCT;
+ is_altacct = FALSE;
+ }
pv_hash_add(pvh_e, pv_h);
/*
pmap->stats.resident_max = pmap->stats.resident_count;
}
if (pmap != kernel_pmap) {
+ /* update pmap stats */
if (IS_REUSABLE_PAGE(pai)) {
OSAddAtomic(+1, &pmap->stats.reusable);
PMAP_STATS_PEAK(pmap->stats.reusable);
} else if (IS_INTERNAL_PAGE(pai)) {
OSAddAtomic(+1, &pmap->stats.internal);
PMAP_STATS_PEAK(pmap->stats.internal);
- pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
- if (IS_ALTACCT_PAGE(pai)) {
- pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
- } else {
- pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
- }
} else {
OSAddAtomic(+1, &pmap->stats.external);
PMAP_STATS_PEAK(pmap->stats.external);
}
+
+ /* update ledgers */
+ if (is_altacct) {
+ /* internal but also alternate accounting */
+ assert(IS_INTERNAL_PAGE(pai));
+ pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
+ pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+ /* alternate accounting, so not in footprint */
+ } else if (IS_REUSABLE_PAGE(pai)) {
+ assert(!is_altacct);
+ assert(IS_INTERNAL_PAGE(pai));
+ /* internal but reusable: not in footprint */
+ } else if (IS_INTERNAL_PAGE(pai)) {
+ assert(!is_altacct);
+ assert(!IS_REUSABLE_PAGE(pai));
+ /* internal: add to footprint */
+ pmap_ledger_credit(pmap, task_ledgers.internal, PAGE_SIZE);
+ pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
+ } else {
+ /* not internal: not in footprint */
+ }
}
} else if (last_managed_page == 0) {
/* Account for early mappings created before "managed pages"
pv_hashed_entry_t pvh_e;
int pvh_cnt = 0;
int num_removed, num_unwired, num_found, num_invalid;
- int num_external, num_reusable;
- int num_internal, num_alt_internal;
- uint64_t num_compressed, num_alt_compressed;
+ int stats_external, stats_internal, stats_reusable;
+ uint64_t stats_compressed;
+ int ledgers_internal, ledgers_alt_internal;
+ uint64_t ledgers_compressed, ledgers_alt_compressed;
ppnum_t pai;
pmap_paddr_t pa;
vm_map_offset_t vaddr;
boolean_t is_ept = is_ept_pmap(pmap);
+ boolean_t was_altacct;
num_removed = 0;
num_unwired = 0;
num_found = 0;
num_invalid = 0;
- num_external = 0;
- num_internal = 0;
- num_reusable = 0;
- num_compressed = 0;
- num_alt_internal = 0;
- num_alt_compressed = 0;
+ stats_external = 0;
+ stats_internal = 0;
+ stats_reusable = 0;
+ stats_compressed = 0;
+ ledgers_internal = 0;
+ ledgers_compressed = 0;
+ ledgers_alt_internal = 0;
+ ledgers_alt_compressed = 0;
/* invalidate the PTEs first to "freeze" them */
for (cpte = spte, vaddr = start_vaddr;
cpte < epte;
pa = pte_to_pa(p);
if (pa == 0) {
- if (pmap != kernel_pmap &&
- (options & PMAP_OPTIONS_REMOVE) &&
+ if ((options & PMAP_OPTIONS_REMOVE) &&
(PTE_IS_COMPRESSED(p))) {
+ assert(pmap != kernel_pmap);
/* one less "compressed"... */
- num_compressed++;
+ stats_compressed++;
+ ledgers_compressed++;
if (p & PTE_COMPRESSED_ALT) {
/* ... but it used to be "ALTACCT" */
- num_alt_compressed++;
+ ledgers_alt_compressed++;
}
/* clear marker(s) */
/* XXX probably does not need to be atomic! */
* "compressed" marker after our first "freeze"
* loop above, so check again.
*/
- if (pmap != kernel_pmap &&
- (options & PMAP_OPTIONS_REMOVE) &&
+ if ((options & PMAP_OPTIONS_REMOVE) &&
(PTE_IS_COMPRESSED(*cpte))) {
+ assert(pmap != kernel_pmap);
/* one less "compressed"... */
- num_compressed++;
+ stats_compressed++;
+ ledgers_compressed++;
if (*cpte & PTE_COMPRESSED_ALT) {
/* ... but it used to be "ALTACCT" */
- num_alt_compressed++;
+ ledgers_alt_compressed++;
}
pmap_store_pte(cpte, 0);
}
UNLOCK_PVH(pai);
goto check_pte_for_compressed_marker;
}
+
+ /*
+ * Remove the mapping from the pvlist for this physical page.
+ */
+ pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte, &was_altacct);
+
num_removed++;
+ /* update pmap stats */
if (IS_REUSABLE_PAGE(pai)) {
- assert(!IS_ALTACCT_PAGE(pai));
- num_reusable++;
+ stats_reusable++;
} else if (IS_INTERNAL_PAGE(pai)) {
- num_internal++;
- if (IS_ALTACCT_PAGE(pai)) {
- num_alt_internal++;
- }
+ stats_internal++;
+ } else {
+ stats_external++;
+ }
+ /* update ledgers */
+ if (was_altacct) {
+ /* internal and alternate accounting */
+ assert(IS_INTERNAL_PAGE(pai));
+ ledgers_internal++;
+ ledgers_alt_internal++;
+ } else if (IS_REUSABLE_PAGE(pai)) {
+ /* internal but reusable */
+ assert(!was_altacct);
+ assert(IS_INTERNAL_PAGE(pai));
+ } else if (IS_INTERNAL_PAGE(pai)) {
+ /* internal */
+ assert(!was_altacct);
+ assert(!IS_REUSABLE_PAGE(pai));
+ ledgers_internal++;
} else {
- num_external++;
+ /* not internal */
}
/*
ept_refmod_to_physmap((*cpte & (INTEL_EPT_REF | INTEL_EPT_MOD))) & (PHYS_MODIFIED | PHYS_REFERENCED);
}
- /*
- * Remove the mapping from the pvlist for this physical page.
- */
- pvh_e = pmap_pv_remove(pmap, vaddr, (ppnum_t *) &pai, cpte);
-
/* completely invalidate the PTE */
pmap_store_pte(cpte, 0);
OSAddAtomic(-num_removed, &pmap->stats.resident_count);
if (pmap != kernel_pmap) {
- PMAP_STATS_ASSERTF((pmap->stats.external >= num_external,
- "pmap=%p num_external=%d stats.external=%d",
- pmap, num_external, pmap->stats.external));
- PMAP_STATS_ASSERTF((pmap->stats.internal >= num_internal,
- "pmap=%p num_internal=%d stats.internal=%d",
- pmap, num_internal, pmap->stats.internal));
- PMAP_STATS_ASSERTF((pmap->stats.reusable >= num_reusable,
- "pmap=%p num_reusable=%d stats.reusable=%d",
- pmap, num_reusable, pmap->stats.reusable));
- PMAP_STATS_ASSERTF((pmap->stats.compressed >= num_compressed,
- "pmap=%p num_compressed=%lld, stats.compressed=%lld",
- pmap, num_compressed, pmap->stats.compressed));
-
- if (num_external) {
- OSAddAtomic(-num_external, &pmap->stats.external);
+ PMAP_STATS_ASSERTF((pmap->stats.external >= stats_external,
+ "pmap=%p stats_external=%d stats.external=%d",
+ pmap, stats_external, pmap->stats.external));
+ PMAP_STATS_ASSERTF((pmap->stats.internal >= stats_internal,
+ "pmap=%p stats_internal=%d stats.internal=%d",
+ pmap, stats_internal, pmap->stats.internal));
+ PMAP_STATS_ASSERTF((pmap->stats.reusable >= stats_reusable,
+ "pmap=%p stats_reusable=%d stats.reusable=%d",
+ pmap, stats_reusable, pmap->stats.reusable));
+ PMAP_STATS_ASSERTF((pmap->stats.compressed >= stats_compressed,
+ "pmap=%p stats_compressed=%lld, stats.compressed=%lld",
+ pmap, stats_compressed, pmap->stats.compressed));
+
+ /* update pmap stats */
+ if (stats_external) {
+ OSAddAtomic(-stats_external, &pmap->stats.external);
}
- if (num_internal) {
- OSAddAtomic(-num_internal, &pmap->stats.internal);
+ if (stats_internal) {
+ OSAddAtomic(-stats_internal, &pmap->stats.internal);
+ }
+ if (stats_reusable)
+ OSAddAtomic(-stats_reusable, &pmap->stats.reusable);
+ if (stats_compressed)
+ OSAddAtomic64(-stats_compressed, &pmap->stats.compressed);
+ /* update ledgers */
+ if (ledgers_internal) {
pmap_ledger_debit(pmap,
task_ledgers.internal,
- machine_ptob(num_internal));
+ machine_ptob(ledgers_internal));
}
- if (num_alt_internal) {
+ if (ledgers_compressed) {
pmap_ledger_debit(pmap,
- task_ledgers.alternate_accounting,
- machine_ptob(num_alt_internal));
+ task_ledgers.internal_compressed,
+ machine_ptob(ledgers_compressed));
}
- if (num_alt_compressed) {
+ if (ledgers_alt_internal) {
pmap_ledger_debit(pmap,
- task_ledgers.alternate_accounting_compressed,
- machine_ptob(num_alt_compressed));
+ task_ledgers.alternate_accounting,
+ machine_ptob(ledgers_alt_internal));
}
- if (num_reusable)
- OSAddAtomic(-num_reusable, &pmap->stats.reusable);
- if (num_compressed) {
- OSAddAtomic64(-num_compressed, &pmap->stats.compressed);
+ if (ledgers_alt_compressed) {
pmap_ledger_debit(pmap,
- task_ledgers.internal_compressed,
- machine_ptob(num_compressed));
+ task_ledgers.alternate_accounting_compressed,
+ machine_ptob(ledgers_alt_compressed));
}
pmap_ledger_debit(pmap,
task_ledgers.phys_footprint,
- machine_ptob((num_internal -
- num_alt_internal) +
- (num_compressed -
- num_alt_compressed)));
+ machine_ptob((ledgers_internal -
+ ledgers_alt_internal) +
+ (ledgers_compressed -
+ ledgers_alt_compressed)));
}
#if TESTING
assert(!PTE_IS_COMPRESSED(*pte));
/* mark this PTE as having been "compressed" */
new_pte_value = PTE_COMPRESSED;
- if (IS_ALTACCT_PAGE(pai)) {
+ if (IS_ALTACCT_PAGE(pai, pv_e)) {
new_pte_value |= PTE_COMPRESSED_ALT;
}
} else {
options &= ~PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
options |= PMAP_OPTIONS_COMPRESSOR;
assert(new_pte_value == 0);
- new_pte_value = PTE_COMPRESSED;
- if (IS_ALTACCT_PAGE(pai)) {
- new_pte_value |= PTE_COMPRESSED_ALT;
+ if (pmap != kernel_pmap) {
+ new_pte_value = PTE_COMPRESSED;
+ if (IS_ALTACCT_PAGE(pai, pv_e)) {
+ new_pte_value |= PTE_COMPRESSED_ALT;
+ }
}
}
pmap_store_pte(pte, new_pte_value);
assert(IS_INTERNAL_PAGE(pai));
}
if (pmap != kernel_pmap) {
+ /* update pmap stats */
if (IS_REUSABLE_PAGE(pai)) {
assert(pmap->stats.reusable > 0);
OSAddAtomic(-1, &pmap->stats.reusable);
PMAP_STATS_PEAK(pmap->stats.compressed);
pmap->stats.compressed_lifetime++;
}
- if (IS_REUSABLE_PAGE(pai)) {
- assert(!IS_ALTACCT_PAGE(pai));
+
+ /* update ledgers */
+ if (IS_ALTACCT_PAGE(pai, pv_e)) {
+ assert(IS_INTERNAL_PAGE(pai));
+ pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
+ pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
+ if (options & PMAP_OPTIONS_COMPRESSOR) {
+ pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
+ pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE);
+ }
+ } else if (IS_REUSABLE_PAGE(pai)) {
+ assert(!IS_ALTACCT_PAGE(pai, pv_e));
+ assert(IS_INTERNAL_PAGE(pai));
if (options & PMAP_OPTIONS_COMPRESSOR) {
pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
/* was not in footprint, but is now */
pmap_ledger_credit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
}
} else if (IS_INTERNAL_PAGE(pai)) {
+ assert(!IS_ALTACCT_PAGE(pai, pv_e));
+ assert(!IS_REUSABLE_PAGE(pai));
pmap_ledger_debit(pmap, task_ledgers.internal, PAGE_SIZE);
/*
* Update all stats related to physical
* it mustn't affect total task
* footprint.
*/
- if (IS_ALTACCT_PAGE(pai)) {
- /*
- * We've already debited
- * internal, above.
- * Debit
- * alternate_accounting
- * here, which means the
- * net change on
- * phys_footprint is 0.
- */
- pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
- pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, PAGE_SIZE);
- }
pmap_ledger_credit(pmap, task_ledgers.internal_compressed, PAGE_SIZE);
} else {
/*
* so adjust stats to keep
* phys_footprint up to date.
*/
- if (IS_ALTACCT_PAGE(pai)) {
- /*
- * We've already debited
- * internal, above.
- * Debit
- * alternate_accounting
- * here, which means
- * the net change on
- * phys_footprint is 0.
- */
- pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, PAGE_SIZE);
- } else {
- pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
- }
+ pmap_ledger_debit(pmap, task_ledgers.phys_footprint, PAGE_SIZE);
}
}
}
pvh_e = (pv_hashed_entry_t) queue_next(&pv_h->qlink);
if (pvh_e != (pv_hashed_entry_t) pv_h) {
- vm_map_offset_t pve_flags;
-
pv_hash_remove(pvh_e);
pv_h->pmap = pvh_e->pmap;
- pve_flags = pv_h->va_and_flags & PAGE_MASK;
- pv_h->va_and_flags = PVE_VA(pvh_e) | pve_flags;
+ pv_h->va_and_flags = pvh_e->va_and_flags;
pvh_e->qlink.next = (queue_entry_t) pvh_eh;
pvh_eh = pvh_e;
int pai;
pmap_t pmap;
char attributes = 0;
- boolean_t is_internal, is_reusable, is_ept;
+ boolean_t is_internal, is_reusable, is_altacct, is_ept;
int ept_bits_to_clear;
boolean_t ept_keep_global_mod = FALSE;
pmap = pv_e->pmap;
is_ept = is_ept_pmap(pmap);
+ is_altacct = IS_ALTACCT_PAGE(pai, pv_e);
va = PVE_VA(pv_e);
pte_bits = 0;
OSAddAtomic(+1, &pmap->stats.internal);
PMAP_STATS_PEAK(pmap->stats.internal);
assert(pmap->stats.internal > 0);
- pmap_ledger_credit(pmap,
- task_ledgers.internal,
- PAGE_SIZE);
- if (IS_ALTACCT_PAGE(pai)) {
- /* no impact on footprint */
+ if (is_altacct) {
+ /* no impact on ledgers */
} else {
+ pmap_ledger_credit(pmap,
+ task_ledgers.internal,
+ PAGE_SIZE);
pmap_ledger_credit(
pmap,
task_ledgers.phys_footprint,
/* one less "internal" */
assert(pmap->stats.internal > 0);
OSAddAtomic(-1, &pmap->stats.internal);
- pmap_ledger_debit(pmap,
- task_ledgers.internal,
- PAGE_SIZE);
- if (IS_ALTACCT_PAGE(pai)) {
+ if (is_altacct) {
/* no impact on footprint */
} else {
+ pmap_ledger_debit(pmap,
+ task_ledgers.internal,
+ PAGE_SIZE);
pmap_ledger_debit(
pmap,
task_ledgers.phys_footprint,
disp |= PMAP_QUERY_PAGE_PRESENT;
pai = pa_index(pa);
if (!IS_MANAGED_PAGE(pai)) {
+ } else if (pmap_pv_is_altacct(pmap, va, pai)) {
+ assert(IS_INTERNAL_PAGE(pai));
+ disp |= PMAP_QUERY_PAGE_INTERNAL;
+ disp |= PMAP_QUERY_PAGE_ALTACCT;
} else if (IS_REUSABLE_PAGE(pai)) {
disp |= PMAP_QUERY_PAGE_REUSABLE;
} else if (IS_INTERNAL_PAGE(pai)) {
disp |= PMAP_QUERY_PAGE_INTERNAL;
- if (IS_ALTACCT_PAGE(pai)) {
- disp |= PMAP_QUERY_PAGE_ALTACCT;
- }
}
}
* Forward declarations
*/
static void user_page_fault_continue(kern_return_t kret);
-static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl);
+static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result);
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
int type;
vm_map_t map = 0; /* protected by T_PAGE_FAULT */
kern_return_t result = KERN_FAILURE;
+ kern_return_t fault_result = KERN_SUCCESS;
thread_t thread;
ast_t *myast;
boolean_t intr;
if (code & T_PF_EXECUTE)
prot |= VM_PROT_EXECUTE;
- result = vm_fault(map,
+ fault_result = result = vm_fault(map,
vaddr,
prot,
FALSE,
#endif
}
pal_cli();
- panic_trap(saved_state, trap_pl);
+ panic_trap(saved_state, trap_pl, fault_result);
/*
* NO RETURN
*/
}
static void
-panic_trap(x86_saved_state64_t *regs, uint32_t pl)
+panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
{
const char *trapname = "Unknown";
pal_cr_t cr0, cr2, cr3, cr4;
"R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
"R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
"RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n"
- "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d\n",
+ "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
regs->isf.rip, regs->isf.trapno, trapname,
cr0, cr2, cr3, cr4,
regs->rax, regs->rbx, regs->rcx, regs->rdx,
virtualized ? " VMM" : "",
potential_kernel_NX_fault ? " Kernel NX fault" : "",
potential_smep_fault ? " SMEP/User NX fault" : "",
- potential_smap_fault ? " SMAP fault" : "", pl);
+ potential_smap_fault ? " SMAP fault" : "",
+ pl,
+ fault_result);
/*
* This next statement is not executed,
* but it's needed to stop the compiler using tail call optimization
#include <ipc/flipc.h>
#endif
+#include <os/overflow.h>
+
#include <security/mac_mach_internal.h>
#include <device/device_server.h>
}
}
- /* the entry(s) might need to be deallocated */
+ /*
+ * The entries might need to be deallocated.
+ *
+ * Each entry should be deallocated only once,
+ * even if it was specified in more than one slot in the header.
+ * Note that dest can be the same entry as reply or voucher,
+ * but reply and voucher must be distinct entries.
+ */
assert(IE_NULL != dest_entry);
+ if (IE_NULL != reply_entry)
+ assert(reply_entry != voucher_entry);
+
if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
ipc_entry_dealloc(space, dest_name, dest_entry);
+
+ if (dest_entry == reply_entry) {
+ reply_entry = IE_NULL;
+ }
+
+ if (dest_entry == voucher_entry) {
+ voucher_entry = IE_NULL;
+ }
+
dest_entry = IE_NULL;
}
- if (dest_entry != reply_entry && IE_NULL != reply_entry &&
+ if (IE_NULL != reply_entry &&
IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
ipc_entry_dealloc(space, reply_name, reply_entry);
reply_entry = IE_NULL;
}
- if (dest_entry != voucher_entry && IE_NULL != voucher_entry &&
+ if (IE_NULL != voucher_entry &&
IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
ipc_entry_dealloc(space, voucher_name, voucher_entry);
voucher_entry = IE_NULL;
result_disp = ipc_object_copyin_type(user_disp);
dsc->disposition = result_disp;
- if (count > (INT_MAX / sizeof(mach_port_t))) {
- *mr = MACH_SEND_TOO_LARGE;
+ /* We always do a 'physical copy', but you have to specify something valid */
+ if (copy_option != MACH_MSG_PHYSICAL_COPY &&
+ copy_option != MACH_MSG_VIRTUAL_COPY) {
+ *mr = MACH_SEND_INVALID_TYPE;
return NULL;
}
/* calculate length of data in bytes, rounding up */
- ports_length = count * sizeof(mach_port_t);
- names_length = count * sizeof(mach_port_name_t);
+
+ if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
+ *mr = MACH_SEND_TOO_LARGE;
+ return NULL;
+ }
+
+ if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
+ *mr = MACH_SEND_TOO_LARGE;
+ return NULL;
+ }
if (ports_length == 0) {
return user_dsc;
vm_size_t descriptor_size = 0;
+ mach_msg_type_number_t total_ool_port_count = 0;
+
/*
* Determine if the target is a kernel port.
*/
daddr = NULL;
for (i = 0; i < dsc_count; i++) {
mach_msg_size_t size;
+ mach_msg_type_number_t ool_port_count = 0;
daddr = naddr;
if (naddr > (mach_msg_descriptor_t *)
((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size)) {
- ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
- mr = MACH_SEND_MSG_TOO_SMALL;
- goto out;
+ mr = MACH_SEND_MSG_TOO_SMALL;
+ goto clean_message;
}
switch (daddr->type.type) {
/*
* Invalid copy option
*/
- ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
mr = MACH_SEND_INVALID_TYPE;
- goto out;
+ goto clean_message;
}
-
+
if ((size >= MSG_OOL_SIZE_SMALL) &&
(daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
!(daddr->out_of_line.deallocate)) {
* memory requirements
*/
if (space_needed + round_page(size) <= space_needed) {
- /* Overflow dectected */
- ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
- mr = MACH_MSG_VM_KERNEL;
- goto out;
- }
-
+ /* Overflow dectected */
+ mr = MACH_MSG_VM_KERNEL;
+ goto clean_message;
+ }
+
space_needed += round_page(size);
if (space_needed > ipc_kmsg_max_vm_space) {
-
- /*
- * Per message kernel memory limit exceeded
- */
- ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ /* Per message kernel memory limit exceeded */
mr = MACH_MSG_VM_KERNEL;
- goto out;
+ goto clean_message;
}
}
+ break;
+ case MACH_MSG_PORT_DESCRIPTOR:
+ if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
+ /* Overflow detected */
+ mr = MACH_SEND_TOO_LARGE;
+ goto clean_message;
+ }
+ break;
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR:
+ ool_port_count = (is_task_64bit) ?
+ ((mach_msg_ool_ports_descriptor64_t *)daddr)->count :
+ daddr->ool_ports.count;
+
+ if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) {
+ /* Overflow detected */
+ mr = MACH_SEND_TOO_LARGE;
+ goto clean_message;
+ }
+
+ if (ool_port_count > (ipc_kmsg_max_vm_space/sizeof(mach_port_t))) {
+ /* Per message kernel memory limit exceeded */
+ mr = MACH_SEND_TOO_LARGE;
+ goto clean_message;
+ }
+ break;
}
}
+ /* Sending more than 16383 rights in one message seems crazy */
+ if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) {
+ mr = MACH_SEND_TOO_LARGE;
+ goto clean_message;
+ }
+
/*
* Allocate space in the pageable kernel ipc copy map for all the
* ool data that is to be physically copied. Map is marked wait for
if (space_needed) {
if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed,
VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)) != KERN_SUCCESS) {
- ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
mr = MACH_MSG_VM_KERNEL;
- goto out;
+ goto clean_message;
}
}
}
out:
return mr;
+
+clean_message:
+ /* no descriptors have been copied in yet */
+ ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ return mr;
}
#define _VOLATILE_ volatile
/* Sanity check the ref count. If it is 0, we may be doubly zfreeing.
- * If it is larger than max int, it has been corrupted, probably by being
- * modified into an address (this is architecture dependent, but it's
- * safe to assume there cannot really be max int references).
+ * If it is larger than max int, it has been corrupted or leaked,
+ * probably by being modified into an address (this is architecture
+ * dependent, but it's safe to assume there cannot really be max int
+ * references unless some code is leaking the io_reference without leaking
+ * the object). Saturate the io_reference on a release kernel if it reaches
+ * max int, to avoid a use after free.
*
* NOTE: The 0 test alone will not catch double zfreeing of ipc_port
* structs, because the io_references field is the first word of the struct,
static inline void
io_reference(ipc_object_t io) {
+ ipc_object_refs_t new_io_references;
+ ipc_object_refs_t old_io_references;
+
assert((io)->io_references > 0 &&
(io)->io_references < IO_MAX_REFERENCES);
- OSIncrementAtomic(&((io)->io_references));
+
+ do {
+ old_io_references = (io)->io_references;
+ new_io_references = old_io_references + 1;
+ if (old_io_references == IO_MAX_REFERENCES) {
+ break;
+ }
+ } while (OSCompareAndSwap(old_io_references, new_io_references,
+ &((io)->io_references)) == FALSE);
}
static inline void
io_release(ipc_object_t io) {
+ ipc_object_refs_t new_io_references;
+ ipc_object_refs_t old_io_references;
+
assert((io)->io_references > 0 &&
(io)->io_references < IO_MAX_REFERENCES);
+
+ do {
+ old_io_references = (io)->io_references;
+ new_io_references = old_io_references - 1;
+ if (old_io_references == IO_MAX_REFERENCES) {
+ break;
+ }
+ } while (OSCompareAndSwap(old_io_references, new_io_references,
+ &((io)->io_references)) == FALSE);
+
/* If we just removed the last reference count */
- if ( 1 == OSDecrementAtomic(&((io)->io_references))) {
+ if (1 == old_io_references) {
/* Free the object */
io_free(io_otype((io)), (io));
}
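
The comment and the compare-and-swap loops above replace unchecked atomic increment/decrement with a saturating counter: once io_references reaches IO_MAX_REFERENCES it is never modified again, so a leaked or corrupted count cannot wrap around and trigger a premature io_free. A standalone C11-atomics sketch of the same policy; the type names and the ceiling are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REF_MAX UINT32_MAX   /* illustrative ceiling; stands in for IO_MAX_REFERENCES */

static void obj_reference(_Atomic uint32_t *refs)
{
    uint32_t old = atomic_load(refs);
    do {
        if (old == REF_MAX)
            return;                                  /* pegged: never changes again */
    } while (!atomic_compare_exchange_weak(refs, &old, old + 1));
}

/* Returns true when the caller dropped the last reference and must free. */
static bool obj_release(_Atomic uint32_t *refs)
{
    uint32_t old = atomic_load(refs);
    do {
        if (old == REF_MAX)
            return false;                            /* pegged: never freed */
    } while (!atomic_compare_exchange_weak(refs, &old, old - 1));
    return old == 1;
}

The trade-off is that an object whose count ever saturates is intentionally leaked, which is a safer failure mode than letting a wrapped count free an object that is still referenced.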
* KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
* KERN_INVALID_ARGUMENT Name denotes dead name, but
* immediate is FALSE or notify is IP_NULL.
- * KERN_UREFS_OVERFLOW Name denotes dead name, but
- * generating immediate notif. would overflow urefs.
* KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
*/
assert(urefs > 0);
- if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) {
- is_write_unlock(space);
- if (port != IP_NULL)
- ip_release(port);
- return KERN_UREFS_OVERFLOW;
- }
+ /* leave urefs pegged to maximum if it overflowed */
+ if (urefs < MACH_PORT_UREFS_MAX)
+ (entry->ie_bits)++; /* increment urefs */
- (entry->ie_bits)++; /* increment urefs */
ipc_entry_modified(space, name, entry);
+
is_write_unlock(space);
if (port != IP_NULL)
*/
if (entry->ie_request != IE_REQ_NONE) {
if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
- assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
- bits++;
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ bits++; /* increment urefs */
}
entry->ie_request = IE_REQ_NONE;
}
if (IE_BITS_UREFS(bits) == 1) {
ipc_entry_dealloc(space, name, entry);
} else {
- entry->ie_bits = bits-1; /* decrement urefs */
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits-1; /* decrement urefs */
ipc_entry_modified(space, name, entry);
}
is_write_unlock(space);
ip_release(port);
} else {
- ip_unlock(port);
- entry->ie_bits = bits-1; /* decrement urefs */
+ ip_unlock(port);
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits-1; /* decrement urefs */
ipc_entry_modified(space, name, entry);
is_write_unlock(space);
}
-
if (nsrequest != IP_NULL)
ipc_notify_no_senders(nsrequest, mscount);
entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK |
MACH_PORT_TYPE_SEND);
- } else
- entry->ie_bits = bits-1; /* decrement urefs */
-
+ } else {
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
+ entry->ie_bits = bits-1; /* decrement urefs */
+ }
+ }
ip_unlock(port);
ipc_entry_modified(space, name, entry);
* KERN_SUCCESS Count was modified.
* KERN_INVALID_RIGHT Entry has wrong type.
* KERN_INVALID_VALUE Bad delta for the right.
- * KERN_UREFS_OVERFLOW OK delta, except would overflow.
*/
kern_return_t
assert(IE_BITS_TYPE(bits) ==
MACH_PORT_TYPE_SEND_RECEIVE);
assert(IE_BITS_UREFS(bits) > 0);
- assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
assert(port->ip_srights > 0);
if (port->ip_pdrequest != NULL) {
bits |= MACH_PORT_TYPE_DEAD_NAME;
if (entry->ie_request) {
entry->ie_request = IE_REQ_NONE;
- bits++;
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ bits++; /* increment urefs */
}
entry->ie_bits = bits;
entry->ie_object = IO_NULL;
bits = entry->ie_bits;
relport = port;
port = IP_NULL;
- } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0)
+ } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
goto invalid_right;
+ }
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
assert(IE_BITS_UREFS(bits) > 0);
assert(entry->ie_object == IO_NULL);
assert(entry->ie_request == IE_REQ_NONE);
- urefs = IE_BITS_UREFS(bits);
- if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
+ delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
goto invalid_value;
- if (MACH_PORT_UREFS_OVERFLOW(urefs, delta))
- goto urefs_overflow;
+ }
+
+ urefs = IE_BITS_UREFS(bits);
+
+ if (urefs == MACH_PORT_UREFS_MAX) {
+ /*
+ * urefs are pegged due to an overflow
+ * only a delta removing all refs at once can change it
+ */
+
+ if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX)))
+ delta = 0;
+ } else {
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ goto invalid_value;
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
+ /* leave urefs pegged to maximum if it overflowed */
+ delta = MACH_PORT_UREFS_MAX - urefs;
+ }
+ }
if ((urefs + delta) == 0) {
ipc_entry_dealloc(space, name, entry);
- } else {
+ } else if (delta != 0) {
entry->ie_bits = bits + delta;
ipc_entry_modified(space, name, entry);
}
+
is_write_unlock(space);
if (relport != IP_NULL)
if ((bits & MACH_PORT_TYPE_SEND) == 0)
goto invalid_right;
- /* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */
+ /* maximum urefs for send is MACH_PORT_UREFS_MAX */
port = (ipc_port_t) entry->ie_object;
assert(port != IP_NULL);
assert(port->ip_srights > 0);
- urefs = IE_BITS_UREFS(bits);
- if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
+ if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
+ delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
ip_unlock(port);
goto invalid_value;
}
- if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta)) {
- ip_unlock(port);
- goto urefs_overflow;
+
+ urefs = IE_BITS_UREFS(bits);
+
+ if (urefs == MACH_PORT_UREFS_MAX) {
+ /*
+ * urefs are pegged due to an overflow
+ * only a delta removing all refs at once can change it
+ */
+
+ if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX)))
+ delta = 0;
+ } else {
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
+ ip_unlock(port);
+ goto invalid_value;
+ }
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
+ /* leave urefs pegged to maximum if it overflowed */
+ delta = MACH_PORT_UREFS_MAX - urefs;
+ }
}
if ((urefs + delta) == 0) {
if (bits & MACH_PORT_TYPE_RECEIVE) {
assert(port->ip_receiver_name == name);
assert(port->ip_receiver == space);
- ip_unlock(port);
+ ip_unlock(port);
assert(IE_BITS_TYPE(bits) ==
MACH_PORT_TYPE_SEND_RECEIVE);
entry->ie_object = IO_NULL;
ipc_entry_dealloc(space, name, entry);
}
- } else {
+ } else if (delta != 0) {
ip_unlock(port);
entry->ie_bits = bits + delta;
ipc_entry_modified(space, name, entry);
+ } else {
+ ip_unlock(port);
}
is_write_unlock(space);
is_write_unlock(space);
return KERN_INVALID_VALUE;
- urefs_overflow:
- is_write_unlock(space);
- return KERN_UREFS_OVERFLOW;
-
guard_failure:
- return KERN_INVALID_RIGHT;
+ return KERN_INVALID_RIGHT;
}
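
The mod-refs path above clamps the caller-supplied delta so that a pegged count stays pegged unless the caller removes every reference at once, and turns a would-be overflow into "peg at the maximum" rather than an error. A pure-function sketch of that policy, with UREFS_MAX as an illustrative stand-in for MACH_PORT_UREFS_MAX (the real constant lives in the Mach headers):

#include <stdint.h>

#define UREFS_MAX 65535   /* illustrative stand-in for MACH_PORT_UREFS_MAX */

/* Returns -1 for an invalid delta, otherwise the resulting uref count. */
static int64_t apply_uref_delta(uint32_t urefs, int32_t delta)
{
    if (delta > UREFS_MAX || delta < -UREFS_MAX)
        return -1;                                  /* invalid value */

    if (urefs == UREFS_MAX) {
        /* pegged by an earlier overflow: only removing all refs at once
         * is allowed to change the count */
        if (delta != -UREFS_MAX)
            delta = 0;
    } else {
        if (delta < 0 && (uint32_t)-delta > urefs)
            return -1;                              /* underflow */
        if (delta > 0 && urefs + (uint32_t)delta >= UREFS_MAX)
            delta = (int32_t)(UREFS_MAX - urefs);   /* peg on overflow */
    }
    return (int64_t)urefs + delta;
}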
/*
port = (ipc_port_t) entry->ie_object;
assert(port != IP_NULL);
-
+
ip_lock(port);
assert(ip_active(port));
assert(port->ip_receiver_name == name);
*/
if (srdelta) {
-
+
assert(port->ip_srights > 0);
urefs = IE_BITS_UREFS(bits);
+
/*
* Since we made sure that srdelta is negative,
* the check for urefs overflow is not required.
ip_unlock(port);
goto invalid_value;
}
+
+ if (urefs == MACH_PORT_UREFS_MAX) {
+ /*
+ * urefs are pegged due to an overflow
+ * only a delta removing all refs at once can change it
+ */
+ if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX)))
+ srdelta = 0;
+ }
+
if ((urefs + srdelta) == 0) {
if (--port->ip_srights == 0) {
nsrequest = port->ip_nsrequest;
bits = entry->ie_bits;
if (bits & MACH_PORT_TYPE_SEND) {
assert(IE_BITS_UREFS(bits) > 0);
- assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+ assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);
if (port->ip_pdrequest != NULL) {
/*
bits |= MACH_PORT_TYPE_DEAD_NAME;
if (entry->ie_request) {
entry->ie_request = IE_REQ_NONE;
- bits++;
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ bits++; /* increment urefs */
}
entry->ie_bits = bits;
entry->ie_object = IO_NULL;
ipc_hash_delete(space, (ipc_object_t) port,
name, entry);
entry->ie_object = IO_NULL;
+ /* transfer entry's reference to caller */
}
entry->ie_bits = bits &~
(IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
} else {
port->ip_srights++;
ip_reference(port);
- entry->ie_bits = bits-1; /* decrement urefs */
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits-1; /* decrement urefs */
}
+
ipc_entry_modified(space, name, entry);
ip_unlock(port);
if (IE_BITS_UREFS(bits) == 1) {
bits &= ~MACH_PORT_TYPE_DEAD_NAME;
}
- entry->ie_bits = bits-1; /* decrement urefs */
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits-1; /* decrement urefs */
+
ipc_entry_modified(space, name, entry);
*objectp = IO_DEAD;
*sorightp = IP_NULL;
assert(IE_BITS_UREFS(bits) > 0);
if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
- assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
- entry->ie_bits = bits+1; /* increment urefs */
+ assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits+1; /* increment urefs */
}
} else {
assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
assert(IE_BITS_UREFS(bits) > 0);
if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
- assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1);
- entry->ie_bits = bits+1; /* increment urefs */
+ assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits+1; /* increment urefs */
}
/*
port->ip_srights += 2;
ip_reference(port);
ip_reference(port);
- entry->ie_bits = bits-2; /* decrement urefs */
+ /* if urefs are pegged due to overflow, leave them pegged */
+ if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+ entry->ie_bits = bits-2; /* decrement urefs */
}
ipc_entry_modified(space, name, entry);
* The object is unlocked; the space isn't.
* Returns:
* KERN_SUCCESS Copied out capability.
- * KERN_UREFS_OVERFLOW User-refs would overflow;
- * guaranteed not to happen with a fresh entry
- * or if overflow=TRUE was specified.
*/
kern_return_t
mach_port_name_t name,
ipc_entry_t entry,
mach_msg_type_name_t msgt_name,
- boolean_t overflow,
+ __unused boolean_t overflow,
ipc_object_t object)
{
ipc_entry_bits_t bits;
switch (msgt_name) {
case MACH_MSG_TYPE_PORT_SEND_ONCE:
-
+
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
assert(port->ip_sorights > 0);
/* transfer send-once right and ref to entry */
ip_unlock(port);
- entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
ipc_entry_modified(space, name, entry);
break;
assert(port->ip_srights > 1);
assert(urefs > 0);
- assert(urefs < MACH_PORT_UREFS_MAX);
-
- if (urefs+1 == MACH_PORT_UREFS_MAX) {
- if (overflow) {
- /* leave urefs pegged to maximum */
+ assert(urefs <= MACH_PORT_UREFS_MAX);
- port->ip_srights--;
- ip_unlock(port);
- ip_release(port);
- return KERN_SUCCESS;
- }
+ if (urefs == MACH_PORT_UREFS_MAX) {
+ /*
+ * leave urefs pegged to maximum,
+ * consume send right and ref
+ */
+ port->ip_srights--;
ip_unlock(port);
- return KERN_UREFS_OVERFLOW;
+ ip_release(port);
+ return KERN_SUCCESS;
}
+
+ /* consume send right and ref */
port->ip_srights--;
ip_unlock(port);
ip_release(port);
-
+
} else if (bits & MACH_PORT_TYPE_RECEIVE) {
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
assert(IE_BITS_UREFS(bits) == 0);
- /* transfer send right to entry */
+ /* transfer send right to entry, consume ref */
ip_unlock(port);
ip_release(port);
-
+
} else {
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
assert(IE_BITS_UREFS(bits) == 0);
name, entry);
}
- entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
ipc_entry_modified(space, name, entry);
break;
disp = ipc_object_copyin_type(args->polyPoly);
rv = mach_port_insert_right(task->itk_space, args->name, port, disp);
+ if (rv != KERN_SUCCESS) {
+ if (IO_VALID((ipc_object_t)port)) {
+ ipc_object_destroy((ipc_object_t)port, disp);
+ }
+ }
done:
if (task)
#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \
(((delta) > 0) && \
((((urefs) + (delta)) <= (urefs)) || \
- (((urefs) + (delta)) > MACH_PORT_UREFS_MAX)))
+ (((urefs) + (delta)) >= MACH_PORT_UREFS_MAX)))
#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \
(((delta) < 0) && (((mach_port_urefs_t)-(delta)) > (urefs)))
struct kdp_in_addr ipaddr;
struct kdp_ether_addr macaddr;
+ boolean_t kdp_match_name_found = PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname));
+ boolean_t kdp_not_serial = kdp_match_name_found ? (strncmp(kdpname, "serial", sizeof(kdpname))) : TRUE;
+
// serial must be explicitly requested
- if(!PE_parse_boot_argn("kdp_match_name", kdpname, sizeof(kdpname)) || strncmp(kdpname, "serial", sizeof(kdpname)) != 0)
+ if(!kdp_match_name_found || kdp_not_serial)
return;
#if WITH_CONSISTENT_DBG
- if (PE_consistent_debug_enabled() && debug_boot_arg) {
+ if (kdp_not_serial && PE_consistent_debug_enabled() && debug_boot_arg) {
current_debugger = HW_SHM_CUR_DB;
return;
} else {
- printf("Consistent debug disabled or debug boot arg not present, falling through to serial for debugger\n");
+ printf("Serial requested, consistent debug disabled or debug boot arg not present, configuring debugging over serial\n");
}
#endif /* WITH_CONSISTENT_DBG */
nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
- if (thread->machine.iss == NULL) {
- // no register states to backtrace, probably thread is terminating
- return 0;
- }
-
if (user_p) {
x86_saved_state32_t *iss32;
vm_offset_t kern_virt_addr = 0;
vm_map_t bt_vm_map = VM_MAP_NULL;
- if (thread->machine.iss == NULL) {
- // no register states to backtrace, probably thread is terminating
- return 0;
- }
-
nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
if (user_p) {
/*
* Swap in a new map for the task/thread pair; the old map reference is
- * returned.
+ * returned. Also does a pmap switch if the thread provided is the current thread.
*/
vm_map_t
-swap_task_map(task_t task, thread_t thread, vm_map_t map, boolean_t doswitch)
+swap_task_map(task_t task, thread_t thread, vm_map_t map)
{
vm_map_t old_map;
+ boolean_t doswitch = (thread == current_thread()) ? TRUE : FALSE;
if (task != thread->task)
panic("swap_task_map");
static char model_name[64];
unsigned char *kernel_uuid;
/* uuid_string_t */ char kernel_uuid_string[37];
+char panic_disk_error_description[512];
+size_t panic_disk_error_description_size = sizeof(panic_disk_error_description);
static spl_t panic_prologue(const char *str);
static void panic_epilogue(spl_t s);
kdb_printf("\nSystem uptime in nanoseconds: %llu\n", uptime);
}
+static void panic_display_disk_errors(void) {
+
+ if (panic_disk_error_description[0]) {
+ panic_disk_error_description[sizeof(panic_disk_error_description) - 1] = '\0';
+ kdb_printf("Root disk errors: \"%s\"\n", panic_disk_error_description);
+ }
+};
+
extern const char version[];
extern char osversion[];
static volatile uint32_t config_displayed = 0;
-__private_extern__ void panic_display_system_configuration(void) {
+__private_extern__ void panic_display_system_configuration(boolean_t launchd_exit) {
- panic_display_process_name();
+ if (!launchd_exit) panic_display_process_name();
if (OSCompareAndSwap(0, 1, &config_displayed)) {
char buf[256];
- if (strlcpy(buf, PE_boot_args(), sizeof(buf)))
+ if (!launchd_exit && strlcpy(buf, PE_boot_args(), sizeof(buf)))
kdb_printf("Boot args: %s\n", buf);
kdb_printf("\nMac OS version:\n%s\n",
(osversion[0] != 0) ? osversion : "Not yet set");
kdb_printf("\nKernel version:\n%s\n",version);
panic_display_kernel_uuid();
- panic_display_kernel_aslr();
- panic_display_hibb();
- panic_display_pal_info();
+ if (!launchd_exit) {
+ panic_display_kernel_aslr();
+ panic_display_hibb();
+ panic_display_pal_info();
+ }
panic_display_model_name();
- panic_display_uptime();
- panic_display_zprint();
+ panic_display_disk_errors();
+ if (!launchd_exit) {
+ panic_display_uptime();
+ panic_display_zprint();
#if CONFIG_ZLEAKS
- panic_display_ztrace();
+ panic_display_ztrace();
#endif /* CONFIG_ZLEAKS */
- kext_dump_panic_lists(&kdb_log);
+ kext_dump_panic_lists(&kdb_log);
+ }
}
}
extern unsigned int debug_boot_arg;
extern unsigned char *kernel_uuid;
extern char kernel_uuid_string[];
+extern char panic_disk_error_description[];
+extern size_t panic_disk_error_description_size;
#ifdef MACH_KERNEL_PRIVATE
int packA(char *inbuf, uint32_t length, uint32_t buflen);
void unpackA(char *inbuf, uint32_t length);
-void panic_display_system_configuration(void);
+void panic_display_system_configuration(boolean_t launchd_exit);
void panic_display_zprint(void);
void panic_display_kernel_aslr(void);
void panic_display_hibb(void);
}
}
- /*
- * We only support the KDP fault path and delta snapshots and tailspin mode with the kcdata format
- */
- if (!(flags & STACKSHOT_KCDATA_FORMAT)) {
+ if (!((flags & STACKSHOT_KCDATA_FORMAT) || (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER))) {
return KERN_NOT_SUPPORTED;
}
/*
- * If we're not saving the buffer in the kernel pointer, we need places to copy into.
+ * If we're not saving the buffer in the kernel pointer, we need a place to copy into.
*/
if ((!out_buffer_addr || !out_size_addr) && !(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
return KERN_INVALID_ARGUMENT;
extern void machine_thread_going_on_core(thread_t new_thread,
int urgency,
- uint64_t sched_latency);
+ uint64_t sched_latency,
+ uint64_t dispatch_time);
-extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating);
+extern void machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch);
extern void machine_max_runnable_latency(uint64_t bg_max_latency,
uint64_t default_max_latency,
extern void task_set_main_thread_qos(task_t task, thread_t main_thread);
extern void proc_set_task_spawnpolicy(task_t task, int apptype, int qos_clamp, int role,
ipc_port_t * portwatch_ports, int portwatch_count);
+extern void proc_inherit_task_role(task_t new_task, task_t old_task);
/* IO Throttle tiers */
#define THROTTLE_LEVEL_NONE -1
/* Tell platform layer that we are still running this thread */
urgency = thread_get_urgency(thread, &ignore1, &ignore2);
- machine_thread_going_on_core(thread, urgency, 0);
+ machine_thread_going_on_core(thread, urgency, 0, 0);
/*
* This quantum is up, give this thread another.
*/
thread->last_made_runnable_time = mach_approximate_time();
- machine_thread_going_off_core(thread, FALSE);
+ machine_thread_going_off_core(thread, FALSE, processor->last_dispatch);
if (thread->reason & AST_QUANTUM)
thread_setrun(thread, SCHED_TAILQ);
}
#endif
- machine_thread_going_off_core(thread, should_terminate);
+ machine_thread_going_off_core(thread, should_terminate, processor->last_dispatch);
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
MACHDBG_CODE(DBG_MACH_SCHED,MACH_DISPATCH) | DBG_FUNC_NONE,
thread_tell_urgency(urgency, arg1, arg2, latency, self);
- machine_thread_going_on_core(self, urgency, latency);
+ machine_thread_going_on_core(self, urgency, latency, processor->last_dispatch);
/*
* Get a new quantum if none remaining.
processor->first_timeslice = FALSE;
thread_tell_urgency(THREAD_URGENCY_NONE, 0, 0, 0, self);
- machine_thread_going_on_core(self, THREAD_URGENCY_NONE, 0);
+ machine_thread_going_on_core(self, THREAD_URGENCY_NONE, 0, processor->last_dispatch);
}
self->computation_epoch = processor->last_dispatch;
if (processor->state == PROCESSOR_SHUTDOWN &&
thread->sched_pri >= processor->current_pri ) {
ipi_action = eInterruptRunning;
- } else if ( processor->state == PROCESSOR_IDLE &&
- processor != current_processor() ) {
+ } else if (processor->state == PROCESSOR_IDLE) {
re_queue_tail(&pset->active_queue, &processor->processor_queue);
processor->next_thread = THREAD_NULL;
*/
if (nurgency != curgency) {
thread_tell_urgency(nurgency, urgency_param1, urgency_param2, 0, thread);
- machine_thread_going_on_core(thread, nurgency, 0);
+ machine_thread_going_on_core(thread, nurgency, 0, 0);
}
}
/* initialize the corpse config based on boot-args */
corpses_init();
+ vm_user_init();
+
/*
* Create a kernel thread to execute the kernel bootstrap.
*/
*/
thread_lock(thread);
urgency = thread_get_urgency(thread, &arg1, &arg2);
- machine_thread_going_on_core(thread, urgency, 0);
+ machine_thread_going_on_core(thread, urgency, 0, 0);
thread_unlock(thread);
splx(spl);
}
vm_map_offset_t map_data;
vm_offset_t data;
+ if (!kdebug_enable ||
+ !kdebug_debugid_enabled(KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID, 0)))
+ {
+ vm_map_copy_discard(infos_copy);
+ return KERN_SUCCESS;
+ }
+
assert(infos_copy != NULL);
if (task == NULL || task != current_task()) {
/* JMM - should just be temporary (implementation in bsd_kern still) */
extern void set_bsdtask_info(task_t,void *);
extern vm_map_t get_task_map_reference(task_t);
-extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t, boolean_t);
+extern vm_map_t swap_task_map(task_t, thread_t, vm_map_t);
extern pmap_t get_task_pmap(task_t);
extern uint64_t get_task_resident_size(task_t);
extern uint64_t get_task_compressed(task_t);
task_is_importance_receiver(task), 0);
}
+/*
+ * Inherit task role across exec
+ */
+void
+proc_inherit_task_role(task_t new_task,
+ task_t old_task)
+{
+ int role;
+
+ /* inherit the role from old task to new task */
+ role = proc_get_task_policy(old_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE);
+ proc_set_task_policy(new_task, TASK_POLICY_ATTRIBUTE, TASK_POLICY_ROLE, role);
+}
+
extern task_t bsd_init_task;
/*
/*
* like mach_absolute_time, but advances during sleep
*/
-__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0)
-__TVOS_AVAILABLE(__TVOS_10_0)
-__WATCHOS_AVAILABLE(__WATCHOS_3_0)
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
uint64_t mach_continuous_time(void);
/*
* like mach_approximate_time, but advances during sleep
*/
-__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0)
-__TVOS_AVAILABLE(__TVOS_10_0)
-__WATCHOS_AVAILABLE(__WATCHOS_3_0)
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
uint64_t mach_continuous_approximate_time(void);
#if !defined(KERNEL) && defined(PRIVATE)
// Forward definition because this is a BSD value
struct timespec;
-__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0)
-__TVOS_AVAILABLE(__TVOS_10_0)
-__WATCHOS_AVAILABLE(__WATCHOS_3_0)
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
kern_return_t mach_get_times(uint64_t* absolute_time,
uint64_t* continuous_time,
struct timespec *tp);
-__OSX_AVAILABLE_STARTING(__MAC_10_12, __IPHONE_10_0)
-__TVOS_AVAILABLE(__TVOS_10_0)
-__WATCHOS_AVAILABLE(__WATCHOS_3_0)
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
uint64_t mach_boottime_usec(void);
#endif /* KERNEL */
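
The availability macros above cover the new continuous-time interfaces. A small user-space example contrasting mach_absolute_time(), which does not advance across system sleep, with mach_continuous_time(), which does; both are declared in <mach/mach_time.h> on macOS 10.12 and later. The timebase scaling ignores overflow for brevity.

#include <mach/mach_time.h>
#include <stdio.h>

int main(void)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);

    uint64_t abs  = mach_absolute_time();
    uint64_t cont = mach_continuous_time();

    printf("absolute:   %llu ns\n",
           (unsigned long long)(abs  * tb.numer / tb.denom));
    printf("continuous: %llu ns\n",
           (unsigned long long)(cont * tb.numer / tb.denom));
    /* After a sleep/wake cycle the continuous value will have advanced by
     * roughly the sleep duration, while the absolute value will not. */
    return 0;
}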
/* DHMM data */
#define VM_MEMORY_DHMM 84
-#if !(defined(RC_HIDE_XNU_J79) || defined(RC_HIDE_XNU_J80))
-/* memory needed for DFR related actions */
-#define VM_MEMORY_DFR 85
-#endif // !(defined(RC_HIDE_XNU_J79) || defined(RC_HIDE_XNU_J80))
/* memory allocated by SceneKit.framework */
#define VM_MEMORY_SCENEKIT 86
uint32_t age_of_decompressions_during_sample_period[DECOMPRESSION_SAMPLE_MAX_AGE];
uint32_t overage_decompressions_during_sample_period = 0;
-void do_fastwake_warmup(void);
+void do_fastwake_warmup(queue_head_t *, boolean_t);
boolean_t fastwake_warmup = FALSE;
boolean_t fastwake_recording_in_progress = FALSE;
clock_sec_t dont_trim_until_ts = 0;
lck_mtx_unlock_always(c_list_lock);
}
+void
+do_fastwake_warmup_all(void)
+{
+
+ lck_mtx_lock_spin_always(c_list_lock);
+
+ if (queue_empty(&c_swappedout_list_head) && queue_empty(&c_swappedout_sparse_list_head)) {
+
+ lck_mtx_unlock_always(c_list_lock);
+ return;
+ }
+
+ fastwake_warmup = TRUE;
+
+ do_fastwake_warmup(&c_swappedout_list_head, TRUE);
+
+ do_fastwake_warmup(&c_swappedout_sparse_list_head, TRUE);
+
+ fastwake_warmup = FALSE;
+
+ lck_mtx_unlock_always(c_list_lock);
+
+}
void
-do_fastwake_warmup(void)
+do_fastwake_warmup(queue_head_t *c_queue, boolean_t consider_all_cseg)
{
c_segment_t c_seg = NULL;
AbsoluteTime startTime, endTime;
lck_mtx_lock_spin_always(c_list_lock);
- while (!queue_empty(&c_swappedout_list_head) && fastwake_warmup == TRUE) {
+ while (!queue_empty(c_queue) && fastwake_warmup == TRUE) {
- c_seg = (c_segment_t) queue_first(&c_swappedout_list_head);
+ c_seg = (c_segment_t) queue_first(c_queue);
- if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
- c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
- break;
+ if (consider_all_cseg == FALSE) {
+ if (c_seg->c_generation_id < first_c_segment_to_warm_generation_id ||
+ c_seg->c_generation_id > last_c_segment_to_warm_generation_id)
+ break;
- if (vm_page_free_count < (AVAILABLE_MEMORY / 4))
- break;
+ if (vm_page_free_count < (AVAILABLE_MEMORY / 4))
+ break;
+ }
lck_mtx_lock_spin_always(&c_seg->c_lock);
lck_mtx_unlock_always(c_list_lock);
lck_mtx_lock_spin_always(c_list_lock);
- first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
+ if (consider_all_cseg == FALSE) {
+ first_c_segment_to_warm_generation_id = last_c_segment_to_warm_generation_id = 0;
+ }
}
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_START, c_segment_warmup_count,
first_c_segment_to_warm_generation_id, last_c_segment_to_warm_generation_id, 0, 0);
- do_fastwake_warmup();
+ do_fastwake_warmup(&c_swappedout_list_head, FALSE);
KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 11) | DBG_FUNC_END, c_segment_warmup_count, c_segment_warmup_count - starting_warmup_count, 0, 0, 0);
fastwake_warmup = FALSE;
struct codesigning_exit_reason_info *ceri = NULL;
uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
- if (os_reason_alloc_buffer(codesigning_exit_reason, reason_buffer_size_estimate)) {
+ if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
} else {
if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
#endif /* DEBUG || DEVELOPMENT */
/* Free the buffer */
- os_reason_alloc_buffer(codesigning_exit_reason, 0);
+ os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
}
}
}
vm_object_t top_object = VM_OBJECT_NULL;
int throttle_delay;
int compressed_count_delta;
- vm_map_offset_t real_vaddr;
int grab_options;
+ vm_map_offset_t trace_vaddr;
+ vm_map_offset_t trace_real_vaddr;
+#if DEVELOPMENT || DEBUG
+ vm_map_offset_t real_vaddr;
real_vaddr = vaddr;
+#endif /* DEVELOPMENT || DEBUG */
+ trace_real_vaddr = vaddr;
vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
+ if (map == kernel_map) {
+ trace_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(vaddr);
+ trace_real_vaddr = VM_KERNEL_UNSLIDE_OR_PERM(trace_real_vaddr);
+ } else {
+ trace_vaddr = vaddr;
+ }
+
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
- ((uint64_t)vaddr >> 32),
- vaddr,
+ ((uint64_t)trace_vaddr >> 32),
+ trace_vaddr,
(map == kernel_map),
0,
0);
if (get_preemption_level() != 0) {
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
- ((uint64_t)vaddr >> 32),
- vaddr,
+ ((uint64_t)trace_vaddr >> 32),
+ trace_vaddr,
KERN_FAILURE,
0,
0);
else
event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
}
else
event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
- KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | type_of_fault, m->offset, get_current_unique_pid(), 0);
DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
}
}
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
- ((uint64_t)vaddr >> 32),
- vaddr,
+ ((uint64_t)trace_vaddr >> 32),
+ trace_vaddr,
kr,
type_of_fault,
0);
extern void vm_mem_bootstrap(void);
extern void vm_mem_init(void);
extern void vm_map_steal_memory(void);;
+extern void vm_user_init(void);
#endif /* VM_INIT_H */
if (flags & ~(VM_FLAGS_FIXED |
VM_FLAGS_ANYWHERE |
VM_FLAGS_OVERWRITE |
+ VM_FLAGS_IOKIT_ACCT |
VM_FLAGS_RETURN_4K_DATA_ADDR |
VM_FLAGS_RETURN_DATA_ADDR |
VM_FLAGS_ALIAS_MASK)) {
mask,
flags & (VM_FLAGS_ANYWHERE |
VM_FLAGS_OVERWRITE |
+ VM_FLAGS_IOKIT_ACCT |
VM_FLAGS_RETURN_4K_DATA_ADDR |
VM_FLAGS_RETURN_DATA_ADDR |
VM_FLAGS_ALIAS_MASK),
goto done;
}
+ if ((entry != vm_map_to_entry(map)) && /* we still have entries in the map */
+ (tmp_entry.vme_end != end) && /* AND, we are not at the end of the requested range */
+ (entry->vme_start != tmp_entry.vme_end)) { /* AND, the next entry is not contiguous. */
+ /* found a "new" hole */
+ s = tmp_entry.vme_end;
+ rc = KERN_INVALID_ADDRESS;
+ goto done;
+ }
+
s = entry->vme_start;
+
} /* end while loop through map entries */
done:
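
The added check above treats a non-contiguous next map entry as a hole in the requested range and fails with KERN_INVALID_ADDRESS instead of silently skipping it. A standalone sketch of the same idea, assuming a sorted array of half-open ranges rather than vm_map entries:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct range { uint64_t start, end; };   /* half-open [start, end) */

static bool covers_without_hole(const struct range *r, size_t n,
                                uint64_t s, uint64_t e)
{
    for (size_t i = 0; i < n && s < e; i++) {
        if (r[i].end <= s)
            continue;                    /* ends before the cursor */
        if (r[i].start > s)
            return false;                /* hole at [s, r[i].start) */
        s = r[i].end;                    /* advance past this range */
    }
    return s >= e;
}

int main(void)
{
    struct range map[] = { { 0, 4096 }, { 4096, 8192 }, { 12288, 16384 } };
    /* covered up to 8192, but there is a hole at [8192, 12288) */
    return (covers_without_hole(map, 3, 0, 8192) &&
            !covers_without_hole(map, 3, 0, 16384)) ? 0 : 1;
}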
for(i=0; i < num_pages; i++) {
if(UPL_PAGE_PRESENT(pl,i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl, upl->upl_priority, 0);
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
+ VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
vm_decmp_upl_reprioritize(upl, cur_tier);
break;
}
- KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, upl->upl_creator, m, upl->upl_reprio_info[i], upl->upl_priority, 0);
+ KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
+ upl->upl_reprio_info[i], upl->upl_priority, 0);
if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0)
vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
break;
{
wait_result_t ret;
- KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
+ KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
if (o->io_tracking && ((m->busy == TRUE) || (m->cleaning == TRUE) || VM_PAGE_WIRED(m))) {
/*
assert((vm_page_secluded_count_free +
vm_page_secluded_count_inuse) ==
vm_page_secluded_count);
- vm_page_queue_remove_first(&vm_page_queue_secluded,
- secluded_page,
- vm_page_t,
- pageq);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
assert(secluded_page->vm_page_q_state ==
VM_PAGE_ON_SECLUDED_Q);
- VM_PAGE_ZERO_PAGEQ_ENTRY(secluded_page);
- secluded_page->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- vm_page_secluded_count--;
+ vm_page_queues_remove(secluded_page, FALSE);
assert(!secluded_page->fictitious);
assert(!VM_PAGE_WIRED(secluded_page));
if (secluded_page->vm_page_object == 0) {
/* transfer to free queue */
assert(secluded_page->busy);
- vm_page_secluded_count_free--;
secluded_page->snext = local_freeq;
local_freeq = secluded_page;
local_freed++;
} else {
- vm_page_secluded_count_inuse--;
/* transfer to head of inactive queue */
pmap_clear_refmod_options(
VM_PAGE_GET_PHYS_PAGE(secluded_page),
assert((vm_page_secluded_count_free +
vm_page_secluded_count_inuse) ==
vm_page_secluded_count);
- vm_page_queue_remove_first(&vm_page_queue_secluded,
- secluded_page,
- vm_page_t,
- pageq);
+ secluded_page = vm_page_queue_first(&vm_page_queue_secluded);
assert(secluded_page->vm_page_q_state ==
VM_PAGE_ON_SECLUDED_Q);
- VM_PAGE_ZERO_PAGEQ_ENTRY(secluded_page);
- secluded_page->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- vm_page_secluded_count--;
+ vm_page_queues_remove(secluded_page, FALSE);
assert(!secluded_page->fictitious);
assert(!VM_PAGE_WIRED(secluded_page));
if (secluded_page->vm_page_object == 0) {
/* transfer to free queue */
assert(secluded_page->busy);
- vm_page_secluded_count_free--;
secluded_page->snext = local_freeq;
local_freeq = secluded_page;
local_freed++;
} else {
- vm_page_secluded_count_inuse--;
/* transfer to head of active queue */
vm_page_enqueue_active(secluded_page,
FALSE);
else
vm_pageout_considered_bq_external++;
- assert(VM_PAGE_PAGEABLE(m));
break;
}
}
/* NOTREACHED */
}
+ assert(VM_PAGE_PAGEABLE(m));
m_object = VM_PAGE_OBJECT(m);
force_anonymous = FALSE;
extern boolean_t vm_compressor_low_on_space(void);
extern int vm_swap_low_on_space(void);
-
+void do_fastwake_warmup_all(void);
#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */
vm_page_lock_queues();
KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE)),
- object, /* purged object */
+ VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
0,
available_for_purge,
0,
*/
*mp = m->next_m;
m->hashed = FALSE;
+ m->next_m = VM_PAGE_PACK_PTR(NULL);
found_m = m;
break;
bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
mem->hashed = FALSE;
+ this->next_m = VM_PAGE_PACK_PTR(NULL);
lck_spin_unlock(bucket_lock);
}
/*
}
assert(!vm_page_queue_empty(&vm_page_queue_secluded));
LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
- vm_page_queue_remove_first(&vm_page_queue_secluded,
- mem,
- vm_page_t,
- pageq);
+ mem = vm_page_queue_first(&vm_page_queue_secluded);
assert(mem->vm_page_q_state == VM_PAGE_ON_SECLUDED_Q);
-
- VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
- mem->vm_page_q_state = VM_PAGE_NOT_ON_Q;
- vm_page_secluded_count--;
+ vm_page_queues_remove(mem, TRUE);
object = VM_PAGE_OBJECT(mem);
assert(!VM_PAGE_WIRED(mem));
if (object == VM_OBJECT_NULL) {
/* free for grab! */
- assert(mem->busy);
- vm_page_secluded_count_free--;
vm_page_unlock_queues();
vm_page_secluded.grab_success_free++;
+
+ assert(mem->busy);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_on_backgroundq == 0);
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
return mem;
}
- vm_page_secluded_count_inuse--;
assert(!object->internal);
// vm_page_pageable_external_count--;
if (mem->reference) {
/* it's been used but we do need to grab a page... */
}
- /* page could still be on vm_page_queue_background... */
- vm_page_free_prepare_queues(mem);
vm_page_unlock_queues();
assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem)));
}
pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
- assert(mem->busy);
vm_page_secluded.grab_success_other++;
+ assert(mem->busy);
+ assert(mem->vm_page_q_state == VM_PAGE_NOT_ON_Q);
+ assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_on_backgroundq == 0);
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+
return mem;
}
#endif /* CONFIG_SECLUDED_MEMORY */
VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
}
if ( !mem->fictitious) {
+ assert(mem->pageq.next == 0);
+ assert(mem->pageq.prev == 0);
+ assert(mem->listq.next == 0);
+ assert(mem->listq.prev == 0);
+#if CONFIG_BACKGROUND_QUEUE
+ assert(mem->vm_page_backgroundq.next == 0);
+ assert(mem->vm_page_backgroundq.prev == 0);
+#endif /* CONFIG_BACKGROUND_QUEUE */
+ assert(mem->next_m == 0);
vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->lopage);
}
}
* as blocked up by vm_pageout_scan().
* The big win is not having to take the free list lock once
* per page.
+ *
+ * The VM page queues lock (vm_page_queue_lock) should NOT be held.
+ * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
*/
void
vm_page_free_list(
vm_page_t local_freeq;
int pg_count;
+ LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
+ LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
+
while (freeq) {
pg_count = 0;
}
if (abort_run == TRUE) {
- if (m != VM_PAGE_NULL) {
- vm_page_free_list(m, FALSE);
- }
-
- dumped_run++;
-
/*
* want the index of the last
* page in this run that was
*/
page_idx = tmp_start_idx + 2;
if (page_idx >= vm_pages_count) {
- if (wrapped)
+ if (wrapped) {
+ if (m != VM_PAGE_NULL) {
+ vm_page_unlock_queues();
+ vm_page_free_list(m, FALSE);
+ vm_page_lock_queues();
+ m = VM_PAGE_NULL;
+ }
+ dumped_run++;
goto done_scanning;
+ }
page_idx = last_idx = 0;
wrapped = TRUE;
}
last_idx = page_idx;
+ if (m != VM_PAGE_NULL) {
+ vm_page_unlock_queues();
+ vm_page_free_list(m, FALSE);
+ vm_page_lock_queues();
+ m = VM_PAGE_NULL;
+ }
+ dumped_run++;
+
lck_mtx_lock(&vm_page_queue_free_lock);
/*
* reset our free page limit since we
#endif /* CONFIG_BACKGROUND_QUEUE */
return;
}
+
if (mem->vm_page_q_state == VM_PAGE_USED_BY_COMPRESSOR)
{
assert(mem->pageq.next == 0 && mem->pageq.prev == 0);
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
+#include <vm/vm_init.h>
vm_size_t upl_offset_to_pagelist = 0;
#include <vm/cpm.h>
#endif /* VM_CPM */
+lck_grp_t dynamic_pager_control_port_lock_group;
+decl_lck_mtx_data(, dynamic_pager_control_port_lock);
ipc_port_t dynamic_pager_control_port=NULL;
/*
return kr;
}
+static void dp_control_port_init(void)
+{
+ lck_grp_init(&dynamic_pager_control_port_lock_group,"dp_control_port", LCK_GRP_ATTR_NULL);
+ lck_mtx_init(&dynamic_pager_control_port_lock, &dynamic_pager_control_port_lock_group, LCK_ATTR_NULL);
+}
kern_return_t
set_dp_control_port(
host_priv_t host_priv,
ipc_port_t control_port)
{
- if (host_priv == HOST_PRIV_NULL)
- return (KERN_INVALID_HOST);
+ ipc_port_t old_port;
- if (IP_VALID(dynamic_pager_control_port))
- ipc_port_release_send(dynamic_pager_control_port);
+ if (host_priv == HOST_PRIV_NULL)
+ return (KERN_INVALID_HOST);
+ lck_mtx_lock(&dynamic_pager_control_port_lock);
+ old_port = dynamic_pager_control_port;
dynamic_pager_control_port = control_port;
+ lck_mtx_unlock(&dynamic_pager_control_port_lock);
+
+ if (IP_VALID(old_port))
+ ipc_port_release_send(old_port);
+
return KERN_SUCCESS;
}
host_priv_t host_priv,
ipc_port_t *control_port)
{
- if (host_priv == HOST_PRIV_NULL)
+ if (host_priv == HOST_PRIV_NULL)
return (KERN_INVALID_HOST);
+ lck_mtx_lock(&dynamic_pager_control_port_lock);
*control_port = ipc_port_copy_send(dynamic_pager_control_port);
+ lck_mtx_unlock(&dynamic_pager_control_port_lock);
+
return KERN_SUCCESS;
}
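
set_dp_control_port now swaps the global port pointer while holding the new mutex and releases the displaced send right only after dropping the lock, so the release path never runs under the lock and get/set callers always see a consistent pointer. A user-space sketch of that pattern with pthreads; the handle type and release function are stand-ins, not kernel APIs, and the get path (which in the kernel copies the send right while holding the lock) is omitted.

#include <pthread.h>
#include <stdlib.h>

struct handle { int payload; };

static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static struct handle *g_handle;          /* protected by g_lock */

static void handle_release(struct handle *h)
{
    free(h);                             /* stand-in for ipc_port_release_send() */
}

static void set_handle(struct handle *new_handle)
{
    struct handle *old;

    pthread_mutex_lock(&g_lock);
    old = g_handle;                      /* remember the displaced handle */
    g_handle = new_handle;
    pthread_mutex_unlock(&g_lock);

    if (old != NULL)
        handle_release(old);             /* release only after dropping the lock */
}

Keeping the release outside the critical section mirrors ipc_port_release_send() being called on old_port after the mutex is dropped in the hunk above.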
return phys_page;
}
+void
+vm_user_init(void)
+{
+ dp_control_port_init();
+}
#if 0
kern_return_t kernel_object_iopl_request( /* forward */
struct mac_policy_conf {
const char *mpc_name; /** policy name */
const char *mpc_fullname; /** full name */
- const char **mpc_labelnames; /** managed label namespaces */
+ char const * const *mpc_labelnames; /** managed label namespaces */
unsigned int mpc_labelname_count; /** number of managed label namespaces */
struct mac_policy_ops *mpc_ops; /** operation vector */
int mpc_loadtime_flags; /** load time flags */
int reason_error = 0;
int kcdata_error = 0;
- if ((reason_error = os_reason_alloc_buffer(reason, kcdata_estimate_required_buffer_size
+ if ((reason_error = os_reason_alloc_buffer_noblock(reason, kcdata_estimate_required_buffer_size
(1, fatal_failure_desc_len))) == 0 &&
(kcdata_error = kcdata_get_memory_addr(&reason->osr_kcd_descriptor,
EXIT_REASON_USER_DESC, fatal_failure_desc_len,
memory is reloaded from ./memory.py
(lldb)
+ * Alternatively, you can use lldb's command for script loading:
+ (lldb) command script import /path/to/memory.py
+ You can re-run the same command every time you update the code in the file.
+
It is very important that you reload using the xnudebug command, as it does the plumbing of commands and types for your change in the module. Otherwise you could easily get confused
about why your changes are not reflected in the command.
-# A basic Plugin that creates performance reports from zprint output
-import urllib, urllib2
+import json, urllib, urllib2
+from urllib2 import Request, urlopen, HTTPError
kern_version = None
def plugin_init(kernel_target, config, lldb_obj, isConnected):
outstr = ''
further_cmds = []
submitvars = {}
- submitvars['type']="text"
- submitvars['log']=result_output
+ submitvars['log_content']=result_output
- submiturl = "http://speedtracer.apple.com/trace/analyze?format=xml"
+ submiturl = "https://speedtracer.apple.com/api/v2/trace"
encoded_data = urllib.urlencode(submitvars)
- request = urllib2.Request(submiturl, encoded_data, {"Accept":"application/xml"})
- response = urllib2.urlopen(request)
-
- status = response.info()['status']
- if status == 201 or status == '201':
- outstr += "CrashTracer data found at " + response.info()['location']
- newurl = response.info()['location']
- import webbrowser
- webbrowser.open(newurl)
- status = True
- else:
- outstr += "unknown response from server \n" + str(response.info())
+ request = urllib2.Request(submiturl, encoded_data)
+ request.add_header("Accept", "application/json")
+ request.add_header("X-ST-GroupName", "core-os")
+ try:
+ response = urllib2.urlopen(request)
+ response_str = response.read()
+ j = json.loads(response_str)
+ outstr += "\nspeedtracer output:\n\n"
+ stacks = j.get("symbolicated_log")
+ if stacks:
+ outstr += stacks
+ else:
+ outstr += json.dumps(j)
+ except HTTPError as e:
+ outstr += "speedtracer replied with\n" + str(e.info())
status = False
-
+
return (status, outstr, further_cmds)
def plugin_cleanup():
# EndMacro: showtaskstacks
+def CheckTaskProcRefs(task, proc):
+ for thread in IterateQueue(task.threads, 'thread *', 'task_threads'):
+ if int(thread.uthread) == 0:
+ continue
+ uthread = Cast(thread.uthread, 'uthread *')
+ refcount = int(uthread.uu_proc_refcount)
+ uu_ref_index = int(uthread.uu_pindex)
+ if refcount == 0:
+ continue
+ for ref in range(0, uu_ref_index):
+ if unsigned(uthread.uu_proc_ps[ref]) == unsigned(proc):
+ print GetTaskSummary.header + " " + GetProcSummary.header
+ pval = Cast(task.bsd_info, 'proc *')
+ print GetTaskSummary(task) + " " + GetProcSummary(pval)
+ print "\t" + GetThreadSummary.header
+ print "\t" + GetThreadSummary(thread) + "\n"
+
+ for frame in range (0, 10):
+ trace_addr = unsigned(uthread.uu_proc_pcs[ref][frame])
+ symbol_arr = kern.SymbolicateFromAddress(unsigned(trace_addr))
+ if symbol_arr:
+ symbol_str = str(symbol_arr[0].addr)
+ else:
+ symbol_str = ''
+ print '{0: <#x} {1: <s}'.format(trace_addr, symbol_str)
+ return
+
+@lldb_command('showprocrefs')
+def ShowProcRefs(cmd_args = None):
+ """ Display information on threads/BTs that could be holding a reference on the specified proc
+ NOTE: We can't say affirmatively if any of these references are still held since
+ there's no way to pair references with drop-refs in the current infrastructure.
+ Usage: showprocrefs <proc>
+ """
+ if cmd_args == None or len(cmd_args) < 1:
+ raise ArgumentError("No arguments passed")
+
+ proc = kern.GetValueFromAddress(cmd_args[0], 'proc *')
+
+ for t in kern.tasks:
+ CheckTaskProcRefs(t, proc)
+ for t in kern.terminated_tasks:
+ CheckTaskProcRefs(t, proc)
+
+ return
+
@lldb_command('showallthreads')
def ShowAllThreads(cmd_args = None):
""" Display info about all threads in the system
return True
-
-@lldb_command('showtaskuserstacks')
-def ShowTaskUserStacks(cmd_args=None):
- """ Print out the user stack for each thread in a task, followed by the user libraries.
- Syntax: (lldb) showtaskuserstacks <task_t>
- The format is compatible with CrashTracer. You can also use the speedtracer plugin as follows
- (lldb) showtaskuserstacks <task_t> -p speedtracer
-
- Note: the address ranges are approximations. Also the list may not be completely accurate. This command expects memory read failures
- and hence will skip a library if unable to read information. Please use your good judgement and not take the output as accurate
- """
- if not cmd_args:
- raise ArgumentError("Insufficient arguments")
-
- task = kern.GetValueFromAddress(cmd_args[0], 'task *')
+def ShowTaskUserStacks(task):
#print GetTaskSummary.header + " " + GetProcSummary.header
pval = Cast(task.bsd_info, 'proc *')
#print GetTaskSummary(task) + " " + GetProcSummary(pval) + "\n \n"
crash_report_format_string = """\
-Process: {pid: <10d}
+Process: {pname:s} [{pid:d}]
Path: {path: <50s}
Identifier: {pname: <30s}
Version: ??? (???)
Code Type: {parch: <20s}
-Parent Process: {ppname: >20s}[{ppid:d}]
+Parent Process: {ppname:s} [{ppid:d}]
Date/Time: {timest:s}.000 -0800
OS Version: {osversion: <20s}
print "Enable debugging ('(lldb) xnudebug debug') to see detailed trace."
return
+@lldb_command('showtaskuserstacks', "P:F:")
+def ShowTaskUserStacksCmdHelper(cmd_args=None, cmd_options={}):
+ """ Print out the user stack for each thread in a task, followed by the user libraries.
+ Syntax: (lldb) showtaskuserstacks <task_t>
+ or: (lldb) showtaskuserstacks -P <pid>
+ or: (lldb) showtaskuserstacks -F <task_name>
+ The format is compatible with CrashTracer. You can also use the speedtracer plugin as follows
+ (lldb) showtaskuserstacks <task_t> -p speedtracer
+
+ Note: the address ranges are approximations and the list may not be completely accurate. This command expects memory read failures
+ and will skip a library if it cannot read its information. Use your judgement and do not take the output as exact.
+ """
+ task_list = []
+ if "-F" in cmd_options:
+ task_list = FindTasksByName(cmd_options["-F"])
+ elif "-P" in cmd_options:
+ pidval = ArgumentStringToInt(cmd_options["-P"])
+ for t in kern.tasks:
+ pval = Cast(t.bsd_info, 'proc *')
+ if pval and pval.p_pid == pidval:
+ task_list.append(t)
+ break
+ elif cmd_args:
+ t = kern.GetValueFromAddress(cmd_args[0], 'task *')
+ task_list.append(t)
+ else:
+ raise ArgumentError("Insufficient arguments")
+
+ for task in task_list:
+ ShowTaskUserStacks(task)
def GetUserDataAsString(task, addr, size):
""" Get data from task's address space as a string of bytes
--- /dev/null
+/*
+ * Test that sending a message whose destination and voucher port are the same
+ * voucher, which holds only a single send right, does not panic when the
+ * destination uses MACH_MSG_TYPE_MOVE_SEND and the voucher uses MACH_MSG_TYPE_COPY_SEND.
+ *
+ * clang -o voucherentry voucherentry.c -ldarwintest -Weverything -Wno-gnu-flexible-array-initializer
+ *
+ * <rdar://problem/18826844>
+ */
+
+#include <mach/mach.h>
+#include <darwintest.h>
+
+T_DECL(voucher_entry, "voucher_entry", T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES))
+{
+ kern_return_t kr = KERN_SUCCESS;
+ mach_voucher_t voucher = MACH_VOUCHER_NULL;
+
+ /*
+ * The bank voucher already exists in this process, so using it doesn't
+ * actually test the problem. Use an importance voucher instead.
+ */
+ mach_voucher_attr_recipe_data_t recipe = {
+ .key = MACH_VOUCHER_ATTR_KEY_IMPORTANCE,
+ .command = MACH_VOUCHER_ATTR_IMPORTANCE_SELF,
+ .previous_voucher = MACH_VOUCHER_NULL,
+ .content_size = 0,
+ };
+
+ kr = host_create_mach_voucher(mach_host_self(),
+ (mach_voucher_attr_raw_recipe_array_t)&recipe,
+ sizeof(recipe), &voucher);
+
+ T_ASSERT_MACH_SUCCESS(kr, "host_create_mach_voucher");
+
+ T_ASSERT_NOTNULL(voucher, "voucher must not be null");
+
+ mach_port_urefs_t refs = 0;
+
+ kr = mach_port_get_refs(mach_task_self(), voucher, MACH_PORT_RIGHT_SEND, &refs);
+
+ T_ASSERT_MACH_SUCCESS(kr, "mach_port_get_refs");
+
+ T_ASSERT_EQ(refs, (mach_port_urefs_t)1, "voucher must have only one ref");
+
+ /* First, try with two moves (must fail because there's only one ref) */
+ mach_msg_header_t request_msg_1 = {
+ .msgh_remote_port = voucher,
+ .msgh_local_port = MACH_PORT_NULL,
+ .msgh_voucher_port = voucher,
+ .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_MOVE_SEND, 0, MACH_MSG_TYPE_MOVE_SEND, 0),
+ .msgh_id = 0xDEAD,
+ .msgh_size = sizeof(request_msg_1),
+ };
+
+ kr = mach_msg_send(&request_msg_1);
+
+ T_ASSERT_MACH_ERROR(MACH_SEND_INVALID_DEST, kr, "send with two moves should fail with invalid dest");
+
+ /* Next, try with a move and a copy (will succeed and destroy the last ref) */
+ mach_msg_header_t request_msg_2 = {
+ .msgh_remote_port = voucher,
+ .msgh_local_port = MACH_PORT_NULL,
+ .msgh_voucher_port = voucher,
+ .msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_MOVE_SEND, 0, MACH_MSG_TYPE_COPY_SEND, 0),
+ .msgh_id = 0xDEAD,
+ .msgh_size = sizeof(request_msg_2),
+ };
+
+ /* panic happens here */
+ kr = mach_msg_send(&request_msg_2);
+
+ T_ASSERT_MACH_SUCCESS(kr, "send with move and copy succeeds");
+
+ kr = mach_port_get_refs(mach_task_self(), voucher, MACH_PORT_RIGHT_SEND, &refs);
+
+ T_ASSERT_MACH_ERROR(KERN_INVALID_NAME, kr, "voucher should now be invalid name");
+}
+