}
} /* for each catalog entry */
+ /*
+ * If we couldn't fit all the entries requested in the user's buffer,
+ * it's not EOF.
+ */
+ if (*eofflag && (*actualcount < (int)ce_list->realentries))
+ *eofflag = 0;
+
/* If we skipped catalog entries for reserved files that should
* not be listed in namespace, update the index accordingly.
*/
return (error);
} else if (name[0] == HFS_EXTEND_FS) {
- u_int64_t newsize;
+ u_int64_t newsize = 0;
vnode_t vp = vfs_context_cwd(context);
if (newp == USER_ADDR_NULL || vp == NULLVP)
return (EINVAL);
if ((error = hfs_getmountpoint(vp, &hfsmp)))
return (error);
+
+ /* Start with the 'size' set to the current number of bytes in the filesystem */
+ newsize = ((uint64_t)hfsmp->totalBlocks) * ((uint64_t)hfsmp->blockSize);
+
+ /* Now get the new size from userland and overwrite our stored value */
error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize);
if (error)
return (error);
.
.Sh RETURN VALUES
Upon successful completion the number of entries successfully read
-is returned. A value of 0 indicates there are no more entries. On error,
+is returned. A value of 0 indicates there are no more entries. Once 0 is returned,
+no further entries are returned even if new entries are added to the directory.
+Directory iteration should be restarted either by repositioning the offset to 0 with
+.Fn lseek
+or by closing the file descriptor and opening the directory again. On error,
a value of -1 is returned and
.Va errno
is set to indicate the error.
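
Assuming this page documents the getattrlistbulk(2) call (the kernel changes above touch VNOP_GETATTRLISTBULK), the following userland sketch illustrates the restart pattern described in this section. The directory path, buffer size, and attribute selection are illustrative placeholders, not requirements.

    #include <sys/attr.h>
    #include <fcntl.h>
    #include <unistd.h>

    struct attrlist al = {
            .bitmapcount = ATTR_BIT_MAP_COUNT,
            .commonattr  = ATTR_CMN_RETURNED_ATTRS | ATTR_CMN_NAME,
    };
    char buf[64 * 1024];                    /* illustrative buffer size */
    int dirfd = open("/tmp", O_RDONLY);     /* illustrative directory */
    int n;

    /* Read batches of entries until 0 is returned (end of directory). */
    while ((n = getattrlistbulk(dirfd, &al, buf, sizeof(buf), 0)) > 0) {
            /* consume the n packed entries in buf */
    }

    /* Restart iteration from the beginning, as described above. */
    lseek(dirfd, 0, SEEK_SET);

Closing the file descriptor and re-opening the directory achieves the same restart.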
 * switch the background streams to use background
* congestion control algorithm. Otherwise, even background
* flows can move into foreground.
+ *
+ * System-initiated background traffic like cloud uploads
+ * should always use background delay-sensitive
+ * algorithms. This makes the stream more responsive to
+ * other streams on the user's network and minimizes
+ * the latency it induces.
*/
+ if (IS_SO_TC_BACKGROUNDSYSTEM(so->so_traffic_class))
+ fg_active = true;
+
if ((sotcdb & SOTCDB_NO_SENDTCPBG) != 0 ||
is_local || !fg_active) {
if (old_cc == TCP_CC_ALGO_BACKGROUND_INDEX)
#ifdef KERNEL_PRIVATE
#define SONPX_MASK_VALID (SONPX_SETOPTSHUT)
#define IS_SO_TC_BACKGROUND(_tc_) ((_tc_) == SO_TC_BK || (_tc_) == SO_TC_BK_SYS)
+#define IS_SO_TC_BACKGROUNDSYSTEM(_tc_) ((_tc_) == SO_TC_BK_SYS)
#endif /* KERNEL_PRIVATE */
#endif
#define FV_LOCK(fvd) lck_mtx_lock(&(((struct fd_vn_data *)fvd)->fv_lock))
#define FV_UNLOCK(fvd) lck_mtx_unlock(&(((struct fd_vn_data *)fvd)->fv_lock))
-#define FV_BUF_FREE(fvd, tag) \
-do { \
- FREE(fvd->fv_buf, tag); \
- fvd->fv_buf = NULL; \
- fvd->fv_bufsiz = 0; \
- fvd->fv_bufdone = 0; \
- fvd->fv_soff = 0; \
- fvd->fv_eoff = 0; \
- fvd->fv_eofflag = 0; \
-} while (0);
/*
* Global vnode data.
int nentries;
int error;
+ /*
+ * If the last readdir returned EOF, don't try again.
+ */
+ if (fvd->fv_eofflag) {
+ *eofflagp = 1;
+ if (fvd->fv_buf) {
+ FREE(fvd->fv_buf, M_FD_DIRBUF);
+ fvd->fv_buf = NULL;
+ }
+ return 0;
+ }
+
error = 0;
/*
*/
if (!fp->f_fglob->fg_offset) {
fvdata->fv_offset = 0;
- if (fvdata->fv_buf) {
- FV_BUF_FREE(fvdata, M_FD_DIRBUF);
- }
+ if (fvdata->fv_buf)
+ FREE(fvdata->fv_buf, M_FD_DIRBUF);
+ fvdata->fv_buf = NULL;
+ fvdata->fv_bufsiz = 0;
+ fvdata->fv_bufdone = 0;
+ fvdata->fv_soff = 0;
+ fvdata->fv_eoff = 0;
+ fvdata->fv_eofflag = 0;
}
auio = uio_createwithbuffer(1, fvdata->fv_offset, segflg, UIO_READ,
struct vnode_attr va;
char *va_name;
- eofflag = 0;
- count = 0;
+ if (fvdata->fv_eofflag && !fvdata->fv_buf) {
+ /*
+ * If the last successful VNOP_GETATTRLISTBULK or
+ * VNOP_READDIR returned EOF, don't try again.
+ */
+ eofflag = 1;
+ count = 0;
+ error = 0;
+ } else {
+ eofflag = 0;
+ count = 0;
- VATTR_INIT(&va);
- MALLOC(va_name, char *, MAXPATHLEN, M_TEMP, M_WAITOK|M_ZERO);
- va.va_name = va_name;
+ VATTR_INIT(&va);
+ MALLOC(va_name, char *, MAXPATHLEN, M_TEMP,
+ M_WAITOK | M_ZERO);
+ va.va_name = va_name;
- (void)getattrlist_setupvattr_all(&al, &va, VNON, NULL,
- IS_64BIT_PROCESS(p));
+ (void)getattrlist_setupvattr_all(&al, &va, VNON, NULL,
+ IS_64BIT_PROCESS(p));
- error = VNOP_GETATTRLISTBULK(dvp, &al, &va, auio, NULL,
- options, &eofflag, &count, ctx);
+ error = VNOP_GETATTRLISTBULK(dvp, &al, &va, auio, NULL,
+ options, &eofflag, &count, ctx);
- FREE(va_name, M_TEMP);
+ FREE(va_name, M_TEMP);
- /*
- * cache state of eofflag.
- */
- if (!error) {
- fvdata->fv_eofflag = eofflag;
+ /*
+ * cache state of eofflag.
+ */
+ if (!error) {
+ fvdata->fv_eofflag = eofflag;
+ }
}
}
&count, &eofflag, ctx);
}
- if (error && fvdata->fv_eofflag) {
- /*
- * Some filesystems return EINVAL if called again when,
- * for a directory, they have already returned EOF. We
- * have the EOF state from the last successful call to it.
- * If this is an error just reuse the state from the last
- * call and use that to return 0 to the user instead of
- * percolating an error to the user. We're not particular
- * about the error returned. If we get *any* error after
- * having already gotten an EOF, we ignore it.
- */
- eofflag = 1;
- error = 0;
- count = 0;
- }
-
if (count) {
fvdata->fv_offset = uio_offset(auio);
fp->f_fglob->fg_offset = fvdata->fv_offset;
-14.3.0
+14.4.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
{
return aes_decrypt_key(key, 32, cx);
}
+
+aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_init(gcm, ctx, key_len, key);
+ return aes_good;
+}
+
+aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_set_iv(gcm, ctx, len, in_iv);
+ return aes_good;
+}
+
+aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_gmac(gcm, ctx, aad_bytes, aad);
+ return aes_good;
+}
+
+aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes,
+ unsigned char *out_blk, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); // The actual GCM encryption.
+ return aes_good;
+}
+
+aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_finalize(gcm, ctx, tag_bytes, tag);
+ ccgcm_reset(gcm, ctx);
+ return aes_good;
+}
+
+aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_init(gcm, ctx, key_len, key);
+ return aes_good;
+}
+
+aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_set_iv(gcm, ctx, len, in_iv);
+ return aes_good;
+}
+
+aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_gmac(gcm, ctx, aad_bytes, aad);
+ return aes_good;
+}
+
+aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes,
+ unsigned char *out_blk, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_update(gcm, ctx, num_bytes, in_blk, out_blk); // The actual GCM decryption.
+ return aes_good;
+}
+
+aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return aes_error;
+ }
+
+ ccgcm_finalize(gcm, ctx, tag_bytes, tag);
+ ccgcm_reset(gcm, ctx);
+ return aes_good;
+}
+
+unsigned aes_encrypt_get_ctx_size_gcm(void)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_encrypt;
+ if (!gcm) {
+ return 0;
+ }
+ return (cc_ctx_sizeof(ccgcm_ctx, gcm->size));
+}
+
+unsigned aes_decrypt_get_ctx_size_gcm(void)
+{
+ const struct ccmode_gcm *gcm = g_crypto_funcs->ccaes_gcm_decrypt;
+ if (!gcm) {
+ return 0;
+ }
+ return (cc_ctx_sizeof(ccgcm_ctx, gcm->size));
+}
+
aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk,
unsigned char *out_blk, aes_decrypt_ctx cx[1]);
+aes_rval aes_encrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx);
+aes_rval aes_encrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx);
+unsigned aes_encrypt_get_ctx_size_gcm(void);
+
+aes_rval aes_decrypt_key_gcm(const unsigned char *key, int key_len, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_set_iv_gcm(const unsigned char *in_iv, unsigned int len, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_aad_gcm(const unsigned char *aad, unsigned int aad_bytes, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_gcm(const unsigned char *in_blk, unsigned int num_bytes, unsigned char *out_blk, ccgcm_ctx *ctx);
+aes_rval aes_decrypt_finalize_gcm(unsigned char *tag, unsigned int tag_bytes, ccgcm_ctx *ctx);
+unsigned aes_decrypt_get_ctx_size_gcm(void);
+
#if defined(__cplusplus)
}
#endif
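
For reference, a minimal sketch (not part of this change) of the call sequence the encrypt side of the new GCM interface expects. The kalloc()/kfree() allocation, the variable names (key, iv, aad, ptext, ctext, tag, and their lengths), and the 32-byte key / 12-byte IV / 16-byte tag sizes are illustrative assumptions; the decrypt side mirrors this with the aes_decrypt_*_gcm calls.

    unsigned ctx_size = aes_encrypt_get_ctx_size_gcm();
    if (ctx_size == 0)
            return;                         /* GCM mode not registered */

    ccgcm_ctx *gctx = (ccgcm_ctx *)kalloc(ctx_size);    /* illustrative allocation */
    if (gctx == NULL)
            return;

    if (aes_encrypt_key_gcm(key, 32, gctx) == aes_good &&       /* AES-256 key */
        aes_encrypt_set_iv_gcm(iv, 12, gctx) == aes_good &&     /* 96-bit IV */
        aes_encrypt_aad_gcm(aad, aad_len, gctx) == aes_good &&
        aes_encrypt_gcm(ptext, ptext_len, ctext, gctx) == aes_good &&
        aes_encrypt_finalize_gcm(tag, 16, gctx) == aes_good) {
            /* ctext now holds the ciphertext and tag the 16-byte GCM tag */
    }

    kfree(gctx, ctx_size);

Since the finalize routines above also call ccgcm_reset(), the same context can be reused for another message after setting a new IV.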
const struct ccmode_cbc *ccaes_cbc_decrypt;
const struct ccmode_xts *ccaes_xts_encrypt;
const struct ccmode_xts *ccaes_xts_decrypt;
+ const struct ccmode_gcm *ccaes_gcm_encrypt;
+ const struct ccmode_gcm *ccaes_gcm_decrypt;
/* DES, ecb and cbc */
const struct ccmode_ecb *ccdes_ecb_encrypt;
const struct ccmode_ecb *ccdes_ecb_decrypt;
#define VM_KERNEL_IS_SLID(_o) \
(((vm_offset_t)(_o) >= vm_kernel_base) && \
- ((vm_offset_t)(_o) < vm_kernel_top))
+ ((vm_offset_t)(_o) <= vm_kernel_top))
#define VM_KERNEL_IS_KEXT(_o) \
(((vm_offset_t)(_o) >= vm_kext_base) && \
((vm_offset_t)(_o) < vm_kext_top))