X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..3e170ce000f1506b7b5d2c5c7faec85ceabb573d:/bsd/sys/buf_internal.h

diff --git a/bsd/sys/buf_internal.h b/bsd/sys/buf_internal.h
index a11222c07..6ff3284bc 100644
--- a/bsd/sys/buf_internal.h
+++ b/bsd/sys/buf_internal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -79,9 +79,23 @@
 #include
 #include
+#if CONFIG_PROTECT
+#include
+#endif
 
 #define NOLIST ((struct buf *)0x87654321)
 
+/*
+ * Attributes of an I/O to be used by lower layers
+ */
+struct bufattr {
+#if CONFIG_PROTECT
+	struct cpx *ba_cpx;
+	uint64_t ba_cp_file_off;
+#endif
+	uint64_t ba_flags;	/* flags. Some are only in-use on embedded devices */
+};
+
 /*
  * The buffer header describes an I/O operation in the kernel.
  */
@@ -115,7 +129,17 @@ struct buf {
 	int	b_dirtyend;	/* Offset of end of dirty region. */
 	int	b_validoff;	/* Offset in buffer of valid region. */
 	int	b_validend;	/* Offset of end of valid region. */
+
+	/* store extra information related to redundancy of data, such as
+	 * which redundancy copy to use, etc
+	 */
+	uint32_t b_redundancy_flags;
+
 	proc_t	b_proc;		/* Associated proc; NULL if kernel. */
+#ifdef BUF_MAKE_PRIVATE
+	buf_t	b_data_store;
+#endif
+	struct bufattr b_attr;
 #ifdef JOE_DEBUG
 	void *	b_owner;
 	int	b_tag;
@@ -125,40 +149,73 @@ struct buf {
 #endif
 };
 
+extern vm_offset_t buf_kernel_addrperm;
 
 /* cluster_io definitions for use with io bufs */
 #define b_uploffset	b_bufsize
+#define b_orig		b_freelist.tqe_prev
+#define b_shadow	b_freelist.tqe_next
+#define b_shadow_ref	b_validoff
+#ifdef BUF_MAKE_PRIVATE
+#define b_data_ref	b_validend
+#endif
 #define b_trans_head	b_freelist.tqe_prev
 #define b_trans_next	b_freelist.tqe_next
 #define b_iostate	b_rcred
 #define b_cliodone	b_wcred
 
 /*
- * These flags are kept in b_lflags... 
+ * These flags are kept in b_lflags...
  * buf_mtxp must be held before examining/updating
  */
 #define BL_BUSY		0x00000001	/* I/O in progress. */
 #define BL_WANTED	0x00000002	/* Process wants this buffer. */
 #define BL_IOBUF	0x00000004	/* buffer allocated via 'buf_alloc' */
-#define BL_CALLDONE	0x00000008	/* callback routine on B_CALL bp has completed */
 #define BL_WANTDEALLOC	0x00000010	/* buffer should be put on empty list when clean */
+#define BL_SHADOW	0x00000020
+#define BL_EXTERNAL	0x00000040
+#define BL_WAITSHADOW	0x00000080
+#define BL_IOBUF_ALLOC	0x00000100
+#define BL_WANTED_REF	0x00000200
 
 /*
  * Parameters for buffer cache garbage collection
 */
 #define BUF_STALE_THRESHHOLD	30	/* Collect if untouched in the last 30 seconds */
+#define BUF_MAX_GC_BATCH_SIZE	64	/* Under a single grab of the lock */
 
 /*
  * mask used by buf_flags... these are the readable external flags
 */
 #define BUF_X_RDFLAGS	(B_PHYS | B_RAW | B_LOCKED | B_ASYNC | B_READ | B_WRITE | B_PAGEIO |\
-			 B_META | B_CLUSTER | B_DELWRI | B_FUA | B_PASSIVE | B_IOSTREAMING | B_THROTTLED_IO)
+			 B_META | B_CLUSTER | B_DELWRI | B_FUA | B_PASSIVE | B_IOSTREAMING |\
+			 B_ENCRYPTED_IO | B_STATICCONTENT)
 /*
  * mask used by buf_clearflags/buf_setflags... these are the writable external flags
 */
 #define BUF_X_WRFLAGS	(B_PHYS | B_RAW | B_LOCKED | B_ASYNC | B_READ | B_WRITE | B_PAGEIO |\
 			 B_NOCACHE | B_FUA | B_PASSIVE | B_IOSTREAMING)
 
+#if 0
+/* b_flags defined in buf.h */
+#define B_WRITE		0x00000000	/* Write buffer (pseudo flag). */
+#define B_READ		0x00000001	/* Read buffer. */
+#define B_ASYNC		0x00000002	/* Start I/O, do not wait. */
+#define B_NOCACHE	0x00000004	/* Do not cache block after use. */
+#define B_DELWRI	0x00000008	/* Delay I/O until buffer reused. */
+#define B_LOCKED	0x00000010	/* Locked in core (not reusable). */
+#define B_PHYS		0x00000020	/* I/O to user memory. */
+#define B_CLUSTER	0x00000040	/* UPL based I/O generated by cluster layer */
+#define B_PAGEIO	0x00000080	/* Page in/out */
+#define B_META		0x00000100	/* buffer contains meta-data. */
+#define B_RAW		0x00000200	/* Set by physio for raw transfers. */
+#define B_FUA		0x00000400	/* Write-through disk cache(if supported) */
+#define B_PASSIVE	0x00000800	/* PASSIVE I/Os are ignored by THROTTLE I/O */
+#define B_IOSTREAMING	0x00001000	/* sequential access pattern detected */
+#define B_ENCRYPTED_IO	0x00004000	/* Encrypted I/O */
+#define B_STATICCONTENT	0x00008000	/* Buffer is likely to remain unaltered */
+#endif
+
 /*
  * These flags are kept in b_flags... access is lockless
  * External flags are defined in buf.h and cannot overlap
@@ -185,15 +242,44 @@ struct buf {
 /*
  * private flags used by by the cluster layer
  */
-#define B_NEED_IODONE	0x20000000	/* need biodone on the real_bp associated with a cluster_io */
+#define B_TWANTED	0x20000000	/* but_t that is part of a cluster level transaction is wanted */
 #define B_COMMIT_UPL	0x40000000	/* commit/abort the UPL on I/O success/failure */
-
+#define B_TDONE		0x80000000	/* buf_t that is part of a cluster level transaction has completed */
 
 /* Flags to low-level allocation routines. */
 #define B_CLRBUF	0x01	/* Request allocated buffer be cleared. */
 #define B_SYNC		0x02	/* Do all allocations synchronously. */
 #define B_NOBUFF	0x04	/* Do not allocate struct buf */
 
+/*
+ * ba_flags (Buffer Attribute flags)
+ * Some of these may be in-use only on embedded devices.
+ */
+#define BA_RAW_ENCRYPTED_IO	0x00000001
+#define BA_THROTTLED_IO		0x00000002
+#define BA_DELAYIDLESLEEP	0x00000004	/* Process is marked to delay idle sleep on disk IO */
+#define BA_NOCACHE		0x00000008
+#define BA_META			0x00000010
+#define BA_GREEDY_MODE		0x00000020	/* High speed writes that consume more storage */
+#define BA_QUICK_COMPLETE	0x00000040	/* Request quick completion at expense of storage efficiency */
+#define BA_PASSIVE		0x00000080
+
+/*
+ * Note: IO_TIERs consume 0x0100, 0x0200, 0x0400, 0x0800
+ * These are now in-use by the I/O tiering system.
+ */
+#define BA_IO_TIER_MASK		0x00000f00
+#define BA_IO_TIER_SHIFT	8
+
+#define BA_ISOCHRONOUS		0x00001000	/* device specific isochronous throughput to media */
+
+
+#define GET_BUFATTR_IO_TIER(bap)	((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT)
+#define SET_BUFATTR_IO_TIER(bap, tier)						\
+do {										\
+	(bap)->ba_flags &= (~BA_IO_TIER_MASK);					\
+	(bap)->ba_flags |= (((tier) << BA_IO_TIER_SHIFT) & BA_IO_TIER_MASK);	\
+} while(0)
 
 extern int niobuf_headers;	/* The number of IO buffer headers for cluster IO */
 extern int nbuf_headers;	/* The number of buffer headers */
@@ -217,11 +303,13 @@ extern struct buf *buf_headers;	/* The buffer headers. */
 
 __BEGIN_DECLS
 
+buf_t	buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg);
+
 buf_t	alloc_io_buf(vnode_t, int);
 void	free_io_buf(buf_t);
 
 int	allocbuf(struct buf *, int);
-void	bufinit(void) __attribute__((section("__TEXT, initcode")));
+void	bufinit(void);
 
 /*
  * Flags for buf_acquire
@@ -234,15 +322,23 @@ void bufinit(void) __attribute__((section("__TEXT, initcode")));
 void	buf_list_lock(void);
 void	buf_list_unlock(void);
 
-void	buf_biowait_callback(buf_t);
-
-void	cluster_init(void) __attribute__((section("__TEXT, initcode")));
+void	cluster_init(void);
 
 void	buf_drop(buf_t);
 errno_t	buf_acquire(buf_t, int, int, int);
 
 int	count_busy_buffers(void);
 int	count_lock_queue(void);
 
+int	buf_flushdirtyblks_skipinfo (vnode_t, int, int, const char *);
+void	buf_wait_for_shadow_io (vnode_t, daddr64_t);
+
+#ifdef BUF_MAKE_PRIVATE
+errno_t	buf_make_private(buf_t bp);
+#endif
+
+#ifdef CONFIG_PROTECT
+void	buf_setcpoff (buf_t, uint64_t);
+#endif
 
 __END_DECLS
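
Editor's note on the I/O tier accessors added above: GET_BUFATTR_IO_TIER and SET_BUFATTR_IO_TIER pack a four-bit tier value into bits 8-11 of ba_flags (BA_IO_TIER_MASK / BA_IO_TIER_SHIFT), clearing the old tier before or-ing in the new one so the surrounding flag bits are preserved. The snippet below is a minimal user-space sketch, not part of the header: struct bufattr is trimmed to its ba_flags field and the mask, shift, and macros are copied from the hunk above so the behaviour can be exercised outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* Trimmed copy of the header additions, for illustration only. */
struct bufattr {
	uint64_t ba_flags;
};

#define BA_IO_TIER_MASK		0x00000f00
#define BA_IO_TIER_SHIFT	8

#define GET_BUFATTR_IO_TIER(bap)	((bap->ba_flags & BA_IO_TIER_MASK) >> BA_IO_TIER_SHIFT)
#define SET_BUFATTR_IO_TIER(bap, tier)						\
do {										\
	(bap)->ba_flags &= (~BA_IO_TIER_MASK);					\
	(bap)->ba_flags |= (((tier) << BA_IO_TIER_SHIFT) & BA_IO_TIER_MASK);	\
} while(0)

int main(void)
{
	struct bufattr ba = { .ba_flags = 0x1 };	/* pretend BA_RAW_ENCRYPTED_IO is already set */
	struct bufattr *bap = &ba;

	SET_BUFATTR_IO_TIER(bap, 3);			/* tier lands in bits 8-11 */
	printf("tier=%d flags=0x%llx\n",
	    (int)GET_BUFATTR_IO_TIER(bap), (unsigned long long)ba.ba_flags);

	SET_BUFATTR_IO_TIER(bap, 1);			/* old tier cleared first, bit 0 untouched */
	printf("tier=%d flags=0x%llx\n",
	    (int)GET_BUFATTR_IO_TIER(bap), (unsigned long long)ba.ba_flags);
	return 0;
}

Expected output is tier=3 flags=0x301 followed by tier=1 flags=0x101: bit 0 survives both updates because SET_BUFATTR_IO_TIER only rewrites the tier field.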