/*
- * Copyright (c) 1999-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2008 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
extern struct zone *ubc_info_zone;
-#define MAX_CLUSTERS 4 /* maximum number of vfs clusters per vnode */
+/*
+ * Maximum number of vfs clusters per vnode
+ */
+#define MAX_CLUSTERS CONFIG_MAX_CLUSTERS
+#define SPARSE_PUSH_LIMIT 4 /* limit on the number of concurrent sparse pushes allowed outside of cl_lockw */
+ /* once this limit is reached, pushes are done while holding the lock */
struct cl_extent {
daddr64_t b_addr;
struct cl_writebehind {
lck_mtx_t cl_lockw;
void * cl_scmap; /* pointer to sparse cluster map */
- int cl_scdirty; /* number of dirty pages in the sparse cluster map */
+ off_t cl_last_write; /* offset of the end of the last write */
+ off_t cl_seq_written; /* sequentially written bytes */
+ int cl_sparse_pushes; /* number of pushes outside of the cl_lockw in progress */
+ int cl_sparse_wait; /* synchronous push is in progress */
int cl_number; /* number of packed write behind clusters currently valid */
struct cl_wextent cl_clusters[MAX_CLUSTERS]; /* packed write behind clusters */
};
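/*
 * Illustrative sketch (not the actual vfs_cluster.c logic) of how the new
 * cl_sparse_pushes / cl_sparse_wait fields and SPARSE_PUSH_LIMIT might
 * gate sparse-cluster pushes; do_sparse_push() and the msleep channel are
 * hypothetical stand-ins for the real push path.
 */
static void
sparse_push_sketch(struct cl_writebehind *wbp)
{
	lck_mtx_lock(&wbp->cl_lockw);

	while (wbp->cl_sparse_wait) {
		/* a synchronous push is draining; wait for it to finish */
		msleep(&wbp->cl_sparse_wait, &wbp->cl_lockw,
		    PRIBIO, "sparse_push", NULL);
	}
	if (wbp->cl_sparse_pushes < SPARSE_PUSH_LIMIT) {
		/* under the limit: drop cl_lockw and push concurrently */
		wbp->cl_sparse_pushes++;
		lck_mtx_unlock(&wbp->cl_lockw);

		do_sparse_push(wbp->cl_scmap);

		lck_mtx_lock(&wbp->cl_lockw);
		wbp->cl_sparse_pushes--;
	} else {
		/* at the limit: push while holding cl_lockw */
		do_sparse_push(wbp->cl_scmap);
	}
	lck_mtx_unlock(&wbp->cl_lockw);
}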
struct cs_blob *csb_next;
cpu_type_t csb_cpu_type;
unsigned int csb_flags;
- off_t csb_base_offset;
- off_t csb_start_offset;
- off_t csb_end_offset;
+ off_t csb_base_offset; /* Offset of Mach-O binary in fat binary */
+ off_t csb_start_offset; /* Blob coverage area start, from csb_base_offset */
+ off_t csb_end_offset; /* Blob coverage area end, from csb_base_offset */
ipc_port_t csb_mem_handle;
vm_size_t csb_mem_size;
vm_offset_t csb_mem_offset;
vm_address_t csb_mem_kaddr;
unsigned char csb_sha1[SHA1_RESULTLEN];
+ unsigned int csb_sigpup; /* set if the blob was loaded via sigpup */
+ const char *csb_teamid; /* team identifier from the code signature */
+ unsigned int csb_platform_binary; /* nonzero for platform binaries */
};
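/*
 * Illustrative sketch of walking a vnode's cs_blob chain via csb_next to
 * find the blob covering a file offset; the kernel's own lookup lives in
 * bsd/kern/ubc_subr.c and this simplified stand-in may differ from it.
 */
static struct cs_blob *
blob_for_offset_sketch(struct cs_blob *blobs, cpu_type_t cputype, off_t offset)
{
	struct cs_blob *blob;

	for (blob = blobs; blob != NULL; blob = blob->csb_next) {
		if (cputype != CPU_TYPE_ANY && blob->csb_cpu_type != cputype)
			continue;
		/* csb_start/end offsets are relative to csb_base_offset */
		if (offset >= blob->csb_base_offset + blob->csb_start_offset &&
		    offset < blob->csb_base_offset + blob->csb_end_offset)
			return (blob);
	}
	return (NULL);
}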
/*
struct ubc_info {
memory_object_t ui_pager; /* pager */
memory_object_control_t ui_control; /* VM control for the pager */
- long ui_flags; /* flags */
vnode_t ui_vnode; /* vnode for this ubc_info */
kauth_cred_t ui_ucred; /* holds credentials for NFS paging */
off_t ui_size; /* file size for the vnode */
+ uint32_t ui_flags; /* flags */
+ uint32_t cs_add_gen; /* generation count when csblob was validated */
struct cl_readahead *cl_rahead; /* cluster read ahead context */
struct cl_writebehind *cl_wbehind; /* cluster write behind context */
+ struct timespec cs_mtime; /* modify time of file when
+ first cs_blob was loaded */
struct cs_blob *cs_blobs; /* for CODE SIGNING */
+#if CHECK_CS_VALIDATION_BITMAP
+ void *cs_valid_bitmap; /* right now: used only for signed files on the read-only root volume */
+ uint64_t cs_valid_bitmap_size; /* Save original bitmap size in case the file size changes.
+ * In the future, we may want to resize the underlying
+ * bitmap to reflect any changes in file size.
+ */
+#endif /* CHECK_CS_VALIDATION_BITMAP */
};
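/*
 * Illustrative sketch: cs_add_gen records the generation count in force
 * when the vnode's blob was validated, so a later check can detect that
 * code signing state has moved on.  The generation-counter parameter
 * name here is assumed for illustration.
 */
static boolean_t
cs_blob_is_stale_sketch(struct ubc_info *uip, uint32_t current_cs_generation)
{
	/* revalidate if the global generation has advanced since validation */
	return (uip->cs_add_gen != current_cs_generation);
}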
/* Defines for ui_flags */
#define UI_ISMAPPED 0x00000010 /* vnode is currently mapped */
#define UI_MAPBUSY 0x00000020 /* vnode is being mapped or unmapped */
#define UI_MAPWAITING 0x00000040 /* someone waiting for UI_MAPBUSY */
+#define UI_MAPPEDWRITE 0x00000080 /* it's mapped with PROT_WRITE */
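/*
 * Illustrative sketch of how UI_ISMAPPED and the new UI_MAPPEDWRITE flag
 * might back the ubc_is_mapped() accessor declared below; the real
 * implementation is in bsd/kern/ubc_subr.c and this is a simplified
 * stand-in.
 */
static boolean_t
ubc_is_mapped_sketch(const struct vnode *vp, boolean_t *writable)
{
	if (vp->v_ubcinfo == NULL ||
	    !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED))
		return (FALSE);
	if (writable)
		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE) ?
		    TRUE : FALSE;
	return (TRUE);
}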
/*
* exported primitives for loadable file systems.
__private_extern__ int ubc_umount(mount_t mp);
__private_extern__ void ubc_unmountall(void);
__private_extern__ memory_object_t ubc_getpager(vnode_t);
-__private_extern__ int ubc_map(vnode_t, int);
__private_extern__ void ubc_destroy_named(vnode_t);
/* internal only */
__private_extern__ void cluster_release(struct ubc_info *);
__private_extern__ uint32_t cluster_max_io_size(mount_t, int);
-
-
+__private_extern__ uint32_t cluster_throttle_io_limit(vnode_t, uint32_t *);
+
/* Flags for ubc_getobject() */
#define UBC_FLAGS_NONE 0x0000
#define UBC_FOR_PAGEOUT 0x0002
memory_object_control_t ubc_getobject(vnode_t, int);
+boolean_t ubc_strict_uncached_IO(vnode_t);
int ubc_info_init(vnode_t);
int ubc_info_init_withsize(vnode_t, off_t);
int ubc_isinuse(vnode_t, int);
int ubc_isinuse_locked(vnode_t, int, int);
-int ubc_page_op(vnode_t, off_t, int, ppnum_t *, int *);
-int ubc_range_op(vnode_t, off_t, off_t, int, int *);
-
int ubc_getcdhash(vnode_t, off_t, unsigned char *);
-int UBCINFOEXISTS(vnode_t);
-
+__attribute__((pure)) boolean_t ubc_is_mapped(const struct vnode *, boolean_t *writable);
+__attribute__((pure)) boolean_t ubc_is_mapped_writable(const struct vnode *);
+
+#ifdef XNU_KERNEL_PRIVATE
+int UBCINFOEXISTS(const struct vnode *);
+#endif /* XNU_KERNEL_PRIVATE */
+
+/* code signing */
+struct cs_blob;
+int ubc_cs_blob_add(vnode_t, cpu_type_t, off_t, vm_address_t, vm_size_t, int);
+int ubc_cs_sigpup_add(vnode_t, vm_address_t, vm_size_t);
+struct cs_blob *ubc_get_cs_blobs(vnode_t);
+void ubc_get_cs_mtime(vnode_t, struct timespec *);
+int ubc_cs_getcdhash(vnode_t, off_t, unsigned char *);
+kern_return_t ubc_cs_blob_allocate(vm_offset_t *, vm_size_t *);
+void ubc_cs_blob_deallocate(vm_offset_t, vm_size_t);
+
+kern_return_t ubc_cs_validation_bitmap_allocate( vnode_t );
+void ubc_cs_validation_bitmap_deallocate( vnode_t );
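/*
 * Illustrative usage sketch for the new code-signing accessors; the
 * calling context and offset parameter are hypothetical.
 */
static void
cdhash_usage_sketch(vnode_t vp, off_t macho_offset)
{
	unsigned char cdhash[SHA1_RESULTLEN];
	struct timespec cs_mtime;

	if (ubc_cs_getcdhash(vp, macho_offset, cdhash) == 0) {
		/* cdhash now holds the code directory hash for this slice */
		ubc_get_cs_mtime(vp, &cs_mtime);
		/* cs_mtime: file modify time when the first blob was loaded */
	}
}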
__END_DECLS