#include <sys/kernel_types.h>
#include <kern/locks.h>
#include <mach/memory_object_types.h>
+#include <sys/ucred.h>
+#ifdef KERNEL_PRIVATE
+#include <sys/imgact.h>
+#endif // KERNEL_PRIVATE
-/* defns for ubc_sync_range() and ubc_msync */
+/* defns for ubc_msync() */
#define UBC_PUSHDIRTY 0x01 /* clean any dirty pages in the specified range to the backing store */
#define UBC_PUSHALL 0x02 /* push both dirty and precious pages to the backing store */
off_t ubc_getsize(struct vnode *);
int ubc_setsize(struct vnode *, off_t);
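+
+/*
+ * Example (illustrative sketch): keep the UBC's notion of the file size in
+ * step with an on-disk truncation.  `vp` and `new_size` are assumed to be
+ * supplied by the calling filesystem; error handling is elided.
+ *
+ *	if (ubc_getsize(vp) != new_size)
+ *		(void) ubc_setsize(vp, new_size);
+ */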
+#ifdef KERNEL_PRIVATE
+
+enum {
+ UBC_SETSIZE_NO_FS_REENTRY = 1
+};
+typedef uint32_t ubc_setsize_opts_t;
+
+errno_t ubc_setsize_ex(vnode_t vp, off_t nsize, ubc_setsize_opts_t opts);
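+
+/*
+ * Example (illustrative sketch): shrink a file while the filesystem holds
+ * locks that page-out must not re-enter.  `vp` and `new_size` are assumed to
+ * be supplied by the caller.
+ *
+ *	errno_t err = ubc_setsize_ex(vp, new_size, UBC_SETSIZE_NO_FS_REENTRY);
+ */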
+
+#endif // KERNEL_PRIVATE
+
kauth_cred_t ubc_getcred(struct vnode *);
struct thread;
int ubc_setthreadcred(struct vnode *, struct proc *, struct thread *);
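+
+/*
+ * Example (illustrative sketch): stash the calling thread's credential on the
+ * vnode so later paging I/O is charged to it.  `p` is the current proc;
+ * current_thread() and NOCRED are assumed to be visible to the caller via
+ * kern/thread.h and sys/ucred.h.
+ *
+ *	if (ubc_getcred(vp) == NOCRED)
+ *		(void) ubc_setthreadcred(vp, p, current_thread());
+ */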
-int ubc_sync_range(vnode_t, off_t, off_t, int);
errno_t ubc_msync(vnode_t, off_t, off_t, off_t *, int);
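+
+/*
+ * Example (illustrative sketch): synchronously clean every dirty page of a
+ * file using only the flags defined above.  On failure, `resid` is assumed
+ * to hold the offset at which the push stopped.
+ *
+ *	off_t resid = 0;
+ *	errno_t err = ubc_msync(vp, 0, ubc_getsize(vp), &resid, UBC_PUSHDIRTY);
+ */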
int ubc_pages_resident(vnode_t);
int ubc_page_op(vnode_t, off_t, int, ppnum_t *, int *);
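+
+/*
+ * Example (illustrative sketch): bail out of a per-page scan when the object
+ * has no resident pages, then probe one page's state.  Treating
+ * UPL_POP_PRESENT (mach/memory_object_types.h) as the right query op here is
+ * an assumption.
+ *
+ *	ppnum_t phys;
+ *	int page_flags;
+ *	if (ubc_pages_resident(vp) &&
+ *	    ubc_page_op(vp, f_offset, UPL_POP_PRESENT, &phys, &page_flags) == KERN_SUCCESS) {
+ *		... page at f_offset is resident ...
+ *	}
+ */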
/* code signing */
struct cs_blob;
struct cs_blob *ubc_cs_blob_get(vnode_t, cpu_type_t, off_t);
+
+/* APIs to handle the generation count for the cs blob */
+void cs_blob_reset_cache(void);
+int ubc_cs_blob_revalidate(vnode_t, struct cs_blob *, struct image_params *, int);
+int ubc_cs_generation_check(vnode_t);
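+
+/*
+ * Example (illustrative sketch): revalidate a vnode's signing blob when its
+ * generation count has fallen behind the global one.  `cputype`, `offset`,
+ * `imgp`, and the 0 flags argument are caller-supplied placeholders.
+ *
+ *	if (ubc_cs_generation_check(vp) != 0) {
+ *		struct cs_blob *blob = ubc_cs_blob_get(vp, cputype, offset);
+ *		if (blob != NULL)
+ *			(void) ubc_cs_blob_revalidate(vp, blob, imgp, 0);
+ *	}
+ */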
+
+int cs_entitlements_blob_get(proc_t, void **, size_t *);
+int cs_blob_get(proc_t, void **, size_t *);
+const char *cs_identity_get(proc_t);
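+
+/*
+ * Example (illustrative sketch): look up a process's signing identity and raw
+ * entitlements blob.  The returned pointers are assumed to reference
+ * kernel-owned memory that the caller must not free.
+ *
+ *	const char *ident = cs_identity_get(p);
+ *	void *ents = NULL;
+ *	size_t ents_len = 0;
+ *	if (cs_entitlements_blob_get(p, &ents, &ents_len) == 0 && ents != NULL) {
+ *		... parse the entitlements blob ...
+ *	}
+ */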
+
#endif
/* cluster IO routines */
int cluster_push(vnode_t, int);
int cluster_push_ext(vnode_t, int, int (*)(buf_t, void *), void *);
+int cluster_push_err(vnode_t, int, int (*)(buf_t, void *), void *, int *);
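+
+/*
+ * Example (illustrative sketch): flush a vnode's dirty clusters and pick up
+ * the first I/O error through the trailing out-parameter, which plain
+ * cluster_push() does not report.  The 0 flags and NULL callback are
+ * assumptions about a typical caller.
+ *
+ *	int push_err = 0;
+ *	(void) cluster_push_err(vp, 0, NULL, NULL, &push_err);
+ *	if (push_err != 0)
+ *		... handle the I/O error ...
+ */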
int cluster_bp(buf_t);
int cluster_bp_ext(buf_t, int (*)(buf_t, void *), void *);
int cluster_copy_upl_data(uio_t, upl_t, int, int *);
int cluster_copy_ubc_data(vnode_t, uio_t, int *, int);
+typedef struct cl_direct_read_lock cl_direct_read_lock_t;
+cl_direct_read_lock_t *cluster_lock_direct_read(vnode_t vp, lck_rw_type_t exclusive);
+void cluster_unlock_direct_read(cl_direct_read_lock_t *lck);
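+
+/*
+ * Example (illustrative sketch): hold off direct (uncached) reads around an
+ * operation that needs a stable file, then drop the lock.  The lock object is
+ * opaque; only this pair of calls is part of the interface.
+ *
+ *	cl_direct_read_lock_t *lck = cluster_lock_direct_read(vp, LCK_RW_TYPE_EXCLUSIVE);
+ *	... operate on vp ...
+ *	cluster_unlock_direct_read(lck);
+ */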
/* UPL routines */
int ubc_create_upl(vnode_t, off_t, int, upl_t *, upl_page_info_t **, int);
int ubc_upl_commit_range(upl_t, upl_offset_t, upl_size_t, int);
int ubc_upl_abort(upl_t, int);
int ubc_upl_abort_range(upl_t, upl_offset_t, upl_size_t, int);
+void ubc_upl_range_needed(upl_t, int, int);
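+
+/*
+ * Example (illustrative sketch): create a UPL over the first page of a file,
+ * then commit it.  UPL_FLAGS_NONE, UPL_COMMIT_FREE_ON_EMPTY, and PAGE_SIZE
+ * come from the Mach headers; the fixed bounds are placeholders for values a
+ * real caller derives from the I/O at hand.
+ *
+ *	upl_t upl = NULL;
+ *	upl_page_info_t *pl = NULL;
+ *	if (ubc_create_upl(vp, 0, PAGE_SIZE, &upl, &pl, UPL_FLAGS_NONE) == KERN_SUCCESS) {
+ *		... fill or inspect the pages ...
+ *		(void) ubc_upl_commit_range(upl, 0, PAGE_SIZE, UPL_COMMIT_FREE_ON_EMPTY);
+ *	}
+ */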
upl_page_info_t *ubc_upl_pageinfo(upl_t);
upl_size_t ubc_upl_maxbufsize(void);
int is_file_clean(vnode_t, off_t);
+errno_t mach_to_bsd_errno(kern_return_t mach_err);
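+
+/*
+ * Example (illustrative sketch): fold a Mach return code into a BSD errno at
+ * the boundary between the two layers.  `kr` stands in for the result of a
+ * prior Mach VM call.
+ *
+ *	if (kr != KERN_SUCCESS)
+ *		return mach_to_bsd_errno(kr);
+ */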
+
+#ifdef KERNEL_PRIVATE
+
+__attribute__((pure)) boolean_t ubc_is_mapped(const struct vnode *, boolean_t *writable);
+__attribute__((pure)) boolean_t ubc_is_mapped_writable(const struct vnode *);
+
+uint32_t cluster_max_io_size(mount_t, int);
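+
+/*
+ * Example (illustrative sketch): size an I/O against the cluster layer's
+ * per-mount limit, and skip a fast path when the file is mapped writable.
+ * CL_READ is assumed to be the direction flag used by the cluster code; it is
+ * not defined in this header.
+ *
+ *	uint32_t max_io = cluster_max_io_size(vnode_mount(vp), CL_READ);
+ *	if (ubc_is_mapped_writable(vp))
+ *		... fall back to the coherent path ...
+ */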
+
+#endif // KERNEL_PRIVATE
+
__END_DECLS
#endif /* _SYS_UBC_H_ */