X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..e8c3f78193f1895ea514044358b93b1add9322f3:/bsd/vfs/vfs_init.c

diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c
index fe9c904c5..bd44f3631 100644
--- a/bsd/vfs/vfs_init.c
+++ b/bsd/vfs/vfs_init.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -83,8 +83,8 @@
 #include
 #include
 #include
+#include
-#include <vfs/vfs_journal.h>	/* journal_init() */
 #if CONFIG_MACF
 #include
 #include
@@ -102,7 +102,7 @@
 #define DODEBUG(A)
 #endif
 
-__private_extern__ void vntblinit(void) __attribute__((section("__TEXT, initcode")));
+__private_extern__ void vntblinit(void);
 
 extern struct vnodeopv_desc *vfs_opv_descs[];
 				/* a list of lists of vnodeops defns */
@@ -173,6 +173,13 @@ vfs_opv_init(void)
 		for (j=0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
 			opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);
 
+			/* Silently skip known-disabled operations */
+			if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
+				printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
+				    vfs_opv_descs[i], opve_descp->opve_op->vdesc_name);
+				continue;
+			}
+
 			/*
 			 * Sanity check: is this operation listed
 			 * in the list of operations?  We check this
@@ -191,8 +198,8 @@ vfs_opv_init(void)
 			 * list of supported operations.
 			 */
 			if (opve_descp->opve_op->vdesc_offset == 0 &&
-			    opve_descp->opve_op->vdesc_offset !=
-			    	VOFFSET(vnop_default)) {
+			    opve_descp->opve_op !=
+			    	VDESC(vnop_default)) {
 				printf("operation %s not listed in %s.\n",
 				    opve_descp->opve_op->vdesc_name,
 				    "vfs_op_descs");
@@ -244,6 +251,10 @@ vfs_op_init(void)
 	 * and assign each its offset.
 	 */
 	for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
+		/* Silently skip known-disabled operations */
+		if (vfs_op_descs[i]->vdesc_flags & VDESC_DISABLED) {
+			continue;
+		}
 		vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
 		vfs_opv_numops++;
 	}
@@ -261,6 +272,16 @@ lck_grp_t * vnode_lck_grp;
 lck_grp_attr_t * vnode_lck_grp_attr;
 lck_attr_t * vnode_lck_attr;
 
+#if CONFIG_TRIGGERS
+/* vars for vnode trigger resolver */
+lck_grp_t * trigger_vnode_lck_grp;
+lck_grp_attr_t * trigger_vnode_lck_grp_attr;
+lck_attr_t * trigger_vnode_lck_attr;
+#endif
+
+lck_grp_t * fd_vn_lck_grp;
+lck_grp_attr_t * fd_vn_lck_grp_attr;
+lck_attr_t * fd_vn_lck_attr;
 
 /* vars for vnode list lock */
 lck_grp_t * vnode_list_lck_grp;
@@ -286,7 +307,18 @@ lck_grp_attr_t * mnt_list_lck_grp_attr;
 lck_attr_t * mnt_list_lck_attr;
 lck_mtx_t * mnt_list_mtx_lock;
 
+/* vars for sync mutex */
+lck_grp_t * sync_mtx_lck_grp;
+lck_grp_attr_t * sync_mtx_lck_grp_attr;
+lck_attr_t * sync_mtx_lck_attr;
+lck_mtx_t * sync_mtx_lck;
+
+lck_mtx_t *pkg_extensions_lck;
+
 struct mount * dead_mountp;
+
+extern void nspace_handler_init(void);
+
 /*
  * Initialize the vnode structures and initialize each file system type.
  */
@@ -311,6 +343,9 @@ vfsinit(void)
 	/* Allocate spec hash list lock */
 	spechash_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);
 
+	/* Allocate the package extensions table lock */
+	pkg_extensions_lck = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);
+
 	/* allocate vnode lock group attribute and group */
 	vnode_lck_grp_attr= lck_grp_attr_alloc_init();
 
@@ -319,6 +354,16 @@ vfsinit(void)
 	/* Allocate vnode lock attribute */
 	vnode_lck_attr = lck_attr_alloc_init();
 
+#if CONFIG_TRIGGERS
+	trigger_vnode_lck_grp_attr = lck_grp_attr_alloc_init();
+	trigger_vnode_lck_grp = lck_grp_alloc_init("trigger_vnode", trigger_vnode_lck_grp_attr);
+	trigger_vnode_lck_attr = lck_attr_alloc_init();
+#endif
+	/* Allocate per fd vnode data lock attribute and group */
+	fd_vn_lck_grp_attr = lck_grp_attr_alloc_init();
+	fd_vn_lck_grp = lck_grp_alloc_init("fd_vnode_data", fd_vn_lck_grp_attr);
+	fd_vn_lck_attr = lck_attr_alloc_init();
+
 	/* Allocate fs config lock group attribute and group */
 	fsconf_lck_grp_attr= lck_grp_attr_alloc_init();
 
@@ -349,6 +394,12 @@ vfsinit(void)
 	/* Allocate mount lock attribute */
 	mnt_lck_attr = lck_attr_alloc_init();
 
+	/* Allocate sync lock */
+	sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init();
+	sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr);
+	sync_mtx_lck_attr = lck_attr_alloc_init();
+	sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr);
+
 	/*
 	 * Initialize the vnode table
 	 */
@@ -362,12 +413,7 @@ vfsinit(void)
 	 */
 	nchinit();
 
-#if JOURNALING
-	/*
-	 * Initialize the journaling locks
-	 */
-	journal_init();
-#endif
+	nspace_handler_init();
 
 	/*
 	 * Build vnode operation vectors.
@@ -378,22 +424,44 @@ vfsinit(void)
 	 * Initialize each file system type in the static list,
 	 * until the first NULL ->vfs_vfsops is encountered.
 	 */
-	numused_vfsslots = maxtypenum = 0;
+	maxtypenum = VT_NON;
 	for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) {
+		struct vfsconf vfsc;
 		if (vfsp->vfc_vfsops == (struct vfsops *)0)
 			break;
 		if (i) vfsconf[i-1].vfc_next = vfsp;
 		if (maxtypenum <= vfsp->vfc_typenum)
 			maxtypenum = vfsp->vfc_typenum + 1;
 
-		/* a vfsconf is a prefix subset of a vfstable... */
-		(*vfsp->vfc_vfsops->vfs_init)((struct vfsconf *)vfsp);
-		lck_mtx_init(&vfsp->vfc_lock, fsconf_lck_grp, fsconf_lck_attr);
+		bzero(&vfsc, sizeof(struct vfsconf));
+		vfsc.vfc_reserved1 = 0;
+		bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
+		vfsc.vfc_typenum = vfsp->vfc_typenum;
+		vfsc.vfc_refcount = vfsp->vfc_refcount;
+		vfsc.vfc_flags = vfsp->vfc_flags;
+		vfsc.vfc_reserved2 = 0;
+		vfsc.vfc_reserved3 = 0;
+
+		if (vfsp->vfc_vfsops->vfs_sysctl) {
+			struct sysctl_oid *oidp = NULL;
+			struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");
+
+			MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
+			*oidp = oid;
+
+			/* Memory for VFS oid held by vfsentry forever */
+			vfsp->vfc_sysctl = oidp;
+			oidp->oid_name = vfsp->vfc_name;
+			sysctl_register_oid(vfsp->vfc_sysctl);
+		}
+
+		(*vfsp->vfc_vfsops->vfs_init)(&vfsc);
+
+		numused_vfsslots++;
+		numregistered_fses++;
 	}
 
 	/* next vfc_typenum to be used */
-	maxvfsconf = maxtypenum;
+	maxvfstypenum = maxtypenum;
 
 	/*
 	 * Initialize the vnop authorization scope.
@@ -410,9 +478,9 @@ vfsinit(void)
 	/*
 	 * create a mount point for dead vnodes
 	 */
-	MALLOC_ZONE(mp, struct mount *, (u_long)sizeof(struct mount),
+	MALLOC_ZONE(mp, struct mount *, sizeof(struct mount),
 		M_MOUNT, M_WAITOK);
-	bzero((char *)mp, (u_long)sizeof(struct mount));
+	bzero((char *)mp, sizeof(struct mount));
 	/* Initialize the default IO constraints */
 	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
 	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
@@ -420,6 +488,8 @@ vfsinit(void)
 	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
 	mp->mnt_devblocksize = DEV_BSIZE;
 	mp->mnt_alignmentmask = PAGE_MASK;
+	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
+	mp->mnt_ioscale = 1;
 	mp->mnt_ioflags = 0;
 	mp->mnt_realrootvp = NULLVP;
 	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
@@ -436,6 +506,10 @@ vfsinit(void)
 	mac_mount_label_associate(vfs_context_kernel(), mp);
 #endif
 	dead_mountp = mp;
+
+#if FS_COMPRESSION
+	decmpfs_init();
+#endif
 }
 
 void
@@ -466,6 +540,7 @@ void
 mount_lock_init(mount_t mp)
 {
 	lck_mtx_init(&mp->mnt_mlock, mnt_lck_grp, mnt_lck_attr);
+	lck_mtx_init(&mp->mnt_iter_lock, mnt_lck_grp, mnt_lck_attr);
 	lck_mtx_init(&mp->mnt_renamelock, mnt_lck_grp, mnt_lck_attr);
 	lck_rw_init(&mp->mnt_rwlock, mnt_lck_grp, mnt_lck_attr);
 }
@@ -474,6 +549,7 @@ void
 mount_lock_destroy(mount_t mp)
 {
 	lck_mtx_destroy(&mp->mnt_mlock, mnt_lck_grp);
+	lck_mtx_destroy(&mp->mnt_iter_lock, mnt_lck_grp);
 	lck_mtx_destroy(&mp->mnt_renamelock, mnt_lck_grp);
 	lck_rw_destroy(&mp->mnt_rwlock, mnt_lck_grp);
 }
@@ -502,21 +578,38 @@ struct vfstable *
 vfstable_add(struct vfstable *nvfsp)
 {
 	int slot;
-	struct vfstable *slotp;
+	struct vfstable *slotp, *allocated = NULL;
+	struct sysctl_oid *oidp = NULL;
+
+	if (nvfsp->vfc_vfsops->vfs_sysctl) {
+		struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");
+
+		MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
+		*oidp = oid;
+	}
+
 	/*
 	 * Find the next empty slot; we recognize an empty slot by a
 	 * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
 	 * ensure we set the entry back to NULL.
	 */
+findslot:
+	mount_list_lock();
 	for (slot = 0; slot < maxvfsslots; slot++) {
 		if (vfsconf[slot].vfc_vfsops == NULL)
 			break;
 	}
 	if (slot == maxvfsslots) {
-		/* out of static slots; allocate one instead */
-		MALLOC(slotp, struct vfstable *, sizeof(struct vfstable),
-			M_TEMP, M_WAITOK);
+		if (allocated == NULL) {
+			mount_list_unlock();
+			/* out of static slots; allocate one instead */
+			MALLOC(allocated, struct vfstable *, sizeof(struct vfstable),
+				M_TEMP, M_WAITOK);
+			goto findslot;
+		} else {
+			slotp = allocated;
+		}
 	} else {
 		slotp = &vfsconf[slot];
 	}
@@ -529,14 +622,32 @@ vfstable_add(struct vfstable *nvfsp)
 	 * with the value of 'maxvfslots' in the allocation case.
 	 */
 	bcopy(nvfsp, slotp, sizeof(struct vfstable));
-	lck_mtx_init(&slotp->vfc_lock, fsconf_lck_grp, fsconf_lck_attr);
 	if (slot != 0) {
 		slotp->vfc_next = vfsconf[slot - 1].vfc_next;
 		vfsconf[slot - 1].vfc_next = slotp;
 	} else {
 		slotp->vfc_next = NULL;
 	}
-	numused_vfsslots++;
+
+	if (slotp != allocated) {
+		/* used a statically allocated slot */
+		numused_vfsslots++;
+	}
+	numregistered_fses++;
+
+	if (oidp) {
+		/* Memory freed in vfstable_del after unregistration */
+		slotp->vfc_sysctl = oidp;
+		oidp->oid_name = slotp->vfc_name;
+		sysctl_register_oid(slotp->vfc_sysctl);
+	}
+
+	mount_list_unlock();
+
+	if (allocated && allocated != slotp) {
+		/* did allocation, but ended up using static slot */
+		FREE(allocated, M_TEMP);
+	}
 
 	return(slotp);
 }
@@ -560,6 +671,10 @@ vfstable_del(struct vfstable * vtbl)
 	struct vfstable **vcpp;
 	struct vfstable *vcdelp;
 
+#if DEBUG
+	lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
+#endif /* DEBUG */
+
 	/*
 	 * Traverse the list looking for vtbl; if found, *vcpp
 	 * will contain the address of the pointer to the entry to
@@ -567,18 +682,23 @@ vfstable_del(struct vfstable * vtbl)
 	 */
 	for( vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
 		if (*vcpp == vtbl)
-		    break;
-	    }
+			break;
+	}
 
 	if (*vcpp == NULL)
 		return(ESRCH);	/* vtbl not on vfsconf list */
 
+	if ((*vcpp)->vfc_sysctl) {
+		sysctl_unregister_oid((*vcpp)->vfc_sysctl);
+		(*vcpp)->vfc_sysctl->oid_name = NULL;
+		FREE((*vcpp)->vfc_sysctl, M_TEMP);
+		(*vcpp)->vfc_sysctl = NULL;
+	}
+
 	/* Unlink entry */
 	vcdelp = *vcpp;
 	*vcpp = (*vcpp)->vfc_next;
 
-	lck_mtx_destroy(&vcdelp->vfc_lock, fsconf_lck_grp);
-
 	/*
 	 * Is this an entry from our static table?  We find out by
 	 * seeing if the pointer to the object to be deleted places
@@ -587,6 +707,7 @@ vfstable_del(struct vfstable * vtbl)
 	if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) {	/* Y */
 		/* Mark as empty for vfscon_add() */
 		bzero(vcdelp, sizeof(struct vfstable));
+		numregistered_fses--;
 		numused_vfsslots--;
 	} else {	/* N */
 		/*
@@ -595,9 +716,16 @@ vfstable_del(struct vfstable * vtbl)
 		 * vfsconf onto our list, but it may not be persistent
 		 * because of the previous (copying) implementation.
 		 */
-		FREE(vcdelp, M_TEMP);
+		numregistered_fses--;
+		mount_list_unlock();
+		FREE(vcdelp, M_TEMP);
+		mount_list_lock();
 	}
 
+#if DEBUG
+	lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
+#endif /* DEBUG */
+
 	return(0);
 }
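
Reader's note (not part of the patch): the most instructive change above is the locking protocol in vfstable_add(). The slot scan now runs under mount_list_lock(); if no static slot is free, the code drops the lock before calling MALLOC (which may block), then jumps back to findslot to retake the lock and rescan, and finally frees the spare allocation if a static slot opened up in the meantime. Below is a minimal user-space sketch of that same pattern, assuming a hypothetical table_add()/struct entry API and using pthreads and malloc in place of mount_list_lock() and MALLOC; it is an illustration, not xnu code.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define NSLOTS 8

struct entry {
	int  in_use;
	char name[32];
};

static struct entry table[NSLOTS];                     /* static slots, like vfsconf[] */
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Copy *src into a free slot.  Scan under the lock; if allocation is
 * needed, drop the lock first (allocation may block), then retake it
 * and rescan, because the table may have changed in between.
 */
struct entry *
table_add(const struct entry *src)
{
	struct entry *slotp, *allocated = NULL;
	int slot;

findslot:
	pthread_mutex_lock(&table_lock);
	for (slot = 0; slot < NSLOTS; slot++) {
		if (!table[slot].in_use)
			break;
	}
	if (slot == NSLOTS) {
		if (allocated == NULL) {
			/* out of static slots; allocate outside the lock and retry */
			pthread_mutex_unlock(&table_lock);
			allocated = malloc(sizeof(*allocated));
			if (allocated == NULL)
				return NULL;
			goto findslot;
		}
		slotp = allocated;
	} else {
		slotp = &table[slot];
	}

	memcpy(slotp, src, sizeof(*slotp));
	slotp->in_use = 1;
	pthread_mutex_unlock(&table_lock);

	/* Allocated a spare, but a static slot freed up while unlocked. */
	if (allocated != NULL && allocated != slotp)
		free(allocated);

	return slotp;
}

The same reasoning explains the paired changes in vfstable_del() above: it is entered with the mount list lock held (hence the lck_mtx_assert at entry and exit), so it must drop and retake that lock around FREE of a dynamically allocated entry.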