X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..HEAD:/bsd/vfs/vfs_init.c

diff --git a/bsd/vfs/vfs_init.c b/bsd/vfs/vfs_init.c
index 78790e025..99f99e44e 100644
--- a/bsd/vfs/vfs_init.c
+++ b/bsd/vfs/vfs_init.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- *
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- *
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
@@ -65,6 +65,12 @@
  *
  * @(#)vfs_init.c 8.5 (Berkeley) 5/11/95
  */
+/*
+ * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */

 #include
@@ -76,8 +82,16 @@
 #include
 #include
 #include
-#include
+#include
+#include
+#if CONFIG_MACF
+#include
+#include
+#endif
+#if QUOTA
+#include
+#endif

 /*
  * Sigh, such primitive tools are these...
@@ -88,11 +102,14 @@
 #define DODEBUG(A)
 #endif

-extern uid_t console_user;
-extern struct vnodeopv_desc *vfs_opv_descs[];
-        /* a list of lists of vnodeops defns */
+ZONE_DECLARE(mount_zone, "mount", sizeof(struct mount), ZC_ZFREE_CLEARMEM);
+
+__private_extern__ void vntblinit(void);
+
+extern const struct vnodeopv_desc *vfs_opv_descs[];
+/* a list of lists of vnodeops defns */
 extern struct vnodeop_desc *vfs_op_descs[];
-        /* and the operations they perform */
+/* and the operations they perform */
 /*
  * This code doesn't work if the defn is **vnodop_defns with cc.
  * The problem is because of the compiler sometimes putting in an
@@ -101,17 +118,16 @@ extern struct vnodeop_desc *vfs_op_descs[];
  */
 int vfs_opv_numops;

-typedef (*PFI)(); /* the standard Pointer to a Function returning an Int */
+typedef int (*PFIvp)(void *);

 /*
  * A miscellaneous routine.
  * A generic "default" routine that just returns an error.
  */
 int
-vn_default_error()
+vn_default_error(void)
 {
-
-    return (ENOTSUP);
+    return ENOTSUP;
 }

 /*
@@ -131,33 +147,39 @@
  * that is a(whole)nother story.) This is a feature.
  */
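As context for the hunks that follow, here is a minimal stand-alone sketch of the dispatch-vector scheme that vfs_opv_init() implements. It is not code from this file: the types and names (op_desc, opv_entry, myfs_*) are invented stand-ins for the kernel's vnodeop_desc and vnodeopv_entry_desc, and the integer offsets play the role of vdesc_offset. The two passes mirror the loops in the diff: place each supplied implementation at its operation's offset, then backfill every empty slot with the default routine.

#include <stdio.h>
#include <stdlib.h>

typedef int (*opfn_t)(void *);                            /* plays the role of PFIvp */

struct op_desc   { int offset; const char *name; };       /* stand-in for vnodeop_desc */
struct opv_entry { struct op_desc *op; opfn_t impl; };    /* stand-in for vnodeopv_entry_desc */

static struct op_desc op_default = { 0, "default" };
static struct op_desc op_lookup  = { 1, "lookup"  };
static struct op_desc op_read    = { 2, "read"    };
#define NUM_OPS 3

static int default_error(void *ap) { (void)ap; return 45; } /* stand-in for vn_default_error() */
static int myfs_lookup(void *ap)   { (void)ap; return 0;  }

/* What a filesystem supplies: only the operations it implements, default included. */
static struct opv_entry myfs_entries[] = {
    { &op_default, default_error },
    { &op_lookup,  myfs_lookup   },
    { NULL,        NULL          }
};

/* Two passes, as in vfs_opv_init(): fill known slots, then backfill with the default. */
static opfn_t *
build_vector(const struct opv_entry *entries)
{
    opfn_t *vec = calloc(NUM_OPS, sizeof(opfn_t));
    if (vec == NULL) {
        abort();
    }
    for (int j = 0; entries[j].op != NULL; j++) {
        vec[entries[j].op->offset] = entries[j].impl;
    }
    for (int k = 0; k < NUM_OPS; k++) {
        if (vec[k] == NULL) {
            vec[k] = vec[op_default.offset];
        }
    }
    return vec;
}

int
main(void)
{
    opfn_t *myfs_vector = build_vector(myfs_entries);
    /* Callers dispatch by offset, much as VOCALL()-style macros index the
     * per-filesystem vector by vdesc_offset. */
    printf("lookup -> %d\n", myfs_vector[op_lookup.offset](NULL)); /* implemented: 0 */
    printf("read   -> %d\n", myfs_vector[op_read.offset](NULL));   /* falls back: 45 */
    free(myfs_vector);
    return 0;
}

The point of the backfill pass is visible in the second printf: an operation a filesystem never registered still resolves to a valid function rather than a NULL pointer.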
 void
-vfs_opv_init()
+vfs_opv_init(void)
 {
     int i, j, k;
-    int (***opv_desc_vector_p)(void *);
-    int (**opv_desc_vector)(void *);
-    struct vnodeopv_entry_desc *opve_descp;
+    int(***opv_desc_vector_p)(void *);
+    int(**opv_desc_vector)(void *);
+    const struct vnodeopv_entry_desc *opve_descp;

     /*
      * Allocate the dynamic vectors and fill them in.
      */
-    for (i=0; vfs_opv_descs[i]; i++) {
+    for (i = 0; vfs_opv_descs[i]; i++) {
         opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p;
         /*
          * Allocate and init the vector, if it needs it.
          * Also handle backwards compatibility.
          */
         if (*opv_desc_vector_p == NULL) {
-            MALLOC(*opv_desc_vector_p, PFI*,
-                vfs_opv_numops*sizeof(PFI), M_TEMP, M_WAITOK);
-            bzero (*opv_desc_vector_p, vfs_opv_numops*sizeof(PFI));
+            *opv_desc_vector_p = kheap_alloc(KHEAP_DEFAULT,
+                vfs_opv_numops * sizeof(PFIvp), Z_WAITOK | Z_ZERO);
             DODEBUG(printf("vector at %x allocated\n", opv_desc_vector_p));
         }
         opv_desc_vector = *opv_desc_vector_p;
-        for (j=0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
+        for (j = 0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
             opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);
+            /* Silently skip known-disabled operations */
+            if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
+                printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
+                    vfs_opv_descs[i], opve_descp->opve_op->vdesc_name);
+                continue;
+            }
+
             /*
              * Sanity check: is this operation listed
              * in the list of operations? We check this
@@ -176,18 +198,18 @@ vfs_opv_init()
              * list of supported operations.
              */
             if (opve_descp->opve_op->vdesc_offset == 0 &&
-                opve_descp->opve_op->vdesc_offset !=
-                    VOFFSET(vnop_default)) {
+                opve_descp->opve_op !=
+                VDESC(vnop_default)) {
                 printf("operation %s not listed in %s.\n", opve_descp->opve_op->vdesc_name, "vfs_op_descs");
-                panic ("vfs_opv_init: bad operation");
+                panic("vfs_opv_init: bad operation");
             }
             /*
              * Fill in this entry.
              */
             opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
-                opve_descp->opve_impl;
+                opve_descp->opve_impl;
         }
     }
     /*
@@ -200,13 +222,15 @@
     /*
      * Force every operations vector to have a default routine.
      */
-    if (opv_desc_vector[VOFFSET(vnop_default)]==NULL) {
+    if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
         panic("vfs_opv_init: operation vector without default routine.");
     }
-    for (k = 0; kopv_desc_vector_p) = NULL;
+    }
     /*
      * Figure out how many ops there are by counting the table,
      * and assign each its offset.
      */
     for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
+        /* Silently skip known-disabled operations */
+        if (vfs_op_descs[i]->vdesc_flags & VDESC_DISABLED) {
+            continue;
+        }
         vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
         vfs_opv_numops++;
     }
-    DODEBUG(printf ("vfs_opv_numops=%d\n", vfs_opv_numops));
+    DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops));
 }

 /*
@@ -241,120 +270,36 @@ vfs_op_init()
 extern struct vnodeops dead_vnodeops;
 extern struct vnodeops spec_vnodeops;

-/* vars for vnode lock */
-lck_grp_t * vnode_lck_grp;
-lck_grp_attr_t * vnode_lck_grp_attr;
-lck_attr_t * vnode_lck_attr;
-
-    /* vars for vnode list lock */
-lck_grp_t * vnode_list_lck_grp;
-lck_grp_attr_t * vnode_list_lck_grp_attr;
-lck_attr_t * vnode_list_lck_attr;
-lck_mtx_t * vnode_list_mtx_lock;
-lck_mtx_t * spechash_mtx_lock;
-/* Routine to lock and unlock the vnode lists */
-void vnode_list_lock(void);
-void vnode_list_unlock(void);
-
-/* vars for vfsconf lock */
-lck_grp_t * fsconf_lck_grp;
-lck_grp_attr_t * fsconf_lck_grp_attr;
-lck_attr_t * fsconf_lck_attr;
-
+static LCK_GRP_DECLARE(vnode_list_lck_grp, "vnode list");
+static LCK_ATTR_DECLARE(vnode_list_lck_attr, 0, 0);
+static LCK_SPIN_DECLARE_ATTR(vnode_list_spin_lock,
+    &vnode_list_lck_grp, &vnode_list_lck_attr);
+static LCK_MTX_DECLARE_ATTR(spechash_mtx_lock,
+    &vnode_list_lck_grp, &vnode_list_lck_attr);
+LCK_MTX_DECLARE_ATTR(pkg_extensions_lck,
+    &vnode_list_lck_grp, &vnode_list_lck_attr);

 /* vars for mount lock */
-lck_grp_t * mnt_lck_grp;
-lck_grp_attr_t * mnt_lck_grp_attr;
-lck_attr_t * mnt_lck_attr;
+static LCK_GRP_DECLARE(mnt_lck_grp, "mount");
+static LCK_ATTR_DECLARE(mnt_lck_attr, 0, 0);

 /* vars for mount list lock */
-lck_grp_t * mnt_list_lck_grp;
-lck_grp_attr_t * mnt_list_lck_grp_attr;
-lck_attr_t * mnt_list_lck_attr;
-lck_mtx_t * mnt_list_mtx_lock;
-
-extern void journal_init();
+static LCK_GRP_DECLARE(mnt_list_lck_grp, "mount list");
+LCK_MTX_DECLARE(mnt_list_mtx_lock, &mnt_list_lck_grp);

 struct mount * dead_mountp;
+
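The extern lck_grp_t / lck_attr_t pointers removed above used to be filled in at boot by the lck_grp_alloc_init() and lck_mtx_alloc_init() calls deleted from vfsinit() below; the LCK_GRP_DECLARE / LCK_MTX_DECLARE / LCK_SPIN_DECLARE_ATTR macros that replace them build the same lock objects statically, so they are valid before vfsinit() runs and need no allocation or teardown code. A minimal sketch of the newer style, using hypothetical names (example_lck_grp, example_mtx, example_bump) rather than anything from this file, and assuming the usual <kern/locks.h> header:

#include <kern/locks.h>

static LCK_GRP_DECLARE(example_lck_grp, "example");
static LCK_MTX_DECLARE(example_mtx, &example_lck_grp);

static int example_counter;

void
example_bump(void)
{
    /* No init routine needed: the mutex is usable from the start of boot. */
    lck_mtx_lock(&example_mtx);
    example_counter++;
    lck_mtx_unlock(&example_mtx);
}

This is why the diff can both delete the big allocation block from vfsinit() and lock statically-declared mutexes (for example mnt_list_mtx_lock) with a plain &name argument.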
 /*
  * Initialize the vnode structures and initialize each file system type.
  */
 void
-vfsinit()
+vfsinit(void)
 {
     struct vfstable *vfsp;
     int i, maxtypenum;
     struct mount * mp;
-
-    /* Allocate vnode list lock group attribute and group */
-    vnode_list_lck_grp_attr= lck_grp_attr_alloc_init();
-    lck_grp_attr_setstat(vnode_list_lck_grp_attr);
-
-    vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr);
-
-    /* Allocate vnode list lock attribute */
-    vnode_list_lck_attr = lck_attr_alloc_init();
-    //lck_attr_setdebug(vnode_list_lck_attr);
-
-    /* Allocate vnode list lock */
-    vnode_list_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);
-
-    /* Allocate spec hash list lock */
-    spechash_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);
-
-    /* allocate vnode lock group attribute and group */
-    vnode_lck_grp_attr= lck_grp_attr_alloc_init();
-    lck_grp_attr_setstat(vnode_lck_grp_attr);
-
-    vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr);
-
-    /* Allocate vnode lock attribute */
-    vnode_lck_attr = lck_attr_alloc_init();
-    //lck_attr_setdebug(vnode_lck_attr);
-
-    /* Allocate fs config lock group attribute and group */
-    fsconf_lck_grp_attr= lck_grp_attr_alloc_init();
-    lck_grp_attr_setstat(fsconf_lck_grp_attr);
-
-    fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr);
-
-    /* Allocate fs config lock attribute */
-    fsconf_lck_attr = lck_attr_alloc_init();
-    //lck_attr_setdebug(fsconf_lck_attr);
-
-
-    /* Allocate mount point related lock structures */
-
-    /* Allocate mount list lock group attribute and group */
-    mnt_list_lck_grp_attr= lck_grp_attr_alloc_init();
-    lck_grp_attr_setstat(mnt_list_lck_grp_attr);
-
-    mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr);
-
-    /* Allocate mount list lock attribute */
-    mnt_list_lck_attr = lck_attr_alloc_init();
-    //lck_attr_setdebug(mnt_list_lck_attr);
-
-    /* Allocate mount list lock */
-    mnt_list_mtx_lock = lck_mtx_alloc_init(mnt_list_lck_grp, mnt_list_lck_attr);
-
-
-    /* allocate mount lock group attribute and group */
-    mnt_lck_grp_attr= lck_grp_attr_alloc_init();
-    lck_grp_attr_setstat(mnt_lck_grp_attr);
-
-    mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr);
-
-    /* Allocate mount lock attribute */
-    mnt_lck_attr = lck_attr_alloc_init();
-    //lck_attr_setdebug(mnt_lck_attr);
-    /*
-     * Initialize the "console user" for access purposes:
-     */
-    console_user = (uid_t)0;
-
     /*
      * Initialize the vnode table
      */
@@ -367,10 +312,7 @@ vfsinit()
      * Initialize the vnode name cache
      */
     nchinit();
-    /*
-     * Initialize the journaling locks
-     */
-    journal_init();
+
     /*
      * Build vnode operation vectors.
      */
@@ -380,87 +322,131 @@ vfsinit()
      * Initialize each file system type in the static list,
      * until the first NULL ->vfs_vfsops is encountered.
      */
-    numused_vfsslots = maxtypenum = 0;
-    for (vfsp = vfsconf, i = 0; i < maxvfsconf; i++, vfsp++) {
-        if (vfsp->vfc_vfsops == (struct vfsops *)0)
+    maxtypenum = VT_NON;
+    for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) {
+        struct vfsconf vfsc;
+        if (vfsp->vfc_vfsops == (struct vfsops *)0) {
             break;
-        if (i) vfsconf[i-1].vfc_next = vfsp;
-        if (maxtypenum <= vfsp->vfc_typenum)
+        }
+        if (i) {
+            vfsconf[i - 1].vfc_next = vfsp;
+        }
+        if (maxtypenum <= vfsp->vfc_typenum) {
             maxtypenum = vfsp->vfc_typenum + 1;
-        (*vfsp->vfc_vfsops->vfs_init)(vfsp);
-
-        lck_mtx_init(&vfsp->vfc_lock, fsconf_lck_grp, fsconf_lck_attr);
-
+        }
+
+        bzero(&vfsc, sizeof(struct vfsconf));
+        vfsc.vfc_reserved1 = 0;
+        bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
+        vfsc.vfc_typenum = vfsp->vfc_typenum;
+        vfsc.vfc_refcount = vfsp->vfc_refcount;
+        vfsc.vfc_flags = vfsp->vfc_flags;
+        vfsc.vfc_reserved2 = 0;
+        vfsc.vfc_reserved3 = 0;
+
+        if (vfsp->vfc_vfsops->vfs_sysctl) {
+            struct sysctl_oid *oidp = NULL;
+            struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");
+
+            oidp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct sysctl_oid), Z_WAITOK);
+            *oidp = oid;
+
+            /* Memory for VFS oid held by vfsentry forever */
+            vfsp->vfc_sysctl = oidp;
+            oidp->oid_name = vfsp->vfc_name;
+            sysctl_register_oid(vfsp->vfc_sysctl);
+        }
+
+        (*vfsp->vfc_vfsops->vfs_init)(&vfsc);
+        numused_vfsslots++;
+        numregistered_fses++;
     }
     /* next vfc_typenum to be used */
-    maxvfsconf = maxtypenum;
+    maxvfstypenum = maxtypenum;

     /*
      * Initialize the vnop authorization scope.
      */
     vnode_authorize_init();
-
-    /*
+
+    /*
      * create a mount point for dead vnodes
      */
-    MALLOC_ZONE(mp, struct mount *, (u_long)sizeof(struct mount),
-        M_MOUNT, M_WAITOK);
-    bzero((char *)mp, (u_long)sizeof(struct mount));
+    mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);

     /* Initialize the default IO constraints */
     mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
     mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
     mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
     mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
     mp->mnt_devblocksize = DEV_BSIZE;
-
+    mp->mnt_alignmentmask = PAGE_MASK;
+    mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
+    mp->mnt_ioscale = 1;
+    mp->mnt_ioflags = 0;
+    mp->mnt_realrootvp = NULLVP;
+    mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
+
     TAILQ_INIT(&mp->mnt_vnodelist);
     TAILQ_INIT(&mp->mnt_workerqueue);
     TAILQ_INIT(&mp->mnt_newvnodes);
     mp->mnt_flag = MNT_LOCAL;
     mp->mnt_lflag = MNT_LDEAD;
     mount_lock_init(mp);
+
+#if CONFIG_MACF
+    mac_mount_label_init(mp);
+    mac_mount_label_associate(vfs_context_kernel(), mp);
+#endif

     dead_mountp = mp;
+
+#if FS_COMPRESSION
+    decmpfs_init();
+#endif
+
+    nspace_resolver_init();
 }

 void
-vnode_list_lock()
+vnode_list_lock(void)
 {
-    lck_mtx_lock(vnode_list_mtx_lock);
+    lck_spin_lock_grp(&vnode_list_spin_lock, &vnode_list_lck_grp);
 }

 void
-vnode_list_unlock()
+vnode_list_unlock(void)
 {
-    lck_mtx_unlock(vnode_list_mtx_lock);
+    lck_spin_unlock(&vnode_list_spin_lock);
 }

 void
-mount_list_lock()
+mount_list_lock(void)
 {
-    lck_mtx_lock(mnt_list_mtx_lock);
+    lck_mtx_lock(&mnt_list_mtx_lock);
 }

 void
-mount_list_unlock()
+mount_list_unlock(void)
 {
-    lck_mtx_unlock(mnt_list_mtx_lock);
+    lck_mtx_unlock(&mnt_list_mtx_lock);
 }

 void
 mount_lock_init(mount_t mp)
 {
-    lck_mtx_init(&mp->mnt_mlock, mnt_lck_grp, mnt_lck_attr);
-    lck_mtx_init(&mp->mnt_renamelock, mnt_lck_grp, mnt_lck_attr);
-    lck_rw_init(&mp->mnt_rwlock, mnt_lck_grp, mnt_lck_attr);
+    lck_mtx_init(&mp->mnt_mlock, &mnt_lck_grp, &mnt_lck_attr);
+    lck_mtx_init(&mp->mnt_iter_lock, &mnt_lck_grp, &mnt_lck_attr);
+    lck_mtx_init(&mp->mnt_renamelock, &mnt_lck_grp, &mnt_lck_attr);
+    lck_rw_init(&mp->mnt_rwlock, &mnt_lck_grp, &mnt_lck_attr);
 }

 void
 mount_lock_destroy(mount_t mp)
 {
-    lck_mtx_destroy(&mp->mnt_mlock, mnt_lck_grp);
-    lck_mtx_destroy(&mp->mnt_renamelock, mnt_lck_grp);
-    lck_rw_destroy(&mp->mnt_rwlock, mnt_lck_grp);
+    lck_mtx_destroy(&mp->mnt_mlock, &mnt_lck_grp);
+    lck_mtx_destroy(&mp->mnt_iter_lock, &mnt_lck_grp);
+    lck_mtx_destroy(&mp->mnt_renamelock, &mnt_lck_grp);
+    lck_rw_destroy(&mp->mnt_rwlock, &mnt_lck_grp);
 }

@@ -487,21 +473,39 @@ struct vfstable *
 vfstable_add(struct vfstable *nvfsp)
 {
     int slot;
-    struct vfstable *slotp;
+    struct vfstable *slotp, *allocated = NULL;
+    struct sysctl_oid *oidp = NULL;
+
+
+    if (nvfsp->vfc_vfsops->vfs_sysctl) {
+        struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");
+
+        oidp = kheap_alloc(KHEAP_DEFAULT, sizeof(struct sysctl_oid), Z_WAITOK);
+        *oidp = oid;
+    }

     /*
      * Find the next empty slot; we recognize an empty slot by a
      * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
      * ensure we set the entry back to NULL.
      */
+findslot:
+    mount_list_lock();
     for (slot = 0; slot < maxvfsslots; slot++) {
-        if (vfsconf[slot].vfc_vfsops == NULL)
+        if (vfsconf[slot].vfc_vfsops == NULL) {
             break;
+        }
     }
     if (slot == maxvfsslots) {
-        /* out of static slots; allocate one instead */
-        MALLOC(slotp, struct vfstable *, sizeof(struct vfstable),
-            M_TEMP, M_WAITOK);
+        if (allocated == NULL) {
+            mount_list_unlock();
+            /* out of static slots; allocate one instead */
+            allocated = kheap_alloc(KHEAP_DEFAULT, sizeof(struct vfstable),
+                Z_WAITOK);
+            goto findslot;
+        } else {
+            slotp = allocated;
+        }
     } else {
         slotp = &vfsconf[slot];
     }
@@ -514,16 +518,34 @@ vfstable_add(struct vfstable *nvfsp)
      * with the value of 'maxvfslots' in the allocation case.
      */
     bcopy(nvfsp, slotp, sizeof(struct vfstable));
-    lck_mtx_init(&slotp->vfc_lock, fsconf_lck_grp, fsconf_lck_attr);
     if (slot != 0) {
         slotp->vfc_next = vfsconf[slot - 1].vfc_next;
         vfsconf[slot - 1].vfc_next = slotp;
     } else {
         slotp->vfc_next = NULL;
     }
-    numused_vfsslots++;

-    return(slotp);
+    if (slotp != allocated) {
+        /* used a statically allocated slot */
+        numused_vfsslots++;
+    }
+    numregistered_fses++;
+
+    if (oidp) {
+        /* Memory freed in vfstable_del after unregistration */
+        slotp->vfc_sysctl = oidp;
+        oidp->oid_name = slotp->vfc_name;
+        sysctl_register_oid(slotp->vfc_sysctl);
+    }
+
+    mount_list_unlock();
+
+    if (allocated && allocated != slotp) {
+        /* did allocation, but ended up using static slot */
+        kheap_free(KHEAP_DEFAULT, allocated, sizeof(struct vfstable));
+    }
+
+    return slotp;
 }

 /*
@@ -545,56 +567,72 @@ vfstable_del(struct vfstable * vtbl)
     struct vfstable **vcpp;
     struct vfstable *vcdelp;

+#if DEBUG
+    lck_mtx_assert(&mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
+#endif /* DEBUG */
+
     /*
      * Traverse the list looking for vtbl; if found, *vcpp
      * will contain the address of the pointer to the entry to
      * be removed.
      */
-    for( vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
-        if (*vcpp == vtbl)
-            break;
-    }
+    for (vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
+        if (*vcpp == vtbl) {
+            break;
+        }
+    }

-    if (*vcpp == NULL)
-        return(ESRCH);  /* vtbl not on vfsconf list */
+    if (*vcpp == NULL) {
+        return ESRCH; /* vtbl not on vfsconf list */
+    }
+
+    if ((*vcpp)->vfc_sysctl) {
+        sysctl_unregister_oid((*vcpp)->vfc_sysctl);
+        (*vcpp)->vfc_sysctl->oid_name = NULL;
+        kheap_free(KHEAP_DEFAULT, (*vcpp)->vfc_sysctl, sizeof(struct sysctl_oid));
+    }

     /* Unlink entry */
     vcdelp = *vcpp;
     *vcpp = (*vcpp)->vfc_next;

-    lck_mtx_destroy(&vcdelp->vfc_lock, fsconf_lck_grp);
-
     /*
      * Is this an entry from our static table? We find out by
      * seeing if the pointer to the object to be deleted places
      * the object in the address space containing the table (or not).
      */
-    if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */
+    if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */
         /* Mark as empty for vfscon_add() */
         bzero(vcdelp, sizeof(struct vfstable));
+        numregistered_fses--;
         numused_vfsslots--;
-    } else { /* N */
+    } else { /* N */
         /*
          * This entry was dynamically allocated; we must free it;
          * we would prefer to have just linked the caller's
          * vfsconf onto our list, but it may not be persistent
          * because of the previous (copying) implementation.
          */
-        FREE(vcdelp, M_TEMP);
+        numregistered_fses--;
+        mount_list_unlock();
+        kheap_free(KHEAP_DEFAULT, vcdelp, sizeof(struct vfstable));
+        mount_list_lock();
     }

-    return(0);
+#if DEBUG
+    lck_mtx_assert(&mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
+#endif /* DEBUG */
+
+    return 0;
 }

 void
 SPECHASH_LOCK(void)
 {
-    lck_mtx_lock(spechash_mtx_lock);
+    lck_mtx_lock(&spechash_mtx_lock);
 }

 void
 SPECHASH_UNLOCK(void)
 {
-    lck_mtx_unlock(spechash_mtx_lock);
+    lck_mtx_unlock(&spechash_mtx_lock);
 }
-
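The vfstable_add()/vfstable_del() changes above share one discipline: never call the allocator or free memory while holding the mount-list lock. vfstable_add() drops the lock, allocates, and jumps back to findslot to rescan (freeing the allocation afterwards if a static slot turned up), while vfstable_del() unlinks the entry under the lock but frees it only after temporarily unlocking, reacquiring the lock because callers expect it held. A rough user-space analogue of the add path, in plain C with pthreads and invented names (entry, table_lock, entry_add) purely for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NSLOTS 4

struct entry {
    int  used;
    char name[16];
};

static struct entry    table[NSLOTS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

struct entry *
entry_add(const char *name)
{
    struct entry *allocated = NULL, *slotp = NULL;

findslot:
    pthread_mutex_lock(&table_lock);
    for (int i = 0; i < NSLOTS; i++) {
        if (!table[i].used) {
            slotp = &table[i];
            break;
        }
    }
    if (slotp == NULL) {
        if (allocated == NULL) {
            /* Out of static slots: drop the lock before allocating,
             * then rescan, since a slot may have freed up meanwhile. */
            pthread_mutex_unlock(&table_lock);
            allocated = calloc(1, sizeof(*allocated));
            if (allocated == NULL) {
                return NULL;
            }
            goto findslot;
        }
        slotp = allocated;  /* real code would also link it into a list */
    }
    slotp->used = 1;
    snprintf(slotp->name, sizeof(slotp->name), "%s", name);
    pthread_mutex_unlock(&table_lock);

    if (allocated != NULL && allocated != slotp) {
        /* Raced: a static slot opened up after we allocated; free it
         * outside the lock, as vfstable_add() does. */
        free(allocated);
    }
    return slotp;
}

int
main(void)
{
    for (int i = 0; i < 6; i++) {
        struct entry *e = entry_add("fs");
        printf("registration %d -> %p\n", i, (void *)e);
    }
    return 0;
}

The rescan after reacquiring the lock is the important part: between the unlock and the relock another thread may have emptied a static slot, which is exactly the race the kernel code handles with its "did allocation, but ended up using static slot" cleanup.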