2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
30 * Copyright (c) 1989, 1993
31 * The Regents of the University of California. All rights reserved.
33 * This code is derived from software contributed
34 * to Berkeley by John Heidemann of the UCLA Ficus project.
36 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * @(#)vfs_init.c 8.5 (Berkeley) 5/11/95
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections. This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
76 #include <sys/param.h>
77 #include <sys/mount_internal.h>
80 #include <sys/vnode_internal.h>
82 #include <sys/namei.h>
83 #include <sys/ucred.h>
84 #include <sys/errno.h>
85 #include <kern/kalloc.h>
86 #include <sys/decmpfs.h>
89 #include <security/mac_framework.h>
90 #include <sys/kauth.h>
93 #include <sys/quota.h>
97 * Sigh, such primitive tools are these...
105 ZONE_DECLARE(mount_zone
, "mount", sizeof(struct mount
), ZC_ZFREE_CLEARMEM
);
107 __private_extern__
void vntblinit(void);
109 extern const struct vnodeopv_desc
*vfs_opv_descs
[];
110 /* a list of lists of vnodeops defns */
111 extern struct vnodeop_desc
*vfs_op_descs
[];
112 /* and the operations they perform */
114 * This code doesn't work if the defn is **vnodop_defns with cc.
115 * The problem is because of the compiler sometimes putting in an
116 * extra level of indirection for arrays. It's an interesting
121 typedef int (*PFIvp
)(void *);
124 * A miscellaneous routine.
125 * A generic "default" routine that just returns an error.
128 vn_default_error(void)
136 * Allocate and fill in operations vectors.
138 * An undocumented feature of this approach to defining operations is that
139 * there can be multiple entries in vfs_opv_descs for the same operations
140 * vector. This allows third parties to extend the set of operations
141 * supported by another layer in a binary compatible way. For example,
142 * assume that NFS needed to be modified to support Ficus. NFS has an entry
143 * (probably nfs_vnopdeop_decls) declaring all the operations NFS supports by
144 * default. Ficus could add another entry (ficus_nfs_vnodeop_decl_entensions)
145 * listing those new operations Ficus adds to NFS, all without modifying the
146 * NFS code. (Of course, the OTW NFS protocol still needs to be munged, but
147 * that is a(whole)nother story.) This is a feature.
153 int(***opv_desc_vector_p
)(void *);
154 int(**opv_desc_vector
)(void *);
155 const struct vnodeopv_entry_desc
*opve_descp
;
158 * Allocate the dynamic vectors and fill them in.
160 for (i
= 0; vfs_opv_descs
[i
]; i
++) {
161 opv_desc_vector_p
= vfs_opv_descs
[i
]->opv_desc_vector_p
;
163 * Allocate and init the vector, if it needs it.
164 * Also handle backwards compatibility.
166 if (*opv_desc_vector_p
== NULL
) {
167 *opv_desc_vector_p
= kheap_alloc(KHEAP_DEFAULT
,
168 vfs_opv_numops
* sizeof(PFIvp
), Z_WAITOK
| Z_ZERO
);
169 DODEBUG(printf("vector at %x allocated\n",
172 opv_desc_vector
= *opv_desc_vector_p
;
173 for (j
= 0; vfs_opv_descs
[i
]->opv_desc_ops
[j
].opve_op
; j
++) {
174 opve_descp
= &(vfs_opv_descs
[i
]->opv_desc_ops
[j
]);
176 /* Silently skip known-disabled operations */
177 if (opve_descp
->opve_op
->vdesc_flags
& VDESC_DISABLED
) {
178 printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
179 vfs_opv_descs
[i
], opve_descp
->opve_op
->vdesc_name
);
184 * Sanity check: is this operation listed
185 * in the list of operations? We check this
186 * by seeing if its offset is zero. Since
187 * the default routine should always be listed
188 * first, it should be the only one with a zero
189 * offset. Any other operation with a zero
190 * offset is probably not listed in
191 * vfs_op_descs, and so is probably an error.
193 * A panic here means the layer programmer
194 * has committed the all-too common bug
195 * of adding a new operation to the layer's
196 * list of vnode operations but
197 * not adding the operation to the system-wide
198 * list of supported operations.
200 if (opve_descp
->opve_op
->vdesc_offset
== 0 &&
201 opve_descp
->opve_op
!=
202 VDESC(vnop_default
)) {
203 printf("operation %s not listed in %s.\n",
204 opve_descp
->opve_op
->vdesc_name
,
206 panic("vfs_opv_init: bad operation");
209 * Fill in this entry.
211 opv_desc_vector
[opve_descp
->opve_op
->vdesc_offset
] =
212 opve_descp
->opve_impl
;
216 * Finally, go back and replace unfilled routines
217 * with their default. (Sigh, an O(n^3) algorithm. I
218 * could make it better, but that'd be work, and n is small.)
220 for (i
= 0; vfs_opv_descs
[i
]; i
++) {
221 opv_desc_vector
= *(vfs_opv_descs
[i
]->opv_desc_vector_p
);
223 * Force every operations vector to have a default routine.
225 if (opv_desc_vector
[VOFFSET(vnop_default
)] == NULL
) {
226 panic("vfs_opv_init: operation vector without default routine.");
228 for (k
= 0; k
< vfs_opv_numops
; k
++) {
229 if (opv_desc_vector
[k
] == NULL
) {
231 opv_desc_vector
[VOFFSET(vnop_default
)];
238 * Initialize known vnode operations vectors.
245 DODEBUG(printf("Vnode_interface_init.\n"));
247 * Set all vnode vectors to a well known value.
249 for (i
= 0; vfs_opv_descs
[i
]; i
++) {
250 *(vfs_opv_descs
[i
]->opv_desc_vector_p
) = NULL
;
253 * Figure out how many ops there are by counting the table,
254 * and assign each its offset.
256 for (vfs_opv_numops
= 0, i
= 0; vfs_op_descs
[i
]; i
++) {
257 /* Silently skip known-disabled operations */
258 if (vfs_op_descs
[i
]->vdesc_flags
& VDESC_DISABLED
) {
261 vfs_op_descs
[i
]->vdesc_offset
= vfs_opv_numops
;
264 DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops
));
268 * Routines having to do with the management of the vnode table.
270 extern struct vnodeops dead_vnodeops
;
271 extern struct vnodeops spec_vnodeops
;
273 /* vars for vnode lock */
274 lck_grp_t
* vnode_lck_grp
;
275 lck_grp_attr_t
* vnode_lck_grp_attr
;
276 lck_attr_t
* vnode_lck_attr
;
279 /* vars for vnode trigger resolver */
280 lck_grp_t
* trigger_vnode_lck_grp
;
281 lck_grp_attr_t
* trigger_vnode_lck_grp_attr
;
282 lck_attr_t
* trigger_vnode_lck_attr
;
285 lck_grp_t
* fd_vn_lck_grp
;
286 lck_grp_attr_t
* fd_vn_lck_grp_attr
;
287 lck_attr_t
* fd_vn_lck_attr
;
289 /* vars for vnode list lock */
290 lck_grp_t
* vnode_list_lck_grp
;
291 lck_grp_attr_t
* vnode_list_lck_grp_attr
;
292 lck_attr_t
* vnode_list_lck_attr
;
293 lck_spin_t
* vnode_list_spin_lock
;
294 lck_mtx_t
* spechash_mtx_lock
;
296 /* vars for vfsconf lock */
297 lck_grp_t
* fsconf_lck_grp
;
298 lck_grp_attr_t
* fsconf_lck_grp_attr
;
299 lck_attr_t
* fsconf_lck_attr
;
302 /* vars for mount lock */
303 lck_grp_t
* mnt_lck_grp
;
304 lck_grp_attr_t
* mnt_lck_grp_attr
;
305 lck_attr_t
* mnt_lck_attr
;
307 /* vars for mount list lock */
308 lck_grp_t
* mnt_list_lck_grp
;
309 lck_grp_attr_t
* mnt_list_lck_grp_attr
;
310 lck_attr_t
* mnt_list_lck_attr
;
311 lck_mtx_t
* mnt_list_mtx_lock
;
313 /* vars for sync mutex */
314 lck_grp_t
* sync_mtx_lck_grp
;
315 lck_grp_attr_t
* sync_mtx_lck_grp_attr
;
316 lck_attr_t
* sync_mtx_lck_attr
;
317 lck_mtx_t
* sync_mtx_lck
;
319 lck_mtx_t
*pkg_extensions_lck
;
321 struct mount
* dead_mountp
;
324 * Initialize the vnode structures and initialize each file system type.
329 struct vfstable
*vfsp
;
333 /* Allocate vnode list lock group attribute and group */
334 vnode_list_lck_grp_attr
= lck_grp_attr_alloc_init();
336 vnode_list_lck_grp
= lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr
);
338 /* Allocate vnode list lock attribute */
339 vnode_list_lck_attr
= lck_attr_alloc_init();
341 /* Allocate vnode list lock */
342 vnode_list_spin_lock
= lck_spin_alloc_init(vnode_list_lck_grp
, vnode_list_lck_attr
);
344 /* Allocate spec hash list lock */
345 spechash_mtx_lock
= lck_mtx_alloc_init(vnode_list_lck_grp
, vnode_list_lck_attr
);
347 /* Allocate the package extensions table lock */
348 pkg_extensions_lck
= lck_mtx_alloc_init(vnode_list_lck_grp
, vnode_list_lck_attr
);
350 /* allocate vnode lock group attribute and group */
351 vnode_lck_grp_attr
= lck_grp_attr_alloc_init();
353 vnode_lck_grp
= lck_grp_alloc_init("vnode", vnode_lck_grp_attr
);
355 /* Allocate vnode lock attribute */
356 vnode_lck_attr
= lck_attr_alloc_init();
359 trigger_vnode_lck_grp_attr
= lck_grp_attr_alloc_init();
360 trigger_vnode_lck_grp
= lck_grp_alloc_init("trigger_vnode", trigger_vnode_lck_grp_attr
);
361 trigger_vnode_lck_attr
= lck_attr_alloc_init();
363 /* Allocate per fd vnode data lock attribute and group */
364 fd_vn_lck_grp_attr
= lck_grp_attr_alloc_init();
365 fd_vn_lck_grp
= lck_grp_alloc_init("fd_vnode_data", fd_vn_lck_grp_attr
);
366 fd_vn_lck_attr
= lck_attr_alloc_init();
368 /* Allocate fs config lock group attribute and group */
369 fsconf_lck_grp_attr
= lck_grp_attr_alloc_init();
371 fsconf_lck_grp
= lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr
);
373 /* Allocate fs config lock attribute */
374 fsconf_lck_attr
= lck_attr_alloc_init();
376 /* Allocate mount point related lock structures */
378 /* Allocate mount list lock group attribute and group */
379 mnt_list_lck_grp_attr
= lck_grp_attr_alloc_init();
381 mnt_list_lck_grp
= lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr
);
383 /* Allocate mount list lock attribute */
384 mnt_list_lck_attr
= lck_attr_alloc_init();
386 /* Allocate mount list lock */
387 mnt_list_mtx_lock
= lck_mtx_alloc_init(mnt_list_lck_grp
, mnt_list_lck_attr
);
390 /* allocate mount lock group attribute and group */
391 mnt_lck_grp_attr
= lck_grp_attr_alloc_init();
393 mnt_lck_grp
= lck_grp_alloc_init("mount", mnt_lck_grp_attr
);
395 /* Allocate mount lock attribute */
396 mnt_lck_attr
= lck_attr_alloc_init();
398 /* Allocate sync lock */
399 sync_mtx_lck_grp_attr
= lck_grp_attr_alloc_init();
400 sync_mtx_lck_grp
= lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr
);
401 sync_mtx_lck_attr
= lck_attr_alloc_init();
402 sync_mtx_lck
= lck_mtx_alloc_init(sync_mtx_lck_grp
, sync_mtx_lck_attr
);
405 * Initialize the vnode table
409 * Initialize the filesystem event mechanism.
413 * Initialize the vnode name cache
418 * Build vnode operation vectors.
421 vfs_opv_init(); /* finish the job */
423 * Initialize each file system type in the static list,
424 * until the first NULL ->vfs_vfsops is encountered.
427 for (vfsp
= vfsconf
, i
= 0; i
< maxvfsslots
; i
++, vfsp
++) {
429 if (vfsp
->vfc_vfsops
== (struct vfsops
*)0) {
433 vfsconf
[i
- 1].vfc_next
= vfsp
;
435 if (maxtypenum
<= vfsp
->vfc_typenum
) {
436 maxtypenum
= vfsp
->vfc_typenum
+ 1;
439 bzero(&vfsc
, sizeof(struct vfsconf
));
440 vfsc
.vfc_reserved1
= 0;
441 bcopy(vfsp
->vfc_name
, vfsc
.vfc_name
, sizeof(vfsc
.vfc_name
));
442 vfsc
.vfc_typenum
= vfsp
->vfc_typenum
;
443 vfsc
.vfc_refcount
= vfsp
->vfc_refcount
;
444 vfsc
.vfc_flags
= vfsp
->vfc_flags
;
445 vfsc
.vfc_reserved2
= 0;
446 vfsc
.vfc_reserved3
= 0;
448 if (vfsp
->vfc_vfsops
->vfs_sysctl
) {
449 struct sysctl_oid
*oidp
= NULL
;
450 struct sysctl_oid oid
= SYSCTL_STRUCT_INIT(_vfs
, vfsp
->vfc_typenum
, , CTLTYPE_NODE
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
, NULL
, 0, vfs_sysctl_node
, "-", "");
452 oidp
= kheap_alloc(KHEAP_DEFAULT
, sizeof(struct sysctl_oid
), Z_WAITOK
);
455 /* Memory for VFS oid held by vfsentry forever */
456 vfsp
->vfc_sysctl
= oidp
;
457 oidp
->oid_name
= vfsp
->vfc_name
;
458 sysctl_register_oid(vfsp
->vfc_sysctl
);
461 (*vfsp
->vfc_vfsops
->vfs_init
)(&vfsc
);
464 numregistered_fses
++;
466 /* next vfc_typenum to be used */
467 maxvfstypenum
= maxtypenum
;
470 * Initialize the vnop authorization scope.
472 vnode_authorize_init();
475 * Initialize the quota system.
482 * create a mount point for dead vnodes
484 mp
= zalloc_flags(mount_zone
, Z_WAITOK
| Z_ZERO
);
485 /* Initialize the default IO constraints */
486 mp
->mnt_maxreadcnt
= mp
->mnt_maxwritecnt
= MAXPHYS
;
487 mp
->mnt_segreadcnt
= mp
->mnt_segwritecnt
= 32;
488 mp
->mnt_maxsegreadsize
= mp
->mnt_maxreadcnt
;
489 mp
->mnt_maxsegwritesize
= mp
->mnt_maxwritecnt
;
490 mp
->mnt_devblocksize
= DEV_BSIZE
;
491 mp
->mnt_alignmentmask
= PAGE_MASK
;
492 mp
->mnt_ioqueue_depth
= MNT_DEFAULT_IOQUEUE_DEPTH
;
495 mp
->mnt_realrootvp
= NULLVP
;
496 mp
->mnt_authcache_ttl
= CACHED_LOOKUP_RIGHT_TTL
;
498 TAILQ_INIT(&mp
->mnt_vnodelist
);
499 TAILQ_INIT(&mp
->mnt_workerqueue
);
500 TAILQ_INIT(&mp
->mnt_newvnodes
);
501 mp
->mnt_flag
= MNT_LOCAL
;
502 mp
->mnt_lflag
= MNT_LDEAD
;
506 mac_mount_label_init(mp
);
507 mac_mount_label_associate(vfs_context_kernel(), mp
);
515 nspace_resolver_init();
519 vnode_list_lock(void)
521 lck_spin_lock_grp(vnode_list_spin_lock
, vnode_list_lck_grp
);
525 vnode_list_unlock(void)
527 lck_spin_unlock(vnode_list_spin_lock
);
531 mount_list_lock(void)
533 lck_mtx_lock(mnt_list_mtx_lock
);
537 mount_list_unlock(void)
539 lck_mtx_unlock(mnt_list_mtx_lock
);
543 mount_lock_init(mount_t mp
)
545 lck_mtx_init(&mp
->mnt_mlock
, mnt_lck_grp
, mnt_lck_attr
);
546 lck_mtx_init(&mp
->mnt_iter_lock
, mnt_lck_grp
, mnt_lck_attr
);
547 lck_mtx_init(&mp
->mnt_renamelock
, mnt_lck_grp
, mnt_lck_attr
);
548 lck_rw_init(&mp
->mnt_rwlock
, mnt_lck_grp
, mnt_lck_attr
);
552 mount_lock_destroy(mount_t mp
)
554 lck_mtx_destroy(&mp
->mnt_mlock
, mnt_lck_grp
);
555 lck_mtx_destroy(&mp
->mnt_iter_lock
, mnt_lck_grp
);
556 lck_mtx_destroy(&mp
->mnt_renamelock
, mnt_lck_grp
);
557 lck_rw_destroy(&mp
->mnt_rwlock
, mnt_lck_grp
);
564 * Description: Add a filesystem to the vfsconf list at the first
565 * unused slot. If no slots are available, return an
568 * Parameter: nvfsp vfsconf for VFS to add
573 * Notes: The vfsconf should be treated as a linked list by
574 * all external references, as the implementation is
575 * expected to change in the future. The linkage is
576 * through ->vfc_next, and the list is NULL terminated.
578 * Warning: This code assumes that vfsconf[0] is non-empty.
581 vfstable_add(struct vfstable
*nvfsp
)
584 struct vfstable
*slotp
, *allocated
= NULL
;
585 struct sysctl_oid
*oidp
= NULL
;
588 if (nvfsp
->vfc_vfsops
->vfs_sysctl
) {
589 struct sysctl_oid oid
= SYSCTL_STRUCT_INIT(_vfs
, nvfsp
->vfc_typenum
, , CTLTYPE_NODE
| CTLFLAG_KERN
| CTLFLAG_RW
| CTLFLAG_LOCKED
, NULL
, 0, vfs_sysctl_node
, "-", "");
591 oidp
= kheap_alloc(KHEAP_DEFAULT
, sizeof(struct sysctl_oid
), Z_WAITOK
);
596 * Find the next empty slot; we recognize an empty slot by a
597 * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
598 * ensure we set the entry back to NULL.
602 for (slot
= 0; slot
< maxvfsslots
; slot
++) {
603 if (vfsconf
[slot
].vfc_vfsops
== NULL
) {
607 if (slot
== maxvfsslots
) {
608 if (allocated
== NULL
) {
610 /* out of static slots; allocate one instead */
611 allocated
= kheap_alloc(KHEAP_DEFAULT
, sizeof(struct vfstable
),
618 slotp
= &vfsconf
[slot
];
622 * Replace the contents of the next empty slot with the contents
623 * of the provided nvfsp.
625 * Note; Takes advantage of the fact that 'slot' was left
626 * with the value of 'maxvfslots' in the allocation case.
628 bcopy(nvfsp
, slotp
, sizeof(struct vfstable
));
630 slotp
->vfc_next
= vfsconf
[slot
- 1].vfc_next
;
631 vfsconf
[slot
- 1].vfc_next
= slotp
;
633 slotp
->vfc_next
= NULL
;
636 if (slotp
!= allocated
) {
637 /* used a statically allocated slot */
640 numregistered_fses
++;
643 /* Memory freed in vfstable_del after unregistration */
644 slotp
->vfc_sysctl
= oidp
;
645 oidp
->oid_name
= slotp
->vfc_name
;
646 sysctl_register_oid(slotp
->vfc_sysctl
);
651 if (allocated
&& allocated
!= slotp
) {
652 /* did allocation, but ended up using static slot */
653 kheap_free(KHEAP_DEFAULT
, allocated
, sizeof(struct vfstable
));
662 * Description: Remove a filesystem from the vfsconf list by name.
663 * If no such filesystem exists, return an error.
665 * Parameter: fs_name name of VFS to remove
670 * Notes: Hopefully all filesystems have unique names.
673 vfstable_del(struct vfstable
* vtbl
)
675 struct vfstable
**vcpp
;
676 struct vfstable
*vcdelp
;
679 lck_mtx_assert(mnt_list_mtx_lock
, LCK_MTX_ASSERT_OWNED
);
683 * Traverse the list looking for vtbl; if found, *vcpp
684 * will contain the address of the pointer to the entry to
687 for (vcpp
= &vfsconf
; *vcpp
; vcpp
= &(*vcpp
)->vfc_next
) {
694 return ESRCH
; /* vtbl not on vfsconf list */
696 if ((*vcpp
)->vfc_sysctl
) {
697 sysctl_unregister_oid((*vcpp
)->vfc_sysctl
);
698 (*vcpp
)->vfc_sysctl
->oid_name
= NULL
;
699 kheap_free(KHEAP_DEFAULT
, (*vcpp
)->vfc_sysctl
, sizeof(struct sysctl_oid
));
704 *vcpp
= (*vcpp
)->vfc_next
;
707 * Is this an entry from our static table? We find out by
708 * seeing if the pointer to the object to be deleted places
709 * the object in the address space containing the table (or not).
711 if (vcdelp
>= vfsconf
&& vcdelp
< (vfsconf
+ maxvfsslots
)) { /* Y */
712 /* Mark as empty for vfscon_add() */
713 bzero(vcdelp
, sizeof(struct vfstable
));
714 numregistered_fses
--;
718 * This entry was dynamically allocated; we must free it;
719 * we would prefer to have just linked the caller's
720 * vfsconf onto our list, but it may not be persistent
721 * because of the previous (copying) implementation.
723 numregistered_fses
--;
725 kheap_free(KHEAP_DEFAULT
, vcdelp
, sizeof(struct vfstable
));
730 lck_mtx_assert(mnt_list_mtx_lock
, LCK_MTX_ASSERT_OWNED
);
739 lck_mtx_lock(spechash_mtx_lock
);
743 SPECHASH_UNLOCK(void)
745 lck_mtx_unlock(spechash_mtx_lock
);