/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)vfs_init.c  8.5 (Berkeley) 5/11/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */


#include <sys/param.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/vm.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/errno.h>
#include <sys/malloc.h>

#include <vfs/vfs_journal.h>    /* journal_init() */
#if CONFIG_MACF
#include <security/mac_framework.h>
#include <sys/kauth.h>
#endif
#if QUOTA
#include <sys/quota.h>
#endif

/*
 * Sigh, such primitive tools are these...
 */
#if 0
#define DODEBUG(A) A
#else
#define DODEBUG(A)
#endif

__private_extern__ void vntblinit(void);

extern struct vnodeopv_desc *vfs_opv_descs[];
                                /* a list of lists of vnodeops defns */
extern struct vnodeop_desc *vfs_op_descs[];
                                /* and the operations they perform */
/*
 * This code doesn't work if the definition is **vnodeop_defns with cc.
 * The problem is that the compiler sometimes puts in an extra level of
 * indirection for arrays.  It's an interesting "feature" of C.
 */
int vfs_opv_numops;

typedef int (*PFIvp)(void *);

/*
 * A miscellaneous routine.
 * A generic "default" routine that just returns an error.
 */
int
vn_default_error(void)
{

    return (ENOTSUP);
}

/*
 * vfs_init.c
 *
 * Allocate and fill in operations vectors.
 *
 * An undocumented feature of this approach to defining operations is that
 * there can be multiple entries in vfs_opv_descs for the same operations
 * vector.  This allows third parties to extend the set of operations
 * supported by another layer in a binary compatible way.  For example,
 * assume that NFS needed to be modified to support Ficus.  NFS has an entry
 * (probably nfs_vnodeop_decls) declaring all the operations NFS supports by
 * default.  Ficus could add another entry (ficus_nfs_vnodeop_decl_extensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code.  (Of course, the over-the-wire NFS protocol would still need to
 * be extended, but that is a whole other story.)  This is a feature.
 */
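/*
 * Illustrative sketch (not part of this file; the myfs_* names are
 * hypothetical): a filesystem publishes its operations by defining a
 * NULL-terminated entry list plus a vnodeopv_desc that points at the
 * vector vfs_opv_init() should fill in, and then listing that desc in
 * vfs_opv_descs[]:
 *
 *      int (**myfs_vnodeop_p)(void *);
 *      struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *              { &vnop_default_desc, (int (*)(void *))vn_default_error },
 *              { &vnop_lookup_desc,  (int (*)(void *))myfs_vnop_lookup },
 *              { NULL, NULL }
 *      };
 *      struct vnodeopv_desc myfs_vnodeop_opv_desc =
 *              { &myfs_vnodeop_p, myfs_vnodeop_entries };
 */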
void
vfs_opv_init(void)
{
    int i, j, k;
    int (***opv_desc_vector_p)(void *);
    int (**opv_desc_vector)(void *);
    struct vnodeopv_entry_desc *opve_descp;

    /*
     * Allocate the dynamic vectors and fill them in.
     */
    for (i = 0; vfs_opv_descs[i]; i++) {
        opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p;
        /*
         * Allocate and init the vector, if it needs it.
         * Also handle backwards compatibility.
         */
        if (*opv_desc_vector_p == NULL) {
            MALLOC(*opv_desc_vector_p, PFIvp*,
                vfs_opv_numops * sizeof(PFIvp), M_TEMP, M_WAITOK);
            bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(PFIvp));
            DODEBUG(printf("vector at %x allocated\n",
                opv_desc_vector_p));
        }
        opv_desc_vector = *opv_desc_vector_p;
        for (j = 0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
            opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);

            /*
             * Sanity check: is this operation listed
             * in the list of operations?  We check this
             * by seeing if its offset is zero.  Since
             * the default routine should always be listed
             * first, it should be the only one with a zero
             * offset.  Any other operation with a zero
             * offset is probably not listed in
             * vfs_op_descs, and so is probably an error.
             *
             * A panic here means the layer programmer
             * has committed the all-too-common bug
             * of adding a new operation to the layer's
             * list of vnode operations but
             * not adding the operation to the system-wide
             * list of supported operations.
             */
            if (opve_descp->opve_op->vdesc_offset == 0 &&
                opve_descp->opve_op->vdesc_offset !=
                VOFFSET(vnop_default)) {
                printf("operation %s not listed in %s.\n",
                    opve_descp->opve_op->vdesc_name,
                    "vfs_op_descs");
                panic("vfs_opv_init: bad operation");
            }
            /*
             * Fill in this entry.
             */
            opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
                opve_descp->opve_impl;
        }
    }
    /*
     * Finally, go back and replace unfilled routines
     * with their default.  (Sigh, an O(n^3) algorithm.  I
     * could make it better, but that'd be work, and n is small.)
     */
    for (i = 0; vfs_opv_descs[i]; i++) {
        opv_desc_vector = *(vfs_opv_descs[i]->opv_desc_vector_p);
        /*
         * Force every operations vector to have a default routine.
         */
        if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
            panic("vfs_opv_init: operation vector without default routine.");
        }
        for (k = 0; k < vfs_opv_numops; k++)
            if (opv_desc_vector[k] == NULL)
                opv_desc_vector[k] =
                    opv_desc_vector[VOFFSET(vnop_default)];
    }
}

/*
 * Initialize known vnode operations vectors.
 */
void
vfs_op_init(void)
{
    int i;

    DODEBUG(printf("Vnode_interface_init.\n"));
    /*
     * Set all vnode vectors to a well known value.
     */
    for (i = 0; vfs_opv_descs[i]; i++)
        *(vfs_opv_descs[i]->opv_desc_vector_p) = NULL;
    /*
     * Figure out how many ops there are by counting the table,
     * and assign each its offset.
     */
    for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
        vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
        vfs_opv_numops++;
    }
    DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops));
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern struct vnodeops dead_vnodeops;
extern struct vnodeops spec_vnodeops;

/* vars for vnode lock */
lck_grp_t * vnode_lck_grp;
lck_grp_attr_t * vnode_lck_grp_attr;
lck_attr_t * vnode_lck_attr;

#if CONFIG_TRIGGERS
/* vars for vnode trigger resolver */
lck_grp_t * trigger_vnode_lck_grp;
lck_grp_attr_t * trigger_vnode_lck_grp_attr;
lck_attr_t * trigger_vnode_lck_attr;
#endif

lck_grp_t * fd_vn_lck_grp;
lck_grp_attr_t * fd_vn_lck_grp_attr;
lck_attr_t * fd_vn_lck_attr;

/* vars for vnode list lock */
lck_grp_t * vnode_list_lck_grp;
lck_grp_attr_t * vnode_list_lck_grp_attr;
lck_attr_t * vnode_list_lck_attr;
lck_spin_t * vnode_list_spin_lock;
lck_mtx_t * spechash_mtx_lock;

/* vars for vfsconf lock */
lck_grp_t * fsconf_lck_grp;
lck_grp_attr_t * fsconf_lck_grp_attr;
lck_attr_t * fsconf_lck_attr;


/* vars for mount lock */
lck_grp_t * mnt_lck_grp;
lck_grp_attr_t * mnt_lck_grp_attr;
lck_attr_t * mnt_lck_attr;

/* vars for mount list lock */
lck_grp_t * mnt_list_lck_grp;
lck_grp_attr_t * mnt_list_lck_grp_attr;
lck_attr_t * mnt_list_lck_attr;
lck_mtx_t * mnt_list_mtx_lock;

/* vars for sync mutex */
lck_grp_t * sync_mtx_lck_grp;
lck_grp_attr_t * sync_mtx_lck_grp_attr;
lck_attr_t * sync_mtx_lck_attr;
lck_mtx_t * sync_mtx_lck;

lck_mtx_t *pkg_extensions_lck;

struct mount * dead_mountp;

extern void nspace_handler_init(void);

/*
 * Initialize the vnode structures and initialize each file system type.
 */
void
vfsinit(void)
{
    struct vfstable *vfsp;
    int i, maxtypenum;
    struct mount * mp;

    /* Allocate vnode list lock group attribute and group */
    vnode_list_lck_grp_attr = lck_grp_attr_alloc_init();

    vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr);

    /* Allocate vnode list lock attribute */
    vnode_list_lck_attr = lck_attr_alloc_init();

    /* Allocate vnode list lock */
    vnode_list_spin_lock = lck_spin_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* Allocate spec hash list lock */
    spechash_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* Allocate the package extensions table lock */
    pkg_extensions_lck = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* Allocate vnode lock group attribute and group */
    vnode_lck_grp_attr = lck_grp_attr_alloc_init();

    vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr);

    /* Allocate vnode lock attribute */
    vnode_lck_attr = lck_attr_alloc_init();

#if CONFIG_TRIGGERS
    trigger_vnode_lck_grp_attr = lck_grp_attr_alloc_init();
    trigger_vnode_lck_grp = lck_grp_alloc_init("trigger_vnode", trigger_vnode_lck_grp_attr);
    trigger_vnode_lck_attr = lck_attr_alloc_init();
#endif
    /* Allocate per fd vnode data lock attribute and group */
    fd_vn_lck_grp_attr = lck_grp_attr_alloc_init();
    fd_vn_lck_grp = lck_grp_alloc_init("fd_vnode_data", fd_vn_lck_grp_attr);
    fd_vn_lck_attr = lck_attr_alloc_init();

    /* Allocate fs config lock group attribute and group */
    fsconf_lck_grp_attr = lck_grp_attr_alloc_init();

    fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr);

    /* Allocate fs config lock attribute */
    fsconf_lck_attr = lck_attr_alloc_init();

    /* Allocate mount point related lock structures */

    /* Allocate mount list lock group attribute and group */
    mnt_list_lck_grp_attr = lck_grp_attr_alloc_init();

    mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr);

    /* Allocate mount list lock attribute */
    mnt_list_lck_attr = lck_attr_alloc_init();

    /* Allocate mount list lock */
    mnt_list_mtx_lock = lck_mtx_alloc_init(mnt_list_lck_grp, mnt_list_lck_attr);


    /* Allocate mount lock group attribute and group */
    mnt_lck_grp_attr = lck_grp_attr_alloc_init();

    mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr);

    /* Allocate mount lock attribute */
    mnt_lck_attr = lck_attr_alloc_init();

    /* Allocate sync lock */
    sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init();
    sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr);
    sync_mtx_lck_attr = lck_attr_alloc_init();
    sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr);

    /*
     * Initialize the vnode table
     */
    vntblinit();
    /*
     * Initialize the filesystem event mechanism.
     */
    vfs_event_init();
    /*
     * Initialize the vnode name cache
     */
    nchinit();

#if JOURNALING
    /*
     * Initialize the journaling locks
     */
    journal_init();
#endif
    nspace_handler_init();

    /*
     * Build vnode operation vectors.
     */
    vfs_op_init();
    vfs_opv_init();   /* finish the job */
    /*
     * Initialize each file system type in the static list,
     * until the first NULL ->vfs_vfsops is encountered.
     */
    maxtypenum = VT_NON;
    for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) {
        struct vfsconf vfsc;
        if (vfsp->vfc_vfsops == (struct vfsops *)0)
            break;
        if (i) vfsconf[i-1].vfc_next = vfsp;
        if (maxtypenum <= vfsp->vfc_typenum)
            maxtypenum = vfsp->vfc_typenum + 1;

        bzero(&vfsc, sizeof(struct vfsconf));
        vfsc.vfc_reserved1 = 0;
        bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
        vfsc.vfc_typenum = vfsp->vfc_typenum;
        vfsc.vfc_refcount = vfsp->vfc_refcount;
        vfsc.vfc_flags = vfsp->vfc_flags;
        vfsc.vfc_reserved2 = 0;
        vfsc.vfc_reserved3 = 0;

        if (vfsp->vfc_vfsops->vfs_sysctl) {
            struct sysctl_oid *oidp = NULL;
            struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

            MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
            *oidp = oid;

            /* Memory for VFS oid held by vfsentry forever */
            vfsp->vfc_sysctl = oidp;
            oidp->oid_name = vfsp->vfc_name;
            sysctl_register_oid(vfsp->vfc_sysctl);
        }

        (*vfsp->vfc_vfsops->vfs_init)(&vfsc);

        numused_vfsslots++;
        numregistered_fses++;
    }
    /* next vfc_typenum to be used */
    maxvfstypenum = maxtypenum;

    /*
     * Initialize the vnop authorization scope.
     */
    vnode_authorize_init();

    /*
     * Initialize the quota system.
     */
#if QUOTA
    dqinit();
#endif

    /*
     * create a mount point for dead vnodes
     */
    MALLOC_ZONE(mp, struct mount *, sizeof(struct mount),
        M_MOUNT, M_WAITOK);
    bzero((char *)mp, sizeof(struct mount));
    /* Initialize the default IO constraints */
    mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
    mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
    mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
    mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
    mp->mnt_devblocksize = DEV_BSIZE;
    mp->mnt_alignmentmask = PAGE_MASK;
    mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
    mp->mnt_ioscale = 1;
    mp->mnt_ioflags = 0;
    mp->mnt_realrootvp = NULLVP;
    mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

    TAILQ_INIT(&mp->mnt_vnodelist);
    TAILQ_INIT(&mp->mnt_workerqueue);
    TAILQ_INIT(&mp->mnt_newvnodes);
    mp->mnt_flag = MNT_LOCAL;
    mp->mnt_lflag = MNT_LDEAD;
    mount_lock_init(mp);

#if CONFIG_MACF
    mac_mount_label_init(mp);
    mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
    dead_mountp = mp;
}

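/*
 * Wrappers around the global vnode list spin lock allocated in vfsinit().
 */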
void
vnode_list_lock(void)
{
    lck_spin_lock(vnode_list_spin_lock);
}

void
vnode_list_unlock(void)
{
    lck_spin_unlock(vnode_list_spin_lock);
}

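/*
 * Wrappers around the global mount list mutex allocated in vfsinit().
 */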
void
mount_list_lock(void)
{
    lck_mtx_lock(mnt_list_mtx_lock);
}

void
mount_list_unlock(void)
{
    lck_mtx_unlock(mnt_list_mtx_lock);
}

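/*
 * Set up the per-mount locks (mount mutex, rename mutex, and mount rw lock)
 * for a newly created mount structure; mount_lock_destroy() below is the
 * teardown counterpart.
 */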
void
mount_lock_init(mount_t mp)
{
    lck_mtx_init(&mp->mnt_mlock, mnt_lck_grp, mnt_lck_attr);
    lck_mtx_init(&mp->mnt_renamelock, mnt_lck_grp, mnt_lck_attr);
    lck_rw_init(&mp->mnt_rwlock, mnt_lck_grp, mnt_lck_attr);
}

void
mount_lock_destroy(mount_t mp)
{
    lck_mtx_destroy(&mp->mnt_mlock, mnt_lck_grp);
    lck_mtx_destroy(&mp->mnt_renamelock, mnt_lck_grp);
    lck_rw_destroy(&mp->mnt_rwlock, mnt_lck_grp);
}


/*
 * Name:        vfstable_add
 *
 * Description: Add a filesystem to the vfsconf list at the first
 *              unused slot.  If no static slots are available, a
 *              new entry is allocated dynamically and linked in.
 *
 * Parameter:   nvfsp   vfsconf for VFS to add
 *
 * Returns:     Pointer to the vfstable entry (static slot or dynamic
 *              allocation) that now holds the contents of nvfsp.
 *
 * Notes:       The vfsconf list should be treated as a linked list by
 *              all external references, as the implementation is
 *              expected to change in the future.  The linkage is
 *              through ->vfc_next, and the list is NULL terminated.
 *
 * Warning:     This code assumes that vfsconf[0] is non-empty.
 */
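/*
 * Illustrative sketch (not part of this file): because the table is linked
 * through ->vfc_next and NULL terminated, external code is expected to walk
 * it as a list rather than index the static array, e.g.:
 *
 *      struct vfstable *vfsp;
 *      for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
 *              ... examine vfsp->vfc_name, vfsp->vfc_typenum, etc. ...
 */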
struct vfstable *
vfstable_add(struct vfstable *nvfsp)
{
    int slot;
    struct vfstable *slotp, *allocated = NULL;
    struct sysctl_oid *oidp = NULL;


    if (nvfsp->vfc_vfsops->vfs_sysctl) {
        struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

        MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
        *oidp = oid;
    }

    /*
     * Find the next empty slot; we recognize an empty slot by a
     * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
     * ensure we set the entry back to NULL.
     */
findslot:
    mount_list_lock();
    for (slot = 0; slot < maxvfsslots; slot++) {
        if (vfsconf[slot].vfc_vfsops == NULL)
            break;
    }
    if (slot == maxvfsslots) {
        if (allocated == NULL) {
            mount_list_unlock();
            /* out of static slots; allocate one instead */
            MALLOC(allocated, struct vfstable *, sizeof(struct vfstable),
                M_TEMP, M_WAITOK);
            goto findslot;
        } else {
            slotp = allocated;
        }
    } else {
        slotp = &vfsconf[slot];
    }

    /*
     * Replace the contents of the next empty slot with the contents
     * of the provided nvfsp.
     *
     * Note: Takes advantage of the fact that 'slot' was left
     * with the value of 'maxvfsslots' in the allocation case.
     */
    bcopy(nvfsp, slotp, sizeof(struct vfstable));
    if (slot != 0) {
        slotp->vfc_next = vfsconf[slot - 1].vfc_next;
        vfsconf[slot - 1].vfc_next = slotp;
    } else {
        slotp->vfc_next = NULL;
    }

    if (slotp != allocated) {
        /* used a statically allocated slot */
        numused_vfsslots++;
    }
    numregistered_fses++;

    if (oidp) {
        /* Memory freed in vfstable_del after unregistration */
        slotp->vfc_sysctl = oidp;
        oidp->oid_name = slotp->vfc_name;
        sysctl_register_oid(slotp->vfc_sysctl);
    }

    mount_list_unlock();

    if (allocated && allocated != slotp) {
        /* did allocation, but ended up using static slot */
        FREE(allocated, M_TEMP);
    }

    return (slotp);
}

/*
 * Name:        vfstable_del
 *
 * Description: Remove a filesystem's vfstable entry from the vfsconf
 *              list.  If the entry is not on the list, return an error.
 *
 * Parameter:   vtbl    vfstable entry of the VFS to remove
 *
 * Returns:     0       Success
 *              ESRCH   vtbl was not found on the vfsconf list
 *
 * Notes:       The caller is expected to hold the mount list lock.
 */
int
vfstable_del(struct vfstable * vtbl)
{
    struct vfstable **vcpp;
    struct vfstable *vcdelp;

#if DEBUG
    lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

    /*
     * Traverse the list looking for vtbl; if found, *vcpp
     * will contain the address of the pointer to the entry to
     * be removed.
     */
    for (vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
        if (*vcpp == vtbl)
            break;
    }

    if (*vcpp == NULL)
        return (ESRCH); /* vtbl not on vfsconf list */

    if ((*vcpp)->vfc_sysctl) {
        sysctl_unregister_oid((*vcpp)->vfc_sysctl);
        (*vcpp)->vfc_sysctl->oid_name = NULL;
        FREE((*vcpp)->vfc_sysctl, M_TEMP);
        (*vcpp)->vfc_sysctl = NULL;
    }

    /* Unlink entry */
    vcdelp = *vcpp;
    *vcpp = (*vcpp)->vfc_next;

    /*
     * Is this an entry from our static table?  We find out by
     * seeing if the pointer to the object to be deleted places
     * the object in the address space containing the table (or not).
     */
    if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) {    /* Y */
        /* Mark as empty for vfstable_add() */
        bzero(vcdelp, sizeof(struct vfstable));
        numregistered_fses--;
        numused_vfsslots--;
    } else {    /* N */
        /*
         * This entry was dynamically allocated; we must free it;
         * we would prefer to have just linked the caller's
         * vfsconf onto our list, but it may not be persistent
         * because of the previous (copying) implementation.
         */
        numregistered_fses--;
        mount_list_unlock();
        FREE(vcdelp, M_TEMP);
        mount_list_lock();
    }

#if DEBUG
    lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

    return (0);
}

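/*
 * Wrappers around the special device (spec) hash chain mutex allocated
 * in vfsinit().
 */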
void
SPECHASH_LOCK(void)
{
    lck_mtx_lock(spechash_mtx_lock);
}

void
SPECHASH_UNLOCK(void)
{
    lck_mtx_unlock(spechash_mtx_lock);
}