/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_init.c	8.5 (Berkeley) 5/11/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */


#include <sys/param.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/vm.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/decmpfs.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <sys/kauth.h>
#endif
#if QUOTA
#include <sys/quota.h>
#endif

/*
 * Sigh, such primitive tools are these...
 */
#if 0
#define DODEBUG(A) A
#else
#define DODEBUG(A)
#endif

__private_extern__ void vntblinit(void);

extern const struct vnodeopv_desc *vfs_opv_descs[];
                                /* a list of lists of vnodeops defns */
extern struct vnodeop_desc *vfs_op_descs[];
                                /* and the operations they perform */
/*
 * This code doesn't work if the above are declared as **vnodeop_defns
 * with cc.  The problem is that the compiler sometimes puts in an
 * extra level of indirection for arrays.  It's an interesting
 * "feature" of C.
 */
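
/*
 * A minimal sketch of the pitfall (hypothetical second declaration, not part
 * of this file): when the symbol is defined as an array, an extern
 * declaration carrying an extra pointer level makes the generated code load
 * a (garbage) pointer from the start of the array instead of using the
 * array's own address:
 *
 *	extern struct vnodeop_desc *vfs_op_descs[];   correct: symbol is the array
 *	extern struct vnodeop_desc **vfs_op_descs;    breaks: reads a pointer value
 */
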
int vfs_opv_numops;

typedef int (*PFIvp)(void *);

/*
 * A miscellaneous routine.
 * A generic "default" routine that just returns an error.
 */
int
vn_default_error(void)
{
        return ENOTSUP;
}

/*
 * vfs_init.c
 *
 * Allocate and fill in operations vectors.
 *
 * An undocumented feature of this approach to defining operations is that
 * there can be multiple entries in vfs_opv_descs for the same operations
 * vector.  This allows third parties to extend the set of operations
 * supported by another layer in a binary compatible way.  For example,
 * assume that NFS needed to be modified to support Ficus.  NFS has an entry
 * (probably nfs_vnodeop_decls) declaring all the operations NFS supports by
 * default.  Ficus could add another entry (ficus_nfs_vnodeop_decl_extensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code.  (Of course, the OTW NFS protocol still needs to be munged, but
 * that is a whole 'nother story.)  This is a feature.
 */
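
/*
 * Illustrative sketch (not compiled): two vnodeopv_desc entries that name the
 * same vector pointer, so vfs_opv_init() merges them into one vector.  The
 * nfs_vnodeop_p, nfs_vnop_lookup and ficusfs_* names below are assumptions
 * for illustration only.
 */
#if 0
int (**nfs_vnodeop_p)(void *);

/* Base layer: the operations NFS implements by default. */
static const struct vnodeopv_entry_desc nfs_vnodeop_entries[] = {
        { &vnop_default_desc, (int (*)(void *))vn_default_error },
        { &vnop_lookup_desc, (int (*)(void *))nfs_vnop_lookup },
        { NULL, NULL }
};
const struct vnodeopv_desc nfs_vnodeop_opv_desc =
        { &nfs_vnodeop_p, nfs_vnodeop_entries };

/* Third-party extension: adds an operation without touching the NFS sources. */
static const struct vnodeopv_entry_desc ficusfs_nfs_extension_entries[] = {
        { &vnop_exchange_desc, (int (*)(void *))ficusfs_nfs_exchange },
        { NULL, NULL }
};
const struct vnodeopv_desc ficusfs_nfs_extension_opv_desc =
        { &nfs_vnodeop_p, ficusfs_nfs_extension_entries };
#endif
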
void
vfs_opv_init(void)
{
        int i, j, k;
        int(***opv_desc_vector_p)(void *);
        int(**opv_desc_vector)(void *);
        const struct vnodeopv_entry_desc *opve_descp;

        /*
         * Allocate the dynamic vectors and fill them in.
         */
        for (i = 0; vfs_opv_descs[i]; i++) {
                opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p;
                /*
                 * Allocate and init the vector, if it needs it.
                 * Also handle backwards compatibility.
                 */
                if (*opv_desc_vector_p == NULL) {
                        MALLOC(*opv_desc_vector_p, PFIvp*,
                            vfs_opv_numops * sizeof(PFIvp), M_TEMP, M_WAITOK);
                        bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(PFIvp));
                        DODEBUG(printf("vector at %p allocated\n",
                            opv_desc_vector_p));
                }
                opv_desc_vector = *opv_desc_vector_p;
                for (j = 0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
                        opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);

                        /* Skip known-disabled operations, noting them on the console */
                        if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
                                printf("vfs_opv_init: Ignoring reference in %p to disabled operation %s.\n",
                                    vfs_opv_descs[i], opve_descp->opve_op->vdesc_name);
                                continue;
                        }

                        /*
                         * Sanity check:  is this operation listed
                         * in the list of operations?  We check this
                         * by seeing if its offset is zero.  Since
                         * the default routine should always be listed
                         * first, it should be the only one with a zero
                         * offset.  Any other operation with a zero
                         * offset is probably not listed in
                         * vfs_op_descs, and so is probably an error.
                         *
                         * A panic here means the layer programmer
                         * has committed the all-too-common bug
                         * of adding a new operation to the layer's
                         * list of vnode operations but
                         * not adding the operation to the system-wide
                         * list of supported operations.
                         */
                        if (opve_descp->opve_op->vdesc_offset == 0 &&
                            opve_descp->opve_op !=
                            VDESC(vnop_default)) {
                                printf("operation %s not listed in %s.\n",
                                    opve_descp->opve_op->vdesc_name,
                                    "vfs_op_descs");
                                panic("vfs_opv_init: bad operation");
                        }
                        /*
                         * Fill in this entry.
                         */
                        opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
                            opve_descp->opve_impl;
                }
        }
        /*
         * Finally, go back and replace unfilled routines
         * with their default.  (Sigh, an O(n^3) algorithm.  I
         * could make it better, but that'd be work, and n is small.)
         */
        for (i = 0; vfs_opv_descs[i]; i++) {
                opv_desc_vector = *(vfs_opv_descs[i]->opv_desc_vector_p);
                /*
                 * Force every operations vector to have a default routine.
                 */
                if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
                        panic("vfs_opv_init: operation vector without default routine.");
                }
                for (k = 0; k < vfs_opv_numops; k++) {
                        if (opv_desc_vector[k] == NULL) {
                                opv_desc_vector[k] =
                                    opv_desc_vector[VOFFSET(vnop_default)];
                        }
                }
        }
}
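
/*
 * Illustrative sketch (not compiled) of the mistake the panic above catches;
 * the myfs_* names are hypothetical.  The descriptor below is referenced from
 * a layer's opv_desc_ops list but was never added to the system-wide
 * vfs_op_descs[] table, so vfs_op_init() never assigns it an offset.  Its
 * vdesc_offset stays 0, colliding with vnop_default, and vfs_opv_init()
 * panics with "bad operation".
 */
#if 0
struct vnodeop_desc myfs_vnop_frobnicate_desc = {
        .vdesc_name = "vnop_frobnicate",
        /* other fields initialized here; vdesc_offset deliberately left 0 */
};

static const struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
        { &vnop_default_desc, (int (*)(void *))vn_default_error },
        { &myfs_vnop_frobnicate_desc, (int (*)(void *))myfs_vnop_frobnicate },
        { NULL, NULL }
};
#endif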

/*
 * Initialize known vnode operations vectors.
 */
void
vfs_op_init(void)
{
        int i;

        DODEBUG(printf("Vnode_interface_init.\n"));
        /*
         * Set all vnode vectors to a well known value.
         */
        for (i = 0; vfs_opv_descs[i]; i++) {
                *(vfs_opv_descs[i]->opv_desc_vector_p) = NULL;
        }
        /*
         * Figure out how many ops there are by counting the table,
         * and assign each its offset.
         */
        for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
                /* Silently skip known-disabled operations */
                if (vfs_op_descs[i]->vdesc_flags & VDESC_DISABLED) {
                        continue;
                }
                vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
                vfs_opv_numops++;
        }
        DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops));
}
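
/*
 * Illustrative sketch (not compiled): roughly how the offsets assigned above
 * are consumed once vfs_opv_init() has filled in a filesystem's vector.  A
 * vnode operation is dispatched by indexing the vnode's v_op vector with the
 * operation's vdesc_offset, which is what the VNOP_* wrappers in kpi_vfs.c
 * effectively do.  example_vnop_open is a hypothetical caller.
 */
#if 0
errno_t
example_vnop_open(vnode_t vp, int mode, vfs_context_t ctx)
{
        struct vnop_open_args a;

        a.a_desc = &vnop_open_desc;
        a.a_vp = vp;
        a.a_mode = mode;
        a.a_context = ctx;
        return (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
}
#endif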

/*
 * Routines having to do with the management of the vnode table.
 */
extern struct vnodeops dead_vnodeops;
extern struct vnodeops spec_vnodeops;

/* vars for vnode lock */
lck_grp_t * vnode_lck_grp;
lck_grp_attr_t * vnode_lck_grp_attr;
lck_attr_t * vnode_lck_attr;

#if CONFIG_TRIGGERS
/* vars for vnode trigger resolver */
lck_grp_t * trigger_vnode_lck_grp;
lck_grp_attr_t * trigger_vnode_lck_grp_attr;
lck_attr_t * trigger_vnode_lck_attr;
#endif

lck_grp_t * fd_vn_lck_grp;
lck_grp_attr_t * fd_vn_lck_grp_attr;
lck_attr_t * fd_vn_lck_attr;

/* vars for vnode list lock */
lck_grp_t * vnode_list_lck_grp;
lck_grp_attr_t * vnode_list_lck_grp_attr;
lck_attr_t * vnode_list_lck_attr;
lck_spin_t * vnode_list_spin_lock;
lck_mtx_t * spechash_mtx_lock;

/* vars for vfsconf lock */
lck_grp_t * fsconf_lck_grp;
lck_grp_attr_t * fsconf_lck_grp_attr;
lck_attr_t * fsconf_lck_attr;


/* vars for mount lock */
lck_grp_t * mnt_lck_grp;
lck_grp_attr_t * mnt_lck_grp_attr;
lck_attr_t * mnt_lck_attr;

/* vars for mount list lock */
lck_grp_t * mnt_list_lck_grp;
lck_grp_attr_t * mnt_list_lck_grp_attr;
lck_attr_t * mnt_list_lck_attr;
lck_mtx_t * mnt_list_mtx_lock;

/* vars for sync mutex */
lck_grp_t * sync_mtx_lck_grp;
lck_grp_attr_t * sync_mtx_lck_grp_attr;
lck_attr_t * sync_mtx_lck_attr;
lck_mtx_t * sync_mtx_lck;

lck_mtx_t *pkg_extensions_lck;

struct mount * dead_mountp;

/*
 * Initialize the vnode structures and initialize each file system type.
 */
void
vfsinit(void)
{
        struct vfstable *vfsp;
        int i, maxtypenum;
        struct mount * mp;

        /* Allocate vnode list lock group attribute and group */
        vnode_list_lck_grp_attr = lck_grp_attr_alloc_init();

        vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr);

        /* Allocate vnode list lock attribute */
        vnode_list_lck_attr = lck_attr_alloc_init();

        /* Allocate vnode list lock */
        vnode_list_spin_lock = lck_spin_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

        /* Allocate spec hash list lock */
        spechash_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

        /* Allocate the package extensions table lock */
        pkg_extensions_lck = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

        /* allocate vnode lock group attribute and group */
        vnode_lck_grp_attr = lck_grp_attr_alloc_init();

        vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr);

        /* Allocate vnode lock attribute */
        vnode_lck_attr = lck_attr_alloc_init();

#if CONFIG_TRIGGERS
        trigger_vnode_lck_grp_attr = lck_grp_attr_alloc_init();
        trigger_vnode_lck_grp = lck_grp_alloc_init("trigger_vnode", trigger_vnode_lck_grp_attr);
        trigger_vnode_lck_attr = lck_attr_alloc_init();
#endif
        /* Allocate per fd vnode data lock attribute and group */
        fd_vn_lck_grp_attr = lck_grp_attr_alloc_init();
        fd_vn_lck_grp = lck_grp_alloc_init("fd_vnode_data", fd_vn_lck_grp_attr);
        fd_vn_lck_attr = lck_attr_alloc_init();

        /* Allocate fs config lock group attribute and group */
        fsconf_lck_grp_attr = lck_grp_attr_alloc_init();

        fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr);

        /* Allocate fs config lock attribute */
        fsconf_lck_attr = lck_attr_alloc_init();

        /* Allocate mount point related lock structures */

        /* Allocate mount list lock group attribute and group */
        mnt_list_lck_grp_attr = lck_grp_attr_alloc_init();

        mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr);

        /* Allocate mount list lock attribute */
        mnt_list_lck_attr = lck_attr_alloc_init();

        /* Allocate mount list lock */
        mnt_list_mtx_lock = lck_mtx_alloc_init(mnt_list_lck_grp, mnt_list_lck_attr);


        /* allocate mount lock group attribute and group */
        mnt_lck_grp_attr = lck_grp_attr_alloc_init();

        mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr);

        /* Allocate mount lock attribute */
        mnt_lck_attr = lck_attr_alloc_init();

        /* Allocate sync lock */
        sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init();
        sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr);
        sync_mtx_lck_attr = lck_attr_alloc_init();
        sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr);

        /*
         * Initialize the vnode table
         */
        vntblinit();
        /*
         * Initialize the filesystem event mechanism.
         */
        vfs_event_init();
        /*
         * Initialize the vnode name cache
         */
        nchinit();

        /*
         * Build vnode operation vectors.
         */
        vfs_op_init();
        vfs_opv_init();   /* finish the job */
        /*
         * Initialize each file system type in the static list,
         * until the first NULL ->vfs_vfsops is encountered.
         */
        maxtypenum = VT_NON;
        for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) {
                struct vfsconf vfsc;
                if (vfsp->vfc_vfsops == (struct vfsops *)0) {
                        break;
                }
                if (i) {
                        vfsconf[i - 1].vfc_next = vfsp;
                }
                if (maxtypenum <= vfsp->vfc_typenum) {
                        maxtypenum = vfsp->vfc_typenum + 1;
                }

                bzero(&vfsc, sizeof(struct vfsconf));
                vfsc.vfc_reserved1 = 0;
                bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
                vfsc.vfc_typenum = vfsp->vfc_typenum;
                vfsc.vfc_refcount = vfsp->vfc_refcount;
                vfsc.vfc_flags = vfsp->vfc_flags;
                vfsc.vfc_reserved2 = 0;
                vfsc.vfc_reserved3 = 0;

                if (vfsp->vfc_vfsops->vfs_sysctl) {
                        struct sysctl_oid *oidp = NULL;
                        struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

                        MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
                        *oidp = oid;

                        /* Memory for VFS oid held by vfsentry forever */
                        vfsp->vfc_sysctl = oidp;
                        oidp->oid_name = vfsp->vfc_name;
                        sysctl_register_oid(vfsp->vfc_sysctl);
                }

                (*vfsp->vfc_vfsops->vfs_init)(&vfsc);

                numused_vfsslots++;
                numregistered_fses++;
        }
        /* next vfc_typenum to be used */
        maxvfstypenum = maxtypenum;

        /*
         * Initialize the vnop authorization scope.
         */
        vnode_authorize_init();

        /*
         * Initialize the quota system.
         */
#if QUOTA
        dqinit();
#endif

        /*
         * create a mount point for dead vnodes
         */
        MALLOC_ZONE(mp, struct mount *, sizeof(struct mount),
            M_MOUNT, M_WAITOK);
        bzero((char *)mp, sizeof(struct mount));
        /* Initialize the default IO constraints */
        mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
        mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
        mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
        mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
        mp->mnt_devblocksize = DEV_BSIZE;
        mp->mnt_alignmentmask = PAGE_MASK;
        mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
        mp->mnt_ioscale = 1;
        mp->mnt_ioflags = 0;
        mp->mnt_realrootvp = NULLVP;
        mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

        TAILQ_INIT(&mp->mnt_vnodelist);
        TAILQ_INIT(&mp->mnt_workerqueue);
        TAILQ_INIT(&mp->mnt_newvnodes);
        mp->mnt_flag = MNT_LOCAL;
        mp->mnt_lflag = MNT_LDEAD;
        mount_lock_init(mp);

#if CONFIG_MACF
        mac_mount_label_init(mp);
        mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
        dead_mountp = mp;

#if FS_COMPRESSION
        decmpfs_init();
#endif

        nspace_resolver_init();
}

void
vnode_list_lock(void)
{
        lck_spin_lock_grp(vnode_list_spin_lock, vnode_list_lck_grp);
}

void
vnode_list_unlock(void)
{
        lck_spin_unlock(vnode_list_spin_lock);
}

void
mount_list_lock(void)
{
        lck_mtx_lock(mnt_list_mtx_lock);
}

void
mount_list_unlock(void)
{
        lck_mtx_unlock(mnt_list_mtx_lock);
}

void
mount_lock_init(mount_t mp)
{
        lck_mtx_init(&mp->mnt_mlock, mnt_lck_grp, mnt_lck_attr);
        lck_mtx_init(&mp->mnt_iter_lock, mnt_lck_grp, mnt_lck_attr);
        lck_mtx_init(&mp->mnt_renamelock, mnt_lck_grp, mnt_lck_attr);
        lck_rw_init(&mp->mnt_rwlock, mnt_lck_grp, mnt_lck_attr);
}

void
mount_lock_destroy(mount_t mp)
{
        lck_mtx_destroy(&mp->mnt_mlock, mnt_lck_grp);
        lck_mtx_destroy(&mp->mnt_iter_lock, mnt_lck_grp);
        lck_mtx_destroy(&mp->mnt_renamelock, mnt_lck_grp);
        lck_rw_destroy(&mp->mnt_rwlock, mnt_lck_grp);
}


/*
 * Name: vfstable_add
 *
 * Description: Add a filesystem to the vfsconf list at the first
 *              unused slot.  If no static slots are available, a new
 *              entry is allocated and linked onto the list instead.
 *
 * Parameter:   nvfsp   vfsconf for VFS to add
 *
 * Returns:     Pointer to the vfstable entry the filesystem was
 *              copied into: either a static vfsconf slot or a
 *              dynamically allocated entry.
 *
 * Notes:       The vfsconf should be treated as a linked list by
 *              all external references, as the implementation is
 *              expected to change in the future.  The linkage is
 *              through ->vfc_next, and the list is NULL terminated.
 *
 * Warning:     This code assumes that vfsconf[0] is non-empty.
 */
struct vfstable *
vfstable_add(struct vfstable *nvfsp)
{
        int slot;
        struct vfstable *slotp, *allocated = NULL;
        struct sysctl_oid *oidp = NULL;


        if (nvfsp->vfc_vfsops->vfs_sysctl) {
                struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

                MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
                *oidp = oid;
        }

        /*
         * Find the next empty slot; we recognize an empty slot by a
         * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
         * ensure we set the entry back to NULL.
         */
findslot:
        mount_list_lock();
        for (slot = 0; slot < maxvfsslots; slot++) {
                if (vfsconf[slot].vfc_vfsops == NULL) {
                        break;
                }
        }
        if (slot == maxvfsslots) {
                if (allocated == NULL) {
                        mount_list_unlock();
                        /* out of static slots; allocate one instead */
                        MALLOC(allocated, struct vfstable *, sizeof(struct vfstable),
                            M_TEMP, M_WAITOK);
                        goto findslot;
                } else {
                        slotp = allocated;
                }
        } else {
                slotp = &vfsconf[slot];
        }

        /*
         * Replace the contents of the next empty slot with the contents
         * of the provided nvfsp.
         *
         * Note: Takes advantage of the fact that 'slot' was left
         * with the value of 'maxvfsslots' in the allocation case.
         */
        bcopy(nvfsp, slotp, sizeof(struct vfstable));
        if (slot != 0) {
                slotp->vfc_next = vfsconf[slot - 1].vfc_next;
                vfsconf[slot - 1].vfc_next = slotp;
        } else {
                slotp->vfc_next = NULL;
        }

        if (slotp != allocated) {
                /* used a statically allocated slot */
                numused_vfsslots++;
        }
        numregistered_fses++;

        if (oidp) {
                /* Memory freed in vfstable_del after unregistration */
                slotp->vfc_sysctl = oidp;
                oidp->oid_name = slotp->vfc_name;
                sysctl_register_oid(slotp->vfc_sysctl);
        }

        mount_list_unlock();

        if (allocated && allocated != slotp) {
                /* did allocation, but ended up using static slot */
                FREE(allocated, M_TEMP);
        }

        return slotp;
}
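
/*
 * Illustrative sketch (not compiled): external code is expected to treat the
 * registered filesystems as the NULL-terminated list described above rather
 * than as an array, since entries beyond the static slots live on the heap.
 * example_find_fs_by_name is a hypothetical helper.
 */
#if 0
static struct vfstable *
example_find_fs_by_name(const char *name)
{
        struct vfstable *vfsp;

        mount_list_lock();
        for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next) {
                if (strncmp(vfsp->vfc_name, name, sizeof(vfsp->vfc_name)) == 0) {
                        break;
                }
        }
        mount_list_unlock();
        return vfsp;
}
#endif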

/*
 * Name: vfstable_del
 *
 * Description: Remove a filesystem from the vfsconf list.  If the
 *              entry is not on the list, return an error.
 *
 * Parameter:   vtbl    vfstable entry of the VFS to remove
 *
 * Returns:     0       Success
 *              ESRCH   vtbl was not found on the vfsconf list
 *
 * Notes:       The caller must hold the mount list lock across the
 *              call; it may be dropped and reacquired internally to
 *              free a dynamically allocated entry.
 */
int
vfstable_del(struct vfstable * vtbl)
{
        struct vfstable **vcpp;
        struct vfstable *vcdelp;

#if DEBUG
        lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

        /*
         * Traverse the list looking for vtbl; if found, *vcpp
         * will contain the address of the pointer to the entry to
         * be removed.
         */
        for (vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
                if (*vcpp == vtbl) {
                        break;
                }
        }

        if (*vcpp == NULL) {
                return ESRCH; /* vtbl not on vfsconf list */
        }
        if ((*vcpp)->vfc_sysctl) {
                sysctl_unregister_oid((*vcpp)->vfc_sysctl);
                (*vcpp)->vfc_sysctl->oid_name = NULL;
                FREE((*vcpp)->vfc_sysctl, M_TEMP);
                (*vcpp)->vfc_sysctl = NULL;
        }

        /* Unlink entry */
        vcdelp = *vcpp;
        *vcpp = (*vcpp)->vfc_next;

        /*
         * Is this an entry from our static table?  We find out by
         * checking whether the pointer to the entry being deleted
         * falls within the address range of the static table.
         */
        if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */
                /* Mark as empty for vfstable_add() */
                bzero(vcdelp, sizeof(struct vfstable));
                numregistered_fses--;
                numused_vfsslots--;
        } else { /* N */
                /*
                 * This entry was dynamically allocated; we must free it;
                 * we would prefer to have just linked the caller's
                 * vfsconf onto our list, but it may not be persistent
                 * because of the previous (copying) implementation.
                 */
                numregistered_fses--;
                mount_list_unlock();
                FREE(vcdelp, M_TEMP);
                mount_list_lock();
        }

#if DEBUG
        lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

        return 0;
}
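
/*
 * Illustrative sketch (not compiled): per the asserts above, a caller enters
 * and leaves vfstable_del() holding the mount list lock, even though the
 * routine may briefly drop it internally to free a dynamically allocated
 * entry.  example_unregister_fs is a hypothetical wrapper.
 */
#if 0
static int
example_unregister_fs(struct vfstable *vtbl)
{
        int error;

        mount_list_lock();
        error = vfstable_del(vtbl);
        mount_list_unlock();
        return error;
}
#endif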

void
SPECHASH_LOCK(void)
{
        lck_mtx_lock(spechash_mtx_lock);
}

void
SPECHASH_UNLOCK(void)
{
        lck_mtx_unlock(spechash_mtx_lock);
}