/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *    The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    @(#)vfs_init.c  8.5 (Berkeley) 5/11/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */


#include <sys/param.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/vm.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/decmpfs.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#include <sys/kauth.h>
#endif
#if QUOTA
#include <sys/quota.h>
#endif

/*
 * Sigh, such primitive tools are these...
 */
#if 0
#define DODEBUG(A) A
#else
#define DODEBUG(A)
#endif

__private_extern__ void vntblinit(void);

extern struct vnodeopv_desc *vfs_opv_descs[];
        /* a list of lists of vnodeops defns */
extern struct vnodeop_desc *vfs_op_descs[];
        /* and the operations they perform */
/*
 * This code doesn't work if the defn is **vnodeop_defns with cc.
 * The problem is that the compiler sometimes puts in an extra level
 * of indirection for arrays.  It's an interesting "feature" of C.
 */
int vfs_opv_numops;

typedef int (*PFIvp)(void *);

/*
 * A miscellaneous routine.
 * A generic "default" routine that just returns an error.
 */
int
vn_default_error(void)
{
    return ENOTSUP;
}
/*
 * vfs_init.c
 *
 * Allocate and fill in operations vectors.
 *
 * An undocumented feature of this approach to defining operations is that
 * there can be multiple entries in vfs_opv_descs for the same operations
 * vector.  This allows third parties to extend the set of operations
 * supported by another layer in a binary compatible way.  For example,
 * assume that NFS needed to be modified to support Ficus.  NFS has an entry
 * (probably nfs_vnodeop_decls) declaring all the operations NFS supports by
 * default.  Ficus could add another entry (ficus_nfs_vnodeop_decl_extensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code.  (Of course, the OTW NFS protocol still needs to be munged, but
 * that is a (whole)nother story.)  This is a feature.  An illustrative,
 * hypothetical descriptor declaration appears after vfs_opv_init() below.
 */
void
vfs_opv_init(void)
{
    int i, j, k;
    int(***opv_desc_vector_p)(void *);
    int(**opv_desc_vector)(void *);
    struct vnodeopv_entry_desc *opve_descp;

    /*
     * Allocate the dynamic vectors and fill them in.
     */
    for (i = 0; vfs_opv_descs[i]; i++) {
        opv_desc_vector_p = vfs_opv_descs[i]->opv_desc_vector_p;
        /*
         * Allocate and init the vector, if it needs it.
         * Also handle backwards compatibility.
         */
        if (*opv_desc_vector_p == NULL) {
            MALLOC(*opv_desc_vector_p, PFIvp*,
                vfs_opv_numops * sizeof(PFIvp), M_TEMP, M_WAITOK);
            bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(PFIvp));
            DODEBUG(printf("vector at %x allocated\n",
                opv_desc_vector_p));
        }
        opv_desc_vector = *opv_desc_vector_p;
        for (j = 0; vfs_opv_descs[i]->opv_desc_ops[j].opve_op; j++) {
            opve_descp = &(vfs_opv_descs[i]->opv_desc_ops[j]);

            /* Silently skip known-disabled operations */
            if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
                printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
                    vfs_opv_descs[i], opve_descp->opve_op->vdesc_name);
                continue;
            }
            /*
             * Sanity check: is this operation listed
             * in the list of operations?  We check this
             * by seeing if its offset is zero.  Since
             * the default routine should always be listed
             * first, it should be the only one with a zero
             * offset.  Any other operation with a zero
             * offset is probably not listed in
             * vfs_op_descs, and so is probably an error.
             *
             * A panic here means the layer programmer
             * has committed the all-too common bug
             * of adding a new operation to the layer's
             * list of vnode operations but
             * not adding the operation to the system-wide
             * list of supported operations.
             */
            if (opve_descp->opve_op->vdesc_offset == 0 &&
                opve_descp->opve_op !=
                VDESC(vnop_default)) {
                printf("operation %s not listed in %s.\n",
                    opve_descp->opve_op->vdesc_name,
                    "vfs_op_descs");
                panic("vfs_opv_init: bad operation");
            }
            /*
             * Fill in this entry.
             */
            opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
                opve_descp->opve_impl;
        }
    }
    /*
     * Finally, go back and replace unfilled routines
     * with their default.  (Sigh, an O(n^3) algorithm.  I
     * could make it better, but that'd be work, and n is small.)
     */
    for (i = 0; vfs_opv_descs[i]; i++) {
        opv_desc_vector = *(vfs_opv_descs[i]->opv_desc_vector_p);
        /*
         * Force every operations vector to have a default routine.
         */
        if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
            panic("vfs_opv_init: operation vector without default routine.");
        }
        for (k = 0; k < vfs_opv_numops; k++) {
            if (opv_desc_vector[k] == NULL) {
                opv_desc_vector[k] =
                    opv_desc_vector[VOFFSET(vnop_default)];
            }
        }
    }
}
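
/*
 * Illustrative sketch only (not compiled): how a hypothetical layer could
 * describe an operations vector for vfs_opv_init() to fill in.  The names
 * example_vnop_lookup, example_vnodeop_p, example_vnodeop_entries and
 * example_vnodeop_opv_desc are invented for this sketch; vnop_default_desc,
 * vnop_lookup_desc and vn_default_error are the real symbols.
 */
#if 0
static int example_vnop_lookup(void *ap);   /* hypothetical implementation */

int(**example_vnodeop_p)(void *);           /* vector allocated and filled by vfs_opv_init() */

static struct vnodeopv_entry_desc example_vnodeop_entries[] = {
    { &vnop_default_desc, (PFIvp)vn_default_error },    /* default must be listed first */
    { &vnop_lookup_desc, (PFIvp)example_vnop_lookup },
    { NULL, (PFIvp)NULL }                                /* NULL-terminated */
};

static struct vnodeopv_desc example_vnodeop_opv_desc = {
    &example_vnodeop_p, example_vnodeop_entries
};
/* example_vnodeop_opv_desc would then be placed in vfs_opv_descs[]. */
#endif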

/*
 * Initialize known vnode operations vectors.
 */
void
vfs_op_init(void)
{
    int i;

    DODEBUG(printf("Vnode_interface_init.\n"));
    /*
     * Set all vnode vectors to a well known value.
     */
    for (i = 0; vfs_opv_descs[i]; i++) {
        *(vfs_opv_descs[i]->opv_desc_vector_p) = NULL;
    }
    /*
     * Figure out how many ops there are by counting the table,
     * and assign each its offset.
     */
    for (vfs_opv_numops = 0, i = 0; vfs_op_descs[i]; i++) {
        /* Silently skip known-disabled operations */
        if (vfs_op_descs[i]->vdesc_flags & VDESC_DISABLED) {
            continue;
        }
        vfs_op_descs[i]->vdesc_offset = vfs_opv_numops;
        vfs_opv_numops++;
    }
    DODEBUG(printf("vfs_opv_numops=%d\n", vfs_opv_numops));
}

/*
 * Routines having to do with the management of the vnode table.
 */
extern struct vnodeops dead_vnodeops;
extern struct vnodeops spec_vnodeops;

/* vars for vnode lock */
lck_grp_t * vnode_lck_grp;
lck_grp_attr_t * vnode_lck_grp_attr;
lck_attr_t * vnode_lck_attr;

#if CONFIG_TRIGGERS
/* vars for vnode trigger resolver */
lck_grp_t * trigger_vnode_lck_grp;
lck_grp_attr_t * trigger_vnode_lck_grp_attr;
lck_attr_t * trigger_vnode_lck_attr;
#endif

lck_grp_t * fd_vn_lck_grp;
lck_grp_attr_t * fd_vn_lck_grp_attr;
lck_attr_t * fd_vn_lck_attr;

/* vars for vnode list lock */
lck_grp_t * vnode_list_lck_grp;
lck_grp_attr_t * vnode_list_lck_grp_attr;
lck_attr_t * vnode_list_lck_attr;
lck_spin_t * vnode_list_spin_lock;
lck_mtx_t * spechash_mtx_lock;

/* vars for vfsconf lock */
lck_grp_t * fsconf_lck_grp;
lck_grp_attr_t * fsconf_lck_grp_attr;
lck_attr_t * fsconf_lck_attr;


/* vars for mount lock */
lck_grp_t * mnt_lck_grp;
lck_grp_attr_t * mnt_lck_grp_attr;
lck_attr_t * mnt_lck_attr;

/* vars for mount list lock */
lck_grp_t * mnt_list_lck_grp;
lck_grp_attr_t * mnt_list_lck_grp_attr;
lck_attr_t * mnt_list_lck_attr;
lck_mtx_t * mnt_list_mtx_lock;

/* vars for sync mutex */
lck_grp_t * sync_mtx_lck_grp;
lck_grp_attr_t * sync_mtx_lck_grp_attr;
lck_attr_t * sync_mtx_lck_attr;
lck_mtx_t * sync_mtx_lck;

lck_mtx_t *pkg_extensions_lck;

struct mount * dead_mountp;

extern void nspace_handler_init(void);

/*
 * Initialize the vnode structures and initialize each file system type.
 */
void
vfsinit(void)
{
    struct vfstable *vfsp;
    int i, maxtypenum;
    struct mount * mp;

    /* Allocate vnode list lock group attribute and group */
    vnode_list_lck_grp_attr = lck_grp_attr_alloc_init();

    vnode_list_lck_grp = lck_grp_alloc_init("vnode list", vnode_list_lck_grp_attr);

    /* Allocate vnode list lock attribute */
    vnode_list_lck_attr = lck_attr_alloc_init();

    /* Allocate vnode list lock */
    vnode_list_spin_lock = lck_spin_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* Allocate spec hash list lock */
    spechash_mtx_lock = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* Allocate the package extensions table lock */
    pkg_extensions_lck = lck_mtx_alloc_init(vnode_list_lck_grp, vnode_list_lck_attr);

    /* allocate vnode lock group attribute and group */
    vnode_lck_grp_attr = lck_grp_attr_alloc_init();

    vnode_lck_grp = lck_grp_alloc_init("vnode", vnode_lck_grp_attr);

    /* Allocate vnode lock attribute */
    vnode_lck_attr = lck_attr_alloc_init();

#if CONFIG_TRIGGERS
    trigger_vnode_lck_grp_attr = lck_grp_attr_alloc_init();
    trigger_vnode_lck_grp = lck_grp_alloc_init("trigger_vnode", trigger_vnode_lck_grp_attr);
    trigger_vnode_lck_attr = lck_attr_alloc_init();
#endif
    /* Allocate per fd vnode data lock attribute and group */
    fd_vn_lck_grp_attr = lck_grp_attr_alloc_init();
    fd_vn_lck_grp = lck_grp_alloc_init("fd_vnode_data", fd_vn_lck_grp_attr);
    fd_vn_lck_attr = lck_attr_alloc_init();

    /* Allocate fs config lock group attribute and group */
    fsconf_lck_grp_attr = lck_grp_attr_alloc_init();

    fsconf_lck_grp = lck_grp_alloc_init("fs conf", fsconf_lck_grp_attr);

    /* Allocate fs config lock attribute */
    fsconf_lck_attr = lck_attr_alloc_init();

    /* Allocate mount point related lock structures */

    /* Allocate mount list lock group attribute and group */
    mnt_list_lck_grp_attr = lck_grp_attr_alloc_init();

    mnt_list_lck_grp = lck_grp_alloc_init("mount list", mnt_list_lck_grp_attr);

    /* Allocate mount list lock attribute */
    mnt_list_lck_attr = lck_attr_alloc_init();

    /* Allocate mount list lock */
    mnt_list_mtx_lock = lck_mtx_alloc_init(mnt_list_lck_grp, mnt_list_lck_attr);


    /* allocate mount lock group attribute and group */
    mnt_lck_grp_attr = lck_grp_attr_alloc_init();

    mnt_lck_grp = lck_grp_alloc_init("mount", mnt_lck_grp_attr);

    /* Allocate mount lock attribute */
    mnt_lck_attr = lck_attr_alloc_init();

    /* Allocate sync lock */
    sync_mtx_lck_grp_attr = lck_grp_attr_alloc_init();
    sync_mtx_lck_grp = lck_grp_alloc_init("sync thread", sync_mtx_lck_grp_attr);
    sync_mtx_lck_attr = lck_attr_alloc_init();
    sync_mtx_lck = lck_mtx_alloc_init(sync_mtx_lck_grp, sync_mtx_lck_attr);

    /*
     * Initialize the vnode table
     */
    vntblinit();
    /*
     * Initialize the filesystem event mechanism.
     */
    vfs_event_init();
    /*
     * Initialize the vnode name cache
     */
    nchinit();

    nspace_handler_init();

    /*
     * Build vnode operation vectors.
     */
    vfs_op_init();
    vfs_opv_init();   /* finish the job */
    /*
     * Initialize each file system type in the static list,
     * until the first NULL ->vfs_vfsops is encountered.
     */
    maxtypenum = VT_NON;
    for (vfsp = vfsconf, i = 0; i < maxvfsslots; i++, vfsp++) {
        struct vfsconf vfsc;
        if (vfsp->vfc_vfsops == (struct vfsops *)0) {
            break;
        }
        if (i) {
            vfsconf[i - 1].vfc_next = vfsp;
        }
        if (maxtypenum <= vfsp->vfc_typenum) {
            maxtypenum = vfsp->vfc_typenum + 1;
        }

        bzero(&vfsc, sizeof(struct vfsconf));
        vfsc.vfc_reserved1 = 0;
        bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
        vfsc.vfc_typenum = vfsp->vfc_typenum;
        vfsc.vfc_refcount = vfsp->vfc_refcount;
        vfsc.vfc_flags = vfsp->vfc_flags;
        vfsc.vfc_reserved2 = 0;
        vfsc.vfc_reserved3 = 0;

        if (vfsp->vfc_vfsops->vfs_sysctl) {
            struct sysctl_oid *oidp = NULL;
            struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, vfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

            MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
            *oidp = oid;

            /* Memory for VFS oid held by vfsentry forever */
            vfsp->vfc_sysctl = oidp;
            oidp->oid_name = vfsp->vfc_name;
            sysctl_register_oid(vfsp->vfc_sysctl);
        }

        (*vfsp->vfc_vfsops->vfs_init)(&vfsc);

        numused_vfsslots++;
        numregistered_fses++;
    }
    /* next vfc_typenum to be used */
    maxvfstypenum = maxtypenum;

    /*
     * Initialize the vnop authorization scope.
     */
    vnode_authorize_init();

    /*
     * Initialize the quota system.
     */
#if QUOTA
    dqinit();
#endif

    /*
     * create a mount point for dead vnodes
     */
    MALLOC_ZONE(mp, struct mount *, sizeof(struct mount),
        M_MOUNT, M_WAITOK);
    bzero((char *)mp, sizeof(struct mount));
    /* Initialize the default IO constraints */
    mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
    mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
    mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
    mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
    mp->mnt_devblocksize = DEV_BSIZE;
    mp->mnt_alignmentmask = PAGE_MASK;
    mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
    mp->mnt_ioscale = 1;
    mp->mnt_ioflags = 0;
    mp->mnt_realrootvp = NULLVP;
    mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;

    TAILQ_INIT(&mp->mnt_vnodelist);
    TAILQ_INIT(&mp->mnt_workerqueue);
    TAILQ_INIT(&mp->mnt_newvnodes);
    mp->mnt_flag = MNT_LOCAL;
    mp->mnt_lflag = MNT_LDEAD;
    mount_lock_init(mp);

#if CONFIG_MACF
    mac_mount_label_init(mp);
    mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
    dead_mountp = mp;

#if FS_COMPRESSION
    decmpfs_init();
#endif
}

void
vnode_list_lock(void)
{
    lck_spin_lock_grp(vnode_list_spin_lock, vnode_list_lck_grp);
}

void
vnode_list_unlock(void)
{
    lck_spin_unlock(vnode_list_spin_lock);
}

void
mount_list_lock(void)
{
    lck_mtx_lock(mnt_list_mtx_lock);
}

void
mount_list_unlock(void)
{
    lck_mtx_unlock(mnt_list_mtx_lock);
}

void
mount_lock_init(mount_t mp)
{
    lck_mtx_init(&mp->mnt_mlock, mnt_lck_grp, mnt_lck_attr);
    lck_mtx_init(&mp->mnt_iter_lock, mnt_lck_grp, mnt_lck_attr);
    lck_mtx_init(&mp->mnt_renamelock, mnt_lck_grp, mnt_lck_attr);
    lck_rw_init(&mp->mnt_rwlock, mnt_lck_grp, mnt_lck_attr);
}

void
mount_lock_destroy(mount_t mp)
{
    lck_mtx_destroy(&mp->mnt_mlock, mnt_lck_grp);
    lck_mtx_destroy(&mp->mnt_iter_lock, mnt_lck_grp);
    lck_mtx_destroy(&mp->mnt_renamelock, mnt_lck_grp);
    lck_rw_destroy(&mp->mnt_rwlock, mnt_lck_grp);
}


/*
 * Name:        vfstable_add
 *
 * Description: Add a filesystem to the vfsconf list at the first
 *              unused slot.  If no static slots are available, a new
 *              entry is allocated dynamically.
 *
 * Parameter:   nvfsp           vfsconf for VFS to add
 *
 * Returns:     pointer to the vfstable entry that now describes the
 *              filesystem
 *
 * Notes:       The vfsconf should be treated as a linked list by
 *              all external references, as the implementation is
 *              expected to change in the future.  The linkage is
 *              through ->vfc_next, and the list is NULL terminated.
 *              A sketch of such a traversal appears after this function.
 *
 * Warning:     This code assumes that vfsconf[0] is non-empty.
 */
struct vfstable *
vfstable_add(struct vfstable *nvfsp)
{
    int slot;
    struct vfstable *slotp, *allocated = NULL;
    struct sysctl_oid *oidp = NULL;


    if (nvfsp->vfc_vfsops->vfs_sysctl) {
        struct sysctl_oid oid = SYSCTL_STRUCT_INIT(_vfs, nvfsp->vfc_typenum, , CTLTYPE_NODE | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0, vfs_sysctl_node, "-", "");

        MALLOC(oidp, struct sysctl_oid *, sizeof(struct sysctl_oid), M_TEMP, M_WAITOK);
        *oidp = oid;
    }

    /*
     * Find the next empty slot; we recognize an empty slot by a
     * NULL-valued ->vfc_vfsops, so if we delete a VFS, we must
     * ensure we set the entry back to NULL.
     */
findslot:
    mount_list_lock();
    for (slot = 0; slot < maxvfsslots; slot++) {
        if (vfsconf[slot].vfc_vfsops == NULL) {
            break;
        }
    }
    if (slot == maxvfsslots) {
        if (allocated == NULL) {
            mount_list_unlock();
            /* out of static slots; allocate one instead */
            MALLOC(allocated, struct vfstable *, sizeof(struct vfstable),
                M_TEMP, M_WAITOK);
            goto findslot;
        } else {
            slotp = allocated;
        }
    } else {
        slotp = &vfsconf[slot];
    }

    /*
     * Replace the contents of the next empty slot with the contents
     * of the provided nvfsp.
     *
     * Note: Takes advantage of the fact that 'slot' was left
     * with the value of 'maxvfsslots' in the allocation case.
     */
    bcopy(nvfsp, slotp, sizeof(struct vfstable));
    if (slot != 0) {
        slotp->vfc_next = vfsconf[slot - 1].vfc_next;
        vfsconf[slot - 1].vfc_next = slotp;
    } else {
        slotp->vfc_next = NULL;
    }

    if (slotp != allocated) {
        /* used a statically allocated slot */
        numused_vfsslots++;
    }
    numregistered_fses++;

    if (oidp) {
        /* Memory freed in vfstable_del after unregistration */
        slotp->vfc_sysctl = oidp;
        oidp->oid_name = slotp->vfc_name;
        sysctl_register_oid(slotp->vfc_sysctl);
    }

    mount_list_unlock();

    if (allocated && allocated != slotp) {
        /* did allocation, but ended up using static slot */
        FREE(allocated, M_TEMP);
    }

    return slotp;
}
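
/*
 * Illustrative sketch only (not compiled): per the Notes above, external code
 * should treat vfsconf as a NULL-terminated list linked through ->vfc_next
 * rather than as an array.  vfstable_find_by_name is an invented helper shown
 * purely to demonstrate that traversal under the mount list lock.
 */
#if 0
static struct vfstable *
vfstable_find_by_name(const char *fs_name)
{
    struct vfstable *vfsp;

    mount_list_lock();
    for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next) {
        if (strncmp(vfsp->vfc_name, fs_name, sizeof(vfsp->vfc_name)) == 0) {
            break;
        }
    }
    mount_list_unlock();
    return vfsp;   /* NULL if no such filesystem is registered */
}
#endif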

/*
 * Name:        vfstable_del
 *
 * Description: Remove a filesystem's vfstable entry from the vfsconf list.
 *              If the entry is not on the list, return an error.
 *
 * Parameter:   vtbl            vfstable entry of the VFS to remove
 *
 * Returns:     0               Success
 *              ESRCH           vtbl is not on the vfsconf list
 *
 * Notes:       Hopefully all filesystems have unique names.
 *              The caller must hold the mount list lock.
 */
int
vfstable_del(struct vfstable * vtbl)
{
    struct vfstable **vcpp;
    struct vfstable *vcdelp;

#if DEBUG
    lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

    /*
     * Traverse the list looking for vtbl; if found, *vcpp
     * will contain the address of the pointer to the entry to
     * be removed.
     */
    for (vcpp = &vfsconf; *vcpp; vcpp = &(*vcpp)->vfc_next) {
        if (*vcpp == vtbl) {
            break;
        }
    }

    if (*vcpp == NULL) {
        return ESRCH; /* vtbl not on vfsconf list */
    }
    if ((*vcpp)->vfc_sysctl) {
        sysctl_unregister_oid((*vcpp)->vfc_sysctl);
        (*vcpp)->vfc_sysctl->oid_name = NULL;
        FREE((*vcpp)->vfc_sysctl, M_TEMP);
        (*vcpp)->vfc_sysctl = NULL;
    }

    /* Unlink entry */
    vcdelp = *vcpp;
    *vcpp = (*vcpp)->vfc_next;

    /*
     * Is this an entry from our static table?  We find out by
     * seeing whether the pointer to the object to be deleted falls
     * within the address range covered by the table (or not).
     */
    if (vcdelp >= vfsconf && vcdelp < (vfsconf + maxvfsslots)) { /* Y */
        /* Mark as empty for vfstable_add() */
        bzero(vcdelp, sizeof(struct vfstable));
        numregistered_fses--;
        numused_vfsslots--;
    } else { /* N */
        /*
         * This entry was dynamically allocated; we must free it;
         * we would prefer to have just linked the caller's
         * vfsconf onto our list, but it may not be persistent
         * because of the previous (copying) implementation.
         */
        numregistered_fses--;
        mount_list_unlock();
        FREE(vcdelp, M_TEMP);
        mount_list_lock();
    }

#if DEBUG
    lck_mtx_assert(mnt_list_mtx_lock, LCK_MTX_ASSERT_OWNED);
#endif /* DEBUG */

    return 0;
}

void
SPECHASH_LOCK(void)
{
    lck_mtx_lock(spechash_mtx_lock);
}

void
SPECHASH_UNLOCK(void)
{
    lck_mtx_unlock(spechash_mtx_lock);
}