]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_vnops.c
7412131ca71a2a55a59637b7038ac9a5027da6bc
[apple/xnu.git] / bsd / hfs / hfs_vnops.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/systm.h>
24 #include <sys/kernel.h>
25 #include <sys/file.h>
26 #include <sys/dirent.h>
27 #include <sys/stat.h>
28 #include <sys/buf.h>
29 #include <sys/mount.h>
30 #include <sys/vnode.h>
31 #include <sys/malloc.h>
32 #include <sys/namei.h>
33 #include <sys/ubc.h>
34 #include <sys/quota.h>
35
36 #include <miscfs/specfs/specdev.h>
37 #include <miscfs/fifofs/fifo.h>
38 #include <vfs/vfs_support.h>
39 #include <machine/spl.h>
40
41 #include <sys/kdebug.h>
42
43 #include "hfs.h"
44 #include "hfs_catalog.h"
45 #include "hfs_cnode.h"
46 #include "hfs_lockf.h"
47 #include "hfs_dbg.h"
48 #include "hfs_mount.h"
49 #include "hfs_quota.h"
50 #include "hfs_endian.h"
51
52 #include "hfscommon/headers/BTreesInternal.h"
53 #include "hfscommon/headers/FileMgrInternal.h"
54
55 #define MAKE_DELETED_NAME(NAME,FID) \
56 (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
57
58
59 extern uid_t console_user;
60
61 /* Global vfs data structures for hfs */
62
63
64 extern int groupmember(gid_t gid, struct ucred *cred);
65
66 static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
67 struct componentname *cnp);
68
69 static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
70 struct vnode **rvpp, struct proc *p);
71
72 static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);
73
74 int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags);
75
76 int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
77 struct proc *p);
78 int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
79 struct proc *p);
80 int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
81 struct ucred *cred, struct proc *p);
82
83 /*****************************************************************************
84 *
85 * Common Operations on vnodes
86 *
87 *****************************************************************************/
88
89 /*
90 * Create a regular file
91 #% create dvp L U U
92 #% create vpp - L -
93 #
94 vop_create {
95 IN WILLRELE struct vnode *dvp;
96 OUT struct vnode **vpp;
97 IN struct componentname *cnp;
98 IN struct vattr *vap;
99
100 We are responsible for freeing the namei buffer,
101 it is done in hfs_makenode()
102 */
103
104 static int
105 hfs_create(ap)
106 struct vop_create_args /* {
107 struct vnode *a_dvp;
108 struct vnode **a_vpp;
109 struct componentname *a_cnp;
110 struct vattr *a_vap;
111 } */ *ap;
112 {
113 struct vattr *vap = ap->a_vap;
114
115 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
116 ap->a_dvp, ap->a_vpp, ap->a_cnp));
117 }
118
119
120 /*
121 * Mknod vnode call
122
123 #% mknod dvp L U U
124 #% mknod vpp - X -
125 #
126 vop_mknod {
127 IN WILLRELE struct vnode *dvp;
128 OUT WILLRELE struct vnode **vpp;
129 IN struct componentname *cnp;
130 IN struct vattr *vap;
131 */
132 /* ARGSUSED */
133
/*
 * Make a special file (VOP_MKNOD entry point).
 *
 * Creates the catalog record via hfs_makenode(), stashes the device
 * number in the cnode for block/character nodes, then immediately
 * discards the vnode so the next lookup reloads it and runs the
 * usual device-alias checking.
 */
static int
hfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct cnode *cp;
	int error;

	/*
	 * Special files are only supported on HFS Plus volumes; on
	 * failure we must abort the in-flight name operation and
	 * release the (locked) directory ourselves.
	 */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EOPNOTSUPP);
	}

	/* Create the vnode */
	error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp);
	if (error)
		return (error);
	cp = VTOC(*vpp);
	cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
	/* Record the device number for block and character nodes. */
	if ((vap->va_rdev != VNOVAL) &&
	    (vap->va_type == VBLK || vap->va_type == VCHR))
		cp->c_rdev = vap->va_rdev;
	/*
	 * Remove cnode so that it will be reloaded by lookup and
	 * checked to see if it is an alias of an existing vnode.
	 * Note: unlike UFS, we don't bash v_type here.
	 */
	vput(*vpp);
	vgone(*vpp);
	*vpp = 0;
	return (0);
}
174
175
176 /*
177 * Open called.
178 #% open vp L L L
179 #
180 vop_open {
181 IN struct vnode *vp;
182 IN int mode;
183 IN struct ucred *cred;
184 IN struct proc *p;
185 */
186
187
188 static int
189 hfs_open(ap)
190 struct vop_open_args /* {
191 struct vnode *a_vp;
192 int a_mode;
193 struct ucred *a_cred;
194 struct proc *a_p;
195 } */ *ap;
196 {
197 struct vnode *vp = ap->a_vp;
198
199 /*
200 * Files marked append-only must be opened for appending.
201 */
202 if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
203 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
204 return (EPERM);
205
206 return (0);
207 }
208
209 /*
210 * Close called.
211 *
212 * Update the times on the cnode.
213 #% close vp U U U
214 #
215 vop_close {
216 IN struct vnode *vp;
217 IN int fflag;
218 IN struct ucred *cred;
219 IN struct proc *p;
220 */
221
222
223 static int
224 hfs_close(ap)
225 struct vop_close_args /* {
226 struct vnode *a_vp;
227 int a_fflag;
228 struct ucred *a_cred;
229 struct proc *a_p;
230 } */ *ap;
231 {
232 register struct vnode *vp = ap->a_vp;
233 register struct cnode *cp = VTOC(vp);
234 register struct filefork *fp = VTOF(vp);
235 struct proc *p = ap->a_p;
236 struct timeval tv;
237 off_t leof;
238 u_long blks, blocksize;
239 int devBlockSize;
240 int error;
241
242 simple_lock(&vp->v_interlock);
243 if ((!UBCISVALID(vp) && vp->v_usecount > 1)
244 || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
245 tv = time;
246 CTIMES(cp, &tv, &tv);
247 }
248 simple_unlock(&vp->v_interlock);
249
250 /*
251 * VOP_CLOSE can be called with vp locked (from vclean).
252 * We check for this case using VOP_ISLOCKED and bail.
253 *
254 * XXX During a force unmount we won't do the cleanup below!
255 */
256 if (vp->v_type == VDIR || VOP_ISLOCKED(vp))
257 return (0);
258
259 leof = fp->ff_size;
260
261 if ((fp->ff_blocks > 0) && !ISSET(cp->c_flag, C_DELETED)) {
262 enum vtype our_type = vp->v_type;
263 u_long our_id = vp->v_id;
264 int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
265
266 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
267 if (error)
268 return (0);
269 /*
270 * Since we can context switch in vn_lock our vnode
271 * could get recycled (eg umount -f). Double check
272 * that its still ours.
273 */
274 if (vp->v_type != our_type || vp->v_id != our_id
275 || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
276 VOP_UNLOCK(vp, 0, p);
277 return (0);
278 }
279
280 /*
281 * Last chance to explicitly zero out the areas
282 * that are currently marked invalid:
283 */
284 VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
285 (void) cluster_push(vp);
286 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
287 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
288 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
289 off_t start = invalid_range->rl_start;
290 off_t end = invalid_range->rl_end;
291
292 /* The range about to be written must be validated
293 * first, so that VOP_CMAP() will return the
294 * appropriate mapping for the cluster code:
295 */
296 rl_remove(start, end, &fp->ff_invalidranges);
297
298 (void) cluster_write(vp, (struct uio *) 0, leof,
299 invalid_range->rl_end + 1, invalid_range->rl_start,
300 (off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);
301
302 if (ISSET(vp->v_flag, VHASDIRTY))
303 (void) cluster_push(vp);
304
305 cp->c_flag |= C_MODIFIED;
306 }
307 cp->c_flag &= ~C_ZFWANTSYNC;
308 cp->c_zftimeout = 0;
309 blocksize = VTOVCB(vp)->blockSize;
310 blks = leof / blocksize;
311 if (((off_t)blks * (off_t)blocksize) != leof)
312 blks++;
313 /*
314 * Shrink the peof to the smallest size neccessary to contain the leof.
315 */
316 if (blks < fp->ff_blocks)
317 (void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
318 (void) cluster_push(vp);
319
320 if (!was_nocache)
321 CLR(vp->v_flag, VNOCACHE_DATA);
322
323 /*
324 * If the VOP_TRUNCATE didn't happen to flush the vnode's
325 * information out to disk, force it to be updated now that
326 * all invalid ranges have been zero-filled and validated:
327 */
328 if (cp->c_flag & C_MODIFIED) {
329 tv = time;
330 VOP_UPDATE(vp, &tv, &tv, 0);
331 }
332 VOP_UNLOCK(vp, 0, p);
333 }
334 return (0);
335 }
336
337 /*
338 #% access vp L L L
339 #
340 vop_access {
341 IN struct vnode *vp;
342 IN int mode;
343 IN struct ucred *cred;
344 IN struct proc *p;
345
346 */
347
348 static int
349 hfs_access(ap)
350 struct vop_access_args /* {
351 struct vnode *a_vp;
352 int a_mode;
353 struct ucred *a_cred;
354 struct proc *a_p;
355 } */ *ap;
356 {
357 struct vnode *vp = ap->a_vp;
358 struct cnode *cp = VTOC(vp);
359 struct ucred *cred = ap->a_cred;
360 register gid_t *gp;
361 mode_t mode = ap->a_mode;
362 mode_t mask = 0;
363 int i;
364 int error;
365
366 /*
367 * Disallow write attempts on read-only file systems;
368 * unless the file is a socket, fifo, or a block or
369 * character device resident on the file system.
370 */
371 if (mode & VWRITE) {
372 switch (vp->v_type) {
373 case VDIR:
374 case VLNK:
375 case VREG:
376 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
377 return (EROFS);
378 #if QUOTA
379 if ((error = hfs_getinoquota(cp)))
380 return (error);
381 #endif /* QUOTA */
382 break;
383 }
384 }
385
386 /* If immutable bit set, nobody gets to write it. */
387 if ((mode & VWRITE) && (cp->c_flags & IMMUTABLE))
388 return (EPERM);
389
390 /* Otherwise, user id 0 always gets access. */
391 if (ap->a_cred->cr_uid == 0)
392 return (0);
393
394 mask = 0;
395
396 /* Otherwise, check the owner. */
397 if (hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, ap->a_p, false) == 0) {
398 if (mode & VEXEC)
399 mask |= S_IXUSR;
400 if (mode & VREAD)
401 mask |= S_IRUSR;
402 if (mode & VWRITE)
403 mask |= S_IWUSR;
404 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
405 }
406
407 /* Otherwise, check the groups. */
408 if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
409 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
410 if (cp->c_gid == *gp) {
411 if (mode & VEXEC)
412 mask |= S_IXGRP;
413 if (mode & VREAD)
414 mask |= S_IRGRP;
415 if (mode & VWRITE)
416 mask |= S_IWGRP;
417 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
418 }
419 }
420
421 /* Otherwise, check everyone else. */
422 if (mode & VEXEC)
423 mask |= S_IXOTH;
424 if (mode & VREAD)
425 mask |= S_IROTH;
426 if (mode & VWRITE)
427 mask |= S_IWOTH;
428 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
429 }
430
431
432
433 /*
434 #% getattr vp = = =
435 #
436 vop_getattr {
437 IN struct vnode *vp;
438 IN struct vattr *vap;
439 IN struct ucred *cred;
440 IN struct proc *p;
441
442 */
443
444
445 /* ARGSUSED */
446 static int
447 hfs_getattr(ap)
448 struct vop_getattr_args /* {
449 struct vnode *a_vp;
450 struct vattr *a_vap;
451 struct ucred *a_cred;
452 struct proc *a_p;
453 } */ *ap;
454 {
455 struct vnode *vp = ap->a_vp;
456 struct cnode *cp = VTOC(vp);
457 struct vattr *vap = ap->a_vap;
458 struct timeval tv;
459
460 tv = time;
461 CTIMES(cp, &tv, &tv);
462
463 vap->va_type = vp->v_type;
464 /*
465 * [2856576] Since we are dynamically changing the owner, also
466 * effectively turn off the set-user-id and set-group-id bits,
467 * just like chmod(2) would when changing ownership. This prevents
468 * a security hole where set-user-id programs run as whoever is
469 * logged on (or root if nobody is logged in yet!)
470 */
471 vap->va_mode = (cp->c_uid == UNKNOWNUID) ? cp->c_mode & ~(S_ISUID | S_ISGID) : cp->c_mode;
472 vap->va_nlink = cp->c_nlink;
473 vap->va_uid = (cp->c_uid == UNKNOWNUID) ? console_user : cp->c_uid;
474 vap->va_gid = cp->c_gid;
475 vap->va_fsid = cp->c_dev;
476 /*
477 * Exporting file IDs from HFS Plus:
478 *
479 * For "normal" files the c_fileid is the same value as the
480 * c_cnid. But for hard link files, they are different - the
481 * c_cnid belongs to the active directory entry (ie the link)
482 * and the c_fileid is for the actual inode (ie the data file).
483 *
484 * The stat call (getattr) will always return the c_fileid
485 * and Carbon APIs, which are hardlink-ignorant, will always
486 * receive the c_cnid (from getattrlist).
487 */
488 vap->va_fileid = cp->c_fileid;
489 vap->va_atime.tv_sec = cp->c_atime;
490 vap->va_atime.tv_nsec = 0;
491 vap->va_mtime.tv_sec = cp->c_mtime;
492 vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
493 vap->va_ctime.tv_sec = cp->c_ctime;
494 vap->va_ctime.tv_nsec = 0;
495 vap->va_gen = 0;
496 vap->va_flags = cp->c_flags;
497 vap->va_rdev = 0;
498 vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;
499 vap->va_filerev = 0;
500 vap->va_spare = 0;
501 if (vp->v_type == VDIR) {
502 vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;
503 vap->va_bytes = 0;
504 } else {
505 vap->va_size = VTOF(vp)->ff_size;
506 vap->va_bytes = (u_quad_t)cp->c_blocks *
507 (u_quad_t)VTOVCB(vp)->blockSize;
508 if (vp->v_type == VBLK || vp->v_type == VCHR)
509 vap->va_rdev = cp->c_rdev;
510 }
511 return (0);
512 }
513
514 /*
515 * Set attribute vnode op. called from several syscalls
516 #% setattr vp L L L
517 #
518 vop_setattr {
519 IN struct vnode *vp;
520 IN struct vattr *vap;
521 IN struct ucred *cred;
522 IN struct proc *p;
523
524 */
525
/*
 * Set attributes (VOP_SETATTR entry point), called from several
 * syscalls (chmod, chown, chflags, utimes, truncate, ...).
 *
 * Each settable attribute group is applied in a fixed order:
 * flags first (so newly-set IMMUTABLE/APPEND is honored right away),
 * then ownership, size, times, and finally mode.  Any failure
 * returns immediately; earlier changes are NOT rolled back.
 */
static int
hfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct timeval atimeval, mtimeval;
	int error;

	/*
	 * Check for unsettable attributes.
	 */
	if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	/* Apply flag changes first. */
	if (vap->va_flags != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))
			return (error);
		/*
		 * If the file was just made immutable/append-only,
		 * stop here: no other attribute may be changed now.
		 */
		if (vap->va_flags & (IMMUTABLE | APPEND))
			return (0);
	}

	/* An immutable or append-only file takes no other changes. */
	if (cp->c_flags & (IMMUTABLE | APPEND))
		return (EPERM);

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (VTOHFS(vp)->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		/* Identify the journal files by their first extent's start block. */
		extd = &cp->c_datafork->ff_data.cf_extents[0];
		if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
			return EPERM;
		}
	}

	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
			return (EROFS);
		if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))
			return (error);
	}
	if (vap->va_size != VNOVAL) {
		/*
		 * Disallow write attempts on read-only file systems;
		 * unless the file is a socket, fifo, or a block or
		 * character device resident on the file system.
		 */
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VLNK:
		case VREG:
			if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
		if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))
			return (error);
	}
	cp = VTOC(vp);
	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
			return (EROFS);
		/*
		 * Owner may always set times; a non-owner may only do
		 * so for a utimes(NULL)-style "touch" when holding
		 * write access.
		 */
		if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {
			return (error);
		}
		if (vap->va_atime.tv_sec != VNOVAL)
			cp->c_flag |= C_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL) {
			cp->c_flag |= C_CHANGE | C_UPDATE;
			/*
			 * The utimes system call can reset the modification
			 * time but it doesn't know about HFS create times.
			 * So we need to insure that the creation time is
			 * always at least as old as the modification time.
			 */
			if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
			    (cp->c_cnid != kRootDirID) &&
			    (vap->va_mtime.tv_sec < cp->c_itime)) {
				cp->c_itime = vap->va_mtime.tv_sec;
			}
		}
		atimeval.tv_sec = vap->va_atime.tv_sec;
		atimeval.tv_usec = 0;
		mtimeval.tv_sec = vap->va_mtime.tv_sec;
		mtimeval.tv_usec = 0;
		if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))
			return (error);
	}
	error = 0;
	/* Mode changes go last. */
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
			return (EROFS);
		error = hfs_chmod(vp, (int)vap->va_mode, cred, p);
	}
	return (error);
}
644
645
646 /*
647 * Change the mode on a file.
648 * cnode must be locked before calling.
649 */
650 int
651 hfs_chmod(vp, mode, cred, p)
652 register struct vnode *vp;
653 register int mode;
654 register struct ucred *cred;
655 struct proc *p;
656 {
657 register struct cnode *cp = VTOC(vp);
658 int error;
659
660 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
661 return (0);
662
663 // XXXdbg - don't allow modification of the journal or journal_info_block
664 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
665 struct HFSPlusExtentDescriptor *extd;
666
667 extd = &cp->c_datafork->ff_data.cf_extents[0];
668 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
669 return EPERM;
670 }
671 }
672
673 #if OVERRIDE_UNKNOWN_PERMISSIONS
674 if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
675 return (0);
676 };
677 #endif
678 if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)
679 return (error);
680 if (cred->cr_uid) {
681 if (vp->v_type != VDIR && (mode & S_ISTXT))
682 return (EFTYPE);
683 if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))
684 return (EPERM);
685 }
686 cp->c_mode &= ~ALLPERMS;
687 cp->c_mode |= (mode & ALLPERMS);
688 cp->c_flag |= C_CHANGE;
689 return (0);
690 }
691
692
693 int
694 hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
695 {
696 struct cnode *cp = VTOC(vp);
697 gid_t *gp;
698 int retval = 0;
699 int i;
700
701 /*
702 * Disallow write attempts on read-only file systems;
703 * unless the file is a socket, fifo, or a block or
704 * character device resident on the file system.
705 */
706 switch (vp->v_type) {
707 case VDIR:
708 case VLNK:
709 case VREG:
710 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
711 return (EROFS);
712 break;
713 default:
714 break;
715 }
716
717 /* If immutable bit set, nobody gets to write it. */
718 if (considerFlags && (cp->c_flags & IMMUTABLE))
719 return (EPERM);
720
721 /* Otherwise, user id 0 always gets access. */
722 if (cred->cr_uid == 0)
723 return (0);
724
725 /* Otherwise, check the owner. */
726 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
727 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
728
729 /* Otherwise, check the groups. */
730 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
731 if (cp->c_gid == *gp)
732 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
733 }
734
735 /* Otherwise, check everyone else. */
736 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
737 }
738
739
740
741 /*
742 * Change the flags on a file or directory.
743 * cnode must be locked before calling.
744 */
745 int
746 hfs_chflags(vp, flags, cred, p)
747 register struct vnode *vp;
748 register u_long flags;
749 register struct ucred *cred;
750 struct proc *p;
751 {
752 register struct cnode *cp = VTOC(vp);
753 int retval;
754
755 if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
756 if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
757 return retval;
758 };
759 } else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {
760 return retval;
761 };
762
763 if (cred->cr_uid == 0) {
764 if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&
765 securelevel > 0) {
766 return EPERM;
767 };
768 cp->c_flags = flags;
769 } else {
770 if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
771 (flags & UF_SETTABLE) != flags) {
772 return EPERM;
773 };
774 cp->c_flags &= SF_SETTABLE;
775 cp->c_flags |= (flags & UF_SETTABLE);
776 }
777 cp->c_flag |= C_CHANGE;
778
779 return (0);
780 }
781
782
783 /*
784 * Perform chown operation on cnode cp;
785 * code must be locked prior to call.
786 */
/*
 * Perform chown operation on cnode cp;
 * the cnode must be locked prior to the call.
 *
 * Only supported on HFS Plus (plain HFS has no ownership on disk);
 * a no-op on MNT_UNKNOWNPERMISSIONS mounts.  When quotas are
 * enabled, usage is transferred from the old ids to the new ids,
 * with a full rollback to the original ownership if the new ids'
 * quota would be exceeded.
 */
int
hfs_chown(vp, uid, gid, cred, p)
	register struct vnode *vp;
	uid_t uid;
	gid_t gid;
	struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);
	uid_t ouid;
	gid_t ogid;
	int error = 0;
#if QUOTA
	register int i;
	int64_t change;
#endif /* QUOTA */

	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return (EOPNOTSUPP);

	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)
		return (0);

	/* VNOVAL in either id means "leave it unchanged". */
	if (uid == (uid_t)VNOVAL)
		uid = cp->c_uid;
	if (gid == (gid_t)VNOVAL)
		gid = cp->c_gid;
	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
	    (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
	    (error = suser(cred, &p->p_acflag)))
		return (error);

	ogid = cp->c_gid;
	ouid = cp->c_uid;
#if QUOTA
	if ((error = hfs_getinoquota(cp)))
		return (error);
	/*
	 * Drop the dquot references for any id that is NOT actually
	 * changing, so the usage transfer below only touches the
	 * dimensions (user and/or group) that change.
	 */
	if (ouid == uid) {
		dqrele(vp, cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;
	}
	if (ogid == gid) {
		dqrele(vp, cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;
	}

	/*
	 * Eventually need to account for (fake) a block per directory
	 *if (vp->v_type == VDIR)
	 *change = VTOVCB(vp)->blockSize;
	 *else
	 */

	/* Remove this file's block and inode usage from the old ids. */
	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(vp, cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}
#endif /* QUOTA */
	cp->c_gid = gid;
	cp->c_uid = uid;
#if QUOTA
	/* Try to charge the usage to the new ids. */
	if ((error = hfs_getinoquota(cp)) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
				goto good;
			else
				/* Inode charge failed: undo the block charge. */
				(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(vp, cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	/*
	 * The new ids' quota was exceeded (or the quota lookup
	 * failed): restore the original ownership and re-charge the
	 * usage back to the old ids (forced, so it cannot fail).
	 */
	cp->c_gid = ogid;
	cp->c_uid = ouid;
	if (hfs_getinoquota(cp) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	return (error);
good:
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");
#endif /* QUOTA */

	if (ouid != uid || ogid != gid)
		cp->c_flag |= C_CHANGE;
	/*
	 * Non-superuser ownership changes clear the set-id bits,
	 * as required for security.
	 */
	if (ouid != uid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISUID;
	if (ogid != gid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISGID;
	return (0);
}
905
906
907 /*
908 #
909 #% exchange fvp L L L
910 #% exchange tvp L L L
911 #
912 */
913 /*
914 * The hfs_exchange routine swaps the fork data in two files by
915 * exchanging some of the information in the cnode. It is used
916 * to preserve the file ID when updating an existing file, in
917 * case the file is being tracked through its file ID. Typically
918 * its used after creating a new file during a safe-save.
919 */
920
/*
 * Exchange the fork data of two files (VOP_EXCHANGE entry point).
 *
 * The hfs_exchange routine swaps the fork data in two files by
 * exchanging some of the information in the cnode.  It is used
 * to preserve the file ID when updating an existing file, in
 * case the file is being tracked through its file ID.  Typically
 * it's used after creating a new file during a safe-save.
 *
 * Both vnodes arrive locked; the on-disk swap is done by
 * ExchangeFileIDs() under the catalog and extents b-tree locks
 * (and inside a journal transaction when journaling is active),
 * after which the in-core cnode descriptors/attributes are swapped
 * to match and both cnodes are re-hashed under their new file IDs.
 */
static int
hfs_exchange(ap)
	struct vop_exchange_args /* {
		struct vnode *a_fvp;
		struct vnode *a_tvp;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *from_vp = ap->a_fvp;
	struct vnode *to_vp = ap->a_tvp;
	struct vnode *from_rvp = NULL;
	struct vnode *to_rvp = NULL;
	struct cnode *from_cp = VTOC(from_vp);
	struct cnode *to_cp = VTOC(to_vp);
	struct hfsmount *hfsmp = VTOHFS(from_vp);
	struct cat_desc tempdesc;
	struct cat_attr tempattr;
	int error = 0, started_tr = 0, grabbed_lock = 0;

	/* The files must be on the same volume. */
	if (from_vp->v_mount != to_vp->v_mount)
		return (EXDEV);

	/* Only normal files can be exchanged. */
	if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
	    (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
	    VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
		return (EINVAL);

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (hfsmp->jnl) {
		struct HFSPlusExtentDescriptor *extd;

		if (from_cp->c_datafork) {
			extd = &from_cp->c_datafork->ff_data.cf_extents[0];
			if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				return EPERM;
			}
		}

		if (to_cp->c_datafork) {
			extd = &to_cp->c_datafork->ff_data.cf_extents[0];
			if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				return EPERM;
			}
		}
	}

	from_rvp = from_cp->c_rsrc_vp;
	to_rvp = to_cp->c_rsrc_vp;

	/*
	 * If one of the resource forks is open then get the other one,
	 * so both resource forks are represented during the swap.
	 */
	if (from_rvp || to_rvp) {
		error = hfs_vgetrsrc(hfsmp, from_vp, &from_rvp, ap->a_p);
		if (error)
			return (error);
		error = hfs_vgetrsrc(hfsmp, to_vp, &to_rvp, ap->a_p);
		if (error) {
			vrele(from_rvp);
			return (error);
		}
	}

	/* Ignore any errors, we are doing a 'best effort' on flushing */
	if (from_vp)
		(void) vinvalbuf(from_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	if (to_vp)
		(void) vinvalbuf(to_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	if (from_rvp)
		(void) vinvalbuf(from_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
	if (to_rvp)
		(void) vinvalbuf(to_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);

	// XXXdbg - start a journal transaction under the global shared lock
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto Err_Exit;
		}
		started_tr = 1;
	}

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
	if (error) goto Err_Exit;

	/* The backend code always tries to delete the virtual
	 * extent id for exchanging files so we neeed to lock
	 * the extents b-tree.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
	if (error) {
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
		goto Err_Exit;
	}

	/* Do the exchange */
	error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
		from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
		from_cp->c_parentcnid, to_cp->c_parentcnid,
		from_cp->c_hint, to_cp->c_hint));

	/* Release b-tree locks in reverse acquisition order. */
	(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

	if (error != E_NONE) {
		goto Err_Exit;
	}

	/* Purge the vnodes from the name cache */
	if (from_vp)
		cache_purge(from_vp);
	if (to_vp)
		cache_purge(to_vp);

	/* Save a copy of from attributes before swapping. */
	bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
	bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));

	/*
	 * Swap the descriptors and all non-fork related attributes.
	 * (except the modify date)
	 */
	bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));

	from_cp->c_hint = 0;
	from_cp->c_fileid = from_cp->c_cnid;
	from_cp->c_itime = to_cp->c_itime;
	from_cp->c_btime = to_cp->c_btime;
	from_cp->c_atime = to_cp->c_atime;
	from_cp->c_ctime = to_cp->c_ctime;
	from_cp->c_gid = to_cp->c_gid;
	from_cp->c_uid = to_cp->c_uid;
	from_cp->c_flags = to_cp->c_flags;
	from_cp->c_mode = to_cp->c_mode;
	bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

	bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
	to_cp->c_hint = 0;
	to_cp->c_fileid = to_cp->c_cnid;
	to_cp->c_itime = tempattr.ca_itime;
	to_cp->c_btime = tempattr.ca_btime;
	to_cp->c_atime = tempattr.ca_atime;
	to_cp->c_ctime = tempattr.ca_ctime;
	to_cp->c_gid = tempattr.ca_gid;
	to_cp->c_uid = tempattr.ca_uid;
	to_cp->c_flags = tempattr.ca_flags;
	to_cp->c_mode = tempattr.ca_mode;
	bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

	/* Reinsert into the cnode hash under new file IDs*/
	hfs_chashremove(from_cp);
	hfs_chashremove(to_cp);

	hfs_chashinsert(from_cp);
	hfs_chashinsert(to_cp);
Err_Exit:
	/* Drop the resource fork references taken above, if any. */
	if (to_rvp)
		vrele(to_rvp);
	if (from_rvp)
		vrele(from_rvp);

	// XXXdbg - end the transaction and drop the global shared lock
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	return (error);
}
1095
1096
1097 /*
1098
1099 #% fsync vp L L L
1100 #
1101 vop_fsync {
1102 IN struct vnode *vp;
1103 IN struct ucred *cred;
1104 IN int waitfor;
1105 IN struct proc *p;
1106
1107 */
1108 static int
1109 hfs_fsync(ap)
1110 struct vop_fsync_args /* {
1111 struct vnode *a_vp;
1112 struct ucred *a_cred;
1113 int a_waitfor;
1114 struct proc *a_p;
1115 } */ *ap;
1116 {
1117 struct vnode *vp = ap->a_vp;
1118 struct cnode *cp = VTOC(vp);
1119 struct filefork *fp = NULL;
1120 int retval = 0;
1121 register struct buf *bp;
1122 struct timeval tv;
1123 struct buf *nbp;
1124 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
1125 int s;
1126 int wait;
1127 int retry = 0;
1128
1129 wait = (ap->a_waitfor == MNT_WAIT);
1130
1131 /* HFS directories don't have any data blocks. */
1132 if (vp->v_type == VDIR)
1133 goto metasync;
1134
1135 /*
1136 * For system files flush the B-tree header and
1137 * for regular files write out any clusters
1138 */
1139 if (vp->v_flag & VSYSTEM) {
1140 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1141 // XXXdbg
1142 if (hfsmp->jnl) {
1143 if (BTIsDirty(VTOF(vp))) {
1144 panic("hfs: system file vp 0x%x has dirty blocks (jnl 0x%x)\n",
1145 vp, hfsmp->jnl);
1146 }
1147 } else {
1148 BTFlushPath(VTOF(vp));
1149 }
1150 }
1151 } else if (UBCINFOEXISTS(vp))
1152 (void) cluster_push(vp);
1153
1154 /*
1155 * When MNT_WAIT is requested and the zero fill timeout
1156 * has expired then we must explicitly zero out any areas
1157 * that are currently marked invalid (holes).
1158 */
1159 if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
1160 UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
1161 cp->c_zftimeout != 0) {
1162 int devblksize;
1163 int was_nocache;
1164
1165 if (time.tv_sec < cp->c_zftimeout) {
1166 /* Remember that a force sync was requested. */
1167 cp->c_flag |= C_ZFWANTSYNC;
1168 goto loop;
1169 }
1170 VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
1171 was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
1172 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
1173
1174 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
1175 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
1176 off_t start = invalid_range->rl_start;
1177 off_t end = invalid_range->rl_end;
1178
1179 /* The range about to be written must be validated
1180 * first, so that VOP_CMAP() will return the
1181 * appropriate mapping for the cluster code:
1182 */
1183 rl_remove(start, end, &fp->ff_invalidranges);
1184
1185 (void) cluster_write(vp, (struct uio *) 0,
1186 fp->ff_size,
1187 invalid_range->rl_end + 1,
1188 invalid_range->rl_start,
1189 (off_t)0, devblksize,
1190 IO_HEADZEROFILL | IO_NOZERODIRTY);
1191 cp->c_flag |= C_MODIFIED;
1192 }
1193 (void) cluster_push(vp);
1194 if (!was_nocache)
1195 CLR(vp->v_flag, VNOCACHE_DATA);
1196 cp->c_flag &= ~C_ZFWANTSYNC;
1197 cp->c_zftimeout = 0;
1198 }
1199
1200 /*
1201 * Flush all dirty buffers associated with a vnode.
1202 */
1203 loop:
1204 s = splbio();
1205 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1206 nbp = bp->b_vnbufs.le_next;
1207 if ((bp->b_flags & B_BUSY))
1208 continue;
1209 if ((bp->b_flags & B_DELWRI) == 0)
1210 panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp, hfsmp);
1211 // XXXdbg
1212 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1213 if ((bp->b_flags & B_META) == 0) {
1214 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1215 bp, hfsmp->jnl);
1216 }
1217 // if journal_active() returns >= 0 then the journal is ok and we
1218 // shouldn't do anything to this locked block (because it is part
1219 // of a transaction). otherwise we'll just go through the normal
1220 // code path and flush the buffer.
1221 if (journal_active(hfsmp->jnl) >= 0) {
1222 continue;
1223 }
1224 }
1225
1226 bremfree(bp);
1227 bp->b_flags |= B_BUSY;
1228 /* Clear B_LOCKED, should only be set on meta files */
1229 bp->b_flags &= ~B_LOCKED;
1230
1231 splx(s);
1232 /*
1233 * Wait for I/O associated with indirect blocks to complete,
1234 * since there is no way to quickly wait for them below.
1235 */
1236 if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
1237 (void) bawrite(bp);
1238 else
1239 (void) VOP_BWRITE(bp);
1240 goto loop;
1241 }
1242
1243 if (wait) {
1244 while (vp->v_numoutput) {
1245 vp->v_flag |= VBWAIT;
1246 tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);
1247 }
1248
1249 // XXXdbg -- is checking for hfsmp->jnl == NULL the right
1250 // thing to do?
1251 if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
1252 /* still have some dirty buffers */
1253 if (retry++ > 10) {
1254 vprint("hfs_fsync: dirty", vp);
1255 splx(s);
1256 /*
1257 * Looks like the requests are not
1258 * getting queued to the driver.
1259 * Retrying here causes a cpu bound loop.
1260 * Yield to the other threads and hope
1261 * for the best.
1262 */
1263 (void)tsleep((caddr_t)&vp->v_numoutput,
1264 PRIBIO + 1, "hfs_fsync", hz/10);
1265 retry = 0;
1266 } else {
1267 splx(s);
1268 }
1269 /* try again */
1270 goto loop;
1271 }
1272 }
1273 splx(s);
1274
1275 metasync:
1276 tv = time;
1277 if (vp->v_flag & VSYSTEM) {
1278 if (VTOF(vp)->fcbBTCBPtr != NULL)
1279 BTSetLastSync(VTOF(vp), tv.tv_sec);
1280 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1281 } else /* User file */ {
1282 retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);
1283
1284 /* When MNT_WAIT is requested push out any delayed meta data */
1285 if ((retval == 0) && wait && cp->c_hint &&
1286 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
1287 hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);
1288 }
1289 }
1290
1291 return (retval);
1292 }
1293
/*
 * Sync an hfs catalog b-tree node.
 *
 * Scans the catalog vnode's dirty buffer list for the buffer whose
 * logical block number matches 'node' and, if it is delayed-write
 * and not pinned by a transaction set (B_LOCKED), writes it out
 * synchronously.  Journaled volumes skip this entirely since the
 * journal handles metadata consistency.
 *
 * Always returns 0; failures (including failure to take the
 * catalog metafile lock) are silently treated as success.
 */
static int
hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
{
	struct vnode *vp;
	struct buf *bp;
	struct buf *nbp;
	int s;

	vp = HFSTOVCB(hfsmp)->catalogRefNum;

	// XXXdbg - don't need to do this on a journaled volume
	if (hfsmp->jnl) {
		return 0;
	}

	/* NOTE(review): lock failure is deliberately ignored (best effort). */
	if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)
		return (0);

	/*
	 * Look for a matching node that has been delayed
	 * but is not part of a set (B_LOCKED).
	 */
	s = splbio();
	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if (bp->b_flags & B_BUSY)
			continue;
		if (bp->b_lblkno == node) {
			/* Locked buffers belong to a transaction set; leave them. */
			if (bp->b_flags & B_LOCKED)
				break;

			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/* VOP_BWRITE drops the buffer; spl already released. */
			splx(s);
			(void) VOP_BWRITE(bp);
			goto exit;
		}
	}
	splx(s);
exit:
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	return (0);
}
1339
/*
 * Flush dirty buffers of an hfs B-tree file.
 *
 * Walks the vnode's dirty buffer list at splbio and issues async
 * writes.  When 'sync_transaction' is non-zero, only buffers marked
 * B_LOCKED (i.e. part of the current transaction set) are written;
 * otherwise all dirty, non-busy buffers are flushed.  On journaled
 * volumes, locked metadata buffers belonging to an active journal
 * transaction are left alone.  Finishes by recording the sync time
 * in the B-tree control block and clearing the cnode's dirty flags.
 *
 * Always returns 0.
 */
__private_extern__
int
hfs_btsync(struct vnode *vp, int sync_transaction)
{
	struct cnode *cp = VTOC(vp);
	register struct buf *bp;
	struct timeval tv;
	struct buf *nbp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	int s;

	/*
	 * Flush all dirty buffers associated with b-tree.
	 */
loop:
	s = splbio();

	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);

		// XXXdbg
		if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
			if ((bp->b_flags & B_META) == 0) {
				panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
					  bp, hfsmp->jnl);
			}
			// if journal_active() returns >= 0 then the journal is ok and we
			// shouldn't do anything to this locked block (because it is part
			// of a transaction).  otherwise we'll just go through the normal
			// code path and flush the buffer.
			if (journal_active(hfsmp->jnl) >= 0) {
				continue;
			}
		}

		/* In transaction-sync mode, skip buffers outside the set. */
		if (sync_transaction && !(bp->b_flags & B_LOCKED))
			continue;

		bremfree(bp);
		bp->b_flags |= B_BUSY;
		bp->b_flags &= ~B_LOCKED;

		splx(s);

		(void) bawrite(bp);

		/* Restart the scan: bawrite may have slept, list may have changed. */
		goto loop;
	}
	splx(s);

	tv = time;
	if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
		(void) BTSetLastSync(VTOF(vp), tv.tv_sec);
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);

	return 0;
}
1401
1402 /*
1403 * Rmdir system call.
1404 #% rmdir dvp L U U
1405 #% rmdir vp L U U
1406 #
1407 vop_rmdir {
1408 IN WILLRELE struct vnode *dvp;
1409 IN WILLRELE struct vnode *vp;
1410 IN struct componentname *cnp;
1411
1412 */
/*
 * Remove a directory.
 *
 * Verifies the target is not ".", is empty, and is mutable; then
 * deletes its catalog record inside a journal transaction (if the
 * volume is journaled), updates the parent's link/entry counts and
 * timestamps, and marks the cnode so hfs_inactive reclaims it.
 *
 * Both dvp and vp are released (vput) on all paths, per the
 * WILLRELE contract in the vop_rmdir description above.
 */
static int
hfs_rmdir(ap)
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct proc *p = ap->a_cnp->cn_proc;
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount * hfsmp;
	struct timeval tv;
	int error = 0, started_tr = 0, grabbed_lock = 0;

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	if (dcp == cp) {
		vrele(dvp);
		vput(vp);
		return (EINVAL);	/* cannot remove "." */
	}

#if QUOTA
	(void)hfs_getinoquota(cp);
#endif

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto out;
		}
		started_tr = 1;
	}

	/*
	 * Verify the directory is empty (and valid).
	 * (Rmdir ".." won't be valid since
	 *  ".." will contain a reference to
	 *  the current directory and thus be
	 *  non-empty.)
	 */
	if (cp->c_entries != 0) {
		error = ENOTEMPTY;
		goto out;
	}
	if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the namei cache: */
	cache_purge(vp);

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (error) goto out;

	/* Re-check emptiness under the catalog lock. */
	if (cp->c_entries > 0)
		panic("hfs_rmdir: attempting to delete a non-empty directory!");
	/* Remove entry from catalog */
	error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
	if (error) goto out;

#if QUOTA
	(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

	/* The parent lost a child */
	if (dcp->c_entries > 0)
		dcp->c_entries--;
	if (dcp->c_nlink > 0)
		dcp->c_nlink--;
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	tv = time;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);

	hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

	cp->c_mode = 0;  /* Makes the vnode go away...see inactive */
	cp->c_flag |= C_NOEXISTS;
out:
	if (dvp)
		vput(dvp);
	vput(vp);

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	return (error);
}
1518
1519 /*
1520
1521 #% remove dvp L U U
1522 #% remove vp L U U
1523 #
1524 vop_remove {
1525 IN WILLRELE struct vnode *dvp;
1526 IN WILLRELE struct vnode *vp;
1527 IN struct componentname *cnp;
1528
1529 */
1530
/*
 * Remove a file or hard link.
 *
 * Three cases are handled (see the comment mid-function):
 *   1. hard link           -> delete just the link record; when the
 *                             last link goes, rename the backing inode
 *                             into the orphan namespace for later cleanup.
 *   2. file busy (in use)  -> rename it into the hidden private
 *                             metadata directory; space is reclaimed
 *                             when the last user closes it.
 *   3. file not in use     -> truncate forks and delete the catalog
 *                             record outright.
 *
 * Directories are redirected to hfs_rmdir().  Both dvp and vp are
 * released on all paths (WILLRELE).  The whole operation runs inside
 * a journal transaction on journaled volumes.
 */
static int
hfs_remove(ap)
	struct vop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *rvp = NULL;
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount *hfsmp;
	struct proc *p = current_proc();
	int dataforkbusy = 0;
	int rsrcforkbusy = 0;
	int truncated = 0;
	struct timeval tv;
	int error = 0;
	int started_tr = 0, grabbed_lock = 0;

	/* Redirect directories to rmdir */
	if (vp->v_type == VDIR)
		return (hfs_rmdir(ap));

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	/* vp must actually be a child of dvp. */
	if (cp->c_parentcnid != dcp->c_cnid) {
		error = EINVAL;
		goto out;
	}

	/* Make sure a remove is permitted */
	if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
	    (VTOC(dvp)->c_flags & APPEND) ||
	    VNODE_IS_RSRC(vp)) {
		error = EPERM;
		goto out;
	}

	/*
	 * Acquire a vnode for a non-empty resource fork.
	 * (needed for VOP_TRUNCATE)
	 */
	if (cp->c_blocks - VTOF(vp)->ff_blocks) {
		error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);
		if (error)
			goto out;
	}

	// XXXdbg - don't allow deleting the journal or journal_info_block
	if (hfsmp->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_data.cf_extents[0];
		if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
			error = EPERM;
			goto out;
		}
	}

	/*
	 * Check if this file is being used.
	 *
	 * The namei done for the remove took a reference on the
	 * vnode (vp). And we took a ref on the resource vnode (rvp).
	 * Hence set 1 in the tookref parameter of ubc_isinuse().
	 */
	if (UBCISVALID(vp) && ubc_isinuse(vp, 1))
		dataforkbusy = 1;
	if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))
		rsrcforkbusy = 1;

	/*
	 * Carbon semantics prohibit deleting busy files.
	 * (enforced when NODELETEBUSY is requested)
	 */
	if ((dataforkbusy || rsrcforkbusy) &&
	    ((ap->a_cnp->cn_flags & NODELETEBUSY) ||
	     (hfsmp->hfs_private_metadata_dir == 0))) {
		error = EBUSY;
		goto out;
	}

#if QUOTA
	(void)hfs_getinoquota(cp);
#endif /* QUOTA */

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto out;
		}
		started_tr = 1;
	}

	/* Remove our entry from the namei cache. */
	cache_purge(vp);

	// XXXdbg - if we're journaled, kill any dirty symlink buffers
	if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
		struct buf *bp, *nbp;

	  recheck:
		for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
			nbp = bp->b_vnbufs.le_next;

			if ((bp->b_flags & B_BUSY)) {
				// if it was busy, someone else must be dealing
				// with it so just move on.
				continue;
			}

			if (!(bp->b_flags & B_META)) {
				panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);
			}

			// if it's part of the current transaction, kill it.
			if (bp->b_flags & B_LOCKED) {
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				journal_kill_block(hfsmp->jnl, bp);
				/* journal_kill_block may sleep; rescan from the top. */
				goto recheck;
			}
		}
	}
	// XXXdbg

	/*
	 * Truncate any non-busy forks.  Busy forks will
	 * get truncated when their vnode goes inactive.
	 *
	 * (Note: hard links are truncated in VOP_INACTIVE)
	 */
	if ((cp->c_flag & C_HARDLINK) == 0) {
		int mode = cp->c_mode;

		if (!dataforkbusy && cp->c_datafork->ff_blocks != 0) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);
			cp->c_mode = mode;
			if (error)
				goto out;
			truncated = 1;
		}
		if (!rsrcforkbusy && rvp) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);
			cp->c_mode = mode;
			if (error && !dataforkbusy)
				goto out;
			else {
				/*
				 * XXX could also force an update on vp
				 * and fail the remove.
				 */
				error = 0;
			}
			truncated = 1;
		}
	}
	/*
	 * There are 3 remove cases to consider:
	 *   1. File is a hardlink    ==> remove the link
	 *   2. File is busy (in use) ==> move/rename the file
	 *   3. File is not in use    ==> remove the file
	 */

	if (cp->c_flag & C_HARDLINK) {
		struct cat_desc desc;

		if ((ap->a_cnp->cn_flags & HASBUF) == 0 ||
		    ap->a_cnp->cn_nameptr[0] == '\0') {
			error = ENOENT;	/* name missing! */
			goto out;
		}

		/* Setup a descriptor for the link */
		bzero(&desc, sizeof(desc));
		desc.cd_nameptr = ap->a_cnp->cn_nameptr;
		desc.cd_namelen = ap->a_cnp->cn_namelen;
		desc.cd_parentcnid = dcp->c_cnid;
		/* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
		desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		/* Delete the link record */
		error = cat_delete(hfsmp, &desc, &cp->c_attr);

		if ((error == 0) && (--cp->c_nlink < 1)) {
			char inodename[32];
			char delname[32];
			struct cat_desc to_desc;
			struct cat_desc from_desc;

			/*
			 * This is now essentially an open deleted file.
			 * Rename it to reflect this state which makes
			 * orphan file cleanup easier (see hfs_remove_orphans).
			 * Note: a rename failure here is not fatal.
			 */
			MAKE_INODE_NAME(inodename, cp->c_rdev);
			bzero(&from_desc, sizeof(from_desc));
			from_desc.cd_nameptr = inodename;
			from_desc.cd_namelen = strlen(inodename);
			from_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
			from_desc.cd_flags = 0;
			from_desc.cd_cnid = cp->c_fileid;

			MAKE_DELETED_NAME(delname, cp->c_fileid);
			bzero(&to_desc, sizeof(to_desc));
			to_desc.cd_nameptr = delname;
			to_desc.cd_namelen = strlen(delname);
			to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
			to_desc.cd_flags = 0;
			to_desc.cd_cnid = cp->c_fileid;

			(void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
					&to_desc, (struct cat_desc *)NULL);
			cp->c_flag |= C_DELETED;
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

		/* All done with component name... */
		if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME))
			FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);

		if (error != 0)
			goto out;

		cp->c_flag |= C_CHANGE;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));

	} else if (dataforkbusy || rsrcforkbusy) {
		char delname[32];
		struct cat_desc to_desc;
		struct cat_desc todir_desc;

		/*
		 * Orphan this file (move to hidden directory).
		 */
		bzero(&todir_desc, sizeof(todir_desc));
		todir_desc.cd_parentcnid = 2;

		MAKE_DELETED_NAME(delname, cp->c_fileid);
		bzero(&to_desc, sizeof(to_desc));
		to_desc.cd_nameptr = delname;
		to_desc.cd_namelen = strlen(delname);
		to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
		to_desc.cd_flags = 0;
		to_desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
				&to_desc, (struct cat_desc *)NULL);

		// XXXdbg - only bump this count if we were successful
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries++;
		}
		(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

		cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;
		--cp->c_nlink;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

	} else /* Not busy */ {

		if (vp->v_type == VDIR && cp->c_entries > 0)
			panic("hfs_remove: attempting to delete a non-empty directory!");
		if (vp->v_type != VDIR && cp->c_blocks > 0)
			panic("hfs_remove: attempting to delete a non-empty file!");

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && error != ENXIO && truncated) {
			if ((cp->c_datafork && cp->c_datafork->ff_data.cf_size != 0) ||
			    (cp->c_rsrcfork && cp->c_rsrcfork->ff_data.cf_size != 0)) {
				panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
					  error, cp->c_datafork->ff_data.cf_size, cp->c_rsrcfork->ff_data.cf_size);
			} else {
				printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
					   cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
			}
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		cp->c_mode = 0;
		cp->c_flag |= C_CHANGE | C_NOEXISTS;
		--cp->c_nlink;
		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
	}

	/*
	 * All done with this cnode's descriptor...
	 *
	 * Note: all future catalog calls for this cnode must be
	 * by fileid only.  This is OK for HFS (which doesn't have
	 * file thread records) since HFS doesn't support hard
	 * links or the removal of busy files.
	 */
	cat_releasedesc(&cp->c_desc);

	/* In all three cases the parent lost a child */
	if (dcp->c_entries > 0)
		dcp->c_entries--;
	if (dcp->c_nlink > 0)
		dcp->c_nlink--;
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	tv = time;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	if (rvp)
		vrele(rvp);
	VOP_UNLOCK(vp, 0, p);
	// XXXdbg - try to prevent the lost ubc_info panic
	if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
		(void) ubc_uncache(vp);
	}
	vrele(vp);
	vput(dvp);

	return (0);

out:
	if (rvp)
		vrele(rvp);

	/* Commit the truncation to the catalog record */
	if (truncated) {
		cp->c_flag |= C_CHANGE | C_UPDATE;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);
	}
	vput(vp);
	vput(dvp);

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	return (error);
}
1922
1923
1924 __private_extern__ void
1925 replace_desc(struct cnode *cp, struct cat_desc *cdp)
1926 {
1927 /* First release allocated name buffer */
1928 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
1929 char *name = cp->c_desc.cd_nameptr;
1930
1931 cp->c_desc.cd_nameptr = 0;
1932 cp->c_desc.cd_namelen = 0;
1933 cp->c_desc.cd_flags &= ~CD_HASBUF;
1934 FREE(name, M_TEMP);
1935 }
1936 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
1937
1938 /* Cnode now owns the name buffer */
1939 cdp->cd_nameptr = 0;
1940 cdp->cd_namelen = 0;
1941 cdp->cd_flags &= ~CD_HASBUF;
1942 }
1943
1944
1945 /*
1946 #
1947 #% rename fdvp U U U
1948 #% rename fvp U U U
1949 #% rename tdvp L U U
1950 #% rename tvp X U U
1951 #
1952 vop_rename {
1953 IN WILLRELE struct vnode *fdvp;
1954 IN WILLRELE struct vnode *fvp;
1955 IN struct componentname *fcnp;
1956 IN WILLRELE struct vnode *tdvp;
1957 IN WILLRELE struct vnode *tvp;
1958 IN struct componentname *tcnp;
1959 };
1960 */
1961 /*
1962 * Rename a cnode.
1963 *
1964 * The VFS layer guarantees that source and destination will
1965 * either both be directories, or both not be directories.
1966 *
1967 * When the target is a directory, hfs_rename must ensure
1968 * that it is empty.
1969 *
1970 * The rename system call is responsible for freeing
1971 * the pathname buffers (ie no need to call VOP_ABORTOP).
1972 */
1973
static int
hfs_rename(ap)
	struct vop_rename_args /* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap;
{
	struct vnode *tvp = ap->a_tvp;
	struct vnode *tdvp = ap->a_tdvp;
	struct vnode *fvp = ap->a_fvp;
	struct vnode *fdvp = ap->a_fdvp;
	struct componentname *tcnp = ap->a_tcnp;
	struct componentname *fcnp = ap->a_fcnp;
	struct proc *p = fcnp->cn_proc;
	struct cnode *fcp = NULL;
	struct cnode *fdcp = NULL;
	struct cnode *tdcp = VTOC(tdvp);
	struct cat_desc from_desc;
	struct cat_desc to_desc;
	struct cat_desc out_desc;
	struct hfsmount *hfsmp;
	struct timeval tv;
	int fdvp_locked, fvp_locked, tdvp_locked;	/* track who we must unlock at 'out' */
	int tvp_deleted;
	int started_tr = 0, grabbed_lock = 0;
	int error = 0;

	hfsmp = VTOHFS(tdvp);

	/* Establish our vnode lock state. */
	tdvp_locked = 1;
	fdvp_locked = 0;
	fvp_locked = 0;
	tvp_deleted = 0;

	/*
	 * When fvp matches tvp they must be case variants
	 * or hard links.
	 *
	 * For the hardlink case there can be an extra ref on fvp.
	 */
	if (fvp == tvp) {
		/* fvp may already be locked by this thread (same vnode twice). */
		if (VOP_ISLOCKED(fvp) &&
		    (VTOC(fvp)->c_lock.lk_lockholder == p->p_pid) &&
		    (VTOC(fvp)->c_lock.lk_lockthread == current_thread())) {
			fvp_locked = 1;
			vrele(fvp);	/* drop the extra ref */
		}
		tvp = NULL;
		/*
		 * If this a hard link and its not a case
		 * variant then keep tvp around for removal.
		 */
		if ((VTOC(fvp)->c_flag & C_HARDLINK) &&
		    ((fdvp != tdvp) ||
		     (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen,
				  tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) {
			tvp = fvp;
		}
	}

	/*
	 * Check for cross-device rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto out;
	}

	/*
	 * Make sure "from" vnode and its parent are changeable.
	 */
	if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) ||
	    (VTOC(fdvp)->c_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/*
	 * Be sure we are not renaming ".", "..", or an alias of ".".
	 */
	if ((fvp->v_type == VDIR) &&
	    (((fcnp->cn_namelen == 1) && (fcnp->cn_nameptr[0] == '.')) ||
	      (fdvp == fvp) ||
	      (fcnp->cn_flags&ISDOTDOT))) {
		error = EINVAL;
		goto out;
	}

	/*
	 * If the destination parent directory is "sticky", then the
	 * user must own the parent directory, or the destination of
	 * the rename, otherwise the destination may not be changed
	 * (except by root). This implements append-only directories.
	 *
	 * Note that checks for immutable, write access, and a non-empty
	 * target are done by the call to VOP_REMOVE.
	 */
	if (tvp && (tdcp->c_mode & S_ISTXT) &&
	    (tcnp->cn_cred->cr_uid != 0) &&
	    (tcnp->cn_cred->cr_uid != tdcp->c_uid) &&
	    (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) {
		error = EPERM;
		goto out;
	}

	/*
	 * All done with preflighting.
	 *
	 * We now break the call into two transactions:
	 * 1 - Remove the destionation (if any) using VOP_REMOVE,
	 *     which in itself is a complete transaction.
	 *
	 * 2 - Rename source to destination.
	 *
	 * Since all the preflighting is done, we assume that a
	 * rename failure is unlikely once part 1 is complete.
	 * Breaking rename into two transactions buys us a much
	 * simpler implementation with respect to the locking
	 * protocol.  There are only 3 vnodes to worry about
	 * locking in the correct order (instead of 4).
	 */

	/*
	 * Part 1 - If the destination exists then it needs to be removed.
	 */
	if (tvp) {
		/*
		 * VOP_REMOVE will vput tdvp so we better bump its
		 * ref count and relockit, always set tvp to NULL
		 * afterwards to indicate that we're done with it.
		 */
		VREF(tdvp);

		if (tvp == fvp) {
			if (fvp_locked) {
				VREF(fvp);
			} else {
				error = vget(fvp, LK_EXCLUSIVE | LK_RETRY, p);
				if (error)
					goto out;
				fvp_locked = 1;
			}
		} else {
			cache_purge(tvp);
		}

		/* Clear SAVENAME to keep VOP_REMOVE from smashing tcnp. */
		tcnp->cn_flags &= ~SAVENAME;

		if (tvp->v_type == VDIR)
			error = VOP_RMDIR(tdvp, tvp, tcnp);
		else
			error = VOP_REMOVE(tdvp, tvp, tcnp);

		/* Get lock states back in sync. */
		tdvp_locked = 0;
		if (tvp == fvp)
			fvp_locked = 0;
		tvp = NULL;	/* all done with tvp */
		tvp_deleted = 1;

		if (error)
			goto out;  /* couldn't remove destination! */
	}
	/*
	 * All done with tvp.
	 *
	 * For POSIX compliance, if tvp was removed the only
	 * error we can return from this point on is EIO.
	 */

	/*
	 * Part 2 - rename source to destination
	 */

	/*
	 * Lock the vnodes before starting a journal transaction.
	 */
	if (fdvp != tdvp) {
		/*
		 * fvp is a child and must be locked last.
		 */
		if (fvp_locked) {
			VOP_UNLOCK(fvp, 0, p);
			fvp_locked = 0;
		}
		/*
		 * If fdvp is the parent of tdvp then it needs to be locked first.
		 */
		if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid)) {
			if (tdvp_locked) {
				VOP_UNLOCK(tdvp, 0, p);
				tdvp_locked = 0;
			}
			if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
				goto out;
			fdvp_locked = 1;
			if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
				goto out;
			tdvp_locked = 1;

		} else /* Lock tdvp then fdvp */ {
			if (!tdvp_locked) {
				if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
					goto out;
				tdvp_locked = 1;
			}
			if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
				goto out;
			fdvp_locked = 1;
		}
	} else if (!tdvp_locked) {
			/*
			 * fvp is a child and must be locked last.
			 */
			if (fvp_locked) {
				VOP_UNLOCK(fvp, 0, p);
				fvp_locked = 0;
			}
			if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
				goto out;
			tdvp_locked = 1;
	}

	/* Now its safe to lock fvp */
	if (!fvp_locked) {
		if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))
			goto out;
		fvp_locked = 1;
	}

	fdcp = VTOC(fdvp);
	fcp = VTOC(fvp);

	/* Wrap the catalog rename in a journal transaction. */
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto out;
		}
		started_tr = 1;
	}

	cache_purge(fvp);

	/* Build source and destination catalog descriptors for cat_rename. */
	bzero(&from_desc, sizeof(from_desc));
	from_desc.cd_nameptr = fcnp->cn_nameptr;
	from_desc.cd_namelen = fcnp->cn_namelen;
	from_desc.cd_parentcnid = fdcp->c_cnid;
	from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
	from_desc.cd_cnid = fcp->c_cnid;

	bzero(&to_desc, sizeof(to_desc));
	to_desc.cd_nameptr = tcnp->cn_nameptr;
	to_desc.cd_namelen = tcnp->cn_namelen;
	to_desc.cd_parentcnid = tdcp->c_cnid;
	to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
	to_desc.cd_cnid = fcp->c_cnid;

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (error)
		goto out;

	error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
	if (error)
		goto out;

	/* Update cnode's catalog descriptor */
	replace_desc(fcp, &out_desc);

	hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
	              (fdcp->c_cnid == kHFSRootFolderID));
	hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
	              (tdcp->c_cnid == kHFSRootFolderID));

	VOP_UNLOCK(fvp, 0, p);
	fcp = NULL;
	fvp_locked = 0;
	/* All done with fvp. */

	/* Update both parent directories. */
	tv = time;
	if (fdvp != tdvp) {
		tdcp->c_nlink++;
		tdcp->c_entries++;
		if (fdcp->c_nlink > 0)
			fdcp->c_nlink--;
		if (fdcp->c_entries > 0)
			fdcp->c_entries--;
		fdcp->c_flag |= C_CHANGE | C_UPDATE;
		(void) VOP_UPDATE(fdvp, &tv, &tv, 0);
	}
	tdcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
	tdcp->c_flag |= C_CHANGE | C_UPDATE;
	(void) VOP_UPDATE(tdvp, &tv, &tv, 0);

out:
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	/* Drop whatever locks we still hold, then our references. */
	if (fvp_locked) {
		VOP_UNLOCK(fvp, 0, p);
	}
	if (fdvp_locked) {
		VOP_UNLOCK(fdvp, 0, p);
	}
	if (tdvp_locked) {
		VOP_UNLOCK(tdvp, 0, p);
	}
	if (tvp && (tvp != fvp)) {
		if (tvp != tdvp)
			VOP_UNLOCK(tvp, 0, p);
		vrele(tvp);
	}

	vrele(fvp);
	vrele(fdvp);
	vrele(tdvp);

	/* After tvp is removed the only acceptable error is EIO */
	if ((error == ENOSPC) && tvp_deleted)
		error = EIO;

	return (error);
}
2313
2314
2315
2316 /*
2317 * Mkdir system call
2318 #% mkdir dvp L U U
2319 #% mkdir vpp - L -
2320 #
2321 vop_mkdir {
2322 IN WILLRELE struct vnode *dvp;
2323 OUT struct vnode **vpp;
2324 IN struct componentname *cnp;
2325 IN struct vattr *vap;
2326
2327 We are responsible for freeing the namei buffer,
2328 it is done in hfs_makenode()
2329 */
2330
2331 static int
2332 hfs_mkdir(ap)
2333 struct vop_mkdir_args /* {
2334 struct vnode *a_dvp;
2335 struct vnode **a_vpp;
2336 struct componentname *a_cnp;
2337 struct vattr *a_vap;
2338 } */ *ap;
2339 {
2340 struct vattr *vap = ap->a_vap;
2341
2342 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
2343 ap->a_dvp, ap->a_vpp, ap->a_cnp));
2344 }
2345
2346
2347 /*
2348 * symlink -- make a symbolic link
2349 #% symlink dvp L U U
2350 #% symlink vpp - U -
2351 #
2352 # XXX - note that the return vnode has already been VRELE'ed
2353 # by the filesystem layer. To use it you must use vget,
2354 # possibly with a further namei.
2355 #
2356 vop_symlink {
2357 IN WILLRELE struct vnode *dvp;
2358 OUT WILLRELE struct vnode **vpp;
2359 IN struct componentname *cnp;
2360 IN struct vattr *vap;
2361 IN char *target;
2362
2363 We are responsible for freeing the namei buffer,
2364 it is done in hfs_makenode().
2365
2366 */
2367
static int
hfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	register struct vnode *vp, **vpp = ap->a_vpp;
	struct hfsmount *hfsmp;
	struct filefork *fp;
	int len, error;
	struct buf *bp = NULL;

	/* HFS standard disks don't support symbolic links */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EOPNOTSUPP);
	}

	/* Check for empty target name */
	if (ap->a_target[0] == 0) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EINVAL);
	}


	hfsmp = VTOHFS(ap->a_dvp);

	/*
	 * Create the vnode.  hfs_makenode() consumes the a_dvp reference
	 * and frees the namei buffer whether it succeeds or fails.
	 */
	if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
			ap->a_dvp, vpp, ap->a_cnp))) {
		return (error);
	}

	vp = *vpp;
	len = strlen(ap->a_target);
	fp = VTOF(vp);
	/* Allocate link data in whole volume-block units. */
	fp->ff_clumpsize = VTOVCB(vp)->blockSize;

#if QUOTA
	(void)hfs_getinoquota(VTOC(vp));
#endif /* QUOTA */

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			hfs_global_shared_lock_release(hfsmp);
			vput(vp);
			return error;
		}
	}

	/* Allocate space for the link */
	error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
			ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);
	if (error)
		goto out;	/* XXX need to remove link */

	/* Write the link to disk */
	bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
			0, 0, BLK_META);
	if (hfsmp->jnl) {
		journal_modify_block_start(hfsmp->jnl, bp);
	}
	/* Zero the whole buffer so the tail past 'len' holds no stale data. */
	bzero(bp->b_data, bp->b_bufsize);
	bcopy(ap->a_target, bp->b_data, len);
	if (hfsmp->jnl) {
		/* The journal takes over the buffer and writes it out later. */
		journal_modify_block_end(hfsmp->jnl, bp);
	} else {
		/* Non-journaled: start an async write ourselves. */
		bawrite(bp);
	}
out:
	if (hfsmp->jnl) {
		journal_end_transaction(hfsmp->jnl);
	}
	hfs_global_shared_lock_release(hfsmp);
	vput(vp);
	return (error);
}
2453
2454
/*
 * Dummy dirents to simulate the "." and ".." entries of the directory
 * in an hfs filesystem. HFS doesn't provide these on disk. Note that
 * the size of these entries is the smallest needed to represent them
 * (only 12 bytes each).
 */
/*
 * NOTE: despite the name, this template is used for every directory,
 * not just the root.  The d_fileno fields below are placeholders;
 * hfs_readdir() patches in the directory's cnid and parent cnid
 * before the entries are copied out to the caller.
 */
static hfsdotentry rootdots[2] = {
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		1,				/* d_namlen */
		"."				/* d_name */
	},
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		2,				/* d_namlen */
		".."				/* d_name */
	}
};
2477
2478 /* 4.3 Note:
2479 * There is some confusion as to what the semantics of uio_offset are.
2480 * In ufs, it represents the actual byte offset within the directory
2481 * "file." HFS, however, just uses it as an entry counter - essentially
2482 * assuming that it has no meaning except to the hfs_readdir function.
2483 * This approach would be more efficient here, but some callers may
2484 * assume the uio_offset acts like a byte offset. NFS in fact
2485 * monkeys around with the offset field a lot between readdir calls.
2486 *
2487 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
2488 * fields is a mess as well. The libc function readdir() returns
2489 * NULL (indicating the end of a directory) when either
2490 * the getdirentries() syscall (which calls this and returns
2491 * the size of the buffer passed in less the value of uiop->uio_resid)
2492 * returns 0, or a direct record with a d_reclen of zero.
2493 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2494 * of the directory by testing uiop->uio_resid == 0. The solution
2495 * is to pad the size of the last struct direct in a given
2496 * block to fill the block if we are not at the end of the directory.
2497 */
2498
2499
/*
 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
 * returned by the stat() call as the block size. This is mentioned in the man page for getdirentries():
 * "Nbytes must be greater than or equal to the block size associated with the file,
 * see stat(2)". Might as well settle on the same size as ufs. Second, this makes sure there is enough
 * room for the . and .. entries that have to be added manually.
 */
2507
2508 /*
2509 #% readdir vp L L L
2510 #
2511 vop_readdir {
2512 IN struct vnode *vp;
2513 INOUT struct uio *uio;
2514 IN struct ucred *cred;
2515 INOUT int *eofflag;
2516 OUT int *ncookies;
2517 INOUT u_long **cookies;
2518 */
static int
hfs_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *vp;
		struct uio *uio;
		struct ucred *cred;
		int *eofflag;
		int *ncookies;
		u_long **cookies;
	} */ *ap;
{
	register struct uio *uio = ap->a_uio;
	struct cnode *cp = VTOC(ap->a_vp);
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
	struct proc *p = current_proc();
	off_t off = uio->uio_offset;	/* starting offset: used to detect "no progress" and to rebase cookies */
	int retval = 0;
	int eofflag = 0;
	void *user_start = NULL;
	int user_len;

	/* We assume it's all one big buffer... */
	if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE)
		return EINVAL;

	// XXXdbg
	// We have to lock the user's buffer here so that we won't
	// fault on it after we've acquired a shared lock on the
	// catalog file.  The issue is that you can get a 3-way
	// deadlock if someone else starts a transaction and then
	// tries to lock the catalog file but can't because we're
	// here and we can't service our page fault because VM is
	// blocked trying to start a transaction as a result of
	// trying to free up pages for our page fault.  It's messy
	// but it does happen on dual-processors that are paging
	// heavily (see radar 3082639 for more info).  By locking
	// the buffer up-front we prevent ourselves from faulting
	// while holding the shared catalog file lock.
	//
	// Fortunately this and hfs_search() are the only two places
	// currently (10/30/02) that can fault on user data with a
	// shared lock on the catalog file.
	//
	if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
		user_start = uio->uio_iov->iov_base;
		user_len = uio->uio_iov->iov_len;

		if ((retval = vslock(user_start, user_len)) != 0) {
			return retval;
		}
	}


	/* Create the entries for . and .. */
	if (uio->uio_offset < sizeof(rootdots)) {
		caddr_t dep;
		size_t dotsize;

		/*
		 * NOTE(review): rootdots is a file-scope template patched
		 * in place for whichever directory is being read; two
		 * concurrent readdirs of different directories could race
		 * on these fields -- confirm serialization elsewhere.
		 */
		rootdots[0].d_fileno = cp->c_cnid;
		rootdots[1].d_fileno = cp->c_parentcnid;

		if (uio->uio_offset == 0) {
			dep = (caddr_t) &rootdots[0];
			dotsize = 2* sizeof(struct hfsdotentry);
		} else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
			dep = (caddr_t) &rootdots[1];
			dotsize = sizeof(struct hfsdotentry);
		} else {
			/* Offsets inside the dot area must land on an entry boundary. */
			retval = EINVAL;
			goto Exit;
		}

		retval = uiomove(dep, dotsize, uio);
		if (retval != 0)
			goto Exit;
	}

	/* If there are no children then we're done */
	if (cp->c_entries == 0) {
		eofflag = 1;
		retval = 0;
		goto Exit;
	}

	/* Lock catalog b-tree */
	retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (retval) goto Exit;

	/* Copy the real directory entries from the catalog into the uio. */
	retval = cat_getdirentries(hfsmp, &cp->c_desc, uio, &eofflag);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (retval != E_NONE) {
		goto Exit;
	}

	/* were we already past eof ? */
	if (uio->uio_offset == off) {
		retval = E_NONE;
		goto Exit;
	}

	cp->c_flag |= C_ACCESS;
	/* Bake any cookies */
	if (!retval && ap->a_ncookies != NULL) {
		struct dirent* dpStart;
		struct dirent* dpEnd;
		struct dirent* dp;
		int ncookies;
		u_long *cookies;
		u_long *cookiep;

		/*
		 * Only the NFS server uses cookies, and it loads the
		 * directory block into system space, so we can just look at
		 * it directly.
		 */
		if (uio->uio_segflg != UIO_SYSSPACE)
			panic("hfs_readdir: unexpected uio from NFS server");
		/* First pass: count the records just written into the buffer. */
		dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off));
		dpEnd = (struct dirent *) uio->uio_iov->iov_base;
		for (dp = dpStart, ncookies = 0;
		     dp < dpEnd && dp->d_reclen != 0;
		     dp = (struct dirent *)((caddr_t)dp + dp->d_reclen))
			ncookies++;
		MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
		/* Second pass: emit one cumulative-offset cookie per record. */
		for (dp = dpStart, cookiep = cookies;
		     dp < dpEnd;
		     dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
			off += dp->d_reclen;
			*cookiep++ = (u_long) off;
		}
		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;
	}

Exit:;
	/* Unwire the user buffer locked at entry for the journal case. */
	if (hfsmp->jnl && user_start) {
		vsunlock(user_start, user_len, TRUE);
	}

	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;

	return (retval);
}
2666
2667
2668 /*
2669 * Return target name of a symbolic link
2670 #% readlink vp L L L
2671 #
2672 vop_readlink {
2673 IN struct vnode *vp;
2674 INOUT struct uio *uio;
2675 IN struct ucred *cred;
2676 */
2677
static int
hfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int retval;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;

	if (vp->v_type != VLNK)
		return (EINVAL);

	cp = VTOC(vp);
	fp = VTOF(vp);

	/* Zero length sym links are not allowed */
	if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
		/* An impossible link size indicates on-disk corruption. */
		VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;
		return (EINVAL);
	}

	/* Cache the path so we don't waste buffer cache resources */
	if (fp->ff_symlinkptr == NULL) {
		struct buf *bp = NULL;

		MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
		/* Link data is stored like file data, in whole physical blocks. */
		retval = meta_bread(vp, 0,
				roundup((int)fp->ff_size,
					VTOHFS(vp)->hfs_phys_block_size),
				ap->a_cred, &bp);
		if (retval) {
			if (bp)
				brelse(bp);
			/* Don't leave a partially-filled cache buffer behind. */
			if (fp->ff_symlinkptr) {
				FREE(fp->ff_symlinkptr, M_TEMP);
				fp->ff_symlinkptr = NULL;
			}
			return (retval);
		}
		bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);
		if (bp) {
			if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
				bp->b_flags |= B_INVAL;		/* data no longer needed */
			}
			brelse(bp);
		}
	}
	/* Serve the (possibly just-populated) cached path to the caller. */
	retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);

	return (retval);
}
2733
2734
2735 /*
2736 * hfs abort op, called after namei() when a CREATE/DELETE isn't actually
2737 * done. If a buffer has been saved in anticipation of a CREATE, delete it.
2738 #% abortop dvp = = =
2739 #
2740 vop_abortop {
2741 IN struct vnode *dvp;
2742 IN struct componentname *cnp;
2743
2744 */
2745
2746 /* ARGSUSED */
2747
2748 static int
2749 hfs_abortop(ap)
2750 struct vop_abortop_args /* {
2751 struct vnode *a_dvp;
2752 struct componentname *a_cnp;
2753 } */ *ap;
2754 {
2755 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
2756 FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
2757 ap->a_cnp->cn_flags &= ~HASBUF;
2758 }
2759
2760 return (0);
2761 }
2762
2763
2764 /*
2765 * Lock an cnode. If its already locked, set the WANT bit and sleep.
2766 #% lock vp U L U
2767 #
2768 vop_lock {
2769 IN struct vnode *vp;
2770 IN int flags;
2771 IN struct proc *p;
2772 */
2773
2774 static int
2775 hfs_lock(ap)
2776 struct vop_lock_args /* {
2777 struct vnode *a_vp;
2778 int a_flags;
2779 struct proc *a_p;
2780 } */ *ap;
2781 {
2782 struct vnode *vp = ap->a_vp;
2783 struct cnode *cp = VTOC(vp);
2784
2785 if (cp == NULL)
2786 panic("hfs_lock: cnode in vnode is null\n");
2787
2788 return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
2789 }
2790
2791 /*
2792 * Unlock an cnode.
2793 #% unlock vp L U L
2794 #
2795 vop_unlock {
2796 IN struct vnode *vp;
2797 IN int flags;
2798 IN struct proc *p;
2799
2800 */
2801 static int
2802 hfs_unlock(ap)
2803 struct vop_unlock_args /* {
2804 struct vnode *a_vp;
2805 int a_flags;
2806 struct proc *a_p;
2807 } */ *ap;
2808 {
2809 struct vnode *vp = ap->a_vp;
2810 struct cnode *cp = VTOC(vp);
2811
2812 if (cp == NULL)
2813 panic("hfs_unlock: cnode in vnode is null\n");
2814
2815 return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
2816 &vp->v_interlock, ap->a_p));
2817 }
2818
2819
2820 /*
2821 * Print out the contents of a cnode.
2822 #% print vp = = =
2823 #
2824 vop_print {
2825 IN struct vnode *vp;
2826 */
2827 static int
2828 hfs_print(ap)
2829 struct vop_print_args /* {
2830 struct vnode *a_vp;
2831 } */ *ap;
2832 {
2833 struct vnode * vp = ap->a_vp;
2834 struct cnode *cp = VTOC(vp);
2835
2836 printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
2837 major(cp->c_dev), minor(cp->c_dev));
2838 #if FIFO
2839 if (vp->v_type == VFIFO)
2840 fifo_printinfo(vp);
2841 #endif /* FIFO */
2842 lockmgr_printinfo(&cp->c_lock);
2843 printf("\n");
2844 return (0);
2845 }
2846
2847
2848 /*
2849 * Check for a locked cnode.
2850 #% islocked vp = = =
2851 #
2852 vop_islocked {
2853 IN struct vnode *vp;
2854
2855 */
2856 static int
2857 hfs_islocked(ap)
2858 struct vop_islocked_args /* {
2859 struct vnode *a_vp;
2860 } */ *ap;
2861 {
2862 return (lockstatus(&VTOC(ap->a_vp)->c_lock));
2863 }
2864
2865 /*
2866
2867 #% pathconf vp L L L
2868 #
2869 vop_pathconf {
2870 IN struct vnode *vp;
2871 IN int name;
2872 OUT register_t *retval;
2873
2874 */
2875 static int
2876 hfs_pathconf(ap)
2877 struct vop_pathconf_args /* {
2878 struct vnode *a_vp;
2879 int a_name;
2880 int *a_retval;
2881 } */ *ap;
2882 {
2883 int retval = 0;
2884
2885 switch (ap->a_name) {
2886 case _PC_LINK_MAX:
2887 if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
2888 *ap->a_retval = HFS_LINK_MAX;
2889 else
2890 *ap->a_retval = 1;
2891 break;
2892 case _PC_NAME_MAX:
2893 *ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters x max utf8 representation */
2894 break;
2895 case _PC_PATH_MAX:
2896 *ap->a_retval = PATH_MAX; /* 1024 */
2897 break;
2898 case _PC_CHOWN_RESTRICTED:
2899 *ap->a_retval = 1;
2900 break;
2901 case _PC_NO_TRUNC:
2902 *ap->a_retval = 0;
2903 break;
2904 case _PC_NAME_CHARS_MAX:
2905 *ap->a_retval = kHFSPlusMaxFileNameChars;
2906 break;
2907 case _PC_CASE_SENSITIVE:
2908 *ap->a_retval = 0;
2909 break;
2910 case _PC_CASE_PRESERVING:
2911 *ap->a_retval = 1;
2912 break;
2913 default:
2914 retval = EINVAL;
2915 }
2916
2917 return (retval);
2918 }
2919
2920
2921 /*
2922 * Advisory record locking support
2923 #% advlock vp U U U
2924 #
2925 vop_advlock {
2926 IN struct vnode *vp;
2927 IN caddr_t id;
2928 IN int op;
2929 IN struct flock *fl;
2930 IN int flags;
2931
2932 */
2933 static int
2934 hfs_advlock(ap)
2935 struct vop_advlock_args /* {
2936 struct vnode *a_vp;
2937 caddr_t a_id;
2938 int a_op;
2939 struct flock *a_fl;
2940 int a_flags;
2941 } */ *ap;
2942 {
2943 struct vnode *vp = ap->a_vp;
2944 struct flock *fl = ap->a_fl;
2945 struct hfslockf *lock;
2946 struct filefork *fork;
2947 off_t start, end;
2948 int retval;
2949
2950 /* Only regular files can have locks */
2951 if (vp->v_type != VREG)
2952 return (EISDIR);
2953
2954 fork = VTOF(ap->a_vp);
2955 /*
2956 * Avoid the common case of unlocking when cnode has no locks.
2957 */
2958 if (fork->ff_lockf == (struct hfslockf *)0) {
2959 if (ap->a_op != F_SETLK) {
2960 fl->l_type = F_UNLCK;
2961 return (0);
2962 }
2963 }
2964 /*
2965 * Convert the flock structure into a start and end.
2966 */
2967 start = 0;
2968 switch (fl->l_whence) {
2969 case SEEK_SET:
2970 case SEEK_CUR:
2971 /*
2972 * Caller is responsible for adding any necessary offset
2973 * when SEEK_CUR is used.
2974 */
2975 start = fl->l_start;
2976 break;
2977 case SEEK_END:
2978 start = fork->ff_size + fl->l_start;
2979 break;
2980 default:
2981 return (EINVAL);
2982 }
2983
2984 if (start < 0)
2985 return (EINVAL);
2986 if (fl->l_len == 0)
2987 end = -1;
2988 else
2989 end = start + fl->l_len - 1;
2990
2991 /*
2992 * Create the hfslockf structure
2993 */
2994 MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
2995 lock->lf_start = start;
2996 lock->lf_end = end;
2997 lock->lf_id = ap->a_id;
2998 lock->lf_fork = fork;
2999 lock->lf_type = fl->l_type;
3000 lock->lf_next = (struct hfslockf *)0;
3001 TAILQ_INIT(&lock->lf_blkhd);
3002 lock->lf_flags = ap->a_flags;
3003 /*
3004 * Do the requested operation.
3005 */
3006 switch(ap->a_op) {
3007 case F_SETLK:
3008 retval = hfs_setlock(lock);
3009 break;
3010 case F_UNLCK:
3011 retval = hfs_clearlock(lock);
3012 FREE(lock, M_LOCKF);
3013 break;
3014 case F_GETLK:
3015 retval = hfs_getlock(lock, fl);
3016 FREE(lock, M_LOCKF);
3017 break;
3018 default:
3019 retval = EINVAL;
3020 _FREE(lock, M_LOCKF);
3021 break;
3022 }
3023
3024 return (retval);
3025 }
3026
3027
3028
3029 /*
3030 * Update the access, modified, and node change times as specified
3031 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The
3032 * C_MODIFIED flag is used to specify that the node needs to be
3033 * updated but that the times have already been set. The access and
3034 * modified times are input parameters but the node change time is
3035 * always taken from the current time. If waitfor is set, then wait
3036 * for the disk write of the node to complete.
3037 */
3038 /*
3039 #% update vp L L L
3040 IN struct vnode *vp;
3041 IN struct timeval *access;
3042 IN struct timeval *modify;
3043 IN int waitfor;
3044 */
static int
hfs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_access;
		struct timeval *a_modify;
		int a_waitfor;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(ap->a_vp);
	struct proc *p;
	struct cat_fork *dataforkp = NULL;
	struct cat_fork *rsrcforkp = NULL;
	struct cat_fork datafork;
	int updateflag;
	struct hfsmount *hfsmp;
	int error;

	hfsmp = VTOHFS(vp);

	/* XXX do we really want to clear the system cnode flags here???? */
	if ((vp->v_flag & VSYSTEM) ||
	    (VTOVFS(vp)->mnt_flag & MNT_RDONLY) ||
	    (cp->c_mode == 0)) {
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
		return (0);
	}

	updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);

	/* Nothing to update. */
	if (updateflag == 0) {
		return (0);
	}
	/* HFS standard doesn't have access times. */
	if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {
		return (0);
	}
	if (updateflag & C_ACCESS) {
		/*
		 * If only the access time is changing then defer
		 * updating it on-disk until later (in hfs_inactive).
		 * If it was recently updated then skip the update.
		 */
		if (updateflag == C_ACCESS) {
			cp->c_flag &= ~C_ACCESS;

			/* Its going to disk or its sufficiently newer... */
			if ((cp->c_flag & C_ATIMEMOD) ||
			    (ap->a_access->tv_sec > (cp->c_atime + ATIME_ACCURACY))) {
				cp->c_atime = ap->a_access->tv_sec;
				cp->c_flag |= C_ATIMEMOD;
			}
			return (0);
		} else {
			cp->c_atime = ap->a_access->tv_sec;
		}
	}
	if (updateflag & C_UPDATE) {
		cp->c_mtime = ap->a_modify->tv_sec;
		cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;
	}
	if (updateflag & C_CHANGE) {
		cp->c_ctime = time.tv_sec;
		/*
		 * HFS dates that WE set must be adjusted for DST
		 */
		if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
			cp->c_ctime += 3600;
			cp->c_mtime = cp->c_ctime;
		}
	}

	if (cp->c_datafork)
		dataforkp = &cp->c_datafork->ff_data;
	if (cp->c_rsrcfork)
		rsrcforkp = &cp->c_rsrcfork->ff_data;

	p = current_proc();

	/*
	 * For delayed allocations updates are
	 * postponed until an fsync or the file
	 * gets written to disk.
	 *
	 * Deleted files can defer meta data updates until inactive.
	 */
	if (ISSET(cp->c_flag, C_DELETED) ||
	    (dataforkp && cp->c_datafork->ff_unallocblocks) ||
	    (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks)) {
		if (updateflag & (C_CHANGE | C_UPDATE))
			hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
		/* Remember that the catalog record still needs writing. */
		cp->c_flag |= C_MODIFIED;

		return (0);
	}


	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			hfs_global_shared_lock_release(hfsmp);
			return error;
		}
	}


	/*
	 * For files with invalid ranges (holes) the on-disk
	 * field representing the size of the file (cf_size)
	 * must be no larger than the start of the first hole.
	 * Work on a local copy so the in-memory fork is untouched.
	 */
	if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
		bcopy(dataforkp, &datafork, sizeof(datafork));
		datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
		dataforkp = &datafork;
	}

	/*
	 * Lock the Catalog b-tree file.
	 * A shared lock is sufficient since an update doesn't change
	 * the tree and the lock on vp protects the cnode.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (error) {
		if (hfsmp->jnl) {
			journal_end_transaction(hfsmp->jnl);
		}
		hfs_global_shared_lock_release(hfsmp);
		return (error);
	}

	/* XXX - waitfor is not enforced */
	error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

	/* Unlock the Catalog b-tree file. */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (updateflag & (C_CHANGE | C_UPDATE))
		hfs_volupdate(hfsmp, VOL_UPDATE, 0);

	// XXXdbg
	if (hfsmp->jnl) {
		journal_end_transaction(hfsmp->jnl);
	}
	hfs_global_shared_lock_release(hfsmp);

	/* After the updates are finished, clear the flags */
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_ATIMEMOD);

	return (error);
}
3200
3201 /*
3202 * Allocate a new node
3203 *
3204 * Upon leaving, namei buffer must be freed.
3205 *
3206 */
static int
hfs_makenode(mode, dvp, vpp, cnp)
	int mode;
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct cnode *cp;
	struct cnode *dcp;
	struct vnode *tvp;
	struct hfsmount *hfsmp;
	struct timeval tv;
	struct proc *p;
	struct cat_desc in_desc, out_desc;
	struct cat_attr attr;
	int error, started_tr = 0, grabbed_lock = 0;
	enum vtype vnodetype;

	p = cnp->cn_proc;
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(dvp);
	*vpp = NULL;
	tvp = NULL;
	bzero(&out_desc, sizeof(out_desc));

	/* Default to a regular file if the caller gave no file type. */
	if ((mode & S_IFMT) == 0)
		mode |= S_IFREG;
	vnodetype = IFTOVT(mode);

	/* Check if unmount in progress */
	if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {
		error = EPERM;
		goto exit;
	}
	/* Check if we're out of usable disk space. */
	if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
		error = ENOSPC;
		goto exit;
	}

	/* Setup the default attributes */
	bzero(&attr, sizeof(attr));
	attr.ca_mode = mode;
	attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
	attr.ca_mtime = time.tv_sec;
	attr.ca_mtime_nsec = time.tv_usec * 1000;
	if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
		attr.ca_mtime += 3600;	/* Same as what hfs_update does */
	}
	attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
	if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
		attr.ca_uid = hfsmp->hfs_uid;
		attr.ca_gid = hfsmp->hfs_gid;
	} else {
		if (vnodetype == VLNK)
			attr.ca_uid = dcp->c_uid;
		else
			attr.ca_uid = cnp->cn_cred->cr_uid;
		/* BSD semantics: group of the new node follows the parent. */
		attr.ca_gid = dcp->c_gid;
	}
	/*
	 * Don't tag as a special file (BLK or CHR) until *after*
	 * hfs_getnewvnode is called.  This ensures that any
	 * alias checking is deferred until hfs_mknod completes.
	 */
	if (vnodetype == VBLK || vnodetype == VCHR)
		attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;

	/* Tag symlinks with a type and creator. */
	if (vnodetype == VLNK) {
		struct FndrFileInfo *fip;

		fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
		fip->fdType = SWAP_BE32(kSymLinkFileType);
		fip->fdCreator = SWAP_BE32(kSymLinkCreator);
	}
	/* Strip setgid unless the creator is in the group or is superuser. */
	if ((attr.ca_mode & S_ISGID) &&
	    !groupmember(dcp->c_gid, cnp->cn_cred) &&
	    suser(cnp->cn_cred, NULL)) {
		attr.ca_mode &= ~S_ISGID;
	}
	if (cnp->cn_flags & ISWHITEOUT)
		attr.ca_flags |= UF_OPAQUE;

	/* Setup the descriptor */
	bzero(&in_desc, sizeof(in_desc));
	in_desc.cd_nameptr = cnp->cn_nameptr;
	in_desc.cd_namelen = cnp->cn_namelen;
	in_desc.cd_parentcnid = dcp->c_cnid;
	in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto exit;
		}
		started_tr = 1;
	}

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (error)
		goto exit;

	error = cat_create(hfsmp, &in_desc, &attr, &out_desc);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);
	if (error)
		goto exit;

	/* Update the parent directory */
	dcp->c_childhint = out_desc.cd_hint;	/* Cache directory's location */
	dcp->c_nlink++;
	dcp->c_entries++;
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	tv = time;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);

	hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
		(dcp->c_cnid == kHFSRootFolderID));

	// XXXdbg
	// have to end the transaction here before we call hfs_getnewvnode()
	// because that can cause us to try and reclaim a vnode on a different
	// file system which could cause us to start a transaction which can
	// deadlock with someone on that other file system (since we could be
	// holding two transaction locks as well as various vnodes and we did
	// not obtain the locks on them in the proper order).
	//
	// NOTE: this means that if the quota check fails or we have to update
	//       the change time on a block-special device that those changes
	//       will happen as part of independent transactions.
	//
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
		started_tr = 0;
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
		grabbed_lock = 0;
	}

	/* Create a vnode for the object just created: */
	error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);
	if (error)
		goto exit;


#if QUOTA
	cp = VTOC(tvp);
	/*
	 * We call hfs_chkiq with FORCE flag so that if we
	 * fall through to the rmdir we actually have
	 * accounted for the inode
	 *
	 * NOTE(review): this early-return path bypasses the common exit
	 * label and relies on VOP_RMDIR / VOP_REMOVE to release both dvp
	 * and tvp -- confirm against the vnode operation contracts.
	 */
	if ((error = hfs_getinoquota(cp)) ||
	    (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
		if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
			FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
			cnp->cn_flags &= ~HASBUF;
		}
		if (tvp->v_type == VDIR)
			VOP_RMDIR(dvp,tvp, cnp);
		else
			VOP_REMOVE(dvp,tvp, cnp);

		return (error);
	}
#endif /* QUOTA */

	/*
	 * restore vtype and mode for VBLK and VCHR
	 */
	if (vnodetype == VBLK || vnodetype == VCHR) {
		struct cnode *cp;

		cp = VTOC(tvp);
		cp->c_mode = mode;
		tvp->v_type = IFTOVT(mode);
		cp->c_flag |= C_CHANGE;
		tv = time;
		if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {
			vput(tvp);
			goto exit;
		}
	}

	*vpp = tvp;
exit:
	cat_releasedesc(&out_desc);

	/* Free the namei buffer (this function's contract) and drop the parent. */
	if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	vput(dvp);

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
		started_tr = 0;
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
		grabbed_lock = 0;
	}

	return (error);
}
3417
3418
/*
 * Get a vnode for the resource fork of the file represented by vp,
 * reusing a cached one when available.  On success *rvpp holds a
 * referenced resource-fork vnode sharing vp's cnode.
 */
static int
hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
{
	struct vnode *rvp;
	struct cnode *cp = VTOC(vp);
	int error;

	if ((rvp = cp->c_rsrc_vp)) {
		/* Use existing vnode */
		error = vget(rvp, 0, p);
		if (error) {
			char * name = VTOC(vp)->c_desc.cd_nameptr;

			if (name)
				printf("hfs_vgetrsrc: couldn't get"
					" resource fork for %s\n", name);
			return (error);
		}
	} else {
		struct cat_fork rsrcfork;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
		if (error)
			return (error);

		/* Get resource fork data */
		error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
				(struct cat_attr *)0, &rsrcfork);

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error)
			return (error);

		/* Build a new vnode that shares the cnode with the data fork. */
		error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
					&rsrcfork, &rvp);
		if (error)
			return (error);
	}

	*rvpp = rvp;
	return (0);
}
3463
3464
3465 /*
3466 * Wrapper for special device reads
3467 */
3468 static int
3469 hfsspec_read(ap)
3470 struct vop_read_args /* {
3471 struct vnode *a_vp;
3472 struct uio *a_uio;
3473 int a_ioflag;
3474 struct ucred *a_cred;
3475 } */ *ap;
3476 {
3477 /*
3478 * Set access flag.
3479 */
3480 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3481 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));
3482 }
3483
3484 /*
3485 * Wrapper for special device writes
3486 */
3487 static int
3488 hfsspec_write(ap)
3489 struct vop_write_args /* {
3490 struct vnode *a_vp;
3491 struct uio *a_uio;
3492 int a_ioflag;
3493 struct ucred *a_cred;
3494 } */ *ap;
3495 {
3496 /*
3497 * Set update and change flags.
3498 */
3499 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3500 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));
3501 }
3502
3503 /*
3504 * Wrapper for special device close
3505 *
3506 * Update the times on the cnode then do device close.
3507 */
3508 static int
3509 hfsspec_close(ap)
3510 struct vop_close_args /* {
3511 struct vnode *a_vp;
3512 int a_fflag;
3513 struct ucred *a_cred;
3514 struct proc *a_p;
3515 } */ *ap;
3516 {
3517 struct vnode *vp = ap->a_vp;
3518 struct cnode *cp = VTOC(vp);
3519
3520 simple_lock(&vp->v_interlock);
3521 if (ap->a_vp->v_usecount > 1)
3522 CTIMES(cp, &time, &time);
3523 simple_unlock(&vp->v_interlock);
3524 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
3525 }
3526
3527 #if FIFO
3528 /*
3529 * Wrapper for fifo reads
3530 */
3531 static int
3532 hfsfifo_read(ap)
3533 struct vop_read_args /* {
3534 struct vnode *a_vp;
3535 struct uio *a_uio;
3536 int a_ioflag;
3537 struct ucred *a_cred;
3538 } */ *ap;
3539 {
3540 extern int (**fifo_vnodeop_p)(void *);
3541
3542 /*
3543 * Set access flag.
3544 */
3545 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3546 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));
3547 }
3548
3549 /*
3550 * Wrapper for fifo writes
3551 */
3552 static int
3553 hfsfifo_write(ap)
3554 struct vop_write_args /* {
3555 struct vnode *a_vp;
3556 struct uio *a_uio;
3557 int a_ioflag;
3558 struct ucred *a_cred;
3559 } */ *ap;
3560 {
3561 extern int (**fifo_vnodeop_p)(void *);
3562
3563 /*
3564 * Set update and change flags.
3565 */
3566 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3567 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));
3568 }
3569
3570 /*
3571 * Wrapper for fifo close
3572 *
3573 * Update the times on the cnode then do device close.
3574 */
3575 static int
3576 hfsfifo_close(ap)
3577 struct vop_close_args /* {
3578 struct vnode *a_vp;
3579 int a_fflag;
3580 struct ucred *a_cred;
3581 struct proc *a_p;
3582 } */ *ap;
3583 {
3584 extern int (**fifo_vnodeop_p)(void *);
3585 struct vnode *vp = ap->a_vp;
3586 struct cnode *cp = VTOC(vp);
3587
3588 simple_lock(&vp->v_interlock);
3589 if (ap->a_vp->v_usecount > 1)
3590 CTIMES(cp, &time, &time);
3591 simple_unlock(&vp->v_interlock);
3592 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
3593 }
3594 #endif /* FIFO */
3595
3596
3597 /*****************************************************************************
3598 *
3599 * VOP Tables
3600 *
3601 *****************************************************************************/
3602 int hfs_cache_lookup(); /* in hfs_lookup.c */
3603 int hfs_lookup(); /* in hfs_lookup.c */
3604 int hfs_read(); /* in hfs_readwrite.c */
3605 int hfs_write(); /* in hfs_readwrite.c */
3606 int hfs_ioctl(); /* in hfs_readwrite.c */
3607 int hfs_select(); /* in hfs_readwrite.c */
3608 int hfs_bmap(); /* in hfs_readwrite.c */
3609 int hfs_strategy(); /* in hfs_readwrite.c */
3610 int hfs_truncate(); /* in hfs_readwrite.c */
3611 int hfs_allocate(); /* in hfs_readwrite.c */
3612 int hfs_pagein(); /* in hfs_readwrite.c */
3613 int hfs_pageout(); /* in hfs_readwrite.c */
3614 int hfs_search(); /* in hfs_search.c */
3615 int hfs_bwrite(); /* in hfs_readwrite.c */
3616 int hfs_link(); /* in hfs_link.c */
3617 int hfs_blktooff(); /* in hfs_readwrite.c */
3618 int hfs_offtoblk(); /* in hfs_readwrite.c */
3619 int hfs_cmap(); /* in hfs_readwrite.c */
3620 int hfs_getattrlist(); /* in hfs_attrlist.c */
3621 int hfs_setattrlist(); /* in hfs_attrlist.c */
3622 int hfs_readdirattr(); /* in hfs_attrlist.c */
3623 int hfs_inactive(); /* in hfs_cnode.c */
3624 int hfs_reclaim(); /* in hfs_cnode.c */
3625
3626 int (**hfs_vnodeop_p)(void *);
3627
3628 #define VOPFUNC int (*)(void *)
3629
3630 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
3631 { &vop_default_desc, (VOPFUNC)vn_default_error },
3632 { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup }, /* lookup */
3633 { &vop_create_desc, (VOPFUNC)hfs_create }, /* create */
3634 { &vop_mknod_desc, (VOPFUNC)hfs_mknod }, /* mknod */
3635 { &vop_open_desc, (VOPFUNC)hfs_open }, /* open */
3636 { &vop_close_desc, (VOPFUNC)hfs_close }, /* close */
3637 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3638 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3639 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3640 { &vop_read_desc, (VOPFUNC)hfs_read }, /* read */
3641 { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */
3642 { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */
3643 { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */
3644 { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */
3645 { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
3646 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3647 { &vop_seek_desc, (VOPFUNC)nop_seek }, /* seek */
3648 { &vop_remove_desc, (VOPFUNC)hfs_remove }, /* remove */
3649 { &vop_link_desc, (VOPFUNC)hfs_link }, /* link */
3650 { &vop_rename_desc, (VOPFUNC)hfs_rename }, /* rename */
3651 { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir }, /* mkdir */
3652 { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir }, /* rmdir */
3653 { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex }, /* mkcomplex */
3654 { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, /* getattrlist */
3655 { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist }, /* setattrlist */
3656 { &vop_symlink_desc, (VOPFUNC)hfs_symlink }, /* symlink */
3657 { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */
3658 { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */
3659 { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */
3660 { &vop_abortop_desc, (VOPFUNC)hfs_abortop }, /* abortop */
3661 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3662 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3663 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3664 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3665 { &vop_bmap_desc, (VOPFUNC)hfs_bmap }, /* bmap */
3666 { &vop_strategy_desc, (VOPFUNC)hfs_strategy }, /* strategy */
3667 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3668 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3669 { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf }, /* pathconf */
3670 { &vop_advlock_desc, (VOPFUNC)hfs_advlock }, /* advlock */
3671 { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks }, /* reallocblks */
3672 { &vop_truncate_desc, (VOPFUNC)hfs_truncate }, /* truncate */
3673 { &vop_allocate_desc, (VOPFUNC)hfs_allocate }, /* allocate */
3674 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3675 { &vop_searchfs_desc, (VOPFUNC)hfs_search }, /* search fs */
3676 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, /* bwrite */
3677 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* pagein */
3678 { &vop_pageout_desc,(VOPFUNC) hfs_pageout }, /* pageout */
3679 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3680 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3681 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3682 { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */
3683 { NULL, (VOPFUNC)NULL }
3684 };
3685
3686 struct vnodeopv_desc hfs_vnodeop_opv_desc =
3687 { &hfs_vnodeop_p, hfs_vnodeop_entries };
3688
int (**hfs_specop_p)(void *);

/*
 * Vnode operations table for special device (block/character) nodes
 * that live on an HFS volume.  Device I/O operations are delegated to
 * specfs (some through the hfsspec_* wrappers above so cnode times
 * stay current); attribute, locking, and cnode-lifecycle operations
 * remain HFS's own.
 */
struct vnodeopv_entry_desc hfs_specop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)spec_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)spec_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsspec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsspec_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsspec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)spec_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)spec_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)spec_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)spec_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)spec_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)spec_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, (VOPFUNC)spec_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)spec_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)spec_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)spec_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)spec_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)spec_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },		/* bwrite */
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};

/* Registered with the VFS when the HFS module is initialized. */
struct vnodeopv_desc hfs_specop_opv_desc =
	{ &hfs_specop_p, hfs_specop_entries };
3745
3746 #if FIFO
int (**hfs_fifoop_p)(void *);

/*
 * Vnode operations table for fifos (named pipes) that live on an HFS
 * volume.  Pipe I/O is delegated to fifofs (some entries through the
 * hfsfifo_* wrappers above so cnode times stay current); attribute,
 * locking, and cnode-lifecycle operations remain HFS's own.
 */
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)fifo_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)fifo_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsfifo_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsfifo_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsfifo_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)fifo_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)fifo_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)fifo_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)fifo_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)fifo_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
	{ &vop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)fifo_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)fifo_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)fifo_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)fifo_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)fifo_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)fifo_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)fifo_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)fifo_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },		/* bwrite */
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)hfs_cmap },			/* cmap */
	{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};

/* Registered with the VFS when the HFS module is initialized. */
struct vnodeopv_desc hfs_fifoop_opv_desc =
	{ &hfs_fifoop_p, hfs_fifoop_entries };
3803 #endif /* FIFO */
3804
3805
3806