apple/xnu — bsd/hfs/hfs_vnops.c (xnu-344.12.2)
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/systm.h>
24 #include <sys/kernel.h>
25 #include <sys/file.h>
26 #include <sys/dirent.h>
27 #include <sys/stat.h>
28 #include <sys/buf.h>
29 #include <sys/mount.h>
30 #include <sys/vnode.h>
31 #include <sys/malloc.h>
32 #include <sys/namei.h>
33 #include <sys/ubc.h>
34 #include <sys/quota.h>
35
36 #include <miscfs/specfs/specdev.h>
37 #include <miscfs/fifofs/fifo.h>
38 #include <vfs/vfs_support.h>
39 #include <machine/spl.h>
40
41 #include <sys/kdebug.h>
42
43 #include "hfs.h"
44 #include "hfs_catalog.h"
45 #include "hfs_cnode.h"
46 #include "hfs_lockf.h"
47 #include "hfs_dbg.h"
48 #include "hfs_mount.h"
49 #include "hfs_quota.h"
50 #include "hfs_endian.h"
51
52 #include "hfscommon/headers/BTreesInternal.h"
53 #include "hfscommon/headers/FileMgrInternal.h"
54
55 #define MAKE_DELETED_NAME(NAME,FID) \
56 (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
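/*
 * MAKE_DELETED_NAME builds a name of the form "<HFS_DELETE_PREFIX><fileid>".
 * hfs_remove() below uses it to rename a busy (open) or unlinked hard-link
 * file into the private metadata directory so that hfs_remove_orphans() can
 * clean it up later.  A usage sketch (values illustrative only):
 *
 *	char delname[32];
 *	MAKE_DELETED_NAME(delname, cp->c_fileid);
 */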
57
58
59 extern uid_t console_user;
60
61 /* Global vfs data structures for hfs */
62
63
64 extern int groupmember(gid_t gid, struct ucred *cred);
65
66 static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
67 struct componentname *cnp);
68
69 static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
70 struct vnode **rvpp, struct proc *p);
71
72 static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);
73
74 int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags);
75
76 int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
77 struct proc *p);
78 int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
79 struct proc *p);
80 int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
81 struct ucred *cred, struct proc *p);
82
83 /*****************************************************************************
84 *
85 * Common Operations on vnodes
86 *
87 *****************************************************************************/
88
89 /*
90 * Create a regular file
91 #% create dvp L U U
92 #% create vpp - L -
93 #
94 vop_create {
95 IN WILLRELE struct vnode *dvp;
96 OUT struct vnode **vpp;
97 IN struct componentname *cnp;
98 IN struct vattr *vap;
99
100 We are responsible for freeing the namei buffer;
101 this is done in hfs_makenode().
102 */
103
104 static int
105 hfs_create(ap)
106 struct vop_create_args /* {
107 struct vnode *a_dvp;
108 struct vnode **a_vpp;
109 struct componentname *a_cnp;
110 struct vattr *a_vap;
111 } */ *ap;
112 {
113 struct vattr *vap = ap->a_vap;
114
115 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
116 ap->a_dvp, ap->a_vpp, ap->a_cnp));
117 }
118
119
120 /*
121 * Mknod vnode call
122
123 #% mknod dvp L U U
124 #% mknod vpp - X -
125 #
126 vop_mknod {
127 IN WILLRELE struct vnode *dvp;
128 OUT WILLRELE struct vnode **vpp;
129 IN struct componentname *cnp;
130 IN struct vattr *vap;
131 */
132 /* ARGSUSED */
133
134 static int
135 hfs_mknod(ap)
136 struct vop_mknod_args /* {
137 struct vnode *a_dvp;
138 struct vnode **a_vpp;
139 struct componentname *a_cnp;
140 struct vattr *a_vap;
141 } */ *ap;
142 {
143 struct vattr *vap = ap->a_vap;
144 struct vnode **vpp = ap->a_vpp;
145 struct cnode *cp;
146 int error;
147
148 if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
149 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
150 vput(ap->a_dvp);
151 return (EOPNOTSUPP);
152 }
153
154 /* Create the vnode */
155 error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
156 ap->a_dvp, vpp, ap->a_cnp);
157 if (error)
158 return (error);
159 cp = VTOC(*vpp);
160 cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
161 if ((vap->va_rdev != VNOVAL) &&
162 (vap->va_type == VBLK || vap->va_type == VCHR))
163 cp->c_rdev = vap->va_rdev;
164 /*
165 * Remove cnode so that it will be reloaded by lookup and
166 * checked to see if it is an alias of an existing vnode.
167 * Note: unlike UFS, we don't bash v_type here.
168 */
169 vput(*vpp);
170 vgone(*vpp);
171 *vpp = 0;
172 return (0);
173 }
174
175
176 /*
177 * Open called.
178 #% open vp L L L
179 #
180 vop_open {
181 IN struct vnode *vp;
182 IN int mode;
183 IN struct ucred *cred;
184 IN struct proc *p;
185 */
186
187
188 static int
189 hfs_open(ap)
190 struct vop_open_args /* {
191 struct vnode *a_vp;
192 int a_mode;
193 struct ucred *a_cred;
194 struct proc *a_p;
195 } */ *ap;
196 {
197 struct vnode *vp = ap->a_vp;
198
199 /*
200 * Files marked append-only must be opened for appending.
201 */
202 if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
203 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
204 return (EPERM);
205
206 return (0);
207 }
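/*
 * Example of the append-only check above (illustrative): a caller whose
 * a_mode has FWRITE set but O_APPEND clear gets EPERM when the cnode has
 * the APPEND flag, e.g. an open(path, O_WRONLY); opening with
 * O_WRONLY | O_APPEND is allowed.  Directories are exempt from the check.
 */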
208
209 /*
210 * Close called.
211 *
212 * Update the times on the cnode.
213 #% close vp U U U
214 #
215 vop_close {
216 IN struct vnode *vp;
217 IN int fflag;
218 IN struct ucred *cred;
219 IN struct proc *p;
220 */
221
222
223 static int
224 hfs_close(ap)
225 struct vop_close_args /* {
226 struct vnode *a_vp;
227 int a_fflag;
228 struct ucred *a_cred;
229 struct proc *a_p;
230 } */ *ap;
231 {
232 register struct vnode *vp = ap->a_vp;
233 register struct cnode *cp = VTOC(vp);
234 register struct filefork *fp = VTOF(vp);
235 struct proc *p = ap->a_p;
236 struct timeval tv;
237 off_t leof;
238 u_long blks, blocksize;
239 int devBlockSize;
240 int error;
241
242 simple_lock(&vp->v_interlock);
243 if ((!UBCISVALID(vp) && vp->v_usecount > 1)
244 || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
245 tv = time;
246 CTIMES(cp, &tv, &tv);
247 }
248 simple_unlock(&vp->v_interlock);
249
250 /*
251 * VOP_CLOSE can be called with vp locked (from vclean).
252 * We check for this case using VOP_ISLOCKED and bail.
253 *
254 * XXX During a force unmount we won't do the cleanup below!
255 */
256 if (vp->v_type == VDIR || VOP_ISLOCKED(vp))
257 return (0);
258
259 leof = fp->ff_size;
260
261 if ((fp->ff_blocks > 0) && !ISSET(cp->c_flag, C_DELETED)) {
262 enum vtype our_type = vp->v_type;
263 u_long our_id = vp->v_id;
264 int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
265
266 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
267 if (error)
268 return (0);
269 /*
270 * Since we can context switch in vn_lock, our vnode
271 * could get recycled (e.g., umount -f). Double check
272 * that it's still ours.
273 */
274 if (vp->v_type != our_type || vp->v_id != our_id
275 || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
276 VOP_UNLOCK(vp, 0, p);
277 return (0);
278 }
279
280 /*
281 * Last chance to explicitly zero out the areas
282 * that are currently marked invalid:
283 */
284 VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
285 (void) cluster_push(vp);
286 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
287 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
288 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
289 off_t start = invalid_range->rl_start;
290 off_t end = invalid_range->rl_end;
291
292 /* The range about to be written must be validated
293 * first, so that VOP_CMAP() will return the
294 * appropriate mapping for the cluster code:
295 */
296 rl_remove(start, end, &fp->ff_invalidranges);
297
298 (void) cluster_write(vp, (struct uio *) 0, leof,
299 invalid_range->rl_end + 1, invalid_range->rl_start,
300 (off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);
301
302 if (ISSET(vp->v_flag, VHASDIRTY))
303 (void) cluster_push(vp);
304
305 cp->c_flag |= C_MODIFIED;
306 }
307 cp->c_flag &= ~C_ZFWANTSYNC;
308 cp->c_zftimeout = 0;
309 blocksize = VTOVCB(vp)->blockSize;
310 blks = leof / blocksize;
311 if (((off_t)blks * (off_t)blocksize) != leof)
312 blks++;
313 /*
314 * Shrink the peof to the smallest size necessary to contain the leof.
315 */
316 if (blks < fp->ff_blocks)
317 (void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
318 (void) cluster_push(vp);
319
320 if (!was_nocache)
321 CLR(vp->v_flag, VNOCACHE_DATA);
322
323 /*
324 * If the VOP_TRUNCATE didn't happen to flush the vnode's
325 * information out to disk, force it to be updated now that
326 * all invalid ranges have been zero-filled and validated:
327 */
328 if (cp->c_flag & C_MODIFIED) {
329 tv = time;
330 VOP_UPDATE(vp, &tv, &tv, 0);
331 }
332 VOP_UNLOCK(vp, 0, p);
333 }
334 return (0);
335 }
336
337 /*
338 #% access vp L L L
339 #
340 vop_access {
341 IN struct vnode *vp;
342 IN int mode;
343 IN struct ucred *cred;
344 IN struct proc *p;
345
346 */
347
348 static int
349 hfs_access(ap)
350 struct vop_access_args /* {
351 struct vnode *a_vp;
352 int a_mode;
353 struct ucred *a_cred;
354 struct proc *a_p;
355 } */ *ap;
356 {
357 struct vnode *vp = ap->a_vp;
358 struct cnode *cp = VTOC(vp);
359 struct ucred *cred = ap->a_cred;
360 register gid_t *gp;
361 mode_t mode = ap->a_mode;
362 mode_t mask = 0;
363 int i;
364 int error;
365
366 /*
367 * Disallow write attempts on read-only file systems;
368 * unless the file is a socket, fifo, or a block or
369 * character device resident on the file system.
370 */
371 if (mode & VWRITE) {
372 switch (vp->v_type) {
373 case VDIR:
374 case VLNK:
375 case VREG:
376 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
377 return (EROFS);
378 #if QUOTA
379 if ((error = hfs_getinoquota(cp)))
380 return (error);
381 #endif /* QUOTA */
382 break;
383 }
384 }
385
386 /* If immutable bit set, nobody gets to write it. */
387 if ((mode & VWRITE) && (cp->c_flags & IMMUTABLE))
388 return (EPERM);
389
390 /* Otherwise, user id 0 always gets access. */
391 if (ap->a_cred->cr_uid == 0)
392 return (0);
393
394 mask = 0;
395
396 /* Otherwise, check the owner. */
397 if (hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, ap->a_p, false) == 0) {
398 if (mode & VEXEC)
399 mask |= S_IXUSR;
400 if (mode & VREAD)
401 mask |= S_IRUSR;
402 if (mode & VWRITE)
403 mask |= S_IWUSR;
404 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
405 }
406
407 /* Otherwise, check the groups. */
408 if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
409 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
410 if (cp->c_gid == *gp) {
411 if (mode & VEXEC)
412 mask |= S_IXGRP;
413 if (mode & VREAD)
414 mask |= S_IRGRP;
415 if (mode & VWRITE)
416 mask |= S_IWGRP;
417 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
418 }
419 }
420
421 /* Otherwise, check everyone else. */
422 if (mode & VEXEC)
423 mask |= S_IXOTH;
424 if (mode & VREAD)
425 mask |= S_IROTH;
426 if (mode & VWRITE)
427 mask |= S_IWOTH;
428 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
429 }
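/*
 * Example of the mask computation above (illustrative): a VREAD|VWRITE
 * request from the file's owner builds mask = S_IRUSR | S_IWUSR, so a
 * 0644 mode grants access while a 0444 mode returns EACCES.  Note that
 * only one class (owner, group, or other) is consulted per call.
 */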
430
431
432
433 /*
434 #% getattr vp = = =
435 #
436 vop_getattr {
437 IN struct vnode *vp;
438 IN struct vattr *vap;
439 IN struct ucred *cred;
440 IN struct proc *p;
441
442 */
443
444
445 /* ARGSUSED */
446 static int
447 hfs_getattr(ap)
448 struct vop_getattr_args /* {
449 struct vnode *a_vp;
450 struct vattr *a_vap;
451 struct ucred *a_cred;
452 struct proc *a_p;
453 } */ *ap;
454 {
455 struct vnode *vp = ap->a_vp;
456 struct cnode *cp = VTOC(vp);
457 struct vattr *vap = ap->a_vap;
458 struct timeval tv;
459
460 tv = time;
461 CTIMES(cp, &tv, &tv);
462
463 vap->va_type = vp->v_type;
464 /*
465 * [2856576] Since we are dynamically changing the owner, also
466 * effectively turn off the set-user-id and set-group-id bits,
467 * just like chmod(2) would when changing ownership. This prevents
468 * a security hole where set-user-id programs run as whoever is
469 * logged on (or root if nobody is logged in yet!)
470 */
471 vap->va_mode = (cp->c_uid == UNKNOWNUID) ? cp->c_mode & ~(S_ISUID | S_ISGID) : cp->c_mode;
472 vap->va_nlink = cp->c_nlink;
473 vap->va_uid = (cp->c_uid == UNKNOWNUID) ? console_user : cp->c_uid;
474 vap->va_gid = cp->c_gid;
475 vap->va_fsid = cp->c_dev;
476 /*
477 * Exporting file IDs from HFS Plus:
478 *
479 * For "normal" files the c_fileid is the same value as the
480 * c_cnid. But for hard link files, they are different - the
481 * c_cnid belongs to the active directory entry (ie the link)
482 * and the c_fileid is for the actual inode (ie the data file).
483 *
484 * The stat call (getattr) will always return the c_fileid
485 * and Carbon APIs, which are hardlink-ignorant, will always
486 * receive the c_cnid (from getattrlist).
487 */
488 vap->va_fileid = cp->c_fileid;
489 vap->va_atime.tv_sec = cp->c_atime;
490 vap->va_atime.tv_nsec = 0;
491 vap->va_mtime.tv_sec = cp->c_mtime;
492 vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
493 vap->va_ctime.tv_sec = cp->c_ctime;
494 vap->va_ctime.tv_nsec = 0;
495 vap->va_gen = 0;
496 vap->va_flags = cp->c_flags;
497 vap->va_rdev = 0;
498 vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;
499 vap->va_filerev = 0;
500 vap->va_spare = 0;
501 if (vp->v_type == VDIR) {
502 vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;
503 vap->va_bytes = 0;
504 } else {
505 vap->va_size = VTOF(vp)->ff_size;
506 vap->va_bytes = (u_quad_t)cp->c_blocks *
507 (u_quad_t)VTOVCB(vp)->blockSize;
508 if (vp->v_type == VBLK || vp->v_type == VCHR)
509 vap->va_rdev = cp->c_rdev;
510 }
511 return (0);
512 }
513
514 /*
515 * Set attribute vnode op. called from several syscalls
516 #% setattr vp L L L
517 #
518 vop_setattr {
519 IN struct vnode *vp;
520 IN struct vattr *vap;
521 IN struct ucred *cred;
522 IN struct proc *p;
523
524 */
525
526 static int
527 hfs_setattr(ap)
528 struct vop_setattr_args /* {
529 struct vnode *a_vp;
530 struct vattr *a_vap;
531 struct ucred *a_cred;
532 struct proc *a_p;
533 } */ *ap;
534 {
535 struct vattr *vap = ap->a_vap;
536 struct vnode *vp = ap->a_vp;
537 struct cnode *cp = VTOC(vp);
538 struct ucred *cred = ap->a_cred;
539 struct proc *p = ap->a_p;
540 struct timeval atimeval, mtimeval;
541 int error;
542
543 /*
544 * Check for unsettable attributes.
545 */
546 if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
547 (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
548 (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
549 ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
550 return (EINVAL);
551 }
552
553 if (vap->va_flags != VNOVAL) {
554 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
555 return (EROFS);
556 if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))
557 return (error);
558 if (vap->va_flags & (IMMUTABLE | APPEND))
559 return (0);
560 }
561
562 if (cp->c_flags & (IMMUTABLE | APPEND))
563 return (EPERM);
564
565 // XXXdbg - don't allow modification of the journal or journal_info_block
566 if (VTOHFS(vp)->jnl && cp->c_datafork) {
567 struct HFSPlusExtentDescriptor *extd;
568
569 extd = &cp->c_datafork->ff_data.cf_extents[0];
570 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
571 return EPERM;
572 }
573 }
574
575 /*
576 * Go through the fields and update iff not VNOVAL.
577 */
578 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
579 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
580 return (EROFS);
581 if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))
582 return (error);
583 }
584 if (vap->va_size != VNOVAL) {
585 /*
586 * Disallow write attempts on read-only file systems;
587 * unless the file is a socket, fifo, or a block or
588 * character device resident on the file system.
589 */
590 switch (vp->v_type) {
591 case VDIR:
592 return (EISDIR);
593 case VLNK:
594 case VREG:
595 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
596 return (EROFS);
597 break;
598 default:
599 break;
600 }
601 if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))
602 return (error);
603 }
604 cp = VTOC(vp);
605 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
606 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
607 return (EROFS);
608 if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
609 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
610 (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {
611 return (error);
612 }
613 if (vap->va_atime.tv_sec != VNOVAL)
614 cp->c_flag |= C_ACCESS;
615 if (vap->va_mtime.tv_sec != VNOVAL) {
616 cp->c_flag |= C_CHANGE | C_UPDATE;
617 /*
618 * The utimes system call can reset the modification
619 * time but it doesn't know about HFS create times.
620 * So we need to ensure that the creation time is
621 * always at least as old as the modification time.
622 */
623 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
624 (cp->c_cnid != kRootDirID) &&
625 (vap->va_mtime.tv_sec < cp->c_itime)) {
626 cp->c_itime = vap->va_mtime.tv_sec;
627 }
628 }
629 atimeval.tv_sec = vap->va_atime.tv_sec;
630 atimeval.tv_usec = 0;
631 mtimeval.tv_sec = vap->va_mtime.tv_sec;
632 mtimeval.tv_usec = 0;
633 if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))
634 return (error);
635 }
636 error = 0;
637 if (vap->va_mode != (mode_t)VNOVAL) {
638 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
639 return (EROFS);
640 error = hfs_chmod(vp, (int)vap->va_mode, cred, p);
641 }
642 return (error);
643 }
644
645
646 /*
647 * Change the mode on a file.
648 * cnode must be locked before calling.
649 */
650 int
651 hfs_chmod(vp, mode, cred, p)
652 register struct vnode *vp;
653 register int mode;
654 register struct ucred *cred;
655 struct proc *p;
656 {
657 register struct cnode *cp = VTOC(vp);
658 int error;
659
660 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
661 return (0);
662
663 // XXXdbg - don't allow modification of the journal or journal_info_block
664 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
665 struct HFSPlusExtentDescriptor *extd;
666
667 extd = &cp->c_datafork->ff_data.cf_extents[0];
668 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
669 return EPERM;
670 }
671 }
672
673 #if OVERRIDE_UNKNOWN_PERMISSIONS
674 if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
675 return (0);
676 };
677 #endif
678 if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)
679 return (error);
680 if (cred->cr_uid) {
681 if (vp->v_type != VDIR && (mode & S_ISTXT))
682 return (EFTYPE);
683 if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))
684 return (EPERM);
685 }
686 cp->c_mode &= ~ALLPERMS;
687 cp->c_mode |= (mode & ALLPERMS);
688 cp->c_flag |= C_CHANGE;
689 return (0);
690 }
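/*
 * Note on hfs_chmod() above: on plain HFS volumes (vcbSigWord !=
 * kHFSPlusSigWord) it is a no-op that returns success, since HFS has no
 * per-file permissions to store.  Non-root callers are also prevented
 * from setting the sticky bit on non-directories and from setting
 * setgid for groups they do not belong to.
 */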
691
692
693 int
694 hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
695 {
696 struct cnode *cp = VTOC(vp);
697 gid_t *gp;
698 int retval = 0;
699 int i;
700
701 /*
702 * Disallow write attempts on read-only file systems;
703 * unless the file is a socket, fifo, or a block or
704 * character device resident on the file system.
705 */
706 switch (vp->v_type) {
707 case VDIR:
708 case VLNK:
709 case VREG:
710 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
711 return (EROFS);
712 break;
713 default:
714 break;
715 }
716
717 /* If immutable bit set, nobody gets to write it. */
718 if (considerFlags && (cp->c_flags & IMMUTABLE))
719 return (EPERM);
720
721 /* Otherwise, user id 0 always gets access. */
722 if (cred->cr_uid == 0)
723 return (0);
724
725 /* Otherwise, check the owner. */
726 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
727 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
728
729 /* Otherwise, check the groups. */
730 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
731 if (cp->c_gid == *gp)
732 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
733 }
734
735 /* Otherwise, check everyone else. */
736 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
737 }
738
739
740
741 /*
742 * Change the flags on a file or directory.
743 * cnode must be locked before calling.
744 */
745 int
746 hfs_chflags(vp, flags, cred, p)
747 register struct vnode *vp;
748 register u_long flags;
749 register struct ucred *cred;
750 struct proc *p;
751 {
752 register struct cnode *cp = VTOC(vp);
753 int retval;
754
755 if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
756 if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
757 return retval;
758 };
759 } else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {
760 return retval;
761 };
762
763 if (cred->cr_uid == 0) {
764 if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&
765 securelevel > 0) {
766 return EPERM;
767 };
768 cp->c_flags = flags;
769 } else {
770 if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
771 (flags & UF_SETTABLE) != flags) {
772 return EPERM;
773 };
774 cp->c_flags &= SF_SETTABLE;
775 cp->c_flags |= (flags & UF_SETTABLE);
776 }
777 cp->c_flag |= C_CHANGE;
778
779 return (0);
780 }
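/*
 * Note on hfs_chflags() above: the superuser may set any flags unless
 * SF_IMMUTABLE or SF_APPEND is already set and securelevel > 0; other
 * users may only manipulate the UF_SETTABLE (user-settable) flags and
 * may not change a file whose system flags are already set.
 */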
781
782
783 /*
784 * Perform chown operation on cnode cp;
785 * cnode must be locked prior to call.
786 */
787 int
788 hfs_chown(vp, uid, gid, cred, p)
789 register struct vnode *vp;
790 uid_t uid;
791 gid_t gid;
792 struct ucred *cred;
793 struct proc *p;
794 {
795 register struct cnode *cp = VTOC(vp);
796 uid_t ouid;
797 gid_t ogid;
798 int error = 0;
799 #if QUOTA
800 register int i;
801 int64_t change;
802 #endif /* QUOTA */
803
804 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
805 return (EOPNOTSUPP);
806
807 if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)
808 return (0);
809
810 if (uid == (uid_t)VNOVAL)
811 uid = cp->c_uid;
812 if (gid == (gid_t)VNOVAL)
813 gid = cp->c_gid;
814 /*
815 * If we don't own the file, are trying to change the owner
816 * of the file, or are not a member of the target group,
817 * the caller must be superuser or the call fails.
818 */
819 if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
820 (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
821 (error = suser(cred, &p->p_acflag)))
822 return (error);
823
824 ogid = cp->c_gid;
825 ouid = cp->c_uid;
826 #if QUOTA
827 if ((error = hfs_getinoquota(cp)))
828 return (error);
829 if (ouid == uid) {
830 dqrele(vp, cp->c_dquot[USRQUOTA]);
831 cp->c_dquot[USRQUOTA] = NODQUOT;
832 }
833 if (ogid == gid) {
834 dqrele(vp, cp->c_dquot[GRPQUOTA]);
835 cp->c_dquot[GRPQUOTA] = NODQUOT;
836 }
837
838 /*
839 * Eventually need to account for (fake) a block per directory
840 *if (vp->v_type == VDIR)
841 *change = VTOVCB(vp)->blockSize;
842 *else
843 */
844
845 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
846 (void) hfs_chkdq(cp, -change, cred, CHOWN);
847 (void) hfs_chkiq(cp, -1, cred, CHOWN);
848 for (i = 0; i < MAXQUOTAS; i++) {
849 dqrele(vp, cp->c_dquot[i]);
850 cp->c_dquot[i] = NODQUOT;
851 }
852 #endif /* QUOTA */
853 cp->c_gid = gid;
854 cp->c_uid = uid;
855 #if QUOTA
856 if ((error = hfs_getinoquota(cp)) == 0) {
857 if (ouid == uid) {
858 dqrele(vp, cp->c_dquot[USRQUOTA]);
859 cp->c_dquot[USRQUOTA] = NODQUOT;
860 }
861 if (ogid == gid) {
862 dqrele(vp, cp->c_dquot[GRPQUOTA]);
863 cp->c_dquot[GRPQUOTA] = NODQUOT;
864 }
865 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
866 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
867 goto good;
868 else
869 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
870 }
871 for (i = 0; i < MAXQUOTAS; i++) {
872 dqrele(vp, cp->c_dquot[i]);
873 cp->c_dquot[i] = NODQUOT;
874 }
875 }
876 cp->c_gid = ogid;
877 cp->c_uid = ouid;
878 if (hfs_getinoquota(cp) == 0) {
879 if (ouid == uid) {
880 dqrele(vp, cp->c_dquot[USRQUOTA]);
881 cp->c_dquot[USRQUOTA] = NODQUOT;
882 }
883 if (ogid == gid) {
884 dqrele(vp, cp->c_dquot[GRPQUOTA]);
885 cp->c_dquot[GRPQUOTA] = NODQUOT;
886 }
887 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
888 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
889 (void) hfs_getinoquota(cp);
890 }
891 return (error);
892 good:
893 if (hfs_getinoquota(cp))
894 panic("hfs_chown: lost quota");
895 #endif /* QUOTA */
896
897 if (ouid != uid || ogid != gid)
898 cp->c_flag |= C_CHANGE;
899 if (ouid != uid && cred->cr_uid != 0)
900 cp->c_mode &= ~S_ISUID;
901 if (ogid != gid && cred->cr_uid != 0)
902 cp->c_mode &= ~S_ISGID;
903 return (0);
904 }
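/*
 * Note on hfs_chown() above: when a non-root caller changes the owner
 * or group, the set-user-id and set-group-id bits are cleared (see the
 * S_ISUID/S_ISGID handling at the end), matching the usual chown(2)
 * security behavior.
 */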
905
906
907 /*
908 #
909 #% exchange fvp L L L
910 #% exchange tvp L L L
911 #
912 */
913 /*
914 * The hfs_exchange routine swaps the fork data in two files by
915 * exchanging some of the information in the cnode. It is used
916 * to preserve the file ID when updating an existing file, in
917 * case the file is being tracked through its file ID. Typically
918 * it's used after creating a new file during a safe-save.
919 */
920
921 static int
922 hfs_exchange(ap)
923 struct vop_exchange_args /* {
924 struct vnode *a_fvp;
925 struct vnode *a_tvp;
926 struct ucred *a_cred;
927 struct proc *a_p;
928 } */ *ap;
929 {
930 struct vnode *from_vp = ap->a_fvp;
931 struct vnode *to_vp = ap->a_tvp;
932 struct vnode *from_rvp = NULL;
933 struct vnode *to_rvp = NULL;
934 struct cnode *from_cp = VTOC(from_vp);
935 struct cnode *to_cp = VTOC(to_vp);
936 struct hfsmount *hfsmp = VTOHFS(from_vp);
937 struct cat_desc tempdesc;
938 struct cat_attr tempattr;
939 int error = 0, started_tr = 0, grabbed_lock = 0;
940
941 /* The files must be on the same volume. */
942 if (from_vp->v_mount != to_vp->v_mount)
943 return (EXDEV);
944
945 /* Only normal files can be exchanged. */
946 if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
947 (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
948 VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
949 return (EINVAL);
950
951 // XXXdbg - don't allow modification of the journal or journal_info_block
952 if (hfsmp->jnl) {
953 struct HFSPlusExtentDescriptor *extd;
954
955 if (from_cp->c_datafork) {
956 extd = &from_cp->c_datafork->ff_data.cf_extents[0];
957 if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
958 return EPERM;
959 }
960 }
961
962 if (to_cp->c_datafork) {
963 extd = &to_cp->c_datafork->ff_data.cf_extents[0];
964 if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
965 return EPERM;
966 }
967 }
968 }
969
970 from_rvp = from_cp->c_rsrc_vp;
971 to_rvp = to_cp->c_rsrc_vp;
972
973 /* If one of the resource forks is open then get the other one. */
974 if (from_rvp || to_rvp) {
975 error = hfs_vgetrsrc(hfsmp, from_vp, &from_rvp, ap->a_p);
976 if (error)
977 return (error);
978 error = hfs_vgetrsrc(hfsmp, to_vp, &to_rvp, ap->a_p);
979 if (error) {
980 vrele(from_rvp);
981 return (error);
982 }
983 }
984
985 /* Ignore any errors, we are doing a 'best effort' on flushing */
986 if (from_vp)
987 (void) vinvalbuf(from_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
988 if (to_vp)
989 (void) vinvalbuf(to_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
990 if (from_rvp)
991 (void) vinvalbuf(from_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
992 if (to_rvp)
993 (void) vinvalbuf(to_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
994
995 // XXXdbg
996 hfs_global_shared_lock_acquire(hfsmp);
997 grabbed_lock = 1;
998 if (hfsmp->jnl) {
999 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1000 goto Err_Exit;
1001 }
1002 started_tr = 1;
1003 }
1004
1005 /* Lock catalog b-tree */
1006 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
1007 if (error) goto Err_Exit;
1008
1009 /* The backend code always tries to delete the virtual
1010 * extent id for exchanging files so we need to lock
1011 * the extents b-tree.
1012 */
1013 error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
1014 if (error) {
1015 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
1016 goto Err_Exit;
1017 }
1018
1019 /* Do the exchange */
1020 error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
1021 from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
1022 from_cp->c_parentcnid, to_cp->c_parentcnid,
1023 from_cp->c_hint, to_cp->c_hint));
1024
1025 (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
1026 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
1027
1028 if (error != E_NONE) {
1029 goto Err_Exit;
1030 }
1031
1032 /* Purge the vnodes from the name cache */
1033 if (from_vp)
1034 cache_purge(from_vp);
1035 if (to_vp)
1036 cache_purge(to_vp);
1037
1038 /* Save a copy of from attributes before swapping. */
1039 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
1040 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
1041
1042 /*
1043 * Swap the descriptors and all non-fork related attributes.
1044 * (except the modify date)
1045 */
1046 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
1047
1048 from_cp->c_hint = 0;
1049 from_cp->c_fileid = from_cp->c_cnid;
1050 from_cp->c_itime = to_cp->c_itime;
1051 from_cp->c_btime = to_cp->c_btime;
1052 from_cp->c_atime = to_cp->c_atime;
1053 from_cp->c_ctime = to_cp->c_ctime;
1054 from_cp->c_gid = to_cp->c_gid;
1055 from_cp->c_uid = to_cp->c_uid;
1056 from_cp->c_flags = to_cp->c_flags;
1057 from_cp->c_mode = to_cp->c_mode;
1058 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
1059
1060 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
1061 to_cp->c_hint = 0;
1062 to_cp->c_fileid = to_cp->c_cnid;
1063 to_cp->c_itime = tempattr.ca_itime;
1064 to_cp->c_btime = tempattr.ca_btime;
1065 to_cp->c_atime = tempattr.ca_atime;
1066 to_cp->c_ctime = tempattr.ca_ctime;
1067 to_cp->c_gid = tempattr.ca_gid;
1068 to_cp->c_uid = tempattr.ca_uid;
1069 to_cp->c_flags = tempattr.ca_flags;
1070 to_cp->c_mode = tempattr.ca_mode;
1071 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
1072
1073 /* Reinsert into the cnode hash under new file IDs */
1074 hfs_chashremove(from_cp);
1075 hfs_chashremove(to_cp);
1076
1077 hfs_chashinsert(from_cp);
1078 hfs_chashinsert(to_cp);
1079 Err_Exit:
1080 if (to_rvp)
1081 vrele(to_rvp);
1082 if (from_rvp)
1083 vrele(from_rvp);
1084
1085 // XXXdbg
1086 if (started_tr) {
1087 journal_end_transaction(hfsmp->jnl);
1088 }
1089 if (grabbed_lock) {
1090 hfs_global_shared_lock_release(hfsmp);
1091 }
1092
1093 return (error);
1094 }
1095
1096
1097 /*
1098
1099 #% fsync vp L L L
1100 #
1101 vop_fsync {
1102 IN struct vnode *vp;
1103 IN struct ucred *cred;
1104 IN int waitfor;
1105 IN struct proc *p;
1106
1107 */
1108 static int
1109 hfs_fsync(ap)
1110 struct vop_fsync_args /* {
1111 struct vnode *a_vp;
1112 struct ucred *a_cred;
1113 int a_waitfor;
1114 struct proc *a_p;
1115 } */ *ap;
1116 {
1117 struct vnode *vp = ap->a_vp;
1118 struct cnode *cp = VTOC(vp);
1119 struct filefork *fp = NULL;
1120 int retval = 0;
1121 register struct buf *bp;
1122 struct timeval tv;
1123 struct buf *nbp;
1124 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
1125 int s;
1126 int wait;
1127 int retry = 0;
1128
1129 wait = (ap->a_waitfor == MNT_WAIT);
1130
1131 /* HFS directories don't have any data blocks. */
1132 if (vp->v_type == VDIR)
1133 goto metasync;
1134
1135 /*
1136 * For system files flush the B-tree header and
1137 * for regular files write out any clusters
1138 */
1139 if (vp->v_flag & VSYSTEM) {
1140 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1141 // XXXdbg
1142 if (hfsmp->jnl) {
1143 if (BTIsDirty(VTOF(vp))) {
1144 panic("hfs: system file vp 0x%x has dirty blocks (jnl 0x%x)\n",
1145 vp, hfsmp->jnl);
1146 }
1147 } else {
1148 BTFlushPath(VTOF(vp));
1149 }
1150 }
1151 } else if (UBCINFOEXISTS(vp))
1152 (void) cluster_push(vp);
1153
1154 /*
1155 * When MNT_WAIT is requested and the zero fill timeout
1156 * has expired then we must explicitly zero out any areas
1157 * that are currently marked invalid (holes).
1158 */
1159 if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
1160 UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
1161 cp->c_zftimeout != 0) {
1162 int devblksize;
1163 int was_nocache;
1164
1165 if (time.tv_sec < cp->c_zftimeout) {
1166 /* Remember that a force sync was requested. */
1167 cp->c_flag |= C_ZFWANTSYNC;
1168 goto loop;
1169 }
1170 VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
1171 was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
1172 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
1173
1174 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
1175 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
1176 off_t start = invalid_range->rl_start;
1177 off_t end = invalid_range->rl_end;
1178
1179 /* The range about to be written must be validated
1180 * first, so that VOP_CMAP() will return the
1181 * appropriate mapping for the cluster code:
1182 */
1183 rl_remove(start, end, &fp->ff_invalidranges);
1184
1185 (void) cluster_write(vp, (struct uio *) 0,
1186 fp->ff_size,
1187 invalid_range->rl_end + 1,
1188 invalid_range->rl_start,
1189 (off_t)0, devblksize,
1190 IO_HEADZEROFILL | IO_NOZERODIRTY);
1191 cp->c_flag |= C_MODIFIED;
1192 }
1193 (void) cluster_push(vp);
1194 if (!was_nocache)
1195 CLR(vp->v_flag, VNOCACHE_DATA);
1196 cp->c_flag &= ~C_ZFWANTSYNC;
1197 cp->c_zftimeout = 0;
1198 }
1199
1200 /*
1201 * Flush all dirty buffers associated with a vnode.
1202 */
1203 loop:
1204 s = splbio();
1205 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1206 nbp = bp->b_vnbufs.le_next;
1207 if ((bp->b_flags & B_BUSY))
1208 continue;
1209 if ((bp->b_flags & B_DELWRI) == 0)
1210 panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp, hfsmp);
1211 // XXXdbg
1212 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1213 if ((bp->b_flags & B_META) == 0) {
1214 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1215 bp, hfsmp->jnl);
1216 }
1217 // if journal_active() returns >= 0 then the journal is ok and we
1218 // shouldn't do anything to this locked block (because it is part
1219 // of a transaction). otherwise we'll just go through the normal
1220 // code path and flush the buffer.
1221 if (journal_active(hfsmp->jnl) >= 0) {
1222 continue;
1223 }
1224 }
1225
1226 bremfree(bp);
1227 bp->b_flags |= B_BUSY;
1228 /* Clear B_LOCKED, should only be set on meta files */
1229 bp->b_flags &= ~B_LOCKED;
1230
1231 splx(s);
1232 /*
1233 * Wait for I/O associated with indirect blocks to complete,
1234 * since there is no way to quickly wait for them below.
1235 */
1236 if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
1237 (void) bawrite(bp);
1238 else
1239 (void) VOP_BWRITE(bp);
1240 goto loop;
1241 }
1242
1243 if (wait) {
1244 while (vp->v_numoutput) {
1245 vp->v_flag |= VBWAIT;
1246 tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);
1247 }
1248
1249 // XXXdbg -- is checking for hfsmp->jnl == NULL the right
1250 // thing to do?
1251 if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
1252 /* still have some dirty buffers */
1253 if (retry++ > 10) {
1254 vprint("hfs_fsync: dirty", vp);
1255 splx(s);
1256 /*
1257 * Looks like the requests are not
1258 * getting queued to the driver.
1259 * Retrying here causes a cpu bound loop.
1260 * Yield to the other threads and hope
1261 * for the best.
1262 */
1263 (void)tsleep((caddr_t)&vp->v_numoutput,
1264 PRIBIO + 1, "hfs_fsync", hz/10);
1265 retry = 0;
1266 } else {
1267 splx(s);
1268 }
1269 /* try again */
1270 goto loop;
1271 }
1272 }
1273 splx(s);
1274
1275 metasync:
1276 tv = time;
1277 if (vp->v_flag & VSYSTEM) {
1278 if (VTOF(vp)->fcbBTCBPtr != NULL)
1279 BTSetLastSync(VTOF(vp), tv.tv_sec);
1280 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1281 } else /* User file */ {
1282 retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);
1283
1284 /* When MNT_WAIT is requested push out any delayed meta data */
1285 if ((retval == 0) && wait && cp->c_hint &&
1286 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
1287 hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);
1288 }
1289 }
1290
1291 return (retval);
1292 }
1293
1294 /* Sync an hfs catalog b-tree node */
1295 static int
1296 hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
1297 {
1298 struct vnode *vp;
1299 struct buf *bp;
1300 struct buf *nbp;
1301 int s;
1302
1303 vp = HFSTOVCB(hfsmp)->catalogRefNum;
1304
1305 // XXXdbg - don't need to do this on a journaled volume
1306 if (hfsmp->jnl) {
1307 return 0;
1308 }
1309
1310 if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)
1311 return (0);
1312
1313 /*
1314 * Look for a matching node that has been delayed
1315 * but is not part of a set (B_LOCKED).
1316 */
1317 s = splbio();
1318 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1319 nbp = bp->b_vnbufs.le_next;
1320 if (bp->b_flags & B_BUSY)
1321 continue;
1322 if (bp->b_lblkno == node) {
1323 if (bp->b_flags & B_LOCKED)
1324 break;
1325
1326 bremfree(bp);
1327 bp->b_flags |= B_BUSY;
1328 splx(s);
1329 (void) VOP_BWRITE(bp);
1330 goto exit;
1331 }
1332 }
1333 splx(s);
1334 exit:
1335 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1336
1337 return (0);
1338 }
1339
1340 __private_extern__
1341 int
1342 hfs_btsync(struct vnode *vp, int sync_transaction)
1343 {
1344 struct cnode *cp = VTOC(vp);
1345 register struct buf *bp;
1346 struct timeval tv;
1347 struct buf *nbp;
1348 struct hfsmount *hfsmp = VTOHFS(vp);
1349 int s;
1350
1351 /*
1352 * Flush all dirty buffers associated with b-tree.
1353 */
1354 loop:
1355 s = splbio();
1356
1357 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1358 nbp = bp->b_vnbufs.le_next;
1359 if ((bp->b_flags & B_BUSY))
1360 continue;
1361 if ((bp->b_flags & B_DELWRI) == 0)
1362 panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);
1363
1364 // XXXdbg
1365 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1366 if ((bp->b_flags & B_META) == 0) {
1367 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1368 bp, hfsmp->jnl);
1369 }
1370 // if journal_active() returns >= 0 then the journal is ok and we
1371 // shouldn't do anything to this locked block (because it is part
1372 // of a transaction). otherwise we'll just go through the normal
1373 // code path and flush the buffer.
1374 if (journal_active(hfsmp->jnl) >= 0) {
1375 continue;
1376 }
1377 }
1378
1379 if (sync_transaction && !(bp->b_flags & B_LOCKED))
1380 continue;
1381
1382 bremfree(bp);
1383 bp->b_flags |= B_BUSY;
1384 bp->b_flags &= ~B_LOCKED;
1385
1386 splx(s);
1387
1388 (void) bawrite(bp);
1389
1390 goto loop;
1391 }
1392 splx(s);
1393
1394 tv = time;
1395 if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
1396 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
1397 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1398
1399 return 0;
1400 }
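/*
 * Note on hfs_btsync() above: when sync_transaction is non-zero, dirty
 * buffers that are not marked B_LOCKED (i.e. not part of the current
 * transaction set) are skipped; on journaled volumes the transaction
 * blocks themselves are left for the journal code to flush.
 */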
1401
1402 /*
1403 * Rmdir system call.
1404 #% rmdir dvp L U U
1405 #% rmdir vp L U U
1406 #
1407 vop_rmdir {
1408 IN WILLRELE struct vnode *dvp;
1409 IN WILLRELE struct vnode *vp;
1410 IN struct componentname *cnp;
1411
1412 */
1413 static int
1414 hfs_rmdir(ap)
1415 struct vop_rmdir_args /* {
1416 struct vnode *a_dvp;
1417 struct vnode *a_vp;
1418 struct componentname *a_cnp;
1419 } */ *ap;
1420 {
1421 struct vnode *vp = ap->a_vp;
1422 struct vnode *dvp = ap->a_dvp;
1423 struct proc *p = ap->a_cnp->cn_proc;
1424 struct cnode *cp;
1425 struct cnode *dcp;
1426 struct hfsmount * hfsmp;
1427 struct timeval tv;
1428 int error = 0, started_tr = 0, grabbed_lock = 0;
1429
1430 cp = VTOC(vp);
1431 dcp = VTOC(dvp);
1432 hfsmp = VTOHFS(vp);
1433
1434 if (dcp == cp) {
1435 vrele(dvp);
1436 vput(vp);
1437 return (EINVAL); /* cannot remove "." */
1438 }
1439
1440 // XXXdbg
1441 hfs_global_shared_lock_acquire(hfsmp);
1442 grabbed_lock = 1;
1443 if (hfsmp->jnl) {
1444 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1445 goto out;
1446 }
1447 started_tr = 1;
1448 }
1449
1450 /*
1451 * Verify the directory is empty (and valid).
1452 * (Rmdir ".." won't be valid since
1453 * ".." will contain a reference to
1454 * the current directory and thus be
1455 * non-empty.)
1456 */
1457 if (cp->c_entries != 0) {
1458 error = ENOTEMPTY;
1459 goto out;
1460 }
1461 if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
1462 error = EPERM;
1463 goto out;
1464 }
1465
1466 /* Remove the entry from the namei cache: */
1467 cache_purge(vp);
1468
1469 /* Lock catalog b-tree */
1470 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1471 if (error) goto out;
1472
1473 if (cp->c_entries > 0)
1474 panic("hfs_rmdir: attempting to delete a non-empty directory!");
1475 /* Remove entry from catalog */
1476 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
1477
1478 /* Unlock catalog b-tree */
1479 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1480 if (error) goto out;
1481
1482 #if QUOTA
1483 if (!hfs_getinoquota(cp))
1484 (void)hfs_chkiq(cp, -1, NOCRED, 0);
1485 #endif /* QUOTA */
1486
1487 /* The parent lost a child */
1488 if (dcp->c_entries > 0)
1489 dcp->c_entries--;
1490 if (dcp->c_nlink > 0)
1491 dcp->c_nlink--;
1492 dcp->c_flag |= C_CHANGE | C_UPDATE;
1493 tv = time;
1494 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
1495
1496 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
1497
1498 cp->c_mode = 0; /* Makes the vnode go away...see inactive */
1499 cp->c_flag |= C_NOEXISTS;
1500 out:
1501 if (dvp)
1502 vput(dvp);
1503 vput(vp);
1504
1505 // XXXdbg
1506 if (started_tr) {
1507 journal_end_transaction(hfsmp->jnl);
1508 }
1509 if (grabbed_lock) {
1510 hfs_global_shared_lock_release(hfsmp);
1511 }
1512
1513 return (error);
1514 }
1515
1516 /*
1517
1518 #% remove dvp L U U
1519 #% remove vp L U U
1520 #
1521 vop_remove {
1522 IN WILLRELE struct vnode *dvp;
1523 IN WILLRELE struct vnode *vp;
1524 IN struct componentname *cnp;
1525
1526 */
1527
1528 static int
1529 hfs_remove(ap)
1530 struct vop_remove_args /* {
1531 struct vnode *a_dvp;
1532 struct vnode *a_vp;
1533 struct componentname *a_cnp;
1534 } */ *ap;
1535 {
1536 struct vnode *vp = ap->a_vp;
1537 struct vnode *dvp = ap->a_dvp;
1538 struct vnode *rvp = NULL;
1539 struct cnode *cp;
1540 struct cnode *dcp;
1541 struct hfsmount *hfsmp;
1542 struct proc *p = current_proc();
1543 int dataforkbusy = 0;
1544 int rsrcforkbusy = 0;
1545 int truncated = 0;
1546 struct timeval tv;
1547 int error = 0;
1548 int started_tr = 0, grabbed_lock = 0;
1549
1550 /* Redirect directories to rmdir */
1551 if (vp->v_type == VDIR)
1552 return (hfs_rmdir(ap));
1553
1554 cp = VTOC(vp);
1555 dcp = VTOC(dvp);
1556 hfsmp = VTOHFS(vp);
1557
1558 if (cp->c_parentcnid != dcp->c_cnid) {
1559 error = EINVAL;
1560 goto out;
1561 }
1562
1563 /* Make sure a remove is permitted */
1564 if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
1565 (VTOC(dvp)->c_flags & APPEND) ||
1566 VNODE_IS_RSRC(vp)) {
1567 error = EPERM;
1568 goto out;
1569 }
1570
1571 /*
1572 * Acquire a vnode for a non-empty resource fork.
1573 * (needed for VOP_TRUNCATE)
1574 */
1575 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1576 error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);
1577 if (error)
1578 goto out;
1579 }
1580
1581 // XXXdbg - don't allow deleting the journal or journal_info_block
1582 if (hfsmp->jnl && cp->c_datafork) {
1583 struct HFSPlusExtentDescriptor *extd;
1584
1585 extd = &cp->c_datafork->ff_data.cf_extents[0];
1586 if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
1587 error = EPERM;
1588 goto out;
1589 }
1590 }
1591
1592 /*
1593 * Check if this file is being used.
1594 *
1595 * The namei done for the remove took a reference on the
1596 * vnode (vp). And we took a ref on the resource vnode (rvp).
1597 * Hence set 1 in the tookref parameter of ubc_isinuse().
1598 */
1599 if (UBCISVALID(vp) && ubc_isinuse(vp, 1))
1600 dataforkbusy = 1;
1601 if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))
1602 rsrcforkbusy = 1;
1603
1604 /*
1605 * Carbon semantics prohibit deleting busy files.
1606 * (enforced when NODELETEBUSY is requested)
1607 */
1608 if ((dataforkbusy || rsrcforkbusy) &&
1609 ((ap->a_cnp->cn_flags & NODELETEBUSY) ||
1610 (hfsmp->hfs_private_metadata_dir == 0))) {
1611 error = EBUSY;
1612 goto out;
1613 }
1614
1615 // XXXdbg
1616 hfs_global_shared_lock_acquire(hfsmp);
1617 grabbed_lock = 1;
1618 if (hfsmp->jnl) {
1619 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1620 goto out;
1621 }
1622 started_tr = 1;
1623 }
1624
1625 /* Remove our entry from the namei cache. */
1626 cache_purge(vp);
1627
1628 // XXXdbg - if we're journaled, kill any dirty symlink buffers
1629 if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
1630 struct buf *bp, *nbp;
1631
1632 recheck:
1633 for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
1634 nbp = bp->b_vnbufs.le_next;
1635
1636 if ((bp->b_flags & B_BUSY)) {
1637 // if it was busy, someone else must be dealing
1638 // with it so just move on.
1639 continue;
1640 }
1641
1642 if (!(bp->b_flags & B_META)) {
1643 panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);
1644 }
1645
1646 // if it's part of the current transaction, kill it.
1647 if (bp->b_flags & B_LOCKED) {
1648 bremfree(bp);
1649 bp->b_flags |= B_BUSY;
1650 journal_kill_block(hfsmp->jnl, bp);
1651 goto recheck;
1652 }
1653 }
1654 }
1655 // XXXdbg
1656
1657 /*
1658 * Truncate any non-busy forks. Busy forks will
1659 * get truncated when their vnode goes inactive.
1660 *
1661 * (Note: hard links are truncated in VOP_INACTIVE)
1662 */
1663 if ((cp->c_flag & C_HARDLINK) == 0) {
1664 int mode = cp->c_mode;
1665
1666 if (!dataforkbusy && cp->c_datafork->ff_blocks != 0) {
1667 cp->c_mode = 0; /* Suppress VOP_UPDATES */
1668 error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);
1669 cp->c_mode = mode;
1670 if (error)
1671 goto out;
1672 truncated = 1;
1673 }
1674 if (!rsrcforkbusy && rvp) {
1675 cp->c_mode = 0; /* Suppress VOP_UPDATES */
1676 error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);
1677 cp->c_mode = mode;
1678 if (error && !dataforkbusy)
1679 goto out;
1680 else {
1681 /*
1682 * XXX could also force an update on vp
1683 * and fail the remove.
1684 */
1685 error = 0;
1686 }
1687 truncated = 1;
1688 }
1689 }
1690 /*
1691 * There are 3 remove cases to consider:
1692 * 1. File is a hardlink ==> remove the link
1693 * 2. File is busy (in use) ==> move/rename the file
1694 * 3. File is not in use ==> remove the file
1695 */
1696
1697 if (cp->c_flag & C_HARDLINK) {
1698 struct cat_desc desc;
1699
1700 if ((ap->a_cnp->cn_flags & HASBUF) == 0 ||
1701 ap->a_cnp->cn_nameptr[0] == '\0') {
1702 error = ENOENT; /* name missing! */
1703 goto out;
1704 }
1705
1706 /* Setup a descriptor for the link */
1707 bzero(&desc, sizeof(desc));
1708 desc.cd_nameptr = ap->a_cnp->cn_nameptr;
1709 desc.cd_namelen = ap->a_cnp->cn_namelen;
1710 desc.cd_parentcnid = dcp->c_cnid;
1711 /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
1712 desc.cd_cnid = cp->c_cnid;
1713
1714 /* Lock catalog b-tree */
1715 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1716 if (error)
1717 goto out;
1718
1719 /* Delete the link record */
1720 error = cat_delete(hfsmp, &desc, &cp->c_attr);
1721
1722 if ((error == 0) && (--cp->c_nlink < 1)) {
1723 char inodename[32];
1724 char delname[32];
1725 struct cat_desc to_desc;
1726 struct cat_desc from_desc;
1727
1728 /*
1729 * This is now essentially an open deleted file.
1730 * Rename it to reflect this state, which makes
1731 * orphan file cleanup easier (see hfs_remove_orphans).
1732 * Note: a rename failure here is not fatal.
1733 */
1734 MAKE_INODE_NAME(inodename, cp->c_rdev);
1735 bzero(&from_desc, sizeof(from_desc));
1736 from_desc.cd_nameptr = inodename;
1737 from_desc.cd_namelen = strlen(inodename);
1738 from_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1739 from_desc.cd_flags = 0;
1740 from_desc.cd_cnid = cp->c_fileid;
1741
1742 MAKE_DELETED_NAME(delname, cp->c_fileid);
1743 bzero(&to_desc, sizeof(to_desc));
1744 to_desc.cd_nameptr = delname;
1745 to_desc.cd_namelen = strlen(delname);
1746 to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1747 to_desc.cd_flags = 0;
1748 to_desc.cd_cnid = cp->c_fileid;
1749
1750 (void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
1751 &to_desc, (struct cat_desc *)NULL);
1752 cp->c_flag |= C_DELETED;
1753 }
1754
1755 /* Unlock the Catalog */
1756 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1757
1758 /* All done with component name... */
1759 if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME))
1760 FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
1761
1762 if (error != 0)
1763 goto out;
1764
1765 cp->c_flag |= C_CHANGE;
1766 tv = time;
1767 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1768
1769 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
1770
1771 } else if (dataforkbusy || rsrcforkbusy) {
1772 char delname[32];
1773 struct cat_desc to_desc;
1774 struct cat_desc todir_desc;
1775
1776 /*
1777 * Orphan this file (move to hidden directory).
1778 */
1779 bzero(&todir_desc, sizeof(todir_desc));
1780 todir_desc.cd_parentcnid = 2;
1781
1782 MAKE_DELETED_NAME(delname, cp->c_fileid);
1783 bzero(&to_desc, sizeof(to_desc));
1784 to_desc.cd_nameptr = delname;
1785 to_desc.cd_namelen = strlen(delname);
1786 to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1787 to_desc.cd_flags = 0;
1788 to_desc.cd_cnid = cp->c_cnid;
1789
1790 /* Lock catalog b-tree */
1791 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1792 if (error)
1793 goto out;
1794
1795 error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
1796 &to_desc, (struct cat_desc *)NULL);
1797
1798 // XXXdbg - only bump this count if we were successful
1799 if (error == 0) {
1800 hfsmp->hfs_privdir_attr.ca_entries++;
1801 }
1802 (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
1803 &hfsmp->hfs_privdir_attr, NULL, NULL);
1804
1805 /* Unlock the Catalog */
1806 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1807 if (error) goto out;
1808
1809 cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;
1810 --cp->c_nlink;
1811 tv = time;
1812 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1813
1814 } else /* Not busy */ {
1815
1816 if (vp->v_type == VDIR && cp->c_entries > 0)
1817 panic("hfs_remove: attempting to delete a non-empty directory!");
1818 if (vp->v_type != VDIR && cp->c_blocks > 0)
1819 panic("hfs_remove: attempting to delete a non-empty file!");
1820
1821 /* Lock catalog b-tree */
1822 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1823 if (error)
1824 goto out;
1825
1826 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
1827
1828 if (error && error != ENXIO && truncated) {
1829 if ((cp->c_datafork && cp->c_datafork->ff_data.cf_size != 0) ||
1830 (cp->c_rsrcfork && cp->c_rsrcfork->ff_data.cf_size != 0)) {
1831 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
1832 error, cp->c_datafork->ff_data.cf_size, cp->c_rsrcfork->ff_data.cf_size);
1833 } else {
1834 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
1835 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
1836 }
1837 }
1838
1839 /* Unlock the Catalog */
1840 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1841 if (error) goto out;
1842
1843 #if QUOTA
1844 if (!hfs_getinoquota(cp))
1845 (void)hfs_chkiq(cp, -1, NOCRED, 0);
1846 #endif /* QUOTA */
1847
1848 cp->c_mode = 0;
1849 cp->c_flag |= C_CHANGE | C_NOEXISTS;
1850 --cp->c_nlink;
1851 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
1852 }
1853
1854 /*
1855 * All done with this cnode's descriptor...
1856 *
1857 * Note: all future catalog calls for this cnode must be
1858 * by fileid only. This is OK for HFS (which doesn't have
1859 * file thread records) since HFS doesn't support hard
1860 * links or the removal of busy files.
1861 */
1862 cat_releasedesc(&cp->c_desc);
1863
1864 /* In all three cases the parent lost a child */
1865 if (dcp->c_entries > 0)
1866 dcp->c_entries--;
1867 if (dcp->c_nlink > 0)
1868 dcp->c_nlink--;
1869 dcp->c_flag |= C_CHANGE | C_UPDATE;
1870 tv = time;
1871 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
1872
1873 if (rvp)
1874 vrele(rvp);
1875 VOP_UNLOCK(vp, 0, p);
1876 // XXXdbg - try to prevent the lost ubc_info panic
1877 if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
1878 (void) ubc_uncache(vp);
1879 }
1880 vrele(vp);
1881 vput(dvp);
1882
1883 // XXXdbg
1884 if (started_tr) {
1885 journal_end_transaction(hfsmp->jnl);
1886 }
1887 if (grabbed_lock) {
1888 hfs_global_shared_lock_release(hfsmp);
1889 }
1890
1891 return (0);
1892
1893 out:
1894 if (rvp)
1895 vrele(rvp);
1896
1897 /* Commit the truncation to the catalog record */
1898 if (truncated) {
1899 cp->c_flag |= C_CHANGE | C_UPDATE;
1900 tv = time;
1901 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1902 }
1903 vput(vp);
1904 vput(dvp);
1905
1906 // XXXdbg
1907 if (started_tr) {
1908 journal_end_transaction(hfsmp->jnl);
1909 }
1910 if (grabbed_lock) {
1911 hfs_global_shared_lock_release(hfsmp);
1912 }
1913
1914 return (error);
1915 }
1916
1917
1918 __private_extern__ void
1919 replace_desc(struct cnode *cp, struct cat_desc *cdp)
1920 {
1921 /* First release allocated name buffer */
1922 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
1923 char *name = cp->c_desc.cd_nameptr;
1924
1925 cp->c_desc.cd_nameptr = 0;
1926 cp->c_desc.cd_namelen = 0;
1927 cp->c_desc.cd_flags &= ~CD_HASBUF;
1928 FREE(name, M_TEMP);
1929 }
1930 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
1931
1932 /* Cnode now owns the name buffer */
1933 cdp->cd_nameptr = 0;
1934 cdp->cd_namelen = 0;
1935 cdp->cd_flags &= ~CD_HASBUF;
1936 }
1937
1938
1939 /*
1940 #
1941 #% rename fdvp U U U
1942 #% rename fvp U U U
1943 #% rename tdvp L U U
1944 #% rename tvp X U U
1945 #
1946 vop_rename {
1947 IN WILLRELE struct vnode *fdvp;
1948 IN WILLRELE struct vnode *fvp;
1949 IN struct componentname *fcnp;
1950 IN WILLRELE struct vnode *tdvp;
1951 IN WILLRELE struct vnode *tvp;
1952 IN struct componentname *tcnp;
1953 };
1954 */
1955 /*
1956 * Rename a cnode.
1957 *
1958 * The VFS layer guarantees that source and destination will
1959 * either both be directories, or both not be directories.
1960 *
1961 * When the target is a directory, hfs_rename must ensure
1962 * that it is empty.
1963 */
1964
1965 static int
1966 hfs_rename(ap)
1967 struct vop_rename_args /* {
1968 struct vnode *a_fdvp;
1969 struct vnode *a_fvp;
1970 struct componentname *a_fcnp;
1971 struct vnode *a_tdvp;
1972 struct vnode *a_tvp;
1973 struct componentname *a_tcnp;
1974 } */ *ap;
1975 {
1976 struct vnode *tvp = ap->a_tvp;
1977 struct vnode *tdvp = ap->a_tdvp;
1978 struct vnode *fvp = ap->a_fvp;
1979 struct vnode *fdvp = ap->a_fdvp;
1980 struct componentname *tcnp = ap->a_tcnp;
1981 struct componentname *fcnp = ap->a_fcnp;
1982 struct cnode *fcp = NULL;
1983 struct cnode *fdcp = NULL;
1984 struct cnode *tdcp = NULL;
1985 struct cnode *tcp = NULL;
1986 struct cat_desc from_desc;
1987 struct cat_desc to_desc;
1988 struct cat_desc out_desc;
1989 struct hfsmount *hfsmp;
1990 struct proc *p = fcnp->cn_proc;
1991 struct timeval tv;
1992 int retval = 0, started_tr = 0, grabbed_lock = 0;
1993 int fdvp_locked = 0;
1994 int fvp_locked = 0;
1995 cnid_t oldparent = 0;
1996 cnid_t newparent = 0;
1997
1998 // XXXdbg
1999 if (fvp)
2000 hfsmp = VTOHFS(fvp);
2001 else if (tvp)
2002 hfsmp = VTOHFS(tvp);
2003 else
2004 hfsmp = NULL;
2005
2006 #if HFS_DIAGNOSTIC
2007 if ((tcnp->cn_flags & HASBUF) == 0 ||
2008 (fcnp->cn_flags & HASBUF) == 0)
2009 panic("hfs_rename: no name");
2010 #endif
2011 /*
2012 * When fvp matches tvp they must be case variants
2013 * or hard links, and if they are in the same directory then
2014 * tvp really doesn't exist (see VFS rename).
2015 * XXX Hard link rename is still broken/ignored. If they are
2016 * in different directories then we must have hard links.
2017 * Comments further down describe behaviour of hard links in same dir.
2018 * Note case insensitivity was and still is presumed.
2019 */
2020 if (fvp == tvp) {
2021 if (fdvp != tdvp) {
2022 retval = 0;
2023 goto abortop;
2024 }
2025 tvp = NULL;
2026 }
2027
2028 /*
2029 * Check for cross-device rename.
2030 */
2031 if ((fvp->v_mount != tdvp->v_mount) ||
2032 (tvp && (fvp->v_mount != tvp->v_mount))) {
2033 retval = EXDEV;
2034 goto abortop;
2035 }
2036
2037 /*
2038 * Make sure a remove of "to" vnode is permitted.
2039 */
2040 if (tvp && ((VTOC(tvp)->c_flags & (IMMUTABLE | APPEND)) ||
2041 (VTOC(tdvp)->c_flags & APPEND))) {
2042 retval = EPERM;
2043 goto abortop;
2044 }
2045
2046 /*
2047 * Make sure "from" vnode and its parent are changeable.
2048 */
2049 fdcp = VTOC(fdvp);
2050 fcp = VTOC(fvp);
2051 oldparent = fdcp->c_cnid;
2052 if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) {
2053 retval = EPERM;
2054 goto abortop;
2055 }
2056
2057 if (fcp->c_parentcnid != fdcp->c_cnid) {
2058 retval = EINVAL;
2059 goto abortop;
2060 }
2061
2062 /*
2063 * Check if names already match...
2064 * XXX The name being checked is from fcp rather than fcnp! If
2065 * there are hard links, fcp yields the name which was
2066 * most recently looked up (yes that design is vulnerable to races)
2067 * and the name most recently looked up was the target, so they
2068 * compare equal and we ignore the rename. XXX
2069 */
2070 if (fvp == ap->a_tvp &&
2071 (bcmp(fcp->c_desc.cd_nameptr, tcnp->cn_nameptr,
2072 fcp->c_desc.cd_namelen) == 0)) {
2073 retval = 0;
2074 goto abortop;
2075 }
2076
2077 /* XXX This doesn't make sense for HFS...
2078 *
2079 * Be sure we are not renaming ".", "..", or an alias of ".". This
2080 * leads to a crippled directory tree. It's pretty tough to do a
2081 * "ls" or "pwd" with the "." directory entry missing, and "cd .."
2082 * doesn't work if the ".." entry is missing.
2083 */
2084 if (fvp->v_type == VDIR) {
2085 if ((fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.')
2086 || fdcp == fcp
2087 || (fcnp->cn_flags&ISDOTDOT)
2088 || (fcp->c_flag & C_RENAME)) {
2089 retval = EINVAL;
2090 goto abortop;
2091 }
2092 fcp->c_flag |= C_RENAME;
2093 }
2094
2095 /* XXX UFS does vrele(fdvp) here */
2096
2097 /* From now on use bad instead of abort to exit */
2098
2099 tdcp = VTOC(tdvp);
2100 if (tvp)
2101 tcp = VTOC(tvp);
2102
2103 newparent = tdcp->c_cnid;
2104
2105 // XXXdbg - don't allow renaming the journal or journal_info_block
2106 if (hfsmp->jnl && fcp->c_datafork) {
2107 struct HFSPlusExtentDescriptor *extd;
2108
2109 extd = &fcp->c_datafork->ff_data.cf_extents[0];
2110 if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
2111 retval = EPERM;
2112 goto bad;
2113 }
2114 }
2115
2116 if (hfsmp->jnl && tcp && tcp->c_datafork) {
2117 struct HFSPlusExtentDescriptor *extd;
2118
2119 extd = &tcp->c_datafork->ff_data.cf_extents[0];
2120 if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
2121 retval = EPERM;
2122 goto bad;
2123 }
2124 }
2125
2126 retval = VOP_ACCESS(fvp, VWRITE, tcnp->cn_cred, tcnp->cn_proc);
2127 if ((fvp->v_type == VDIR) && (newparent != oldparent)) {
2128 if (retval) /* write access check above */
2129 goto bad;
2130 }
2131 retval = 0; /* Reset value from above, we don't care about it anymore */
2132
2133 /* XXX
2134 * Prevent lock hierarchy violation (deadlock):
2135 *
2136 * If fdvp is the parent of tdvp then we must drop
2137 * tdvp lock before acquiring the lock for fdvp.
2138 *
2139 * XXXdbg - moved this to happen up here *before* we
2140 * start a transaction. otherwise we can
2141 * deadlock because the vnode layer may get
2142 * this lock for someone else and then they'll
2143 * never be able to start a transaction.
2144 */
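/*
 * Worked example (paths are hypothetical): renaming /a/x to /a/b/y
 * gives fdvp == /a and tdvp == /a/b, i.e. fdcp->c_cnid equals
 * tdcp->c_parentcnid. The code below then releases tdvp, takes the
 * fdvp (parent) lock first, and re-locks tdvp, preserving a
 * parent-before-child lock order.
 */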
2145 if (newparent != oldparent) {
2146 if (fdcp->c_cnid == tdcp->c_parentcnid) {
2147 vput(tdvp);
2148 vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p);
2149 vget(tdvp, LK_EXCLUSIVE | LK_RETRY, p);
2150 } else {
2151 vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p);
2152 }
2153 }
2154 fdvp_locked = 1;
2155 if ((retval = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)))
2156 goto bad;
2157 fvp_locked = 1;
2158
2159 // XXXdbg
2160 hfs_global_shared_lock_acquire(hfsmp);
2161 grabbed_lock = 1;
2162 if (hfsmp->jnl) {
2163 if ((retval = journal_start_transaction(hfsmp->jnl)) != 0) {
2164 goto bad;
2165 }
2166 started_tr = 1;
2167 }
2168
2169 /*
2170 * If the destination exists, then be sure its type (file or dir)
2171 * matches that of the source. And, if it is a directory make sure
2172 * it is empty. Then delete the destination.
2173 */
2174 if (tvp) {
2175 /*
2176 * If the parent directory is "sticky", then the user must
2177 * own the parent directory, or the destination of the rename,
2178 * otherwise the destination may not be changed (except by
2179 * root). This implements append-only directories.
2180 */
2181 if ((tdcp->c_mode & S_ISTXT) && (tcnp->cn_cred->cr_uid != 0) &&
2182 tcnp->cn_cred->cr_uid != tdcp->c_uid &&
2183 tcnp->cn_cred->cr_uid != tcp->c_uid) {
2184 retval = EPERM;
2185 goto bad;
2186 }
2187
2188 /*
2189 * Target must be empty if a directory.
2190 */
2191 if (S_ISDIR(tcp->c_mode) && (tcp->c_nlink > 2)) {
2192 retval = ENOTEMPTY;
2193 goto bad;
2194 }
2195
2196 /*
2197 * VOP_REMOVE will vput tdvp so we had better bump
2198 * its ref count and relock it; always set tvp to
2199 * NULL afterwards to indicate that we're done with it.
2200 */
2201 VREF(tdvp);
2202
2203 cache_purge(tvp);
2204
2205 tcnp->cn_flags &= ~SAVENAME;
2206
2207 if (tvp->v_type == VDIR)
2208 retval = VOP_RMDIR(tdvp, tvp, tcnp);
2209 else
2210 retval = VOP_REMOVE(tdvp, tvp, tcnp);
2211
2212 (void) vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p);
2213 tvp = NULL;
2214 tcp = NULL;
2215 if (retval)
2216 goto bad;
2217
2218 }
2219
2220 /* remove the existing entry from the namei cache: */
2221 cache_purge(fvp);
2222
2223 bzero(&from_desc, sizeof(from_desc));
2224 from_desc.cd_nameptr = fcnp->cn_nameptr;
2225 from_desc.cd_namelen = fcnp->cn_namelen;
2226 from_desc.cd_parentcnid = fdcp->c_cnid;
2227 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2228 from_desc.cd_cnid = fcp->c_cnid;
2229 bzero(&to_desc, sizeof(to_desc));
2230 to_desc.cd_nameptr = tcnp->cn_nameptr;
2231 to_desc.cd_namelen = tcnp->cn_namelen;
2232 to_desc.cd_parentcnid = tdcp->c_cnid;
2233 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2234 to_desc.cd_cnid = fcp->c_cnid;
2235
2236 /* Lock catalog b-tree */
2237 retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
2238 if (retval) {
2239 goto bad;
2240 }
2241 retval = cat_rename(hfsmp, &from_desc, &tdcp->c_desc,
2242 &to_desc, &out_desc);
2243
2244 /* Unlock catalog b-tree */
2245 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
2246
2247 if (newparent != oldparent) {
2248 VOP_UNLOCK(fdvp, 0, p);
2249 fdvp_locked = 0;
2250 }
2251
2252 if (retval) goto bad;
2253
2254 /* update cnode's catalog descriptor */
2255 replace_desc(fcp, &out_desc);
2256
2257 fcp->c_flag &= ~C_RENAME;
2258
2259 /*
2260 * Time stamp both parent directories.
2261 * Note that if this is a rename within the same directory
2262 * (where tdcp == fdcp),
2263 * the code below is still safe and correct.
2264 */
2265 if (fdcp->c_nlink > 0)
2266 fdcp->c_nlink--;
2267 if (fdcp->c_entries > 0)
2268 fdcp->c_entries--;
2269 tdcp->c_nlink++;
2270 tdcp->c_entries++;
2271 fdcp->c_flag |= C_CHANGE | C_UPDATE;
2272 tdcp->c_flag |= C_CHANGE | C_UPDATE;
2273 tv = time;
2274 CTIMES(fdcp, &tv, &tv);
2275 CTIMES(tdcp, &tv, &tv);
2276 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2277
2278 // make sure both directories get updated on disk.
2279 if (fdvp != tdvp) {
2280 (void) VOP_UPDATE(fdvp, &tv, &tv, 0);
2281 }
2282 (void) VOP_UPDATE(tdvp, &tv, &tv, 0);
2283
2284 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
2285 (fdcp->c_cnid == kHFSRootFolderID));
2286 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
2287 (tdcp->c_cnid == kHFSRootFolderID));
2288
2289 vput(tdvp);
2290 vrele(fdvp);
2291 vput(fvp);
2292
2293 // XXXdbg
2294 if (started_tr) {
2295 journal_end_transaction(hfsmp->jnl);
2296 }
2297 if (grabbed_lock) {
2298 hfs_global_shared_lock_release(hfsmp);
2299 }
2300
2301 return (0);
2302
2303 bad:
2304 if (fcp)
2305 fcp->c_flag &= ~C_RENAME;
2306
2307 // XXXdbg make sure both directories get updated on disk.
2308 if (fdvp != tdvp) {
2309 (void) VOP_UPDATE(fdvp, &tv, &tv, 0);
2310 }
2311 (void) VOP_UPDATE(tdvp, &tv, &tv, 0);
2312
2313 if (tdvp == tvp)
2314 vrele(tdvp);
2315 else
2316 vput(tdvp);
2317 if (tvp)
2318 vput(tvp);
2319
2320 if (fdvp_locked)
2321 vput(fdvp);
2322 else
2323 vrele(fdvp);
2324
2325 if (fvp_locked)
2326 vput(fvp);
2327 else
2328 vrele(fvp);
2329
2330 // XXXdbg
2331 if (started_tr) {
2332 journal_end_transaction(hfsmp->jnl);
2333 }
2334 if (grabbed_lock) {
2335 hfs_global_shared_lock_release(hfsmp);
2336 }
2337
2338 return (retval);
2339
2340 abortop:
2341
2342 VOP_ABORTOP(tdvp, tcnp);
2343 if (tdvp == tvp)
2344 vrele(tdvp);
2345 else
2346 vput(tdvp);
2347 if (tvp)
2348 vput(tvp);
2349 VOP_ABORTOP(fdvp, fcnp);
2350 vrele(fdvp);
2351 vrele(fvp);
2352
2353 return (retval);
2354 }
2355
2356
2357
2358 /*
2359 * Mkdir system call
2360 #% mkdir dvp L U U
2361 #% mkdir vpp - L -
2362 #
2363 vop_mkdir {
2364 IN WILLRELE struct vnode *dvp;
2365 OUT struct vnode **vpp;
2366 IN struct componentname *cnp;
2367 IN struct vattr *vap;
2368
2369 We are responsible for freeing the namei buffer,
2370 it is done in hfs_makenode()
2371 */
2372
2373 static int
2374 hfs_mkdir(ap)
2375 struct vop_mkdir_args /* {
2376 struct vnode *a_dvp;
2377 struct vnode **a_vpp;
2378 struct componentname *a_cnp;
2379 struct vattr *a_vap;
2380 } */ *ap;
2381 {
2382 struct vattr *vap = ap->a_vap;
2383
2384 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
2385 ap->a_dvp, ap->a_vpp, ap->a_cnp));
2386 }
2387
2388
2389 /*
2390 * symlink -- make a symbolic link
2391 #% symlink dvp L U U
2392 #% symlink vpp - U -
2393 #
2394 # XXX - note that the return vnode has already been VRELE'ed
2395 # by the filesystem layer. To use it you must use vget,
2396 # possibly with a further namei.
2397 #
2398 vop_symlink {
2399 IN WILLRELE struct vnode *dvp;
2400 OUT WILLRELE struct vnode **vpp;
2401 IN struct componentname *cnp;
2402 IN struct vattr *vap;
2403 IN char *target;
2404
2405 We are responsible for freeing the namei buffer,
2406 it is done in hfs_makenode().
2407
2408 */
2409
2410 static int
2411 hfs_symlink(ap)
2412 struct vop_symlink_args /* {
2413 struct vnode *a_dvp;
2414 struct vnode **a_vpp;
2415 struct componentname *a_cnp;
2416 struct vattr *a_vap;
2417 char *a_target;
2418 } */ *ap;
2419 {
2420 register struct vnode *vp, **vpp = ap->a_vpp;
2421 struct hfsmount *hfsmp;
2422 struct filefork *fp;
2423 int len, error;
2424 struct buf *bp = NULL;
2425
2426 /* HFS standard disks don't support symbolic links */
2427 if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
2428 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
2429 vput(ap->a_dvp);
2430 return (EOPNOTSUPP);
2431 }
2432
2433 /* Check for empty target name */
2434 if (ap->a_target[0] == 0) {
2435 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
2436 vput(ap->a_dvp);
2437 return (EINVAL);
2438 }
2439
2440
2441 hfsmp = VTOHFS(ap->a_dvp);
2442
2443 /* Create the vnode */
2444 if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
2445 ap->a_dvp, vpp, ap->a_cnp))) {
2446 return (error);
2447 }
2448
2449 vp = *vpp;
2450 len = strlen(ap->a_target);
2451 fp = VTOF(vp);
2452 fp->ff_clumpsize = VTOVCB(vp)->blockSize;
2453
2454 // XXXdbg
2455 hfs_global_shared_lock_acquire(hfsmp);
2456 if (hfsmp->jnl) {
2457 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
2458 hfs_global_shared_lock_release(hfsmp);
2459 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
2460 vput(ap->a_dvp);
2461 return (error);
2462 }
2463 }
2464
2465 /* Allocate space for the link */
2466 error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
2467 ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);
2468 if (error)
2469 goto out; /* XXX need to remove link */
2470
2471 /* Write the link to disk */
2472 bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
2473 0, 0, BLK_META);
2474 if (hfsmp->jnl) {
2475 journal_modify_block_start(hfsmp->jnl, bp);
2476 }
2477 bzero(bp->b_data, bp->b_bufsize);
2478 bcopy(ap->a_target, bp->b_data, len);
2479 if (hfsmp->jnl) {
2480 journal_modify_block_end(hfsmp->jnl, bp);
2481 } else {
2482 bawrite(bp);
2483 }
2484 out:
2485 if (hfsmp->jnl) {
2486 journal_end_transaction(hfsmp->jnl);
2487 }
2488 hfs_global_shared_lock_release(hfsmp);
2489 vput(vp);
2490 return (error);
2491 }
2492
2493
2494 /*
2495 * Dummy dirents to simulate the "." and ".." entries of the directory
2496 * in an HFS filesystem. HFS doesn't provide these on disk. Note that
2497 * the size of these entries is the smallest needed to represent them
2498 * (only 12 bytes each).
2499 */
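/*
 * Note: the d_fileno values below are placeholders; hfs_readdir()
 * overwrites them per directory ("." gets the directory's cnid and
 * ".." its parent's) before copying the entries out.
 */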
2500 static hfsdotentry rootdots[2] = {
2501 {
2502 1, /* d_fileno */
2503 sizeof(struct hfsdotentry), /* d_reclen */
2504 DT_DIR, /* d_type */
2505 1, /* d_namlen */
2506 "." /* d_name */
2507 },
2508 {
2509 1, /* d_fileno */
2510 sizeof(struct hfsdotentry), /* d_reclen */
2511 DT_DIR, /* d_type */
2512 2, /* d_namlen */
2513 ".." /* d_name */
2514 }
2515 };
2516
2517 /* 4.3 Note:
2518 * There is some confusion as to what the semantics of uio_offset are.
2519 * In ufs, it represents the actual byte offset within the directory
2520 * "file." HFS, however, just uses it as an entry counter - essentially
2521 * assuming that it has no meaning except to the hfs_readdir function.
2522 * This approach would be more efficient here, but some callers may
2523 * assume the uio_offset acts like a byte offset. NFS in fact
2524 * monkeys around with the offset field a lot between readdir calls.
2525 *
2526 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
2527 * fields is a mess as well. The libc function readdir() returns
2528 * NULL (indicating the end of a directory) when either
2529 * the getdirentries() syscall (which calls this and returns
2530 * the size of the buffer passed in less the value of uiop->uio_resid)
2531 * returns 0, or a direct record with a d_reclen of zero.
2532 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2533 * of the directory by testing uiop->uio_resid == 0. The solution
2534 * is to pad the size of the last struct direct in a given
2535 * block to fill the block if we are not at the end of the directory.
2536 */
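/*
 * For the dot entries the offset is interpreted as a byte offset into
 * the synthesized stream; a sketch, assuming the 12-byte entries noted
 * above:
 *
 *     offset  0 -> "." followed by ".."    (both entries copied out)
 *     offset 12 -> ".." only
 *     offset >= 24 (sizeof(rootdots)) -> entries from the catalog
 */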
2537
2538
2539 /*
2540 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
2541 * returned by the stat() call as the block size. This is mentioned in the man page for getdirentries():
2542 * "Nbytes must be greater than or equal to the block size associated with the file,
2543 * see stat(2)". Might as well settle on the same size as ufs. Second, this makes sure there is enough
2544 * room for the . and .. entries that have to be added manually.
2545 */
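/*
 * (The guard in hfs_readdir() below compares uio_resid against
 * AVERAGE_HFSDIRENTRY_SIZE rather than DIRBLKSIZ; DIRBLKSIZ is the
 * minimum that the getdirentries() man page tells callers to pass.)
 */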
2546
2547 /*
2548 #% readdir vp L L L
2549 #
2550 vop_readdir {
2551 IN struct vnode *vp;
2552 INOUT struct uio *uio;
2553 IN struct ucred *cred;
2554 INOUT int *eofflag;
2555 OUT int *ncookies;
2556 INOUT u_long **cookies;
2557 */
2558 static int
2559 hfs_readdir(ap)
2560 struct vop_readdir_args /* {
2561 struct vnode *vp;
2562 struct uio *uio;
2563 struct ucred *cred;
2564 int *eofflag;
2565 int *ncookies;
2566 u_long **cookies;
2567 } */ *ap;
2568 {
2569 register struct uio *uio = ap->a_uio;
2570 struct cnode *cp = VTOC(ap->a_vp);
2571 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
2572 struct proc *p = current_proc();
2573 off_t off = uio->uio_offset;
2574 int retval = 0;
2575 int eofflag = 0;
2576 void *user_start = NULL;
2577 int user_len;
2578
2579 /* We assume it's all one big buffer... */
2580 if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE)
2581 return EINVAL;
2582
2583 // XXXdbg
2584 // We have to lock the user's buffer here so that we won't
2585 // fault on it after we've acquired a shared lock on the
2586 // catalog file. The issue is that you can get a 3-way
2587 // deadlock if someone else starts a transaction and then
2588 // tries to lock the catalog file but can't because we're
2589 // here and we can't service our page fault because VM is
2590 // blocked trying to start a transaction as a result of
2591 // trying to free up pages for our page fault. It's messy
2592 // but it does happen on dual-processors that are paging
2593 // heavily (see radar 3082639 for more info). By locking
2594 // the buffer up-front we prevent ourselves from faulting
2595 // while holding the shared catalog file lock.
2596 //
2597 // Fortunately this and hfs_search() are the only two places
2598 // currently (10/30/02) that can fault on user data with a
2599 // shared lock on the catalog file.
2600 //
2601 if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
2602 user_start = uio->uio_iov->iov_base;
2603 user_len = uio->uio_iov->iov_len;
2604
2605 if ((retval = vslock(user_start, user_len)) != 0) {
2606 return retval;
2607 }
2608 }
2609
2610
2611 /* Create the entries for . and .. */
2612 if (uio->uio_offset < sizeof(rootdots)) {
2613 caddr_t dep;
2614 size_t dotsize;
2615
2616 rootdots[0].d_fileno = cp->c_cnid;
2617 rootdots[1].d_fileno = cp->c_parentcnid;
2618
2619 if (uio->uio_offset == 0) {
2620 dep = (caddr_t) &rootdots[0];
2621 dotsize = 2* sizeof(struct hfsdotentry);
2622 } else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
2623 dep = (caddr_t) &rootdots[1];
2624 dotsize = sizeof(struct hfsdotentry);
2625 } else {
2626 retval = EINVAL;
2627 goto Exit;
2628 }
2629
2630 retval = uiomove(dep, dotsize, uio);
2631 if (retval != 0)
2632 goto Exit;
2633 }
2634
2635 /* If there are no children then we're done */
2636 if (cp->c_entries == 0) {
2637 eofflag = 1;
2638 retval = 0;
2639 goto Exit;
2640 }
2641
2642 /* Lock catalog b-tree */
2643 retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
2644 if (retval) goto Exit;
2645
2646 retval = cat_getdirentries(hfsmp, &cp->c_desc, uio, &eofflag);
2647
2648 /* Unlock catalog b-tree */
2649 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
2650
2651 if (retval != E_NONE) {
2652 goto Exit;
2653 }
2654
2655 /* Were we already past EOF? */
2656 if (uio->uio_offset == off) {
2657 retval = E_NONE;
2658 goto Exit;
2659 }
2660
2661 cp->c_flag |= C_ACCESS;
2662 /* Bake any cookies */
2663 if (!retval && ap->a_ncookies != NULL) {
2664 struct dirent* dpStart;
2665 struct dirent* dpEnd;
2666 struct dirent* dp;
2667 int ncookies;
2668 u_long *cookies;
2669 u_long *cookiep;
2670
2671 /*
2672 * Only the NFS server uses cookies, and it loads the
2673 * directory block into system space, so we can just look at
2674 * it directly.
2675 */
2676 if (uio->uio_segflg != UIO_SYSSPACE)
2677 panic("hfs_readdir: unexpected uio from NFS server");
2678 dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off));
2679 dpEnd = (struct dirent *) uio->uio_iov->iov_base;
2680 for (dp = dpStart, ncookies = 0;
2681 dp < dpEnd && dp->d_reclen != 0;
2682 dp = (struct dirent *)((caddr_t)dp + dp->d_reclen))
2683 ncookies++;
2684 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
2685 for (dp = dpStart, cookiep = cookies;
2686 dp < dpEnd;
2687 dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
2688 off += dp->d_reclen;
2689 *cookiep++ = (u_long) off;
2690 }
2691 *ap->a_ncookies = ncookies;
2692 *ap->a_cookies = cookies;
2693 }
2694
2695 Exit:;
2696 if (hfsmp->jnl && user_start) {
2697 vsunlock(user_start, user_len, TRUE);
2698 }
2699
2700 if (ap->a_eofflag)
2701 *ap->a_eofflag = eofflag;
2702
2703 return (retval);
2704 }
2705
2706
2707 /*
2708 * Return target name of a symbolic link
2709 #% readlink vp L L L
2710 #
2711 vop_readlink {
2712 IN struct vnode *vp;
2713 INOUT struct uio *uio;
2714 IN struct ucred *cred;
2715 */
2716
2717 static int
2718 hfs_readlink(ap)
2719 struct vop_readlink_args /* {
2720 struct vnode *a_vp;
2721 struct uio *a_uio;
2722 struct ucred *a_cred;
2723 } */ *ap;
2724 {
2725 int retval;
2726 struct vnode *vp = ap->a_vp;
2727 struct cnode *cp;
2728 struct filefork *fp;
2729
2730 if (vp->v_type != VLNK)
2731 return (EINVAL);
2732
2733 cp = VTOC(vp);
2734 fp = VTOF(vp);
2735
2736 /* Zero length sym links are not allowed */
2737 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
2738 VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;
2739 return (EINVAL);
2740 }
2741
2742 /* Cache the path so we don't waste buffer cache resources */
2743 if (fp->ff_symlinkptr == NULL) {
2744 struct buf *bp = NULL;
2745
2746 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
2747 retval = meta_bread(vp, 0,
2748 roundup((int)fp->ff_size,
2749 VTOHFS(vp)->hfs_phys_block_size),
2750 ap->a_cred, &bp);
2751 if (retval) {
2752 if (bp)
2753 brelse(bp);
2754 if (fp->ff_symlinkptr) {
2755 FREE(fp->ff_symlinkptr, M_TEMP);
2756 fp->ff_symlinkptr = NULL;
2757 }
2758 return (retval);
2759 }
2760 bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);
2761 if (bp) {
2762 if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
2763 bp->b_flags |= B_INVAL; /* data no longer needed */
2764 }
2765 brelse(bp);
2766 }
2767 }
2768 retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
2769
2770 return (retval);
2771 }
2772
2773
2774 /*
2775 * hfs abort op, called after namei() when a CREATE/DELETE isn't actually
2776 * done. If a buffer has been saved in anticipation of a CREATE, delete it.
2777 #% abortop dvp = = =
2778 #
2779 vop_abortop {
2780 IN struct vnode *dvp;
2781 IN struct componentname *cnp;
2782
2783 */
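/*
 * The flag test below frees the pathname buffer only when namei()
 * allocated it (HASBUF) and the caller did not ask to keep it for a
 * follow-up operation (SAVESTART); with SAVESTART set, ownership of
 * the buffer stays with the caller.
 */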
2784
2785 /* ARGSUSED */
2786
2787 static int
2788 hfs_abortop(ap)
2789 struct vop_abortop_args /* {
2790 struct vnode *a_dvp;
2791 struct componentname *a_cnp;
2792 } */ *ap;
2793 {
2794 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
2795 FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
2796
2797 return (0);
2798 }
2799
2800
2801 /*
2802 * Lock a cnode. If it's already locked, set the WANT bit and sleep.
2803 #% lock vp U L U
2804 #
2805 vop_lock {
2806 IN struct vnode *vp;
2807 IN int flags;
2808 IN struct proc *p;
2809 */
2810
2811 static int
2812 hfs_lock(ap)
2813 struct vop_lock_args /* {
2814 struct vnode *a_vp;
2815 int a_flags;
2816 struct proc *a_p;
2817 } */ *ap;
2818 {
2819 struct vnode *vp = ap->a_vp;
2820 struct cnode *cp = VTOC(vp);
2821
2822 if (cp == NULL)
2823 panic("hfs_lock: cnode in vnode is null\n");
2824
2825 return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
2826 }
2827
2828 /*
2829 * Unlock a cnode.
2830 #% unlock vp L U L
2831 #
2832 vop_unlock {
2833 IN struct vnode *vp;
2834 IN int flags;
2835 IN struct proc *p;
2836
2837 */
2838 static int
2839 hfs_unlock(ap)
2840 struct vop_unlock_args /* {
2841 struct vnode *a_vp;
2842 int a_flags;
2843 struct proc *a_p;
2844 } */ *ap;
2845 {
2846 struct vnode *vp = ap->a_vp;
2847 struct cnode *cp = VTOC(vp);
2848
2849 if (cp == NULL)
2850 panic("hfs_unlock: cnode in vnode is null\n");
2851
2852 return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
2853 &vp->v_interlock, ap->a_p));
2854 }
2855
2856
2857 /*
2858 * Print out the contents of a cnode.
2859 #% print vp = = =
2860 #
2861 vop_print {
2862 IN struct vnode *vp;
2863 */
2864 static int
2865 hfs_print(ap)
2866 struct vop_print_args /* {
2867 struct vnode *a_vp;
2868 } */ *ap;
2869 {
2870 struct vnode * vp = ap->a_vp;
2871 struct cnode *cp = VTOC(vp);
2872
2873 printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
2874 major(cp->c_dev), minor(cp->c_dev));
2875 #if FIFO
2876 if (vp->v_type == VFIFO)
2877 fifo_printinfo(vp);
2878 #endif /* FIFO */
2879 lockmgr_printinfo(&cp->c_lock);
2880 printf("\n");
2881 return (0);
2882 }
2883
2884
2885 /*
2886 * Check for a locked cnode.
2887 #% islocked vp = = =
2888 #
2889 vop_islocked {
2890 IN struct vnode *vp;
2891
2892 */
2893 static int
2894 hfs_islocked(ap)
2895 struct vop_islocked_args /* {
2896 struct vnode *a_vp;
2897 } */ *ap;
2898 {
2899 return (lockstatus(&VTOC(ap->a_vp)->c_lock));
2900 }
2901
2902 /*
2903
2904 #% pathconf vp L L L
2905 #
2906 vop_pathconf {
2907 IN struct vnode *vp;
2908 IN int name;
2909 OUT register_t *retval;
2910
2911 */
2912 static int
2913 hfs_pathconf(ap)
2914 struct vop_pathconf_args /* {
2915 struct vnode *a_vp;
2916 int a_name;
2917 int *a_retval;
2918 } */ *ap;
2919 {
2920 int retval = 0;
2921
2922 switch (ap->a_name) {
2923 case _PC_LINK_MAX:
2924 if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
2925 *ap->a_retval = HFS_LINK_MAX;
2926 else
2927 *ap->a_retval = 1;
2928 break;
2929 case _PC_NAME_MAX:
2930 *ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters x max utf8 representation */
2931 break;
2932 case _PC_PATH_MAX:
2933 *ap->a_retval = PATH_MAX; /* 1024 */
2934 break;
2935 case _PC_CHOWN_RESTRICTED:
2936 *ap->a_retval = 1;
2937 break;
2938 case _PC_NO_TRUNC:
2939 *ap->a_retval = 0;
2940 break;
2941 case _PC_NAME_CHARS_MAX:
2942 *ap->a_retval = kHFSPlusMaxFileNameChars;
2943 break;
2944 case _PC_CASE_SENSITIVE:
2945 *ap->a_retval = 0;
2946 break;
2947 case _PC_CASE_PRESERVING:
2948 *ap->a_retval = 1;
2949 break;
2950 default:
2951 retval = EINVAL;
2952 }
2953
2954 return (retval);
2955 }
2956
2957
2958 /*
2959 * Advisory record locking support
2960 #% advlock vp U U U
2961 #
2962 vop_advlock {
2963 IN struct vnode *vp;
2964 IN caddr_t id;
2965 IN int op;
2966 IN struct flock *fl;
2967 IN int flags;
2968
2969 */
2970 static int
2971 hfs_advlock(ap)
2972 struct vop_advlock_args /* {
2973 struct vnode *a_vp;
2974 caddr_t a_id;
2975 int a_op;
2976 struct flock *a_fl;
2977 int a_flags;
2978 } */ *ap;
2979 {
2980 struct vnode *vp = ap->a_vp;
2981 struct flock *fl = ap->a_fl;
2982 struct hfslockf *lock;
2983 struct filefork *fork;
2984 off_t start, end;
2985 int retval;
2986
2987 /* Only regular files can have locks */
2988 if (vp->v_type != VREG)
2989 return (EISDIR);
2990
2991 fork = VTOF(ap->a_vp);
2992 /*
2993 * Avoid the common case of unlocking when cnode has no locks.
2994 */
2995 if (fork->ff_lockf == (struct hfslockf *)0) {
2996 if (ap->a_op != F_SETLK) {
2997 fl->l_type = F_UNLCK;
2998 return (0);
2999 }
3000 }
3001 /*
3002 * Convert the flock structure into a start and end.
3003 */
3004 start = 0;
3005 switch (fl->l_whence) {
3006 case SEEK_SET:
3007 case SEEK_CUR:
3008 /*
3009 * Caller is responsible for adding any necessary offset
3010 * when SEEK_CUR is used.
3011 */
3012 start = fl->l_start;
3013 break;
3014 case SEEK_END:
3015 start = fork->ff_size + fl->l_start;
3016 break;
3017 default:
3018 return (EINVAL);
3019 }
3020
3021 if (start < 0)
3022 return (EINVAL);
3023 if (fl->l_len == 0)
3024 end = -1;
3025 else
3026 end = start + fl->l_len - 1;
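/*
 * Worked example (values are hypothetical):
 *
 *     struct flock fl = { .l_whence = SEEK_SET, .l_start = 100,
 *                         .l_len = 50 };
 *     => start = 100, end = 149           (locks bytes 100..149)
 *
 *     with fl.l_len == 0, end = -1, i.e. the lock runs to end of file.
 */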
3027
3028 /*
3029 * Create the hfslockf structure
3030 */
3031 MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
3032 lock->lf_start = start;
3033 lock->lf_end = end;
3034 lock->lf_id = ap->a_id;
3035 lock->lf_fork = fork;
3036 lock->lf_type = fl->l_type;
3037 lock->lf_next = (struct hfslockf *)0;
3038 TAILQ_INIT(&lock->lf_blkhd);
3039 lock->lf_flags = ap->a_flags;
3040 /*
3041 * Do the requested operation.
3042 */
3043 switch(ap->a_op) {
3044 case F_SETLK:
3045 retval = hfs_setlock(lock);
3046 break;
3047 case F_UNLCK:
3048 retval = hfs_clearlock(lock);
3049 FREE(lock, M_LOCKF);
3050 break;
3051 case F_GETLK:
3052 retval = hfs_getlock(lock, fl);
3053 FREE(lock, M_LOCKF);
3054 break;
3055 default:
3056 retval = EINVAL;
3057 _FREE(lock, M_LOCKF);
3058 break;
3059 }
3060
3061 return (retval);
3062 }
3063
3064
3065
3066 /*
3067 * Update the access, modified, and node change times as specified
3068 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The
3069 * C_MODIFIED flag is used to specify that the node needs to be
3070 * updated but that the times have already been set. The access and
3071 * modified times are input parameters but the node change time is
3072 * always taken from the current time. If waitfor is set, then wait
3073 * for the disk write of the node to complete.
3074 */
3075 /*
3076 #% update vp L L L
3077 IN struct vnode *vp;
3078 IN struct timeval *access;
3079 IN struct timeval *modify;
3080 IN int waitfor;
3081 */
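/*
 * Typical caller pattern elsewhere in this file (e.g. hfs_makenode,
 * hfs_rename); shown here only as a sketch:
 *
 *     cp->c_flag |= C_CHANGE | C_UPDATE;
 *     tv = time;
 *     (void) VOP_UPDATE(vp, &tv, &tv, 0);
 */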
3082 static int
3083 hfs_update(ap)
3084 struct vop_update_args /* {
3085 struct vnode *a_vp;
3086 struct timeval *a_access;
3087 struct timeval *a_modify;
3088 int a_waitfor;
3089 } */ *ap;
3090 {
3091 struct vnode *vp = ap->a_vp;
3092 struct cnode *cp = VTOC(ap->a_vp);
3093 struct proc *p;
3094 struct cat_fork *dataforkp = NULL;
3095 struct cat_fork *rsrcforkp = NULL;
3096 struct cat_fork datafork;
3097 int updateflag;
3098 struct hfsmount *hfsmp;
3099 int error;
3100
3101 hfsmp = VTOHFS(vp);
3102
3103 /* XXX do we really want to clear the system cnode flags here???? */
3104 if ((vp->v_flag & VSYSTEM) ||
3105 (VTOVFS(vp)->mnt_flag & MNT_RDONLY) ||
3106 (cp->c_mode == 0)) {
3107 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
3108 return (0);
3109 }
3110
3111 updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
3112
3113 /* Nothing to update. */
3114 if (updateflag == 0) {
3115 return (0);
3116 }
3117 /* HFS standard doesn't have access times. */
3118 if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {
3119 return (0);
3120 }
3121 if (updateflag & C_ACCESS) {
3122 /*
3123 * If only the access time is changing then defer
3124 * updating it on-disk until later (in hfs_inactive).
3125 * If it was recently updated then skip the update.
3126 */
3127 if (updateflag == C_ACCESS) {
3128 cp->c_flag &= ~C_ACCESS;
3129
3130 /* It's going to disk or it's sufficiently newer... */
3131 if ((cp->c_flag & C_ATIMEMOD) ||
3132 (ap->a_access->tv_sec > (cp->c_atime + ATIME_ACCURACY))) {
3133 cp->c_atime = ap->a_access->tv_sec;
3134 cp->c_flag |= C_ATIMEMOD;
3135 }
3136 return (0);
3137 } else {
3138 cp->c_atime = ap->a_access->tv_sec;
3139 }
3140 }
3141 if (updateflag & C_UPDATE) {
3142 cp->c_mtime = ap->a_modify->tv_sec;
3143 cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;
3144 }
3145 if (updateflag & C_CHANGE) {
3146 cp->c_ctime = time.tv_sec;
3147 /*
3148 * HFS dates that WE set must be adjusted for DST
3149 */
3150 if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
3151 cp->c_ctime += 3600;
3152 cp->c_mtime = cp->c_ctime;
3153 }
3154 }
3155
3156 if (cp->c_datafork)
3157 dataforkp = &cp->c_datafork->ff_data;
3158 if (cp->c_rsrcfork)
3159 rsrcforkp = &cp->c_rsrcfork->ff_data;
3160
3161 p = current_proc();
3162
3163 /*
3164 * For delayed allocations, updates are
3165 * postponed until an fsync or until the file
3166 * gets written to disk.
3167 *
3168 * Deleted files can defer meta data updates until inactive.
3169 */
3170 if (ISSET(cp->c_flag, C_DELETED) ||
3171 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
3172 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks)) {
3173 if (updateflag & (C_CHANGE | C_UPDATE))
3174 hfs_volupdate(hfsmp, VOL_UPDATE, 0);
3175 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
3176 cp->c_flag |= C_MODIFIED;
3177
3178 return (0);
3179 }
3180
3181
3182 // XXXdbg
3183 hfs_global_shared_lock_acquire(hfsmp);
3184 if (hfsmp->jnl) {
3185 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
3186 hfs_global_shared_lock_release(hfsmp);
3187 return error;
3188 }
3189 }
3190
3191
3192 /*
3193 * For files with invalid ranges (holes) the on-disk
3194 * field representing the size of the file (cf_size)
3195 * must be no larger than the start of the first hole.
3196 */
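/*
 * Example (sizes are hypothetical): a file extended to 1 MB whose
 * first invalid (not-yet-written) range starts at 512 KB has its
 * on-disk cf_size clamped to 512 KB by the copy below until the range
 * is filled and removed from the list.
 */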
3197 if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
3198 bcopy(dataforkp, &datafork, sizeof(datafork));
3199 datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
3200 dataforkp = &datafork;
3201 }
3202
3203 /*
3204 * Lock the Catalog b-tree file.
3205 * A shared lock is sufficient since an update doesn't change
3206 * the tree and the lock on vp protects the cnode.
3207 */
3208 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
3209 if (error) {
3210 if (hfsmp->jnl) {
3211 journal_end_transaction(hfsmp->jnl);
3212 }
3213 hfs_global_shared_lock_release(hfsmp);
3214 return (error);
3215 }
3216
3217 /* XXX - waitfor is not enforced */
3218 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
3219
3220 /* Unlock the Catalog b-tree file. */
3221 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
3222
3223 if (updateflag & (C_CHANGE | C_UPDATE))
3224 hfs_volupdate(hfsmp, VOL_UPDATE, 0);
3225
3226 // XXXdbg
3227 if (hfsmp->jnl) {
3228 journal_end_transaction(hfsmp->jnl);
3229 }
3230 hfs_global_shared_lock_release(hfsmp);
3231
3232 /* After the updates are finished, clear the flags */
3233 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_ATIMEMOD);
3234
3235 return (error);
3236 }
3237
3238 /*
3239 * Allocate a new node
3240 *
3241 * Upon leaving, namei buffer must be freed.
3242 *
3243 */
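/*
 * Callers in this file build the mode up front: hfs_mkdir() passes
 * MAKEIMODE(vap->va_type, vap->va_mode), hfs_symlink() passes
 * S_IFLNK | vap->va_mode, and when no file-type bits are supplied the
 * code below defaults to S_IFREG.
 */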
3244 static int
3245 hfs_makenode(mode, dvp, vpp, cnp)
3246 int mode;
3247 struct vnode *dvp;
3248 struct vnode **vpp;
3249 struct componentname *cnp;
3250 {
3251 struct cnode *cp;
3252 struct cnode *dcp;
3253 struct vnode *tvp;
3254 struct hfsmount *hfsmp;
3255 struct timeval tv;
3256 struct proc *p;
3257 struct cat_desc in_desc, out_desc;
3258 struct cat_attr attr;
3259 int error, started_tr = 0, grabbed_lock = 0;
3260 enum vtype vnodetype;
3261
3262 p = cnp->cn_proc;
3263 dcp = VTOC(dvp);
3264 hfsmp = VTOHFS(dvp);
3265 *vpp = NULL;
3266 tvp = NULL;
3267 bzero(&out_desc, sizeof(out_desc));
3268
3269 if ((mode & S_IFMT) == 0)
3270 mode |= S_IFREG;
3271 vnodetype = IFTOVT(mode);
3272
3273 /* Check if unmount in progress */
3274 if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {
3275 error = EPERM;
3276 goto exit;
3277 }
3278 /* Check if we're out of usable disk space. */
3279 if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
3280 error = ENOSPC;
3281 goto exit;
3282 }
3283
3284 /* Setup the default attributes */
3285 bzero(&attr, sizeof(attr));
3286 attr.ca_mode = mode;
3287 attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
3288 attr.ca_mtime = time.tv_sec;
3289 attr.ca_mtime_nsec = time.tv_usec * 1000;
3290 if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
3291 attr.ca_mtime += 3600; /* Same as what hfs_update does */
3292 }
3293 attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
3294 if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
3295 attr.ca_uid = hfsmp->hfs_uid;
3296 attr.ca_gid = hfsmp->hfs_gid;
3297 } else {
3298 if (vnodetype == VLNK)
3299 attr.ca_uid = dcp->c_uid;
3300 else
3301 attr.ca_uid = cnp->cn_cred->cr_uid;
3302 attr.ca_gid = dcp->c_gid;
3303 }
3304 /*
3305 * Don't tag as a special file (BLK or CHR) until *after*
3306 * hfs_getnewvnode is called. This ensures that any
3307 * alias checking is deferred until hfs_mknod completes.
3308 */
3309 if (vnodetype == VBLK || vnodetype == VCHR)
3310 attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;
3311
3312 /* Tag symlinks with a type and creator. */
3313 if (vnodetype == VLNK) {
3314 struct FndrFileInfo *fip;
3315
3316 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
3317 fip->fdType = SWAP_BE32(kSymLinkFileType);
3318 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
3319 }
3320 if ((attr.ca_mode & S_ISGID) &&
3321 !groupmember(dcp->c_gid, cnp->cn_cred) &&
3322 suser(cnp->cn_cred, NULL)) {
3323 attr.ca_mode &= ~S_ISGID;
3324 }
3325 if (cnp->cn_flags & ISWHITEOUT)
3326 attr.ca_flags |= UF_OPAQUE;
3327
3328 /* Setup the descriptor */
3329 bzero(&in_desc, sizeof(in_desc));
3330 in_desc.cd_nameptr = cnp->cn_nameptr;
3331 in_desc.cd_namelen = cnp->cn_namelen;
3332 in_desc.cd_parentcnid = dcp->c_cnid;
3333 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
3334
3335 // XXXdbg
3336 hfs_global_shared_lock_acquire(hfsmp);
3337 grabbed_lock = 1;
3338 if (hfsmp->jnl) {
3339 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
3340 goto exit;
3341 }
3342 started_tr = 1;
3343 }
3344
3345 /* Lock catalog b-tree */
3346 error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
3347 if (error)
3348 goto exit;
3349
3350 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
3351
3352 /* Unlock catalog b-tree */
3353 (void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);
3354 if (error)
3355 goto exit;
3356
3357 /* Update the parent directory */
3358 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
3359 dcp->c_nlink++;
3360 dcp->c_entries++;
3361 dcp->c_flag |= C_CHANGE | C_UPDATE;
3362 tv = time;
3363 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
3364
3365 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
3366 (dcp->c_cnid == kHFSRootFolderID));
3367
3368 // XXXdbg
3369 // have to end the transaction here before we call hfs_getnewvnode()
3370 // because that can cause us to try and reclaim a vnode on a different
3371 // file system which could cause us to start a transaction which can
3372 // deadlock with someone on that other file system (since we could be
3373 // holding two transaction locks as well as various vnodes and we did
3374 // not obtain the locks on them in the proper order).
3375 //
3376 // NOTE: this means that if the quota check fails or we have to update
3377 // the change time on a block-special device that those changes
3378 // will happen as part of independent transactions.
3379 //
3380 if (started_tr) {
3381 journal_end_transaction(hfsmp->jnl);
3382 started_tr = 0;
3383 }
3384 if (grabbed_lock) {
3385 hfs_global_shared_lock_release(hfsmp);
3386 grabbed_lock = 0;
3387 }
3388
3389 /* Create a vnode for the object just created: */
3390 error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);
3391 if (error)
3392 goto exit;
3393
3394
3395 #if QUOTA
3396 cp = VTOC(tvp);
3397 /*
3398 * We call hfs_chkiq with the FORCE flag so that if we
3399 * fall through to the rmdir we have actually
3400 * accounted for the inode.
3401 */
3402 if ((error = hfs_getinoquota(cp)) ||
3403 (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
3404 if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
3405 FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
3406 }
3407 if (tvp->v_type == VDIR)
3408 VOP_RMDIR(dvp,tvp, cnp);
3409 else
3410 VOP_REMOVE(dvp,tvp, cnp);
3411
3412 return (error);
3413 }
3414 #endif /* QUOTA */
3415
3416 /*
3417 * restore vtype and mode for VBLK and VCHR
3418 */
3419 if (vnodetype == VBLK || vnodetype == VCHR) {
3420 struct cnode *cp;
3421
3422 cp = VTOC(tvp);
3423 cp->c_mode = mode;
3424 tvp->v_type = IFTOVT(mode);
3425 cp->c_flag |= C_CHANGE;
3426 tv = time;
3427 if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {
3428 vput(tvp);
3429 goto exit;
3430 }
3431 }
3432
3433 *vpp = tvp;
3434 exit:
3435 cat_releasedesc(&out_desc);
3436
3437 if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
3438 FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
3439 vput(dvp);
3440
3441 // XXXdbg
3442 if (started_tr) {
3443 journal_end_transaction(hfsmp->jnl);
3444 started_tr = 0;
3445 }
3446 if (grabbed_lock) {
3447 hfs_global_shared_lock_release(hfsmp);
3448 grabbed_lock = 0;
3449 }
3450
3451 return (error);
3452 }
3453
3454
3455 static int
3456 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
3457 {
3458 struct vnode *rvp;
3459 struct cnode *cp = VTOC(vp);
3460 int error;
3461
3462 if ((rvp = cp->c_rsrc_vp)) {
3463 /* Use existing vnode */
3464 error = vget(rvp, 0, p);
3465 if (error) {
3466 char * name = VTOC(vp)->c_desc.cd_nameptr;
3467
3468 if (name)
3469 printf("hfs_vgetrsrc: couldn't get"
3470 " resource fork for %s\n", name);
3471 return (error);
3472 }
3473 } else {
3474 struct cat_fork rsrcfork;
3475
3476 /* Lock catalog b-tree */
3477 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
3478 if (error)
3479 return (error);
3480
3481 /* Get resource fork data */
3482 error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
3483 (struct cat_attr *)0, &rsrcfork);
3484
3485 /* Unlock the Catalog */
3486 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
3487 if (error)
3488 return (error);
3489
3490 error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
3491 &rsrcfork, &rvp);
3492 if (error)
3493 return (error);
3494 }
3495
3496 *rvpp = rvp;
3497 return (0);
3498 }
3499
3500
3501 /*
3502 * Wrapper for special device reads
3503 */
3504 static int
3505 hfsspec_read(ap)
3506 struct vop_read_args /* {
3507 struct vnode *a_vp;
3508 struct uio *a_uio;
3509 int a_ioflag;
3510 struct ucred *a_cred;
3511 } */ *ap;
3512 {
3513 /*
3514 * Set access flag.
3515 */
3516 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3517 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));
3518 }
3519
3520 /*
3521 * Wrapper for special device writes
3522 */
3523 static int
3524 hfsspec_write(ap)
3525 struct vop_write_args /* {
3526 struct vnode *a_vp;
3527 struct uio *a_uio;
3528 int a_ioflag;
3529 struct ucred *a_cred;
3530 } */ *ap;
3531 {
3532 /*
3533 * Set update and change flags.
3534 */
3535 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3536 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));
3537 }
3538
3539 /*
3540 * Wrapper for special device close
3541 *
3542 * Update the times on the cnode then do device close.
3543 */
3544 static int
3545 hfsspec_close(ap)
3546 struct vop_close_args /* {
3547 struct vnode *a_vp;
3548 int a_fflag;
3549 struct ucred *a_cred;
3550 struct proc *a_p;
3551 } */ *ap;
3552 {
3553 struct vnode *vp = ap->a_vp;
3554 struct cnode *cp = VTOC(vp);
3555
3556 simple_lock(&vp->v_interlock);
3557 if (ap->a_vp->v_usecount > 1)
3558 CTIMES(cp, &time, &time);
3559 simple_unlock(&vp->v_interlock);
3560 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
3561 }
3562
3563 #if FIFO
3564 /*
3565 * Wrapper for fifo reads
3566 */
3567 static int
3568 hfsfifo_read(ap)
3569 struct vop_read_args /* {
3570 struct vnode *a_vp;
3571 struct uio *a_uio;
3572 int a_ioflag;
3573 struct ucred *a_cred;
3574 } */ *ap;
3575 {
3576 extern int (**fifo_vnodeop_p)(void *);
3577
3578 /*
3579 * Set access flag.
3580 */
3581 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3582 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));
3583 }
3584
3585 /*
3586 * Wrapper for fifo writes
3587 */
3588 static int
3589 hfsfifo_write(ap)
3590 struct vop_write_args /* {
3591 struct vnode *a_vp;
3592 struct uio *a_uio;
3593 int a_ioflag;
3594 struct ucred *a_cred;
3595 } */ *ap;
3596 {
3597 extern int (**fifo_vnodeop_p)(void *);
3598
3599 /*
3600 * Set update and change flags.
3601 */
3602 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3603 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));
3604 }
3605
3606 /*
3607 * Wrapper for fifo close
3608 *
3609 * Update the times on the cnode then do device close.
3610 */
3611 static int
3612 hfsfifo_close(ap)
3613 struct vop_close_args /* {
3614 struct vnode *a_vp;
3615 int a_fflag;
3616 struct ucred *a_cred;
3617 struct proc *a_p;
3618 } */ *ap;
3619 {
3620 extern int (**fifo_vnodeop_p)(void *);
3621 struct vnode *vp = ap->a_vp;
3622 struct cnode *cp = VTOC(vp);
3623
3624 simple_lock(&vp->v_interlock);
3625 if (ap->a_vp->v_usecount > 1)
3626 CTIMES(cp, &time, &time);
3627 simple_unlock(&vp->v_interlock);
3628 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
3629 }
3630 #endif /* FIFO */
3631
3632
3633 /*****************************************************************************
3634 *
3635 * VOP Tables
3636 *
3637 *****************************************************************************/
3638 int hfs_cache_lookup(); /* in hfs_lookup.c */
3639 int hfs_lookup(); /* in hfs_lookup.c */
3640 int hfs_read(); /* in hfs_readwrite.c */
3641 int hfs_write(); /* in hfs_readwrite.c */
3642 int hfs_ioctl(); /* in hfs_readwrite.c */
3643 int hfs_select(); /* in hfs_readwrite.c */
3644 int hfs_bmap(); /* in hfs_readwrite.c */
3645 int hfs_strategy(); /* in hfs_readwrite.c */
3646 int hfs_truncate(); /* in hfs_readwrite.c */
3647 int hfs_allocate(); /* in hfs_readwrite.c */
3648 int hfs_pagein(); /* in hfs_readwrite.c */
3649 int hfs_pageout(); /* in hfs_readwrite.c */
3650 int hfs_search(); /* in hfs_search.c */
3651 int hfs_bwrite(); /* in hfs_readwrite.c */
3652 int hfs_link(); /* in hfs_link.c */
3653 int hfs_blktooff(); /* in hfs_readwrite.c */
3654 int hfs_offtoblk(); /* in hfs_readwrite.c */
3655 int hfs_cmap(); /* in hfs_readwrite.c */
3656 int hfs_getattrlist(); /* in hfs_attrlist.c */
3657 int hfs_setattrlist(); /* in hfs_attrlist.c */
3658 int hfs_readdirattr(); /* in hfs_attrlist.c */
3659 int hfs_inactive(); /* in hfs_cnode.c */
3660 int hfs_reclaim(); /* in hfs_cnode.c */
3661
3662 int (**hfs_vnodeop_p)(void *);
3663
3664 #define VOPFUNC int (*)(void *)
3665
3666 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
3667 { &vop_default_desc, (VOPFUNC)vn_default_error },
3668 { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup }, /* lookup */
3669 { &vop_create_desc, (VOPFUNC)hfs_create }, /* create */
3670 { &vop_mknod_desc, (VOPFUNC)hfs_mknod }, /* mknod */
3671 { &vop_open_desc, (VOPFUNC)hfs_open }, /* open */
3672 { &vop_close_desc, (VOPFUNC)hfs_close }, /* close */
3673 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3674 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3675 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3676 { &vop_read_desc, (VOPFUNC)hfs_read }, /* read */
3677 { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */
3678 { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */
3679 { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */
3680 { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */
3681 { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
3682 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3683 { &vop_seek_desc, (VOPFUNC)nop_seek }, /* seek */
3684 { &vop_remove_desc, (VOPFUNC)hfs_remove }, /* remove */
3685 { &vop_link_desc, (VOPFUNC)hfs_link }, /* link */
3686 { &vop_rename_desc, (VOPFUNC)hfs_rename }, /* rename */
3687 { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir }, /* mkdir */
3688 { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir }, /* rmdir */
3689 { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex }, /* mkcomplex */
3690 { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, /* getattrlist */
3691 { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist }, /* setattrlist */
3692 { &vop_symlink_desc, (VOPFUNC)hfs_symlink }, /* symlink */
3693 { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */
3694 { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */
3695 { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */
3696 { &vop_abortop_desc, (VOPFUNC)hfs_abortop }, /* abortop */
3697 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3698 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3699 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3700 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3701 { &vop_bmap_desc, (VOPFUNC)hfs_bmap }, /* bmap */
3702 { &vop_strategy_desc, (VOPFUNC)hfs_strategy }, /* strategy */
3703 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3704 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3705 { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf }, /* pathconf */
3706 { &vop_advlock_desc, (VOPFUNC)hfs_advlock }, /* advlock */
3707 { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks }, /* reallocblks */
3708 { &vop_truncate_desc, (VOPFUNC)hfs_truncate }, /* truncate */
3709 { &vop_allocate_desc, (VOPFUNC)hfs_allocate }, /* allocate */
3710 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3711 { &vop_searchfs_desc, (VOPFUNC)hfs_search }, /* search fs */
3712 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, /* bwrite */
3713 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* pagein */
3714 { &vop_pageout_desc,(VOPFUNC) hfs_pageout }, /* pageout */
3715 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3716 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3717 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3718 { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */
3719 { NULL, (VOPFUNC)NULL }
3720 };
3721
3722 struct vnodeopv_desc hfs_vnodeop_opv_desc =
3723 { &hfs_vnodeop_p, hfs_vnodeop_entries };
3724
3725 int (**hfs_specop_p)(void *);
3726 struct vnodeopv_entry_desc hfs_specop_entries[] = {
3727 { &vop_default_desc, (VOPFUNC)vn_default_error },
3728 { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
3729 { &vop_create_desc, (VOPFUNC)spec_create }, /* create */
3730 { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
3731 { &vop_open_desc, (VOPFUNC)spec_open }, /* open */
3732 { &vop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
3733 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3734 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3735 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3736 { &vop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
3737 { &vop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
3738 { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */
3739 { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
3740 { &vop_select_desc, (VOPFUNC)spec_select }, /* select */
3741 { &vop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
3742 { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
3743 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3744 { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */
3745 { &vop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
3746 { &vop_link_desc, (VOPFUNC)spec_link }, /* link */
3747 { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
3748 { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
3749 { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
3750 { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
3751 { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
3752 { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
3753 { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */
3754 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3755 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3756 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3757 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3758 { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */
3759 { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
3760 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3761 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3762 { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
3763 { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */
3764 { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */
3765 { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */
3766 { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks }, /* reallocblks */
3767 { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */
3768 { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */
3769 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3770 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
3771 { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */
3772 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */
3773 { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */
3774 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3775 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3776 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3777 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
3778 };
3779 struct vnodeopv_desc hfs_specop_opv_desc =
3780 { &hfs_specop_p, hfs_specop_entries };
3781
3782 #if FIFO
3783 int (**hfs_fifoop_p)(void *);
3784 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
3785 { &vop_default_desc, (VOPFUNC)vn_default_error },
3786 { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
3787 { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */
3788 { &vop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
3789 { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */
3790 { &vop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
3791 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3792 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3793 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3794 { &vop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
3795 { &vop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
3796 { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */
3797 { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
3798 { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */
3799 { &vop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
3800 { &vop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
3801 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3802 { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */
3803 { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
3804 { &vop_link_desc, (VOPFUNC)fifo_link }, /* link */
3805 { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
3806 { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
3807 { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
3808 { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
3809 { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
3810 { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
3811 { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */
3812 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3813 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3814 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3815 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3816 { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */
3817 { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
3818 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3819 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3820 { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
3821 { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */
3822 { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */
3823 { &vop_valloc_desc, (VOPFUNC)fifo_valloc }, /* valloc */
3824 { &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks }, /* reallocblks */
3825 { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */
3826 { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */
3827 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3828 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
3829 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */
3830 { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */
3831 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3832 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3833 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3834 { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */
3835 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
3836 };
3837 struct vnodeopv_desc hfs_fifoop_opv_desc =
3838 { &hfs_fifoop_p, hfs_fifoop_entries };
3839 #endif /* FIFO */
3840
3841
3842