[apple/xnu.git] / bsd / hfs / hfs_vnops.c (xnu-344.21.74)
1/*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25
26#include <sys/systm.h>
27#include <sys/kernel.h>
28#include <sys/file.h>
29#include <sys/dirent.h>
30#include <sys/stat.h>
31#include <sys/buf.h>
32#include <sys/mount.h>
33#include <sys/vnode.h>
34#include <sys/malloc.h>
35#include <sys/namei.h>
36#include <sys/ubc.h>
37#include <sys/quota.h>
38
39#include <miscfs/specfs/specdev.h>
40#include <miscfs/fifofs/fifo.h>
41#include <vfs/vfs_support.h>
42#include <machine/spl.h>
43
44#include <sys/kdebug.h>
45
46#include "hfs.h"
47#include "hfs_catalog.h"
48#include "hfs_cnode.h"
49#include "hfs_lockf.h"
50#include "hfs_dbg.h"
51#include "hfs_mount.h"
52#include "hfs_quota.h"
53#include "hfs_endian.h"
54
55#include "hfscommon/headers/BTreesInternal.h"
56#include "hfscommon/headers/FileMgrInternal.h"
57
58#define MAKE_DELETED_NAME(NAME,FID) \
59 (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
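/*
 * For example, a deleted-but-still-open file with file ID 1234 gets a
 * catalog name of the form <HFS_DELETE_PREFIX>1234 (the actual prefix
 * string is defined in hfs.h).
 */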
60
61
62extern uid_t console_user;
63
64extern unsigned long strtoul(const char *, char **, int);
65
66/* Global vfs data structures for hfs */
67
68
69extern int groupmember(gid_t gid, struct ucred *cred);
70
71static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
72 struct componentname *cnp);
73
74static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
75 struct vnode **rvpp, struct proc *p);
76
77static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);
78
79int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags);
80
81int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
82 struct proc *p);
83int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
84 struct proc *p);
85int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
86 struct ucred *cred, struct proc *p);
87
88/*****************************************************************************
89*
90* Common Operations on vnodes
91*
92*****************************************************************************/
93
94/*
95 * Create a regular file
96#% create dvp L U U
97#% create vpp - L -
98#
99 vop_create {
100 IN WILLRELE struct vnode *dvp;
101 OUT struct vnode **vpp;
102 IN struct componentname *cnp;
103 IN struct vattr *vap;
104
105 We are responsible for freeing the namei buffer,
106 it is done in hfs_makenode()
107*/
108
109static int
110hfs_create(ap)
111 struct vop_create_args /* {
112 struct vnode *a_dvp;
113 struct vnode **a_vpp;
114 struct componentname *a_cnp;
115 struct vattr *a_vap;
116 } */ *ap;
117{
118 struct vattr *vap = ap->a_vap;
119
120 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
121 ap->a_dvp, ap->a_vpp, ap->a_cnp));
122}
123
124
125/*
126 * Mknod vnode call
127
128#% mknod dvp L U U
129#% mknod vpp - X -
130#
131 vop_mknod {
132 IN WILLRELE struct vnode *dvp;
133 OUT WILLRELE struct vnode **vpp;
134 IN struct componentname *cnp;
135 IN struct vattr *vap;
136 */
137/* ARGSUSED */
138
139static int
140hfs_mknod(ap)
141 struct vop_mknod_args /* {
142 struct vnode *a_dvp;
143 struct vnode **a_vpp;
144 struct componentname *a_cnp;
145 struct vattr *a_vap;
146 } */ *ap;
147{
148 struct vattr *vap = ap->a_vap;
149 struct vnode **vpp = ap->a_vpp;
150 struct cnode *cp;
151 int error;
152
153 if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
154 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
155 vput(ap->a_dvp);
156 return (EOPNOTSUPP);
157 }
158
159 /* Create the vnode */
160 error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
161 ap->a_dvp, vpp, ap->a_cnp);
162 if (error)
163 return (error);
164 cp = VTOC(*vpp);
165 cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
166 if ((vap->va_rdev != VNOVAL) &&
167 (vap->va_type == VBLK || vap->va_type == VCHR))
168 cp->c_rdev = vap->va_rdev;
169 /*
170 * Remove cnode so that it will be reloaded by lookup and
171 * checked to see if it is an alias of an existing vnode.
172 * Note: unlike UFS, we don't bash v_type here.
173 */
174 vput(*vpp);
175 vgone(*vpp);
176 *vpp = 0;
177 return (0);
178}
179
180
181/*
182 * Open called.
183#% open vp L L L
184#
185 vop_open {
186 IN struct vnode *vp;
187 IN int mode;
188 IN struct ucred *cred;
189 IN struct proc *p;
190 */
191
192
193static int
194hfs_open(ap)
195 struct vop_open_args /* {
196 struct vnode *a_vp;
197 int a_mode;
198 struct ucred *a_cred;
199 struct proc *a_p;
200 } */ *ap;
201{
202 struct vnode *vp = ap->a_vp;
203
204 /*
205 * Files marked append-only must be opened for appending.
206 */
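	/*
	 * Masking a_mode with (FWRITE | O_APPEND) and comparing against
	 * FWRITE is true exactly when write access was requested without
	 * O_APPEND, which is what an append-only file must refuse.
	 */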
207 if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
208 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
209 return (EPERM);
210
211 return (0);
212}
213
214/*
215 * Close called.
216 *
217 * Update the times on the cnode.
218#% close vp U U U
219#
220 vop_close {
221 IN struct vnode *vp;
222 IN int fflag;
223 IN struct ucred *cred;
224 IN struct proc *p;
225 */
226
227
228static int
229hfs_close(ap)
230 struct vop_close_args /* {
231 struct vnode *a_vp;
232 int a_fflag;
233 struct ucred *a_cred;
234 struct proc *a_p;
235 } */ *ap;
236{
237 register struct vnode *vp = ap->a_vp;
238 register struct cnode *cp = VTOC(vp);
239 register struct filefork *fp = VTOF(vp);
240 struct proc *p = ap->a_p;
241 struct timeval tv;
242 off_t leof;
243 u_long blks, blocksize;
244 int devBlockSize;
245 int error;
246
247 simple_lock(&vp->v_interlock);
248 if ((!UBCISVALID(vp) && vp->v_usecount > 1)
249 || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
250 tv = time;
251 CTIMES(cp, &tv, &tv);
252 }
253 simple_unlock(&vp->v_interlock);
254
255 /*
256 * VOP_CLOSE can be called with vp locked (from vclean).
257 * We check for this case using VOP_ISLOCKED and bail.
258 *
259 * XXX During a force unmount we won't do the cleanup below!
260 */
261 if (vp->v_type == VDIR || VOP_ISLOCKED(vp))
262 return (0);
263
264 leof = fp->ff_size;
265
266 if ((fp->ff_blocks > 0) && !ISSET(cp->c_flag, C_DELETED)) {
267 enum vtype our_type = vp->v_type;
268 u_long our_id = vp->v_id;
269 int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
270
271 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
272 if (error)
273 return (0);
274 /*
275 * Since we can context switch in vn_lock, our vnode
276 * could get recycled (e.g. umount -f). Double-check
277 * that it's still ours.
278 */
279 if (vp->v_type != our_type || vp->v_id != our_id
280 || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
281 VOP_UNLOCK(vp, 0, p);
282 return (0);
283 }
284
285 /*
286 * Last chance to explicitly zero out the areas
287 * that are currently marked invalid:
288 */
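	/*
	 * Each invalid range [rl_start, rl_end] is first revalidated (so
	 * that VOP_CMAP returns real mappings) and then, in effect,
	 * zero-filled by calling cluster_write() with a NULL uio and
	 * IO_HEADZEROFILL, so no stale on-disk data is ever exposed.
	 */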
289 VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
290 (void) cluster_push(vp);
291 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
292 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
293 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
294 off_t start = invalid_range->rl_start;
295 off_t end = invalid_range->rl_end;
296
297 /* The range about to be written must be validated
298 * first, so that VOP_CMAP() will return the
299 * appropriate mapping for the cluster code:
300 */
301 rl_remove(start, end, &fp->ff_invalidranges);
302
303 (void) cluster_write(vp, (struct uio *) 0, leof,
304 invalid_range->rl_end + 1, invalid_range->rl_start,
305 (off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);
306
307 if (ISSET(vp->v_flag, VHASDIRTY))
308 (void) cluster_push(vp);
309
310 cp->c_flag |= C_MODIFIED;
311 }
312 cp->c_flag &= ~C_ZFWANTSYNC;
313 cp->c_zftimeout = 0;
314 blocksize = VTOVCB(vp)->blockSize;
315 blks = leof / blocksize;
316 if (((off_t)blks * (off_t)blocksize) != leof)
317 blks++;
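	/*
	 * e.g. with leof == 5000 and a 4096-byte allocation block,
	 * blks = 5000/4096 = 1 and the remainder bumps it to 2, so any
	 * allocation beyond two blocks can be released below.
	 */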
318 /*
319 * Shrink the peof to the smallest size necessary to contain the leof.
320 */
321 if (blks < fp->ff_blocks)
322 (void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
323 (void) cluster_push(vp);
324
325 if (!was_nocache)
326 CLR(vp->v_flag, VNOCACHE_DATA);
327
328 /*
329 * If the VOP_TRUNCATE didn't happen to flush the vnode's
330 * information out to disk, force it to be updated now that
331 * all invalid ranges have been zero-filled and validated:
332 */
333 if (cp->c_flag & C_MODIFIED) {
334 tv = time;
335 VOP_UPDATE(vp, &tv, &tv, 0);
336 }
337 VOP_UNLOCK(vp, 0, p);
338 }
339 return (0);
340}
341
342/*
343#% access vp L L L
344#
345 vop_access {
346 IN struct vnode *vp;
347 IN int mode;
348 IN struct ucred *cred;
349 IN struct proc *p;
350
351 */
352
353static int
354hfs_access(ap)
355 struct vop_access_args /* {
356 struct vnode *a_vp;
357 int a_mode;
358 struct ucred *a_cred;
359 struct proc *a_p;
360 } */ *ap;
361{
362 struct vnode *vp = ap->a_vp;
363 struct cnode *cp = VTOC(vp);
364 struct ucred *cred = ap->a_cred;
365 register gid_t *gp;
366 mode_t mode = ap->a_mode;
367 mode_t mask = 0;
368 int i;
369 int error;
370
371 /*
372 * Disallow write attempts on read-only file systems;
373 * unless the file is a socket, fifo, or a block or
374 * character device resident on the file system.
375 */
376 if (mode & VWRITE) {
377 switch (vp->v_type) {
378 case VDIR:
379 case VLNK:
380 case VREG:
381 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
382 return (EROFS);
383#if QUOTA
384 if ((error = hfs_getinoquota(cp)))
385 return (error);
386#endif /* QUOTA */
387 break;
388 }
389 }
390
391 /* If immutable bit set, nobody gets to write it. */
392 if ((mode & VWRITE) && (cp->c_flags & IMMUTABLE))
393 return (EPERM);
394
395 /* Otherwise, user id 0 always gets access. */
396 if (ap->a_cred->cr_uid == 0)
397 return (0);
398
399 mask = 0;
400
401 /* Otherwise, check the owner. */
402 if (hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, ap->a_p, false) == 0) {
403 if (mode & VEXEC)
404 mask |= S_IXUSR;
405 if (mode & VREAD)
406 mask |= S_IRUSR;
407 if (mode & VWRITE)
408 mask |= S_IWUSR;
409 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
410 }
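	/*
	 * In the owner case just handled, for example, a request for
	 * VREAD|VWRITE builds mask = S_IRUSR|S_IWUSR (0600); access is
	 * granted only if both bits are present in c_mode.
	 */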
411
412 /* Otherwise, check the groups. */
413 if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
414 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
415 if (cp->c_gid == *gp) {
416 if (mode & VEXEC)
417 mask |= S_IXGRP;
418 if (mode & VREAD)
419 mask |= S_IRGRP;
420 if (mode & VWRITE)
421 mask |= S_IWGRP;
422 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
423 }
424 }
425
426 /* Otherwise, check everyone else. */
427 if (mode & VEXEC)
428 mask |= S_IXOTH;
429 if (mode & VREAD)
430 mask |= S_IROTH;
431 if (mode & VWRITE)
432 mask |= S_IWOTH;
433 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
434}
435
436
437
438/*
439#% getattr vp = = =
440#
441 vop_getattr {
442 IN struct vnode *vp;
443 IN struct vattr *vap;
444 IN struct ucred *cred;
445 IN struct proc *p;
446
447 */
448
449
450/* ARGSUSED */
451static int
452hfs_getattr(ap)
453 struct vop_getattr_args /* {
454 struct vnode *a_vp;
455 struct vattr *a_vap;
456 struct ucred *a_cred;
457 struct proc *a_p;
458 } */ *ap;
459{
460 struct vnode *vp = ap->a_vp;
461 struct cnode *cp = VTOC(vp);
462 struct vattr *vap = ap->a_vap;
463 struct timeval tv;
464
465 tv = time;
466 CTIMES(cp, &tv, &tv);
467
468 vap->va_type = vp->v_type;
469 /*
470 * [2856576] Since we are dynamically changing the owner, also
471 * effectively turn off the set-user-id and set-group-id bits,
472 * just like chmod(2) would when changing ownership. This prevents
473 * a security hole where set-user-id programs run as whoever is
474 * logged on (or root if nobody is logged in yet!)
475 */
476 vap->va_mode = (cp->c_uid == UNKNOWNUID) ? cp->c_mode & ~(S_ISUID | S_ISGID) : cp->c_mode;
477 vap->va_nlink = cp->c_nlink;
478 vap->va_uid = (cp->c_uid == UNKNOWNUID) ? console_user : cp->c_uid;
479 vap->va_gid = cp->c_gid;
480 vap->va_fsid = cp->c_dev;
481 /*
482 * Exporting file IDs from HFS Plus:
483 *
484 * For "normal" files the c_fileid is the same value as the
485 * c_cnid. But for hard link files, they are different - the
486 * c_cnid belongs to the active directory entry (ie the link)
487 * and the c_fileid is for the actual inode (ie the data file).
488 *
489 * The stat call (getattr) will always return the c_fileid
490 * and Carbon APIs, which are hardlink-ignorant, will always
491 * receive the c_cnid (from getattrlist).
492 */
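	/*
	 * For example, two hard links to one file share a single c_fileid
	 * (reported by stat(2) as st_ino), while each link record carries
	 * its own c_cnid.
	 */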
493 vap->va_fileid = cp->c_fileid;
494 vap->va_atime.tv_sec = cp->c_atime;
495 vap->va_atime.tv_nsec = 0;
496 vap->va_mtime.tv_sec = cp->c_mtime;
497 vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
498 vap->va_ctime.tv_sec = cp->c_ctime;
499 vap->va_ctime.tv_nsec = 0;
500 vap->va_gen = 0;
501 vap->va_flags = cp->c_flags;
502 vap->va_rdev = 0;
503 vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;
504 vap->va_filerev = 0;
505 vap->va_spare = 0;
506 if (vp->v_type == VDIR) {
507 vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;
508 vap->va_bytes = 0;
509 } else {
510 vap->va_size = VTOF(vp)->ff_size;
511 vap->va_bytes = (u_quad_t)cp->c_blocks *
512 (u_quad_t)VTOVCB(vp)->blockSize;
513 if (vp->v_type == VBLK || vp->v_type == VCHR)
514 vap->va_rdev = cp->c_rdev;
515 }
516 return (0);
517}
518
519/*
520 * Set attribute vnode op. called from several syscalls
521#% setattr vp L L L
522#
523 vop_setattr {
524 IN struct vnode *vp;
525 IN struct vattr *vap;
526 IN struct ucred *cred;
527 IN struct proc *p;
528
529 */
530
531static int
532hfs_setattr(ap)
533 struct vop_setattr_args /* {
534 struct vnode *a_vp;
535 struct vattr *a_vap;
536 struct ucred *a_cred;
537 struct proc *a_p;
538 } */ *ap;
539{
540 struct vattr *vap = ap->a_vap;
541 struct vnode *vp = ap->a_vp;
542 struct cnode *cp = VTOC(vp);
543 struct ucred *cred = ap->a_cred;
544 struct proc *p = ap->a_p;
545 struct timeval atimeval, mtimeval;
546 int error;
547
548 /*
549 * Check for unsettable attributes.
550 */
551 if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
552 (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
553 (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
554 ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
555 return (EINVAL);
556 }
557
558 if (vap->va_flags != VNOVAL) {
559 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
560 return (EROFS);
561 if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))
562 return (error);
563 if (vap->va_flags & (IMMUTABLE | APPEND))
564 return (0);
565 }
566
567 if (cp->c_flags & (IMMUTABLE | APPEND))
568 return (EPERM);
569
570 // XXXdbg - don't allow modification of the journal or journal_info_block
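	/*
	 * The journal files are recognized by their first data extent:
	 * it begins either at the volume's journal info block or at the
	 * first block of the journal itself.
	 */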
571 if (VTOHFS(vp)->jnl && cp->c_datafork) {
572 struct HFSPlusExtentDescriptor *extd;
573
574 extd = &cp->c_datafork->ff_data.cf_extents[0];
575 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
576 return EPERM;
577 }
578 }
579
580 /*
581 * Go through the fields and update iff not VNOVAL.
582 */
583 if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
584 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
585 return (EROFS);
586 if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))
587 return (error);
588 }
589 if (vap->va_size != VNOVAL) {
590 /*
591 * Disallow write attempts on read-only file systems;
592 * unless the file is a socket, fifo, or a block or
593 * character device resident on the file system.
594 */
595 switch (vp->v_type) {
596 case VDIR:
597 return (EISDIR);
598 case VLNK:
599 case VREG:
600 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
601 return (EROFS);
602 break;
603 default:
604 break;
605 }
606 if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))
607 return (error);
608 }
609 cp = VTOC(vp);
610 if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
611 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
612 return (EROFS);
613 if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
614 ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
615 (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {
616 return (error);
617 }
618 if (vap->va_atime.tv_sec != VNOVAL)
619 cp->c_flag |= C_ACCESS;
620 if (vap->va_mtime.tv_sec != VNOVAL) {
621 cp->c_flag |= C_CHANGE | C_UPDATE;
622 /*
623 * The utimes system call can reset the modification
624 * time but it doesn't know about HFS create times.
625 * So we need to ensure that the creation time is
626 * always at least as old as the modification time.
627 */
628 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
629 (cp->c_cnid != kRootDirID) &&
630 (vap->va_mtime.tv_sec < cp->c_itime)) {
631 cp->c_itime = vap->va_mtime.tv_sec;
632 }
633 }
634 atimeval.tv_sec = vap->va_atime.tv_sec;
635 atimeval.tv_usec = 0;
636 mtimeval.tv_sec = vap->va_mtime.tv_sec;
637 mtimeval.tv_usec = 0;
638 if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))
639 return (error);
640 }
641 error = 0;
642 if (vap->va_mode != (mode_t)VNOVAL) {
643 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
644 return (EROFS);
645 error = hfs_chmod(vp, (int)vap->va_mode, cred, p);
646 }
647 return (error);
648}
649
650
651/*
652 * Change the mode on a file.
653 * cnode must be locked before calling.
654 */
655int
656hfs_chmod(vp, mode, cred, p)
657 register struct vnode *vp;
658 register int mode;
659 register struct ucred *cred;
660 struct proc *p;
661{
662 register struct cnode *cp = VTOC(vp);
663 int error;
664
665 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
666 return (0);
667
668 // XXXdbg - don't allow modification of the journal or journal_info_block
669 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
670 struct HFSPlusExtentDescriptor *extd;
671
672 extd = &cp->c_datafork->ff_data.cf_extents[0];
673 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
674 return EPERM;
675 }
676 }
677
678#if OVERRIDE_UNKNOWN_PERMISSIONS
679 if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
680 return (0);
681 };
682#endif
683 if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)
684 return (error);
685 if (cred->cr_uid) {
686 if (vp->v_type != VDIR && (mode & S_ISTXT))
687 return (EFTYPE);
688 if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))
689 return (EPERM);
690 }
691 cp->c_mode &= ~ALLPERMS;
692 cp->c_mode |= (mode & ALLPERMS);
693 cp->c_flag |= C_CHANGE;
694 return (0);
695}
696
697
698int
699hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
700{
701 struct cnode *cp = VTOC(vp);
702 gid_t *gp;
703 int retval = 0;
704 int i;
705
706 /*
707 * Disallow write attempts on read-only file systems;
708 * unless the file is a socket, fifo, or a block or
709 * character device resident on the file system.
710 */
711 switch (vp->v_type) {
712 case VDIR:
713 case VLNK:
714 case VREG:
715 if (VTOVFS(vp)->mnt_flag & MNT_RDONLY)
716 return (EROFS);
717 break;
718 default:
719 break;
720 }
721
722 /* If immutable bit set, nobody gets to write it. */
723 if (considerFlags && (cp->c_flags & IMMUTABLE))
724 return (EPERM);
725
726 /* Otherwise, user id 0 always gets access. */
727 if (cred->cr_uid == 0)
728 return (0);
729
730 /* Otherwise, check the owner. */
731 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
732 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
733
734 /* Otherwise, check the groups. */
735 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
736 if (cp->c_gid == *gp)
737 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
738 }
739
740 /* Otherwise, check everyone else. */
741 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
742}
743
744
745
746/*
747 * Change the flags on a file or directory.
748 * cnode must be locked before calling.
749 */
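/*
 * A superuser caller may set any flag combination (unless SF_IMMUTABLE or
 * SF_APPEND is already set and securelevel > 0); other callers may only
 * change the UF_SETTABLE (user) flags and can never clear SF_* flags.
 */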
750int
751hfs_chflags(vp, flags, cred, p)
752 register struct vnode *vp;
753 register u_long flags;
754 register struct ucred *cred;
755 struct proc *p;
756{
757 register struct cnode *cp = VTOC(vp);
758 int retval;
759
760 if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
761 if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
762 return retval;
763 };
764 } else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {
765 return retval;
766 };
767
768 if (cred->cr_uid == 0) {
769 if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&
770 securelevel > 0) {
771 return EPERM;
772 };
773 cp->c_flags = flags;
774 } else {
775 if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
776 (flags & UF_SETTABLE) != flags) {
777 return EPERM;
778 };
779 cp->c_flags &= SF_SETTABLE;
780 cp->c_flags |= (flags & UF_SETTABLE);
781 }
782 cp->c_flag |= C_CHANGE;
783
784 return (0);
785}
786
787
788/*
789 * Perform chown operation on cnode cp;
790 * cnode must be locked prior to call.
791 */
792int
793hfs_chown(vp, uid, gid, cred, p)
794 register struct vnode *vp;
795 uid_t uid;
796 gid_t gid;
797 struct ucred *cred;
798 struct proc *p;
799{
800 register struct cnode *cp = VTOC(vp);
801 uid_t ouid;
802 gid_t ogid;
803 int error = 0;
804#if QUOTA
805 register int i;
806 int64_t change;
807#endif /* QUOTA */
808
809 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
810 return (EOPNOTSUPP);
811
812 if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)
813 return (0);
814
815 if (uid == (uid_t)VNOVAL)
816 uid = cp->c_uid;
817 if (gid == (gid_t)VNOVAL)
818 gid = cp->c_gid;
819 /*
820 * If we don't own the file, are trying to change the owner
821 * of the file, or are not a member of the target group,
822 * the caller must be superuser or the call fails.
823 */
824 if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
825 (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
826 (error = suser(cred, &p->p_acflag)))
827 return (error);
828
829 ogid = cp->c_gid;
830 ouid = cp->c_uid;
831#if QUOTA
832 if ((error = hfs_getinoquota(cp)))
833 return (error);
834 if (ouid == uid) {
835 dqrele(vp, cp->c_dquot[USRQUOTA]);
836 cp->c_dquot[USRQUOTA] = NODQUOT;
837 }
838 if (ogid == gid) {
839 dqrele(vp, cp->c_dquot[GRPQUOTA]);
840 cp->c_dquot[GRPQUOTA] = NODQUOT;
841 }
842
843 /*
844 * Eventually need to account for (fake) a block per directory
845 *if (vp->v_type == VDIR)
846 *change = VTOVCB(vp)->blockSize;
847 *else
848 */
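	/*
	 * The quota charge being moved is the file's full allocation in
	 * bytes: it is debited from the old uid/gid here, credited to the
	 * new owner below, and rolled back if the new owner's quota check
	 * fails.
	 */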
849
850 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
851 (void) hfs_chkdq(cp, -change, cred, CHOWN);
852 (void) hfs_chkiq(cp, -1, cred, CHOWN);
853 for (i = 0; i < MAXQUOTAS; i++) {
854 dqrele(vp, cp->c_dquot[i]);
855 cp->c_dquot[i] = NODQUOT;
856 }
857#endif /* QUOTA */
858 cp->c_gid = gid;
859 cp->c_uid = uid;
860#if QUOTA
861 if ((error = hfs_getinoquota(cp)) == 0) {
862 if (ouid == uid) {
863 dqrele(vp, cp->c_dquot[USRQUOTA]);
864 cp->c_dquot[USRQUOTA] = NODQUOT;
865 }
866 if (ogid == gid) {
867 dqrele(vp, cp->c_dquot[GRPQUOTA]);
868 cp->c_dquot[GRPQUOTA] = NODQUOT;
869 }
870 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
871 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
872 goto good;
873 else
874 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
875 }
876 for (i = 0; i < MAXQUOTAS; i++) {
877 dqrele(vp, cp->c_dquot[i]);
878 cp->c_dquot[i] = NODQUOT;
879 }
880 }
881 cp->c_gid = ogid;
882 cp->c_uid = ouid;
883 if (hfs_getinoquota(cp) == 0) {
884 if (ouid == uid) {
885 dqrele(vp, cp->c_dquot[USRQUOTA]);
886 cp->c_dquot[USRQUOTA] = NODQUOT;
887 }
888 if (ogid == gid) {
889 dqrele(vp, cp->c_dquot[GRPQUOTA]);
890 cp->c_dquot[GRPQUOTA] = NODQUOT;
891 }
892 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
893 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
894 (void) hfs_getinoquota(cp);
895 }
896 return (error);
897good:
898 if (hfs_getinoquota(cp))
899 panic("hfs_chown: lost quota");
900#endif /* QUOTA */
901
902 if (ouid != uid || ogid != gid)
903 cp->c_flag |= C_CHANGE;
904 if (ouid != uid && cred->cr_uid != 0)
905 cp->c_mode &= ~S_ISUID;
906 if (ogid != gid && cred->cr_uid != 0)
907 cp->c_mode &= ~S_ISGID;
908 return (0);
909}
910
911
912/*
913#
914#% exchange fvp L L L
915#% exchange tvp L L L
916#
917 */
918 /*
919 * The hfs_exchange routine swaps the fork data in two files by
920 * exchanging some of the information in the cnode. It is used
921 * to preserve the file ID when updating an existing file, in
922 * case the file is being tracked through its file ID. Typically
923 * it's used after creating a new file during a safe-save.
924 */
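/*
 * Illustrative sketch of the safe-save pattern that ends up here
 * (user-space pseudocode; the exact entry point is assumed to be the
 * exchangedata(2)/VOP_EXCHANGE path):
 *
 *	fd = open("Document.tmp", O_CREAT | O_WRONLY, 0644);
 *	write(fd, newdata, len);
 *	fsync(fd);
 *	exchangedata("Document.tmp", "Document", 0);  // swap forks, keep file ID
 *	unlink("Document.tmp");
 */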
925
926static int
927hfs_exchange(ap)
928 struct vop_exchange_args /* {
929 struct vnode *a_fvp;
930 struct vnode *a_tvp;
931 struct ucred *a_cred;
932 struct proc *a_p;
933 } */ *ap;
934{
935 struct vnode *from_vp = ap->a_fvp;
936 struct vnode *to_vp = ap->a_tvp;
937 struct vnode *from_rvp = NULL;
938 struct vnode *to_rvp = NULL;
939 struct cnode *from_cp = VTOC(from_vp);
940 struct cnode *to_cp = VTOC(to_vp);
941 struct hfsmount *hfsmp = VTOHFS(from_vp);
942 struct cat_desc tempdesc;
943 struct cat_attr tempattr;
944 int error = 0, started_tr = 0, grabbed_lock = 0;
945
946 /* The files must be on the same volume. */
947 if (from_vp->v_mount != to_vp->v_mount)
948 return (EXDEV);
949
950 /* Only normal files can be exchanged. */
951 if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
952 (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
953 VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
954 return (EINVAL);
955
956 // XXXdbg - don't allow modification of the journal or journal_info_block
957 if (hfsmp->jnl) {
958 struct HFSPlusExtentDescriptor *extd;
959
960 if (from_cp->c_datafork) {
961 extd = &from_cp->c_datafork->ff_data.cf_extents[0];
962 if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
963 return EPERM;
964 }
965 }
966
967 if (to_cp->c_datafork) {
968 extd = &to_cp->c_datafork->ff_data.cf_extents[0];
969 if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
970 return EPERM;
971 }
972 }
973 }
974
975 from_rvp = from_cp->c_rsrc_vp;
976 to_rvp = to_cp->c_rsrc_vp;
977
978 /* If one of the resource forks is open then get the other one. */
979 if (from_rvp || to_rvp) {
980 error = hfs_vgetrsrc(hfsmp, from_vp, &from_rvp, ap->a_p);
981 if (error)
982 return (error);
983 error = hfs_vgetrsrc(hfsmp, to_vp, &to_rvp, ap->a_p);
984 if (error) {
985 vrele(from_rvp);
986 return (error);
987 }
988 }
989
990 /* Ignore any errors, we are doing a 'best effort' on flushing */
991 if (from_vp)
992 (void) vinvalbuf(from_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
993 if (to_vp)
994 (void) vinvalbuf(to_vp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
995 if (from_rvp)
996 (void) vinvalbuf(from_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
997 if (to_rvp)
998 (void) vinvalbuf(to_rvp, V_SAVE, ap->a_cred, ap->a_p, 0, 0);
999
1000 // XXXdbg
1001 hfs_global_shared_lock_acquire(hfsmp);
1002 grabbed_lock = 1;
1003 if (hfsmp->jnl) {
1004 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1005 goto Err_Exit;
1006 }
1007 started_tr = 1;
1008 }
1009
1010 /* Lock catalog b-tree */
1011 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
1012 if (error) goto Err_Exit;
1013
1014 /* The backend code always tries to delete the virtual
1015 * extent id for exchanging files so we need to lock
1016 * the extents b-tree.
1017 */
1018 error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
1019 if (error) {
1020 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
1021 goto Err_Exit;
1022 }
1023
1024 /* Do the exchange */
1025 error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
1026 from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
1027 from_cp->c_parentcnid, to_cp->c_parentcnid,
1028 from_cp->c_hint, to_cp->c_hint));
1029
1030 (void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
1031 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
1032
1033 if (error != E_NONE) {
1034 goto Err_Exit;
1035 }
1036
1037 /* Purge the vnodes from the name cache */
1038 if (from_vp)
1039 cache_purge(from_vp);
1040 if (to_vp)
1041 cache_purge(to_vp);
1042
1043 /* Save a copy of from attributes before swapping. */
1044 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
1045 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
1046
1047 /*
1048 * Swap the descriptors and all non-fork related attributes.
1049 * (except the modify date)
1050 */
1051 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
1052
1053 from_cp->c_hint = 0;
1054 from_cp->c_fileid = from_cp->c_cnid;
1055 from_cp->c_itime = to_cp->c_itime;
1056 from_cp->c_btime = to_cp->c_btime;
1057 from_cp->c_atime = to_cp->c_atime;
1058 from_cp->c_ctime = to_cp->c_ctime;
1059 from_cp->c_gid = to_cp->c_gid;
1060 from_cp->c_uid = to_cp->c_uid;
1061 from_cp->c_flags = to_cp->c_flags;
1062 from_cp->c_mode = to_cp->c_mode;
1063 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
1064
1065 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
1066 to_cp->c_hint = 0;
1067 to_cp->c_fileid = to_cp->c_cnid;
1068 to_cp->c_itime = tempattr.ca_itime;
1069 to_cp->c_btime = tempattr.ca_btime;
1070 to_cp->c_atime = tempattr.ca_atime;
1071 to_cp->c_ctime = tempattr.ca_ctime;
1072 to_cp->c_gid = tempattr.ca_gid;
1073 to_cp->c_uid = tempattr.ca_uid;
1074 to_cp->c_flags = tempattr.ca_flags;
1075 to_cp->c_mode = tempattr.ca_mode;
1076 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
1077
1078 /* Reinsert into the cnode hash under new file IDs*/
1079 hfs_chashremove(from_cp);
1080 hfs_chashremove(to_cp);
1081
1082 hfs_chashinsert(from_cp);
1083 hfs_chashinsert(to_cp);
1084
1085 /*
1086 * When a file moves out of "Cleanup At Startup"
1087 * we can drop its NODUMP status.
1088 */
1089 if ((from_cp->c_flags & UF_NODUMP) &&
1090 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
1091 from_cp->c_flags &= ~UF_NODUMP;
1092 from_cp->c_flag |= C_CHANGE;
1093 }
1094
1095 if ((to_cp->c_flags & UF_NODUMP) &&
1096 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
1097 to_cp->c_flags &= ~UF_NODUMP;
1098 to_cp->c_flag |= C_CHANGE;
1099 }
1100
1101Err_Exit:
1102 if (to_rvp)
1103 vrele(to_rvp);
1104 if (from_rvp)
1105 vrele(from_rvp);
1106
1107 // XXXdbg
1108 if (started_tr) {
1109 journal_end_transaction(hfsmp->jnl);
1110 }
1111 if (grabbed_lock) {
1112 hfs_global_shared_lock_release(hfsmp);
1113 }
1114
1115 return (error);
1116}
1117
1118
1119/*
1120
1121#% fsync vp L L L
1122#
1123 vop_fsync {
1124 IN struct vnode *vp;
1125 IN struct ucred *cred;
1126 IN int waitfor;
1127 IN struct proc *p;
1128
1129 */
1130static int
1131hfs_fsync(ap)
1132 struct vop_fsync_args /* {
1133 struct vnode *a_vp;
1134 struct ucred *a_cred;
1135 int a_waitfor;
1136 struct proc *a_p;
1137 } */ *ap;
1138{
1139 struct vnode *vp = ap->a_vp;
1140 struct cnode *cp = VTOC(vp);
1141 struct filefork *fp = NULL;
1142 int retval = 0;
1143 register struct buf *bp;
1144 struct timeval tv;
1145 struct buf *nbp;
1146 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
1147 int s;
1148 int wait;
1149 int retry = 0;
1150
1151 wait = (ap->a_waitfor == MNT_WAIT);
1152
1153 /* HFS directories don't have any data blocks. */
1154 if (vp->v_type == VDIR)
1155 goto metasync;
1156
1157 /*
1158 * For system files flush the B-tree header and
1159 * for regular files write out any clusters
1160 */
1161 if (vp->v_flag & VSYSTEM) {
1162 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1163 // XXXdbg
1164 if (hfsmp->jnl) {
1165 if (BTIsDirty(VTOF(vp))) {
1166 panic("hfs: system file vp 0x%x has dirty blocks (jnl 0x%x)\n",
1167 vp, hfsmp->jnl);
1168 }
1169 } else {
1170 BTFlushPath(VTOF(vp));
1171 }
1172 }
1173 } else if (UBCINFOEXISTS(vp))
1174 (void) cluster_push(vp);
1175
1176 /*
1177 * When MNT_WAIT is requested and the zero fill timeout
1178 * has expired then we must explicitly zero out any areas
1179 * that are currently marked invalid (holes).
1180 *
1181 * Files with NODUMP can bypass zero filling here.
1182 */
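	/*
	 * If the zero-fill timeout has not yet expired, the code below only
	 * records that a sync was wanted (C_ZFWANTSYNC) and defers the
	 * explicit zero-fill to a later fsync or to close time.
	 */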
1183 if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
1184 ((cp->c_flags & UF_NODUMP) == 0) &&
1185 UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
1186 cp->c_zftimeout != 0) {
1187 int devblksize;
1188 int was_nocache;
1189
1190 if (time.tv_sec < cp->c_zftimeout) {
1191 /* Remember that a force sync was requested. */
1192 cp->c_flag |= C_ZFWANTSYNC;
1193 goto loop;
1194 }
1195 VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
1196 was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
1197 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
1198
1199 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
1200 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
1201 off_t start = invalid_range->rl_start;
1202 off_t end = invalid_range->rl_end;
1203
1204 /* The range about to be written must be validated
1205 * first, so that VOP_CMAP() will return the
1206 * appropriate mapping for the cluster code:
1207 */
1208 rl_remove(start, end, &fp->ff_invalidranges);
1209
1210 (void) cluster_write(vp, (struct uio *) 0,
1211 fp->ff_size,
1212 invalid_range->rl_end + 1,
1213 invalid_range->rl_start,
1214 (off_t)0, devblksize,
1215 IO_HEADZEROFILL | IO_NOZERODIRTY);
1216 cp->c_flag |= C_MODIFIED;
1217 }
1218 (void) cluster_push(vp);
1219 if (!was_nocache)
1220 CLR(vp->v_flag, VNOCACHE_DATA);
1221 cp->c_flag &= ~C_ZFWANTSYNC;
1222 cp->c_zftimeout = 0;
1223 }
1224
1225 /*
1226 * Flush all dirty buffers associated with a vnode.
1227 */
1228loop:
1229 s = splbio();
1230 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1231 nbp = bp->b_vnbufs.le_next;
1232 if ((bp->b_flags & B_BUSY))
1233 continue;
1234 if ((bp->b_flags & B_DELWRI) == 0)
1235 panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp, hfsmp);
1236 // XXXdbg
1237 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1238 if ((bp->b_flags & B_META) == 0) {
1239 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1240 bp, hfsmp->jnl);
1241 }
1242 // if journal_active() returns >= 0 then the journal is ok and we
1243 // shouldn't do anything to this locked block (because it is part
1244 // of a transaction). otherwise we'll just go through the normal
1245 // code path and flush the buffer.
1246 if (journal_active(hfsmp->jnl) >= 0) {
1247 continue;
1248 }
1249 }
1250
1251 bremfree(bp);
1252 bp->b_flags |= B_BUSY;
1253 /* Clear B_LOCKED, should only be set on meta files */
1254 bp->b_flags &= ~B_LOCKED;
1255
1256 splx(s);
1257 /*
1258 * Wait for I/O associated with indirect blocks to complete,
1259 * since there is no way to quickly wait for them below.
1260 */
1261 if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
1262 (void) bawrite(bp);
1263 else
1264 (void) VOP_BWRITE(bp);
1265 goto loop;
1266 }
1267
1268 if (wait) {
1269 while (vp->v_numoutput) {
1270 vp->v_flag |= VBWAIT;
1271 tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);
1272 }
1273
1274 // XXXdbg -- is checking for hfsmp->jnl == NULL the right
1275 // thing to do?
1276 if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
1277 /* still have some dirty buffers */
1278 if (retry++ > 10) {
1279 vprint("hfs_fsync: dirty", vp);
1280 splx(s);
1281 /*
1282 * Looks like the requests are not
1283 * getting queued to the driver.
1284 * Retrying here causes a cpu bound loop.
1285 * Yield to the other threads and hope
1286 * for the best.
1287 */
1288 (void)tsleep((caddr_t)&vp->v_numoutput,
1289 PRIBIO + 1, "hfs_fsync", hz/10);
1290 retry = 0;
1291 } else {
1292 splx(s);
1293 }
1294 /* try again */
1295 goto loop;
1296 }
1297 }
1298 splx(s);
1299
1300metasync:
1301 tv = time;
1302 if (vp->v_flag & VSYSTEM) {
1303 if (VTOF(vp)->fcbBTCBPtr != NULL)
1304 BTSetLastSync(VTOF(vp), tv.tv_sec);
1305 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1306 } else /* User file */ {
1307 retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);
1308
1309 /* When MNT_WAIT is requested push out any delayed meta data */
1310 if ((retval == 0) && wait && cp->c_hint &&
1311 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
1312 hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);
1313 }
1314 }
1315
1316 return (retval);
1317}
1318
1319/* Sync an hfs catalog b-tree node */
1320static int
1321hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
1322{
1323 struct vnode *vp;
1324 struct buf *bp;
1325 struct buf *nbp;
1326 int s;
1327
1328 vp = HFSTOVCB(hfsmp)->catalogRefNum;
1329
1330 // XXXdbg - don't need to do this on a journaled volume
1331 if (hfsmp->jnl) {
1332 return 0;
1333 }
1334
1335 if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)
1336 return (0);
1337
1338 /*
1339 * Look for a matching node that has been delayed
1340 * but is not part of a set (B_LOCKED).
1341 */
1342 s = splbio();
1343 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1344 nbp = bp->b_vnbufs.le_next;
1345 if (bp->b_flags & B_BUSY)
1346 continue;
1347 if (bp->b_lblkno == node) {
1348 if (bp->b_flags & B_LOCKED)
1349 break;
1350
1351 bremfree(bp);
1352 bp->b_flags |= B_BUSY;
1353 splx(s);
1354 (void) VOP_BWRITE(bp);
1355 goto exit;
1356 }
1357 }
1358 splx(s);
1359exit:
1360 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1361
1362 return (0);
1363}
1364
1365__private_extern__
1366int
1367hfs_btsync(struct vnode *vp, int sync_transaction)
1368{
1369 struct cnode *cp = VTOC(vp);
1370 register struct buf *bp;
1371 struct timeval tv;
1372 struct buf *nbp;
1373 struct hfsmount *hfsmp = VTOHFS(vp);
1374 int s;
1375
1376 /*
1377 * Flush all dirty buffers associated with b-tree.
1378 */
1379loop:
1380 s = splbio();
1381
1382 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1383 nbp = bp->b_vnbufs.le_next;
1384 if ((bp->b_flags & B_BUSY))
1385 continue;
1386 if ((bp->b_flags & B_DELWRI) == 0)
1387 panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);
1388
1389 // XXXdbg
1390 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1391 if ((bp->b_flags & B_META) == 0) {
1392 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1393 bp, hfsmp->jnl);
1394 }
1395 // if journal_active() returns >= 0 then the journal is ok and we
1396 // shouldn't do anything to this locked block (because it is part
1397 // of a transaction). otherwise we'll just go through the normal
1398 // code path and flush the buffer.
1399 if (journal_active(hfsmp->jnl) >= 0) {
1400 continue;
1401 }
1402 }
1403
1404 if (sync_transaction && !(bp->b_flags & B_LOCKED))
1405 continue;
1406
1407 bremfree(bp);
1408 bp->b_flags |= B_BUSY;
1409 bp->b_flags &= ~B_LOCKED;
1410
1411 splx(s);
1412
1413 (void) bawrite(bp);
1414
1415 goto loop;
1416 }
1417 splx(s);
1418
1419 tv = time;
1420 if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
1421 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
1422 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1423
1424 return 0;
1425}
1426
1427/*
1428 * Rmdir system call.
1429#% rmdir dvp L U U
1430#% rmdir vp L U U
1431#
1432 vop_rmdir {
1433 IN WILLRELE struct vnode *dvp;
1434 IN WILLRELE struct vnode *vp;
1435 IN struct componentname *cnp;
1436
1437 */
1438static int
1439hfs_rmdir(ap)
1440 struct vop_rmdir_args /* {
1441 struct vnode *a_dvp;
1442 struct vnode *a_vp;
1443 struct componentname *a_cnp;
1444 } */ *ap;
1445{
1446 struct vnode *vp = ap->a_vp;
1447 struct vnode *dvp = ap->a_dvp;
1448 struct proc *p = ap->a_cnp->cn_proc;
1449 struct cnode *cp;
1450 struct cnode *dcp;
1451 struct hfsmount * hfsmp;
1452 struct timeval tv;
1453 int error = 0, started_tr = 0, grabbed_lock = 0;
1454
1455 cp = VTOC(vp);
1456 dcp = VTOC(dvp);
1457 hfsmp = VTOHFS(vp);
1458
1459 if (dcp == cp) {
1460 vrele(dvp);
1461 vput(vp);
1462 return (EINVAL); /* cannot remove "." */
1463 }
1464
1465#if QUOTA
1466 (void)hfs_getinoquota(cp);
1467#endif
1468
1469 // XXXdbg
1470 hfs_global_shared_lock_acquire(hfsmp);
1471 grabbed_lock = 1;
1472 if (hfsmp->jnl) {
1473 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1474 goto out;
1475 }
1476 started_tr = 1;
1477 }
1478
1479 /*
1480 * Verify the directory is empty (and valid).
1481 * (Rmdir ".." won't be valid since
1482 * ".." will contain a reference to
1483 * the current directory and thus be
1484 * non-empty.)
1485 */
1486 if (cp->c_entries != 0) {
1487 error = ENOTEMPTY;
1488 goto out;
1489 }
1490 if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
1491 error = EPERM;
1492 goto out;
1493 }
1494
1495 /* Remove the entry from the namei cache: */
1496 cache_purge(vp);
1497
1498 /* Lock catalog b-tree */
1499 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1500 if (error) goto out;
1501
1502 if (cp->c_entries > 0)
1503 panic("hfs_rmdir: attempting to delete a non-empty directory!");
1504 /* Remove entry from catalog */
1505 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
1506
1507 /* Unlock catalog b-tree */
1508 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1509 if (error) goto out;
1510
1511#if QUOTA
1512 (void)hfs_chkiq(cp, -1, NOCRED, 0);
1513#endif /* QUOTA */
1514
1515 /* The parent lost a child */
1516 if (dcp->c_entries > 0)
1517 dcp->c_entries--;
1518 if (dcp->c_nlink > 0)
1519 dcp->c_nlink--;
1520 dcp->c_flag |= C_CHANGE | C_UPDATE;
1521 tv = time;
1522 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
1523
1524 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
1525
1526 cp->c_mode = 0; /* Makes the vnode go away...see inactive */
1527 cp->c_flag |= C_NOEXISTS;
1528out:
1529 if (dvp)
1530 vput(dvp);
1531 vput(vp);
1532
1533 // XXXdbg
1534 if (started_tr) {
1535 journal_end_transaction(hfsmp->jnl);
1536 }
1537 if (grabbed_lock) {
1538 hfs_global_shared_lock_release(hfsmp);
1539 }
1540
1541 return (error);
1542}
1543
1544/*
1545
1546#% remove dvp L U U
1547#% remove vp L U U
1548#
1549 vop_remove {
1550 IN WILLRELE struct vnode *dvp;
1551 IN WILLRELE struct vnode *vp;
1552 IN struct componentname *cnp;
1553
1554 */
1555
1556static int
1557hfs_remove(ap)
1558 struct vop_remove_args /* {
1559 struct vnode *a_dvp;
1560 struct vnode *a_vp;
1561 struct componentname *a_cnp;
1562 } */ *ap;
1563{
1564 struct vnode *vp = ap->a_vp;
1565 struct vnode *dvp = ap->a_dvp;
1566 struct vnode *rvp = NULL;
1567 struct cnode *cp;
1568 struct cnode *dcp;
1569 struct hfsmount *hfsmp;
1570 struct proc *p = current_proc();
1571 int dataforkbusy = 0;
1572 int rsrcforkbusy = 0;
1573 int truncated = 0;
1574 struct timeval tv;
1575 int error = 0;
1576 int started_tr = 0, grabbed_lock = 0;
1577
1578 /* Redirect directories to rmdir */
1579 if (vp->v_type == VDIR)
1580 return (hfs_rmdir(ap));
1581
1582 cp = VTOC(vp);
1583 dcp = VTOC(dvp);
1584 hfsmp = VTOHFS(vp);
1585
1586 if (cp->c_parentcnid != dcp->c_cnid) {
1587 error = EINVAL;
1588 goto out;
1589 }
1590
1591 /* Make sure a remove is permitted */
1592 if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
1593 (VTOC(dvp)->c_flags & APPEND) ||
1594 VNODE_IS_RSRC(vp)) {
1595 error = EPERM;
1596 goto out;
1597 }
1598
1599 /*
1600 * Acquire a vnode for a non-empty resource fork.
1601 * (needed for VOP_TRUNCATE)
1602 */
1603 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
1604 error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);
1605 if (error)
1606 goto out;
1607 }
1608
1609 // XXXdbg - don't allow deleting the journal or journal_info_block
1610 if (hfsmp->jnl && cp->c_datafork) {
1611 struct HFSPlusExtentDescriptor *extd;
1612
1613 extd = &cp->c_datafork->ff_data.cf_extents[0];
1614 if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
1615 error = EPERM;
1616 goto out;
1617 }
1618 }
1619
1620 /*
1621 * Check if this file is being used.
1622 *
1623 * The namei done for the remove took a reference on the
1624 * vnode (vp). And we took a ref on the resource vnode (rvp).
1625 * Hence set 1 in the tookref parameter of ubc_isinuse().
1626 */
1627 if (UBCISVALID(vp) && ubc_isinuse(vp, 1))
1628 dataforkbusy = 1;
1629 if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))
1630 rsrcforkbusy = 1;
1631
1632 /*
1633 * Carbon semantics prohibit deleting busy files.
1634 * (enforced when NODELETEBUSY is requested)
1635 */
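	/*
	 * When busy deletes are allowed, the busy file is not removed in
	 * place; it is renamed into the hidden private metadata directory
	 * further below and reclaimed later (see hfs_remove_orphans).
	 */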
1636 if ((dataforkbusy || rsrcforkbusy) &&
1637 ((ap->a_cnp->cn_flags & NODELETEBUSY) ||
1638 (hfsmp->hfs_private_metadata_dir == 0))) {
1639 error = EBUSY;
1640 goto out;
1641 }
1642
1643#if QUOTA
1644 (void)hfs_getinoquota(cp);
1645#endif /* QUOTA */
1646
1647 // XXXdbg
1648 hfs_global_shared_lock_acquire(hfsmp);
1649 grabbed_lock = 1;
1650 if (hfsmp->jnl) {
1651 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
1652 goto out;
1653 }
1654 started_tr = 1;
1655 }
1656
1657 /* Remove our entry from the namei cache. */
1658 cache_purge(vp);
1659
1660 // XXXdbg - if we're journaled, kill any dirty symlink buffers
1661 if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
1662 struct buf *bp, *nbp;
1663
1664 recheck:
1665 for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
1666 nbp = bp->b_vnbufs.le_next;
1667
1668 if ((bp->b_flags & B_BUSY)) {
1669 // if it was busy, someone else must be dealing
1670 // with it so just move on.
1671 continue;
1672 }
1673
1674 if (!(bp->b_flags & B_META)) {
1675 panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);
1676 }
1677
1678 // if it's part of the current transaction, kill it.
1679 if (bp->b_flags & B_LOCKED) {
1680 bremfree(bp);
1681 bp->b_flags |= B_BUSY;
1682 journal_kill_block(hfsmp->jnl, bp);
1683 goto recheck;
1684 }
1685 }
1686 }
1687 // XXXdbg
1688
1689 /*
1690 * Truncate any non-busy forks. Busy forks will
1691 * get truncated when their vnode goes inactive.
1692 *
1693 * (Note: hard links are truncated in VOP_INACTIVE)
1694 */
1695 if ((cp->c_flag & C_HARDLINK) == 0) {
1696 int mode = cp->c_mode;
1697
1698 if (!dataforkbusy && cp->c_datafork->ff_blocks != 0) {
1699 cp->c_mode = 0; /* Suppress VOP_UPDATES */
1700 error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);
1701 cp->c_mode = mode;
1702 if (error)
1703 goto out;
1704 truncated = 1;
1705 }
1706 if (!rsrcforkbusy && rvp) {
1707 cp->c_mode = 0; /* Suppress VOP_UPDATES */
1708 error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);
1709 cp->c_mode = mode;
1710 if (error)
1711 goto out;
1712 truncated = 1;
1713 }
1714 }
1715 /*
1716 * There are 3 remove cases to consider:
1717 * 1. File is a hardlink ==> remove the link
1718 * 2. File is busy (in use) ==> move/rename the file
1719 * 3. File is not in use ==> remove the file
1720 */
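	/*
	 * Both the hard-link case (once the last link is gone) and the busy
	 * case rename the file with MAKE_DELETED_NAME() so that
	 * hfs_remove_orphans() can find and reclaim it at mount time if the
	 * system goes down before the last reference is dropped.
	 */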
1721
1722 if (cp->c_flag & C_HARDLINK) {
1723 struct cat_desc desc;
1724
1725 if ((ap->a_cnp->cn_flags & HASBUF) == 0 ||
1726 ap->a_cnp->cn_nameptr[0] == '\0') {
1727 error = ENOENT; /* name missing! */
1728 goto out;
1729 }
1730
1731 /* Setup a descriptor for the link */
1732 bzero(&desc, sizeof(desc));
1733 desc.cd_nameptr = ap->a_cnp->cn_nameptr;
1734 desc.cd_namelen = ap->a_cnp->cn_namelen;
1735 desc.cd_parentcnid = dcp->c_cnid;
1736 /* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
1737 desc.cd_cnid = cp->c_cnid;
1738
1739 /* Lock catalog b-tree */
1740 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1741 if (error)
1742 goto out;
1743
1744 /* Delete the link record */
1745 error = cat_delete(hfsmp, &desc, &cp->c_attr);
1746
1747 if ((error == 0) && (--cp->c_nlink < 1)) {
1748 char inodename[32];
1749 char delname[32];
1750 struct cat_desc to_desc;
1751 struct cat_desc from_desc;
1752
1753 /*
1754 * This is now essentially an open deleted file.
1755 * Rename it to reflect this state which makes
1756 * orphan file cleanup easier (see hfs_remove_orphans).
1757 * Note: a rename failure here is not fatal.
1758 */
1759 MAKE_INODE_NAME(inodename, cp->c_rdev);
1760 bzero(&from_desc, sizeof(from_desc));
1761 from_desc.cd_nameptr = inodename;
1762 from_desc.cd_namelen = strlen(inodename);
1763 from_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1764 from_desc.cd_flags = 0;
1765 from_desc.cd_cnid = cp->c_fileid;
1766
1767 MAKE_DELETED_NAME(delname, cp->c_fileid);
1768 bzero(&to_desc, sizeof(to_desc));
1769 to_desc.cd_nameptr = delname;
1770 to_desc.cd_namelen = strlen(delname);
1771 to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1772 to_desc.cd_flags = 0;
1773 to_desc.cd_cnid = cp->c_fileid;
1774
1775 (void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
1776 &to_desc, (struct cat_desc *)NULL);
1777 cp->c_flag |= C_DELETED;
1778 }
1779
1780 /* Unlock the Catalog */
1781 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1782
1783 /* All done with component name... */
1784 if ((ap->a_cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME))
1785 FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
1786
1787 if (error != 0)
1788 goto out;
1789
1790 cp->c_flag |= C_CHANGE;
1791 tv = time;
1792 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1793
1794 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
1795
1796 } else if (dataforkbusy || rsrcforkbusy) {
1797 char delname[32];
1798 struct cat_desc to_desc;
1799 struct cat_desc todir_desc;
1800
1801 /*
1802 * Orphan this file (move to hidden directory).
1803 */
1804 bzero(&todir_desc, sizeof(todir_desc));
1805 todir_desc.cd_parentcnid = 2;
1806
1807 MAKE_DELETED_NAME(delname, cp->c_fileid);
1808 bzero(&to_desc, sizeof(to_desc));
1809 to_desc.cd_nameptr = delname;
1810 to_desc.cd_namelen = strlen(delname);
1811 to_desc.cd_parentcnid = hfsmp->hfs_private_metadata_dir;
1812 to_desc.cd_flags = 0;
1813 to_desc.cd_cnid = cp->c_cnid;
1814
1815 /* Lock catalog b-tree */
1816 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1817 if (error)
1818 goto out;
1819
1820 error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
1821 &to_desc, (struct cat_desc *)NULL);
1822
1823 // XXXdbg - only bump this count if we were successful
1824 if (error == 0) {
1825 hfsmp->hfs_privdir_attr.ca_entries++;
1826 }
1827 (void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
1828 &hfsmp->hfs_privdir_attr, NULL, NULL);
1829
1830 /* Unlock the Catalog */
1831 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1832 if (error) goto out;
1833
1834 cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;
1835 --cp->c_nlink;
1836 tv = time;
1837 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1838
1839 } else /* Not busy */ {
1840
1841 if (cp->c_blocks > 0) {
1842 printf("hfs_remove: attempting to delete a non-empty file!");
1843 error = EBUSY;
1844 goto out;
1845 }
1846
1847 /* Lock catalog b-tree */
1848 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
1849 if (error)
1850 goto out;
1851
1852 error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);
1853
1854 if (error && error != ENXIO && error != ENOENT && truncated) {
1855 if ((cp->c_datafork && cp->c_datafork->ff_data.cf_size != 0) ||
1856 (cp->c_rsrcfork && cp->c_rsrcfork->ff_data.cf_size != 0)) {
1857 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
1858 error, cp->c_datafork->ff_data.cf_size, cp->c_rsrcfork->ff_data.cf_size);
1859 } else {
1860 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
1861 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
1862 }
1863 }
1864
1865 /* Unlock the Catalog */
1866 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1867 if (error) goto out;
1868
1869#if QUOTA
1870 (void)hfs_chkiq(cp, -1, NOCRED, 0);
1871#endif /* QUOTA */
1872
1873 cp->c_mode = 0;
1874 cp->c_flag |= C_CHANGE | C_NOEXISTS;
1875 --cp->c_nlink;
1876 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
1877 }
1878
1879 /*
1880 * All done with this cnode's descriptor...
1881 *
1882 * Note: all future catalog calls for this cnode must be
1883 * by fileid only. This is OK for HFS (which doesn't have
1884 * file thread records) since HFS doesn't support hard
1885 * links or the removal of busy files.
1886 */
1887 cat_releasedesc(&cp->c_desc);
1888
1889 /* In all three cases the parent lost a child */
1890 if (dcp->c_entries > 0)
1891 dcp->c_entries--;
1892 if (dcp->c_nlink > 0)
1893 dcp->c_nlink--;
1894 dcp->c_flag |= C_CHANGE | C_UPDATE;
1895 tv = time;
1896 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
1897
1898 // XXXdbg
1899 if (started_tr) {
1900 journal_end_transaction(hfsmp->jnl);
1901 }
1902 if (grabbed_lock) {
1903 hfs_global_shared_lock_release(hfsmp);
1904 }
1905
1906 if (rvp)
1907 vrele(rvp);
1908 VOP_UNLOCK(vp, 0, p);
1909 // XXXdbg - try to prevent the lost ubc_info panic
1910 if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
1911 (void) ubc_uncache(vp);
1912 }
1913 vrele(vp);
1914 vput(dvp);
1915
1916 return (0);
1917
1918out:
1919 if (rvp)
1920 vrele(rvp);
1921
1922 /* Commit the truncation to the catalog record */
1923 if (truncated) {
1924 cp->c_flag |= C_CHANGE | C_UPDATE;
1925 tv = time;
1926 (void) VOP_UPDATE(vp, &tv, &tv, 0);
1927 }
1928 vput(vp);
1929 vput(dvp);
1930
1931 // XXXdbg
1932 if (started_tr) {
1933 journal_end_transaction(hfsmp->jnl);
1934 }
1935 if (grabbed_lock) {
1936 hfs_global_shared_lock_release(hfsmp);
1937 }
1938
1939 return (error);
1940}
1941
1942
1943__private_extern__ void
1944replace_desc(struct cnode *cp, struct cat_desc *cdp)
1945{
1946 /* First release allocated name buffer */
1947 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
1948 char *name = cp->c_desc.cd_nameptr;
1949
1950 cp->c_desc.cd_nameptr = 0;
1951 cp->c_desc.cd_namelen = 0;
1952 cp->c_desc.cd_flags &= ~CD_HASBUF;
1953 FREE(name, M_TEMP);
1954 }
1955 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
1956
1957 /* Cnode now owns the name buffer */
1958 cdp->cd_nameptr = 0;
1959 cdp->cd_namelen = 0;
1960 cdp->cd_flags &= ~CD_HASBUF;
1961}
1962
1963
1964/*
1965#
1966#% rename fdvp U U U
1967#% rename fvp U U U
1968#% rename tdvp L U U
1969#% rename tvp X U U
1970#
1971 vop_rename {
1972 IN WILLRELE struct vnode *fdvp;
1973 IN WILLRELE struct vnode *fvp;
1974 IN struct componentname *fcnp;
1975 IN WILLRELE struct vnode *tdvp;
1976 IN WILLRELE struct vnode *tvp;
1977 IN struct componentname *tcnp;
1978 };
1979*/
1980/*
1981 * Rename a cnode.
1982 *
1983 * The VFS layer guarantees that source and destination will
1984 * either both be directories, or both not be directories.
1985 *
1986 * When the target is a directory, hfs_rename must ensure
1987 * that it is empty.
1988 *
1989 * The rename system call is responsible for freeing
1990 * the pathname buffers (ie no need to call VOP_ABORTOP).
1991 */
1992
1993static int
1994hfs_rename(ap)
1995 struct vop_rename_args /* {
1996 struct vnode *a_fdvp;
1997 struct vnode *a_fvp;
1998 struct componentname *a_fcnp;
1999 struct vnode *a_tdvp;
2000 struct vnode *a_tvp;
2001 struct componentname *a_tcnp;
2002 } */ *ap;
2003{
2004 struct vnode *tvp = ap->a_tvp;
2005 struct vnode *tdvp = ap->a_tdvp;
2006 struct vnode *fvp = ap->a_fvp;
2007 struct vnode *fdvp = ap->a_fdvp;
2008 struct componentname *tcnp = ap->a_tcnp;
2009 struct componentname *fcnp = ap->a_fcnp;
2010 struct proc *p = fcnp->cn_proc;
2011 struct cnode *fcp = NULL;
2012 struct cnode *fdcp = NULL;
2013 struct cnode *tdcp = VTOC(tdvp);
2014 struct cat_desc from_desc;
2015 struct cat_desc to_desc;
2016 struct cat_desc out_desc;
2017 struct hfsmount *hfsmp;
2018 struct timeval tv;
2019 int fdvp_locked, fvp_locked, tdvp_locked;
2020 int tvp_deleted;
2021 int started_tr = 0, grabbed_lock = 0;
2022 int error = 0;
2023
2024 hfsmp = VTOHFS(tdvp);
2025
2026 /* Establish our vnode lock state. */
2027 tdvp_locked = 1;
2028 fdvp_locked = 0;
2029 fvp_locked = 0;
2030 tvp_deleted = 0;
2031
2032 /*
2033 * When fvp matches tvp they must be case variants
2034 * or hard links.
2035 *
2036 * For the hardlink case there can be an extra ref on fvp.
2037 */
2038 if (fvp == tvp) {
2039 if (VOP_ISLOCKED(fvp) &&
2040 (VTOC(fvp)->c_lock.lk_lockholder == p->p_pid) &&
2041 (VTOC(fvp)->c_lock.lk_lockthread == current_thread())) {
2042 fvp_locked = 1;
2043 vrele(fvp); /* drop the extra ref */
2044 }
2045 tvp = NULL;
2046 /*
2047 * If this is a hard link and it's not a case
2048 * variant then keep tvp around for removal.
2049 */
2050 if ((VTOC(fvp)->c_flag & C_HARDLINK) &&
2051 ((fdvp != tdvp) ||
2052 (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen,
2053 tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) {
2054 tvp = fvp;
2055 }
2056 }
2057
2058 /*
2059 * Check for cross-device rename.
2060 */
2061 if ((fvp->v_mount != tdvp->v_mount) ||
2062 (tvp && (fvp->v_mount != tvp->v_mount))) {
2063 error = EXDEV;
2064 goto out;
2065 }
2066
2067 /*
2068 * Make sure "from" vnode and its parent are changeable.
2069 */
2070 if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) ||
2071 (VTOC(fdvp)->c_flags & APPEND)) {
2072 error = EPERM;
2073 goto out;
2074 }
2075
2076 /*
2077 * Be sure we are not renaming ".", "..", or an alias of ".".
2078 */
2079 if ((fvp->v_type == VDIR) &&
2080 (((fcnp->cn_namelen == 1) && (fcnp->cn_nameptr[0] == '.')) ||
2081 (fdvp == fvp) ||
2082 (fcnp->cn_flags&ISDOTDOT))) {
2083 error = EINVAL;
2084 goto out;
2085 }
2086
2087 /*
2088 * If the destination parent directory is "sticky", then the
2089 * user must own the parent directory, or the destination of
2090 * the rename, otherwise the destination may not be changed
2091 * (except by root). This implements append-only directories.
2092 *
2093 * Note that checks for immutable, write access, and a non-empty
2094 * target are done by the call to VOP_REMOVE.
2095 */
2096 if (tvp && (tdcp->c_mode & S_ISTXT) &&
2097 (tcnp->cn_cred->cr_uid != 0) &&
2098 (tcnp->cn_cred->cr_uid != tdcp->c_uid) &&
2099 (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) {
2100 error = EPERM;
2101 goto out;
2102 }
2103
2104 /*
2105 * All done with preflighting.
2106 *
2107 * We now break the call into two transactions:
2108	 * 1 - Remove the destination (if any) using VOP_REMOVE,
2109 * which in itself is a complete transaction.
2110 *
2111 * 2 - Rename source to destination.
2112 *
2113 * Since all the preflighting is done, we assume that a
2114 * rename failure is unlikely once part 1 is complete.
2115 * Breaking rename into two transactions buys us a much
2116 * simpler implementation with respect to the locking
2117 * protocol. There are only 3 vnodes to worry about
2118 * locking in the correct order (instead of 4).
2119 */
2120
2121 /*
2122 * Part 1 - If the destination exists then it needs to be removed.
2123 */
2124 if (tvp) {
2125 /*
2126		 * VOP_REMOVE will vput tdvp, so we had better bump its
2127		 * ref count and relock it; always set tvp to NULL
2128 * afterwards to indicate that we're done with it.
2129 */
2130 VREF(tdvp);
2131
2132 if (tvp == fvp) {
2133 if (fvp_locked) {
2134 VREF(fvp);
2135 } else {
2136 error = vget(fvp, LK_EXCLUSIVE | LK_RETRY, p);
2137 if (error)
2138 goto out;
2139 fvp_locked = 1;
2140 }
2141 } else {
2142 cache_purge(tvp);
2143 }
2144
2145 /* Clear SAVENAME to keep VOP_REMOVE from smashing tcnp. */
2146 tcnp->cn_flags &= ~SAVENAME;
2147
2148 if (tvp->v_type == VDIR)
2149 error = VOP_RMDIR(tdvp, tvp, tcnp);
2150 else
2151 error = VOP_REMOVE(tdvp, tvp, tcnp);
2152
2153 /* Get lock states back in sync. */
2154 tdvp_locked = 0;
2155 if (tvp == fvp)
2156 fvp_locked = 0;
2157 tvp = NULL; /* all done with tvp */
2158 tvp_deleted = 1;
2159
2160 if (error)
2161 goto out; /* couldn't remove destination! */
2162 }
2163 /*
2164 * All done with tvp.
2165 *
2166 * For POSIX compliance, if tvp was removed the only
2167 * error we can return from this point on is EIO.
2168 */
2169
2170 /*
2171 * Part 2 - rename source to destination
2172 */
2173
2174 /*
2175 * Lock the vnodes before starting a journal transaction.
2176 */
2177 if (fdvp != tdvp) {
2178 /*
2179 * fvp is a child and must be locked last.
2180 */
2181 if (fvp_locked) {
2182 VOP_UNLOCK(fvp, 0, p);
2183 fvp_locked = 0;
2184 }
2185 /*
2186 * If fdvp is the parent of tdvp then it needs to be locked first.
2187 */
2188 if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid)) {
2189 if (tdvp_locked) {
2190 VOP_UNLOCK(tdvp, 0, p);
2191 tdvp_locked = 0;
2192 }
2193 if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2194 goto out;
2195 fdvp_locked = 1;
2196 if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2197 goto out;
2198 tdvp_locked = 1;
2199
2200 } else /* Lock tdvp then fdvp */ {
2201 if (!tdvp_locked) {
2202 if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2203 goto out;
2204 tdvp_locked = 1;
2205 }
2206 if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2207 goto out;
2208 fdvp_locked = 1;
2209 }
2210 } else if (!tdvp_locked) {
2211 /*
2212 * fvp is a child and must be locked last.
2213 */
2214 if (fvp_locked) {
2215 VOP_UNLOCK(fvp, 0, p);
2216 fvp_locked = 0;
2217 }
2218 if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2219 goto out;
2220 tdvp_locked = 1;
2221 }
2222
2223	/* Now it's safe to lock fvp */
2224 if (!fvp_locked) {
2225		if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)))
2226 goto out;
2227 fvp_locked = 1;
2228 }
2229
2230 fdcp = VTOC(fdvp);
2231 fcp = VTOC(fvp);
2232
2233 /*
2234 * When a file moves out of "Cleanup At Startup"
2235 * we can drop its NODUMP status.
2236 */
2237 if ((fcp->c_flags & UF_NODUMP) &&
2238 (fvp->v_type == VREG) &&
2239 (fdvp != tdvp) &&
2240 (fdcp->c_desc.cd_nameptr != NULL) &&
2241 (strcmp(fdcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) {
2242 fcp->c_flags &= ~UF_NODUMP;
2243 fcp->c_flag |= C_CHANGE;
2244 tv = time;
2245 (void) VOP_UPDATE(fvp, &tv, &tv, 0);
2246 }
2247
2248 hfs_global_shared_lock_acquire(hfsmp);
2249 grabbed_lock = 1;
2250 if (hfsmp->jnl) {
2251 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
2252 goto out;
2253 }
2254 started_tr = 1;
2255 }
2256
2257 cache_purge(fvp);
2258
2259 bzero(&from_desc, sizeof(from_desc));
2260 from_desc.cd_nameptr = fcnp->cn_nameptr;
2261 from_desc.cd_namelen = fcnp->cn_namelen;
2262 from_desc.cd_parentcnid = fdcp->c_cnid;
2263 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2264 from_desc.cd_cnid = fcp->c_cnid;
2265
2266 bzero(&to_desc, sizeof(to_desc));
2267 to_desc.cd_nameptr = tcnp->cn_nameptr;
2268 to_desc.cd_namelen = tcnp->cn_namelen;
2269 to_desc.cd_parentcnid = tdcp->c_cnid;
2270 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2271 to_desc.cd_cnid = fcp->c_cnid;
2272
2273 /* Lock catalog b-tree */
2274 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
2275 if (error)
2276 goto out;
2277
2278 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
2279
2280 /* Unlock catalog b-tree */
2281 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
2282 if (error)
2283 goto out;
2284
2285 /* Update cnode's catalog descriptor */
2286 replace_desc(fcp, &out_desc);
2287
2288 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
2289 (fdcp->c_cnid == kHFSRootFolderID));
2290 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
2291 (tdcp->c_cnid == kHFSRootFolderID));
2292
2293 VOP_UNLOCK(fvp, 0, p);
2294 fcp = NULL;
2295 fvp_locked = 0;
2296 /* All done with fvp. */
2297
2298 /* Update both parent directories. */
2299 tv = time;
2300 if (fdvp != tdvp) {
2301 tdcp->c_nlink++;
2302 tdcp->c_entries++;
2303 if (fdcp->c_nlink > 0)
2304 fdcp->c_nlink--;
2305 if (fdcp->c_entries > 0)
2306 fdcp->c_entries--;
2307 fdcp->c_flag |= C_CHANGE | C_UPDATE;
2308 (void) VOP_UPDATE(fdvp, &tv, &tv, 0);
2309 }
2310 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2311 tdcp->c_flag |= C_CHANGE | C_UPDATE;
2312 (void) VOP_UPDATE(tdvp, &tv, &tv, 0);
2313
2314out:
2315 if (started_tr) {
2316 journal_end_transaction(hfsmp->jnl);
2317 }
2318 if (grabbed_lock) {
2319 hfs_global_shared_lock_release(hfsmp);
2320 }
2321
2322 if (fvp_locked) {
2323 VOP_UNLOCK(fvp, 0, p);
2324 }
2325 if (fdvp_locked) {
2326 VOP_UNLOCK(fdvp, 0, p);
2327 }
2328 if (tdvp_locked) {
2329 VOP_UNLOCK(tdvp, 0, p);
2330 }
2331 if (tvp && (tvp != fvp)) {
2332 if (tvp != tdvp)
2333 VOP_UNLOCK(tvp, 0, p);
2334 vrele(tvp);
2335 }
2336
2337 vrele(fvp);
2338 vrele(fdvp);
2339 vrele(tdvp);
2340
2341 /* After tvp is removed the only acceptable error is EIO */
2342 if ((error == ENOSPC) && tvp_deleted)
2343 error = EIO;
2344
2345 return (error);
2346}
2347
2348
2349
2350/*
2351 * Mkdir system call
2352#% mkdir dvp L U U
2353#% mkdir vpp - L -
2354#
2355 vop_mkdir {
2356 IN WILLRELE struct vnode *dvp;
2357 OUT struct vnode **vpp;
2358 IN struct componentname *cnp;
2359 IN struct vattr *vap;
2360
2361 We are responsible for freeing the namei buffer,
2362 it is done in hfs_makenode()
2363*/
2364
2365static int
2366hfs_mkdir(ap)
2367 struct vop_mkdir_args /* {
2368 struct vnode *a_dvp;
2369 struct vnode **a_vpp;
2370 struct componentname *a_cnp;
2371 struct vattr *a_vap;
2372 } */ *ap;
2373{
2374 struct vattr *vap = ap->a_vap;
2375
2376 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
2377 ap->a_dvp, ap->a_vpp, ap->a_cnp));
2378}
2379
2380
2381/*
2382 * symlink -- make a symbolic link
2383#% symlink dvp L U U
2384#% symlink vpp - U -
2385#
2386# XXX - note that the return vnode has already been VRELE'ed
2387# by the filesystem layer. To use it you must use vget,
2388# possibly with a further namei.
2389#
2390 vop_symlink {
2391 IN WILLRELE struct vnode *dvp;
2392 OUT WILLRELE struct vnode **vpp;
2393 IN struct componentname *cnp;
2394 IN struct vattr *vap;
2395 IN char *target;
2396
2397 We are responsible for freeing the namei buffer,
2398 it is done in hfs_makenode().
2399
2400*/
2401
2402static int
2403hfs_symlink(ap)
2404 struct vop_symlink_args /* {
2405 struct vnode *a_dvp;
2406 struct vnode **a_vpp;
2407 struct componentname *a_cnp;
2408 struct vattr *a_vap;
2409 char *a_target;
2410 } */ *ap;
2411{
2412 register struct vnode *vp, **vpp = ap->a_vpp;
2413 struct hfsmount *hfsmp;
2414 struct filefork *fp;
2415 int len, error;
2416 struct buf *bp = NULL;
2417
2418 /* HFS standard disks don't support symbolic links */
2419 if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
2420 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
2421 vput(ap->a_dvp);
2422 return (EOPNOTSUPP);
2423 }
2424
2425 /* Check for empty target name */
2426 if (ap->a_target[0] == 0) {
2427 VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
2428 vput(ap->a_dvp);
2429 return (EINVAL);
2430 }
2431
2432
2433 hfsmp = VTOHFS(ap->a_dvp);
2434
2435 /* Create the vnode */
2436 if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
2437 ap->a_dvp, vpp, ap->a_cnp))) {
2438 return (error);
2439 }
2440
2441 vp = *vpp;
2442 len = strlen(ap->a_target);
2443 fp = VTOF(vp);
2444 fp->ff_clumpsize = VTOVCB(vp)->blockSize;
2445
2446#if QUOTA
2447 (void)hfs_getinoquota(VTOC(vp));
2448#endif /* QUOTA */
2449
2450 // XXXdbg
2451 hfs_global_shared_lock_acquire(hfsmp);
2452 if (hfsmp->jnl) {
2453 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
2454 hfs_global_shared_lock_release(hfsmp);
2455 vput(vp);
2456 return error;
2457 }
2458 }
2459
2460 /* Allocate space for the link */
2461 error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
2462 ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);
2463 if (error)
2464 goto out; /* XXX need to remove link */
2465
2466 /* Write the link to disk */
2467 bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
2468 0, 0, BLK_META);
2469 if (hfsmp->jnl) {
2470 journal_modify_block_start(hfsmp->jnl, bp);
2471 }
2472 bzero(bp->b_data, bp->b_bufsize);
2473 bcopy(ap->a_target, bp->b_data, len);
2474 if (hfsmp->jnl) {
2475 journal_modify_block_end(hfsmp->jnl, bp);
2476 } else {
2477 bawrite(bp);
2478 }
2479out:
2480 if (hfsmp->jnl) {
2481 journal_end_transaction(hfsmp->jnl);
2482 }
2483 hfs_global_shared_lock_release(hfsmp);
2484 vput(vp);
2485 return (error);
2486}
2487
2488
2489/*
2490 * Dummy dirents to simulate the "." and ".." entries of the directory
2491 * in an HFS filesystem. HFS doesn't provide these on disk. Note that
2492 * the size of these entries is the smallest needed to represent them
2493 * (only 12 bytes each).
2494 */
2495static hfsdotentry rootdots[2] = {
2496 {
2497 1, /* d_fileno */
2498 sizeof(struct hfsdotentry), /* d_reclen */
2499 DT_DIR, /* d_type */
2500 1, /* d_namlen */
2501 "." /* d_name */
2502 },
2503 {
2504 1, /* d_fileno */
2505 sizeof(struct hfsdotentry), /* d_reclen */
2506 DT_DIR, /* d_type */
2507 2, /* d_namlen */
2508 ".." /* d_name */
2509 }
2510};
2511
2512/* 4.3 Note:
2513* There is some confusion as to what the semantics of uio_offset are.
2514* In ufs, it represents the actual byte offset within the directory
2515* "file." HFS, however, just uses it as an entry counter - essentially
2516* assuming that it has no meaning except to the hfs_readdir function.
2517* This approach would be more efficient here, but some callers may
2518* assume the uio_offset acts like a byte offset. NFS in fact
2519* monkeys around with the offset field a lot between readdir calls.
2520*
2521* The use of the uiop->uio_resid and uiop->uio_iov->iov_len
2522* fields is a mess as well. The libc function readdir() returns
2523* NULL (indicating the end of a directory) when either
2524* the getdirentries() syscall (which calls this and returns
2525* the size of the buffer passed in less the value of uiop->uio_resid)
2526* returns 0, or it encounters a struct direct record with a d_reclen of zero.
2527* nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2528* of the directory by testing uiop->uio_resid == 0. The solution
2529* is to pad the size of the last struct direct in a given
2530* block to fill the block if we are not at the end of the directory.
2531*/
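/*
 * Illustrative (userspace) sketch of the caller behavior described above,
 * assuming a descriptor fd open on a directory: libc-style iteration over
 * getdirentries(2) that treats a zero return as end-of-directory.  The
 * buffer size and the lack of error handling are assumptions made for the
 * example.
 */
#if 0
	char buf[DIRBLKSIZ];
	long base;
	int nbytes;

	while ((nbytes = getdirentries(fd, buf, sizeof(buf), &base)) > 0) {
		struct dirent *dp = (struct dirent *)buf;
		char *end = buf + nbytes;

		while ((char *)dp < end && dp->d_reclen != 0) {
			/* consume dp->d_name here */
			dp = (struct dirent *)((char *)dp + dp->d_reclen);
		}
	}
	/* nbytes == 0 means end of directory; nbytes < 0 means an error */
#endif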
2532
2533
2534/*
2535 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
2536 * returned by the stat() call as the block size. This is mentioned in the man page for getdirentries():
2537 * "Nbytes must be greater than or equal to the block size associated with the file,
2538 * see stat(2)". Might as well settle on the same size as ufs. Second, this makes sure there is enough
2539 * room for the . and .. entries that have to be added manually.
2540 */
2541
2542/*
2543#% readdir vp L L L
2544#
2545vop_readdir {
2546 IN struct vnode *vp;
2547 INOUT struct uio *uio;
2548 IN struct ucred *cred;
2549 INOUT int *eofflag;
2550 OUT int *ncookies;
2551 INOUT u_long **cookies;
2552 */
2553static int
2554hfs_readdir(ap)
2555 struct vop_readdir_args /* {
2556 struct vnode *vp;
2557 struct uio *uio;
2558 struct ucred *cred;
2559 int *eofflag;
2560 int *ncookies;
2561 u_long **cookies;
2562 } */ *ap;
2563{
2564 register struct uio *uio = ap->a_uio;
2565 struct cnode *cp = VTOC(ap->a_vp);
2566 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
2567 struct proc *p = current_proc();
2568 off_t off = uio->uio_offset;
2569 int retval = 0;
2570 int eofflag = 0;
2571 void *user_start = NULL;
2572 int user_len;
2573
2574 /* We assume it's all one big buffer... */
2575 if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE)
2576 return EINVAL;
2577
2578 // XXXdbg
2579 // We have to lock the user's buffer here so that we won't
2580 // fault on it after we've acquired a shared lock on the
2581 // catalog file. The issue is that you can get a 3-way
2582 // deadlock if someone else starts a transaction and then
2583 // tries to lock the catalog file but can't because we're
2584 // here and we can't service our page fault because VM is
2585 // blocked trying to start a transaction as a result of
2586 // trying to free up pages for our page fault. It's messy
2587	// but it does happen on dual-processors that are paging
2588 // heavily (see radar 3082639 for more info). By locking
2589 // the buffer up-front we prevent ourselves from faulting
2590 // while holding the shared catalog file lock.
2591 //
2592 // Fortunately this and hfs_search() are the only two places
2593 // currently (10/30/02) that can fault on user data with a
2594 // shared lock on the catalog file.
2595 //
2596 if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
2597 user_start = uio->uio_iov->iov_base;
2598 user_len = uio->uio_iov->iov_len;
2599
2600 if ((retval = vslock(user_start, user_len)) != 0) {
2601 return retval;
2602 }
2603 }
2604
2605
2606 /* Create the entries for . and .. */
2607 if (uio->uio_offset < sizeof(rootdots)) {
2608 caddr_t dep;
2609 size_t dotsize;
2610
2611 rootdots[0].d_fileno = cp->c_cnid;
2612 rootdots[1].d_fileno = cp->c_parentcnid;
2613
2614 if (uio->uio_offset == 0) {
2615 dep = (caddr_t) &rootdots[0];
2616 dotsize = 2* sizeof(struct hfsdotentry);
2617 } else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
2618 dep = (caddr_t) &rootdots[1];
2619 dotsize = sizeof(struct hfsdotentry);
2620 } else {
2621 retval = EINVAL;
2622 goto Exit;
2623 }
2624
2625 retval = uiomove(dep, dotsize, uio);
2626 if (retval != 0)
2627 goto Exit;
2628 }
2629
2630 /* If there are no children then we're done */
2631 if (cp->c_entries == 0) {
2632 eofflag = 1;
2633 retval = 0;
2634 goto Exit;
2635 }
2636
2637 /* Lock catalog b-tree */
2638 retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
2639 if (retval) goto Exit;
2640
2641 retval = cat_getdirentries(hfsmp, &cp->c_desc, uio, &eofflag);
2642
2643 /* Unlock catalog b-tree */
2644 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
2645
2646 if (retval != E_NONE) {
2647 goto Exit;
2648 }
2649
2650	/* Were we already past EOF? */
2651 if (uio->uio_offset == off) {
2652 retval = E_NONE;
2653 goto Exit;
2654 }
2655
2656 cp->c_flag |= C_ACCESS;
2657 /* Bake any cookies */
2658 if (!retval && ap->a_ncookies != NULL) {
2659 struct dirent* dpStart;
2660 struct dirent* dpEnd;
2661 struct dirent* dp;
2662 int ncookies;
2663 u_long *cookies;
2664 u_long *cookiep;
2665
2666 /*
2667 * Only the NFS server uses cookies, and it loads the
2668 * directory block into system space, so we can just look at
2669 * it directly.
2670 */
2671 if (uio->uio_segflg != UIO_SYSSPACE)
2672 panic("hfs_readdir: unexpected uio from NFS server");
2673 dpStart = (struct dirent *)(uio->uio_iov->iov_base - (uio->uio_offset - off));
2674 dpEnd = (struct dirent *) uio->uio_iov->iov_base;
2675 for (dp = dpStart, ncookies = 0;
2676 dp < dpEnd && dp->d_reclen != 0;
2677 dp = (struct dirent *)((caddr_t)dp + dp->d_reclen))
2678 ncookies++;
2679 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);
2680 for (dp = dpStart, cookiep = cookies;
2681 dp < dpEnd;
2682 dp = (struct dirent *)((caddr_t) dp + dp->d_reclen)) {
2683 off += dp->d_reclen;
2684 *cookiep++ = (u_long) off;
2685 }
2686 *ap->a_ncookies = ncookies;
2687 *ap->a_cookies = cookies;
2688 }
2689
2690Exit:;
2691 if (hfsmp->jnl && user_start) {
2692 vsunlock(user_start, user_len, TRUE);
2693 }
2694
2695 if (ap->a_eofflag)
2696 *ap->a_eofflag = eofflag;
2697
2698 return (retval);
2699}
2700
2701
2702/*
2703 * Return target name of a symbolic link
2704#% readlink vp L L L
2705#
2706 vop_readlink {
2707 IN struct vnode *vp;
2708 INOUT struct uio *uio;
2709 IN struct ucred *cred;
2710 */
2711
2712static int
2713hfs_readlink(ap)
2714 struct vop_readlink_args /* {
2715 struct vnode *a_vp;
2716 struct uio *a_uio;
2717 struct ucred *a_cred;
2718 } */ *ap;
2719{
2720 int retval;
2721 struct vnode *vp = ap->a_vp;
2722 struct cnode *cp;
2723 struct filefork *fp;
2724
2725 if (vp->v_type != VLNK)
2726 return (EINVAL);
2727
2728 cp = VTOC(vp);
2729 fp = VTOF(vp);
2730
2731 /* Zero length sym links are not allowed */
2732 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
2733 VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;
2734 return (EINVAL);
2735 }
2736
2737 /* Cache the path so we don't waste buffer cache resources */
2738 if (fp->ff_symlinkptr == NULL) {
2739 struct buf *bp = NULL;
2740
2741 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
2742 retval = meta_bread(vp, 0,
2743 roundup((int)fp->ff_size,
2744 VTOHFS(vp)->hfs_phys_block_size),
2745 ap->a_cred, &bp);
2746 if (retval) {
2747 if (bp)
2748 brelse(bp);
2749 if (fp->ff_symlinkptr) {
2750 FREE(fp->ff_symlinkptr, M_TEMP);
2751 fp->ff_symlinkptr = NULL;
2752 }
2753 return (retval);
2754 }
2755 bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);
2756 if (bp) {
2757 if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
2758 bp->b_flags |= B_INVAL; /* data no longer needed */
2759 }
2760 brelse(bp);
2761 }
2762 }
2763 retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
2764
2765 return (retval);
2766}
2767
2768
2769/*
2770 * hfs abort op, called after namei() when a CREATE/DELETE isn't actually
2771 * done. If a buffer has been saved in anticipation of a CREATE, delete it.
2772#% abortop dvp = = =
2773#
2774 vop_abortop {
2775 IN struct vnode *dvp;
2776 IN struct componentname *cnp;
2777
2778 */
2779
2780/* ARGSUSED */
2781
2782static int
2783hfs_abortop(ap)
2784 struct vop_abortop_args /* {
2785 struct vnode *a_dvp;
2786 struct componentname *a_cnp;
2787 } */ *ap;
2788{
2789 if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
2790 FREE_ZONE(ap->a_cnp->cn_pnbuf, ap->a_cnp->cn_pnlen, M_NAMEI);
2791 ap->a_cnp->cn_flags &= ~HASBUF;
2792 }
2793
2794 return (0);
2795}
2796
2797
2798/*
2799 * Lock a cnode. If it's already locked, set the WANT bit and sleep.
2800#% lock vp U L U
2801#
2802 vop_lock {
2803 IN struct vnode *vp;
2804 IN int flags;
2805 IN struct proc *p;
2806 */
2807
2808static int
2809hfs_lock(ap)
2810 struct vop_lock_args /* {
2811 struct vnode *a_vp;
2812 int a_flags;
2813 struct proc *a_p;
2814 } */ *ap;
2815{
2816 struct vnode *vp = ap->a_vp;
2817 struct cnode *cp = VTOC(vp);
2818
2819 if (cp == NULL)
2820 panic("hfs_lock: cnode in vnode is null\n");
2821
2822 return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
2823}
2824
2825/*
2826 * Unlock a cnode.
2827#% unlock vp L U L
2828#
2829 vop_unlock {
2830 IN struct vnode *vp;
2831 IN int flags;
2832 IN struct proc *p;
2833
2834 */
2835static int
2836hfs_unlock(ap)
2837 struct vop_unlock_args /* {
2838 struct vnode *a_vp;
2839 int a_flags;
2840 struct proc *a_p;
2841 } */ *ap;
2842{
2843 struct vnode *vp = ap->a_vp;
2844 struct cnode *cp = VTOC(vp);
2845
2846 if (cp == NULL)
2847 panic("hfs_unlock: cnode in vnode is null\n");
2848
2849 return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
2850 &vp->v_interlock, ap->a_p));
2851}
2852
2853
2854/*
2855 * Print out the contents of a cnode.
2856#% print vp = = =
2857#
2858 vop_print {
2859 IN struct vnode *vp;
2860 */
2861static int
2862hfs_print(ap)
2863 struct vop_print_args /* {
2864 struct vnode *a_vp;
2865 } */ *ap;
2866{
2867 struct vnode * vp = ap->a_vp;
2868 struct cnode *cp = VTOC(vp);
2869
2870 printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
2871 major(cp->c_dev), minor(cp->c_dev));
2872#if FIFO
2873 if (vp->v_type == VFIFO)
2874 fifo_printinfo(vp);
2875#endif /* FIFO */
2876 lockmgr_printinfo(&cp->c_lock);
2877 printf("\n");
2878 return (0);
2879}
2880
2881
2882/*
2883 * Check for a locked cnode.
2884#% islocked vp = = =
2885#
2886 vop_islocked {
2887 IN struct vnode *vp;
2888
2889 */
2890static int
2891hfs_islocked(ap)
2892 struct vop_islocked_args /* {
2893 struct vnode *a_vp;
2894 } */ *ap;
2895{
2896 return (lockstatus(&VTOC(ap->a_vp)->c_lock));
2897}
2898
2899/*
2900
2901#% pathconf vp L L L
2902#
2903 vop_pathconf {
2904 IN struct vnode *vp;
2905 IN int name;
2906 OUT register_t *retval;
2907
2908 */
2909static int
2910hfs_pathconf(ap)
2911 struct vop_pathconf_args /* {
2912 struct vnode *a_vp;
2913 int a_name;
2914 int *a_retval;
2915 } */ *ap;
2916{
2917 int retval = 0;
2918
2919 switch (ap->a_name) {
2920 case _PC_LINK_MAX:
2921 if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
2922 *ap->a_retval = HFS_LINK_MAX;
2923 else
2924 *ap->a_retval = 1;
2925 break;
2926 case _PC_NAME_MAX:
2927 *ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters x max utf8 representation */
2928 break;
2929 case _PC_PATH_MAX:
2930 *ap->a_retval = PATH_MAX; /* 1024 */
2931 break;
2932 case _PC_CHOWN_RESTRICTED:
2933 *ap->a_retval = 1;
2934 break;
2935 case _PC_NO_TRUNC:
2936 *ap->a_retval = 0;
2937 break;
2938 case _PC_NAME_CHARS_MAX:
2939 *ap->a_retval = kHFSPlusMaxFileNameChars;
2940 break;
2941 case _PC_CASE_SENSITIVE:
2942 *ap->a_retval = 0;
2943 break;
2944 case _PC_CASE_PRESERVING:
2945 *ap->a_retval = 1;
2946 break;
2947 default:
2948 retval = EINVAL;
2949 }
2950
2951 return (retval);
2952}
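/*
 * Illustrative (userspace) use of the interface implemented above: a
 * pathconf(2) query for the maximum file name length.  The mount path is an
 * assumption made for the example.
 */
#if 0
	long namemax = pathconf("/Volumes/MacintoshHD", _PC_NAME_MAX);
	/* expected to be kHFSPlusMaxFileNameBytes on an HFS Plus volume */
#endif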
2953
2954
2955/*
2956 * Advisory record locking support
2957#% advlock vp U U U
2958#
2959 vop_advlock {
2960 IN struct vnode *vp;
2961 IN caddr_t id;
2962 IN int op;
2963 IN struct flock *fl;
2964 IN int flags;
2965
2966 */
2967static int
2968hfs_advlock(ap)
2969 struct vop_advlock_args /* {
2970 struct vnode *a_vp;
2971 caddr_t a_id;
2972 int a_op;
2973 struct flock *a_fl;
2974 int a_flags;
2975 } */ *ap;
2976{
2977 struct vnode *vp = ap->a_vp;
2978 struct flock *fl = ap->a_fl;
2979 struct hfslockf *lock;
2980 struct filefork *fork;
2981 off_t start, end;
2982 int retval;
2983
2984 /* Only regular files can have locks */
2985 if (vp->v_type != VREG)
2986 return (EISDIR);
2987
2988 fork = VTOF(ap->a_vp);
2989 /*
2990 * Avoid the common case of unlocking when cnode has no locks.
2991 */
2992 if (fork->ff_lockf == (struct hfslockf *)0) {
2993 if (ap->a_op != F_SETLK) {
2994 fl->l_type = F_UNLCK;
2995 return (0);
2996 }
2997 }
2998 /*
2999 * Convert the flock structure into a start and end.
3000 */
3001 start = 0;
3002 switch (fl->l_whence) {
3003 case SEEK_SET:
3004 case SEEK_CUR:
3005 /*
3006 * Caller is responsible for adding any necessary offset
3007 * when SEEK_CUR is used.
3008 */
3009 start = fl->l_start;
3010 break;
3011 case SEEK_END:
3012 start = fork->ff_size + fl->l_start;
3013 break;
3014 default:
3015 return (EINVAL);
3016 }
3017
3018 if (start < 0)
3019 return (EINVAL);
3020 if (fl->l_len == 0)
3021 end = -1;
3022 else
3023 end = start + fl->l_len - 1;
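	/*
	 * Worked example (illustrative): l_whence == SEEK_SET with l_start == 100
	 * and l_len == 50 locks bytes [100, 149]; l_len == 0 locks from l_start
	 * to end-of-file, which is represented here as end == -1.
	 */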
3024
3025 /*
3026 * Create the hfslockf structure
3027 */
3028 MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
3029 lock->lf_start = start;
3030 lock->lf_end = end;
3031 lock->lf_id = ap->a_id;
3032 lock->lf_fork = fork;
3033 lock->lf_type = fl->l_type;
3034 lock->lf_next = (struct hfslockf *)0;
3035 TAILQ_INIT(&lock->lf_blkhd);
3036 lock->lf_flags = ap->a_flags;
3037 /*
3038 * Do the requested operation.
3039 */
3040 switch(ap->a_op) {
3041 case F_SETLK:
3042 retval = hfs_setlock(lock);
3043 break;
3044 case F_UNLCK:
3045 retval = hfs_clearlock(lock);
3046 FREE(lock, M_LOCKF);
3047 break;
3048 case F_GETLK:
3049 retval = hfs_getlock(lock, fl);
3050 FREE(lock, M_LOCKF);
3051 break;
3052 default:
3053 retval = EINVAL;
3054 _FREE(lock, M_LOCKF);
3055 break;
3056 }
3057
3058 return (retval);
3059}
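/*
 * Illustrative (userspace) request of the kind handled above, reaching this
 * code through VOP_ADVLOCK: a write lock on the first 100 bytes of an open
 * file.  The descriptor fd is an assumption made for the example.
 */
#if 0
	struct flock fl;

	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 100;
	(void) fcntl(fd, F_SETLK, &fl);
#endif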
3060
3061
3062
3063/*
3064 * Update the access, modified, and node change times as specified
3065 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The
3066 * C_MODIFIED flag is used to specify that the node needs to be
3067 * updated but that the times have already been set. The access and
3068 * modified times are input parameters but the node change time is
3069 * always taken from the current time. If waitfor is set, then wait
3070 * for the disk write of the node to complete.
3071 */
3072/*
3073#% update vp L L L
3074 IN struct vnode *vp;
3075 IN struct timeval *access;
3076 IN struct timeval *modify;
3077 IN int waitfor;
3078*/
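/*
 * Illustrative caller pattern (a sketch mirroring usage elsewhere in this
 * file, e.g. hfs_rename and hfs_makenode): mark the cnode flags, then push
 * the change to disk through VOP_UPDATE.
 */
#if 0
	struct timeval tv;

	cp->c_flag |= C_CHANGE | C_UPDATE;	/* ctime and mtime need refreshing */
	tv = time;
	(void) VOP_UPDATE(vp, &tv, &tv, 0);	/* 0 == don't wait for the write */
#endif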
3079static int
3080hfs_update(ap)
3081 struct vop_update_args /* {
3082 struct vnode *a_vp;
3083 struct timeval *a_access;
3084 struct timeval *a_modify;
3085 int a_waitfor;
3086 } */ *ap;
3087{
3088 struct vnode *vp = ap->a_vp;
3089 struct cnode *cp = VTOC(ap->a_vp);
3090 struct proc *p;
3091 struct cat_fork *dataforkp = NULL;
3092 struct cat_fork *rsrcforkp = NULL;
3093 struct cat_fork datafork;
3094 int updateflag;
3095 struct hfsmount *hfsmp;
3096 int error;
3097
3098 hfsmp = VTOHFS(vp);
3099
3100	/* XXX do we really want to clear the system cnode flags here???? */
3101 if ((vp->v_flag & VSYSTEM) ||
3102 (VTOVFS(vp)->mnt_flag & MNT_RDONLY) ||
3103 (cp->c_mode == 0)) {
3104 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
3105 return (0);
3106 }
3107
3108 updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
3109
3110 /* Nothing to update. */
3111 if (updateflag == 0) {
3112 return (0);
3113 }
3114 /* HFS standard doesn't have access times. */
3115 if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {
3116 return (0);
3117 }
3118 if (updateflag & C_ACCESS) {
3119 /*
3120		 * If only the access time is changing, then defer
3121		 * updating it on disk until later (in hfs_inactive).
3122		 * If it was recently updated, then skip the update.
3123 */
3124 if (updateflag == C_ACCESS) {
3125 cp->c_flag &= ~C_ACCESS;
3126
3127			/* It's going to disk or it's sufficiently newer... */
3128 if ((cp->c_flag & C_ATIMEMOD) ||
3129 (ap->a_access->tv_sec > (cp->c_atime + ATIME_ACCURACY))) {
3130 cp->c_atime = ap->a_access->tv_sec;
3131 cp->c_flag |= C_ATIMEMOD;
3132 }
3133 return (0);
3134 } else {
3135 cp->c_atime = ap->a_access->tv_sec;
3136 }
3137 }
3138 if (updateflag & C_UPDATE) {
3139 cp->c_mtime = ap->a_modify->tv_sec;
3140 cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;
3141 }
3142 if (updateflag & C_CHANGE) {
3143 cp->c_ctime = time.tv_sec;
3144 /*
3145 * HFS dates that WE set must be adjusted for DST
3146 */
3147 if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
3148 cp->c_ctime += 3600;
3149 cp->c_mtime = cp->c_ctime;
3150 }
3151 }
3152
3153 if (cp->c_datafork)
3154 dataforkp = &cp->c_datafork->ff_data;
3155 if (cp->c_rsrcfork)
3156 rsrcforkp = &cp->c_rsrcfork->ff_data;
3157
3158 p = current_proc();
3159
3160 /*
3161	 * For delayed allocations, updates are
3162	 * postponed until an fsync or until the file
3163	 * gets written to disk.
3164	 *
3165	 * Deleted files can defer metadata updates until inactive.
3166 */
3167 if (ISSET(cp->c_flag, C_DELETED) ||
3168 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
3169 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks)) {
3170 if (updateflag & (C_CHANGE | C_UPDATE))
3171 hfs_volupdate(hfsmp, VOL_UPDATE, 0);
3172 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
3173 cp->c_flag |= C_MODIFIED;
3174
3175 return (0);
3176 }
3177
3178
3179 // XXXdbg
3180 hfs_global_shared_lock_acquire(hfsmp);
3181 if (hfsmp->jnl) {
3182 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
3183 hfs_global_shared_lock_release(hfsmp);
3184 return error;
3185 }
3186 }
3187
3188
3189 /*
3190 * For files with invalid ranges (holes) the on-disk
3191 * field representing the size of the file (cf_size)
3192 * must be no larger than the start of the first hole.
3193 */
3194 if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
3195 bcopy(dataforkp, &datafork, sizeof(datafork));
3196 datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
3197 dataforkp = &datafork;
3198 }
3199
3200 /*
3201 * Lock the Catalog b-tree file.
3202 * A shared lock is sufficient since an update doesn't change
3203 * the tree and the lock on vp protects the cnode.
3204 */
3205 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
3206 if (error) {
3207 if (hfsmp->jnl) {
3208 journal_end_transaction(hfsmp->jnl);
3209 }
3210 hfs_global_shared_lock_release(hfsmp);
3211 return (error);
3212 }
3213
3214 /* XXX - waitfor is not enforced */
3215 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
3216
3217 /* Unlock the Catalog b-tree file. */
3218 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
3219
3220 if (updateflag & (C_CHANGE | C_UPDATE))
3221 hfs_volupdate(hfsmp, VOL_UPDATE, 0);
3222
3223 // XXXdbg
3224 if (hfsmp->jnl) {
3225 journal_end_transaction(hfsmp->jnl);
3226 }
3227 hfs_global_shared_lock_release(hfsmp);
3228
3229 /* After the updates are finished, clear the flags */
3230 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_ATIMEMOD);
3231
3232 return (error);
3233}
3234
3235/*
3236 * Allocate a new node
3237 *
3238 * Upon leaving, the namei buffer must be freed.
3239 *
3240 */
3241static int
3242hfs_makenode(mode, dvp, vpp, cnp)
3243 int mode;
3244 struct vnode *dvp;
3245 struct vnode **vpp;
3246 struct componentname *cnp;
3247{
3248 struct cnode *cp;
3249 struct cnode *dcp;
3250 struct vnode *tvp;
3251 struct hfsmount *hfsmp;
3252 struct timeval tv;
3253 struct proc *p;
3254 struct cat_desc in_desc, out_desc;
3255 struct cat_attr attr;
3256 int error, started_tr = 0, grabbed_lock = 0;
3257 enum vtype vnodetype;
3258
3259 p = cnp->cn_proc;
3260 dcp = VTOC(dvp);
3261 hfsmp = VTOHFS(dvp);
3262 *vpp = NULL;
3263 tvp = NULL;
3264 bzero(&out_desc, sizeof(out_desc));
3265
3266 if ((mode & S_IFMT) == 0)
3267 mode |= S_IFREG;
3268 vnodetype = IFTOVT(mode);
3269
3270 /* Check if unmount in progress */
3271 if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {
3272 error = EPERM;
3273 goto exit;
3274 }
3275	/* Check if we're out of usable disk space. */
3276 if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
3277 error = ENOSPC;
3278 goto exit;
3279 }
3280
3281 /* Setup the default attributes */
3282 bzero(&attr, sizeof(attr));
3283 attr.ca_mode = mode;
3284 attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
3285 attr.ca_mtime = time.tv_sec;
3286 attr.ca_mtime_nsec = time.tv_usec * 1000;
3287 if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
3288 attr.ca_mtime += 3600; /* Same as what hfs_update does */
3289 }
3290 attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
3291 if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
3292 attr.ca_uid = hfsmp->hfs_uid;
3293 attr.ca_gid = hfsmp->hfs_gid;
3294 } else {
3295 if (vnodetype == VLNK)
3296 attr.ca_uid = dcp->c_uid;
3297 else
3298 attr.ca_uid = cnp->cn_cred->cr_uid;
3299 attr.ca_gid = dcp->c_gid;
3300 }
3301 /*
3302 * Don't tag as a special file (BLK or CHR) until *after*
3303	 * hfs_getnewvnode is called. This ensures that any
3304	 * alias checking is deferred until hfs_mknod completes.
3305 */
3306 if (vnodetype == VBLK || vnodetype == VCHR)
3307 attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;
3308
3309 /* Tag symlinks with a type and creator. */
3310 if (vnodetype == VLNK) {
3311 struct FndrFileInfo *fip;
3312
3313 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
3314 fip->fdType = SWAP_BE32(kSymLinkFileType);
3315 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
3316 }
3317 if ((attr.ca_mode & S_ISGID) &&
3318 !groupmember(dcp->c_gid, cnp->cn_cred) &&
3319 suser(cnp->cn_cred, NULL)) {
3320 attr.ca_mode &= ~S_ISGID;
3321 }
3322 if (cnp->cn_flags & ISWHITEOUT)
3323 attr.ca_flags |= UF_OPAQUE;
3324
3325 /* Setup the descriptor */
3326 bzero(&in_desc, sizeof(in_desc));
3327 in_desc.cd_nameptr = cnp->cn_nameptr;
3328 in_desc.cd_namelen = cnp->cn_namelen;
3329 in_desc.cd_parentcnid = dcp->c_cnid;
3330 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
3331
3332 // XXXdbg
3333 hfs_global_shared_lock_acquire(hfsmp);
3334 grabbed_lock = 1;
3335 if (hfsmp->jnl) {
3336 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
3337 goto exit;
3338 }
3339 started_tr = 1;
3340 }
3341
3342 /* Lock catalog b-tree */
3343 error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
3344 if (error)
3345 goto exit;
3346
3347 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
3348
3349 /* Unlock catalog b-tree */
3350 (void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);
3351 if (error)
3352 goto exit;
3353
3354 /* Update the parent directory */
3355 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
3356 dcp->c_nlink++;
3357 dcp->c_entries++;
3358 dcp->c_flag |= C_CHANGE | C_UPDATE;
3359 tv = time;
3360 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
3361
3362 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
3363 (dcp->c_cnid == kHFSRootFolderID));
3364
3365 // XXXdbg
3366 // have to end the transaction here before we call hfs_getnewvnode()
3367 // because that can cause us to try and reclaim a vnode on a different
3368 // file system which could cause us to start a transaction which can
3369 // deadlock with someone on that other file system (since we could be
3370 // holding two transaction locks as well as various vnodes and we did
3371 // not obtain the locks on them in the proper order).
3372 //
3373 // NOTE: this means that if the quota check fails or we have to update
3374 // the change time on a block-special device that those changes
3375 // will happen as part of independent transactions.
3376 //
3377 if (started_tr) {
3378 journal_end_transaction(hfsmp->jnl);
3379 started_tr = 0;
3380 }
3381 if (grabbed_lock) {
3382 hfs_global_shared_lock_release(hfsmp);
3383 grabbed_lock = 0;
3384 }
3385
3386 /* Create a vnode for the object just created: */
3387 error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);
3388 if (error)
3389 goto exit;
3390
3391
3392#if QUOTA
3393 cp = VTOC(tvp);
3394 /*
3395	 * We call hfs_chkiq with the FORCE flag so that if we
3396	 * fall through to the rmdir, we have actually
3397	 * accounted for the inode.
3398 */
3399 if ((error = hfs_getinoquota(cp)) ||
3400 (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
3401 if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
3402 FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
3403 cnp->cn_flags &= ~HASBUF;
3404 }
3405 if (tvp->v_type == VDIR)
3406 VOP_RMDIR(dvp,tvp, cnp);
3407 else
3408 VOP_REMOVE(dvp,tvp, cnp);
3409
3410 return (error);
3411 }
3412#endif /* QUOTA */
3413
3414 /*
3415 * restore vtype and mode for VBLK and VCHR
3416 */
3417 if (vnodetype == VBLK || vnodetype == VCHR) {
3418 struct cnode *cp;
3419
3420 cp = VTOC(tvp);
3421 cp->c_mode = mode;
3422 tvp->v_type = IFTOVT(mode);
3423 cp->c_flag |= C_CHANGE;
3424 tv = time;
3425 if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {
3426 vput(tvp);
3427 goto exit;
3428 }
3429 }
3430
3431 *vpp = tvp;
3432exit:
3433 cat_releasedesc(&out_desc);
3434
3435 if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
3436 FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
3437
3438 /*
3439 * Check if a file is located in the "Cleanup At Startup"
3440	 * directory. If it is, then tag it as NODUMP so that we
3441	 * can be lazy about zero-filling data holes.
3442 */
3443 if ((error == 0) && (vnodetype == VREG) &&
3444 (dcp->c_desc.cd_nameptr != NULL) &&
3445 (strcmp(dcp->c_desc.cd_nameptr, "Cleanup At Startup") == 0)) {
3446 struct vnode *ddvp;
3447 cnid_t parid;
3448
3449 parid = dcp->c_parentcnid;
3450 vput(dvp);
3451 dvp = NULL;
3452
3453 /*
3454 * The parent of "Cleanup At Startup" should
3455 * have the ASCII name of the userid.
3456 */
3457 if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) {
3458 if (VTOC(ddvp)->c_desc.cd_nameptr &&
3459 (cp->c_uid == strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0))) {
3460 cp->c_flags |= UF_NODUMP;
3461 cp->c_flag |= C_CHANGE;
3462 }
3463 vput(ddvp);
3464 }
3465 }
3466
3467 if (dvp)
3468 vput(dvp);
3469
3470 // XXXdbg
3471 if (started_tr) {
3472 journal_end_transaction(hfsmp->jnl);
3473 started_tr = 0;
3474 }
3475 if (grabbed_lock) {
3476 hfs_global_shared_lock_release(hfsmp);
3477 grabbed_lock = 0;
3478 }
3479
3480 return (error);
3481}
3482
3483
3484static int
3485hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
3486{
3487 struct vnode *rvp;
3488 struct cnode *cp = VTOC(vp);
3489 int error;
3490
3491 if ((rvp = cp->c_rsrc_vp)) {
3492		/* Use existing vnode */
3493 error = vget(rvp, 0, p);
3494 if (error) {
3495 char * name = VTOC(vp)->c_desc.cd_nameptr;
3496
3497 if (name)
3498 printf("hfs_vgetrsrc: couldn't get"
3499 " resource fork for %s\n", name);
3500 return (error);
3501 }
3502 } else {
3503 struct cat_fork rsrcfork;
3504
3505 /* Lock catalog b-tree */
3506 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
3507 if (error)
3508 return (error);
3509
3510 /* Get resource fork data */
3511 error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
3512 (struct cat_attr *)0, &rsrcfork);
3513
3514 /* Unlock the Catalog */
3515 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
3516 if (error)
3517 return (error);
3518
3519 error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
3520 &rsrcfork, &rvp);
3521 if (error)
3522 return (error);
3523 }
3524
3525 *rvpp = rvp;
3526 return (0);
3527}
3528
3529
3530/*
3531 * Wrapper for special device reads
3532 */
3533static int
3534hfsspec_read(ap)
3535 struct vop_read_args /* {
3536 struct vnode *a_vp;
3537 struct uio *a_uio;
3538 int a_ioflag;
3539 struct ucred *a_cred;
3540 } */ *ap;
3541{
3542 /*
3543 * Set access flag.
3544 */
3545 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3546 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));
3547}
3548
3549/*
3550 * Wrapper for special device writes
3551 */
3552static int
3553hfsspec_write(ap)
3554 struct vop_write_args /* {
3555 struct vnode *a_vp;
3556 struct uio *a_uio;
3557 int a_ioflag;
3558 struct ucred *a_cred;
3559 } */ *ap;
3560{
3561 /*
3562 * Set update and change flags.
3563 */
3564 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3565 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));
3566}
3567
3568/*
3569 * Wrapper for special device close
3570 *
3571 * Update the times on the cnode then do device close.
3572 */
3573static int
3574hfsspec_close(ap)
3575 struct vop_close_args /* {
3576 struct vnode *a_vp;
3577 int a_fflag;
3578 struct ucred *a_cred;
3579 struct proc *a_p;
3580 } */ *ap;
3581{
3582 struct vnode *vp = ap->a_vp;
3583 struct cnode *cp = VTOC(vp);
3584
3585 simple_lock(&vp->v_interlock);
3586 if (ap->a_vp->v_usecount > 1)
3587 CTIMES(cp, &time, &time);
3588 simple_unlock(&vp->v_interlock);
3589 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
3590}
3591
3592#if FIFO
3593/*
3594 * Wrapper for fifo reads
3595 */
3596static int
3597hfsfifo_read(ap)
3598 struct vop_read_args /* {
3599 struct vnode *a_vp;
3600 struct uio *a_uio;
3601 int a_ioflag;
3602 struct ucred *a_cred;
3603 } */ *ap;
3604{
3605 extern int (**fifo_vnodeop_p)(void *);
3606
3607 /*
3608 * Set access flag.
3609 */
3610 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3611 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));
3612}
3613
3614/*
3615 * Wrapper for fifo writes
3616 */
3617static int
3618hfsfifo_write(ap)
3619 struct vop_write_args /* {
3620 struct vnode *a_vp;
3621 struct uio *a_uio;
3622 int a_ioflag;
3623 struct ucred *a_cred;
3624 } */ *ap;
3625{
3626 extern int (**fifo_vnodeop_p)(void *);
3627
3628 /*
3629 * Set update and change flags.
3630 */
3631 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3632 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));
3633}
3634
3635/*
3636 * Wrapper for fifo close
3637 *
3638 * Update the times on the cnode then do device close.
3639 */
3640static int
3641hfsfifo_close(ap)
3642 struct vop_close_args /* {
3643 struct vnode *a_vp;
3644 int a_fflag;
3645 struct ucred *a_cred;
3646 struct proc *a_p;
3647 } */ *ap;
3648{
3649 extern int (**fifo_vnodeop_p)(void *);
3650 struct vnode *vp = ap->a_vp;
3651 struct cnode *cp = VTOC(vp);
3652
3653 simple_lock(&vp->v_interlock);
3654 if (ap->a_vp->v_usecount > 1)
3655 CTIMES(cp, &time, &time);
3656 simple_unlock(&vp->v_interlock);
3657 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
3658}
3659#endif /* FIFO */
3660
3661
3662/*****************************************************************************
3663*
3664* VOP Tables
3665*
3666*****************************************************************************/
3667int hfs_cache_lookup(); /* in hfs_lookup.c */
3668int hfs_lookup(); /* in hfs_lookup.c */
3669int hfs_read(); /* in hfs_readwrite.c */
3670int hfs_write(); /* in hfs_readwrite.c */
3671int hfs_ioctl(); /* in hfs_readwrite.c */
3672int hfs_select(); /* in hfs_readwrite.c */
3673int hfs_bmap(); /* in hfs_readwrite.c */
3674int hfs_strategy(); /* in hfs_readwrite.c */
3675int hfs_truncate(); /* in hfs_readwrite.c */
3676int hfs_allocate(); /* in hfs_readwrite.c */
3677int hfs_pagein(); /* in hfs_readwrite.c */
3678int hfs_pageout(); /* in hfs_readwrite.c */
3679int hfs_search(); /* in hfs_search.c */
3680int hfs_bwrite(); /* in hfs_readwrite.c */
3681int hfs_link(); /* in hfs_link.c */
3682int hfs_blktooff(); /* in hfs_readwrite.c */
3683int hfs_offtoblk(); /* in hfs_readwrite.c */
3684int hfs_cmap(); /* in hfs_readwrite.c */
3685int hfs_getattrlist(); /* in hfs_attrlist.c */
3686int hfs_setattrlist(); /* in hfs_attrlist.c */
3687int hfs_readdirattr(); /* in hfs_attrlist.c */
3688int hfs_inactive(); /* in hfs_cnode.c */
3689int hfs_reclaim(); /* in hfs_cnode.c */
3690
3691int (**hfs_vnodeop_p)(void *);
3692
3693#define VOPFUNC int (*)(void *)
3694
3695struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
3696 { &vop_default_desc, (VOPFUNC)vn_default_error },
3697 { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup }, /* lookup */
3698 { &vop_create_desc, (VOPFUNC)hfs_create }, /* create */
3699 { &vop_mknod_desc, (VOPFUNC)hfs_mknod }, /* mknod */
3700 { &vop_open_desc, (VOPFUNC)hfs_open }, /* open */
3701 { &vop_close_desc, (VOPFUNC)hfs_close }, /* close */
3702 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3703 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3704 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3705 { &vop_read_desc, (VOPFUNC)hfs_read }, /* read */
3706 { &vop_write_desc, (VOPFUNC)hfs_write }, /* write */
3707 { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl }, /* ioctl */
3708 { &vop_select_desc, (VOPFUNC)hfs_select }, /* select */
3709 { &vop_exchange_desc, (VOPFUNC)hfs_exchange }, /* exchange */
3710 { &vop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
3711 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3712 { &vop_seek_desc, (VOPFUNC)nop_seek }, /* seek */
3713 { &vop_remove_desc, (VOPFUNC)hfs_remove }, /* remove */
3714 { &vop_link_desc, (VOPFUNC)hfs_link }, /* link */
3715 { &vop_rename_desc, (VOPFUNC)hfs_rename }, /* rename */
3716 { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir }, /* mkdir */
3717 { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir }, /* rmdir */
3718 { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex }, /* mkcomplex */
3719 { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist }, /* getattrlist */
3720 { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist }, /* setattrlist */
3721 { &vop_symlink_desc, (VOPFUNC)hfs_symlink }, /* symlink */
3722 { &vop_readdir_desc, (VOPFUNC)hfs_readdir }, /* readdir */
3723 { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr }, /* readdirattr */
3724 { &vop_readlink_desc, (VOPFUNC)hfs_readlink }, /* readlink */
3725 { &vop_abortop_desc, (VOPFUNC)hfs_abortop }, /* abortop */
3726 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3727 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3728 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3729 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3730 { &vop_bmap_desc, (VOPFUNC)hfs_bmap }, /* bmap */
3731 { &vop_strategy_desc, (VOPFUNC)hfs_strategy }, /* strategy */
3732 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3733 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3734 { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf }, /* pathconf */
3735 { &vop_advlock_desc, (VOPFUNC)hfs_advlock }, /* advlock */
3736 { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks }, /* reallocblks */
3737 { &vop_truncate_desc, (VOPFUNC)hfs_truncate }, /* truncate */
3738 { &vop_allocate_desc, (VOPFUNC)hfs_allocate }, /* allocate */
3739 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3740 { &vop_searchfs_desc, (VOPFUNC)hfs_search }, /* search fs */
3741 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite }, /* bwrite */
3742 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* pagein */
3743 { &vop_pageout_desc,(VOPFUNC) hfs_pageout }, /* pageout */
3744 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3745 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3746 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3747 { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */
3748 { NULL, (VOPFUNC)NULL }
3749};
3750
3751struct vnodeopv_desc hfs_vnodeop_opv_desc =
3752{ &hfs_vnodeop_p, hfs_vnodeop_entries };
3753
3754int (**hfs_specop_p)(void *);
3755struct vnodeopv_entry_desc hfs_specop_entries[] = {
3756 { &vop_default_desc, (VOPFUNC)vn_default_error },
3757 { &vop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
3758 { &vop_create_desc, (VOPFUNC)spec_create }, /* create */
3759 { &vop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
3760 { &vop_open_desc, (VOPFUNC)spec_open }, /* open */
3761 { &vop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
3762 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3763 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3764 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3765 { &vop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
3766 { &vop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
3767 { &vop_lease_desc, (VOPFUNC)spec_lease_check }, /* lease */
3768 { &vop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
3769 { &vop_select_desc, (VOPFUNC)spec_select }, /* select */
3770 { &vop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
3771 { &vop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
3772 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3773 { &vop_seek_desc, (VOPFUNC)spec_seek }, /* seek */
3774 { &vop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
3775 { &vop_link_desc, (VOPFUNC)spec_link }, /* link */
3776 { &vop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
3777 { &vop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
3778 { &vop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
3779 { &vop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
3780 { &vop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
3781 { &vop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
3782 { &vop_abortop_desc, (VOPFUNC)spec_abortop }, /* abortop */
3783 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3784 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3785 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3786 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3787 { &vop_bmap_desc, (VOPFUNC)spec_bmap }, /* bmap */
3788 { &vop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
3789 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3790 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3791 { &vop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
3792 { &vop_advlock_desc, (VOPFUNC)spec_advlock }, /* advlock */
3793 { &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff }, /* blkatoff */
3794 { &vop_valloc_desc, (VOPFUNC)spec_valloc }, /* valloc */
3795 { &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks }, /* reallocblks */
3796 { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */
3797 { &vop_truncate_desc, (VOPFUNC)spec_truncate }, /* truncate */
3798 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3799 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
3800 { &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize }, /* devblocksize */
3801 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */
3802 { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */
3803 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3804 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3805 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3806 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
3807};
3808struct vnodeopv_desc hfs_specop_opv_desc =
3809 { &hfs_specop_p, hfs_specop_entries };
3810
3811#if FIFO
3812int (**hfs_fifoop_p)(void *);
3813struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
3814 { &vop_default_desc, (VOPFUNC)vn_default_error },
3815 { &vop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
3816 { &vop_create_desc, (VOPFUNC)fifo_create }, /* create */
3817 { &vop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
3818 { &vop_open_desc, (VOPFUNC)fifo_open }, /* open */
3819 { &vop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
3820 { &vop_access_desc, (VOPFUNC)hfs_access }, /* access */
3821 { &vop_getattr_desc, (VOPFUNC)hfs_getattr }, /* getattr */
3822 { &vop_setattr_desc, (VOPFUNC)hfs_setattr }, /* setattr */
3823 { &vop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
3824 { &vop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
3825 { &vop_lease_desc, (VOPFUNC)fifo_lease_check }, /* lease */
3826 { &vop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
3827 { &vop_select_desc, (VOPFUNC)fifo_select }, /* select */
3828 { &vop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
3829 { &vop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
3830 { &vop_fsync_desc, (VOPFUNC)hfs_fsync }, /* fsync */
3831 { &vop_seek_desc, (VOPFUNC)fifo_seek }, /* seek */
3832 { &vop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
3833 { &vop_link_desc, (VOPFUNC)fifo_link }, /* link */
3834 { &vop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
3835 { &vop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
3836 { &vop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
3837 { &vop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
3838 { &vop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
3839 { &vop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
3840 { &vop_abortop_desc, (VOPFUNC)fifo_abortop }, /* abortop */
3841 { &vop_inactive_desc, (VOPFUNC)hfs_inactive }, /* inactive */
3842 { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim }, /* reclaim */
3843 { &vop_lock_desc, (VOPFUNC)hfs_lock }, /* lock */
3844 { &vop_unlock_desc, (VOPFUNC)hfs_unlock }, /* unlock */
3845 { &vop_bmap_desc, (VOPFUNC)fifo_bmap }, /* bmap */
3846 { &vop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
3847 { &vop_print_desc, (VOPFUNC)hfs_print }, /* print */
3848 { &vop_islocked_desc, (VOPFUNC)hfs_islocked }, /* islocked */
3849 { &vop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
3850 { &vop_advlock_desc, (VOPFUNC)fifo_advlock }, /* advlock */
3851 { &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff }, /* blkatoff */
3852 { &vop_valloc_desc, (VOPFUNC)fifo_valloc }, /* valloc */
3853 { &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks }, /* reallocblks */
3854 { &vop_vfree_desc, (VOPFUNC)err_vfree }, /* vfree */
3855 { &vop_truncate_desc, (VOPFUNC)fifo_truncate }, /* truncate */
3856 { &vop_update_desc, (VOPFUNC)hfs_update }, /* update */
3857 { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
3858 { &vop_pagein_desc, (VOPFUNC)hfs_pagein }, /* Pagein */
3859 { &vop_pageout_desc, (VOPFUNC)hfs_pageout }, /* Pageout */
3860 { &vop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
3861 { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff }, /* blktooff */
3862 { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk }, /* offtoblk */
3863 { &vop_cmap_desc, (VOPFUNC)hfs_cmap }, /* cmap */
3864 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
3865};
3866struct vnodeopv_desc hfs_fifoop_opv_desc =
3867 { &hfs_fifoop_p, hfs_fifoop_entries };
3868#endif /* FIFO */
3869
3870
3871