]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_vnops.c
2b1552c4348006e147a06366c0ec61da7e1b2caa
[apple/xnu.git] / bsd / hfs / hfs_vnops.c
1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <sys/systm.h>
24 #include <sys/kernel.h>
25 #include <sys/file.h>
26 #include <sys/dirent.h>
27 #include <sys/stat.h>
28 #include <sys/buf.h>
29 #include <sys/mount.h>
30 #include <sys/vnode.h>
31 #include <sys/malloc.h>
32 #include <sys/namei.h>
33 #include <sys/ubc.h>
34 #include <sys/quota.h>
35 #include <sys/time.h>
36 #include <sys/disk.h>
37
38 #include <miscfs/specfs/specdev.h>
39 #include <miscfs/fifofs/fifo.h>
40 #include <vfs/vfs_support.h>
41 #include <machine/spl.h>
42
43 #include <sys/kdebug.h>
44
45 #include "hfs.h"
46 #include "hfs_catalog.h"
47 #include "hfs_cnode.h"
48 #include "hfs_lockf.h"
49 #include "hfs_dbg.h"
50 #include "hfs_mount.h"
51 #include "hfs_quota.h"
52 #include "hfs_endian.h"
53
54 #include "hfscommon/headers/BTreesInternal.h"
55 #include "hfscommon/headers/FileMgrInternal.h"
56
57 #define MAKE_DELETED_NAME(NAME,FID) \
58 (void) sprintf((NAME), "%s%d", HFS_DELETE_PREFIX, (FID))
59
60 #define KNDETACH_VNLOCKED 0x00000001
61
62 #define CARBON_TEMP_DIR_NAME "Cleanup At Startup"
63
64
65 /* Global vfs data structures for hfs */
66
67
68 extern unsigned long strtoul(const char *, char **, int);
69
70 extern int groupmember(gid_t gid, struct ucred *cred);
71
72 static int hfs_makenode(int mode, struct vnode *dvp, struct vnode **vpp,
73 struct componentname *cnp);
74
75 static int hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp,
76 struct vnode **rvpp, struct proc *p);
77
78 static int hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p);
79
80 static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
81 int);
82
83 static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
84 int);
85
86 /* Options for hfs_removedir and hfs_removefile */
87 #define HFSRM_PARENT_LOCKED 0x01
88 #define HFSRM_SKIP_RESERVE 0x02
89 #define HFSRM_SAVE_NAME 0x04
90 #define HFSRM_RENAMEOPTS 0x07
91
92
93 int hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags);
94
95 int hfs_chflags(struct vnode *vp, u_long flags, struct ucred *cred,
96 struct proc *p);
97 int hfs_chmod(struct vnode *vp, int mode, struct ucred *cred,
98 struct proc *p);
99 int hfs_chown(struct vnode *vp, uid_t uid, gid_t gid,
100 struct ucred *cred, struct proc *p);
101
102 /*****************************************************************************
103 *
104 * Common Operations on vnodes
105 *
106 *****************************************************************************/
107
108 /*
109 * Create a regular file
110 #% create dvp L U U
111 #% create vpp - L -
112 #
113 vop_create {
114 IN WILLRELE struct vnode *dvp;
115 OUT struct vnode **vpp;
116 IN struct componentname *cnp;
117 IN struct vattr *vap;
118
119 We are responsible for freeing the namei buffer,
120 it is done in hfs_makenode()
121 */
122
123 static int
124 hfs_create(ap)
125 struct vop_create_args /* {
126 struct vnode *a_dvp;
127 struct vnode **a_vpp;
128 struct componentname *a_cnp;
129 struct vattr *a_vap;
130 } */ *ap;
131 {
132 struct vattr *vap = ap->a_vap;
133
134 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
135 ap->a_dvp, ap->a_vpp, ap->a_cnp));
136 }
137
138
139 /*
140 * Mknod vnode call
141
142 #% mknod dvp L U U
143 #% mknod vpp - X -
144 #
145 vop_mknod {
146 IN WILLRELE struct vnode *dvp;
147 OUT WILLRELE struct vnode **vpp;
148 IN struct componentname *cnp;
149 IN struct vattr *vap;
150 */
151 /* ARGSUSED */
152
static int
hfs_mknod(ap)
	struct vop_mknod_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct cnode *cp;
	int error;

	/* Device nodes are only representable on HFS Plus volumes. */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EOPNOTSUPP);
	}

	/* Create the vnode */
	error = hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
	                     ap->a_dvp, vpp, ap->a_cnp);
	if (error)
		return (error);
	cp = VTOC(*vpp);
	/* Mark all times for update and record the device number, if any. */
	cp->c_flag |= C_ACCESS | C_CHANGE | C_UPDATE;
	if ((vap->va_rdev != VNOVAL) &&
	    (vap->va_type == VBLK || vap->va_type == VCHR))
		cp->c_rdev = vap->va_rdev;
	/*
	 * Remove cnode so that it will be reloaded by lookup and
	 * checked to see if it is an alias of an existing vnode.
	 * Note: unlike UFS, we don't bash v_type here.
	 *
	 * NOTE(review): *vpp is passed to vgone() after vput() has
	 * already dropped our reference.  This mirrors the historical
	 * UFS mknod pattern, but confirm the vnode cannot be recycled
	 * between the two calls.
	 */
	vput(*vpp);
	vgone(*vpp);
	*vpp = 0;
	return (0);
}
193
194
195 /*
196 * Open called.
197 #% open vp L L L
198 #
199 vop_open {
200 IN struct vnode *vp;
201 IN int mode;
202 IN struct ucred *cred;
203 IN struct proc *p;
204 */
205
206
207 static int
208 hfs_open(ap)
209 struct vop_open_args /* {
210 struct vnode *a_vp;
211 int a_mode;
212 struct ucred *a_cred;
213 struct proc *a_p;
214 } */ *ap;
215 {
216 struct vnode *vp = ap->a_vp;
217 struct filefork *fp = VTOF(vp);
218 struct timeval tv;
219
220 /*
221 * Files marked append-only must be opened for appending.
222 */
223 if ((vp->v_type != VDIR) && (VTOC(vp)->c_flags & APPEND) &&
224 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
225 return (EPERM);
226
227 if (ap->a_mode & O_EVTONLY) {
228 if (vp->v_type == VREG) {
229 ++VTOF(vp)->ff_evtonly_refs;
230 } else {
231 ++VTOC(vp)->c_evtonly_refs;
232 };
233 };
234
235 /*
236 * On the first (non-busy) open of a fragmented
237 * file attempt to de-frag it (if its less than 20MB).
238 */
239 if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
240 !UBCISVALID(vp) || ubc_isinuse(vp, 1)) {
241 return (0);
242 }
243 fp = VTOF(vp);
244 if (fp->ff_blocks &&
245 fp->ff_extents[7].blockCount != 0 &&
246 fp->ff_size <= (20 * 1024 * 1024)) {
247 /*
248 * Wait until system bootup is done (3 min).
249 */
250 microuptime(&tv);
251 if (tv.tv_sec < (60 * 3)) {
252 return (0);
253 }
254 (void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096, ap->a_cred, ap->a_p);
255 }
256
257 return (0);
258 }
259
260 /*
261 * Close called.
262 *
263 * Update the times on the cnode.
264 #% close vp U U U
265 #
266 vop_close {
267 IN struct vnode *vp;
268 IN int fflag;
269 IN struct ucred *cred;
270 IN struct proc *p;
271 */
272
273
274 static int
275 hfs_close(ap)
276 struct vop_close_args /* {
277 struct vnode *a_vp;
278 int a_fflag;
279 struct ucred *a_cred;
280 struct proc *a_p;
281 } */ *ap;
282 {
283 register struct vnode *vp = ap->a_vp;
284 register struct cnode *cp = VTOC(vp);
285 register struct filefork *fp = VTOF(vp);
286 struct proc *p = ap->a_p;
287 struct timeval tv;
288 off_t leof;
289 u_long blks, blocksize;
290 int devBlockSize;
291 int error;
292
293 simple_lock(&vp->v_interlock);
294 if ((!UBCISVALID(vp) && vp->v_usecount > 1)
295 || (UBCISVALID(vp) && ubc_isinuse(vp, 1))) {
296 tv = time;
297 CTIMES(cp, &tv, &tv);
298 }
299 simple_unlock(&vp->v_interlock);
300
301 if (ap->a_fflag & O_EVTONLY) {
302 if (vp->v_type == VREG) {
303 --VTOF(vp)->ff_evtonly_refs;
304 } else {
305 --VTOC(vp)->c_evtonly_refs;
306 };
307 };
308
309 /*
310 * VOP_CLOSE can be called with vp locked (from vclean).
311 * We check for this case using VOP_ISLOCKED and bail.
312 *
313 * XXX During a force unmount we won't do the cleanup below!
314 */
315 if (vp->v_type == VDIR || VOP_ISLOCKED(vp))
316 return (0);
317
318 leof = fp->ff_size;
319
320 if ((fp->ff_blocks > 0) &&
321 !ISSET(cp->c_flag, C_DELETED) &&
322 ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) == 0)) {
323 enum vtype our_type = vp->v_type;
324 u_long our_id = vp->v_id;
325 int was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
326
327 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
328 if (error)
329 return (0);
330 /*
331 * Since we can context switch in vn_lock our vnode
332 * could get recycled (eg umount -f). Double check
333 * that its still ours.
334 */
335 if (vp->v_type != our_type || vp->v_id != our_id
336 || cp != VTOC(vp) || !UBCINFOEXISTS(vp)) {
337 VOP_UNLOCK(vp, 0, p);
338 return (0);
339 }
340
341 /*
342 * Last chance to explicitly zero out the areas
343 * that are currently marked invalid:
344 */
345 VOP_DEVBLOCKSIZE(cp->c_devvp, &devBlockSize);
346 (void) cluster_push(vp);
347 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
348 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
349 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
350 off_t start = invalid_range->rl_start;
351 off_t end = invalid_range->rl_end;
352
353 /* The range about to be written must be validated
354 * first, so that VOP_CMAP() will return the
355 * appropriate mapping for the cluster code:
356 */
357 rl_remove(start, end, &fp->ff_invalidranges);
358
359 (void) cluster_write(vp, (struct uio *) 0, leof,
360 invalid_range->rl_end + 1, invalid_range->rl_start,
361 (off_t)0, devBlockSize, IO_HEADZEROFILL | IO_NOZERODIRTY);
362
363 if (ISSET(vp->v_flag, VHASDIRTY))
364 (void) cluster_push(vp);
365
366 cp->c_flag |= C_MODIFIED;
367 }
368 cp->c_flag &= ~C_ZFWANTSYNC;
369 cp->c_zftimeout = 0;
370 blocksize = VTOVCB(vp)->blockSize;
371 blks = leof / blocksize;
372 if (((off_t)blks * (off_t)blocksize) != leof)
373 blks++;
374 /*
375 * Shrink the peof to the smallest size neccessary to contain the leof.
376 */
377 if (blks < fp->ff_blocks)
378 (void) VOP_TRUNCATE(vp, leof, IO_NDELAY, ap->a_cred, p);
379 (void) cluster_push(vp);
380
381 if (!was_nocache)
382 CLR(vp->v_flag, VNOCACHE_DATA);
383
384 /*
385 * If the VOP_TRUNCATE didn't happen to flush the vnode's
386 * information out to disk, force it to be updated now that
387 * all invalid ranges have been zero-filled and validated:
388 */
389 if (cp->c_flag & C_MODIFIED) {
390 tv = time;
391 VOP_UPDATE(vp, &tv, &tv, 0);
392 }
393 VOP_UNLOCK(vp, 0, p);
394 }
395 if ((vp->v_flag & VSYSTEM) && (vp->v_usecount == 1))
396 vgone(vp);
397 return (0);
398 }
399
400 /*
401 #% access vp L L L
402 #
403 vop_access {
404 IN struct vnode *vp;
405 IN int mode;
406 IN struct ucred *cred;
407 IN struct proc *p;
408
409 */
410
411 static int
412 hfs_access(ap)
413 struct vop_access_args /* {
414 struct vnode *a_vp;
415 int a_mode;
416 struct ucred *a_cred;
417 struct proc *a_p;
418 } */ *ap;
419 {
420 struct vnode *vp = ap->a_vp;
421 struct cnode *cp = VTOC(vp);
422 struct ucred *cred = ap->a_cred;
423 register gid_t *gp;
424 mode_t mode = ap->a_mode;
425 mode_t mask = 0;
426 int i;
427 int error;
428
429 /*
430 * Disallow write attempts on read-only file systems;
431 * unless the file is a socket, fifo, or a block or
432 * character device resident on the file system.
433 */
434 if (mode & VWRITE) {
435 switch (vp->v_type) {
436 case VDIR:
437 case VLNK:
438 case VREG:
439 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
440 return (EROFS);
441 #if QUOTA
442 if ((error = hfs_getinoquota(cp)))
443 return (error);
444 #endif /* QUOTA */
445 break;
446 }
447 /* If immutable bit set, nobody gets to write it. */
448 if (cp->c_flags & IMMUTABLE)
449 return (EPERM);
450 }
451
452
453 /* Otherwise, user id 0 always gets access. */
454 if (cred->cr_uid == 0)
455 return (0);
456
457 mask = 0;
458
459 /* Otherwise, check the owner. */
460 if ( (cp->c_uid == cred->cr_uid) || (cp->c_uid == UNKNOWNUID) ) {
461 if (mode & VEXEC)
462 mask |= S_IXUSR;
463 if (mode & VREAD)
464 mask |= S_IRUSR;
465 if (mode & VWRITE)
466 mask |= S_IWUSR;
467 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
468 }
469
470 /* Otherwise, check the groups. */
471 if (! (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)) {
472 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++)
473 if (cp->c_gid == *gp) {
474 if (mode & VEXEC)
475 mask |= S_IXGRP;
476 if (mode & VREAD)
477 mask |= S_IRGRP;
478 if (mode & VWRITE)
479 mask |= S_IWGRP;
480 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
481 }
482 }
483
484 /* Otherwise, check everyone else. */
485 if (mode & VEXEC)
486 mask |= S_IXOTH;
487 if (mode & VREAD)
488 mask |= S_IROTH;
489 if (mode & VWRITE)
490 mask |= S_IWOTH;
491 return ((cp->c_mode & mask) == mask ? 0 : EACCES);
492 }
493
494
495
496 /*
497 #% getattr vp = = =
498 #
499 vop_getattr {
500 IN struct vnode *vp;
501 IN struct vattr *vap;
502 IN struct ucred *cred;
503 IN struct proc *p;
504
505 */
506
507
508 /* ARGSUSED */
static int
hfs_getattr(ap)
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct vattr *vap = ap->a_vap;
	struct timeval tv;

	/* Fold any pending access/change/update flags into the timestamps. */
	tv = time;
	CTIMES(cp, &tv, &tv);

	vap->va_type = vp->v_type;
	vap->va_mode = cp->c_mode;
	vap->va_nlink = cp->c_nlink;
	/*
	 * [2856576] Since we are dynamically changing the owner, also
	 * effectively turn off the set-user-id and set-group-id bits,
	 * just like chmod(2) would when changing ownership.  This prevents
	 * a security hole where set-user-id programs run as whoever is
	 * logged on (or root if nobody is logged in yet!)
	 */
	if (cp->c_uid == UNKNOWNUID) {
		vap->va_mode &= ~(S_ISUID | S_ISGID);
		vap->va_uid = ap->a_cred->cr_uid;
	} else {
		vap->va_uid = cp->c_uid;
	}
	vap->va_gid = cp->c_gid;
	vap->va_fsid = cp->c_dev;
	/*
	 * Exporting file IDs from HFS Plus:
	 *
	 * For "normal" files the c_fileid is the same value as the
	 * c_cnid.  But for hard link files, they are different - the
	 * c_cnid belongs to the active directory entry (ie the link)
	 * and the c_fileid is for the actual inode (ie the data file).
	 *
	 * The stat call (getattr) will always return the c_fileid
	 * and Carbon APIs, which are hardlink-ignorant, will always
	 * receive the c_cnid (from getattrlist).
	 */
	vap->va_fileid = cp->c_fileid;
	vap->va_atime.tv_sec = cp->c_atime;
	vap->va_atime.tv_nsec = 0;
	vap->va_mtime.tv_sec = cp->c_mtime;
	vap->va_mtime.tv_nsec = cp->c_mtime_nsec;
	vap->va_ctime.tv_sec = cp->c_ctime;
	vap->va_ctime.tv_nsec = 0;
	vap->va_gen = 0;
	vap->va_flags = cp->c_flags;
	vap->va_rdev = 0;
	vap->va_blocksize = VTOVFS(vp)->mnt_stat.f_iosize;
	vap->va_filerev = 0;
	if (vp->v_type == VDIR) {
		/* Directories report a synthesized size; they own no blocks. */
		vap->va_size = cp->c_nlink * AVERAGE_HFSDIRENTRY_SIZE;
		vap->va_bytes = 0;
	} else {
		vap->va_size = VTOF(vp)->ff_size;
		vap->va_bytes = (u_quad_t)cp->c_blocks *
				(u_quad_t)VTOVCB(vp)->blockSize;
		/* Device nodes additionally expose their device number. */
		if (vp->v_type == VBLK || vp->v_type == VCHR)
			vap->va_rdev = cp->c_rdev;
	}
	return (0);
}
580
581 /*
582 * Set attribute vnode op. called from several syscalls
583 #% setattr vp L L L
584 #
585 vop_setattr {
586 IN struct vnode *vp;
587 IN struct vattr *vap;
588 IN struct ucred *cred;
589 IN struct proc *p;
590
591 */
592
static int
hfs_setattr(ap)
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vattr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(vp);
	struct ucred *cred = ap->a_cred;
	struct proc *p = ap->a_p;
	struct timeval atimeval, mtimeval;
	int error;

	/*
	 * Check for unsettable attributes.
	 */
	if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	// XXXdbg
	// don't allow people to set the attributes of symlinks
	// (nfs has a bad habit of doing ths and it can cause
	// problems for journaling).
	//
	if (vp->v_type == VLNK) {
		return 0;
	}



	/*
	 * Handle chflags first: once new flags set IMMUTABLE or APPEND,
	 * the rest of the attributes are deliberately not applied.
	 */
	if (vap->va_flags != VNOVAL) {
		if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
			return (EROFS);
		if ((error = hfs_chflags(vp, vap->va_flags, cred, p)))
			return (error);
		if (vap->va_flags & (IMMUTABLE | APPEND))
			return (0);
	}

	if (cp->c_flags & (IMMUTABLE | APPEND))
		return (EPERM);

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (VTOHFS(vp)->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_extents[0];
		if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
			return EPERM;
		}
	}

	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
			return (EROFS);
		if ((error = hfs_chown(vp, vap->va_uid, vap->va_gid, cred, p)))
			return (error);
	}
	if (vap->va_size != VNOVAL) {
		/*
		 * Disallow write attempts on read-only file systems;
		 * unless the file is a socket, fifo, or a block or
		 * character device resident on the file system.
		 */
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);
		case VLNK:
		case VREG:
			if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
				return (EROFS);
			break;
		default:
			break;
		}
		if ((error = VOP_TRUNCATE(vp, vap->va_size, 0, cred, p)))
			return (error);
	}
	cp = VTOC(vp);
	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
			return (EROFS);
		/* Owner may always set times; others need VA_UTIMES_NULL + write access. */
		if (((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, cred, p)))) {
			return (error);
		}
		if (vap->va_atime.tv_sec != VNOVAL)
			cp->c_flag |= C_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL) {
			cp->c_flag |= C_CHANGE | C_UPDATE;
			/*
			 * The utimes system call can reset the modification
			 * time but it doesn't know about HFS create times.
			 * So we need to insure that the creation time is
			 * always at least as old as the modification time.
			 */
			if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
			    (cp->c_cnid != kRootDirID) &&
			    (vap->va_mtime.tv_sec < cp->c_itime)) {
				cp->c_itime = vap->va_mtime.tv_sec;
			}
		}
		atimeval.tv_sec = vap->va_atime.tv_sec;
		atimeval.tv_usec = 0;
		mtimeval.tv_sec = vap->va_mtime.tv_sec;
		mtimeval.tv_usec = 0;
		if ((error = VOP_UPDATE(vp, &atimeval, &mtimeval, 1)))
			return (error);
	}
	error = 0;
	/* Mode changes go last so earlier checks used the old mode. */
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
			return (EROFS);
		error = hfs_chmod(vp, (int)vap->va_mode, cred, p);
	}
	HFS_KNOTE(vp, NOTE_ATTRIB);
	return (error);
}
723
724
725 /*
726 * Change the mode on a file.
727 * cnode must be locked before calling.
728 */
__private_extern__
int
hfs_chmod(vp, mode, cred, p)
	register struct vnode *vp;
	register int mode;
	register struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);
	int error;

	/* Plain HFS volumes store no mode bits; silently succeed. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return (0);

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_extents[0];
		if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
			return EPERM;
		}
	}

#if OVERRIDE_UNKNOWN_PERMISSIONS
	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
		return (0);
	};
#endif
	/* Only the owner (or superuser) may change the mode. */
	if ((error = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0)
		return (error);
	if (cred->cr_uid) {
		/*
		 * Non-root callers: sticky bit is directory-only, and
		 * setgid requires membership in the file's group.
		 */
		if (vp->v_type != VDIR && (mode & S_ISTXT))
			return (EFTYPE);
		if (!groupmember(cp->c_gid, cred) && (mode & S_ISGID))
			return (EPERM);
	}
	/* Replace the permission bits, keep the file-type bits. */
	cp->c_mode &= ~ALLPERMS;
	cp->c_mode |= (mode & ALLPERMS);
	cp->c_flag |= C_CHANGE;
	return (0);
}
771
772
773 __private_extern__
774 int
775 hfs_write_access(struct vnode *vp, struct ucred *cred, struct proc *p, Boolean considerFlags)
776 {
777 struct cnode *cp = VTOC(vp);
778 gid_t *gp;
779 int retval = 0;
780 int i;
781
782 /*
783 * Disallow write attempts on read-only file systems;
784 * unless the file is a socket, fifo, or a block or
785 * character device resident on the file system.
786 */
787 switch (vp->v_type) {
788 case VDIR:
789 case VLNK:
790 case VREG:
791 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
792 return (EROFS);
793 break;
794 default:
795 break;
796 }
797
798 /* If immutable bit set, nobody gets to write it. */
799 if (considerFlags && (cp->c_flags & IMMUTABLE))
800 return (EPERM);
801
802 /* Otherwise, user id 0 always gets access. */
803 if (cred->cr_uid == 0)
804 return (0);
805
806 /* Otherwise, check the owner. */
807 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
808 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
809
810 /* Otherwise, check the groups. */
811 for (i = 0, gp = cred->cr_groups; i < cred->cr_ngroups; i++, gp++) {
812 if (cp->c_gid == *gp)
813 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
814 }
815
816 /* Otherwise, check everyone else. */
817 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
818 }
819
820
821
822 /*
823 * Change the flags on a file or directory.
824 * cnode must be locked before calling.
825 */
__private_extern__
int
hfs_chflags(vp, flags, cred, p)
	register struct vnode *vp;
	register u_long flags;
	register struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);
	int retval;

	/*
	 * Plain HFS has no on-disk ownership, so only write access is
	 * required there; HFS Plus requires ownership (or superuser).
	 */
	if (VTOVCB(vp)->vcbSigWord == kHFSSigWord) {
		if ((retval = hfs_write_access(vp, cred, p, false)) != 0) {
			return retval;
		};
	} else if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, true)) != 0) {
		return retval;
	};

	if (cred->cr_uid == 0) {
		/*
		 * Even root may not clear the system immutable/append
		 * flags while securelevel is raised.
		 */
		if ((cp->c_flags & (SF_IMMUTABLE | SF_APPEND)) &&
		    securelevel > 0) {
			return EPERM;
		};
		cp->c_flags = flags;
	} else {
		/*
		 * Ordinary owners may only manipulate the user-settable
		 * (UF_*) flags, and never while SF_IMMUTABLE/SF_APPEND
		 * are in force.
		 */
		if (cp->c_flags & (SF_IMMUTABLE | SF_APPEND) ||
		    (flags & UF_SETTABLE) != flags) {
			return EPERM;
		};
		cp->c_flags &= SF_SETTABLE;
		cp->c_flags |= (flags & UF_SETTABLE);
	}
	cp->c_flag |= C_CHANGE;

	return (0);
}
863
864
865 /*
866 * Perform chown operation on cnode cp;
867 * code must be locked prior to call.
868 */
__private_extern__
int
hfs_chown(vp, uid, gid, cred, p)
	register struct vnode *vp;
	uid_t uid;
	gid_t gid;
	struct ucred *cred;
	struct proc *p;
{
	register struct cnode *cp = VTOC(vp);
	uid_t ouid;
	gid_t ogid;
	int error = 0;
#if QUOTA
	register int i;
	int64_t change;
#endif /* QUOTA */

	/* Plain HFS volumes carry no ownership information. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return (EOPNOTSUPP);

	/* Ownership is synthesized on such mounts; nothing to change. */
	if (VTOVFS(vp)->mnt_flag & MNT_UNKNOWNPERMISSIONS)
		return (0);

	/* VNOVAL means "leave this id unchanged". */
	if (uid == (uid_t)VNOVAL)
		uid = cp->c_uid;
	if (gid == (gid_t)VNOVAL)
		gid = cp->c_gid;
	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((cred->cr_uid != cp->c_uid || uid != cp->c_uid ||
	    (gid != cp->c_gid && !groupmember((gid_t)gid, cred))) &&
	    (error = suser(cred, &p->p_acflag)))
		return (error);

	ogid = cp->c_gid;
	ouid = cp->c_uid;
#if QUOTA
	/* Uncharge the old ids' quotas before switching ownership. */
	if ((error = hfs_getinoquota(cp)))
		return (error);
	if (ouid == uid) {
		dqrele(vp, cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;
	}
	if (ogid == gid) {
		dqrele(vp, cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;
	}

	/*
	 * Eventually need to account for (fake) a block per directory
	 *if (vp->v_type == VDIR)
	 *change = VTOVCB(vp)->blockSize;
	 *else
	 */

	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(vp, cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}
#endif /* QUOTA */
	cp->c_gid = gid;
	cp->c_uid = uid;
#if QUOTA
	/* Charge the new ids; on any failure roll the ownership back. */
	if ((error = hfs_getinoquota(cp)) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
				goto good;
			else
				(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(vp, cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	/* Restore the previous owner/group and re-charge their quotas. */
	cp->c_gid = ogid;
	cp->c_uid = ouid;
	if (hfs_getinoquota(cp) == 0) {
		if (ouid == uid) {
			dqrele(vp, cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(vp, cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	return (error);
good:
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");
#endif /* QUOTA */

	if (ouid != uid || ogid != gid)
		cp->c_flag |= C_CHANGE;
	/* Non-root chown clears the set-id bits, as chown(2) requires. */
	if (ouid != uid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISUID;
	if (ogid != gid && cred->cr_uid != 0)
		cp->c_mode &= ~S_ISGID;
	return (0);
}
988
989
990 /*
991 #
992 #% exchange fvp L L L
993 #% exchange tvp L L L
994 #
995 */
996 /*
997 * The hfs_exchange routine swaps the fork data in two files by
998 * exchanging some of the information in the cnode. It is used
999 * to preserve the file ID when updating an existing file, in
1000 * case the file is being tracked through its file ID. Typically
1001 * its used after creating a new file during a safe-save.
1002 */
1003
static int
hfs_exchange(ap)
	struct vop_exchange_args /* {
		struct vnode *a_fvp;
		struct vnode *a_tvp;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap;
{
	struct vnode *from_vp = ap->a_fvp;
	struct vnode *to_vp = ap->a_tvp;
	struct cnode *from_cp = VTOC(from_vp);
	struct cnode *to_cp = VTOC(to_vp);
	struct hfsmount *hfsmp = VTOHFS(from_vp);
	struct cat_desc tempdesc;
	struct cat_attr tempattr;
	int error = 0, started_tr = 0, grabbed_lock = 0;
	cat_cookie_t cookie = {0};

	/* The files must be on the same volume. */
	if (from_vp->v_mount != to_vp->v_mount)
		return (EXDEV);

	/* Only normal files can be exchanged. */
	if ((from_vp->v_type != VREG) || (to_vp->v_type != VREG) ||
	    (from_cp->c_flag & C_HARDLINK) || (to_cp->c_flag & C_HARDLINK) ||
	    VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp))
		return (EINVAL);

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (hfsmp->jnl) {
		struct HFSPlusExtentDescriptor *extd;

		if (from_cp->c_datafork) {
			extd = &from_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				return EPERM;
			}
		}

		if (to_cp->c_datafork) {
			extd = &to_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				return EPERM;
			}
		}
	}

	// XXXdbg
	/* Take the shared volume lock and open a journal transaction. */
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto Err_Exit;
		}
		started_tr = 1;
	}

	/*
	 * Reserve some space in the Catalog file.
	 */
	if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, ap->a_p))) {
		goto Err_Exit;
	}

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, ap->a_p);
	if (error) goto Err_Exit;

	/* The backend code always tries to delete the virtual
	 * extent id for exchanging files so we neeed to lock
	 * the extents b-tree.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_EXCLUSIVE, ap->a_p);
	if (error) {
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);
		goto Err_Exit;
	}

	/* Do the exchange */
	error = MacToVFSError(ExchangeFileIDs(HFSTOVCB(hfsmp),
	    from_cp->c_desc.cd_nameptr, to_cp->c_desc.cd_nameptr,
	    from_cp->c_parentcnid, to_cp->c_parentcnid,
	    from_cp->c_hint, to_cp->c_hint));

	/* Release in reverse acquisition order. */
	(void) hfs_metafilelocking(hfsmp, kHFSExtentsFileID, LK_RELEASE, ap->a_p);
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, ap->a_p);

	if (error != E_NONE) {
		goto Err_Exit;
	}

	/* Purge the vnodes from the name cache */
	if (from_vp)
		cache_purge(from_vp);
	if (to_vp)
		cache_purge(to_vp);

	/* Save a copy of from attributes before swapping. */
	bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
	bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));

	/*
	 * Swap the descriptors and all non-fork related attributes.
	 * (except the modify date)
	 */
	bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));

	from_cp->c_hint = 0;
	from_cp->c_fileid = from_cp->c_cnid;
	from_cp->c_itime = to_cp->c_itime;
	from_cp->c_btime = to_cp->c_btime;
	from_cp->c_atime = to_cp->c_atime;
	from_cp->c_ctime = to_cp->c_ctime;
	from_cp->c_gid = to_cp->c_gid;
	from_cp->c_uid = to_cp->c_uid;
	from_cp->c_flags = to_cp->c_flags;
	from_cp->c_mode = to_cp->c_mode;
	bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

	bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
	to_cp->c_hint = 0;
	to_cp->c_fileid = to_cp->c_cnid;
	to_cp->c_itime = tempattr.ca_itime;
	to_cp->c_btime = tempattr.ca_btime;
	to_cp->c_atime = tempattr.ca_atime;
	to_cp->c_ctime = tempattr.ca_ctime;
	to_cp->c_gid = tempattr.ca_gid;
	to_cp->c_uid = tempattr.ca_uid;
	to_cp->c_flags = tempattr.ca_flags;
	to_cp->c_mode = tempattr.ca_mode;
	bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

	/* Reinsert into the cnode hash under new file IDs*/
	hfs_chashremove(from_cp);
	hfs_chashremove(to_cp);

	hfs_chashinsert(from_cp);
	hfs_chashinsert(to_cp);

	/*
	 * When a file moves out of "Cleanup At Startup"
	 * we can drop its NODUMP status.
	 */
	if ((from_cp->c_flags & UF_NODUMP) &&
	    (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
		from_cp->c_flags &= ~UF_NODUMP;
		from_cp->c_flag |= C_CHANGE;
	}
	if ((to_cp->c_flags & UF_NODUMP) &&
	    (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
		to_cp->c_flags &= ~UF_NODUMP;
		to_cp->c_flag |= C_CHANGE;
	}

	HFS_KNOTE(from_vp, NOTE_ATTRIB);
	HFS_KNOTE(to_vp, NOTE_ATTRIB);

Err_Exit:
	/* Release catalog reservation, transaction and volume lock. */
	cat_postflight(hfsmp, &cookie, ap->a_p);

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	return (error);
}
1175
1176
1177 /*
1178
1179 #% fsync vp L L L
1180 #
1181 vop_fsync {
1182 IN struct vnode *vp;
1183 IN struct ucred *cred;
1184 IN int waitfor;
1185 IN struct proc *p;
1186
1187 */
1188 static int
1189 hfs_fsync(ap)
1190 struct vop_fsync_args /* {
1191 struct vnode *a_vp;
1192 struct ucred *a_cred;
1193 int a_waitfor;
1194 struct proc *a_p;
1195 } */ *ap;
1196 {
1197 struct vnode *vp = ap->a_vp;
1198 struct cnode *cp = VTOC(vp);
1199 struct filefork *fp = NULL;
1200 int retval = 0;
1201 register struct buf *bp;
1202 struct timeval tv;
1203 struct buf *nbp;
1204 struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
1205 int s;
1206 int wait;
1207 int retry = 0;
1208
1209 wait = (ap->a_waitfor == MNT_WAIT);
1210
1211 /* HFS directories don't have any data blocks. */
1212 if (vp->v_type == VDIR)
1213 goto metasync;
1214
1215 /*
1216 * For system files flush the B-tree header and
1217 * for regular files write out any clusters
1218 */
1219 if (vp->v_flag & VSYSTEM) {
1220 if (VTOF(vp)->fcbBTCBPtr != NULL) {
1221 // XXXdbg
1222 if (hfsmp->jnl == NULL) {
1223 BTFlushPath(VTOF(vp));
1224 }
1225 }
1226 } else if (UBCINFOEXISTS(vp))
1227 (void) cluster_push(vp);
1228
1229 /*
1230 * When MNT_WAIT is requested and the zero fill timeout
1231 * has expired then we must explicitly zero out any areas
1232 * that are currently marked invalid (holes).
1233 *
1234 * Files with NODUMP can bypass zero filling here.
1235 */
1236 if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
1237 ((cp->c_flags & UF_NODUMP) == 0) &&
1238 UBCINFOEXISTS(vp) && (fp = VTOF(vp)) &&
1239 cp->c_zftimeout != 0) {
1240 int devblksize;
1241 int was_nocache;
1242
1243 if (time.tv_sec < cp->c_zftimeout) {
1244 /* Remember that a force sync was requested. */
1245 cp->c_flag |= C_ZFWANTSYNC;
1246 goto loop;
1247 }
1248 VOP_DEVBLOCKSIZE(cp->c_devvp, &devblksize);
1249 was_nocache = ISSET(vp->v_flag, VNOCACHE_DATA);
1250 SET(vp->v_flag, VNOCACHE_DATA); /* Don't cache zeros */
1251
1252 while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
1253 struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
1254 off_t start = invalid_range->rl_start;
1255 off_t end = invalid_range->rl_end;
1256
1257 /* The range about to be written must be validated
1258 * first, so that VOP_CMAP() will return the
1259 * appropriate mapping for the cluster code:
1260 */
1261 rl_remove(start, end, &fp->ff_invalidranges);
1262
1263 (void) cluster_write(vp, (struct uio *) 0,
1264 fp->ff_size,
1265 invalid_range->rl_end + 1,
1266 invalid_range->rl_start,
1267 (off_t)0, devblksize,
1268 IO_HEADZEROFILL | IO_NOZERODIRTY);
1269 cp->c_flag |= C_MODIFIED;
1270 }
1271 (void) cluster_push(vp);
1272 if (!was_nocache)
1273 CLR(vp->v_flag, VNOCACHE_DATA);
1274 cp->c_flag &= ~C_ZFWANTSYNC;
1275 cp->c_zftimeout = 0;
1276 }
1277
1278 /*
1279 * Flush all dirty buffers associated with a vnode.
1280 */
1281 loop:
1282 s = splbio();
1283 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1284 nbp = bp->b_vnbufs.le_next;
1285 if ((bp->b_flags & B_BUSY))
1286 continue;
1287 if ((bp->b_flags & B_DELWRI) == 0)
1288 panic("hfs_fsync: bp 0x% not dirty (hfsmp 0x%x)", bp, hfsmp);
1289 // XXXdbg
1290 if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
1291 if ((bp->b_flags & B_META) == 0) {
1292 panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
1293 bp, hfsmp->jnl);
1294 }
1295 // if journal_active() returns >= 0 then the journal is ok and we
1296 // shouldn't do anything to this locked block (because it is part
1297 // of a transaction). otherwise we'll just go through the normal
1298 // code path and flush the buffer.
1299 if (journal_active(hfsmp->jnl) >= 0) {
1300 continue;
1301 }
1302 }
1303
1304 bremfree(bp);
1305 bp->b_flags |= B_BUSY;
1306 /* Clear B_LOCKED, should only be set on meta files */
1307 bp->b_flags &= ~B_LOCKED;
1308
1309 splx(s);
1310 /*
1311 * Wait for I/O associated with indirect blocks to complete,
1312 * since there is no way to quickly wait for them below.
1313 */
1314 if (bp->b_vp == vp || ap->a_waitfor == MNT_NOWAIT)
1315 (void) bawrite(bp);
1316 else
1317 (void) VOP_BWRITE(bp);
1318 goto loop;
1319 }
1320
1321 if (wait) {
1322 while (vp->v_numoutput) {
1323 vp->v_flag |= VBWAIT;
1324 tsleep((caddr_t)&vp->v_numoutput, PRIBIO + 1, "hfs_fsync", 0);
1325 }
1326
1327 // XXXdbg -- is checking for hfsmp->jnl == NULL the right
1328 // thing to do?
1329 if (hfsmp->jnl == NULL && vp->v_dirtyblkhd.lh_first) {
1330 /* still have some dirty buffers */
1331 if (retry++ > 10) {
1332 vprint("hfs_fsync: dirty", vp);
1333 splx(s);
1334 /*
1335 * Looks like the requests are not
1336 * getting queued to the driver.
1337 * Retrying here causes a cpu bound loop.
1338 * Yield to the other threads and hope
1339 * for the best.
1340 */
1341 (void)tsleep((caddr_t)&vp->v_numoutput,
1342 PRIBIO + 1, "hfs_fsync", hz/10);
1343 retry = 0;
1344 } else {
1345 splx(s);
1346 }
1347 /* try again */
1348 goto loop;
1349 }
1350 }
1351 splx(s);
1352
1353 metasync:
1354 tv = time;
1355 if (vp->v_flag & VSYSTEM) {
1356 if (VTOF(vp)->fcbBTCBPtr != NULL)
1357 BTSetLastSync(VTOF(vp), tv.tv_sec);
1358 cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
1359 } else /* User file */ {
1360 retval = VOP_UPDATE(ap->a_vp, &tv, &tv, wait);
1361
1362 /* When MNT_WAIT is requested push out any delayed meta data */
1363 if ((retval == 0) && wait && cp->c_hint &&
1364 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
1365 hfs_metasync(VTOHFS(vp), cp->c_hint, ap->a_p);
1366 }
1367
1368 // make sure that we've really been called from the user
1369 // fsync() and if so push out any pending transactions
1370 // that this file might is a part of (and get them on
1371 // stable storage).
1372 if (vp->v_flag & VFULLFSYNC) {
1373 if (hfsmp->jnl) {
1374 journal_flush(hfsmp->jnl);
1375 } else {
1376 VOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NOCRED, ap->a_p);
1377 }
1378 }
1379 }
1380
1381 return (retval);
1382 }
1383
1384 /* Sync an hfs catalog b-tree node */
1385 static int
1386 hfs_metasync(struct hfsmount *hfsmp, daddr_t node, struct proc *p)
1387 {
1388 struct vnode *vp;
1389 struct buf *bp;
1390 struct buf *nbp;
1391 int s;
1392
1393 vp = HFSTOVCB(hfsmp)->catalogRefNum;
1394
1395 // XXXdbg - don't need to do this on a journaled volume
1396 if (hfsmp->jnl) {
1397 return 0;
1398 }
1399
1400 if (hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p) != 0)
1401 return (0);
1402
1403 /*
1404 * Look for a matching node that has been delayed
1405 * but is not part of a set (B_LOCKED).
1406 */
1407 s = splbio();
1408 for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
1409 nbp = bp->b_vnbufs.le_next;
1410 if (bp->b_flags & B_BUSY)
1411 continue;
1412 if (bp->b_lblkno == node) {
1413 if (bp->b_flags & B_LOCKED)
1414 break;
1415
1416 bremfree(bp);
1417 bp->b_flags |= B_BUSY;
1418 splx(s);
1419 (void) VOP_BWRITE(bp);
1420 goto exit;
1421 }
1422 }
1423 splx(s);
1424 exit:
1425 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
1426
1427 return (0);
1428 }
1429
/*
 * hfs_btsync - flush dirty buffers belonging to a b-tree vnode.
 *
 * Walks the vnode's dirty buffer list and asynchronously writes each
 * delayed-write buffer.  When sync_transaction is non-zero, only
 * buffers pinned to a transaction set (B_LOCKED) are pushed.  On a
 * journaled volume, locked blocks belonging to an active journal
 * transaction are left untouched.  Afterwards the b-tree's last-sync
 * timestamp is recorded and the cnode's dirty flags are cleared.
 *
 * Always returns 0.
 */
__private_extern__
int
hfs_btsync(struct vnode *vp, int sync_transaction)
{
	struct cnode *cp = VTOC(vp);
	register struct buf *bp;
	struct timeval tv;
	struct buf *nbp;
	struct hfsmount *hfsmp = VTOHFS(vp);
	int s;

	/*
	 * Flush all dirty buffers associated with b-tree.
	 */
loop:
	s = splbio();

	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
		nbp = bp->b_vnbufs.le_next;
		if ((bp->b_flags & B_BUSY))
			continue;
		if ((bp->b_flags & B_DELWRI) == 0)
			panic("hfs_btsync: not dirty (bp 0x%x hfsmp 0x%x)", bp, hfsmp);

		// XXXdbg
		if (hfsmp->jnl && (bp->b_flags & B_LOCKED)) {
			if ((bp->b_flags & B_META) == 0) {
				panic("hfs: bp @ 0x%x is locked but not meta! jnl 0x%x\n",
					  bp, hfsmp->jnl);
			}
			// if journal_active() returns >= 0 then the journal is ok and we
			// shouldn't do anything to this locked block (because it is part
			// of a transaction).  otherwise we'll just go through the normal
			// code path and flush the buffer.
			if (journal_active(hfsmp->jnl) >= 0) {
				continue;
			}
		}

		/* In transaction-sync mode, only push pinned (B_LOCKED) buffers. */
		if (sync_transaction && !(bp->b_flags & B_LOCKED))
			continue;

		bremfree(bp);
		bp->b_flags |= B_BUSY;
		bp->b_flags &= ~B_LOCKED;

		splx(s);

		(void) bawrite(bp);

		/* The dirty list may have changed while we dropped splbio; rescan. */
		goto loop;
	}
	splx(s);

	tv = time;
	if ((vp->v_flag & VSYSTEM) && (VTOF(vp)->fcbBTCBPtr != NULL))
		(void) BTSetLastSync(VTOF(vp), tv.tv_sec);
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);

	return 0;
}
1491
1492 /*
1493 * Rmdir system call.
1494 #% rmdir dvp L U U
1495 #% rmdir vp L U U
1496 #
1497 vop_rmdir {
1498 IN WILLRELE struct vnode *dvp;
1499 IN WILLRELE struct vnode *vp;
1500 IN struct componentname *cnp;
1501
1502 */
1503 static int
1504 hfs_rmdir(ap)
1505 struct vop_rmdir_args /* {
1506 struct vnode *a_dvp;
1507 struct vnode *a_vp;
1508 struct componentname *a_cnp;
1509 } */ *ap;
1510 {
1511 return (hfs_removedir(ap->a_dvp, ap->a_vp, ap->a_cnp, 0));
1512 }
1513
/*
 * hfs_removedir
 *
 * Remove an (empty) directory: delete its catalog record, decrement
 * the parent's entry/link counts, and update the volume counts.
 *
 * options:
 *   HFSRM_SKIP_RESERVE  - caller already reserved catalog space;
 *                         skip cat_preflight/cat_postflight here.
 *   HFSRM_PARENT_LOCKED - leave dvp locked/referenced on return
 *                         (caller still holds it).
 *
 * On entry dvp and vp are locked; vp is always released on exit and
 * dvp is released unless HFSRM_PARENT_LOCKED is set.
 */
static int
hfs_removedir(dvp, vp, cnp, options)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
	int options;
{
	struct proc *p = cnp->cn_proc;
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount * hfsmp;
	struct timeval tv;
	cat_cookie_t cookie = {0};
	int error = 0, started_tr = 0, grabbed_lock = 0;

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	if (dcp == cp) {
		vrele(dvp);
		vput(vp);
		return (EINVAL);	/* cannot remove "." */
	}

#if QUOTA
	(void)hfs_getinoquota(cp);
#endif
	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto out;
		}
		started_tr = 1;
	}

	if (!(options & HFSRM_SKIP_RESERVE)) {
		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
	}

	/*
	 * Verify the directory is empty (and valid).
	 * (Rmdir ".." won't be valid since
	 *  ".." will contain a reference to
	 *  the current directory and thus be
	 *  non-empty.)
	 */
	if (cp->c_entries != 0) {
		error = ENOTEMPTY;
		goto out;
	}
	if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the namei cache: */
	cache_purge(vp);

	/* Lock catalog b-tree */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
	if (error) goto out;

	/* Re-check under the catalog lock; deleting a non-empty dir corrupts the volume. */
	if (cp->c_entries > 0)
		panic("hfs_rmdir: attempting to delete a non-empty directory!");
	/* Remove entry from catalog */
	error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
	if (error) goto out;

#if QUOTA
	(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

	/* The parent lost a child */
	if (dcp->c_entries > 0)
		dcp->c_entries--;
	if (dcp->c_nlink > 0)
		dcp->c_nlink--;
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	tv = time;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);
	HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);

	hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

	cp->c_mode = 0;  /* Makes the vnode go away...see inactive */
	cp->c_flag |= C_NOEXISTS;
out:
	if (!(options & HFSRM_PARENT_LOCKED)) {
		vput(dvp);
	}
	HFS_KNOTE(vp, NOTE_DELETE);
	vput(vp);

	if (!(options & HFSRM_SKIP_RESERVE)) {
		cat_postflight(hfsmp, &cookie, p);
	}
	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	return (error);
}
1634
1635 /*
1636
1637 #% remove dvp L U U
1638 #% remove vp L U U
1639 #
1640 vop_remove {
1641 IN WILLRELE struct vnode *dvp;
1642 IN WILLRELE struct vnode *vp;
1643 IN struct componentname *cnp;
1644
1645 */
1646
1647 static int
1648 hfs_remove(ap)
1649 struct vop_remove_args /* {
1650 struct vnode *a_dvp;
1651 struct vnode *a_vp;
1652 struct componentname *a_cnp;
1653 } */ *ap;
1654 {
1655 return (hfs_removefile(ap->a_dvp, ap->a_vp, ap->a_cnp, 0));
1656 }
1657
1658
1659
/*
 * hfs_removefile
 *
 * Similar to hfs_remove except there are additional options.
 *
 * Handles three distinct cases:
 *   1. hard link          ==> delete just the link record
 *   2. busy or "big" file ==> rename into the hidden orphan directory
 *   3. ordinary file      ==> truncate forks and delete the record
 *
 * options:
 *   HFSRM_SKIP_RESERVE  - caller already reserved catalog space
 *   HFSRM_SAVE_NAME     - don't free the componentname path buffer
 *   HFSRM_PARENT_LOCKED - leave dvp locked/referenced on return
 *
 * On entry dvp and vp are locked.  vp is vput on error, or unlocked
 * and vrele'd on success; dvp is released unless HFSRM_PARENT_LOCKED.
 */
static int
hfs_removefile(dvp, vp, cnp, options)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
	int options;
{
	struct vnode *rvp = NULL;
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount *hfsmp;
	struct proc *p = cnp->cn_proc;
	int dataforkbusy = 0;
	int rsrcforkbusy = 0;
	int truncated = 0;
	struct timeval tv;
	cat_cookie_t cookie = {0};
	int error = 0;
	int started_tr = 0, grabbed_lock = 0;
	int refcount, isbigfile = 0;

	/* Directories should call hfs_rmdir! */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	if (cp->c_parentcnid != dcp->c_cnid) {
		error = EINVAL;
		goto out;
	}

	/* Make sure a remove is permitted */
	if ((cp->c_flags & (IMMUTABLE | APPEND)) ||
	    (VTOC(dvp)->c_flags & APPEND) ||
	    VNODE_IS_RSRC(vp)) {
		error = EPERM;
		goto out;
	}

	/*
	 * Acquire a vnode for a non-empty resource fork.
	 * (needed for VOP_TRUNCATE)
	 */
	if (cp->c_blocks - VTOF(vp)->ff_blocks) {
		error = hfs_vgetrsrc(hfsmp, vp, &rvp, p);
		if (error)
			goto out;
	}

	// XXXdbg - don't allow deleting the journal or journal_info_block
	if (hfsmp->jnl && cp->c_datafork) {
		struct HFSPlusExtentDescriptor *extd;

		extd = &cp->c_datafork->ff_extents[0];
		if (extd->startBlock == HFSTOVCB(hfsmp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
			error = EPERM;
			goto out;
		}
	}

	/*
	 * Check if this file is being used.
	 *
	 * The namei done for the remove took a reference on the
	 * vnode (vp). And we took a ref on the resource vnode (rvp).
	 * Hence set 1 in the tookref parameter of ubc_isinuse().
	 */
	if (VTOC(vp)->c_flag & C_VPREFHELD) {
		refcount = 2;
	} else {
		refcount = 1;
	}
	if (UBCISVALID(vp) && ubc_isinuse(vp, refcount))
		dataforkbusy = 1;
	if (rvp && UBCISVALID(rvp) && ubc_isinuse(rvp, 1))
		rsrcforkbusy = 1;

	// need this to check if we have to break the deletion
	// into multiple pieces
	isbigfile = (VTOC(vp)->c_datafork->ff_size >= HFS_BIGFILE_SIZE);

	/*
	 * Carbon semantics prohibit deleting busy files.
	 * (enforced when NODELETEBUSY is requested)
	 */
	if ((dataforkbusy || rsrcforkbusy) &&
	    ((cnp->cn_flags & NODELETEBUSY) ||
	     (hfsmp->hfs_privdir_desc.cd_cnid == 0))) {
		error = EBUSY;
		goto out;
	}

#if QUOTA
	(void)hfs_getinoquota(cp);
#endif /* QUOTA */

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	grabbed_lock = 1;
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			goto out;
		}
		started_tr = 1;
	}

	if (!(options & HFSRM_SKIP_RESERVE)) {
		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, &cookie, p))) {
			goto out;
		}
	}

	/* Remove our entry from the namei cache. */
	cache_purge(vp);

	// XXXdbg - if we're journaled, kill any dirty symlink buffers
	if (hfsmp->jnl && vp->v_type == VLNK && vp->v_dirtyblkhd.lh_first) {
		struct buf *bp, *nbp;

	  recheck:
		for (bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
			nbp = bp->b_vnbufs.le_next;

			if ((bp->b_flags & B_BUSY)) {
				// if it was busy, someone else must be dealing
				// with it so just move on.
				continue;
			}

			if (!(bp->b_flags & B_META)) {
				panic("hfs: symlink bp @ 0x%x is not marked meta-data!\n", bp);
			}

			// if it's part of the current transaction, kill it.
			if (bp->b_flags & B_LOCKED) {
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				journal_kill_block(hfsmp->jnl, bp);
				/* list may have changed while the block was killed; rescan */
				goto recheck;
			}
		}
	}
	// XXXdbg

	/*
	 * Truncate any non-busy forks.  Busy forks will
	 * get truncated when their vnode goes inactive.
	 *
	 * (Note: hard links are truncated in VOP_INACTIVE)
	 */
	if ((cp->c_flag & C_HARDLINK) == 0) {
		int mode = cp->c_mode;

		if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(vp, (off_t)0, IO_NDELAY, NOCRED, p);
			cp->c_mode = mode;
			if (error)
				goto out;
			truncated = 1;
		}
		if (!rsrcforkbusy && rvp) {
			cp->c_mode = 0;  /* Suppress VOP_UPDATES */
			error = VOP_TRUNCATE(rvp, (off_t)0, IO_NDELAY, NOCRED, p);
			cp->c_mode = mode;
			if (error)
				goto out;
			truncated = 1;
		}
	}
	/*
	 * There are 3 remove cases to consider:
	 *   1. File is a hardlink    ==> remove the link
	 *   2. File is busy (in use) ==> move/rename the file
	 *   3. File is not in use    ==> remove the file
	 */

	if (cp->c_flag & C_HARDLINK) {
		struct cat_desc desc;

		if ((cnp->cn_flags & HASBUF) == 0 ||
		    cnp->cn_nameptr[0] == '\0') {
			error = ENOENT;	/* name missing! */
			goto out;
		}

		/* Setup a descriptor for the link */
		bzero(&desc, sizeof(desc));
		desc.cd_nameptr = cnp->cn_nameptr;
		desc.cd_namelen = cnp->cn_namelen;
		desc.cd_parentcnid = dcp->c_cnid;
		/* XXX - if cnid is out of sync then the wrong thread rec will get deleted. */
		desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		/* Delete the link record */
		error = cat_delete(hfsmp, &desc, &cp->c_attr);

		if ((error == 0) && (--cp->c_nlink < 1)) {
			char inodename[32];
			char delname[32];
			struct cat_desc to_desc;
			struct cat_desc from_desc;

			/*
			 * This is now essentially an open deleted file.
			 * Rename it to reflect this state which makes
			 * orphan file cleanup easier (see hfs_remove_orphans).
			 * Note: a rename failure here is not fatal.
			 */
			MAKE_INODE_NAME(inodename, cp->c_rdev);
			bzero(&from_desc, sizeof(from_desc));
			from_desc.cd_nameptr = inodename;
			from_desc.cd_namelen = strlen(inodename);
			from_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
			from_desc.cd_flags = 0;
			from_desc.cd_cnid = cp->c_fileid;

			MAKE_DELETED_NAME(delname, cp->c_fileid);
			bzero(&to_desc, sizeof(to_desc));
			to_desc.cd_nameptr = delname;
			to_desc.cd_namelen = strlen(delname);
			to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
			to_desc.cd_flags = 0;
			to_desc.cd_cnid = cp->c_fileid;

			(void) cat_rename(hfsmp, &from_desc, &hfsmp->hfs_privdir_desc,
					  &to_desc, (struct cat_desc *)NULL);
			cp->c_flag |= C_DELETED;
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

		if (error != 0)
			goto out;

		cp->c_flag |= C_CHANGE;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));

	} else if (dataforkbusy || rsrcforkbusy || isbigfile) {
		char delname[32];
		struct cat_desc to_desc;
		struct cat_desc todir_desc;

		/*
		 * Orphan this file (move to hidden directory).
		 */
		bzero(&todir_desc, sizeof(todir_desc));
		todir_desc.cd_parentcnid = 2;	/* presumably kHFSRootFolderID -- verify */

		MAKE_DELETED_NAME(delname, cp->c_fileid);
		bzero(&to_desc, sizeof(to_desc));
		to_desc.cd_nameptr = delname;
		to_desc.cd_namelen = strlen(delname);
		to_desc.cd_parentcnid = hfsmp->hfs_privdir_desc.cd_cnid;
		to_desc.cd_flags = 0;
		to_desc.cd_cnid = cp->c_cnid;

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		error = cat_rename(hfsmp, &cp->c_desc, &todir_desc,
				&to_desc, (struct cat_desc *)NULL);

		// XXXdbg - only bump this count if we were successful
		if (error == 0) {
			hfsmp->hfs_privdir_attr.ca_entries++;
		}
		(void)cat_update(hfsmp, &hfsmp->hfs_privdir_desc,
				&hfsmp->hfs_privdir_attr, NULL, NULL);

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

		cp->c_flag |= C_CHANGE | C_DELETED | C_NOEXISTS;
		--cp->c_nlink;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);

	} else /* Not busy */ {

		if (cp->c_blocks > 0) {
#if 0
			panic("hfs_remove: attempting to delete a non-empty file!");
#else
			printf("hfs_remove: attempting to delete a non-empty file %s\n",
				cp->c_desc.cd_nameptr);
			error = EBUSY;
			goto out;
#endif
		}

		/* Lock catalog b-tree */
		error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
		if (error)
			goto out;

		error = cat_delete(hfsmp, &cp->c_desc, &cp->c_attr);

		if (error && error != ENXIO && error != ENOENT && truncated) {
			if ((cp->c_datafork && cp->c_datafork->ff_size != 0) ||
				(cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) {
				panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
					  error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size);
			} else {
				printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
					   cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
			}
		}

		/* Unlock the Catalog */
		(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
		if (error) goto out;

#if QUOTA
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

		cp->c_mode = 0;
		truncated = 0;    // because the catalog entry is gone
		cp->c_flag |= C_CHANGE | C_NOEXISTS;
		--cp->c_nlink;
		hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
	}

	/*
	 * All done with this cnode's descriptor...
	 *
	 * Note: all future catalog calls for this cnode must be
	 * by fileid only.  This is OK for HFS (which doesn't have
	 * file thread records) since HFS doesn't support hard
	 * links or the removal of busy files.
	 */
	cat_releasedesc(&cp->c_desc);

	/* In all three cases the parent lost a child */
	if (dcp->c_entries > 0)
		dcp->c_entries--;
	if (dcp->c_nlink > 0)
		dcp->c_nlink--;
	dcp->c_flag |= C_CHANGE | C_UPDATE;
	tv = time;
	(void) VOP_UPDATE(dvp, &tv, &tv, 0);
	HFS_KNOTE(dvp, NOTE_WRITE);

out:
	/* All done with component name... */
	if ((options & HFSRM_SAVE_NAME) == 0 &&
	    (cnp != 0) &&
	    (cnp->cn_flags & (HASBUF | SAVENAME)) == (HASBUF | SAVENAME)) {
		char *tmp = cnp->cn_pnbuf;
		cnp->cn_pnbuf = NULL;
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI);
	}

	if (!(options & HFSRM_SKIP_RESERVE)) {
		cat_postflight(hfsmp, &cookie, p);
	}

	/* Commit the truncation to the catalog record */
	if (truncated) {
		cp->c_flag |= C_CHANGE | C_UPDATE | C_FORCEUPDATE;
		tv = time;
		(void) VOP_UPDATE(vp, &tv, &tv, 0);
	}

	// XXXdbg
	if (started_tr) {
		journal_end_transaction(hfsmp->jnl);
	}
	if (grabbed_lock) {
		hfs_global_shared_lock_release(hfsmp);
	}

	HFS_KNOTE(vp, NOTE_DELETE);
	if (rvp) {
		HFS_KNOTE(rvp, NOTE_DELETE);
		vrele(rvp);
	};

	if (error) {
		vput(vp);
	} else {
		VOP_UNLOCK(vp, 0, p);
		// XXXdbg - try to prevent the lost ubc_info panic
		if ((cp->c_flag & C_HARDLINK) == 0 || cp->c_nlink == 0) {
			(void) ubc_uncache(vp);
		}
		vrele(vp);
	}
	if (!(options & HFSRM_PARENT_LOCKED)) {
		vput(dvp);
	}

	return (error);
}
2081
2082
2083 __private_extern__ void
2084 replace_desc(struct cnode *cp, struct cat_desc *cdp)
2085 {
2086 /* First release allocated name buffer */
2087 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
2088 char *name = cp->c_desc.cd_nameptr;
2089
2090 cp->c_desc.cd_nameptr = 0;
2091 cp->c_desc.cd_namelen = 0;
2092 cp->c_desc.cd_flags &= ~CD_HASBUF;
2093 remove_name(name);
2094 }
2095 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
2096
2097 /* Cnode now owns the name buffer */
2098 cdp->cd_nameptr = 0;
2099 cdp->cd_namelen = 0;
2100 cdp->cd_flags &= ~CD_HASBUF;
2101 }
2102
2103
2104 /*
2105 #
2106 #% rename fdvp U U U
2107 #% rename fvp U U U
2108 #% rename tdvp L U U
2109 #% rename tvp X U U
2110 #
2111 */
2112 /*
2113 * Rename a cnode.
2114 *
2115 * The VFS layer guarantees that source and destination will
2116 * either both be directories, or both not be directories.
2117 *
2118 * When the target is a directory, hfs_rename must ensure
2119 * that it is empty.
2120 *
2121 * The rename system call is responsible for freeing
2122 * the pathname buffers (ie no need to call VOP_ABORTOP).
2123 */
2124
2125 static int
2126 hfs_rename(ap)
2127 struct vop_rename_args /* {
2128 struct vnode *a_fdvp;
2129 struct vnode *a_fvp;
2130 struct componentname *a_fcnp;
2131 struct vnode *a_tdvp;
2132 struct vnode *a_tvp;
2133 struct componentname *a_tcnp;
2134 } */ *ap;
2135 {
2136 struct vnode *tvp = ap->a_tvp;
2137 struct vnode *tdvp = ap->a_tdvp;
2138 struct vnode *fvp = ap->a_fvp;
2139 struct vnode *fdvp = ap->a_fdvp;
2140 struct componentname *tcnp = ap->a_tcnp;
2141 struct componentname *fcnp = ap->a_fcnp;
2142 struct proc *p = fcnp->cn_proc;
2143 struct cnode *fcp = NULL;
2144 struct cnode *fdcp = NULL;
2145 struct cnode *tdcp = VTOC(tdvp);
2146 struct cat_desc from_desc;
2147 struct cat_desc to_desc;
2148 struct cat_desc out_desc;
2149 struct hfsmount *hfsmp = NULL;
2150 struct timeval tv;
2151 cat_cookie_t cookie = {0};
2152 int fdvp_locked, fvp_locked, tdvp_locked, tvp_locked;
2153 int tvp_deleted;
2154 int started_tr = 0, grabbed_lock = 0;
2155 int error = 0;
2156
2157
2158 /* Establish our vnode lock state. */
2159 tdvp_locked = 1;
2160 tvp_locked = (tvp != 0);
2161 fdvp_locked = 0;
2162 fvp_locked = 0;
2163 tvp_deleted = 0;
2164
2165 /*
2166 * Check for cross-device rename.
2167 */
2168 if ((fvp->v_mount != tdvp->v_mount) ||
2169 (tvp && (fvp->v_mount != tvp->v_mount))) {
2170 error = EXDEV;
2171 goto out;
2172 }
2173
2174 /*
2175 * When fvp matches tvp they must be case variants
2176 * or hard links.
2177 *
2178 * In some cases tvp will be locked in other cases
2179 * it be unlocked with no reference. Normalize the
2180 * state here (unlocked with a reference) so that
2181 * we can exit in a known state.
2182 */
2183 if (fvp == tvp) {
2184 if (VOP_ISLOCKED(tvp) &&
2185 (VTOC(tvp)->c_lock.lk_lockholder == p->p_pid) &&
2186 (VTOC(tvp)->c_lock.lk_lockthread == current_thread())) {
2187 vput(tvp);
2188 }
2189 tvp = NULL;
2190 tvp_locked = 0;
2191
2192 /*
2193 * If this a hard link with different parents
2194 * and its not a case variant then keep tvp
2195 * around for removal.
2196 */
2197 if ((VTOC(fvp)->c_flag & C_HARDLINK) &&
2198 ((fdvp != tdvp) ||
2199 (hfs_namecmp(fcnp->cn_nameptr, fcnp->cn_namelen,
2200 tcnp->cn_nameptr, tcnp->cn_namelen) != 0))) {
2201 tvp = fvp;
2202 vref(tvp);
2203 }
2204 }
2205
2206 /*
2207 * The following edge case is caught here:
2208 * (to cannot be a descendent of from)
2209 *
2210 * o fdvp
2211 * /
2212 * /
2213 * o fvp
2214 * \
2215 * \
2216 * o tdvp
2217 * /
2218 * /
2219 * o tvp
2220 */
2221 if (tdcp->c_parentcnid == VTOC(fvp)->c_cnid) {
2222 error = EINVAL;
2223 goto out;
2224 }
2225
2226 /*
2227 * The following two edge cases are caught here:
2228 * (note tvp is not empty)
2229 *
2230 * o tdvp o tdvp
2231 * / /
2232 * / /
2233 * o tvp tvp o fdvp
2234 * \ \
2235 * \ \
2236 * o fdvp o fvp
2237 * /
2238 * /
2239 * o fvp
2240 */
2241 if (tvp && (tvp->v_type == VDIR) && (VTOC(tvp)->c_entries != 0)) {
2242 error = ENOTEMPTY;
2243 goto out;
2244 }
2245
2246 /*
2247 * The following edge case is caught here:
2248 * (the from child and parent are the same)
2249 *
2250 * o tdvp
2251 * /
2252 * /
2253 * fdvp o fvp
2254 */
2255 if (fdvp == fvp) {
2256 error = EINVAL;
2257 goto out;
2258 }
2259
2260 /*
2261 * Make sure "from" vnode and its parent are changeable.
2262 */
2263 if ((VTOC(fvp)->c_flags & (IMMUTABLE | APPEND)) ||
2264 (VTOC(fdvp)->c_flags & APPEND)) {
2265 error = EPERM;
2266 goto out;
2267 }
2268
2269 hfsmp = VTOHFS(tdvp);
2270
2271 /*
2272 * If the destination parent directory is "sticky", then the
2273 * user must own the parent directory, or the destination of
2274 * the rename, otherwise the destination may not be changed
2275 * (except by root). This implements append-only directories.
2276 *
2277 * Note that checks for immutable and write access are done
2278 * by the call to VOP_REMOVE.
2279 */
2280 if (tvp && (tdcp->c_mode & S_ISTXT) &&
2281 (tcnp->cn_cred->cr_uid != 0) &&
2282 (tcnp->cn_cred->cr_uid != tdcp->c_uid) &&
2283 (hfs_owner_rights(hfsmp, VTOC(tvp)->c_uid, tcnp->cn_cred, p, false)) ) {
2284 error = EPERM;
2285 goto out;
2286 }
2287
2288 #if QUOTA
2289 if (tvp)
2290 (void)hfs_getinoquota(VTOC(tvp));
2291 #endif
2292
2293 /*
2294 * Lock all the vnodes before starting a journal transaction.
2295 */
2296
2297 /*
2298 * Simple case (same parent) - just lock child (fvp).
2299 */
2300 if (fdvp == tdvp) {
2301 if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))
2302 goto out;
2303 fvp_locked = 1;
2304 goto vnlocked;
2305 }
2306
2307 /*
2308 * If fdvp is the parent of tdvp then we'll need to
2309 * drop tdvp's lock before acquiring a lock on fdvp.
2310 *
2311 * fdvp
2312 * o
2313 * / \
2314 * / \
2315 * tdvp o o fvp
2316 * \
2317 * \
2318 * o tvp
2319 *
2320 *
2321 * If the parent directories are unrelated then we'll
2322 * need to aquire their vnode locks in vnode address
2323 * order. Otherwise we can race with another rename
2324 * call that involves the same vnodes except that to
2325 * and from are switched and potentially deadlock.
2326 * [ie rename("a/b", "c/d") vs rename("c/d", "a/b")]
2327 *
2328 * If its not either of the two above cases then we
2329 * can safely lock fdvp and fvp.
2330 */
2331 if ((VTOC(fdvp)->c_cnid == VTOC(tdvp)->c_parentcnid) ||
2332 ((VTOC(tdvp)->c_cnid != VTOC(fdvp)->c_parentcnid) &&
2333 (fdvp < tdvp))) {
2334
2335 /* Drop locks on tvp and tdvp */
2336 if (tvp_locked) {
2337 VOP_UNLOCK(tvp, 0, p);
2338 tvp_locked = 0;
2339 }
2340 VOP_UNLOCK(tdvp, 0, p);
2341 tdvp_locked = 0;
2342
2343 /* Aquire locks in correct order */
2344 if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2345 goto out;
2346 fdvp_locked = 1;
2347 if ((error = vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2348 goto out;
2349 tdvp_locked = 1;
2350
2351 /*
2352 * Now that the parents are locked only one thread
2353 * can continue. So the lock order of the children
2354 * doesn't really matter
2355 */
2356 if (tvp == fvp) {
2357 if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p)))
2358 goto out;
2359 tvp_locked = 1;
2360 } else {
2361 if (tvp) {
2362 if ((error = vn_lock(tvp, LK_EXCLUSIVE | LK_RETRY, p)))
2363 goto out;
2364 tvp_locked = 1;
2365 }
2366 if ((error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p)))
2367 goto out;
2368 fvp_locked = 1;
2369 }
2370
2371 } else /* OK to lock fdvp and fvp */ {
2372 if ((error = vn_lock(fdvp, LK_EXCLUSIVE | LK_RETRY, p)))
2373 goto out;
2374 fdvp_locked = 1;
2375 if (error = vn_lock(fvp, LK_EXCLUSIVE | LK_RETRY, p))
2376 goto out;
2377 if (tvp == fvp)
2378 tvp_locked = 1;
2379 else
2380 fvp_locked = 1;
2381 }
2382
2383 vnlocked:
2384 fdcp = VTOC(fdvp);
2385 fcp = VTOC(fvp);
2386
2387 /*
2388 * While fvp is still locked, purge it from the name cache and
2389 * grab it's c_cnid value. Note that the removal of tvp (below)
2390 * can drop fvp's lock when fvp == tvp.
2391 */
2392 cache_purge(fvp);
2393
2394 /*
2395 * When a file moves out of "Cleanup At Startup"
2396 * we can drop its NODUMP status.
2397 */
2398 if ((fcp->c_flags & UF_NODUMP) &&
2399 (fvp->v_type == VREG) &&
2400 (fdvp != tdvp) &&
2401 (fdcp->c_desc.cd_nameptr != NULL) &&
2402 (strcmp(fdcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) {
2403 fcp->c_flags &= ~UF_NODUMP;
2404 fcp->c_flag |= C_CHANGE;
2405 tv = time;
2406 (void) VOP_UPDATE(fvp, &tv, &tv, 0);
2407 }
2408
2409 bzero(&from_desc, sizeof(from_desc));
2410 from_desc.cd_nameptr = fcnp->cn_nameptr;
2411 from_desc.cd_namelen = fcnp->cn_namelen;
2412 from_desc.cd_parentcnid = fdcp->c_cnid;
2413 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2414 from_desc.cd_cnid = fcp->c_cnid;
2415
2416 bzero(&to_desc, sizeof(to_desc));
2417 to_desc.cd_nameptr = tcnp->cn_nameptr;
2418 to_desc.cd_namelen = tcnp->cn_namelen;
2419 to_desc.cd_parentcnid = tdcp->c_cnid;
2420 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2421 to_desc.cd_cnid = fcp->c_cnid;
2422
2423 hfs_global_shared_lock_acquire(hfsmp);
2424 grabbed_lock = 1;
2425 if (hfsmp->jnl) {
2426 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
2427 goto out;
2428 }
2429 started_tr = 1;
2430 }
2431
2432 /*
2433 * Reserve some space in the Catalog file.
2434 */
2435 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
2436 goto out;
2437 }
2438
2439 /*
2440 * If the destination exists then it needs to be removed.
2441 */
2442
2443 if (tvp) {
2444 if (tvp != fvp)
2445 cache_purge(tvp);
2446 /*
2447 * Note that hfs_removedir and hfs_removefile
2448 * will keep tdvp locked with a reference.
2449 * But tvp will lose its lock and reference.
2450 */
2451 if (tvp->v_type == VDIR)
2452 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS);
2453 else
2454 error = hfs_removefile(tdvp, tvp, tcnp, HFSRM_RENAMEOPTS);
2455
2456 if (tvp == fvp)
2457 fvp_locked = 0;
2458 tvp = NULL;
2459 tvp_locked = 0;
2460 tvp_deleted = 1;
2461 if (error)
2462 goto out;
2463 }
2464
2465 /*
2466 * All done with tvp and fvp
2467 */
2468
2469 /* Lock catalog b-tree */
2470 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_EXCLUSIVE, p);
2471 if (error)
2472 goto out;
2473
2474 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
2475
2476 /* Unlock catalog b-tree */
2477 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
2478
2479 if (error) {
2480 goto out;
2481 }
2482
2483 /* Update cnode's catalog descriptor */
2484 if (fvp_locked) {
2485 replace_desc(fcp, &out_desc);
2486 fcp->c_parentcnid = tdcp->c_cnid;
2487 fcp->c_hint = 0;
2488 }
2489
2490 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_RMDIR : VOL_RMFILE,
2491 (fdcp->c_cnid == kHFSRootFolderID));
2492 hfs_volupdate(hfsmp, fvp->v_type == VDIR ? VOL_MKDIR : VOL_MKFILE,
2493 (tdcp->c_cnid == kHFSRootFolderID));
2494
2495 /* Update both parent directories. */
2496 tv = time;
2497 if (fdvp != tdvp) {
2498 tdcp->c_nlink++;
2499 tdcp->c_entries++;
2500 if (fdcp->c_nlink > 0)
2501 fdcp->c_nlink--;
2502 if (fdcp->c_entries > 0)
2503 fdcp->c_entries--;
2504 fdcp->c_flag |= C_CHANGE | C_UPDATE;
2505 (void) VOP_UPDATE(fdvp, &tv, &tv, 0);
2506 }
2507 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2508 tdcp->c_flag |= C_CHANGE | C_UPDATE;
2509 (void) VOP_UPDATE(tdvp, &tv, &tv, 0);
2510
2511 out:
2512 if (hfsmp) {
2513 cat_postflight(hfsmp, &cookie, p);
2514 }
2515 if (started_tr) {
2516 journal_end_transaction(hfsmp->jnl);
2517 }
2518 if (grabbed_lock) {
2519 hfs_global_shared_lock_release(hfsmp);
2520 }
2521
2522 /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
2523 generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
2524 */
2525 if (error == 0) {
2526 HFS_KNOTE(fvp, NOTE_RENAME);
2527 HFS_KNOTE(fdvp, NOTE_WRITE);
2528 if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE);
2529 };
2530 if (fvp_locked) {
2531 VOP_UNLOCK(fvp, 0, p);
2532 }
2533 if (fdvp_locked) {
2534 VOP_UNLOCK(fdvp, 0, p);
2535 }
2536 if (tdvp_locked) {
2537 VOP_UNLOCK(tdvp, 0, p);
2538 }
2539 if (tvp_locked) {
2540 VOP_UNLOCK(tvp, 0, p);
2541 }
2542
2543 vrele(fvp);
2544 vrele(fdvp);
2545 if (tvp)
2546 vrele(tvp);
2547 vrele(tdvp);
2548
2549 /* After tvp is removed the only acceptable error is EIO */
2550 if (error && tvp_deleted)
2551 error = EIO;
2552
2553 return (error);
2554 }
2555
2556
2557
2558 /*
2559 * Mkdir system call
2560 #% mkdir dvp L U U
2561 #% mkdir vpp - L -
2562 #
2563 vop_mkdir {
2564 IN WILLRELE struct vnode *dvp;
2565 OUT struct vnode **vpp;
2566 IN struct componentname *cnp;
2567 IN struct vattr *vap;
2568
2569 We are responsible for freeing the namei buffer,
2570 it is done in hfs_makenode()
2571 */
2572
2573 static int
2574 hfs_mkdir(ap)
2575 struct vop_mkdir_args /* {
2576 struct vnode *a_dvp;
2577 struct vnode **a_vpp;
2578 struct componentname *a_cnp;
2579 struct vattr *a_vap;
2580 } */ *ap;
2581 {
2582 struct vattr *vap = ap->a_vap;
2583
2584 return (hfs_makenode(MAKEIMODE(vap->va_type, vap->va_mode),
2585 ap->a_dvp, ap->a_vpp, ap->a_cnp));
2586 }
2587
2588
2589 /*
2590 * symlink -- make a symbolic link
2591 #% symlink dvp L U U
2592 #% symlink vpp - U -
2593 #
2594 # XXX - note that the return vnode has already been VRELE'ed
2595 # by the filesystem layer. To use it you must use vget,
2596 # possibly with a further namei.
2597 #
2598 vop_symlink {
2599 IN WILLRELE struct vnode *dvp;
2600 OUT WILLRELE struct vnode **vpp;
2601 IN struct componentname *cnp;
2602 IN struct vattr *vap;
2603 IN char *target;
2604
2605 We are responsible for freeing the namei buffer,
2606 it is done in hfs_makenode().
2607
2608 */
2609
/*
 * Make a symbolic link (VOP_SYMLINK).
 *
 * Rejected outright on HFS standard (non-Plus) volumes and for empty
 * targets.  Otherwise the link cnode is created via hfs_makenode(),
 * space for the target path is allocated with VOP_TRUNCATE, and the
 * path bytes are written into the link's data fork -- through the
 * journal when one is active.
 *
 * On return both dvp and the new vnode have been released (vput),
 * matching the VOP contract in the comment block above.
 */
static int
hfs_symlink(ap)
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap;
{
	register struct vnode *vp, **vpp = ap->a_vpp;
	struct hfsmount *hfsmp;
	struct filefork *fp;
	int len, error;
	struct buf *bp = NULL;

	/* HFS standard disks don't support symbolic links */
	if (VTOVCB(ap->a_dvp)->vcbSigWord != kHFSPlusSigWord) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EOPNOTSUPP);
	}

	/* Check for empty target name */
	if (ap->a_target[0] == 0) {
		VOP_ABORTOP(ap->a_dvp, ap->a_cnp);
		vput(ap->a_dvp);
		return (EINVAL);
	}


	hfsmp = VTOHFS(ap->a_dvp);

	/* Create the vnode (hfs_makenode frees the namei buffer) */
	if ((error = hfs_makenode(S_IFLNK | ap->a_vap->va_mode,
	                          ap->a_dvp, vpp, ap->a_cnp))) {
		return (error);
	}

	vp = *vpp;
	len = strlen(ap->a_target);
	fp = VTOF(vp);

#if QUOTA
	(void)hfs_getinoquota(VTOC(vp));
#endif /* QUOTA */

	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			hfs_global_shared_lock_release(hfsmp);
			vput(vp);
			return error;
		}
	}

	/* Allocate space for the link */
	error = VOP_TRUNCATE(vp, len, IO_NOZEROFILL,
	                     ap->a_cnp->cn_cred, ap->a_cnp->cn_proc);
	if (error)
		goto out;	/* XXX need to remove link */

	/* Write the link to disk (rounded up to a physical block) */
	bp = getblk(vp, 0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
			0, 0, BLK_META);
	if (hfsmp->jnl) {
		journal_modify_block_start(hfsmp->jnl, bp);
	}
	bzero(bp->b_data, bp->b_bufsize);
	bcopy(ap->a_target, bp->b_data, len);
	if (hfsmp->jnl) {
		/*
		 * NOTE(review): in the journaled case bp is handed to
		 * journal_modify_block_end and not written here --
		 * confirm the journal layer is responsible for
		 * writing/releasing the buffer.
		 */
		journal_modify_block_end(hfsmp->jnl, bp);
	} else {
		bawrite(bp);
	}
out:
	if (hfsmp->jnl) {
		journal_end_transaction(hfsmp->jnl);
	}
	hfs_global_shared_lock_release(hfsmp);
	vput(vp);
	return (error);
}
2694
2695
2696 /*
2697 * Dummy dirents to simulate the "." and ".." entries of the directory
2698 * in a hfs filesystem. HFS doesn't provide these on disk. Note that
2699 * the size of these entries is the smallest needed to represent them
2700 * (only 12 byte each).
2701 */
/*
 * NOTE(review): the d_fileno fields below are placeholders -- they are
 * overwritten per-call in hfs_readdir() with the directory's own cnid
 * and its parent's cnid before being copied out.  This is shared
 * mutable static state; presumably serialized by the directory vnode
 * lock -- confirm that concurrent readdirs cannot race on it.
 */
static hfsdotentry rootdots[2] = {
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		1,				/* d_namlen */
		"."				/* d_name */
	},
	{
		1,				/* d_fileno */
		sizeof(struct hfsdotentry),	/* d_reclen */
		DT_DIR,				/* d_type */
		2,				/* d_namlen */
		".."				/* d_name */
	}
};
2718
2719 /* 4.3 Note:
2720 * There is some confusion as to what the semantics of uio_offset are.
2721 * In ufs, it represents the actual byte offset within the directory
2722 * "file." HFS, however, just uses it as an entry counter - essentially
2723 * assuming that it has no meaning except to the hfs_readdir function.
2724 * This approach would be more efficient here, but some callers may
2725 * assume the uio_offset acts like a byte offset. NFS in fact
2726 * monkeys around with the offset field a lot between readdir calls.
2727 *
2728 * The use of the resid uiop->uio_resid and uiop->uio_iov->iov_len
2729 * fields is a mess as well. The libc function readdir() returns
2730 * NULL (indicating the end of a directory) when either
2731 * the getdirentries() syscall (which calls this and returns
2732 * the size of the buffer passed in less the value of uiop->uio_resid)
2733 * returns 0, or a direct record with a d_reclen of zero.
2734 * nfs_server.c:rfs_readdir(), on the other hand, checks for the end
2735 * of the directory by testing uiop->uio_resid == 0. The solution
2736 * is to pad the size of the last struct direct in a given
2737 * block to fill the block if we are not at the end of the directory.
2738 */
2739
2740
/*
 * NOTE: We require a minimal buffer size of DIRBLKSIZ for two reasons. One, it is the same value
 * returned by the stat() call as the block size. This is mentioned in the man page for getdirentries():
 * "Nbytes must be greater than or equal to the block size associated with the file,
 * see stat(2)". Might as well settle on the same size as ufs. Second, this makes sure there is enough
 * room for the . and .. entries that have to be added manually.
 */
2748
2749 /*
2750 #% readdir vp L L L
2751 #
2752 vop_readdir {
2753 IN struct vnode *vp;
2754 INOUT struct uio *uio;
2755 IN struct ucred *cred;
2756 INOUT int *eofflag;
2757 OUT int *ncookies;
2758 INOUT u_long **cookies;
2759 */
/*
 * Read directory entries (VOP_READDIR).
 *
 * Synthesizes the "." and ".." entries (HFS does not store them on
 * disk), then fills the rest of the caller's buffer from the catalog
 * via cat_getdirentries().  uio_offset is treated as a synthetic byte
 * offset into a packed array of dirents (see the comment blocks above
 * this function).  Cookie handling exists solely for the NFS server.
 */
static int
hfs_readdir(ap)
	struct vop_readdir_args /* {
		struct vnode *vp;
		struct uio *uio;
		struct ucred *cred;
		int *eofflag;
		int *ncookies;
		u_long **cookies;
	} */ *ap;
{
	register struct uio *uio = ap->a_uio;
	struct cnode *cp = VTOC(ap->a_vp);
	struct hfsmount *hfsmp = VTOHFS(ap->a_vp);
	struct proc *p = current_proc();
	off_t off = uio->uio_offset;
	int retval = 0;
	int eofflag = 0;
	void *user_start = NULL;
	int user_len;

	int ncookies=0;
	u_long *cookies=NULL;
	u_long *cookiep=NULL;	/* NOTE(review): appears unused in this function */

	/* We assume it's all one big buffer... */
	if (uio->uio_iovcnt > 1 || uio->uio_resid < AVERAGE_HFSDIRENTRY_SIZE)
		return EINVAL;

	// XXXdbg
	// We have to lock the user's buffer here so that we won't
	// fault on it after we've acquired a shared lock on the
	// catalog file. The issue is that you can get a 3-way
	// deadlock if someone else starts a transaction and then
	// tries to lock the catalog file but can't because we're
	// here and we can't service our page fault because VM is
	// blocked trying to start a transaction as a result of
	// trying to free up pages for our page fault. It's messy
	// but it does happen on dual-processors that are paging
	// heavily (see radar 3082639 for more info). By locking
	// the buffer up-front we prevent ourselves from faulting
	// while holding the shared catalog file lock.
	//
	// Fortunately this and hfs_search() are the only two places
	// currently (10/30/02) that can fault on user data with a
	// shared lock on the catalog file.
	//
	if (hfsmp->jnl && uio->uio_segflg == UIO_USERSPACE) {
		user_start = uio->uio_iov->iov_base;
		user_len = uio->uio_iov->iov_len;

		if ((retval = vslock(user_start, user_len)) != 0) {
			return retval;
		}
	}

	/* Create the entries for . and .. */
	if (uio->uio_offset < sizeof(rootdots)) {
		caddr_t dep;
		size_t dotsize;

		/* Patch the shared static templates with this directory's ids */
		rootdots[0].d_fileno = cp->c_cnid;
		rootdots[1].d_fileno = cp->c_parentcnid;

		if (uio->uio_offset == 0) {
			dep = (caddr_t) &rootdots[0];
			dotsize = 2* sizeof(struct hfsdotentry);
		} else if (uio->uio_offset == sizeof(struct hfsdotentry)) {
			dep = (caddr_t) &rootdots[1];
			dotsize = sizeof(struct hfsdotentry);
		} else {
			/* Offset inside the dot area but not on an entry boundary */
			retval = EINVAL;
			goto Exit;
		}

		retval = uiomove(dep, dotsize, uio);
		if (retval != 0)
			goto Exit;
	}

	if (ap->a_ncookies != NULL) {
		/*
		 * These cookies are handles that allow NFS to restart
		 * scanning through a directory. If a directory is large
		 * enough, NFS will issue a successive readdir() with a
		 * uio->uio_offset that is equal to one of these cookies.
		 *
		 * The cookies that we generate are synthesized byte-offsets.
		 * The offset is where the dirent would be if the
		 * directory were an array of packed dirent structs. It is
		 * synthetic because that's not how directories are stored in
		 * HFS but other code expects that the cookie is a byte offset.
		 *
		 * We have to pre-allocate the cookies because cat_getdirentries()
		 * is the only one that can properly synthesize the offsets (since
		 * it may have to skip over entries and only it knows the true
		 * virtual offset of any particular directory entry). So we allocate
		 * a cookie table here and pass it in to cat_getdirentries().
		 *
		 * Note that the handling of "." and ".." is mostly done here but
		 * cat_getdirentries() is aware of them as well.
		 *
		 * Only the NFS server uses cookies so fortunately this code is
		 * not executed unless the NFS server is issuing the readdir
		 * request.
		 *
		 * Also note that the NFS server is the one responsible for
		 * free'ing the cookies even though we allocated them. Ick.
		 *
		 * We allocate a reasonable number of entries for the size of
		 * the buffer that we're going to fill in. cat_getdirentries()
		 * is smart enough to not overflow if there's more room in the
		 * buffer but not enough room in the cookie table.
		 */
		if (uio->uio_segflg != UIO_SYSSPACE)
			panic("hfs_readdir: unexpected uio from NFS server");

		ncookies = uio->uio_iov->iov_len / (AVERAGE_HFSDIRENTRY_SIZE/2);
		MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP, M_WAITOK);

		*ap->a_ncookies = ncookies;
		*ap->a_cookies = cookies;

		/* handle cookies for "." and ".." */
		if (off == 0) {
			cookies[0] = 0;
			cookies[1] = sizeof(struct hfsdotentry);
		} else if (off == sizeof(struct hfsdotentry)) {
			cookies[0] = sizeof(struct hfsdotentry);
		}
	}

	/* If there are no children then we're done */
	if (cp->c_entries == 0) {
		eofflag = 1;
		retval = 0;
		if (cookies) {
			cookies[0] = 0;
			cookies[1] = sizeof(struct hfsdotentry);
		}
		goto Exit;
	}

	/* Lock catalog b-tree (shared: readers only) */
	retval = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (retval) goto Exit;

	retval = cat_getdirentries(hfsmp, &cp->c_desc, cp->c_entries, uio, &eofflag, cookies, ncookies);

	/* Unlock catalog b-tree */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (retval != E_NONE) {
		goto Exit;
	}

	/* were we already past eof ? (offset unchanged means nothing was copied) */
	if (uio->uio_offset == off) {
		retval = E_NONE;
		goto Exit;
	}

	cp->c_flag |= C_ACCESS;

Exit:;
	if (hfsmp->jnl && user_start) {
		vsunlock(user_start, user_len, TRUE);
	}

	if (ap->a_eofflag)
		*ap->a_eofflag = eofflag;

	return (retval);
}
2934
2935
2936 /*
2937 * Return target name of a symbolic link
2938 #% readlink vp L L L
2939 #
2940 vop_readlink {
2941 IN struct vnode *vp;
2942 INOUT struct uio *uio;
2943 IN struct ucred *cred;
2944 */
2945
/*
 * Return the target of a symbolic link (VOP_READLINK).
 *
 * The target path is read from the link's data fork on first use and
 * cached in fp->ff_symlinkptr; subsequent calls copy from the cache.
 * A zero-length or over-long on-disk link marks the volume damaged
 * and fails with EINVAL.
 */
static int
hfs_readlink(ap)
	struct vop_readlink_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		struct ucred *a_cred;
	} */ *ap;
{
	int retval;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp;
	struct filefork *fp;

	if (vp->v_type != VLNK)
		return (EINVAL);

	cp = VTOC(vp);
	fp = VTOF(vp);

	/* Zero length sym links are not allowed */
	if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
		VTOVCB(vp)->vcbFlags |= kHFS_DamagedVolume;
		return (EINVAL);
	}

	/* Cache the path so we don't waste buffer cache resources */
	if (fp->ff_symlinkptr == NULL) {
		struct buf *bp = NULL;

		MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
		retval = meta_bread(vp, 0,
		                    roundup((int)fp->ff_size,
		                            VTOHFS(vp)->hfs_phys_block_size),
		                    ap->a_cred, &bp);
		if (retval) {
			/* Read failed: release the buffer and drop the cache */
			if (bp)
				brelse(bp);
			if (fp->ff_symlinkptr) {
				FREE(fp->ff_symlinkptr, M_TEMP);
				fp->ff_symlinkptr = NULL;
			}
			return (retval);
		}
		bcopy(bp->b_data, fp->ff_symlinkptr, (size_t)fp->ff_size);
		if (bp) {
			if (VTOHFS(vp)->jnl && (bp->b_flags & B_LOCKED) == 0) {
				bp->b_flags |= B_INVAL;		/* data no longer needed */
			}
			brelse(bp);
		}
	}
	retval = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
#if 1
	/*
	 * Keep track of the bytes read (hfc_* sampling state)
	 */
	if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (retval == 0)) {

		/*
		 * If this file hasn't been seen since the start of
		 * the current sampling period then start over.
		 */
		if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
			VTOF(vp)->ff_bytesread = fp->ff_size;
		else
			VTOF(vp)->ff_bytesread += fp->ff_size;

	//	if (VTOF(vp)->ff_bytesread > fp->ff_size)
	//		cp->c_flag |= C_ACCESS;
	}
#endif
	return (retval);
}
3019
3020 /*
3021 * Lock an cnode. If its already locked, set the WANT bit and sleep.
3022 #% lock vp U L U
3023 #
3024 vop_lock {
3025 IN struct vnode *vp;
3026 IN int flags;
3027 IN struct proc *p;
3028 */
3029
3030 static int
3031 hfs_lock(ap)
3032 struct vop_lock_args /* {
3033 struct vnode *a_vp;
3034 int a_flags;
3035 struct proc *a_p;
3036 } */ *ap;
3037 {
3038 struct vnode *vp = ap->a_vp;
3039 struct cnode *cp = VTOC(vp);
3040
3041 return (lockmgr(&cp->c_lock, ap->a_flags, &vp->v_interlock, ap->a_p));
3042 }
3043
3044 /*
3045 * Unlock an cnode.
3046 #% unlock vp L U L
3047 #
3048 vop_unlock {
3049 IN struct vnode *vp;
3050 IN int flags;
3051 IN struct proc *p;
3052
3053 */
3054 static int
3055 hfs_unlock(ap)
3056 struct vop_unlock_args /* {
3057 struct vnode *a_vp;
3058 int a_flags;
3059 struct proc *a_p;
3060 } */ *ap;
3061 {
3062 struct vnode *vp = ap->a_vp;
3063 struct cnode *cp = VTOC(vp);
3064 #if 0
3065 if (!lockstatus(&cp->c_lock)) {
3066 printf("hfs_unlock: vnode %s wasn't locked!\n",
3067 cp->c_desc.cd_nameptr ? cp->c_desc.cd_nameptr : "");
3068 }
3069 #endif
3070 return (lockmgr(&cp->c_lock, ap->a_flags | LK_RELEASE,
3071 &vp->v_interlock, ap->a_p));
3072 }
3073
3074
3075 /*
3076 * Print out the contents of a cnode.
3077 #% print vp = = =
3078 #
3079 vop_print {
3080 IN struct vnode *vp;
3081 */
3082 static int
3083 hfs_print(ap)
3084 struct vop_print_args /* {
3085 struct vnode *a_vp;
3086 } */ *ap;
3087 {
3088 struct vnode * vp = ap->a_vp;
3089 struct cnode *cp = VTOC(vp);
3090
3091 printf("tag VT_HFS, cnid %d, on dev %d, %d", cp->c_cnid,
3092 major(cp->c_dev), minor(cp->c_dev));
3093 #if FIFO
3094 if (vp->v_type == VFIFO)
3095 fifo_printinfo(vp);
3096 #endif /* FIFO */
3097 lockmgr_printinfo(&cp->c_lock);
3098 printf("\n");
3099 return (0);
3100 }
3101
3102
3103 /*
3104 * Check for a locked cnode.
3105 #% islocked vp = = =
3106 #
3107 vop_islocked {
3108 IN struct vnode *vp;
3109
3110 */
3111 static int
3112 hfs_islocked(ap)
3113 struct vop_islocked_args /* {
3114 struct vnode *a_vp;
3115 } */ *ap;
3116 {
3117 return (lockstatus(&VTOC(ap->a_vp)->c_lock));
3118 }
3119
3120 /*
3121
3122 #% pathconf vp L L L
3123 #
3124 vop_pathconf {
3125 IN struct vnode *vp;
3126 IN int name;
3127 OUT register_t *retval;
3128
3129 */
3130 static int
3131 hfs_pathconf(ap)
3132 struct vop_pathconf_args /* {
3133 struct vnode *a_vp;
3134 int a_name;
3135 int *a_retval;
3136 } */ *ap;
3137 {
3138 int retval = 0;
3139
3140 switch (ap->a_name) {
3141 case _PC_LINK_MAX:
3142 if (VTOVCB(ap->a_vp)->vcbSigWord == kHFSPlusSigWord)
3143 *ap->a_retval = HFS_LINK_MAX;
3144 else
3145 *ap->a_retval = 1;
3146 break;
3147 case _PC_NAME_MAX:
3148 *ap->a_retval = kHFSPlusMaxFileNameBytes; /* max # of characters x max utf8 representation */
3149 break;
3150 case _PC_PATH_MAX:
3151 *ap->a_retval = PATH_MAX; /* 1024 */
3152 break;
3153 case _PC_PIPE_BUF:
3154 *ap->a_retval = PIPE_BUF;
3155 break;
3156 case _PC_CHOWN_RESTRICTED:
3157 *ap->a_retval = 1;
3158 break;
3159 case _PC_NO_TRUNC:
3160 *ap->a_retval = 0;
3161 break;
3162 case _PC_NAME_CHARS_MAX:
3163 *ap->a_retval = kHFSPlusMaxFileNameChars;
3164 break;
3165 case _PC_CASE_SENSITIVE:
3166 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
3167 *ap->a_retval = 1;
3168 else
3169 *ap->a_retval = 0;
3170 break;
3171 case _PC_CASE_PRESERVING:
3172 *ap->a_retval = 1;
3173 break;
3174 default:
3175 retval = EINVAL;
3176 }
3177
3178 return (retval);
3179 }
3180
3181
3182 /*
3183 * Advisory record locking support
3184 #% advlock vp U U U
3185 #
3186 vop_advlock {
3187 IN struct vnode *vp;
3188 IN caddr_t id;
3189 IN int op;
3190 IN struct flock *fl;
3191 IN int flags;
3192
3193 */
3194 static int
3195 hfs_advlock(ap)
3196 struct vop_advlock_args /* {
3197 struct vnode *a_vp;
3198 caddr_t a_id;
3199 int a_op;
3200 struct flock *a_fl;
3201 int a_flags;
3202 } */ *ap;
3203 {
3204 struct vnode *vp = ap->a_vp;
3205 struct flock *fl = ap->a_fl;
3206 struct hfslockf *lock;
3207 struct filefork *fork;
3208 off_t start, end;
3209 int retval;
3210
3211 /* Only regular files can have locks */
3212 if (vp->v_type != VREG)
3213 return (EISDIR);
3214
3215 fork = VTOF(ap->a_vp);
3216 /*
3217 * Avoid the common case of unlocking when cnode has no locks.
3218 */
3219 if (fork->ff_lockf == (struct hfslockf *)0) {
3220 if (ap->a_op != F_SETLK) {
3221 fl->l_type = F_UNLCK;
3222 return (0);
3223 }
3224 }
3225 /*
3226 * Convert the flock structure into a start and end.
3227 */
3228 start = 0;
3229 switch (fl->l_whence) {
3230 case SEEK_SET:
3231 case SEEK_CUR:
3232 /*
3233 * Caller is responsible for adding any necessary offset
3234 * when SEEK_CUR is used.
3235 */
3236 start = fl->l_start;
3237 break;
3238 case SEEK_END:
3239 start = fork->ff_size + fl->l_start;
3240 break;
3241 default:
3242 return (EINVAL);
3243 }
3244
3245 if (fl->l_len == 0)
3246 end = -1;
3247 else if (fl->l_len > 0)
3248 end = start + fl->l_len - 1;
3249 else { /* l_len is negative */
3250 end = start - 1;
3251 start += fl->l_len;
3252 }
3253 if (start < 0)
3254 return (EINVAL);
3255
3256 /*
3257 * Create the hfslockf structure
3258 */
3259 MALLOC(lock, struct hfslockf *, sizeof *lock, M_LOCKF, M_WAITOK);
3260 lock->lf_start = start;
3261 lock->lf_end = end;
3262 lock->lf_id = ap->a_id;
3263 lock->lf_fork = fork;
3264 lock->lf_type = fl->l_type;
3265 lock->lf_next = (struct hfslockf *)0;
3266 TAILQ_INIT(&lock->lf_blkhd);
3267 lock->lf_flags = ap->a_flags;
3268 /*
3269 * Do the requested operation.
3270 */
3271 switch(ap->a_op) {
3272 case F_SETLK:
3273 retval = hfs_setlock(lock);
3274 break;
3275 case F_UNLCK:
3276 retval = hfs_clearlock(lock);
3277 FREE(lock, M_LOCKF);
3278 break;
3279 case F_GETLK:
3280 retval = hfs_getlock(lock, fl);
3281 FREE(lock, M_LOCKF);
3282 break;
3283 default:
3284 retval = EINVAL;
3285 _FREE(lock, M_LOCKF);
3286 break;
3287 }
3288
3289 return (retval);
3290 }
3291
3292
3293
3294 /*
3295 * Update the access, modified, and node change times as specified
3296 * by the C_ACCESS, C_UPDATE, and C_CHANGE flags respectively. The
3297 * C_MODIFIED flag is used to specify that the node needs to be
3298 * updated but that the times have already been set. The access and
3299 * modified times are input parameters but the node change time is
3300 * always taken from the current time. If waitfor is set, then wait
3301 * for the disk write of the node to complete.
3302 */
3303 /*
3304 #% update vp L L L
3305 IN struct vnode *vp;
3306 IN struct timeval *access;
3307 IN struct timeval *modify;
3308 IN int waitfor;
3309 */
static int
hfs_update(ap)
	struct vop_update_args /* {
		struct vnode *a_vp;
		struct timeval *a_access;
		struct timeval *a_modify;
		int a_waitfor;
	} */ *ap;
{
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = VTOC(ap->a_vp);
	struct proc *p;
	struct cat_fork *dataforkp = NULL;
	struct cat_fork *rsrcforkp = NULL;
	struct cat_fork datafork;
	int updateflag;
	struct hfsmount *hfsmp;
	int error;

	hfsmp = VTOHFS(vp);

	/* XXX do we really want to clear the system cnode flags here???? */
	/* System cnodes, read-only mounts and zero-mode cnodes: just drop
	   the pending-update flags and do nothing. */
	if (((vp->v_flag & VSYSTEM) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))||
	    (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
	    (cp->c_mode == 0)) {
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE);
		return (0);
	}

	updateflag = cp->c_flag & (C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE);

	/* Nothing to update. */
	if (updateflag == 0) {
		return (0);
	}
	/* HFS standard doesn't have access times. */
	if ((updateflag == C_ACCESS) && (VTOVCB(vp)->vcbSigWord == kHFSSigWord)) {
		return (0);
	}
	if (updateflag & C_ACCESS) {
		/*
		 * When the access time is the only thing changing
		 * then make sure its sufficiently newer before
		 * committing it to disk.
		 */
		if ((updateflag == C_ACCESS) &&
		    (ap->a_access->tv_sec < (cp->c_atime + ATIME_ONDISK_ACCURACY))) {
			return (0);
		}
		cp->c_atime = ap->a_access->tv_sec;
	}
	if (updateflag & C_UPDATE) {
		cp->c_mtime = ap->a_modify->tv_sec;
		cp->c_mtime_nsec = ap->a_modify->tv_usec * 1000;
	}
	if (updateflag & C_CHANGE) {
		/* Change time always comes from the current clock */
		cp->c_ctime = time.tv_sec;
		/*
		 * HFS dates that WE set must be adjusted for DST
		 */
		if ((VTOVCB(vp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
			cp->c_ctime += 3600;
			cp->c_mtime = cp->c_ctime;
		}
	}

	if (cp->c_datafork)
		dataforkp = &cp->c_datafork->ff_data;
	if (cp->c_rsrcfork)
		rsrcforkp = &cp->c_rsrcfork->ff_data;

	p = current_proc();

	/*
	 * For delayed allocations updates are
	 * postponed until an fsync or the file
	 * gets written to disk.
	 *
	 * Deleted files can defer meta data updates until inactive.
	 *
	 * If we're ever called with the C_FORCEUPDATE flag though
	 * we have to do the update.
	 */
	if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
	    (ISSET(cp->c_flag, C_DELETED) ||
	     (dataforkp && cp->c_datafork->ff_unallocblocks) ||
	     (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
		if (updateflag & (C_CHANGE | C_UPDATE))
			hfs_volupdate(hfsmp, VOL_UPDATE, 0);
		/* Remember that an update is still owed (C_MODIFIED) */
		cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
		cp->c_flag |= C_MODIFIED;

		HFS_KNOTE(vp, NOTE_ATTRIB);

		return (0);
	}


	// XXXdbg
	hfs_global_shared_lock_acquire(hfsmp);
	if (hfsmp->jnl) {
		if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
			hfs_global_shared_lock_release(hfsmp);
			return error;
		}
	}


	/*
	 * For files with invalid ranges (holes) the on-disk
	 * field representing the size of the file (cf_size)
	 * must be no larger than the start of the first hole.
	 */
	if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
		bcopy(dataforkp, &datafork, sizeof(datafork));
		datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
		dataforkp = &datafork;
	} else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) {
		// always make sure the block count and the size
		// of the file match the number of blocks actually
		// allocated to the file on disk
		bcopy(dataforkp, &datafork, sizeof(datafork));
		// make sure that we don't assign a negative block count
		if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
			panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
			      cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
		}
		datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
		datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
		dataforkp = &datafork;
	}

	/*
	 * Lock the Catalog b-tree file.
	 * A shared lock is sufficient since an update doesn't change
	 * the tree and the lock on vp protects the cnode.
	 */
	error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
	if (error) {
		if (hfsmp->jnl) {
			journal_end_transaction(hfsmp->jnl);
		}
		hfs_global_shared_lock_release(hfsmp);
		return (error);
	}

	/* XXX - waitfor is not enforced */
	error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);

	/* Unlock the Catalog b-tree file. */
	(void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);

	if (updateflag & (C_CHANGE | C_UPDATE | C_FORCEUPDATE))
		hfs_volupdate(hfsmp, VOL_UPDATE, 0);

	/* After the updates are finished, clear the flags */
	cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_MODIFIED | C_UPDATE | C_FORCEUPDATE);

	// XXXdbg
	if (hfsmp->jnl) {
		journal_end_transaction(hfsmp->jnl);
	}
	hfs_global_shared_lock_release(hfsmp);

	HFS_KNOTE(vp, NOTE_ATTRIB);

	return (error);
}
3478
3479 /*
3480 * Allocate a new node
3481 *
3482 * Upon leaving, namei buffer must be freed.
3483 *
3484 */
3485 static int
3486 hfs_makenode(mode, dvp, vpp, cnp)
3487 int mode;
3488 struct vnode *dvp;
3489 struct vnode **vpp;
3490 struct componentname *cnp;
3491 {
3492 struct cnode *cp;
3493 struct cnode *dcp;
3494 struct vnode *tvp;
3495 struct hfsmount *hfsmp;
3496 struct timeval tv;
3497 struct proc *p;
3498 struct cat_desc in_desc, out_desc;
3499 struct cat_attr attr;
3500 cat_cookie_t cookie = {0};
3501 int error, started_tr = 0, grabbed_lock = 0;
3502 enum vtype vnodetype;
3503
3504 p = cnp->cn_proc;
3505 dcp = VTOC(dvp);
3506 hfsmp = VTOHFS(dvp);
3507 *vpp = NULL;
3508 tvp = NULL;
3509 bzero(&out_desc, sizeof(out_desc));
3510
3511 if ((mode & S_IFMT) == 0)
3512 mode |= S_IFREG;
3513 vnodetype = IFTOVT(mode);
3514
3515 /* Check if unmount in progress */
3516 if (VTOVFS(dvp)->mnt_kern_flag & MNTK_UNMOUNT) {
3517 error = EPERM;
3518 goto exit;
3519 }
3520 /* Check if were out of usable disk space. */
3521 if ((suser(cnp->cn_cred, NULL) != 0) && (hfs_freeblks(hfsmp, 1) <= 0)) {
3522 error = ENOSPC;
3523 goto exit;
3524 }
3525
3526 /* Setup the default attributes */
3527 bzero(&attr, sizeof(attr));
3528 attr.ca_mode = mode;
3529 attr.ca_nlink = vnodetype == VDIR ? 2 : 1;
3530 attr.ca_mtime = time.tv_sec;
3531 attr.ca_mtime_nsec = time.tv_usec * 1000;
3532 if ((VTOVCB(dvp)->vcbSigWord == kHFSSigWord) && gTimeZone.tz_dsttime) {
3533 attr.ca_mtime += 3600; /* Same as what hfs_update does */
3534 }
3535 attr.ca_atime = attr.ca_ctime = attr.ca_itime = attr.ca_mtime;
3536 if (VTOVFS(dvp)->mnt_flag & MNT_UNKNOWNPERMISSIONS) {
3537 attr.ca_uid = hfsmp->hfs_uid;
3538 attr.ca_gid = hfsmp->hfs_gid;
3539 } else {
3540 if (vnodetype == VLNK)
3541 attr.ca_uid = dcp->c_uid;
3542 else
3543 attr.ca_uid = cnp->cn_cred->cr_uid;
3544 attr.ca_gid = dcp->c_gid;
3545 }
3546 /*
3547 * Don't tag as a special file (BLK or CHR) until *after*
3548 * hfs_getnewvnode is called. This insures that any
3549 * alias checking is defered until hfs_mknod completes.
3550 */
3551 if (vnodetype == VBLK || vnodetype == VCHR)
3552 attr.ca_mode = (attr.ca_mode & ~S_IFMT) | S_IFREG;
3553
3554 /* Tag symlinks with a type and creator. */
3555 if (vnodetype == VLNK) {
3556 struct FndrFileInfo *fip;
3557
3558 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
3559 fip->fdType = SWAP_BE32(kSymLinkFileType);
3560 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
3561 }
3562 if ((attr.ca_mode & S_ISGID) &&
3563 !groupmember(dcp->c_gid, cnp->cn_cred) &&
3564 suser(cnp->cn_cred, NULL)) {
3565 attr.ca_mode &= ~S_ISGID;
3566 }
3567 if (cnp->cn_flags & ISWHITEOUT)
3568 attr.ca_flags |= UF_OPAQUE;
3569
3570 /* Setup the descriptor */
3571 bzero(&in_desc, sizeof(in_desc));
3572 in_desc.cd_nameptr = cnp->cn_nameptr;
3573 in_desc.cd_namelen = cnp->cn_namelen;
3574 in_desc.cd_parentcnid = dcp->c_cnid;
3575 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
3576
3577 // XXXdbg
3578 hfs_global_shared_lock_acquire(hfsmp);
3579 grabbed_lock = 1;
3580 if (hfsmp->jnl) {
3581 if ((error = journal_start_transaction(hfsmp->jnl)) != 0) {
3582 goto exit;
3583 }
3584 started_tr = 1;
3585 }
3586
3587 /*
3588 * Reserve some space in the Catalog file.
3589 *
3590 * (we also add CAT_DELETE since our getnewvnode
3591 * request can cause an hfs_inactive call to
3592 * delete an unlinked file)
3593 */
3594 if ((error = cat_preflight(hfsmp, CAT_CREATE | CAT_DELETE, &cookie, p))) {
3595 goto exit;
3596 }
3597
3598 /* Lock catalog b-tree */
3599 error = hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_EXCLUSIVE, p);
3600 if (error)
3601 goto exit;
3602
3603 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
3604
3605 /* Unlock catalog b-tree */
3606 (void) hfs_metafilelocking(VTOHFS(dvp), kHFSCatalogFileID, LK_RELEASE, p);
3607 if (error)
3608 goto exit;
3609
3610 /* Update the parent directory */
3611 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
3612 dcp->c_nlink++;
3613 dcp->c_entries++;
3614 dcp->c_flag |= C_CHANGE | C_UPDATE;
3615 tv = time;
3616 (void) VOP_UPDATE(dvp, &tv, &tv, 0);
3617 if (vnodetype == VDIR) {
3618 HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
3619 } else {
3620 HFS_KNOTE(dvp, NOTE_WRITE);
3621 };
3622
3623 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
3624 (dcp->c_cnid == kHFSRootFolderID));
3625
3626 // XXXdbg
3627 // have to end the transaction here before we call hfs_getnewvnode()
3628 // because that can cause us to try and reclaim a vnode on a different
3629 // file system which could cause us to start a transaction which can
3630 // deadlock with someone on that other file system (since we could be
3631 // holding two transaction locks as well as various vnodes and we did
3632 // not obtain the locks on them in the proper order).
3633 //
3634 // NOTE: this means that if the quota check fails or we have to update
3635 // the change time on a block-special device that those changes
3636 // will happen as part of independent transactions.
3637 //
3638 if (started_tr) {
3639 journal_end_transaction(hfsmp->jnl);
3640 started_tr = 0;
3641 }
3642 if (grabbed_lock) {
3643 hfs_global_shared_lock_release(hfsmp);
3644 grabbed_lock = 0;
3645 }
3646
3647 /* Create a vnode for the object just created: */
3648 error = hfs_getnewvnode(hfsmp, NULL, &out_desc, 0, &attr, NULL, &tvp);
3649 if (error)
3650 goto exit;
3651
3652 // XXXdbg
3653 cache_enter(dvp, tvp, cnp);
3654
3655 #if QUOTA
3656 cp = VTOC(tvp);
3657 /*
3658 * We call hfs_chkiq with FORCE flag so that if we
3659 * fall through to the rmdir we actually have
3660 * accounted for the inode
3661 */
3662 if ((error = hfs_getinoquota(cp)) ||
3663 (error = hfs_chkiq(cp, 1, cnp->cn_cred, FORCE))) {
3664 if (tvp->v_type == VDIR)
3665 VOP_RMDIR(dvp,tvp, cnp);
3666 else
3667 VOP_REMOVE(dvp,tvp, cnp);
3668
3669 // because VOP_RMDIR and VOP_REMOVE already
3670 // have done the vput()
3671 dvp = NULL;
3672 goto exit;
3673 }
3674 #endif /* QUOTA */
3675
3676 /*
3677 * restore vtype and mode for VBLK and VCHR
3678 */
3679 if (vnodetype == VBLK || vnodetype == VCHR) {
3680 struct cnode *cp;
3681
3682 cp = VTOC(tvp);
3683 cp->c_mode = mode;
3684 tvp->v_type = IFTOVT(mode);
3685 cp->c_flag |= C_CHANGE;
3686 tv = time;
3687 if ((error = VOP_UPDATE(tvp, &tv, &tv, 1))) {
3688 vput(tvp);
3689 goto exit;
3690 }
3691 }
3692
3693 *vpp = tvp;
3694 exit:
3695 cat_releasedesc(&out_desc);
3696
3697 cat_postflight(hfsmp, &cookie, p);
3698
3699 if ((cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF) {
3700 char *tmp = cnp->cn_pnbuf;
3701 cnp->cn_pnbuf = NULL;
3702 cnp->cn_flags &= ~HASBUF;
3703 FREE_ZONE(tmp, cnp->cn_pnlen, M_NAMEI);
3704 }
3705 /*
3706 * Check if a file is located in the "Cleanup At Startup"
3707 * directory. If it is then tag it as NODUMP so that we
3708 * can be lazy about zero filling data holes.
3709 */
3710 if ((error == 0) && dvp && (vnodetype == VREG) &&
3711 (dcp->c_desc.cd_nameptr != NULL) &&
3712 (strcmp(dcp->c_desc.cd_nameptr, CARBON_TEMP_DIR_NAME) == 0)) {
3713 struct vnode *ddvp;
3714 cnid_t parid;
3715
3716 parid = dcp->c_parentcnid;
3717 vput(dvp);
3718 dvp = NULL;
3719
3720 /*
3721 * The parent of "Cleanup At Startup" should
3722 * have the ASCII name of the userid.
3723 */
3724 if (VFS_VGET(HFSTOVFS(hfsmp), &parid, &ddvp) == 0) {
3725 if (VTOC(ddvp)->c_desc.cd_nameptr &&
3726 (cp->c_uid == strtoul(VTOC(ddvp)->c_desc.cd_nameptr, 0, 0))) {
3727 cp->c_flags |= UF_NODUMP;
3728 cp->c_flag |= C_CHANGE;
3729 }
3730 vput(ddvp);
3731 }
3732 }
3733 if (dvp)
3734 vput(dvp);
3735
3736 if (started_tr) {
3737 journal_end_transaction(hfsmp->jnl);
3738 started_tr = 0;
3739 }
3740 if (grabbed_lock) {
3741 hfs_global_shared_lock_release(hfsmp);
3742 grabbed_lock = 0;
3743 }
3744
3745 return (error);
3746 }
3747
3748
3749 static int
3750 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, struct proc *p)
3751 {
3752 struct vnode *rvp;
3753 struct cnode *cp = VTOC(vp);
3754 int error;
3755
3756 if ((rvp = cp->c_rsrc_vp)) {
3757 /* Use exising vnode */
3758 error = vget(rvp, 0, p);
3759 if (error) {
3760 char * name = VTOC(vp)->c_desc.cd_nameptr;
3761
3762 if (name)
3763 printf("hfs_vgetrsrc: couldn't get"
3764 " resource fork for %s\n", name);
3765 return (error);
3766 }
3767 } else {
3768 struct cat_fork rsrcfork;
3769
3770 /* Lock catalog b-tree */
3771 error = hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_SHARED, p);
3772 if (error)
3773 return (error);
3774
3775 /* Get resource fork data */
3776 error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
3777 (struct cat_attr *)0, &rsrcfork);
3778
3779 /* Unlock the Catalog */
3780 (void) hfs_metafilelocking(hfsmp, kHFSCatalogFileID, LK_RELEASE, p);
3781 if (error)
3782 return (error);
3783
3784 error = hfs_getnewvnode(hfsmp, cp, &cp->c_desc, 1, &cp->c_attr,
3785 &rsrcfork, &rvp);
3786 if (error)
3787 return (error);
3788 }
3789
3790 *rvpp = rvp;
3791 return (0);
3792 }
3793
3794
3795 static void
3796 filt_hfsdetach(struct knote *kn)
3797 {
3798 struct vnode *vp;
3799 int result;
3800 struct proc *p = current_proc();
3801
3802 vp = (struct vnode *)kn->kn_hook;
3803 if (1) { /* ! KNDETACH_VNLOCKED */
3804 result = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
3805 if (result) return;
3806 };
3807
3808 result = KNOTE_DETACH(&VTOC(vp)->c_knotes, kn);
3809
3810 if (1) { /* ! KNDETACH_VNLOCKED */
3811 VOP_UNLOCK(vp, 0, p);
3812 };
3813 }
3814
3815 /*ARGSUSED*/
3816 static int
3817 filt_hfsread(struct knote *kn, long hint)
3818 {
3819 struct vnode *vp = (struct vnode *)kn->kn_fp->f_data;
3820
3821 if (hint == NOTE_REVOKE) {
3822 /*
3823 * filesystem is gone, so set the EOF flag and schedule
3824 * the knote for deletion.
3825 */
3826 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3827 return (1);
3828 }
3829
3830 kn->kn_data = VTOF(vp)->ff_size - kn->kn_fp->f_offset;
3831 return (kn->kn_data != 0);
3832 }
3833
3834 /*ARGSUSED*/
3835 static int
3836 filt_hfswrite(struct knote *kn, long hint)
3837 {
3838 if (hint == NOTE_REVOKE) {
3839 /*
3840 * filesystem is gone, so set the EOF flag and schedule
3841 * the knote for deletion.
3842 */
3843 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3844 }
3845
3846 kn->kn_data = 0;
3847 return (1);
3848 }
3849
3850 static int
3851 filt_hfsvnode(struct knote *kn, long hint)
3852 {
3853
3854 if (kn->kn_sfflags & hint)
3855 kn->kn_fflags |= hint;
3856 if (hint == NOTE_REVOKE) {
3857 kn->kn_flags |= EV_EOF;
3858 return (1);
3859 }
3860 return (kn->kn_fflags != 0);
3861 }
3862
/*
 * Filter ops tables: { f_isfd = 1, f_attach, f_detach, f_event }.
 * All three share filt_hfsdetach for detach.
 */
static struct filterops hfsread_filtops = 
	{ 1, NULL, filt_hfsdetach, filt_hfsread };
static struct filterops hfswrite_filtops = 
	{ 1, NULL, filt_hfsdetach, filt_hfswrite };
static struct filterops hfsvnode_filtops = 
	{ 1, NULL, filt_hfsdetach, filt_hfsvnode };
3869
3870 /*
3871 #
3872 #% kqfilt_add vp L L L
3873 #
3874 vop_kqfilt_add
3875 IN struct vnode *vp;
3876 IN struct knote *kn;
3877 IN struct proc *p;
3878 */
3879 static int
3880 hfs_kqfilt_add(ap)
3881 struct vop_kqfilt_add_args /* {
3882 struct vnode *a_vp;
3883 struct knote *a_kn;
3884 struct proc *p;
3885 } */ *ap;
3886 {
3887 struct vnode *vp = ap->a_vp;
3888 struct knote *kn = ap->a_kn;
3889
3890 switch (kn->kn_filter) {
3891 case EVFILT_READ:
3892 if (vp->v_type == VREG) {
3893 kn->kn_fop = &hfsread_filtops;
3894 } else {
3895 return EINVAL;
3896 };
3897 break;
3898 case EVFILT_WRITE:
3899 if (vp->v_type == VREG) {
3900 kn->kn_fop = &hfswrite_filtops;
3901 } else {
3902 return EINVAL;
3903 };
3904 break;
3905 case EVFILT_VNODE:
3906 kn->kn_fop = &hfsvnode_filtops;
3907 break;
3908 default:
3909 return (1);
3910 }
3911
3912 kn->kn_hook = (caddr_t)vp;
3913
3914 /* simple_lock(&vp->v_pollinfo.vpi_lock); */
3915 KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn);
3916 /* simple_unlock(&vp->v_pollinfo.vpi_lock); */
3917
3918 return (0);
3919 }
3920
3921 /*
3922 #
3923 #% kqfilt_remove vp L L L
3924 #
3925 vop_kqfilt_remove
3926 IN struct vnode *vp;
3927 IN uintptr_t ident;
3928 IN struct proc *p;
3929 */
3930 static int
3931 hfs_kqfilt_remove(ap)
3932 struct vop_kqfilt_remove_args /* {
3933 struct vnode *a_vp;
3934 uintptr_t ident;
3935 struct proc *p;
3936 } */ *ap;
3937 {
3938 struct vnode *vp = ap->a_vp;
3939 uintptr_t ident = ap->a_ident;
3940 int result;
3941
3942 result = ENOTSUP; /* XXX */
3943
3944 return (result);
3945 }
3946
3947 /*
3948 * Wrapper for special device reads
3949 */
3950 static int
3951 hfsspec_read(ap)
3952 struct vop_read_args /* {
3953 struct vnode *a_vp;
3954 struct uio *a_uio;
3955 int a_ioflag;
3956 struct ucred *a_cred;
3957 } */ *ap;
3958 {
3959 /*
3960 * Set access flag.
3961 */
3962 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
3963 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_read), ap));
3964 }
3965
3966 /*
3967 * Wrapper for special device writes
3968 */
3969 static int
3970 hfsspec_write(ap)
3971 struct vop_write_args /* {
3972 struct vnode *a_vp;
3973 struct uio *a_uio;
3974 int a_ioflag;
3975 struct ucred *a_cred;
3976 } */ *ap;
3977 {
3978 /*
3979 * Set update and change flags.
3980 */
3981 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
3982 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_write), ap));
3983 }
3984
3985 /*
3986 * Wrapper for special device close
3987 *
3988 * Update the times on the cnode then do device close.
3989 */
3990 static int
3991 hfsspec_close(ap)
3992 struct vop_close_args /* {
3993 struct vnode *a_vp;
3994 int a_fflag;
3995 struct ucred *a_cred;
3996 struct proc *a_p;
3997 } */ *ap;
3998 {
3999 struct vnode *vp = ap->a_vp;
4000 struct cnode *cp = VTOC(vp);
4001
4002 simple_lock(&vp->v_interlock);
4003 if (ap->a_vp->v_usecount > 1)
4004 CTIMES(cp, &time, &time);
4005 simple_unlock(&vp->v_interlock);
4006 return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
4007 }
4008
4009 #if FIFO
4010 /*
4011 * Wrapper for fifo reads
4012 */
4013 static int
4014 hfsfifo_read(ap)
4015 struct vop_read_args /* {
4016 struct vnode *a_vp;
4017 struct uio *a_uio;
4018 int a_ioflag;
4019 struct ucred *a_cred;
4020 } */ *ap;
4021 {
4022 extern int (**fifo_vnodeop_p)(void *);
4023
4024 /*
4025 * Set access flag.
4026 */
4027 VTOC(ap->a_vp)->c_flag |= C_ACCESS;
4028 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_read), ap));
4029 }
4030
4031 /*
4032 * Wrapper for fifo writes
4033 */
4034 static int
4035 hfsfifo_write(ap)
4036 struct vop_write_args /* {
4037 struct vnode *a_vp;
4038 struct uio *a_uio;
4039 int a_ioflag;
4040 struct ucred *a_cred;
4041 } */ *ap;
4042 {
4043 extern int (**fifo_vnodeop_p)(void *);
4044
4045 /*
4046 * Set update and change flags.
4047 */
4048 VTOC(ap->a_vp)->c_flag |= C_CHANGE | C_UPDATE;
4049 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_write), ap));
4050 }
4051
4052 /*
4053 * Wrapper for fifo close
4054 *
4055 * Update the times on the cnode then do device close.
4056 */
4057 static int
4058 hfsfifo_close(ap)
4059 struct vop_close_args /* {
4060 struct vnode *a_vp;
4061 int a_fflag;
4062 struct ucred *a_cred;
4063 struct proc *a_p;
4064 } */ *ap;
4065 {
4066 extern int (**fifo_vnodeop_p)(void *);
4067 struct vnode *vp = ap->a_vp;
4068 struct cnode *cp = VTOC(vp);
4069
4070 simple_lock(&vp->v_interlock);
4071 if (ap->a_vp->v_usecount > 1)
4072 CTIMES(cp, &time, &time);
4073 simple_unlock(&vp->v_interlock);
4074 return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
4075 }
4076
4077 /*
4078 * kqfilt_add wrapper for fifos.
4079 *
4080 * Fall through to hfs kqfilt_add routines if needed
4081 */
4082 int
4083 hfsfifo_kqfilt_add(ap)
4084 struct vop_kqfilt_add_args *ap;
4085 {
4086 extern int (**fifo_vnodeop_p)(void *);
4087 int error;
4088
4089 error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_add), ap);
4090 if (error)
4091 error = hfs_kqfilt_add(ap);
4092 return (error);
4093 }
4094
4095 /*
4096 * kqfilt_remove wrapper for fifos.
4097 *
4098 * Fall through to hfs kqfilt_remove routines if needed
4099 */
4100 int
4101 hfsfifo_kqfilt_remove(ap)
4102 struct vop_kqfilt_remove_args *ap;
4103 {
4104 extern int (**fifo_vnodeop_p)(void *);
4105 int error;
4106
4107 error = VOCALL(fifo_vnodeop_p, VOFFSET(vop_kqfilt_remove), ap);
4108 if (error)
4109 error = hfs_kqfilt_remove(ap);
4110 return (error);
4111 }
4112
4113 #endif /* FIFO */
4114
4115
4116 /*****************************************************************************
4117 *
4118 * VOP Tables
4119 *
4120 *****************************************************************************/
4121 int hfs_cache_lookup(); /* in hfs_lookup.c */
4122 int hfs_lookup(); /* in hfs_lookup.c */
4123 int hfs_read(); /* in hfs_readwrite.c */
4124 int hfs_write(); /* in hfs_readwrite.c */
4125 int hfs_ioctl(); /* in hfs_readwrite.c */
4126 int hfs_select(); /* in hfs_readwrite.c */
4127 int hfs_bmap(); /* in hfs_readwrite.c */
4128 int hfs_strategy(); /* in hfs_readwrite.c */
4129 int hfs_truncate(); /* in hfs_readwrite.c */
4130 int hfs_allocate(); /* in hfs_readwrite.c */
4131 int hfs_pagein(); /* in hfs_readwrite.c */
4132 int hfs_pageout(); /* in hfs_readwrite.c */
4133 int hfs_search(); /* in hfs_search.c */
4134 int hfs_bwrite(); /* in hfs_readwrite.c */
4135 int hfs_link(); /* in hfs_link.c */
4136 int hfs_blktooff(); /* in hfs_readwrite.c */
4137 int hfs_offtoblk(); /* in hfs_readwrite.c */
4138 int hfs_cmap(); /* in hfs_readwrite.c */
4139 int hfs_getattrlist(); /* in hfs_attrlist.c */
4140 int hfs_setattrlist(); /* in hfs_attrlist.c */
4141 int hfs_readdirattr(); /* in hfs_attrlist.c */
4142 int hfs_inactive(); /* in hfs_cnode.c */
4143 int hfs_reclaim(); /* in hfs_cnode.c */
4144
/*
 * VOP dispatch table for ordinary HFS vnodes (files, directories,
 * symlinks).  hfs_vnodeop_p is filled in by vfs_opv_init() from the
 * descriptor/entry pairs below.
 */
int (**hfs_vnodeop_p)(void *);

#define VOPFUNC int (*)(void *)

struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
    { &vop_default_desc, (VOPFUNC)vn_default_error },
    { &vop_lookup_desc, (VOPFUNC)hfs_cache_lookup },	/* lookup */
    { &vop_create_desc, (VOPFUNC)hfs_create },		/* create */
    { &vop_mknod_desc, (VOPFUNC)hfs_mknod },		/* mknod */
    { &vop_open_desc, (VOPFUNC)hfs_open },		/* open */
    { &vop_close_desc, (VOPFUNC)hfs_close },		/* close */
    { &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
    { &vop_getattr_desc, (VOPFUNC)hfs_getattr },	/* getattr */
    { &vop_setattr_desc, (VOPFUNC)hfs_setattr },	/* setattr */
    { &vop_read_desc, (VOPFUNC)hfs_read },		/* read */
    { &vop_write_desc, (VOPFUNC)hfs_write },		/* write */
    { &vop_ioctl_desc, (VOPFUNC)hfs_ioctl },		/* ioctl */
    { &vop_select_desc, (VOPFUNC)hfs_select },		/* select */
    { &vop_revoke_desc, (VOPFUNC)nop_revoke },		/* revoke */
    { &vop_exchange_desc, (VOPFUNC)hfs_exchange },	/* exchange */
    { &vop_mmap_desc, (VOPFUNC)err_mmap },		/* mmap */
    { &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
    { &vop_seek_desc, (VOPFUNC)nop_seek },		/* seek */
    { &vop_remove_desc, (VOPFUNC)hfs_remove },		/* remove */
    { &vop_link_desc, (VOPFUNC)hfs_link },		/* link */
    { &vop_rename_desc, (VOPFUNC)hfs_rename },		/* rename */
    { &vop_mkdir_desc, (VOPFUNC)hfs_mkdir },		/* mkdir */
    { &vop_rmdir_desc, (VOPFUNC)hfs_rmdir },		/* rmdir */
    { &vop_mkcomplex_desc, (VOPFUNC)err_mkcomplex },	/* mkcomplex */
    { &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },	/* getattrlist */
    { &vop_setattrlist_desc, (VOPFUNC)hfs_setattrlist },	/* setattrlist */
    { &vop_symlink_desc, (VOPFUNC)hfs_symlink },	/* symlink */
    { &vop_readdir_desc, (VOPFUNC)hfs_readdir },	/* readdir */
    { &vop_readdirattr_desc, (VOPFUNC)hfs_readdirattr },	/* readdirattr */
    { &vop_readlink_desc, (VOPFUNC)hfs_readlink },	/* readlink */
    { &vop_abortop_desc, (VOPFUNC)nop_abortop },	/* abortop */
    { &vop_inactive_desc, (VOPFUNC)hfs_inactive },	/* inactive */
    { &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },	/* reclaim */
    { &vop_lock_desc, (VOPFUNC)hfs_lock },		/* lock */
    { &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
    { &vop_bmap_desc, (VOPFUNC)hfs_bmap },		/* bmap */
    { &vop_strategy_desc, (VOPFUNC)hfs_strategy },	/* strategy */
    { &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
    { &vop_islocked_desc, (VOPFUNC)hfs_islocked },	/* islocked */
    { &vop_pathconf_desc, (VOPFUNC)hfs_pathconf },	/* pathconf */
    { &vop_advlock_desc, (VOPFUNC)hfs_advlock },	/* advlock */
    { &vop_reallocblks_desc, (VOPFUNC)err_reallocblks },	/* reallocblks */
    { &vop_truncate_desc, (VOPFUNC)hfs_truncate },	/* truncate */
    { &vop_allocate_desc, (VOPFUNC)hfs_allocate },	/* allocate */
    { &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
    { &vop_searchfs_desc, (VOPFUNC)hfs_search },	/* search fs */
    { &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },		/* bwrite */
    { &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* pagein */
    { &vop_pageout_desc,(VOPFUNC) hfs_pageout },	/* pageout */
    { &vop_copyfile_desc, (VOPFUNC)err_copyfile },	/* copyfile */
    { &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },	/* blktooff */
    { &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },	/* offtoblk */
    { &vop_cmap_desc, (VOPFUNC)hfs_cmap },		/* cmap */
    { &vop_kqfilt_add_desc, (VOPFUNC)hfs_kqfilt_add },	/* kqfilt_add */
    { &vop_kqfilt_remove_desc, (VOPFUNC)hfs_kqfilt_remove },	/* kqfilt_remove */
    { NULL, (VOPFUNC)NULL }
};

struct vnodeopv_desc hfs_vnodeop_opv_desc =
{ &hfs_vnodeop_p, hfs_vnodeop_entries };
4210
/*
 * VOP dispatch table for block/character special files on HFS.
 * Most operations pass through to specfs; hfs handles the ones
 * that touch catalog metadata (attrs, fsync, close, update, ...).
 */
int (**hfs_specop_p)(void *);
struct vnodeopv_entry_desc hfs_specop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)spec_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)spec_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)spec_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)spec_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsspec_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsspec_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsspec_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)spec_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)spec_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)spec_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)spec_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)spec_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)spec_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)spec_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)spec_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)spec_rmdir },		/* rmdir */
	{ &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },
	{ &vop_symlink_desc, (VOPFUNC)spec_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)spec_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)spec_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)spec_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)spec_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)spec_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)spec_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)spec_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)spec_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
	{ &vop_devblocksize_desc, (VOPFUNC)spec_devblocksize },	/* devblocksize */
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_specop_opv_desc =
	{ &hfs_specop_p, hfs_specop_entries };
4268
4269 #if FIFO
/*
 * VOP dispatch table for fifos on HFS.  Most operations pass
 * through to fifofs; hfs handles the metadata-related ones.
 */
int (**hfs_fifoop_p)(void *);
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
	{ &vop_default_desc, (VOPFUNC)vn_default_error },
	{ &vop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
	{ &vop_create_desc, (VOPFUNC)fifo_create },		/* create */
	{ &vop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
	{ &vop_open_desc, (VOPFUNC)fifo_open },			/* open */
	{ &vop_close_desc, (VOPFUNC)hfsfifo_close },		/* close */
	{ &vop_access_desc, (VOPFUNC)hfs_access },		/* access */
	{ &vop_getattr_desc, (VOPFUNC)hfs_getattr },		/* getattr */
	{ &vop_setattr_desc, (VOPFUNC)hfs_setattr },		/* setattr */
	{ &vop_read_desc, (VOPFUNC)hfsfifo_read },		/* read */
	{ &vop_write_desc, (VOPFUNC)hfsfifo_write },		/* write */
	{ &vop_lease_desc, (VOPFUNC)fifo_lease_check },		/* lease */
	{ &vop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
	{ &vop_select_desc, (VOPFUNC)fifo_select },		/* select */
	{ &vop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
	{ &vop_mmap_desc, (VOPFUNC)fifo_mmap },			/* mmap */
	{ &vop_fsync_desc, (VOPFUNC)hfs_fsync },		/* fsync */
	{ &vop_seek_desc, (VOPFUNC)fifo_seek },			/* seek */
	{ &vop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
	{ &vop_link_desc, (VOPFUNC)fifo_link },			/* link */
	{ &vop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
	{ &vop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
	{ &vop_getattrlist_desc, (VOPFUNC)hfs_getattrlist },
	{ &vop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
	{ &vop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
	{ &vop_readlink_desc, (VOPFUNC)fifo_readlink },		/* readlink */
	{ &vop_abortop_desc, (VOPFUNC)fifo_abortop },		/* abortop */
	{ &vop_inactive_desc, (VOPFUNC)hfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, (VOPFUNC)hfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, (VOPFUNC)hfs_lock },			/* lock */
	{ &vop_unlock_desc, (VOPFUNC)hfs_unlock },		/* unlock */
	{ &vop_bmap_desc, (VOPFUNC)fifo_bmap },			/* bmap */
	{ &vop_strategy_desc, (VOPFUNC)fifo_strategy },		/* strategy */
	{ &vop_print_desc, (VOPFUNC)hfs_print },		/* print */
	{ &vop_islocked_desc, (VOPFUNC)hfs_islocked },		/* islocked */
	{ &vop_pathconf_desc, (VOPFUNC)fifo_pathconf },		/* pathconf */
	{ &vop_advlock_desc, (VOPFUNC)fifo_advlock },		/* advlock */
	{ &vop_blkatoff_desc, (VOPFUNC)fifo_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, (VOPFUNC)fifo_valloc },		/* valloc */
	{ &vop_reallocblks_desc, (VOPFUNC)fifo_reallocblks },	/* reallocblks */
	{ &vop_vfree_desc, (VOPFUNC)err_vfree },		/* vfree */
	{ &vop_truncate_desc, (VOPFUNC)fifo_truncate },		/* truncate */
	{ &vop_update_desc, (VOPFUNC)hfs_update },		/* update */
	{ &vop_bwrite_desc, (VOPFUNC)hfs_bwrite },
	{ &vop_pagein_desc, (VOPFUNC)hfs_pagein },		/* Pagein */
	{ &vop_pageout_desc, (VOPFUNC)hfs_pageout },		/* Pageout */
	{ &vop_copyfile_desc, (VOPFUNC)err_copyfile }, 		/* copyfile */
	{ &vop_blktooff_desc, (VOPFUNC)hfs_blktooff },		/* blktooff */
	{ &vop_offtoblk_desc, (VOPFUNC)hfs_offtoblk },		/* offtoblk */
	{ &vop_cmap_desc, (VOPFUNC)hfs_cmap },			/* cmap */
	{ &vop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add },	/* kqfilt_add */
	{ &vop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove },	/* kqfilt_remove */
	{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};
struct vnodeopv_desc hfs_fifoop_opv_desc =
	{ &hfs_fifoop_p, hfs_fifoop_entries };
4329 #endif /* FIFO */
4330
4331
4332