]> git.saurik.com Git - apple/xnu.git/blob - bsd/hfs/hfs_vnops.c
eef6b5e9660486b10484d49bd60c81c25a9bcf21
[apple/xnu.git] / bsd / hfs / hfs_vnops.c
1 /*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/param.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/mount.h>
37 #include <sys/vnode_if.h>
38 #include <sys/vnode_internal.h>
39 #include <sys/malloc.h>
40 #include <sys/ubc.h>
41 #include <sys/ubc_internal.h>
42 #include <sys/paths.h>
43 #include <sys/quota.h>
44 #include <sys/time.h>
45 #include <sys/disk.h>
46 #include <sys/kauth.h>
47 #include <sys/uio_internal.h>
48
49 #include <miscfs/specfs/specdev.h>
50 #include <miscfs/fifofs/fifo.h>
51 #include <vfs/vfs_support.h>
52 #include <machine/spl.h>
53
54 #include <sys/kdebug.h>
55 #include <sys/sysctl.h>
56
57 #include "hfs.h"
58 #include "hfs_catalog.h"
59 #include "hfs_cnode.h"
60 #include "hfs_dbg.h"
61 #include "hfs_mount.h"
62 #include "hfs_quota.h"
63 #include "hfs_endian.h"
64
65 #include "hfscommon/headers/BTreesInternal.h"
66 #include "hfscommon/headers/FileMgrInternal.h"
67
68
#define KNDETACH_VNLOCKED 0x00000001

/* Name of the folder Carbon uses for temporary items. */
#define CARBON_TEMP_DIR_NAME "Cleanup At Startup"


/* Global vfs data structures for hfs */

/* Always F_FULLFSYNC? 1=yes,0=no (default due to "various" reasons is 'no') */
int always_do_fullfsync = 0;
SYSCTL_INT (_kern, OID_AUTO, always_do_fullfsync, CTLFLAG_RW, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");

/* Forward declarations for file-local helpers; bodies appear later in this file. */
static int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
                        struct componentname *cnp, struct vnode_attr *vap,
                        vfs_context_t ctx);

static int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
static int hfs_metasync_all(struct hfsmount *hfsmp);

static int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
                         int);

static int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
                          int, int, int);

#if FIFO
/* FIFO vnode operations, compiled in only when FIFO support is configured. */
static int hfsfifo_read(struct vnop_read_args *);
static int hfsfifo_write(struct vnop_write_args *);
static int hfsfifo_close(struct vnop_close_args *);
static int hfsfifo_kqfilt_add(struct vnop_kqfilt_add_args *);
static int hfsfifo_kqfilt_remove(struct vnop_kqfilt_remove_args *);

extern int (**fifo_vnodeop_p)(void *);
#endif /* FIFO */

/* HFS vnode-operation entry points. */
static int hfs_vnop_close(struct vnop_close_args*);
static int hfs_vnop_create(struct vnop_create_args*);
static int hfs_vnop_exchange(struct vnop_exchange_args*);
static int hfs_vnop_fsync(struct vnop_fsync_args*);
static int hfs_vnop_mkdir(struct vnop_mkdir_args*);
static int hfs_vnop_mknod(struct vnop_mknod_args*);
static int hfs_vnop_getattr(struct vnop_getattr_args*);
static int hfs_vnop_open(struct vnop_open_args*);
static int hfs_vnop_readdir(struct vnop_readdir_args*);
static int hfs_vnop_remove(struct vnop_remove_args*);
static int hfs_vnop_rename(struct vnop_rename_args*);
static int hfs_vnop_rmdir(struct vnop_rmdir_args*);
static int hfs_vnop_symlink(struct vnop_symlink_args*);
static int hfs_vnop_setattr(struct vnop_setattr_args*);
static int hfs_vnop_readlink(struct vnop_readlink_args *);
static int hfs_vnop_pathconf(struct vnop_pathconf_args *);
static int hfs_vnop_kqfiltremove(struct vnop_kqfilt_remove_args *);
static int hfs_vnop_whiteout(struct vnop_whiteout_args *);
/* Variants used for special-device vnodes that live on an HFS volume. */
static int hfsspec_read(struct vnop_read_args *);
static int hfsspec_write(struct vnop_write_args *);
static int hfsspec_close(struct vnop_close_args *);

/* Options for hfs_removedir and hfs_removefile */
#define HFSRM_SKIP_RESERVE 0x01
127
128
129
130
131 /*****************************************************************************
132 *
133 * Common Operations on vnodes
134 *
135 *****************************************************************************/
136
137 /*
138 * Create a regular file.
139 */
140 static int
141 hfs_vnop_create(struct vnop_create_args *ap)
142 {
143 int error;
144
145 again:
146 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
147
148 /*
149 * We speculatively skipped the original lookup of the leaf
150 * for CREATE. Since it exists, go get it as long as they
151 * didn't want an exclusive create.
152 */
153 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
154 struct vnop_lookup_args args;
155
156 args.a_desc = &vnop_lookup_desc;
157 args.a_dvp = ap->a_dvp;
158 args.a_vpp = ap->a_vpp;
159 args.a_cnp = ap->a_cnp;
160 args.a_context = ap->a_context;
161 args.a_cnp->cn_nameiop = LOOKUP;
162 error = hfs_vnop_lookup(&args);
163 /*
164 * We can also race with remove for this file.
165 */
166 if (error == ENOENT) {
167 goto again;
168 }
169
170 /* Make sure it was file. */
171 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
172 vnode_put(*args.a_vpp);
173 error = EEXIST;
174 }
175 args.a_cnp->cn_nameiop = CREATE;
176 }
177 return (error);
178 }
179
180 /*
181 * Make device special file.
182 */
183 static int
184 hfs_vnop_mknod(struct vnop_mknod_args *ap)
185 {
186 struct vnode_attr *vap = ap->a_vap;
187 struct vnode *dvp = ap->a_dvp;
188 struct vnode **vpp = ap->a_vpp;
189 struct cnode *cp;
190 int error;
191
192 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
193 return (ENOTSUP);
194 }
195
196 /* Create the vnode */
197 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
198 if (error)
199 return (error);
200
201 cp = VTOC(*vpp);
202 cp->c_touch_acctime = TRUE;
203 cp->c_touch_chgtime = TRUE;
204 cp->c_touch_modtime = TRUE;
205
206 if ((vap->va_rdev != VNOVAL) &&
207 (vap->va_type == VBLK || vap->va_type == VCHR))
208 cp->c_rdev = vap->va_rdev;
209
210 return (0);
211 }
212
/*
 * Open a file/directory.
 *
 * Enforces the append-only flag, refuses external opens of the journal
 * file, and on the first non-busy open of a small fragmented file makes
 * a best-effort attempt to relocate (de-fragment) it.
 */
static int
hfs_vnop_open(struct vnop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct filefork *fp;
	struct timeval tv;
	int error;

	/*
	 * Files marked append-only must be opened for appending.
	 */
	if ((VTOC(vp)->c_flags & APPEND) && !vnode_isdir(vp) &&
	    (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
		return (EPERM);

	if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
		return (EBUSY);  /* file is in use by the kernel */

	/* Don't allow journal file to be opened externally. */
	if (VTOC(vp)->c_fileid == VTOHFS(vp)->hfs_jnlfileid)
		return (EPERM);
	/*
	 * On the first (non-busy) open of a fragmented
	 * file attempt to de-frag it (if its less than 20MB).
	 * Skip the attempt entirely on read-only or non-journaled
	 * volumes, non-regular files, busy vnodes, and (when named
	 * streams are configured) named-stream vnodes.
	 */
	if ((VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) ||
	    (VTOHFS(vp)->jnl == NULL) ||
#if NAMEDSTREAMS
	    !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
#else
	    !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
#endif
		return (0);
	}

	if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
		return (error);
	fp = VTOF(vp);
	/*
	 * ff_extents[7].blockCount != 0 means the last slot of the
	 * in-catalog extent record is in use, i.e. the file is spread
	 * across many extents (fragmented) -- presumably with overflow
	 * extents as well.
	 */
	if (fp->ff_blocks &&
	    fp->ff_extents[7].blockCount != 0 &&
	    fp->ff_size <= (20 * 1024 * 1024)) {
		struct timeval now;
		struct cnode *cp = VTOC(vp);
		/*
		 * Wait until system bootup is done (3 min).
		 * And don't relocate a file that's been modified
		 * within the past minute -- this can lead to
		 * system thrashing.
		 */
		microuptime(&tv);
		microtime(&now);
		if (tv.tv_sec > (60 * 3) &&
		    ((now.tv_sec - cp->c_mtime) > 60)) {
			/* Best-effort: the relocation result is ignored. */
			(void) hfs_relocate(vp, VTOVCB(vp)->nextAllocation + 4096,
			                    vfs_context_ucred(ap->a_context),
			                    vfs_context_proc(ap->a_context));
		}
	}
	hfs_unlock(VTOC(vp));

	return (0);
}
278
279
/*
 * Close a file/directory.
 *
 * Always returns 0: if the cnode lock cannot be acquired the close is
 * simply skipped.  Performs truncate-on-close of over-allocated blocks
 * for files whose rsrc fork is a named stream, thaws a frozen volume if
 * the freezing process is exiting, updates times on busy vnodes, and
 * releases directory hints / recycles idle system vnodes.
 */
static int
hfs_vnop_close(ap)
	struct vnop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		vfs_context_t a_context;
	} */ *ap;
{
	register struct vnode *vp = ap->a_vp;
	register struct cnode *cp;
	struct proc *p = vfs_context_proc(ap->a_context);
	struct hfsmount *hfsmp;
	int busy;
	int knownrefs = 0;
	int tooktrunclock = 0;

	if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0)
		return (0);
	cp = VTOC(vp);
	hfsmp = VTOHFS(vp);

	/*
	 * If the rsrc fork is a named stream, it holds a usecount on
	 * the data fork, which prevents the data fork from getting recycled, which
	 * then prevents the de-allocation of its extra blocks.
	 * Do checks for truncation on close. Purge extra extents if they
	 * exist. Make sure the vp is not a directory, that it has a resource
	 * fork, and that rsrc fork is a named stream.
	 */

	if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
			&& (vnode_isnamedstream(cp->c_rsrc_vp))) {
		uint32_t blks;

		blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
		/*
		 * If there are any extra blocks and there are only 2 refs on
		 * this vp (ourselves + rsrc fork holding ref on us), go ahead
		 * and try to truncate the extra blocks away.
		 */
		if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
			// release cnode lock ; must acquire truncate lock BEFORE cnode lock
			hfs_unlock (cp);

			hfs_lock_truncate(cp, TRUE);
			tooktrunclock = 1;

			if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
				/* Could not re-take the cnode lock; skip the close work. */
				hfs_unlock_truncate(cp, TRUE);
				return (0);
			}

			//now re-test to make sure it's still valid.
			//(the state may have changed while the cnode lock was dropped)
			if (cp->c_rsrc_vp) {
				knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
				if (!vnode_isinuse(vp, knownrefs)) {
					/* Recompute: sizes may have changed while unlocked. */
					blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
					if (blks < VTOF(vp)->ff_blocks) {
						(void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, ap->a_context);
					}
				}
			}
		}
	}

	// if we froze the fs and we're exiting, then "thaw" the fs
	if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
		hfsmp->hfs_freezing_proc = NULL;
		hfs_global_exclusive_lock_release(hfsmp);
		lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
	}

	busy = vnode_isinuse(vp, 1);

	if (busy) {
		hfs_touchtimes(VTOHFS(vp), cp);
	}
	if (vnode_isdir(vp)) {
		hfs_reldirhints(cp, busy);
	} else if (vnode_issystem(vp) && !busy) {
		vnode_recycle(vp);
	}
	if (tooktrunclock) {
		hfs_unlock_truncate(cp, TRUE);
	}

	hfs_unlock(cp);
	return (0);
}
372
/*
 * Get basic attributes.
 *
 * Fast path: if only the authorization attribute set is requested,
 * answer lock-free from the cnode.  Otherwise take the cnode lock
 * (exclusive only when pending time updates must be flushed first)
 * and fill in the requested attributes, with special handling for
 * directories, hard links, and the resource fork.
 */
static int
hfs_vnop_getattr(struct vnop_getattr_args *ap)
{
#define VNODE_ATTR_TIMES  \
	(VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
#define VNODE_ATTR_AUTH  \
	(VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
	 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)

	struct vnode *vp = ap->a_vp;
	struct vnode_attr *vap = ap->a_vap;
	struct vnode *rvp = NULLVP;
	struct hfsmount *hfsmp;
	struct cnode *cp;
	uint64_t data_size;
	enum vtype v_type;
	int error = 0;

	cp = VTOC(vp);

	/*
	 * Shortcut for vnode_authorize path.  Each of the attributes
	 * in this set is updated atomically so we don't need to take
	 * the cnode lock to access them.
	 */
	if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
		/* Make sure file still exists. */
		if (cp->c_flag & C_NOEXISTS)
			return (ENOENT);

		vap->va_uid = cp->c_uid;
		vap->va_gid = cp->c_gid;
		vap->va_mode = cp->c_mode;
		vap->va_flags = cp->c_flags;
		vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;

		/* No security data recorded: report "no ACL" without a catalog trip. */
		if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
			VATTR_SET_SUPPORTED(vap, va_acl);
		}
		return (0);
	}
	hfsmp = VTOHFS(vp);
	v_type = vnode_vtype(vp);

	/*
	 * If time attributes are requested and we have cnode times
	 * that require updating, then acquire an exclusive lock on
	 * the cnode before updating the times.  Otherwise we can
	 * just acquire a shared lock.
	 */
	if ((vap->va_active & VNODE_ATTR_TIMES) &&
	    (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
		if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)))
			return (error);
		hfs_touchtimes(hfsmp, cp);
	} else {
		if ((error = hfs_lock(cp, HFS_SHARED_LOCK)))
			return (error);
	}

	if (v_type == VDIR) {
		data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;

		if (VATTR_IS_ACTIVE(vap, va_nlink)) {
			int nlink;

			/*
			 * For directories, the va_nlink is esentially a count
			 * of the ".." references to a directory plus the "."
			 * reference and the directory itself. So for HFS+ this
			 * becomes the sub-directory count plus two.
			 *
			 * In the absence of a sub-directory count we use the
			 * directory's item count.  This will be too high in
			 * most cases since it also includes files.
			 */
			if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
			    (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
				nlink = cp->c_attr.ca_dircount;	/* implied ".." entries */
			else
				nlink = cp->c_entries;

			/* Account for ourself and our "." entry */
			nlink += 2;
			/* Hide our private directories. */
			if (cp->c_cnid == kHFSRootFolderID) {
				if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
					--nlink;
				}
				if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
					--nlink;
				}
			}
			VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
		}
		if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
			int entries;

			entries = cp->c_entries;
			/* Hide our private files and directories. */
			if (cp->c_cnid == kHFSRootFolderID) {
				if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
					--entries;
				if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
					--entries;
				if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
					entries -= 2;	/* hide the journal files */
			}
			VATTR_RETURN(vap, va_nchildren, entries);
		}
		/*
		 * The va_dirlinkcount is the count of real directory hard links.
		 * (i.e. its not the sum of the implied "." and ".." references)
		 */
		if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
			VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
		}
	} else /* !VDIR */ {
		data_size = VCTOF(vp, cp)->ff_size;

		VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
		if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
			u_int64_t blocks;

			blocks = VCTOF(vp, cp)->ff_blocks;
			VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
		}
	}

	/* conditional because 64-bit arithmetic can be expensive */
	if (VATTR_IS_ACTIVE(vap, va_total_size)) {
		if (v_type == VDIR) {
			VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
		} else {
			u_int64_t total_size = 0;
			struct cnode *rcp;

			if (cp->c_datafork) {
				total_size = cp->c_datafork->ff_size;
			}

			/*
			 * Nonzero difference means blocks exist beyond the data
			 * fork, so a resource fork must be accounted for too.
			 */
			if (cp->c_blocks - VTOF(vp)->ff_blocks) {
				/* We deal with resource fork vnode iocount at the end of the function */
				error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
				if (error) {
					goto out;
				}
				rcp = VTOC(rvp);
				if (rcp && rcp->c_rsrcfork) {
					total_size += rcp->c_rsrcfork->ff_size;
				}
			}

			VATTR_RETURN(vap, va_total_size, total_size);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		if (v_type == VDIR) {
			VATTR_RETURN(vap, va_total_alloc, 0);
		} else {
			VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
		}
	}

	/*
	 * If the VFS wants extended security data, and we know that we
	 * don't have any (because it never told us it was setting any)
	 * then we can return the supported bit and no data.  If we do
	 * have extended security, we can just leave the bit alone and
	 * the VFS will use the fallback path to fetch it.
	 */
	if (VATTR_IS_ACTIVE(vap, va_acl)) {
		if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
			VATTR_SET_SUPPORTED(vap, va_acl);
		}
	}
	if (VATTR_IS_ACTIVE(vap, va_access_time)) {
		/* Access times are lazily updated, get current time if needed */
		if (cp->c_touch_acctime) {
			struct timeval tv;

			microtime(&tv);
			vap->va_access_time.tv_sec = tv.tv_sec;
		} else {
			vap->va_access_time.tv_sec = cp->c_atime;
		}
		vap->va_access_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_access_time);
	}
	vap->va_create_time.tv_sec = cp->c_itime;
	vap->va_create_time.tv_nsec = 0;
	vap->va_modify_time.tv_sec = cp->c_mtime;
	vap->va_modify_time.tv_nsec = 0;
	vap->va_change_time.tv_sec = cp->c_ctime;
	vap->va_change_time.tv_nsec = 0;
	vap->va_backup_time.tv_sec = cp->c_btime;
	vap->va_backup_time.tv_nsec = 0;

	/* XXX is this really a good 'optimal I/O size'? */
	vap->va_iosize = hfsmp->hfs_logBlockSize;
	vap->va_uid = cp->c_uid;
	vap->va_gid = cp->c_gid;
	vap->va_mode = cp->c_mode;
	vap->va_flags = cp->c_flags;

	/*
	 * Exporting file IDs from HFS Plus:
	 *
	 * For "normal" files the c_fileid is the same value as the
	 * c_cnid.  But for hard link files, they are different - the
	 * c_cnid belongs to the active directory entry (ie the link)
	 * and the c_fileid is for the actual inode (ie the data file).
	 *
	 * The stat call (getattr) uses va_fileid and the Carbon APIs,
	 * which are hardlink-ignorant, will ask for va_linkid.
	 */
	vap->va_fileid = (u_int64_t)cp->c_fileid;
	/*
	 * We need to use the origin cache for both hardlinked files
	 * and directories. Hardlinked directories have multiple cnids
	 * and parents (one per link). Hardlinked files also have their
	 * own parents and link IDs separate from the indirect inode number.
	 * If we don't use the cache, we could end up vending the wrong ID
	 * because the cnode will only reflect the link that was looked up most recently.
	 */
	if (cp->c_flag & C_HARDLINK) {
		vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
		vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
	} else {
		vap->va_linkid = (u_int64_t)cp->c_cnid;
		vap->va_parentid = (u_int64_t)cp->c_parentcnid;
	}
	vap->va_fsid = cp->c_dev;
	vap->va_filerev = 0;
	vap->va_encoding = cp->c_encoding;
	vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
	vap->va_data_size = data_size;

	/* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
	vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
	                     VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
	                     VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
	                     VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
	                     VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
	                     VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
	                     VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
	                     VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev |
	                     VNODE_ATTR_va_data_size;

	/* If this is the root, let VFS to find out the mount name, which may be different from the real name.
	 * Otherwise, we need to just take care for hardlinked files, which need to be looked up, if necessary
	 */
	if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
		struct cat_desc linkdesc;
		int lockflags;
		int uselinkdesc = 0;
		cnid_t nextlinkid = 0;
		cnid_t prevlinkid = 0;

		/* Get the name for ATTR_CMN_NAME.  We need to take special care for hardlinks
		 * here because the info. for the link ID requested by getattrlist may be
		 * different than what's currently in the cnode.  This is because the cnode
		 * will be filled in with the information for the most recent link ID that went
		 * through namei/lookup().  If there are competing lookups for hardlinks that point
		 * to the same inode, one (or more) getattrlists could be vended incorrect name information.
		 * Also, we need to beware of open-unlinked files which could have a namelen of 0.  Note
		 * that if another hardlink sibling of this file is being unlinked, that could also thrash
		 * the name fields but it should *not* be treated like an open-unlinked file here.
		 */
		if ((cp->c_flag & C_HARDLINK) &&
		    ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
			/* If we have no name and our linkID is the raw inode number, then we may
			 * have an open-unlinked file.  Go to the next link in this case.
			 */
			if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
				if ((error = hfs_lookuplink(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))) {
					goto out;
				}
			}
			else {
				nextlinkid = vap->va_linkid;
			}
			/* Now probe the catalog for the linkID.  Note that we don't know if we have
			 * the exclusive lock here for the cnode, so we can't just update the descriptor.
			 * Instead, we should just store the descriptor's value locally and then use it to pass
			 * out the name value as needed below.
			 */
			if (nextlinkid) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
				error = cat_findname(hfsmp, nextlinkid, &linkdesc);
				hfs_systemfile_unlock(hfsmp, lockflags);
				if (error == 0) {
					uselinkdesc = 1;
				}
			}
		}

		/* By this point, we either patched the name above, and the c_desc points
		 * to correct data, or it already did, in which case we just proceed by copying
		 * the name into the VAP.  Note that we will never set va_name to supported if
		 * nextlinkid is never initialized.  This could happen in the degenerate case above
		 * involving the raw inode number, where it has no nextlinkid.  In this case, we will
		 * simply not export the name as supported.
		 */
		if (uselinkdesc) {
			strlcpy(vap->va_name, (const char *)linkdesc.cd_nameptr, MAXPATHLEN);
			VATTR_SET_SUPPORTED(vap, va_name);
			cat_releasedesc(&linkdesc);
		}
		else if (cp->c_desc.cd_namelen) {
			strlcpy(vap->va_name, (const char *)cp->c_desc.cd_nameptr, MAXPATHLEN);
			VATTR_SET_SUPPORTED(vap, va_name);
		}
	}

out:
	hfs_unlock(cp);
	/*
	 * We need to drop the iocount on the rsrc fork vnode only *after* we've
	 * released the cnode lock, since vnode_put can trigger an inactive call, which
	 * will go back into the HFS and try to acquire a cnode lock.
	 */
	if (rvp) {
		vnode_put(rvp);
	}
	return (error);
}
705
/*
 * Set attributes on a file/directory.
 *
 * Handles size changes (with the required truncate-lock-before-cnode-lock
 * ordering), ownership, mode, BSD flags (mirroring UF_HIDDEN into the
 * Finder Info), timestamps, and text encoding, then pushes the cnode to
 * disk via hfs_update.  Modifying the journal file is refused.
 */
static int
hfs_vnop_setattr(ap)
	struct vnop_setattr_args /* {
		struct vnode *a_vp;
		struct vnode_attr *a_vap;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode_attr *vap = ap->a_vap;
	struct vnode *vp = ap->a_vp;
	struct cnode *cp = NULL;
	struct hfsmount *hfsmp;
	kauth_cred_t cred = vfs_context_ucred(ap->a_context);
	struct proc *p = vfs_context_proc(ap->a_context);
	int error = 0;
	uid_t nuid;
	gid_t ngid;

	hfsmp = VTOHFS(vp);

	/* Don't allow modification of the journal file. */
	if (hfsmp->hfs_jnlfileid == VTOC(vp)->c_fileid) {
		return (EPERM);
	}

	/*
	 * File size change request.
	 * We are guaranteed that this is not a directory, and that
	 * the filesystem object is writeable.
	 */
	VATTR_SET_SUPPORTED(vap, va_data_size);
	if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {

		/* Take truncate lock before taking cnode lock. */
		hfs_lock_truncate(VTOC(vp), TRUE);

		/* Perform the ubc_setsize before taking the cnode lock. */
		ubc_setsize(vp, vap->va_data_size);

		if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
			hfs_unlock_truncate(VTOC(vp), TRUE);
			return (error);
		}
		cp = VTOC(vp);

		error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, ap->a_context);

		hfs_unlock_truncate(cp, TRUE);
		if (error)
			goto out;
	}
	/* The truncate path above may have already taken the cnode lock. */
	if (cp == NULL) {
		if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
			return (error);
		cp = VTOC(vp);
	}

	/*
	 * If it is just an access time update request by itself
	 * we know the request is from kernel level code, and we
	 * can delay it without being as worried about consistency.
	 * This change speeds up mmaps, in the rare case that they
	 * get caught behind a sync.
	 */

	if (vap->va_active == VNODE_ATTR_va_access_time) {
		cp->c_touch_acctime=TRUE;
		goto out;
	}



	/*
	 * Owner/group change request.
	 * We are guaranteed that the new owner/group is valid and legal.
	 */
	VATTR_SET_SUPPORTED(vap, va_uid);
	VATTR_SET_SUPPORTED(vap, va_gid);
	nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
	ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
	if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
	    ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
		goto out;

	/*
	 * Mode change request.
	 * We are guaranteed that the mode value is valid and that in
	 * conjunction with the owner and group, this change is legal.
	 */
	VATTR_SET_SUPPORTED(vap, va_mode);
	if (VATTR_IS_ACTIVE(vap, va_mode) &&
	    ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
		goto out;

	/*
	 * File flags change.
	 * We are guaranteed that only flags allowed to change given the
	 * current securelevel are being changed.
	 */
	VATTR_SET_SUPPORTED(vap, va_flags);
	if (VATTR_IS_ACTIVE(vap, va_flags)) {
		u_int16_t *fdFlags;

		cp->c_flags = vap->va_flags;
		cp->c_touch_chgtime = TRUE;

		/*
		 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
		 *
		 * The fdFlags for files and frFlags for folders are both 8 bytes
		 * into the userInfo (the first 16 bytes of the Finder Info).  They
		 * are both 16-bit fields.
		 */
		fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
		if (vap->va_flags & UF_HIDDEN)
			*fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
		else
			*fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
	}

	/*
	 * Timestamp updates.
	 */
	VATTR_SET_SUPPORTED(vap, va_create_time);
	VATTR_SET_SUPPORTED(vap, va_access_time);
	VATTR_SET_SUPPORTED(vap, va_modify_time);
	VATTR_SET_SUPPORTED(vap, va_backup_time);
	VATTR_SET_SUPPORTED(vap, va_change_time);
	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
	    VATTR_IS_ACTIVE(vap, va_access_time) ||
	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
	    VATTR_IS_ACTIVE(vap, va_backup_time)) {
		if (VATTR_IS_ACTIVE(vap, va_create_time))
			cp->c_itime = vap->va_create_time.tv_sec;
		if (VATTR_IS_ACTIVE(vap, va_access_time)) {
			cp->c_atime = vap->va_access_time.tv_sec;
			cp->c_touch_acctime = FALSE;
		}
		if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
			cp->c_mtime = vap->va_modify_time.tv_sec;
			cp->c_touch_modtime = FALSE;
			cp->c_touch_chgtime = TRUE;

			/*
			 * The utimes system call can reset the modification
			 * time but it doesn't know about HFS create times.
			 * So we need to ensure that the creation time is
			 * always at least as old as the modification time.
			 */
			if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
			    (cp->c_cnid != kHFSRootFolderID) &&
			    (cp->c_mtime < cp->c_itime)) {
				cp->c_itime = cp->c_mtime;
			}
		}
		if (VATTR_IS_ACTIVE(vap, va_backup_time))
			cp->c_btime = vap->va_backup_time.tv_sec;
		cp->c_flag |= C_MODIFIED;
	}

	/*
	 * Set name encoding.
	 */
	VATTR_SET_SUPPORTED(vap, va_encoding);
	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
		cp->c_encoding = vap->va_encoding;
		hfs_setencodingbits(hfsmp, cp->c_encoding);
	}

	if ((error = hfs_update(vp, TRUE)) != 0)
		goto out;
	HFS_KNOTE(vp, NOTE_ATTRIB);
out:
	if (cp)
		hfs_unlock(cp);
	return (error);
}
883
884
885 /*
886 * Change the mode on a file.
887 * cnode must be locked before calling.
888 */
889 __private_extern__
890 int
891 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
892 {
893 register struct cnode *cp = VTOC(vp);
894
895 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
896 return (0);
897
898 // XXXdbg - don't allow modification of the journal or journal_info_block
899 if (VTOHFS(vp)->jnl && cp && cp->c_datafork) {
900 struct HFSPlusExtentDescriptor *extd;
901
902 extd = &cp->c_datafork->ff_extents[0];
903 if (extd->startBlock == VTOVCB(vp)->vcbJinfoBlock || extd->startBlock == VTOHFS(vp)->jnl_start) {
904 return EPERM;
905 }
906 }
907
908 #if OVERRIDE_UNKNOWN_PERMISSIONS
909 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
910 return (0);
911 };
912 #endif
913 cp->c_mode &= ~ALLPERMS;
914 cp->c_mode |= (mode & ALLPERMS);
915 cp->c_touch_chgtime = TRUE;
916 return (0);
917 }
918
919
920 __private_extern__
921 int
922 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
923 {
924 struct cnode *cp = VTOC(vp);
925 int retval = 0;
926 int is_member;
927
928 /*
929 * Disallow write attempts on read-only file systems;
930 * unless the file is a socket, fifo, or a block or
931 * character device resident on the file system.
932 */
933 switch (vnode_vtype(vp)) {
934 case VDIR:
935 case VLNK:
936 case VREG:
937 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
938 return (EROFS);
939 break;
940 default:
941 break;
942 }
943
944 /* If immutable bit set, nobody gets to write it. */
945 if (considerFlags && (cp->c_flags & IMMUTABLE))
946 return (EPERM);
947
948 /* Otherwise, user id 0 always gets access. */
949 if (!suser(cred, NULL))
950 return (0);
951
952 /* Otherwise, check the owner. */
953 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
954 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
955
956 /* Otherwise, check the groups. */
957 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
958 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
959 }
960
961 /* Otherwise, check everyone else. */
962 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
963 }
964
965
/*
 * Perform a chown operation on the cnode of vnode vp, setting its
 * owner to 'uid' and group to 'gid'.  Either id may be VNOVAL,
 * meaning "leave unchanged".
 *
 * The cnode must be locked prior to the call.  Permission checks
 * are assumed to have already been performed by the caller (see
 * the #if 0 block below).
 *
 * Returns 0 on success, ENOTSUP on plain (non-HFS Plus) volumes,
 * or a quota error when QUOTA is configured.
 */
__private_extern__
int
#if !QUOTA
hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
	__unused struct proc *p)
#else
hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
	__unused struct proc *p)
#endif
{
	register struct cnode *cp = VTOC(vp);
	uid_t ouid;
	gid_t ogid;
#if QUOTA
	int error = 0;
	register int i;
	int64_t change;		/* byte count to move between quota records */
#endif /* QUOTA */

	/* Ownership is only supported on HFS Plus volumes. */
	if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
		return (ENOTSUP);

	/*
	 * With MNT_UNKNOWNPERMISSIONS on-disk ids are not trusted,
	 * so a chown is silently a no-op.
	 */
	if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
		return (0);

	/* VNOVAL means "keep the current id". */
	if (uid == (uid_t)VNOVAL)
		uid = cp->c_uid;
	if (gid == (gid_t)VNOVAL)
		gid = cp->c_gid;

#if 0	/* we are guaranteed that this is already the case */
	/*
	 * If we don't own the file, are trying to change the owner
	 * of the file, or are not a member of the target group,
	 * the caller must be superuser or the call fails.
	 */
	if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
	    (gid != cp->c_gid &&
	     (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
	    (error = suser(cred, 0)))
		return (error);
#endif

	ogid = cp->c_gid;
	ouid = cp->c_uid;
#if QUOTA
	/*
	 * Classic BSD quota-transfer protocol: un-charge the file's
	 * blocks and inode from the old ids, switch the ids, then
	 * charge them to the new ids.  If the new charge fails, the
	 * code below rolls everything back to the old ids.
	 */
	if ((error = hfs_getinoquota(cp)))
		return (error);
	/* Release cached dquots for ids that are not actually changing. */
	if (ouid == uid) {
		dqrele(cp->c_dquot[USRQUOTA]);
		cp->c_dquot[USRQUOTA] = NODQUOT;
	}
	if (ogid == gid) {
		dqrele(cp->c_dquot[GRPQUOTA]);
		cp->c_dquot[GRPQUOTA] = NODQUOT;
	}

	/*
	 * Eventually need to account for (fake) a block per directory
	 * if (vnode_isdir(vp))
	 *	change = VTOHFS(vp)->blockSize;
	 * else
	 */

	change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
	/* Remove the usage from the old owner/group (cannot fail). */
	(void) hfs_chkdq(cp, -change, cred, CHOWN);
	(void) hfs_chkiq(cp, -1, cred, CHOWN);
	for (i = 0; i < MAXQUOTAS; i++) {
		dqrele(cp->c_dquot[i]);
		cp->c_dquot[i] = NODQUOT;
	}
#endif /* QUOTA */
	cp->c_gid = gid;
	cp->c_uid = uid;
#if QUOTA
	/* Try to charge the usage to the new owner/group. */
	if ((error = hfs_getinoquota(cp)) == 0) {
		if (ouid == uid) {
			dqrele(cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
			if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
				goto good;
			else
				/* Inode charge failed: undo the block charge. */
				(void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
		}
		for (i = 0; i < MAXQUOTAS; i++) {
			dqrele(cp->c_dquot[i]);
			cp->c_dquot[i] = NODQUOT;
		}
	}
	/*
	 * The new ids would exceed quota: restore the old ids and
	 * re-charge the usage back to them (FORCE so it cannot fail).
	 */
	cp->c_gid = ogid;
	cp->c_uid = ouid;
	if (hfs_getinoquota(cp) == 0) {
		if (ouid == uid) {
			dqrele(cp->c_dquot[USRQUOTA]);
			cp->c_dquot[USRQUOTA] = NODQUOT;
		}
		if (ogid == gid) {
			dqrele(cp->c_dquot[GRPQUOTA]);
			cp->c_dquot[GRPQUOTA] = NODQUOT;
		}
		(void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
		(void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
		(void) hfs_getinoquota(cp);
	}
	return (error);
good:
	if (hfs_getinoquota(cp))
		panic("hfs_chown: lost quota");
#endif /* QUOTA */


	/*
	   According to the SUSv3 Standard, chown() shall mark
	   for update the st_ctime field of the file.
	   (No exceptions mentioned)
	*/
	cp->c_touch_chgtime = TRUE;
	return (0);
}
1095
1096
/*
 * The hfs_exchange routine swaps the fork data in two files by
 * exchanging some of the information in the cnode.  It is used
 * to preserve the file ID when updating an existing file, in
 * case the file is being tracked through its file ID. Typically
 * its used after creating a new file during a safe-save.
 *
 * Both files must be regular files (no resource forks, no
 * directories) on the same volume.  On success each vnode keeps
 * its fork data but takes on the other's catalog identity
 * (descriptor, ownership, mode, times, flags, Finder info).
 */
static int
hfs_vnop_exchange(ap)
	struct vnop_exchange_args /* {
		struct vnode *a_fvp;
		struct vnode *a_tvp;
		int a_options;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode *from_vp = ap->a_fvp;
	struct vnode *to_vp = ap->a_tvp;
	struct cnode *from_cp;
	struct cnode *to_cp;
	struct hfsmount *hfsmp;
	struct cat_desc tempdesc;	/* holds from_cp's descriptor during the swap */
	struct cat_attr tempattr;	/* holds from_cp's attributes during the swap */
	const unsigned char *from_nameptr;
	const unsigned char *to_nameptr;
	char from_iname[32];
	char to_iname[32];
	u_int32_t tempflag;
	cnid_t from_parid;
	cnid_t to_parid;
	int lockflags;
	int error = 0, started_tr = 0, got_cookie = 0;
	cat_cookie_t cookie;

	/* The files must be on the same volume. */
	if (vnode_mount(from_vp) != vnode_mount(to_vp))
		return (EXDEV);

	if (from_vp == to_vp)
		return (EINVAL);

	/* Lock both cnodes (hfs_lockpair takes them in a consistent order). */
	if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
		return (error);

	from_cp = VTOC(from_vp);
	to_cp = VTOC(to_vp);
	hfsmp = VTOHFS(from_vp);

	/* Only normal files can be exchanged. */
	if (!vnode_isreg(from_vp) || !vnode_isreg(to_vp) ||
	    VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
		error = EINVAL;
		goto exit;
	}

	// XXXdbg - don't allow modification of the journal or journal_info_block
	if (hfsmp->jnl) {
		struct HFSPlusExtentDescriptor *extd;

		/* A file starting at the journal info block or the journal
		 * itself is one of the journal files; refuse to touch it. */
		if (from_cp->c_datafork) {
			extd = &from_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(from_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				error = EPERM;
				goto exit;
			}
		}

		if (to_cp->c_datafork) {
			extd = &to_cp->c_datafork->ff_extents[0];
			if (extd->startBlock == VTOVCB(to_vp)->vcbJinfoBlock || extd->startBlock == hfsmp->jnl_start) {
				error = EPERM;
				goto exit;
			}
		}
	}

	if ((error = hfs_start_transaction(hfsmp)) != 0) {
		goto exit;
	}
	started_tr = 1;

	/*
	 * Reserve some space in the Catalog file.
	 */
	if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
		goto exit;
	}
	got_cookie = 1;

	/* The backend code always tries to delete the virtual
	 * extent id for exchanging files so we need to lock
	 * the extents b-tree.
	 */
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);

	/* Account for the location of the catalog objects.
	 * A hard link's catalog record actually lives in the private
	 * metadata ("hardlink") directory under an inode name derived
	 * from its link reference, so build that name/parent instead
	 * of using the visible descriptor.
	 */
	if (from_cp->c_flag & C_HARDLINK) {
		MAKE_INODE_NAME(from_iname, sizeof(from_iname),
				from_cp->c_attr.ca_linkref);
		from_nameptr = (unsigned char *)from_iname;
		from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
		from_cp->c_hint = 0;
	} else {
		from_nameptr = from_cp->c_desc.cd_nameptr;
		from_parid = from_cp->c_parentcnid;
	}
	if (to_cp->c_flag & C_HARDLINK) {
		MAKE_INODE_NAME(to_iname, sizeof(to_iname),
				to_cp->c_attr.ca_linkref);
		to_nameptr = (unsigned char *)to_iname;
		to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
		to_cp->c_hint = 0;
	} else {
		to_nameptr = to_cp->c_desc.cd_nameptr;
		to_parid = to_cp->c_parentcnid;
	}

	/* Do the exchange */
	error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
	                        to_parid, from_cp->c_hint, to_cp->c_hint);
	hfs_systemfile_unlock(hfsmp, lockflags);

	/*
	 * Note that we don't need to exchange any extended attributes
	 * since the attributes are keyed by file ID.
	 */

	if (error != E_NONE) {
		error = MacToVFSError(error);
		goto exit;
	}

	/* Purge the vnodes from the name cache */
	if (from_vp)
		cache_purge(from_vp);
	if (to_vp)
		cache_purge(to_vp);

	/* Save a copy of from attributes before swapping. */
	bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
	bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
	tempflag = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);

	/*
	 * Swap the descriptors and all non-fork related attributes.
	 * (except the modify date)
	 */
	bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));

	from_cp->c_hint = 0;
	/* The fileid now follows the (swapped-in) cnid. */
	from_cp->c_fileid = from_cp->c_cnid;
	from_cp->c_itime = to_cp->c_itime;
	from_cp->c_btime = to_cp->c_btime;
	from_cp->c_atime = to_cp->c_atime;
	from_cp->c_ctime = to_cp->c_ctime;
	from_cp->c_gid = to_cp->c_gid;
	from_cp->c_uid = to_cp->c_uid;
	from_cp->c_flags = to_cp->c_flags;
	from_cp->c_mode = to_cp->c_mode;
	from_cp->c_linkcount = to_cp->c_linkcount;
	/* Only the hardlink/xattr state travels with the identity. */
	from_cp->c_flag = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
	from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
	bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);

	bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
	to_cp->c_hint = 0;
	to_cp->c_fileid = to_cp->c_cnid;
	to_cp->c_itime = tempattr.ca_itime;
	to_cp->c_btime = tempattr.ca_btime;
	to_cp->c_atime = tempattr.ca_atime;
	to_cp->c_ctime = tempattr.ca_ctime;
	to_cp->c_gid = tempattr.ca_gid;
	to_cp->c_uid = tempattr.ca_uid;
	to_cp->c_flags = tempattr.ca_flags;
	to_cp->c_mode = tempattr.ca_mode;
	to_cp->c_linkcount = tempattr.ca_linkcount;
	to_cp->c_flag = tempflag;
	to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
	bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);

	/* Rehash the cnodes using their new file IDs */
	hfs_chash_rehash(from_cp, to_cp);

	/*
	 * When a file moves out of "Cleanup At Startup"
	 * we can drop its NODUMP status.
	 */
	if ((from_cp->c_flags & UF_NODUMP) &&
	    (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
		from_cp->c_flags &= ~UF_NODUMP;
		from_cp->c_touch_chgtime = TRUE;
	}
	if ((to_cp->c_flags & UF_NODUMP) &&
	    (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
		to_cp->c_flags &= ~UF_NODUMP;
		to_cp->c_touch_chgtime = TRUE;
	}

	HFS_KNOTE(from_vp, NOTE_ATTRIB);
	HFS_KNOTE(to_vp, NOTE_ATTRIB);

exit:
	if (got_cookie) {
		cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
	}
	if (started_tr) {
		hfs_end_transaction(hfsmp);
	}

	hfs_unlockpair(from_cp, to_cp);
	return (error);
}
1309
1310
/*
 * Flush a cnode's dirty data to disk and, when requested, its
 * metadata as well.
 *
 *   vp       - vnode to sync
 *   waitfor  - MNT_WAIT requests synchronous behavior
 *   fullsync - non-zero forces all metadata changes to stable
 *              storage (journal flush or full b-tree sync)
 *   p        - calling process, passed through to hfs_metasync()
 *
 * cnode must be locked.  The cnode lock is temporarily dropped
 * around calls into the cluster layer (cluster_push/cluster_write)
 * and reacquired afterwards.
 */
__private_extern__
int
hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
{
	struct cnode *cp = VTOC(vp);
	struct filefork *fp = NULL;
	int retval = 0;
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct timeval tv;
	int wait;
	int lockflag;
	int took_trunc_lock = 0;

	wait = (waitfor == MNT_WAIT);
	/* Global tunable that promotes every fsync to a full fsync. */
	if (always_do_fullfsync)
		fullsync = 1;

	/* HFS directories don't have any data blocks. */
	if (vnode_isdir(vp))
		goto metasync;

	/*
	 * For system files flush the B-tree header and
	 * for regular files write out any clusters
	 */
	if (vnode_issystem(vp)) {
	    if (VTOF(vp)->fcbBTCBPtr != NULL) {
			// XXXdbg
			if (hfsmp->jnl == NULL) {
				BTFlushPath(VTOF(vp));
			}
	    }
	} else if (UBCINFOEXISTS(vp)) {
		hfs_unlock(cp);
		hfs_lock_truncate(cp, TRUE);
		took_trunc_lock = 1;

		/* Don't hold cnode lock when calling into cluster layer. */
		(void) cluster_push(vp, wait ? IO_SYNC : 0);

		hfs_lock(cp, HFS_FORCE_LOCK);
	}
	/*
	 * When MNT_WAIT is requested and the zero fill timeout
	 * has expired then we must explicitly zero out any areas
	 * that are currently marked invalid (holes).
	 *
	 * Files with NODUMP can bypass zero filling here.
	 */
	if ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
	    ((cp->c_flags & UF_NODUMP) == 0) &&
	    UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) && (fp = VTOF(vp)) &&
	    cp->c_zftimeout != 0) {
		microuptime(&tv);
		if (!fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
			/* Remember that a force sync was requested. */
			cp->c_flag |= C_ZFWANTSYNC;
			goto datasync;
		}
		if (!took_trunc_lock) {
			hfs_unlock(cp);
			hfs_lock_truncate(cp, TRUE);
			hfs_lock(cp, HFS_FORCE_LOCK);
			took_trunc_lock = 1;
		}

		/* Zero-fill each invalid (unwritten) range of the file. */
		while (!CIRCLEQ_EMPTY(&fp->ff_invalidranges)) {
			struct rl_entry *invalid_range = CIRCLEQ_FIRST(&fp->ff_invalidranges);
			off_t start = invalid_range->rl_start;
			off_t end = invalid_range->rl_end;

			/* The range about to be written must be validated
			 * first, so that VNOP_BLOCKMAP() will return the
			 * appropriate mapping for the cluster code:
			 */
			rl_remove(start, end, &fp->ff_invalidranges);

			/* Don't hold cnode lock when calling into cluster layer. */
			hfs_unlock(cp);
			(void) cluster_write(vp, (struct uio *) 0,
					fp->ff_size, end + 1, start, (off_t)0,
					IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
			hfs_lock(cp, HFS_FORCE_LOCK);
			cp->c_flag |= C_MODIFIED;
		}
		hfs_unlock(cp);
		(void) cluster_push(vp, wait ? IO_SYNC : 0);
		hfs_lock(cp, HFS_FORCE_LOCK);

		cp->c_flag &= ~C_ZFWANTSYNC;
		cp->c_zftimeout = 0;
	}
datasync:
	if (took_trunc_lock)
		hfs_unlock_truncate(cp, TRUE);

	/*
	 * if we have a journal and if journal_active() returns != 0 then the
	 * we shouldn't do anything to a locked block (because it is part
	 * of a transaction).  otherwise we'll just go through the normal
	 * code path and flush the buffer.  note journal_active() can return
	 * -1 if the journal is invalid -- however we still need to skip any
	 * locked blocks as they get cleaned up when we finish the transaction
	 * or close the journal.
	 */
	// if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
	if (hfsmp->jnl)
		lockflag = BUF_SKIP_LOCKED;
	else
		lockflag = 0;

	/*
	 * Flush all dirty buffers associated with a vnode.
	 */
	buf_flushdirtyblks(vp, wait, lockflag, "hfs_fsync");

metasync:
	if (vnode_isreg(vp) && vnode_issystem(vp)) {
		/* System (b-tree) file: just record the sync time. */
		if (VTOF(vp)->fcbBTCBPtr != NULL) {
			microuptime(&tv);
			BTSetLastSync(VTOF(vp), tv.tv_sec);
		}
		cp->c_touch_acctime = FALSE;
		cp->c_touch_chgtime = FALSE;
		cp->c_touch_modtime = FALSE;
	} else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
		retval = hfs_update(vp, wait);

		/*
		 * When MNT_WAIT is requested push out the catalog record for
		 * this file.  If they asked for a full fsync, we can skip this
		 * because the journal_flush or hfs_metasync_all will push out
		 * all of the metadata changes.
		 */
		if ((retval == 0) && wait && !fullsync && cp->c_hint &&
		    !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
			hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
		}

		/*
		 * If this was a full fsync, make sure all metadata
		 * changes get to stable storage.
		 */
		if (fullsync) {
			if (hfsmp->jnl) {
				journal_flush(hfsmp->jnl);
			} else {
				retval = hfs_metasync_all(hfsmp);
				/* XXX need to pass context! */
				VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
			}
		}
	}

	return (retval);
}
1470
1471
1472 /* Sync an hfs catalog b-tree node */
1473 static int
1474 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
1475 {
1476 vnode_t vp;
1477 buf_t bp;
1478 int lockflags;
1479
1480 vp = HFSTOVCB(hfsmp)->catalogRefNum;
1481
1482 // XXXdbg - don't need to do this on a journaled volume
1483 if (hfsmp->jnl) {
1484 return 0;
1485 }
1486
1487 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
1488 /*
1489 * Look for a matching node that has been delayed
1490 * but is not part of a set (B_LOCKED).
1491 *
1492 * BLK_ONLYVALID causes buf_getblk to return a
1493 * buf_t for the daddr64_t specified only if it's
1494 * currently resident in the cache... the size
1495 * parameter to buf_getblk is ignored when this flag
1496 * is set
1497 */
1498 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
1499
1500 if (bp) {
1501 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
1502 (void) VNOP_BWRITE(bp);
1503 else
1504 buf_brelse(bp);
1505 }
1506
1507 hfs_systemfile_unlock(hfsmp, lockflags);
1508
1509 return (0);
1510 }
1511
1512
1513 /*
1514 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
1515 * without a journal. Note that the volume bitmap does not get written;
1516 * we rely on fsck_hfs to fix that up (which it can do without any loss
1517 * of data).
1518 */
1519 static int
1520 hfs_metasync_all(struct hfsmount *hfsmp)
1521 {
1522 int lockflags;
1523
1524 /* Lock all of the B-trees so we get a mutually consistent state */
1525 lockflags = hfs_systemfile_lock(hfsmp,
1526 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1527
1528 /* Sync each of the B-trees */
1529 if (hfsmp->hfs_catalog_vp)
1530 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
1531 if (hfsmp->hfs_extents_vp)
1532 hfs_btsync(hfsmp->hfs_extents_vp, 0);
1533 if (hfsmp->hfs_attribute_vp)
1534 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
1535
1536 /* Wait for all of the writes to complete */
1537 if (hfsmp->hfs_catalog_vp)
1538 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
1539 if (hfsmp->hfs_extents_vp)
1540 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
1541 if (hfsmp->hfs_attribute_vp)
1542 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
1543
1544 hfs_systemfile_unlock(hfsmp, lockflags);
1545
1546 return 0;
1547 }
1548
1549
/*
 * buf_iterate() callback for hfs_btsync(): release the buffer's
 * B_LOCKED bit (which holds it back as part of a delayed set) and
 * start an asynchronous write.  Always claims the buffer.
 */
/*ARGSUSED 1*/
static int
hfs_btsync_callback(struct buf *bp, __unused void *dummy)
{
	buf_clearflags(bp, B_LOCKED);
	(void) buf_bawrite(bp);

	return(BUF_CLAIMED);
}
1559
1560
1561 __private_extern__
1562 int
1563 hfs_btsync(struct vnode *vp, int sync_transaction)
1564 {
1565 struct cnode *cp = VTOC(vp);
1566 struct timeval tv;
1567 int flags = 0;
1568
1569 if (sync_transaction)
1570 flags |= BUF_SKIP_NONLOCKED;
1571 /*
1572 * Flush all dirty buffers associated with b-tree.
1573 */
1574 buf_iterate(vp, hfs_btsync_callback, flags, 0);
1575
1576 microuptime(&tv);
1577 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
1578 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
1579 cp->c_touch_acctime = FALSE;
1580 cp->c_touch_chgtime = FALSE;
1581 cp->c_touch_modtime = FALSE;
1582
1583 return 0;
1584 }
1585
1586 /*
1587 * Remove a directory.
1588 */
1589 static int
1590 hfs_vnop_rmdir(ap)
1591 struct vnop_rmdir_args /* {
1592 struct vnode *a_dvp;
1593 struct vnode *a_vp;
1594 struct componentname *a_cnp;
1595 vfs_context_t a_context;
1596 } */ *ap;
1597 {
1598 struct vnode *dvp = ap->a_dvp;
1599 struct vnode *vp = ap->a_vp;
1600 struct cnode *dcp = VTOC(dvp);
1601 struct cnode *cp = VTOC(vp);
1602 int error;
1603
1604 if (!S_ISDIR(cp->c_mode)) {
1605 return (ENOTDIR);
1606 }
1607 if (dvp == vp) {
1608 return (EINVAL);
1609 }
1610 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
1611 return (error);
1612 }
1613 error = hfs_removedir(dvp, vp, ap->a_cnp, 0);
1614 hfs_unlockpair(dcp, cp);
1615
1616 return (error);
1617 }
1618
/*
 * Remove a directory
 *
 * Both dvp and vp cnodes are locked
 *
 * Returns 0 on success (including the benign races where the cnode
 * is already deleted or no longer matches the component name),
 * ENOTEMPTY if the directory has children, EINVAL for ".", or an
 * errno from the catalog/transaction layers.
 */
static int
hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
              int skip_reserve)
{
	struct cnode *cp;
	struct cnode *dcp;
	struct hfsmount * hfsmp;
	struct cat_desc desc;
	int lockflags;
	int error = 0, started_tr = 0;

	cp = VTOC(vp);
	dcp = VTOC(dvp);
	hfsmp = VTOHFS(vp);

	if (dcp == cp) {
		return (EINVAL);	/* cannot remove "." */
	}
	/* Lost a race with another delete; nothing left to do. */
	if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
		return (0);
	}
	if (cp->c_entries != 0) {
		return (ENOTEMPTY);
	}

	/* Check if we're removing the last link to an empty directory. */
	if (cp->c_flag & C_HARDLINK) {
		/* We could also return EBUSY here */
		return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
	}

	/*
	 * Directories carrying extended attributes are routed through
	 * hfs_removefile (allow_dirs=1) so the attributes can be
	 * deleted in separate transactions.
	 */
	if ((hfsmp->hfs_attribute_vp != NULL) &&
	    (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {

		return hfs_removefile(dvp, vp, cnp, 0, 0, 1);
	}

	/* Serialize with other modifications of the parent directory. */
	dcp->c_flag |= C_DIR_MODIFICATION;

#if QUOTA
	if (hfsmp->hfs_flags & HFS_QUOTAS)
		(void)hfs_getinoquota(cp);
#endif
	if ((error = hfs_start_transaction(hfsmp)) != 0) {
	    goto out;
	}
	started_tr = 1;

	/*
	 * Verify the directory is empty (and valid).
	 * (Rmdir ".." won't be valid since
	 *  ".." will contain a reference to
	 *  the current directory and thus be
	 *  non-empty.)
	 */
	if ((dcp->c_flags & APPEND) || (cp->c_flags & (IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the namei cache: */
	cache_purge(vp);

	/*
	 * Protect against a race with rename by using the component
	 * name passed in and parent id from dvp (instead of using
	 * the cp->c_desc which may have changed).
	 */
	desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
	desc.cd_namelen = cnp->cn_namelen;
	desc.cd_parentcnid = dcp->c_fileid;
	desc.cd_cnid = cp->c_cnid;
	desc.cd_flags = CD_ISDIR;
	desc.cd_encoding = cp->c_encoding;
	desc.cd_hint = 0;

	/* If the name no longer refers to this cnode, someone renamed
	 * it out from under us; treat the remove as already done. */
	if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
	    error = 0;
	    goto out;
	}

	/* Remove entry from catalog */
	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);

	if (!skip_reserve) {
		/*
		 * Reserve some space in the Catalog file.
		 */
		if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
			hfs_systemfile_unlock(hfsmp, lockflags);
			goto out;
		}
	}

	error = cat_delete(hfsmp, &desc, &cp->c_attr);
	if (error == 0) {
		/* The parent lost a child */
		if (dcp->c_entries > 0)
			dcp->c_entries--;
		DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
		dcp->c_dirchangecnt++;
		dcp->c_touch_chgtime = TRUE;
		dcp->c_touch_modtime = TRUE;
		hfs_touchtimes(hfsmp, cp);
		(void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
		cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
	}

	hfs_systemfile_unlock(hfsmp, lockflags);

	if (error)
		goto out;

#if QUOTA
	if (hfsmp->hfs_flags & HFS_QUOTAS)
		(void)hfs_chkiq(cp, -1, NOCRED, 0);
#endif /* QUOTA */

	HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK | NOTE_ATTRIB);

	hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));

	/*
	 * directory open or in use (e.g. opendir() or current working
	 * directory for some process); wait for inactive to actually
	 * remove catalog entry
	 */
	if (vnode_isinuse(vp, 0)) {
		cp->c_flag |= C_DELETED;
	} else {
		cp->c_mode = 0; /* Makes the vnode go away...see inactive */
		cp->c_flag |= C_NOEXISTS;
	}
out:
	/* Release the directory-modification gate and wake waiters. */
	dcp->c_flag &= ~C_DIR_MODIFICATION;
	wakeup((caddr_t)&dcp->c_flag);

	HFS_KNOTE(vp, NOTE_DELETE);

	if (started_tr) {
	    hfs_end_transaction(hfsmp);
	}

	return (error);
}
1769
1770
/*
 * Remove a file or link (VNOP entry point).
 *
 * Takes the truncate lock and both cnode locks, then delegates the
 * actual removal to hfs_removefile().  Afterwards it may force an
 * idle resource-fork vnode to be recycled so the file's disk space
 * can be reclaimed promptly (see comment below).
 */
static int
hfs_vnop_remove(ap)
	struct vnop_remove_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
		int a_flags;
		vfs_context_t a_context;
	} */ *ap;
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = ap->a_vp;
	struct cnode *dcp = VTOC(dvp);
	struct cnode *cp = VTOC(vp);
	struct vnode *rvp = cp->c_rsrc_vp;
	int error=0, recycle_rsrc=0, rvid=0;

	if (dvp == vp) {
		return (EINVAL);
	}

	/* Truncate lock must be taken before the cnode locks. */
	hfs_lock_truncate(cp, TRUE);

	if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
		hfs_unlock_truncate(cp, TRUE);
		return (error);
	}
	error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0);

	//
	// If the remove succeeded and it's an open-unlinked file that has
	// a resource fork vnode that's not in use, we will want to recycle
	// the rvp *after* we're done unlocking everything.  Otherwise the
	// resource vnode will keep a v_parent reference on this vnode which
	// prevents it from going through inactive/reclaim which means that
	// the disk space associated with this file won't get free'd until
	// something forces the resource vnode to get recycled (and that can
	// take a very long time).
	//
	if (error == 0 && (cp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
	    /* Remember the vid so we can detect reuse after unlocking. */
	    rvid = vnode_vid(rvp);
	    recycle_rsrc = 1;
	}

	/*
	 * Drop the truncate lock before unlocking the cnode
	 * (which can potentially perform a vnode_put and
	 * recycle the vnode which in turn might require the
	 * truncate lock)
	 */
	hfs_unlock_truncate(cp, TRUE);
	hfs_unlockpair(dcp, cp);

	if (recycle_rsrc && vnode_getwithvid(rvp, rvid) == 0) {
	    /* NOTE(review): the ref/rele pair appears intended to push
	     * the vnode through inactive processing before the recycle —
	     * confirm against the vnode KPI. */
	    vnode_ref(rvp);
	    vnode_rele(rvp);
	    vnode_recycle(rvp);
	    vnode_put(rvp);
	}

	return (error);
}
1836
1837
/*
 * buf_iterate() callback used when removing a symlink on a journaled
 * volume: the dirty buffer belongs to the current transaction, so ask
 * the journal to discard it instead of letting it reach the disk.
 * Panics if handed a buffer that is not metadata.
 */
static int
hfs_removefile_callback(struct buf *bp, void *hfsmp) {

	if ( !(buf_flags(bp) & B_META))
		panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
	/*
	 * it's part of the current transaction, kill it.
	 */
	journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);

	return (BUF_CLAIMED);
}
1850
1851 /*
1852 * hfs_removefile
1853 *
1854 * Similar to hfs_vnop_remove except there are additional options.
1855 *
1856 * Requires cnode and truncate locks to be held.
1857 */
1858 static int
1859 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
1860 int flags, int skip_reserve, int allow_dirs)
1861 {
1862 struct vnode *rvp = NULL;
1863 struct cnode *cp;
1864 struct cnode *dcp;
1865 struct hfsmount *hfsmp;
1866 struct cat_desc desc;
1867 struct timeval tv;
1868 vfs_context_t ctx = cnp->cn_context;
1869 int dataforkbusy = 0;
1870 int rsrcforkbusy = 0;
1871 int truncated = 0;
1872 int lockflags;
1873 int error = 0;
1874 int started_tr = 0;
1875 int isbigfile = 0, defer_remove=0, isdir=0;
1876
1877 cp = VTOC(vp);
1878 dcp = VTOC(dvp);
1879 hfsmp = VTOHFS(vp);
1880
1881 /* Check if we lost a race post lookup. */
1882 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
1883 return (0);
1884 }
1885
1886 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid)) {
1887 return 0;
1888 }
1889
1890 /* Make sure a remove is permitted */
1891 if (VNODE_IS_RSRC(vp)) {
1892 return (EPERM);
1893 }
1894 /* Don't allow deleting the journal or journal_info_block. */
1895 if (hfsmp->jnl &&
1896 (cp->c_fileid == hfsmp->hfs_jnlfileid || cp->c_fileid == hfsmp->hfs_jnlinfoblkid)) {
1897 return (EPERM);
1898 }
1899 /*
1900 * Hard links require special handling.
1901 */
1902 if (cp->c_flag & C_HARDLINK) {
1903 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
1904 return (EBUSY);
1905 } else {
1906 /* A directory hard link with a link count of one is
1907 * treated as a regular directory. Therefore it should
1908 * only be removed using rmdir().
1909 */
1910 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
1911 (allow_dirs == 0)) {
1912 return (EPERM);
1913 }
1914 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
1915 }
1916 }
1917 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
1918 if (vnode_isdir(vp)) {
1919 if (allow_dirs == 0)
1920 return (EPERM); /* POSIX */
1921 isdir = 1;
1922 }
1923 /* Sanity check the parent ids. */
1924 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
1925 (cp->c_parentcnid != dcp->c_fileid)) {
1926 return (EINVAL);
1927 }
1928
1929 dcp->c_flag |= C_DIR_MODIFICATION;
1930
1931 // this guy is going away so mark him as such
1932 cp->c_flag |= C_DELETED;
1933
1934
1935 /* Remove our entry from the namei cache. */
1936 cache_purge(vp);
1937
1938 /*
1939 * Acquire a vnode for a non-empty resource fork.
1940 * (needed for hfs_truncate)
1941 */
1942 if (isdir == 0 && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
1943 /*
1944 * We must avoid calling hfs_vgetrsrc() when we have
1945 * an active resource fork vnode to avoid deadlocks
1946 * when that vnode is in the VL_TERMINATE state. We
1947 * can defer removing the file and its resource fork
1948 * until the call to hfs_vnop_inactive() occurs.
1949 */
1950 if (cp->c_rsrc_vp) {
1951 defer_remove = 1;
1952 } else {
1953 error = hfs_vgetrsrc(hfsmp, vp, &rvp, FALSE);
1954 if (error)
1955 goto out;
1956 /* Defer the vnode_put on rvp until the hfs_unlock(). */
1957 cp->c_flag |= C_NEED_RVNODE_PUT;
1958 }
1959 }
1960 /* Check if this file is being used. */
1961 if (isdir == 0) {
1962 dataforkbusy = vnode_isinuse(vp, 0);
1963 rsrcforkbusy = rvp ? vnode_isinuse(rvp, 0) : 0;
1964 }
1965
1966 /* Check if we have to break the deletion into multiple pieces. */
1967 if (isdir == 0) {
1968 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
1969 }
1970
1971 /* Check if the file has xattrs. If it does we'll have to delete them in
1972 individual transactions in case there are too many */
1973 if ((hfsmp->hfs_attribute_vp != NULL) &&
1974 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
1975 defer_remove = 1;
1976 }
1977
1978 /*
1979 * Carbon semantics prohibit deleting busy files.
1980 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
1981 */
1982 if (dataforkbusy || rsrcforkbusy) {
1983 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
1984 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
1985 error = EBUSY;
1986 goto out;
1987 }
1988 }
1989
1990 #if QUOTA
1991 if (hfsmp->hfs_flags & HFS_QUOTAS)
1992 (void)hfs_getinoquota(cp);
1993 #endif /* QUOTA */
1994
1995 /* Check if we need a ubc_setsize. */
1996 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy)) {
1997 /*
1998 * A ubc_setsize can cause a pagein so defer it
1999 * until after the cnode lock is dropped. The
2000 * cnode lock cannot be dropped/reacquired here
2001 * since we might already hold the journal lock.
2002 */
2003 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
2004 cp->c_flag |= C_NEED_DATA_SETSIZE;
2005 }
2006 if (!rsrcforkbusy && rvp) {
2007 cp->c_flag |= C_NEED_RSRC_SETSIZE;
2008 }
2009 }
2010
2011 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2012 goto out;
2013 }
2014 started_tr = 1;
2015
2016 // XXXdbg - if we're journaled, kill any dirty symlink buffers
2017 if (hfsmp->jnl && vnode_islnk(vp))
2018 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
2019
2020 /*
2021 * Truncate any non-busy forks. Busy forks will
2022 * get truncated when their vnode goes inactive.
2023 *
2024 * Since we're already inside a transaction,
2025 * tell hfs_truncate to skip the ubc_setsize.
2026 */
2027 if (isdir == 0) {
2028 int mode = cp->c_mode;
2029
2030 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
2031 cp->c_mode = 0; /* Suppress hfs_update */
2032 error = hfs_truncate(vp, (off_t)0, IO_NDELAY, 1, ctx);
2033 cp->c_mode = mode;
2034 if (error)
2035 goto out;
2036 truncated = 1;
2037 }
2038 if (!rsrcforkbusy && rvp) {
2039 cp->c_mode = 0; /* Suppress hfs_update */
2040 error = hfs_truncate(rvp, (off_t)0, IO_NDELAY, 1, ctx);
2041 cp->c_mode = mode;
2042 if (error)
2043 goto out;
2044 truncated = 1;
2045 }
2046 }
2047
2048 /*
2049 * Protect against a race with rename by using the component
2050 * name passed in and parent id from dvp (instead of using
2051 * the cp->c_desc which may have changed).
2052 */
2053 desc.cd_flags = 0;
2054 desc.cd_encoding = cp->c_desc.cd_encoding;
2055 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
2056 desc.cd_namelen = cnp->cn_namelen;
2057 desc.cd_parentcnid = dcp->c_fileid;
2058 desc.cd_hint = cp->c_desc.cd_hint;
2059 desc.cd_cnid = cp->c_cnid;
2060 microtime(&tv);
2061
2062 /*
2063 * There are two cases to consider:
2064 * 1. File is busy/big/defer_remove ==> move/rename the file
2065 * 2. File is not in use ==> remove the file
2066 */
2067 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
2068 char delname[32];
2069 struct cat_desc to_desc;
2070 struct cat_desc todir_desc;
2071
2072 /*
2073 * Orphan this file (move to hidden directory).
2074 */
2075 bzero(&todir_desc, sizeof(todir_desc));
2076 todir_desc.cd_parentcnid = 2;
2077
2078 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
2079 bzero(&to_desc, sizeof(to_desc));
2080 to_desc.cd_nameptr = (const u_int8_t *)delname;
2081 to_desc.cd_namelen = strlen(delname);
2082 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
2083 to_desc.cd_flags = 0;
2084 to_desc.cd_cnid = cp->c_cnid;
2085
2086 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2087 if (!skip_reserve) {
2088 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
2089 hfs_systemfile_unlock(hfsmp, lockflags);
2090 goto out;
2091 }
2092 }
2093
2094 error = cat_rename(hfsmp, &desc, &todir_desc,
2095 &to_desc, (struct cat_desc *)NULL);
2096
2097 if (error == 0) {
2098 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
2099 if (isdir == 1) {
2100 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
2101 }
2102 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
2103 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
2104
2105 /* Update the parent directory */
2106 if (dcp->c_entries > 0)
2107 dcp->c_entries--;
2108 if (isdir == 1) {
2109 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
2110 }
2111 dcp->c_dirchangecnt++;
2112 dcp->c_ctime = tv.tv_sec;
2113 dcp->c_mtime = tv.tv_sec;
2114 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2115
2116 /* Update the file's state */
2117 cp->c_flag |= C_DELETED;
2118 cp->c_ctime = tv.tv_sec;
2119 --cp->c_linkcount;
2120 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
2121 }
2122 hfs_systemfile_unlock(hfsmp, lockflags);
2123 if (error)
2124 goto out;
2125
2126 } else /* Not busy */ {
2127
2128 if (cp->c_blocks > 0) {
2129 printf("hfs_remove: attempting to delete a non-empty file %s\n",
2130 cp->c_desc.cd_nameptr);
2131 error = EBUSY;
2132 goto out;
2133 }
2134
2135 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2136 if (!skip_reserve) {
2137 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
2138 hfs_systemfile_unlock(hfsmp, lockflags);
2139 goto out;
2140 }
2141 }
2142
2143 error = cat_delete(hfsmp, &desc, &cp->c_attr);
2144
2145 if (error && error != ENXIO && error != ENOENT && truncated) {
2146 if ((cp->c_datafork && cp->c_datafork->ff_size != 0) ||
2147 (cp->c_rsrcfork && cp->c_rsrcfork->ff_size != 0)) {
2148 panic("hfs: remove: couldn't delete a truncated file! (%d, data sz %lld; rsrc sz %lld)",
2149 error, cp->c_datafork->ff_size, cp->c_rsrcfork->ff_size);
2150 } else {
2151 printf("hfs: remove: strangely enough, deleting truncated file %s (%d) got err %d\n",
2152 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, error);
2153 }
2154 }
2155 if (error == 0) {
2156 /* Update the parent directory */
2157 if (dcp->c_entries > 0)
2158 dcp->c_entries--;
2159 dcp->c_dirchangecnt++;
2160 dcp->c_ctime = tv.tv_sec;
2161 dcp->c_mtime = tv.tv_sec;
2162 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2163 }
2164 hfs_systemfile_unlock(hfsmp, lockflags);
2165 if (error)
2166 goto out;
2167
2168 #if QUOTA
2169 if (hfsmp->hfs_flags & HFS_QUOTAS)
2170 (void)hfs_chkiq(cp, -1, NOCRED, 0);
2171 #endif /* QUOTA */
2172
2173 cp->c_mode = 0;
2174 truncated = 0; // because the catalog entry is gone
2175 cp->c_flag |= C_NOEXISTS;
2176 cp->c_flag &= ~C_DELETED;
2177 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
2178 --cp->c_linkcount;
2179
2180 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
2181 }
2182
2183 /*
2184 * All done with this cnode's descriptor...
2185 *
2186 * Note: all future catalog calls for this cnode must be by
2187 * fileid only. This is OK for HFS (which doesn't have file
2188 * thread records) since HFS doesn't support the removal of
2189 * busy files.
2190 */
2191 cat_releasedesc(&cp->c_desc);
2192
2193 HFS_KNOTE(dvp, NOTE_WRITE);
2194
2195 out:
2196 if (error) {
2197 cp->c_flag &= ~C_DELETED;
2198 }
2199
2200 /* Commit the truncation to the catalog record */
2201 if (truncated) {
2202 cp->c_flag |= C_FORCEUPDATE;
2203 cp->c_touch_chgtime = TRUE;
2204 cp->c_touch_modtime = TRUE;
2205 (void) hfs_update(vp, 0);
2206 }
2207
2208 if (started_tr) {
2209 hfs_end_transaction(hfsmp);
2210 }
2211
2212 dcp->c_flag &= ~C_DIR_MODIFICATION;
2213 wakeup((caddr_t)&dcp->c_flag);
2214
2215 HFS_KNOTE(vp, NOTE_DELETE);
2216 if (rvp) {
2217 HFS_KNOTE(rvp, NOTE_DELETE);
2218 }
2219
2220 return (error);
2221 }
2222
2223
2224 __private_extern__ void
2225 replace_desc(struct cnode *cp, struct cat_desc *cdp)
2226 {
2227 // fixes 4348457 and 4463138
2228 if (&cp->c_desc == cdp) {
2229 return;
2230 }
2231
2232 /* First release allocated name buffer */
2233 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
2234 const u_int8_t *name = cp->c_desc.cd_nameptr;
2235
2236 cp->c_desc.cd_nameptr = 0;
2237 cp->c_desc.cd_namelen = 0;
2238 cp->c_desc.cd_flags &= ~CD_HASBUF;
2239 vfs_removename((const char *)name);
2240 }
2241 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
2242
2243 /* Cnode now owns the name buffer */
2244 cdp->cd_nameptr = 0;
2245 cdp->cd_namelen = 0;
2246 cdp->cd_flags &= ~CD_HASBUF;
2247 }
2248
2249
2250 /*
2251 * Rename a cnode.
2252 *
2253 * The VFS layer guarantees that:
2254 * - source and destination will either both be directories, or
2255 * both not be directories.
2256 * - all the vnodes are from the same file system
2257 *
 * When the target is a directory, HFS must ensure that it is empty.
2259 */
2260 static int
2261 hfs_vnop_rename(ap)
2262 struct vnop_rename_args /* {
2263 struct vnode *a_fdvp;
2264 struct vnode *a_fvp;
2265 struct componentname *a_fcnp;
2266 struct vnode *a_tdvp;
2267 struct vnode *a_tvp;
2268 struct componentname *a_tcnp;
2269 vfs_context_t a_context;
2270 } */ *ap;
2271 {
2272 struct vnode *tvp = ap->a_tvp;
2273 struct vnode *tdvp = ap->a_tdvp;
2274 struct vnode *fvp = ap->a_fvp;
2275 struct vnode *fdvp = ap->a_fdvp;
2276 struct vnode *rvp = NULLVP;
2277 struct componentname *tcnp = ap->a_tcnp;
2278 struct componentname *fcnp = ap->a_fcnp;
2279 struct proc *p = vfs_context_proc(ap->a_context);
2280 struct cnode *fcp;
2281 struct cnode *fdcp;
2282 struct cnode *tdcp;
2283 struct cnode *tcp;
2284 struct cat_desc from_desc;
2285 struct cat_desc to_desc;
2286 struct cat_desc out_desc;
2287 struct hfsmount *hfsmp;
2288 cat_cookie_t cookie;
2289 int tvp_deleted = 0;
2290 int started_tr = 0, got_cookie = 0;
2291 int took_trunc_lock = 0;
2292 int lockflags;
2293 int error;
2294 int rsrc_vid = 0;
2295 int recycle_rsrc = 0;
2296
2297 /* When tvp exist, take the truncate lock for the hfs_removefile(). */
2298 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
2299 hfs_lock_truncate(VTOC(tvp), TRUE);
2300 took_trunc_lock = 1;
2301 }
2302
2303 retry:
2304 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
2305 HFS_EXCLUSIVE_LOCK);
2306 if (error) {
2307 if (took_trunc_lock)
2308 hfs_unlock_truncate(VTOC(tvp), TRUE);
2309 return (error);
2310 }
2311
2312 fdcp = VTOC(fdvp);
2313 fcp = VTOC(fvp);
2314 tdcp = VTOC(tdvp);
2315 tcp = tvp ? VTOC(tvp) : NULL;
2316 hfsmp = VTOHFS(tdvp);
2317
2318 /* Check for a race against unlink. */
2319 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid)) {
2320 error = ENOENT;
2321 goto out;
2322 }
2323
2324 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid))) {
2325 //
2326 // hmm, the destination vnode isn't valid any more.
2327 // in this case we can just drop him and pretend he
2328 // never existed in the first place.
2329 //
2330 if (took_trunc_lock) {
2331 hfs_unlock_truncate(VTOC(tvp), TRUE);
2332 took_trunc_lock = 0;
2333 }
2334
2335 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2336
2337 tcp = NULL;
2338 tvp = NULL;
2339
2340 // retry the locking with tvp null'ed out
2341 goto retry;
2342 }
2343
2344 fdcp->c_flag |= C_DIR_MODIFICATION;
2345 if (fdvp != tdvp) {
2346 tdcp->c_flag |= C_DIR_MODIFICATION;
2347 }
2348
2349 /*
2350 * Disallow renaming of a directory hard link if the source and
2351 * destination parent directories are different, or a directory whose
2352 * descendant is a directory hard link and the one of the ancestors
2353 * of the destination directory is a directory hard link.
2354 */
2355 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2356 if (fcp->c_flag & C_HARDLINK) {
2357 error = EPERM;
2358 goto out;
2359 }
2360 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2361 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2362 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2363 error = EPERM;
2364 hfs_systemfile_unlock(hfsmp, lockflags);
2365 goto out;
2366 }
2367 hfs_systemfile_unlock(hfsmp, lockflags);
2368 }
2369 }
2370
2371 /*
2372 * The following edge case is caught here:
2373 * (to cannot be a descendent of from)
2374 *
2375 * o fdvp
2376 * /
2377 * /
2378 * o fvp
2379 * \
2380 * \
2381 * o tdvp
2382 * /
2383 * /
2384 * o tvp
2385 */
2386 if (tdcp->c_parentcnid == fcp->c_fileid) {
2387 error = EINVAL;
2388 goto out;
2389 }
2390
2391 /*
2392 * The following two edge cases are caught here:
2393 * (note tvp is not empty)
2394 *
2395 * o tdvp o tdvp
2396 * / /
2397 * / /
2398 * o tvp tvp o fdvp
2399 * \ \
2400 * \ \
2401 * o fdvp o fvp
2402 * /
2403 * /
2404 * o fvp
2405 */
2406 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
2407 error = ENOTEMPTY;
2408 goto out;
2409 }
2410
2411 /*
2412 * The following edge case is caught here:
2413 * (the from child and parent are the same)
2414 *
2415 * o tdvp
2416 * /
2417 * /
2418 * fdvp o fvp
2419 */
2420 if (fdvp == fvp) {
2421 error = EINVAL;
2422 goto out;
2423 }
2424
2425 /*
2426 * Make sure "from" vnode and its parent are changeable.
2427 */
2428 if ((fcp->c_flags & (IMMUTABLE | APPEND)) || (fdcp->c_flags & APPEND)) {
2429 error = EPERM;
2430 goto out;
2431 }
2432
2433 /*
2434 * If the destination parent directory is "sticky", then the
2435 * user must own the parent directory, or the destination of
2436 * the rename, otherwise the destination may not be changed
2437 * (except by root). This implements append-only directories.
2438 *
2439 * Note that checks for immutable and write access are done
2440 * by the call to hfs_removefile.
2441 */
2442 if (tvp && (tdcp->c_mode & S_ISTXT) &&
2443 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
2444 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
2445 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
2446 error = EPERM;
2447 goto out;
2448 }
2449
2450 #if QUOTA
2451 if (tvp)
2452 (void)hfs_getinoquota(tcp);
2453 #endif
2454 /* Preflighting done, take fvp out of the name space. */
2455 cache_purge(fvp);
2456
2457 /*
2458 * When a file moves out of "Cleanup At Startup"
2459 * we can drop its NODUMP status.
2460 */
2461 if ((fcp->c_flags & UF_NODUMP) &&
2462 vnode_isreg(fvp) &&
2463 (fdvp != tdvp) &&
2464 (fdcp->c_desc.cd_nameptr != NULL) &&
2465 (strncmp((const char *)fdcp->c_desc.cd_nameptr,
2466 CARBON_TEMP_DIR_NAME,
2467 sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
2468 fcp->c_flags &= ~UF_NODUMP;
2469 fcp->c_touch_chgtime = TRUE;
2470 (void) hfs_update(fvp, 0);
2471 }
2472
2473 bzero(&from_desc, sizeof(from_desc));
2474 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2475 from_desc.cd_namelen = fcnp->cn_namelen;
2476 from_desc.cd_parentcnid = fdcp->c_fileid;
2477 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2478 from_desc.cd_cnid = fcp->c_cnid;
2479
2480 bzero(&to_desc, sizeof(to_desc));
2481 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
2482 to_desc.cd_namelen = tcnp->cn_namelen;
2483 to_desc.cd_parentcnid = tdcp->c_fileid;
2484 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
2485 to_desc.cd_cnid = fcp->c_cnid;
2486
2487 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2488 goto out;
2489 }
2490 started_tr = 1;
2491
2492 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
2493 * inside a journal transaction and without holding a cnode lock.
2494 * As setting of this bit depends on being in journal transaction for
2495 * concurrency, check this bit again after we start journal transaction for rename
2496 * to ensure that this directory does not have any descendant that
2497 * is a directory hard link.
2498 */
2499 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
2500 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
2501 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2502 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
2503 error = EPERM;
2504 hfs_systemfile_unlock(hfsmp, lockflags);
2505 goto out;
2506 }
2507 hfs_systemfile_unlock(hfsmp, lockflags);
2508 }
2509 }
2510
2511 // if it's a hardlink then re-lookup the name so
2512 // that we get the correct cnid in from_desc (see
2513 // the comment in hfs_removefile for more details)
2514 //
2515 if (fcp->c_flag & C_HARDLINK) {
2516 struct cat_desc tmpdesc;
2517 cnid_t real_cnid;
2518
2519 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
2520 tmpdesc.cd_namelen = fcnp->cn_namelen;
2521 tmpdesc.cd_parentcnid = fdcp->c_fileid;
2522 tmpdesc.cd_hint = fdcp->c_childhint;
2523 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
2524 tmpdesc.cd_encoding = 0;
2525
2526 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2527
2528 if (cat_lookup(hfsmp, &tmpdesc, 0, NULL, NULL, NULL, &real_cnid) != 0) {
2529 hfs_systemfile_unlock(hfsmp, lockflags);
2530 goto out;
2531 }
2532
2533 // use the real cnid instead of whatever happened to be there
2534 from_desc.cd_cnid = real_cnid;
2535 hfs_systemfile_unlock(hfsmp, lockflags);
2536 }
2537
2538 /*
2539 * Reserve some space in the Catalog file.
2540 */
2541 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
2542 goto out;
2543 }
2544 got_cookie = 1;
2545
2546 /*
2547 * If the destination exists then it may need to be removed.
2548 */
2549 if (tvp) {
2550 /*
2551 * When fvp matches tvp they could be case variants
2552 * or matching hard links.
2553 */
2554 if (fvp == tvp) {
2555 if (!(fcp->c_flag & C_HARDLINK)) {
2556 goto skip_rm; /* simple case variant */
2557
2558 } else if ((fdvp != tdvp) ||
2559 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
2560 goto out; /* matching hardlinks, nothing to do */
2561
2562 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
2563 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
2564 goto skip_rm; /* case-variant hardlink in the same dir */
2565 } else {
2566 goto out; /* matching hardlink, nothing to do */
2567 }
2568 }
2569
2570 if (vnode_isdir(tvp))
2571 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE);
2572 else {
2573 if (tcp){
2574 rvp = tcp->c_rsrc_vp;
2575 }
2576 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0);
2577
2578 /* If the destination file had a resource fork vnode, we couldn't do
2579 * anything about it in hfs_removefile because we didn't have a reference on it.
2580 * We need to take action here to prevent it from leaking blocks. If removefile
2581 * succeeded, then squirrel away the vid of the resource fork vnode and force a
2582 * recycle after dropping all of the locks. The vid is guaranteed not to change
2583 * at this point because we still hold the cnode lock.
2584 */
2585 if ((error == 0) && (tcp->c_flag & C_DELETED) && rvp && !vnode_isinuse(rvp, 0)) {
2586 rsrc_vid = vnode_vid(rvp);
2587 recycle_rsrc = 1;
2588 }
2589 }
2590
2591 if (error)
2592 goto out;
2593 tvp_deleted = 1;
2594 }
2595 skip_rm:
2596 /*
2597 * All done with tvp and fvp
2598 */
2599
2600 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2601 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
2602 hfs_systemfile_unlock(hfsmp, lockflags);
2603
2604 if (error) {
2605 goto out;
2606 }
2607
2608 /* Invalidate negative cache entries in the destination directory */
2609 if (tdcp->c_flag & C_NEG_ENTRIES) {
2610 cache_purge_negatives(tdvp);
2611 tdcp->c_flag &= ~C_NEG_ENTRIES;
2612 }
2613
2614 /* Update cnode's catalog descriptor */
2615 replace_desc(fcp, &out_desc);
2616 fcp->c_parentcnid = tdcp->c_fileid;
2617 fcp->c_hint = 0;
2618
2619 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
2620 (fdcp->c_cnid == kHFSRootFolderID));
2621 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
2622 (tdcp->c_cnid == kHFSRootFolderID));
2623
2624 /* Update both parent directories. */
2625 if (fdvp != tdvp) {
2626 if (vnode_isdir(fvp)) {
2627 /* If the source directory has directory hard link
2628 * descendants, set the kHFSHasChildLinkBit in the
2629 * destination parent hierarchy
2630 */
2631 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
2632 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
2633
2634 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
2635
2636 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
2637 if (error) {
2638 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
2639 error = 0;
2640 }
2641 }
2642 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
2643 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
2644 }
2645 tdcp->c_entries++;
2646 tdcp->c_dirchangecnt++;
2647 if (fdcp->c_entries > 0)
2648 fdcp->c_entries--;
2649 fdcp->c_dirchangecnt++;
2650 fdcp->c_touch_chgtime = TRUE;
2651 fdcp->c_touch_modtime = TRUE;
2652
2653 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2654 (void) hfs_update(fdvp, 0);
2655 }
2656 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
2657 tdcp->c_touch_chgtime = TRUE;
2658 tdcp->c_touch_modtime = TRUE;
2659
2660 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
2661 (void) hfs_update(tdvp, 0);
2662 out:
2663 if (got_cookie) {
2664 cat_postflight(hfsmp, &cookie, p);
2665 }
2666 if (started_tr) {
2667 hfs_end_transaction(hfsmp);
2668 }
2669
2670 /* Note that if hfs_removedir or hfs_removefile was invoked above they will already have
2671 generated a NOTE_WRITE for tdvp and a NOTE_DELETE for tvp.
2672 */
2673 if (error == 0) {
2674 HFS_KNOTE(fvp, NOTE_RENAME);
2675 HFS_KNOTE(fdvp, NOTE_WRITE);
2676 if (tdvp != fdvp) HFS_KNOTE(tdvp, NOTE_WRITE);
2677 };
2678
2679 fdcp->c_flag &= ~C_DIR_MODIFICATION;
2680 wakeup((caddr_t)&fdcp->c_flag);
2681 if (fdvp != tdvp) {
2682 tdcp->c_flag &= ~C_DIR_MODIFICATION;
2683 wakeup((caddr_t)&tdcp->c_flag);
2684 }
2685
2686 if (took_trunc_lock)
2687 hfs_unlock_truncate(VTOC(tvp), TRUE);
2688
2689 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
2690
2691 /* Now that we've dropped locks, see if we need to force recycle on the old
2692 * destination's rsrc fork, preventing a leak of the rsrc fork's blocks. Note that
2693 * doing the ref/rele is in order to twiddle the VL_INACTIVE bit to the vnode's flags
2694 * so that on the last vnode_put for this vnode, we will force vnop_inactive to be triggered.
2695 */
2696 if ((recycle_rsrc) && (vnode_getwithvid(rvp, rsrc_vid) == 0)) {
2697 vnode_ref(rvp);
2698 vnode_rele(rvp);
2699 vnode_recycle(rvp);
2700 vnode_put (rvp);
2701 }
2702
2703
2704 /* After tvp is removed the only acceptable error is EIO */
2705 if (error && tvp_deleted)
2706 error = EIO;
2707
2708 return (error);
2709 }
2710
2711
2712 /*
2713 * Make a directory.
2714 */
2715 static int
2716 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
2717 {
2718 /***** HACK ALERT ********/
2719 ap->a_cnp->cn_flags |= MAKEENTRY;
2720 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
2721 }
2722
2723
2724 /*
2725 * Create a symbolic link.
2726 */
/*
 * hfs_vnop_symlink
 *
 * Create the symlink catalog entry via hfs_makenode(), allocate space for
 * the link target inside a journal transaction, and write the target string
 * to disk through the buffer cache (journaled when a journal is present).
 * On success *ap->a_vpp holds the new, unlocked-on-return vnode; on error
 * the partially-created file is removed and *ap->a_vpp is cleared.
 */
static int
hfs_vnop_symlink(struct vnop_symlink_args *ap)
{
	struct vnode **vpp = ap->a_vpp;
	struct vnode *dvp = ap->a_dvp;
	struct vnode *vp = NULL;
	struct cnode *cp = NULL;
	struct hfsmount *hfsmp;
	struct filefork *fp;
	struct buf *bp = NULL;
	char *datap;
	int started_tr = 0;	/* nonzero once we own a journal transaction */
	u_int32_t len;		/* length of the link target string */
	int error;

	/* HFS standard disks don't support symbolic links */
	if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
		return (ENOTSUP);

	/* Check for empty target name */
	if (ap->a_target[0] == 0)
		return (EINVAL);

	hfsmp = VTOHFS(dvp);
	len = strlen(ap->a_target);

	/* Check for free space */
	if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
		return (ENOSPC);
	}

	/* Create the vnode */
	ap->a_vap->va_mode |= S_IFLNK;
	if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
		goto out;
	}
	vp = *vpp;
	if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK))) {
		goto out;
	}
	cp = VTOC(vp);
	fp = VTOF(vp);

	/*
	 * NOTE(review): if the freshly-created cnode was deleted out from
	 * under us, we bail with error still 0, so the caller sees success
	 * with *vpp set to the (empty) link -- confirm this is intended.
	 */
	if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
	    goto out;
	}

#if QUOTA
	(void)hfs_getinoquota(cp);
#endif /* QUOTA */

	if ((error = hfs_start_transaction(hfsmp)) != 0) {
	    goto out;
	}
	started_tr = 1;

	/*
	 * Allocate space for the link.
	 *
	 * Since we're already inside a transaction,
	 * tell hfs_truncate to skip the ubc_setsize.
	 *
	 * Don't need truncate lock since a symlink is treated as a system file.
	 */
	error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, ap->a_context);

	/* On errors, remove the symlink file */
	if (error) {
		/*
		 * End the transaction so we don't re-take the cnode lock
		 * below while inside a transaction (lock order violation).
		 */
		hfs_end_transaction(hfsmp);

		/* hfs_removefile() requires holding the truncate lock */
		hfs_unlock(cp);
		hfs_lock_truncate(cp, TRUE);
		hfs_lock(cp, HFS_FORCE_LOCK);

		/*
		 * Re-enter a transaction for the removal; if that fails we
		 * can only give up (started_tr cleared so "out" won't end a
		 * transaction we don't hold).
		 */
		if (hfs_start_transaction(hfsmp) != 0) {
			started_tr = 0;
			hfs_unlock_truncate(cp, TRUE);
			goto out;
		}
		
		(void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0);
		hfs_unlock_truncate(cp, TRUE);
		goto out;	
	}

	/* Write the link to disk */
	bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, VTOHFS(vp)->hfs_phys_block_size),
			0, 0, BLK_META);
	if (hfsmp->jnl) {
		journal_modify_block_start(hfsmp->jnl, bp);
	}
	datap = (char *)buf_dataptr(bp);
	/* Zero-fill the block so the tail past the target string is clean. */
	bzero(datap, buf_size(bp));
	bcopy(ap->a_target, datap, len);

	if (hfsmp->jnl) {
		journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
	} else {
		buf_bawrite(bp);
	}
	/*
	 * We deferred the ubc_setsize for hfs_truncate
	 * since we were inside a transaction.
	 *
	 * We don't need to drop the cnode lock here
	 * since this is a symlink.
	 */
	ubc_setsize(vp, len);
out:
	if (started_tr)
		hfs_end_transaction(hfsmp);
	if ((cp != NULL) && (vp != NULL)) {
		hfs_unlock(cp);
	}
	if (error) {
		if (vp) {
			vnode_put(vp);
		}
		*vpp = NULL;
	}
	return (error);
}
2854
2855
/* structures to hold a "." or ".." directory entry */

/* Standard (non-extended) readdir format; layout matches struct dirent. */
struct hfs_stddotentry {
	u_int32_t	d_fileno;	/* unique file number */
	u_int16_t	d_reclen;	/* length of this structure */
	u_int8_t	d_type;		/* dirent file type */
	u_int8_t	d_namlen;	/* len of filename */
	char		d_name[4];	/* "." or ".." */
};

/* Extended readdir format (VNODE_READDIR_EXTENDED); layout matches struct direntry. */
struct hfs_extdotentry {
	u_int64_t  d_fileno;	/* unique file number */
	u_int64_t  d_seekoff;	/* seek offset (optional, used by servers) */
	u_int16_t  d_reclen;	/* length of this structure */
	u_int16_t  d_namlen;	/* len of filename */
	u_int8_t   d_type;	/* dirent file type */
	u_char     d_name[3];	/* "." or ".." */
};

/* Scratch space large enough for either dot-entry flavor. */
typedef union {
	struct hfs_stddotentry  std;
	struct hfs_extdotentry  ext;
} hfs_dotentry_t;
2878
2879 /*
2880 * hfs_vnop_readdir reads directory entries into the buffer pointed
2881 * to by uio, in a filesystem independent format. Up to uio_resid
2882 * bytes of data can be transferred. The data in the buffer is a
2883 * series of packed dirent structures where each one contains the
2884 * following entries:
2885 *
2886 * u_int32_t d_fileno; // file number of entry
2887 * u_int16_t d_reclen; // length of this record
2888 * u_int8_t d_type; // file type
2889 * u_int8_t d_namlen; // length of string in d_name
2890 * char d_name[MAXNAMELEN+1]; // null terminated file name
2891 *
2892 * The current position (uio_offset) refers to the next block of
2893 * entries. The offset can only be set to a value previously
2894 * returned by hfs_vnop_readdir or zero. This offset does not have
2895 * to match the number of bytes returned (in uio_resid).
2896 *
2897 * In fact, the offset used by HFS is essentially an index (26 bits)
2898 * with a tag (6 bits). The tag is for associating the next request
2899 * with the current request. This enables us to have multiple threads
2900 * reading the directory while the directory is also being modified.
2901 *
2902 * Each tag/index pair is tied to a unique directory hint. The hint
2903 * contains information (filename) needed to build the catalog b-tree
2904 * key for finding the next set of entries.
2905 *
2906 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
2907 * do NOT synthesize entries for "." and "..".
2908 */
2909 static int
2910 hfs_vnop_readdir(ap)
2911 struct vnop_readdir_args /* {
2912 vnode_t a_vp;
2913 uio_t a_uio;
2914 int a_flags;
2915 int *a_eofflag;
2916 int *a_numdirent;
2917 vfs_context_t a_context;
2918 } */ *ap;
2919 {
2920 struct vnode *vp = ap->a_vp;
2921 uio_t uio = ap->a_uio;
2922 struct cnode *cp;
2923 struct hfsmount *hfsmp;
2924 directoryhint_t *dirhint = NULL;
2925 directoryhint_t localhint;
2926 off_t offset;
2927 off_t startoffset;
2928 int error = 0;
2929 int eofflag = 0;
2930 user_addr_t user_start = 0;
2931 user_size_t user_len = 0;
2932 int index;
2933 unsigned int tag;
2934 int items;
2935 int lockflags;
2936 int extended;
2937 int nfs_cookies;
2938 caddr_t bufstart;
2939 cnid_t cnid_hint = 0;
2940
2941 items = 0;
2942 startoffset = offset = uio_offset(uio);
2943 bufstart = CAST_DOWN(caddr_t, uio_iov_base(uio));
2944 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
2945 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
2946
2947 /* Sanity check the uio data. */
2948 if ((uio_iovcnt(uio) > 1) ||
2949 (uio_resid(uio) < (int)sizeof(struct dirent))) {
2950 return (EINVAL);
2951 }
2952 /* Note that the dirhint calls require an exclusive lock. */
2953 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
2954 return (error);
2955 cp = VTOC(vp);
2956 hfsmp = VTOHFS(vp);
2957
2958 /* Pick up cnid hint (if any). */
2959 if (nfs_cookies) {
2960 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
2961 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
2962 if (cnid_hint == INT_MAX) { /* searching pass the last item */
2963 eofflag = 1;
2964 goto out;
2965 }
2966 }
2967 /*
2968 * Synthesize entries for "." and "..", unless the directory has
2969 * been deleted, but not closed yet (lazy delete in progress).
2970 */
2971 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
2972 hfs_dotentry_t dotentry[2];
2973 size_t uiosize;
2974
2975 if (extended) {
2976 struct hfs_extdotentry *entry = &dotentry[0].ext;
2977
2978 entry->d_fileno = cp->c_cnid;
2979 entry->d_reclen = sizeof(struct hfs_extdotentry);
2980 entry->d_type = DT_DIR;
2981 entry->d_namlen = 1;
2982 entry->d_name[0] = '.';
2983 entry->d_name[1] = '\0';
2984 entry->d_name[2] = '\0';
2985 entry->d_seekoff = 1;
2986
2987 ++entry;
2988 entry->d_fileno = cp->c_parentcnid;
2989 entry->d_reclen = sizeof(struct hfs_extdotentry);
2990 entry->d_type = DT_DIR;
2991 entry->d_namlen = 2;
2992 entry->d_name[0] = '.';
2993 entry->d_name[1] = '.';
2994 entry->d_name[2] = '\0';
2995 entry->d_seekoff = 2;
2996 uiosize = 2 * sizeof(struct hfs_extdotentry);
2997 } else {
2998 struct hfs_stddotentry *entry = &dotentry[0].std;
2999
3000 entry->d_fileno = cp->c_cnid;
3001 entry->d_reclen = sizeof(struct hfs_stddotentry);
3002 entry->d_type = DT_DIR;
3003 entry->d_namlen = 1;
3004 *(int *)&entry->d_name[0] = 0;
3005 entry->d_name[0] = '.';
3006
3007 ++entry;
3008 entry->d_fileno = cp->c_parentcnid;
3009 entry->d_reclen = sizeof(struct hfs_stddotentry);
3010 entry->d_type = DT_DIR;
3011 entry->d_namlen = 2;
3012 *(int *)&entry->d_name[0] = 0;
3013 entry->d_name[0] = '.';
3014 entry->d_name[1] = '.';
3015 uiosize = 2 * sizeof(struct hfs_stddotentry);
3016 }
3017 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
3018 goto out;
3019 }
3020 offset += 2;
3021 }
3022
3023 /* If there are no real entries then we're done. */
3024 if (cp->c_entries == 0) {
3025 error = 0;
3026 eofflag = 1;
3027 uio_setoffset(uio, offset);
3028 goto seekoffcalc;
3029 }
3030
3031 //
3032 // We have to lock the user's buffer here so that we won't
3033 // fault on it after we've acquired a shared lock on the
3034 // catalog file. The issue is that you can get a 3-way
3035 // deadlock if someone else starts a transaction and then
3036 // tries to lock the catalog file but can't because we're
3037 // here and we can't service our page fault because VM is
3038 // blocked trying to start a transaction as a result of
3039 // trying to free up pages for our page fault. It's messy
3040 // but it does happen on dual-processors that are paging
3041 // heavily (see radar 3082639 for more info). By locking
3042 // the buffer up-front we prevent ourselves from faulting
3043 // while holding the shared catalog file lock.
3044 //
3045 // Fortunately this and hfs_search() are the only two places
3046 // currently (10/30/02) that can fault on user data with a
3047 // shared lock on the catalog file.
3048 //
3049 if (hfsmp->jnl && uio_isuserspace(uio)) {
3050 user_start = uio_curriovbase(uio);
3051 user_len = uio_curriovlen(uio);
3052
3053 if ((error = vslock(user_start, user_len)) != 0) {
3054 user_start = 0;
3055 goto out;
3056 }
3057 }
3058 /* Convert offset into a catalog directory index. */
3059 index = (offset & HFS_INDEX_MASK) - 2;
3060 tag = offset & ~HFS_INDEX_MASK;
3061
3062 /* Lock catalog during cat_findname and cat_getdirentries. */
3063 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3064
3065 /* When called from NFS, try and resolve a cnid hint. */
3066 if (nfs_cookies && cnid_hint != 0) {
3067 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
3068 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
3069 localhint.dh_index = index - 1;
3070 localhint.dh_time = 0;
3071 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
3072 dirhint = &localhint; /* don't forget to release the descriptor */
3073 } else {
3074 cat_releasedesc(&localhint.dh_desc);
3075 }
3076 }
3077 }
3078
3079 /* Get a directory hint (cnode must be locked exclusive) */
3080 if (dirhint == NULL) {
3081 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
3082
3083 /* Hide tag from catalog layer. */
3084 dirhint->dh_index &= HFS_INDEX_MASK;
3085 if (dirhint->dh_index == HFS_INDEX_MASK) {
3086 dirhint->dh_index = -1;
3087 }
3088 }
3089
3090 if (index == 0) {
3091 dirhint->dh_threadhint = cp->c_dirthreadhint;
3092 }
3093
3094 /* Pack the buffer with dirent entries. */
3095 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, extended, &items, &eofflag);
3096
3097 if (index == 0 && error == 0) {
3098 cp->c_dirthreadhint = dirhint->dh_threadhint;
3099 }
3100
3101 hfs_systemfile_unlock(hfsmp, lockflags);
3102
3103 if (error != 0) {
3104 goto out;
3105 }
3106
3107 /* Get index to the next item */
3108 index += items;
3109
3110 if (items >= (int)cp->c_entries) {
3111 eofflag = 1;
3112 }
3113
3114 /* Convert catalog directory index back into an offset. */
3115 while (tag == 0)
3116 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
3117 uio_setoffset(uio, (index + 2) | tag);
3118 dirhint->dh_index |= tag;
3119
3120 seekoffcalc:
3121 cp->c_touch_acctime = TRUE;
3122
3123 if (ap->a_numdirent) {
3124 if (startoffset == 0)
3125 items += 2;
3126 *ap->a_numdirent = items;
3127 }
3128
3129 out:
3130 if (hfsmp->jnl && user_start) {
3131 vsunlock(user_start, user_len, TRUE);
3132 }
3133 /* If we didn't do anything then go ahead and dump the hint. */
3134 if ((dirhint != NULL) &&
3135 (dirhint != &localhint) &&
3136 (uio_offset(uio) == startoffset)) {
3137 hfs_reldirhint(cp, dirhint);
3138 eofflag = 1;
3139 }
3140 if (ap->a_eofflag) {
3141 *ap->a_eofflag = eofflag;
3142 }
3143 if (dirhint == &localhint) {
3144 cat_releasedesc(&localhint.dh_desc);
3145 }
3146 hfs_unlock(cp);
3147 return (error);
3148 }
3149
3150
3151 /*
3152 * Read contents of a symbolic link.
3153 */
3154 static int
3155 hfs_vnop_readlink(ap)
3156 struct vnop_readlink_args /* {
3157 struct vnode *a_vp;
3158 struct uio *a_uio;
3159 vfs_context_t a_context;
3160 } */ *ap;
3161 {
3162 struct vnode *vp = ap->a_vp;
3163 struct cnode *cp;
3164 struct filefork *fp;
3165 int error;
3166
3167 if (!vnode_islnk(vp))
3168 return (EINVAL);
3169
3170 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
3171 return (error);
3172 cp = VTOC(vp);
3173 fp = VTOF(vp);
3174
3175 /* Zero length sym links are not allowed */
3176 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
3177 printf("hfs: zero length symlink on fileid %d\n", cp->c_fileid);
3178 error = EINVAL;
3179 goto exit;
3180 }
3181
3182 /* Cache the path so we don't waste buffer cache resources */
3183 if (fp->ff_symlinkptr == NULL) {
3184 struct buf *bp = NULL;
3185
3186 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
3187 error = (int)buf_meta_bread(vp, (daddr64_t)0,
3188 roundup((int)fp->ff_size,
3189 VTOHFS(vp)->hfs_phys_block_size),
3190 vfs_context_ucred(ap->a_context), &bp);
3191 if (error) {
3192 if (bp)
3193 buf_brelse(bp);
3194 if (fp->ff_symlinkptr) {
3195 FREE(fp->ff_symlinkptr, M_TEMP);
3196 fp->ff_symlinkptr = NULL;
3197 }
3198 goto exit;
3199 }
3200 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
3201
3202 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
3203 buf_markinvalid(bp); /* data no longer needed */
3204 }
3205 buf_brelse(bp);
3206 }
3207 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
3208
3209 /*
3210 * Keep track blocks read
3211 */
3212 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
3213
3214 /*
3215 * If this file hasn't been seen since the start of
3216 * the current sampling period then start over.
3217 */
3218 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
3219 VTOF(vp)->ff_bytesread = fp->ff_size;
3220 else
3221 VTOF(vp)->ff_bytesread += fp->ff_size;
3222
3223 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
3224 // cp->c_touch_acctime = TRUE;
3225 }
3226
3227 exit:
3228 hfs_unlock(cp);
3229 return (error);
3230 }
3231
3232
3233 /*
3234 * Get configurable pathname variables.
3235 */
3236 static int
3237 hfs_vnop_pathconf(ap)
3238 struct vnop_pathconf_args /* {
3239 struct vnode *a_vp;
3240 int a_name;
3241 int *a_retval;
3242 vfs_context_t a_context;
3243 } */ *ap;
3244 {
3245 switch (ap->a_name) {
3246 case _PC_LINK_MAX:
3247 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
3248 *ap->a_retval = 1;
3249 else
3250 *ap->a_retval = HFS_LINK_MAX;
3251 break;
3252 case _PC_NAME_MAX:
3253 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD)
3254 *ap->a_retval = kHFSMaxFileNameChars; /* 255 */
3255 else
3256 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 31 */
3257 break;
3258 case _PC_PATH_MAX:
3259 *ap->a_retval = PATH_MAX; /* 1024 */
3260 break;
3261 case _PC_PIPE_BUF:
3262 *ap->a_retval = PIPE_BUF;
3263 break;
3264 case _PC_CHOWN_RESTRICTED:
3265 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
3266 break;
3267 case _PC_NO_TRUNC:
3268 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
3269 break;
3270 case _PC_NAME_CHARS_MAX:
3271 *ap->a_retval = kHFSPlusMaxFileNameChars;
3272 break;
3273 case _PC_CASE_SENSITIVE:
3274 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
3275 *ap->a_retval = 1;
3276 else
3277 *ap->a_retval = 0;
3278 break;
3279 case _PC_CASE_PRESERVING:
3280 *ap->a_retval = 1;
3281 break;
3282 case _PC_FILESIZEBITS:
3283 *ap->a_retval = 64; /* number of bits to store max file size */
3284 break;
3285 default:
3286 return (EINVAL);
3287 }
3288
3289 return (0);
3290 }
3291
3292
3293 /*
3294 * Update a cnode's on-disk metadata.
3295 *
3296 * If waitfor is set, then wait for the disk write of
3297 * the node to complete.
3298 *
3299 * The cnode must be locked exclusive
3300 */
3301 __private_extern__
3302 int
3303 hfs_update(struct vnode *vp, __unused int waitfor)
3304 {
3305 struct cnode *cp = VTOC(vp);
3306 struct proc *p;
3307 struct cat_fork *dataforkp = NULL;
3308 struct cat_fork *rsrcforkp = NULL;
3309 struct cat_fork datafork;
3310 struct cat_fork rsrcfork;
3311 struct hfsmount *hfsmp;
3312 int lockflags;
3313 int error;
3314
3315 p = current_proc();
3316 hfsmp = VTOHFS(vp);
3317
3318 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
3319 hfsmp->hfs_catalog_vp == NULL){
3320 return (0);
3321 }
3322 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
3323 cp->c_flag &= ~C_MODIFIED;
3324 cp->c_touch_acctime = 0;
3325 cp->c_touch_chgtime = 0;
3326 cp->c_touch_modtime = 0;
3327 return (0);
3328 }
3329
3330 hfs_touchtimes(hfsmp, cp);
3331
3332 /* Nothing to update. */
3333 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
3334 return (0);
3335 }
3336
3337 if (cp->c_datafork)
3338 dataforkp = &cp->c_datafork->ff_data;
3339 if (cp->c_rsrcfork)
3340 rsrcforkp = &cp->c_rsrcfork->ff_data;
3341
3342 /*
3343 * For delayed allocations updates are
3344 * postponed until an fsync or the file
3345 * gets written to disk.
3346 *
3347 * Deleted files can defer meta data updates until inactive.
3348 *
3349 * If we're ever called with the C_FORCEUPDATE flag though
3350 * we have to do the update.
3351 */
3352 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
3353 (ISSET(cp->c_flag, C_DELETED) ||
3354 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
3355 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
3356 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
3357 cp->c_flag |= C_MODIFIED;
3358
3359 HFS_KNOTE(vp, NOTE_ATTRIB);
3360
3361 return (0);
3362 }
3363
3364 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3365 return error;
3366 }
3367
3368 /*
3369 * For files with invalid ranges (holes) the on-disk
3370 * field representing the size of the file (cf_size)
3371 * must be no larger than the start of the first hole.
3372 */
3373 if (dataforkp && !CIRCLEQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
3374 bcopy(dataforkp, &datafork, sizeof(datafork));
3375 datafork.cf_size = CIRCLEQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
3376 dataforkp = &datafork;
3377 } else if (dataforkp && (cp->c_datafork->ff_unallocblocks != 0)) {
3378 // always make sure the block count and the size
3379 // of the file match the number of blocks actually
3380 // allocated to the file on disk
3381 bcopy(dataforkp, &datafork, sizeof(datafork));
3382 // make sure that we don't assign a negative block count
3383 if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
3384 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
3385 cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
3386 }
3387 datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
3388 datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
3389 dataforkp = &datafork;
3390 }
3391
3392 /*
3393 * For resource forks with delayed allocations, make sure
3394 * the block count and file size match the number of blocks
3395 * actually allocated to the file on disk.
3396 */
3397 if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
3398 bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
3399 rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
3400 rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
3401 rsrcforkp = &rsrcfork;
3402 }
3403
3404 /*
3405 * Lock the Catalog b-tree file.
3406 */
3407 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3408
3409 /* XXX - waitfor is not enforced */
3410 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
3411
3412 hfs_systemfile_unlock(hfsmp, lockflags);
3413
3414 /* After the updates are finished, clear the flags */
3415 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
3416
3417 hfs_end_transaction(hfsmp);
3418
3419 HFS_KNOTE(vp, NOTE_ATTRIB);
3420
3421 return (error);
3422 }
3423
3424 /*
3425 * Allocate a new node
3426 * Note - Function does not create and return a vnode for whiteout creation.
3427 */
3428 static int
3429 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
3430 struct vnode_attr *vap, vfs_context_t ctx)
3431 {
3432 struct cnode *cp = NULL;
3433 struct cnode *dcp;
3434 struct vnode *tvp;
3435 struct hfsmount *hfsmp;
3436 struct cat_desc in_desc, out_desc;
3437 struct cat_attr attr;
3438 struct timeval tv;
3439 int lockflags;
3440 int error, started_tr = 0;
3441 enum vtype vnodetype;
3442 int mode;
3443
3444 dcp = VTOC(dvp);
3445 if ((error = hfs_lock(dcp, HFS_EXCLUSIVE_LOCK)))
3446 return (error);
3447
3448 dcp->c_flag |= C_DIR_MODIFICATION;
3449
3450 hfsmp = VTOHFS(dvp);
3451 *vpp = NULL;
3452 tvp = NULL;
3453 out_desc.cd_flags = 0;
3454 out_desc.cd_nameptr = NULL;
3455
3456 vnodetype = vap->va_type;
3457 if (vnodetype == VNON)
3458 vnodetype = VREG;
3459 mode = MAKEIMODE(vnodetype, vap->va_mode);
3460
3461 /* Check if were out of usable disk space. */
3462 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
3463 error = ENOSPC;
3464 goto exit;
3465 }
3466
3467 microtime(&tv);
3468
3469 /* Setup the default attributes */
3470 bzero(&attr, sizeof(attr));
3471 attr.ca_mode = mode;
3472 attr.ca_linkcount = 1;
3473 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
3474 attr.ca_rdev = vap->va_rdev;
3475 }
3476 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
3477 VATTR_SET_SUPPORTED(vap, va_create_time);
3478 attr.ca_itime = vap->va_create_time.tv_sec;
3479 } else {
3480 attr.ca_itime = tv.tv_sec;
3481 }
3482 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
3483 attr.ca_itime += 3600; /* Same as what hfs_update does */
3484 }
3485 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
3486 attr.ca_atimeondisk = attr.ca_atime;
3487 if (VATTR_IS_ACTIVE(vap, va_flags)) {
3488 VATTR_SET_SUPPORTED(vap, va_flags);
3489 attr.ca_flags = vap->va_flags;
3490 }
3491
3492 /*
3493 * HFS+ only: all files get ThreadExists
3494 * HFSX only: dirs get HasFolderCount
3495 */
3496 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
3497 if (vnodetype == VDIR) {
3498 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
3499 attr.ca_recflags = kHFSHasFolderCountMask;
3500 } else {
3501 attr.ca_recflags = kHFSThreadExistsMask;
3502 }
3503 }
3504
3505 attr.ca_uid = vap->va_uid;
3506 attr.ca_gid = vap->va_gid;
3507 VATTR_SET_SUPPORTED(vap, va_mode);
3508 VATTR_SET_SUPPORTED(vap, va_uid);
3509 VATTR_SET_SUPPORTED(vap, va_gid);
3510
3511 /* Tag symlinks with a type and creator. */
3512 if (vnodetype == VLNK) {
3513 struct FndrFileInfo *fip;
3514
3515 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
3516 fip->fdType = SWAP_BE32(kSymLinkFileType);
3517 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
3518 }
3519 if (cnp->cn_flags & ISWHITEOUT)
3520 attr.ca_flags |= UF_OPAQUE;
3521
3522 /* Setup the descriptor */
3523 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3524 in_desc.cd_namelen = cnp->cn_namelen;
3525 in_desc.cd_parentcnid = dcp->c_fileid;
3526 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
3527 in_desc.cd_hint = dcp->c_childhint;
3528 in_desc.cd_encoding = 0;
3529
3530 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3531 goto exit;
3532 }
3533 started_tr = 1;
3534
3535 // have to also lock the attribute file because cat_create() needs
3536 // to check that any fileID it wants to use does not have orphaned
3537 // attributes in it.
3538 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
3539
3540 /* Reserve some space in the Catalog file. */
3541 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
3542 hfs_systemfile_unlock(hfsmp, lockflags);
3543 goto exit;
3544 }
3545 error = cat_create(hfsmp, &in_desc, &attr, &out_desc);
3546 if (error == 0) {
3547 /* Update the parent directory */
3548 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
3549 dcp->c_entries++;
3550 if (vnodetype == VDIR) {
3551 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3552 }
3553 dcp->c_dirchangecnt++;
3554 dcp->c_ctime = tv.tv_sec;
3555 dcp->c_mtime = tv.tv_sec;
3556 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3557 HFS_KNOTE(dvp, NOTE_ATTRIB);
3558 }
3559 hfs_systemfile_unlock(hfsmp, lockflags);
3560 if (error)
3561 goto exit;
3562
3563 /* Invalidate negative cache entries in the directory */
3564 if (dcp->c_flag & C_NEG_ENTRIES) {
3565 cache_purge_negatives(dvp);
3566 dcp->c_flag &= ~C_NEG_ENTRIES;
3567 }
3568
3569 if (vnodetype == VDIR) {
3570 HFS_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
3571 } else {
3572 HFS_KNOTE(dvp, NOTE_WRITE);
3573 };
3574
3575 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
3576 (dcp->c_cnid == kHFSRootFolderID));
3577
3578 // XXXdbg
3579 // have to end the transaction here before we call hfs_getnewvnode()
3580 // because that can cause us to try and reclaim a vnode on a different
3581 // file system which could cause us to start a transaction which can
3582 // deadlock with someone on that other file system (since we could be
3583 // holding two transaction locks as well as various vnodes and we did
3584 // not obtain the locks on them in the proper order).
3585 //
3586 // NOTE: this means that if the quota check fails or we have to update
3587 // the change time on a block-special device that those changes
3588 // will happen as part of independent transactions.
3589 //
3590 if (started_tr) {
3591 hfs_end_transaction(hfsmp);
3592 started_tr = 0;
3593 }
3594
3595 /* Do not create vnode for whiteouts */
3596 if (S_ISWHT(mode)) {
3597 goto exit;
3598 }
3599
3600 /*
3601 * Create a vnode for the object just created.
3602 *
3603 * The cnode is locked on successful return.
3604 */
3605 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, GNV_CREATE, &attr, NULL, &tvp);
3606 if (error)
3607 goto exit;
3608
3609 cp = VTOC(tvp);
3610 #if QUOTA
3611 /*
3612 * We call hfs_chkiq with FORCE flag so that if we
3613 * fall through to the rmdir we actually have
3614 * accounted for the inode
3615 */
3616 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3617 if ((error = hfs_getinoquota(cp)) ||
3618 (error = hfs_chkiq(cp, 1, vfs_context_ucred(ctx), FORCE))) {
3619
3620 if (vnode_isdir(tvp))
3621 (void) hfs_removedir(dvp, tvp, cnp, 0);
3622 else {
3623 hfs_unlock(cp);
3624 hfs_lock_truncate(cp, TRUE);
3625 hfs_lock(cp, HFS_FORCE_LOCK);
3626 (void) hfs_removefile(dvp, tvp, cnp, 0, 0, 0);
3627 hfs_unlock_truncate(cp, TRUE);
3628 }
3629 /*
3630 * we successfully allocated a new vnode, but
3631 * the quota check is telling us we're beyond
3632 * our limit, so we need to dump our lock + reference
3633 */
3634 hfs_unlock(cp);
3635 vnode_put(tvp);
3636
3637 goto exit;
3638 }
3639 }
3640 #endif /* QUOTA */
3641
3642 *vpp = tvp;
3643 exit:
3644 cat_releasedesc(&out_desc);
3645
3646 /*
3647 * Check if a file is located in the "Cleanup At Startup"
3648 * directory. If it is then tag it as NODUMP so that we
3649 * can be lazy about zero filling data holes.
3650 */
3651 if ((error == 0) && dvp && (vnodetype == VREG) &&
3652 (dcp->c_desc.cd_nameptr != NULL) &&
3653 (strncmp((const char *)dcp->c_desc.cd_nameptr,
3654 CARBON_TEMP_DIR_NAME,
3655 sizeof(CARBON_TEMP_DIR_NAME)) == 0)) {
3656 struct vnode *ddvp;
3657
3658 dcp->c_flag &= ~C_DIR_MODIFICATION;
3659 wakeup((caddr_t)&dcp->c_flag);
3660
3661 hfs_unlock(dcp);
3662 dvp = NULL;
3663
3664 /*
3665 * The parent of "Cleanup At Startup" should
3666 * have the ASCII name of the userid.
3667 */
3668 if (hfs_vget(hfsmp, dcp->c_parentcnid, &ddvp, 0) == 0) {
3669 if (VTOC(ddvp)->c_desc.cd_nameptr) {
3670 uid_t uid;
3671
3672 uid = strtoul((const char *)VTOC(ddvp)->c_desc.cd_nameptr, 0, 0);
3673 if ((uid == cp->c_uid) ||
3674 (uid == vfs_context_ucred(ctx)->cr_uid)) {
3675 cp->c_flags |= UF_NODUMP;
3676 cp->c_touch_chgtime = TRUE;
3677 }
3678 }
3679 hfs_unlock(VTOC(ddvp));
3680 vnode_put(ddvp);
3681 }
3682 }
3683 if (dvp) {
3684 dcp->c_flag &= ~C_DIR_MODIFICATION;
3685 wakeup((caddr_t)&dcp->c_flag);
3686
3687 hfs_unlock(dcp);
3688 }
3689 if (error == 0 && cp != NULL) {
3690 hfs_unlock(cp);
3691 }
3692 if (started_tr) {
3693 hfs_end_transaction(hfsmp);
3694 started_tr = 0;
3695 }
3696
3697 return (error);
3698 }
3699
3700
3701 /*
3702 * Return a referenced vnode for the resource fork
3703 *
3704 * cnode for vnode vp must already be locked.
3705 *
3706 * can_drop_lock is true if its safe to temporally drop/re-acquire the cnode lock
3707 */
3708 __private_extern__
3709 int
3710 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp, int can_drop_lock)
3711 {
3712 struct vnode *rvp;
3713 struct vnode *dvp = NULLVP;
3714 struct cnode *cp = VTOC(vp);
3715 int error;
3716 int vid;
3717
3718 restart:
3719 /* Attempt to use exising vnode */
3720 if ((rvp = cp->c_rsrc_vp)) {
3721 vid = vnode_vid(rvp);
3722
3723 /*
3724 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
3725 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
3726 * for a VL_WANTTERM while another thread has an iocount on the alternate
3727 * fork vnode and is attempting to acquire the common cnode lock.
3728 *
3729 * But it's also not safe to drop the cnode lock when we're holding
3730 * multiple cnode locks, like during a hfs_removefile() operation
3731 * since we could lock out of order when re-acquiring the cnode lock.
3732 *
3733 * So we can only drop the lock here if its safe to drop it -- which is
3734 * most of the time with the exception being hfs_removefile().
3735 */
3736 if (can_drop_lock)
3737 hfs_unlock(cp);
3738
3739 error = vnode_getwithvid(rvp, vid);
3740
3741 if (can_drop_lock) {
3742 (void) hfs_lock(cp, HFS_FORCE_LOCK);
3743 /*
3744 * When our lock was relinquished, the resource fork
3745 * could have been recycled. Check for this and try
3746 * again.
3747 */
3748 if (error == ENOENT)
3749 goto restart;
3750 }
3751 if (error) {
3752 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
3753
3754 if (name)
3755 printf("hfs_vgetrsrc: couldn't get resource"
3756 " fork for %s, err %d\n", name, error);
3757 return (error);
3758 }
3759 } else {
3760 struct cat_fork rsrcfork;
3761 struct componentname cn;
3762 int lockflags;
3763
3764 /*
3765 * Make sure cnode lock is exclusive, if not upgrade it.
3766 *
3767 * We assume that we were called from a read-only VNOP (getattr)
3768 * and that its safe to have the cnode lock dropped and reacquired.
3769 */
3770 if (cp->c_lockowner != current_thread()) {
3771 if (!can_drop_lock)
3772 return (EINVAL);
3773 /*
3774 * If the upgrade fails we loose the lock and
3775 * have to take the exclusive lock on our own.
3776 */
3777 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
3778 lck_rw_lock_exclusive(&cp->c_rwlock);
3779 cp->c_lockowner = current_thread();
3780 }
3781
3782 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3783
3784 /* Get resource fork data */
3785 error = cat_lookup(hfsmp, &cp->c_desc, 1, (struct cat_desc *)0,
3786 (struct cat_attr *)0, &rsrcfork, NULL);
3787
3788 hfs_systemfile_unlock(hfsmp, lockflags);
3789 if (error)
3790 return (error);
3791
3792 /*
3793 * Supply hfs_getnewvnode with a component name.
3794 */
3795 cn.cn_pnbuf = NULL;
3796 if (cp->c_desc.cd_nameptr) {
3797 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
3798 cn.cn_nameiop = LOOKUP;
3799 cn.cn_flags = ISLASTCN | HASBUF;
3800 cn.cn_context = NULL;
3801 cn.cn_pnlen = MAXPATHLEN;
3802 cn.cn_nameptr = cn.cn_pnbuf;
3803 cn.cn_hash = 0;
3804 cn.cn_consume = 0;
3805 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
3806 "%s%s", cp->c_desc.cd_nameptr,
3807 _PATH_RSRCFORKSPEC);
3808 }
3809 dvp = vnode_getparent(vp);
3810 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
3811 &cp->c_desc, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
3812 &rsrcfork, &rvp);
3813 if (dvp)
3814 vnode_put(dvp);
3815 if (cn.cn_pnbuf)
3816 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
3817 if (error)
3818 return (error);
3819 }
3820
3821 *rvpp = rvp;
3822 return (0);
3823 }
3824
3825
3826 static void
3827 filt_hfsdetach(struct knote *kn)
3828 {
3829 struct vnode *vp;
3830
3831 vp = (struct vnode *)kn->kn_hook;
3832 if (vnode_getwithvid(vp, kn->kn_hookid))
3833 return;
3834
3835 if (1) { /* ! KNDETACH_VNLOCKED */
3836 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
3837 (void) KNOTE_DETACH(&VTOC(vp)->c_knotes, kn);
3838 hfs_unlock(VTOC(vp));
3839 }
3840 }
3841
3842 vnode_put(vp);
3843 }
3844
3845 /*ARGSUSED*/
3846 static int
3847 filt_hfsread(struct knote *kn, long hint)
3848 {
3849 struct vnode *vp = (struct vnode *)kn->kn_hook;
3850 int dropvp = 0;
3851
3852 if (hint == 0) {
3853 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3854 hint = NOTE_REVOKE;
3855 } else
3856 dropvp = 1;
3857 }
3858 if (hint == NOTE_REVOKE) {
3859 /*
3860 * filesystem is gone, so set the EOF flag and schedule
3861 * the knote for deletion.
3862 */
3863 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3864 return (1);
3865 }
3866
3867 /* poll(2) semantics dictate always saying there is data */
3868 if (!(kn->kn_flags & EV_POLL)) {
3869 off_t amount;
3870
3871 amount = VTOF(vp)->ff_size - kn->kn_fp->f_fglob->fg_offset;
3872 if (amount > (off_t)INTPTR_MAX)
3873 kn->kn_data = INTPTR_MAX;
3874 else if (amount < (off_t)INTPTR_MIN)
3875 kn->kn_data = INTPTR_MIN;
3876 else
3877 kn->kn_data = (intptr_t)amount;
3878 } else {
3879 kn->kn_data = 1;
3880 }
3881
3882 if (dropvp)
3883 vnode_put(vp);
3884
3885 return (kn->kn_data != 0);
3886 }
3887
3888 /*ARGSUSED*/
3889 static int
3890 filt_hfswrite(struct knote *kn, long hint)
3891 {
3892 struct vnode *vp = (struct vnode *)kn->kn_hook;
3893
3894 if (hint == 0) {
3895 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3896 hint = NOTE_REVOKE;
3897 } else
3898 vnode_put(vp);
3899 }
3900 if (hint == NOTE_REVOKE) {
3901 /*
3902 * filesystem is gone, so set the EOF flag and schedule
3903 * the knote for deletion.
3904 */
3905 kn->kn_data = 0;
3906 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3907 return (1);
3908 }
3909 kn->kn_data = 0;
3910 return (1);
3911 }
3912
3913 static int
3914 filt_hfsvnode(struct knote *kn, long hint)
3915 {
3916 struct vnode *vp = (struct vnode *)kn->kn_hook;
3917
3918 if (hint == 0) {
3919 if ((vnode_getwithvid(vp, kn->kn_hookid) != 0)) {
3920 hint = NOTE_REVOKE;
3921 } else
3922 vnode_put(vp);
3923 }
3924 if (kn->kn_sfflags & hint)
3925 kn->kn_fflags |= hint;
3926 if ((hint == NOTE_REVOKE)) {
3927 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
3928 return (1);
3929 }
3930
3931 return (kn->kn_fflags != 0);
3932 }
3933
/*
 * kqueue filter operation tables for HFS vnodes.
 * First field (1) marks these as fd-based filters; no attach routine
 * is supplied, detach and event callbacks are the filt_hfs* functions.
 */
static struct filterops hfsread_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsread };
static struct filterops hfswrite_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfswrite };
static struct filterops hfsvnode_filtops =
	{ 1, NULL, filt_hfsdetach, filt_hfsvnode };
3940
3941 /*
3942 * Add a kqueue filter.
3943 */
3944 static int
3945 hfs_vnop_kqfiltadd(
3946 struct vnop_kqfilt_add_args /* {
3947 struct vnode *a_vp;
3948 struct knote *a_kn;
3949 struct proc *p;
3950 vfs_context_t a_context;
3951 } */ *ap)
3952 {
3953 struct vnode *vp = ap->a_vp;
3954 struct knote *kn = ap->a_kn;
3955 int error;
3956
3957 switch (kn->kn_filter) {
3958 case EVFILT_READ:
3959 if (vnode_isreg(vp)) {
3960 kn->kn_fop = &hfsread_filtops;
3961 } else {
3962 return EINVAL;
3963 };
3964 break;
3965 case EVFILT_WRITE:
3966 if (vnode_isreg(vp)) {
3967 kn->kn_fop = &hfswrite_filtops;
3968 } else {
3969 return EINVAL;
3970 };
3971 break;
3972 case EVFILT_VNODE:
3973 kn->kn_fop = &hfsvnode_filtops;
3974 break;
3975 default:
3976 return (1);
3977 }
3978
3979 kn->kn_hook = (caddr_t)vp;
3980 kn->kn_hookid = vnode_vid(vp);
3981
3982 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK)))
3983 return (error);
3984 KNOTE_ATTACH(&VTOC(vp)->c_knotes, kn);
3985 hfs_unlock(VTOC(vp));
3986
3987 return (0);
3988 }
3989
3990 /*
3991 * Remove a kqueue filter
3992 */
3993 static int
3994 hfs_vnop_kqfiltremove(ap)
3995 struct vnop_kqfilt_remove_args /* {
3996 struct vnode *a_vp;
3997 uintptr_t ident;
3998 vfs_context_t a_context;
3999 } */__unused *ap;
4000 {
4001 int result;
4002
4003 result = ENOTSUP; /* XXX */
4004
4005 return (result);
4006 }
4007
4008 /*
4009 * Wrapper for special device reads
4010 */
4011 static int
4012 hfsspec_read(ap)
4013 struct vnop_read_args /* {
4014 struct vnode *a_vp;
4015 struct uio *a_uio;
4016 int a_ioflag;
4017 vfs_context_t a_context;
4018 } */ *ap;
4019 {
4020 /*
4021 * Set access flag.
4022 */
4023 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4024 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
4025 }
4026
4027 /*
4028 * Wrapper for special device writes
4029 */
4030 static int
4031 hfsspec_write(ap)
4032 struct vnop_write_args /* {
4033 struct vnode *a_vp;
4034 struct uio *a_uio;
4035 int a_ioflag;
4036 vfs_context_t a_context;
4037 } */ *ap;
4038 {
4039 /*
4040 * Set update and change flags.
4041 */
4042 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4043 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4044 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
4045 }
4046
4047 /*
4048 * Wrapper for special device close
4049 *
4050 * Update the times on the cnode then do device close.
4051 */
4052 static int
4053 hfsspec_close(ap)
4054 struct vnop_close_args /* {
4055 struct vnode *a_vp;
4056 int a_fflag;
4057 vfs_context_t a_context;
4058 } */ *ap;
4059 {
4060 struct vnode *vp = ap->a_vp;
4061 struct cnode *cp;
4062
4063 if (vnode_isinuse(ap->a_vp, 1)) {
4064 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4065 cp = VTOC(vp);
4066 hfs_touchtimes(VTOHFS(vp), cp);
4067 hfs_unlock(cp);
4068 }
4069 }
4070 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
4071 }
4072
4073 #if FIFO
4074 /*
4075 * Wrapper for fifo reads
4076 */
4077 static int
4078 hfsfifo_read(ap)
4079 struct vnop_read_args /* {
4080 struct vnode *a_vp;
4081 struct uio *a_uio;
4082 int a_ioflag;
4083 vfs_context_t a_context;
4084 } */ *ap;
4085 {
4086 /*
4087 * Set access flag.
4088 */
4089 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
4090 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
4091 }
4092
4093 /*
4094 * Wrapper for fifo writes
4095 */
4096 static int
4097 hfsfifo_write(ap)
4098 struct vnop_write_args /* {
4099 struct vnode *a_vp;
4100 struct uio *a_uio;
4101 int a_ioflag;
4102 vfs_context_t a_context;
4103 } */ *ap;
4104 {
4105 /*
4106 * Set update and change flags.
4107 */
4108 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
4109 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
4110 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
4111 }
4112
4113 /*
4114 * Wrapper for fifo close
4115 *
4116 * Update the times on the cnode then do device close.
4117 */
4118 static int
4119 hfsfifo_close(ap)
4120 struct vnop_close_args /* {
4121 struct vnode *a_vp;
4122 int a_fflag;
4123 vfs_context_t a_context;
4124 } */ *ap;
4125 {
4126 struct vnode *vp = ap->a_vp;
4127 struct cnode *cp;
4128
4129 if (vnode_isinuse(ap->a_vp, 1)) {
4130 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) == 0) {
4131 cp = VTOC(vp);
4132 hfs_touchtimes(VTOHFS(vp), cp);
4133 hfs_unlock(cp);
4134 }
4135 }
4136 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
4137 }
4138
4139 /*
4140 * kqfilt_add wrapper for fifos.
4141 *
4142 * Fall through to hfs kqfilt_add routines if needed
4143 */
4144 int
4145 hfsfifo_kqfilt_add(ap)
4146 struct vnop_kqfilt_add_args *ap;
4147 {
4148 int error;
4149
4150 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_add), ap);
4151 if (error)
4152 error = hfs_vnop_kqfiltadd(ap);
4153 return (error);
4154 }
4155
4156 /*
4157 * kqfilt_remove wrapper for fifos.
4158 *
4159 * Fall through to hfs kqfilt_remove routines if needed
4160 */
4161 int
4162 hfsfifo_kqfilt_remove(ap)
4163 struct vnop_kqfilt_remove_args *ap;
4164 {
4165 int error;
4166
4167 error = VOCALL(fifo_vnodeop_p, VOFFSET(vnop_kqfilt_remove), ap);
4168 if (error)
4169 error = hfs_vnop_kqfiltremove(ap);
4170 return (error);
4171 }
4172
4173 #endif /* FIFO */
4174
4175 /*
4176 * Synchronize a file's in-core state with that on disk.
4177 */
4178 static int
4179 hfs_vnop_fsync(ap)
4180 struct vnop_fsync_args /* {
4181 struct vnode *a_vp;
4182 int a_waitfor;
4183 vfs_context_t a_context;
4184 } */ *ap;
4185 {
4186 struct vnode* vp = ap->a_vp;
4187 int error;
4188
4189 /*
4190 * We need to allow ENOENT lock errors since unlink
4191 * systenm call can call VNOP_FSYNC during vclean.
4192 */
4193 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK);
4194 if (error)
4195 return (0);
4196
4197 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
4198
4199 hfs_unlock(VTOC(vp));
4200 return (error);
4201 }
4202
4203
4204 static int
4205 hfs_vnop_whiteout(ap)
4206 struct vnop_whiteout_args /* {
4207 struct vnode *a_dvp;
4208 struct componentname *a_cnp;
4209 int a_flags;
4210 vfs_context_t a_context;
4211 } */ *ap;
4212 {
4213 int error = 0;
4214 struct vnode *vp = NULL;
4215 struct vnode_attr va;
4216 struct vnop_lookup_args lookup_args;
4217 struct vnop_remove_args remove_args;
4218 struct hfsmount *hfsmp;
4219
4220 hfsmp = VTOHFS(ap->a_dvp);
4221 if (hfsmp->hfs_flags & HFS_STANDARD) {
4222 error = ENOTSUP;
4223 goto exit;
4224 }
4225
4226 switch (ap->a_flags) {
4227 case LOOKUP:
4228 error = 0;
4229 break;
4230
4231 case CREATE:
4232 VATTR_INIT(&va);
4233 VATTR_SET(&va, va_type, VREG);
4234 VATTR_SET(&va, va_mode, S_IFWHT);
4235 VATTR_SET(&va, va_uid, 0);
4236 VATTR_SET(&va, va_gid, 0);
4237
4238 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
4239 /* No need to release the vnode as no vnode is created for whiteouts */
4240 break;
4241
4242 case DELETE:
4243 lookup_args.a_dvp = ap->a_dvp;
4244 lookup_args.a_vpp = &vp;
4245 lookup_args.a_cnp = ap->a_cnp;
4246 lookup_args.a_context = ap->a_context;
4247
4248 error = hfs_vnop_lookup(&lookup_args);
4249 if (error) {
4250 break;
4251 }
4252
4253 remove_args.a_dvp = ap->a_dvp;
4254 remove_args.a_vp = vp;
4255 remove_args.a_cnp = ap->a_cnp;
4256 remove_args.a_flags = 0;
4257 remove_args.a_context = ap->a_context;
4258
4259 error = hfs_vnop_remove(&remove_args);
4260 vnode_put(vp);
4261 break;
4262
4263 default:
4264 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
4265 };
4266
4267 exit:
4268 return (error);
4269 }
4270
/* Vector of HFS vnode operations (built from hfs_vnodeop_entries below). */
int (**hfs_vnodeop_p)(void *);

/* Cast helper for the function pointers in the vnodeopv entry tables. */
#define VOPFUNC int (*)(void *)
4274
4275 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
4276 { &vnop_default_desc, (VOPFUNC)vn_default_error },
4277 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
4278 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
4279 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
4280 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
4281 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
4282 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
4283 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
4284 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
4285 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
4286 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
4287 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
4288 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
4289 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
4290 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
4291 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
4292 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
4293 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
4294 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
4295 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
4296 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
4297 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
4298 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
4299 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
4300 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
4301 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
4302 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
4303 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
4304 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
4305 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
4306 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
4307 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
4308 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
4309 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
4310 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
4311 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
4312 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
4313 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
4314 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
4315 { &vnop_kqfilt_add_desc, (VOPFUNC)hfs_vnop_kqfiltadd }, /* kqfilt_add */
4316 { &vnop_kqfilt_remove_desc, (VOPFUNC)hfs_vnop_kqfiltremove }, /* kqfilt_remove */
4317 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
4318 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
4319 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
4320 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
4321 { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},
4322 #if NAMEDSTREAMS
4323 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
4324 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
4325 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
4326 #endif
4327 { NULL, (VOPFUNC)NULL }
4328 };
4329
4330 struct vnodeopv_desc hfs_vnodeop_opv_desc =
4331 { &hfs_vnodeop_p, hfs_vnodeop_entries };
4332
4333 int (**hfs_specop_p)(void *);
4334 struct vnodeopv_entry_desc hfs_specop_entries[] = {
4335 { &vnop_default_desc, (VOPFUNC)vn_default_error },
4336 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
4337 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
4338 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
4339 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
4340 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
4341 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
4342 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
4343 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
4344 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
4345 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
4346 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
4347 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
4348 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
4349 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
4350 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
4351 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
4352 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
4353 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
4354 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
4355 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
4356 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
4357 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
4358 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
4359 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
4360 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
4361 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
4362 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
4363 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
4364 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
4365 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
4366 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
4367 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
4368 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
4369 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
4370 };
4371 struct vnodeopv_desc hfs_specop_opv_desc =
4372 { &hfs_specop_p, hfs_specop_entries };
4373
#if FIFO
int (**hfs_fifoop_p)(void *);

/*
 * Vnode operations table for FIFOs (named pipes) that live on an HFS
 * volume.  Most operations go to the generic fifo_* layer; attribute,
 * fsync, paging, blockmap, and lifecycle operations stay with HFS so the
 * on-disk catalog record is kept up to date.  A NULL descriptor
 * terminates the table.
 */
struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
	{ &vnop_default_desc, (VOPFUNC)vn_default_error },
	{ &vnop_lookup_desc, (VOPFUNC)fifo_lookup },		/* lookup */
	{ &vnop_create_desc, (VOPFUNC)fifo_create },		/* create */
	{ &vnop_mknod_desc, (VOPFUNC)fifo_mknod },		/* mknod */
	{ &vnop_open_desc, (VOPFUNC)fifo_open },		/* open */
	{ &vnop_close_desc, (VOPFUNC)hfsfifo_close },		/* close */
	{ &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr },	/* getattr */
	{ &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr },	/* setattr */
	{ &vnop_read_desc, (VOPFUNC)hfsfifo_read },		/* read */
	{ &vnop_write_desc, (VOPFUNC)hfsfifo_write },		/* write */
	{ &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl },		/* ioctl */
	{ &vnop_select_desc, (VOPFUNC)fifo_select },		/* select */
	{ &vnop_revoke_desc, (VOPFUNC)fifo_revoke },		/* revoke */
	{ &vnop_mmap_desc, (VOPFUNC)fifo_mmap },		/* mmap */
	{ &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync },		/* fsync */
	{ &vnop_remove_desc, (VOPFUNC)fifo_remove },		/* remove */
	{ &vnop_link_desc, (VOPFUNC)fifo_link },		/* link */
	{ &vnop_rename_desc, (VOPFUNC)fifo_rename },		/* rename */
	{ &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir },		/* mkdir */
	{ &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir },		/* rmdir */
	{ &vnop_symlink_desc, (VOPFUNC)fifo_symlink },		/* symlink */
	{ &vnop_readdir_desc, (VOPFUNC)fifo_readdir },		/* readdir */
	{ &vnop_readlink_desc, (VOPFUNC)fifo_readlink },	/* readlink */
	{ &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive },	/* inactive */
	{ &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim },	/* reclaim */
	{ &vnop_strategy_desc, (VOPFUNC)fifo_strategy },	/* strategy */
	{ &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf },	/* pathconf */
	{ &vnop_advlock_desc, (VOPFUNC)err_advlock },		/* advlock */
	{ &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },	/* bwrite */
	{ &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein },	/* pagein */
	{ &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout },	/* pageout */
	{ &vnop_copyfile_desc, (VOPFUNC)err_copyfile },		/* copyfile */
	{ &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff },	/* blktooff */
	{ &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk },	/* offtoblk */
	{ &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap },	/* blockmap */
	{ &vnop_kqfilt_add_desc, (VOPFUNC)hfsfifo_kqfilt_add },		/* kqfilt_add */
	{ &vnop_kqfilt_remove_desc, (VOPFUNC)hfsfifo_kqfilt_remove },	/* kqfilt_remove */
	{ (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
};

/* Registration record: the VFS fills in hfs_fifoop_p from this table. */
struct vnodeopv_desc hfs_fifoop_opv_desc =
	{ &hfs_fifoop_p, hfs_fifoop_entries };
#endif /* FIFO */
4419
4420
4421