[apple/xnu.git] / bsd / hfs / hfs_vnops.c (xnu-2422.90.20)
1 /*
2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/systm.h>
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/file_internal.h>
33 #include <sys/dirent.h>
34 #include <sys/stat.h>
35 #include <sys/buf.h>
36 #include <sys/buf_internal.h>
37 #include <sys/mount.h>
38 #include <sys/vnode_if.h>
39 #include <sys/vnode_internal.h>
40 #include <sys/malloc.h>
41 #include <sys/ubc.h>
42 #include <sys/ubc_internal.h>
43 #include <sys/paths.h>
44 #include <sys/quota.h>
45 #include <sys/time.h>
46 #include <sys/disk.h>
47 #include <sys/kauth.h>
48 #include <sys/uio_internal.h>
49 #include <sys/fsctl.h>
50 #include <sys/cprotect.h>
51 #include <sys/xattr.h>
52 #include <string.h>
53
54 #include <miscfs/specfs/specdev.h>
55 #include <miscfs/fifofs/fifo.h>
56 #include <vfs/vfs_support.h>
57 #include <machine/spl.h>
58
59 #include <sys/kdebug.h>
60 #include <sys/sysctl.h>
61
62 #include "hfs.h"
63 #include "hfs_catalog.h"
64 #include "hfs_cnode.h"
65 #include "hfs_dbg.h"
66 #include "hfs_mount.h"
67 #include "hfs_quota.h"
68 #include "hfs_endian.h"
69
70 #include "hfscommon/headers/BTreesInternal.h"
71 #include "hfscommon/headers/FileMgrInternal.h"
72
73 #define KNDETACH_VNLOCKED 0x00000001
74
75 /* Global vfs data structures for hfs */
76
77 /* Always F_FULLFSYNC? 1=yes, 0=no (the default is 'no' for "various" reasons) */
78 int always_do_fullfsync = 0;
79 SYSCTL_DECL(_vfs_generic);
80 SYSCTL_INT (_vfs_generic, OID_AUTO, always_do_fullfsync, CTLFLAG_RW | CTLFLAG_LOCKED, &always_do_fullfsync, 0, "always F_FULLFSYNC when fsync is called");
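/*
 * Illustrative usage (assumption, not from this file): registered under the
 * _vfs_generic parent above, the knob is expected to surface to userland as
 * vfs.generic.always_do_fullfsync, e.g.
 *
 *   sysctl -w vfs.generic.always_do_fullfsync=1
 */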
81
82 int hfs_makenode(struct vnode *dvp, struct vnode **vpp,
83 struct componentname *cnp, struct vnode_attr *vap,
84 vfs_context_t ctx);
85 int hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p);
86 int hfs_metasync_all(struct hfsmount *hfsmp);
87
88 int hfs_removedir(struct vnode *, struct vnode *, struct componentname *,
89 int, int);
90 int hfs_removefile(struct vnode *, struct vnode *, struct componentname *,
91 int, int, int, struct vnode *, int);
92
93 /* Used here and in cnode teardown -- for symlinks */
94 int hfs_removefile_callback(struct buf *bp, void *hfsmp);
95
96 int hfs_movedata (struct vnode *, struct vnode*);
97 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src,
98 struct filefork *dstfork, struct cnode *dst);
99
100 decmpfs_cnode* hfs_lazy_init_decmpfs_cnode (struct cnode *cp);
101
102 #if FIFO
103 static int hfsfifo_read(struct vnop_read_args *);
104 static int hfsfifo_write(struct vnop_write_args *);
105 static int hfsfifo_close(struct vnop_close_args *);
106
107 extern int (**fifo_vnodeop_p)(void *);
108 #endif /* FIFO */
109
110 int hfs_vnop_close(struct vnop_close_args*);
111 int hfs_vnop_create(struct vnop_create_args*);
112 int hfs_vnop_exchange(struct vnop_exchange_args*);
113 int hfs_vnop_fsync(struct vnop_fsync_args*);
114 int hfs_vnop_mkdir(struct vnop_mkdir_args*);
115 int hfs_vnop_mknod(struct vnop_mknod_args*);
116 int hfs_vnop_getattr(struct vnop_getattr_args*);
117 int hfs_vnop_open(struct vnop_open_args*);
118 int hfs_vnop_readdir(struct vnop_readdir_args*);
119 int hfs_vnop_remove(struct vnop_remove_args*);
120 int hfs_vnop_rename(struct vnop_rename_args*);
121 int hfs_vnop_rmdir(struct vnop_rmdir_args*);
122 int hfs_vnop_symlink(struct vnop_symlink_args*);
123 int hfs_vnop_setattr(struct vnop_setattr_args*);
124 int hfs_vnop_readlink(struct vnop_readlink_args *);
125 int hfs_vnop_pathconf(struct vnop_pathconf_args *);
126 int hfs_vnop_whiteout(struct vnop_whiteout_args *);
127 int hfs_vnop_mmap(struct vnop_mmap_args *ap);
128 int hfsspec_read(struct vnop_read_args *);
129 int hfsspec_write(struct vnop_write_args *);
130 int hfsspec_close(struct vnop_close_args *);
131
132 /* Options for hfs_removedir and hfs_removefile */
133 #define HFSRM_SKIP_RESERVE 0x01
134
135
136
137
138 /*****************************************************************************
139 *
140 * Common Operations on vnodes
141 *
142 *****************************************************************************/
143
144 /*
145 * Is the given cnode either the .journal or .journal_info_block file on
146 * a volume with an active journal? Many VNOPs use this to deny access
147 * to those files.
148 *
149 * Note: the .journal file on a volume with an external journal still
150 * returns true here, even though it does not actually hold the contents
151 * of the volume's journal.
152 */
153 static _Bool
154 hfs_is_journal_file(struct hfsmount *hfsmp, struct cnode *cp)
155 {
156 if (hfsmp->jnl != NULL &&
157 (cp->c_fileid == hfsmp->hfs_jnlinfoblkid ||
158 cp->c_fileid == hfsmp->hfs_jnlfileid)) {
159 return true;
160 } else {
161 return false;
162 }
163 }
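/*
 * Illustrative sketch (not part of the build): the typical guard that VNOPs
 * in this file build on top of hfs_is_journal_file(); compare hfs_vnop_open()
 * and hfs_vnop_setattr() below. hfs_example_deny_journal_access() is a
 * hypothetical name used only for this example.
 */
#if 0
static int
hfs_example_deny_journal_access(struct vnode *vp)
{
	/* Deny direct access to .journal / .journal_info_block. */
	if (hfs_is_journal_file(VTOHFS(vp), VTOC(vp)))
		return (EPERM);
	return (0);
}
#endif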
164
165 /*
166 * Create a regular file.
167 */
168 int
169 hfs_vnop_create(struct vnop_create_args *ap)
170 {
171 int error;
172
173 again:
174 error = hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
175
176 /*
177 * We speculatively skipped the original lookup of the leaf
178 * for CREATE. Since it exists, go get it as long as they
179 * didn't want an exclusive create.
180 */
181 if ((error == EEXIST) && !(ap->a_vap->va_vaflags & VA_EXCLUSIVE)) {
182 struct vnop_lookup_args args;
183
184 args.a_desc = &vnop_lookup_desc;
185 args.a_dvp = ap->a_dvp;
186 args.a_vpp = ap->a_vpp;
187 args.a_cnp = ap->a_cnp;
188 args.a_context = ap->a_context;
189 args.a_cnp->cn_nameiop = LOOKUP;
190 error = hfs_vnop_lookup(&args);
191 /*
192 * We can also race with remove for this file.
193 */
194 if (error == ENOENT) {
195 goto again;
196 }
197
198 /* Make sure it was a regular file. */
199 if ((error == 0) && !vnode_isreg(*args.a_vpp)) {
200 vnode_put(*args.a_vpp);
201 *args.a_vpp = NULLVP;
202 error = EEXIST;
203 }
204 args.a_cnp->cn_nameiop = CREATE;
205 }
206 return (error);
207 }
208
209 /*
210 * Make device special file.
211 */
212 int
213 hfs_vnop_mknod(struct vnop_mknod_args *ap)
214 {
215 struct vnode_attr *vap = ap->a_vap;
216 struct vnode *dvp = ap->a_dvp;
217 struct vnode **vpp = ap->a_vpp;
218 struct cnode *cp;
219 int error;
220
221 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord) {
222 return (ENOTSUP);
223 }
224
225 /* Create the vnode */
226 error = hfs_makenode(dvp, vpp, ap->a_cnp, vap, ap->a_context);
227 if (error)
228 return (error);
229
230 cp = VTOC(*vpp);
231 cp->c_touch_acctime = TRUE;
232 cp->c_touch_chgtime = TRUE;
233 cp->c_touch_modtime = TRUE;
234
235 if ((vap->va_rdev != VNOVAL) &&
236 (vap->va_type == VBLK || vap->va_type == VCHR))
237 cp->c_rdev = vap->va_rdev;
238
239 return (0);
240 }
241
242 #if HFS_COMPRESSION
243 /*
244 * hfs_ref_data_vp(): returns the data fork vnode for a given cnode.
245 * In the (hopefully rare) case where the data fork vnode is not
246 * present, it will use hfs_vget() to create a new vnode for the
247 * data fork.
248 *
249 * NOTE: If successful and a vnode is returned, the caller is responsible
250 * for releasing the returned vnode with vnode_rele().
251 */
252 static int
253 hfs_ref_data_vp(struct cnode *cp, struct vnode **data_vp, int skiplock)
254 {
255 int vref = 0;
256
257 if (!data_vp || !cp) /* sanity check incoming parameters */
258 return EINVAL;
259
260 /* maybe we should take the hfs cnode lock here, and if so, use the skiplock parameter to tell us not to */
261
262 if (!skiplock) hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
263 struct vnode *c_vp = cp->c_vp;
264 if (c_vp) {
265 /* we already have a data vnode */
266 *data_vp = c_vp;
267 vref = vnode_ref(*data_vp);
268 if (!skiplock) hfs_unlock(cp);
269 if (vref == 0) {
270 return 0;
271 }
272 return EINVAL;
273 }
274 /* no data fork vnode in the cnode, so ask hfs for one. */
275
276 if (!cp->c_rsrc_vp) {
277 /* if we don't have either a c_vp or c_rsrc_vp, we can't really do anything useful */
278 *data_vp = NULL;
279 if (!skiplock) hfs_unlock(cp);
280 return EINVAL;
281 }
282
283 if (0 == hfs_vget(VTOHFS(cp->c_rsrc_vp), cp->c_cnid, data_vp, 1, 0) &&
284 0 != data_vp) {
285 vref = vnode_ref(*data_vp);
286 vnode_put(*data_vp);
287 if (!skiplock) hfs_unlock(cp);
288 if (vref == 0) {
289 return 0;
290 }
291 return EINVAL;
292 }
293 /* there was an error getting the vnode */
294 *data_vp = NULL;
295 if (!skiplock) hfs_unlock(cp);
296 return EINVAL;
297 }
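/*
 * Illustrative sketch (not part of the build): the ref/rele pairing expected
 * by hfs_ref_data_vp(). On success the caller drops the reference with
 * vnode_rele(), not vnode_put(); hfs_file_is_compressed() below follows the
 * same pattern. 'cp' stands in for a caller's locked-state cnode.
 */
#if 0
	struct vnode *data_vp = NULL;

	if (hfs_ref_data_vp(cp, &data_vp, 0 /* take the cnode lock */) == 0 &&
	    data_vp != NULL) {
		/* ... use data_vp ... */
		vnode_rele(data_vp);
	}
#endif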
298
299 /*
300 * hfs_lazy_init_decmpfs_cnode(): returns the decmpfs_cnode for a cnode,
301 * allocating it if necessary; returns NULL if there was an allocation error.
302 * function is non-static so that it can be used from the FCNTL handler.
303 */
304 decmpfs_cnode *
305 hfs_lazy_init_decmpfs_cnode(struct cnode *cp)
306 {
307 if (!cp->c_decmp) {
308 decmpfs_cnode *dp = NULL;
309 MALLOC_ZONE(dp, decmpfs_cnode *, sizeof(decmpfs_cnode), M_DECMPFS_CNODE, M_WAITOK);
310 if (!dp) {
311 /* error allocating a decmpfs cnode */
312 return NULL;
313 }
314 decmpfs_cnode_init(dp);
315 if (!OSCompareAndSwapPtr(NULL, dp, (void * volatile *)&cp->c_decmp)) {
316 /* another thread got here first, so free the decmpfs_cnode we allocated */
317 decmpfs_cnode_destroy(dp);
318 FREE_ZONE(dp, sizeof(*dp), M_DECMPFS_CNODE);
319 }
320 }
321
322 return cp->c_decmp;
323 }
324
325 /*
326 * hfs_file_is_compressed(): returns 1 if the file is compressed, and 0 (zero) if not.
327 * if the file's compressed flag is set, makes sure that the decmpfs_cnode field
328 * is allocated by calling hfs_lazy_init_decmpfs_cnode(), then makes sure it is populated,
329 * or else fills it in via the decmpfs_file_is_compressed() function.
330 */
331 int
332 hfs_file_is_compressed(struct cnode *cp, int skiplock)
333 {
334 int ret = 0;
335
336 /* fast check to see if file is compressed. If flag is clear, just answer no */
337 if (!(cp->c_bsdflags & UF_COMPRESSED)) {
338 return 0;
339 }
340
341 decmpfs_cnode *dp = hfs_lazy_init_decmpfs_cnode(cp);
342 if (!dp) {
343 /* error allocating a decmpfs cnode, treat the file as uncompressed */
344 return 0;
345 }
346
347 /* flag was set, see if the decmpfs_cnode state is valid (zero == invalid) */
348 uint32_t decmpfs_state = decmpfs_cnode_get_vnode_state(dp);
349 switch(decmpfs_state) {
350 case FILE_IS_COMPRESSED:
351 case FILE_IS_CONVERTING: /* treat decompressing files as if they are compressed */
352 return 1;
353 case FILE_IS_NOT_COMPRESSED:
354 return 0;
355 /* otherwise the state is not cached yet */
356 }
357
358 /* decmpfs hasn't seen this file yet, so call decmpfs_file_is_compressed() to init the decmpfs_cnode struct */
359 struct vnode *data_vp = NULL;
360 if (0 == hfs_ref_data_vp(cp, &data_vp, skiplock)) {
361 if (data_vp) {
362 ret = decmpfs_file_is_compressed(data_vp, VTOCMP(data_vp)); // fill in decmpfs_cnode
363 vnode_rele(data_vp);
364 }
365 }
366 return ret;
367 }
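/*
 * Illustrative usage note (assumption): skiplock != 0 tells this routine (and
 * hfs_ref_data_vp() above) not to take the cnode lock itself, e.g. because
 * the caller already holds it or must avoid taking it here; skiplock == 0
 * lets it take a shared lock, as hfs_vnop_getattr() below does.
 */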
368
369 /* hfs_uncompressed_size_of_compressed_file() - get the uncompressed size of the file.
370 * if the caller has passed a valid vnode (has a ref count > 0), then hfsmp and fid are not required.
371 * if the caller doesn't have a vnode, pass NULL in vp, and pass valid hfsmp and fid.
372 * the file's size is returned in *size (required)
373 * if the indicated file is a directory (or something that doesn't have a data fork), then this call
374 * will return an error and the caller should fall back to treating the item as an uncompressed file
375 */
376 int
377 hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock)
378 {
379 int ret = 0;
380 int putaway = 0; /* flag to remember if we used hfs_vget() */
381
382 if (!size) {
383 return EINVAL; /* no place to put the file size */
384 }
385
386 if (NULL == vp) {
387 if (!hfsmp || !fid) { /* make sure we have the required parameters */
388 return EINVAL;
389 }
390 if (0 != hfs_vget(hfsmp, fid, &vp, skiplock, 0)) { /* vnode is null, use hfs_vget() to get it */
391 vp = NULL;
392 } else {
393 putaway = 1; /* note that hfs_vget() was used to acquire the vnode */
394 }
395 }
396 /* this double check for compression (hfs_file_is_compressed)
397 * ensures the cached size is present in case decmpfs hasn't
398 * encountered this node yet.
399 */
400 if (vp) {
401 if (hfs_file_is_compressed(VTOC(vp), skiplock) ) {
402 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
403 } else {
404 if (VTOCMP(vp) && VTOCMP(vp)->cmp_type >= CMP_MAX) {
405 if (VTOCMP(vp)->cmp_type != DATALESS_CMPFS_TYPE) {
406 // if we don't recognize this type, just use the real data fork size
407 if (VTOC(vp)->c_datafork) {
408 *size = VTOC(vp)->c_datafork->ff_size;
409 ret = 0;
410 } else {
411 ret = EINVAL;
412 }
413 } else {
414 *size = decmpfs_cnode_get_vnode_cached_size(VTOCMP(vp)); /* file info will be cached now, so get size */
415 ret = 0;
416 }
417 } else {
418 ret = EINVAL;
419 }
420 }
421 }
422
423 if (putaway) { /* did we use hfs_vget() to get this vnode? */
424 vnode_put(vp); /* if so, release it and set it to null */
425 vp = NULL;
426 }
427 return ret;
428 }
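/*
 * Illustrative sketch (not part of the build): the two calling modes
 * described in the block comment above. 'vp', 'hfsmp', 'fid' and 'error'
 * stand in for a caller's locals; fid is the caller's catalog node ID.
 */
#if 0
	off_t size = 0;

	/* Caller already holds a vnode: hfsmp and fid may be omitted. */
	error = hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &size, 0);

	/* No vnode in hand: pass a valid hfsmp and file ID instead. */
	error = hfs_uncompressed_size_of_compressed_file(hfsmp, NULL, fid, &size, 0);
#endif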
429
430 int
431 hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock)
432 {
433 if (ctx == decmpfs_ctx)
434 return 0;
435 if (!hfs_file_is_compressed(cp, skiplock))
436 return 0;
437 return decmpfs_hides_rsrc(ctx, cp->c_decmp);
438 }
439
440 int
441 hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock)
442 {
443 if (ctx == decmpfs_ctx)
444 return 0;
445 if (!hfs_file_is_compressed(cp, skiplock))
446 return 0;
447 return decmpfs_hides_xattr(ctx, cp->c_decmp, name);
448 }
449 #endif /* HFS_COMPRESSION */
450
451 /*
452 * Open a file/directory.
453 */
454 int
455 hfs_vnop_open(struct vnop_open_args *ap)
456 {
457 struct vnode *vp = ap->a_vp;
458 struct filefork *fp;
459 struct timeval tv;
460 int error;
461 static int past_bootup = 0;
462 struct cnode *cp = VTOC(vp);
463 struct hfsmount *hfsmp = VTOHFS(vp);
464
465 #if HFS_COMPRESSION
466 if (ap->a_mode & FWRITE) {
467 /* open for write */
468 if ( hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
469 /* opening a compressed file for write, so convert it to decompressed */
470 struct vnode *data_vp = NULL;
471 error = hfs_ref_data_vp(cp, &data_vp, 1); /* 1 == don't take the cnode lock */
472 if (0 == error) {
473 if (data_vp) {
474 error = decmpfs_decompress_file(data_vp, VTOCMP(data_vp), -1, 1, 0);
475 vnode_rele(data_vp);
476 } else {
477 error = EINVAL;
478 }
479 }
480 if (error != 0)
481 return error;
482 }
483 } else {
484 /* open for read */
485 if (hfs_file_is_compressed(cp, 1) ) { /* 1 == don't take the cnode lock */
486 if (VNODE_IS_RSRC(vp)) {
487 /* opening the resource fork of a compressed file, so nothing to do */
488 } else {
489 /* opening a compressed file for read, make sure it validates */
490 error = decmpfs_validate_compressed_file(vp, VTOCMP(vp));
491 if (error != 0)
492 return error;
493 }
494 }
495 }
496 #endif
497
498 /*
499 * Files marked append-only must be opened for appending.
500 */
501 if ((cp->c_bsdflags & APPEND) && !vnode_isdir(vp) &&
502 (ap->a_mode & (FWRITE | O_APPEND)) == FWRITE)
503 return (EPERM);
504
505 if (vnode_isreg(vp) && !UBCINFOEXISTS(vp))
506 return (EBUSY); /* file is in use by the kernel */
507
508 /* Don't allow journal to be opened externally. */
509 if (hfs_is_journal_file(hfsmp, cp))
510 return (EPERM);
511
512 if ((hfsmp->hfs_flags & HFS_READ_ONLY) ||
513 (hfsmp->jnl == NULL) ||
514 #if NAMEDSTREAMS
515 !vnode_isreg(vp) || vnode_isinuse(vp, 0) || vnode_isnamedstream(vp)) {
516 #else
517 !vnode_isreg(vp) || vnode_isinuse(vp, 0)) {
518 #endif
519 return (0);
520 }
521
522 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
523 return (error);
524
525 #if QUOTA
526 /* If we're going to write to the file, initialize quotas. */
527 if ((ap->a_mode & FWRITE) && (hfsmp->hfs_flags & HFS_QUOTAS))
528 (void)hfs_getinoquota(cp);
529 #endif /* QUOTA */
530
531 /*
532 * On the first (non-busy) open of a fragmented
533 * file, attempt to de-frag it (if it's less than 20 MB).
534 */
535 fp = VTOF(vp);
536 if (fp->ff_blocks &&
537 fp->ff_extents[7].blockCount != 0 &&
538 fp->ff_size <= (20 * 1024 * 1024)) {
539 int no_mods = 0;
540 struct timeval now;
541 /*
542 * Wait until system bootup is done (3 min).
543 * And don't relocate a file that's been modified
544 * within the past minute -- this can lead to
545 * system thrashing.
546 */
547
548 if (!past_bootup) {
549 microuptime(&tv);
550 if (tv.tv_sec > (60*3)) {
551 past_bootup = 1;
552 }
553 }
554
555 microtime(&now);
556 if ((now.tv_sec - cp->c_mtime) > 60) {
557 no_mods = 1;
558 }
559
560 if (past_bootup && no_mods) {
561 (void) hfs_relocate(vp, hfsmp->nextAllocation + 4096,
562 vfs_context_ucred(ap->a_context),
563 vfs_context_proc(ap->a_context));
564 }
565 }
566
567 hfs_unlock(cp);
568
569 return (0);
570 }
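/*
 * Summary of the de-fragmentation heuristic above (descriptive comment only):
 * a file is relocated on open only if all of the following hold --
 *   - it has blocks and already spills into its 8th extent
 *     (ff_extents[7].blockCount != 0),
 *   - it is no larger than 20 MB,
 *   - the system has been up for more than 3 minutes, and
 *   - the file has not been modified within the last minute.
 */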
571
572
573 /*
574 * Close a file/directory.
575 */
576 int
577 hfs_vnop_close(ap)
578 struct vnop_close_args /* {
579 struct vnode *a_vp;
580 int a_fflag;
581 vfs_context_t a_context;
582 } */ *ap;
583 {
584 register struct vnode *vp = ap->a_vp;
585 register struct cnode *cp;
586 struct proc *p = vfs_context_proc(ap->a_context);
587 struct hfsmount *hfsmp;
588 int busy;
589 int tooktrunclock = 0;
590 int knownrefs = 0;
591
592 if ( hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0)
593 return (0);
594 cp = VTOC(vp);
595 hfsmp = VTOHFS(vp);
596
597 /*
598 * If the rsrc fork is a named stream, it can cause the data fork to
599 * stay around, preventing de-allocation of these blocks.
600 * Do checks for truncation on close. Purge extra extents if they exist.
601 * Make sure the vp is not a directory, and that it has a resource fork,
602 * and that resource fork is also a named stream.
603 */
604
605 if ((vp->v_type == VREG) && (cp->c_rsrc_vp)
606 && (vnode_isnamedstream(cp->c_rsrc_vp))) {
607 uint32_t blks;
608
609 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
610 /*
611 * If there are extra blocks and there are only 2 refs on
612 * this vp (ourselves + rsrc fork holding ref on us), go ahead
613 * and try to truncate.
614 */
615 if ((blks < VTOF(vp)->ff_blocks) && (!vnode_isinuse(vp, 2))) {
616 // release cnode lock; must acquire truncate lock BEFORE cnode lock
617 hfs_unlock(cp);
618
619 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
620 tooktrunclock = 1;
621
622 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
623 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
624 // bail out if we can't re-acquire cnode lock
625 return 0;
626 }
627 // now re-test to make sure it's still valid
628 if (cp->c_rsrc_vp) {
629 knownrefs = 1 + vnode_isnamedstream(cp->c_rsrc_vp);
630 if (!vnode_isinuse(vp, knownrefs)){
631 // now we can truncate the file, if necessary
632 blks = howmany(VTOF(vp)->ff_size, VTOVCB(vp)->blockSize);
633 if (blks < VTOF(vp)->ff_blocks){
634 (void) hfs_truncate(vp, VTOF(vp)->ff_size, IO_NDELAY, 0, 0, ap->a_context);
635 }
636 }
637 }
638 }
639 }
640
641
642 // if we froze the fs and we're exiting, then "thaw" the fs
643 if (hfsmp->hfs_freezing_proc == p && proc_exiting(p)) {
644 hfsmp->hfs_freezing_proc = NULL;
645 hfs_unlock_global (hfsmp);
646 lck_rw_unlock_exclusive(&hfsmp->hfs_insync);
647 }
648
649 busy = vnode_isinuse(vp, 1);
650
651 if (busy) {
652 hfs_touchtimes(VTOHFS(vp), cp);
653 }
654 if (vnode_isdir(vp)) {
655 hfs_reldirhints(cp, busy);
656 } else if (vnode_issystem(vp) && !busy) {
657 vnode_recycle(vp);
658 }
659
660 if (tooktrunclock){
661 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
662 }
663 hfs_unlock(cp);
664
665 if (ap->a_fflag & FWASWRITTEN) {
666 hfs_sync_ejectable(hfsmp);
667 }
668
669 return (0);
670 }
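/*
 * Illustrative sketch (not part of the build): the lock-ordering dance used
 * in hfs_vnop_close() above. The truncate lock must be acquired before the
 * cnode lock, so the cnode lock is dropped first, both locks are then taken
 * in order, and the cnode state is re-validated before acting on it.
 */
#if 0
	hfs_unlock(cp);
	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
	if (hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
		hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
		return (0);
	}
	/* ... re-check cp; it may have changed while unlocked ... */
#endif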
671
672 /*
673 * Get basic attributes.
674 */
675 int
676 hfs_vnop_getattr(struct vnop_getattr_args *ap)
677 {
678 #define VNODE_ATTR_TIMES \
679 (VNODE_ATTR_va_access_time|VNODE_ATTR_va_change_time|VNODE_ATTR_va_modify_time)
680 #define VNODE_ATTR_AUTH \
681 (VNODE_ATTR_va_mode | VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | \
682 VNODE_ATTR_va_flags | VNODE_ATTR_va_acl)
683
684 struct vnode *vp = ap->a_vp;
685 struct vnode_attr *vap = ap->a_vap;
686 struct vnode *rvp = NULLVP;
687 struct hfsmount *hfsmp;
688 struct cnode *cp;
689 uint64_t data_size;
690 enum vtype v_type;
691 int error = 0;
692 cp = VTOC(vp);
693
694 #if HFS_COMPRESSION
695 /* we need to inspect the decmpfs state of the file before we take the hfs cnode lock */
696 int compressed = 0;
697 int hide_size = 0;
698 off_t uncompressed_size = -1;
699 if (VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_alloc) || VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_size)) {
700 /* we only care about whether the file is compressed if asked for the uncompressed size */
701 if (VNODE_IS_RSRC(vp)) {
702 /* if it's a resource fork, decmpfs may want us to hide the size */
703 hide_size = hfs_hides_rsrc(ap->a_context, cp, 0);
704 } else {
705 /* if it's a data fork, we need to know if it was compressed so we can report the uncompressed size */
706 compressed = hfs_file_is_compressed(cp, 0);
707 }
708 if ((VATTR_IS_ACTIVE(vap, va_data_size) || VATTR_IS_ACTIVE(vap, va_total_size))) {
709 // if it's compressed
710 if (compressed || (!VNODE_IS_RSRC(vp) && cp->c_decmp && cp->c_decmp->cmp_type >= CMP_MAX)) {
711 if (0 != hfs_uncompressed_size_of_compressed_file(NULL, vp, 0, &uncompressed_size, 0)) {
712 /* failed to get the uncompressed size, we'll check for this later */
713 uncompressed_size = -1;
714 } else {
715 // fake that it's compressed
716 compressed = 1;
717 }
718 }
719 }
720 }
721 #endif
722
723 /*
724 * Shortcut for vnode_authorize path. Each of the attributes
725 * in this set is updated atomically so we don't need to take
726 * the cnode lock to access them.
727 */
728 if ((vap->va_active & ~VNODE_ATTR_AUTH) == 0) {
729 /* Make sure file still exists. */
730 if (cp->c_flag & C_NOEXISTS)
731 return (ENOENT);
732
733 vap->va_uid = cp->c_uid;
734 vap->va_gid = cp->c_gid;
735 vap->va_mode = cp->c_mode;
736 vap->va_flags = cp->c_bsdflags;
737 vap->va_supported |= VNODE_ATTR_AUTH & ~VNODE_ATTR_va_acl;
738
739 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
740 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
741 VATTR_SET_SUPPORTED(vap, va_acl);
742 }
743
744 return (0);
745 }
746
747 hfsmp = VTOHFS(vp);
748 v_type = vnode_vtype(vp);
749 /*
750 * If time attributes are requested and we have cnode times
751 * that require updating, then acquire an exclusive lock on
752 * the cnode before updating the times. Otherwise we can
753 * just acquire a shared lock.
754 */
755 if ((vap->va_active & VNODE_ATTR_TIMES) &&
756 (cp->c_touch_acctime || cp->c_touch_chgtime || cp->c_touch_modtime)) {
757 if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
758 return (error);
759 hfs_touchtimes(hfsmp, cp);
760 }
761 else {
762 if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)))
763 return (error);
764 }
765
766 if (v_type == VDIR) {
767 data_size = (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE;
768
769 if (VATTR_IS_ACTIVE(vap, va_nlink)) {
770 int nlink;
771
772 /*
773 * For directories, the va_nlink is essentially a count
774 * of the ".." references to a directory plus the "."
775 * reference and the directory itself. So for HFS+ this
776 * becomes the sub-directory count plus two.
777 *
778 * In the absence of a sub-directory count we use the
779 * directory's item count. This will be too high in
780 * most cases since it also includes files.
781 */
782 if ((hfsmp->hfs_flags & HFS_FOLDERCOUNT) &&
783 (cp->c_attr.ca_recflags & kHFSHasFolderCountMask))
784 nlink = cp->c_attr.ca_dircount; /* implied ".." entries */
785 else
786 nlink = cp->c_entries;
787
788 /* Account for ourself and our "." entry */
789 nlink += 2;
790 /* Hide our private directories. */
791 if (cp->c_cnid == kHFSRootFolderID) {
792 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0) {
793 --nlink;
794 }
795 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0) {
796 --nlink;
797 }
798 }
799 VATTR_RETURN(vap, va_nlink, (u_int64_t)nlink);
800 }
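/*
 * Worked example (descriptive comment only): a folder with 10
 * sub-directories reports va_nlink = 10 + 2 = 12; for the root folder,
 * one is subtracted for each private hardlink directory present.
 */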
801 if (VATTR_IS_ACTIVE(vap, va_nchildren)) {
802 int entries;
803
804 entries = cp->c_entries;
805 /* Hide our private files and directories. */
806 if (cp->c_cnid == kHFSRootFolderID) {
807 if (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid != 0)
808 --entries;
809 if (hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid != 0)
810 --entries;
811 if (hfsmp->jnl || ((hfsmp->vcbAtrb & kHFSVolumeJournaledMask) && (hfsmp->hfs_flags & HFS_READ_ONLY)))
812 entries -= 2; /* hide the journal files */
813 }
814 VATTR_RETURN(vap, va_nchildren, entries);
815 }
816 /*
817 * The va_dirlinkcount is the count of real directory hard links.
818 * (i.e. it's not the sum of the implied "." and ".." references)
819 */
820 if (VATTR_IS_ACTIVE(vap, va_dirlinkcount)) {
821 VATTR_RETURN(vap, va_dirlinkcount, (uint32_t)cp->c_linkcount);
822 }
823 } else /* !VDIR */ {
824 data_size = VCTOF(vp, cp)->ff_size;
825
826 VATTR_RETURN(vap, va_nlink, (u_int64_t)cp->c_linkcount);
827 if (VATTR_IS_ACTIVE(vap, va_data_alloc)) {
828 u_int64_t blocks;
829
830 #if HFS_COMPRESSION
831 if (hide_size) {
832 VATTR_RETURN(vap, va_data_alloc, 0);
833 } else if (compressed) {
834 /* for compressed files, we report all allocated blocks as belonging to the data fork */
835 blocks = cp->c_blocks;
836 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
837 }
838 else
839 #endif
840 {
841 blocks = VCTOF(vp, cp)->ff_blocks;
842 VATTR_RETURN(vap, va_data_alloc, blocks * (u_int64_t)hfsmp->blockSize);
843 }
844 }
845 }
846
847 /* conditional because 64-bit arithmetic can be expensive */
848 if (VATTR_IS_ACTIVE(vap, va_total_size)) {
849 if (v_type == VDIR) {
850 VATTR_RETURN(vap, va_total_size, (cp->c_entries + 2) * AVERAGE_HFSDIRENTRY_SIZE);
851 } else {
852 u_int64_t total_size = ~0ULL;
853 struct cnode *rcp;
854 #if HFS_COMPRESSION
855 if (hide_size) {
856 /* we're hiding the size of this file, so just return 0 */
857 total_size = 0;
858 } else if (compressed) {
859 if (uncompressed_size == -1) {
860 /*
861 * We failed to get the uncompressed size above,
862 * so we'll fall back to the standard path below
863 * since total_size is still -1
864 */
865 } else {
866 /* use the uncompressed size we fetched above */
867 total_size = uncompressed_size;
868 }
869 }
870 #endif
871 if (total_size == ~0ULL) {
872 if (cp->c_datafork) {
873 total_size = cp->c_datafork->ff_size;
874 }
875
876 if (cp->c_blocks - VTOF(vp)->ff_blocks) {
877 /* We deal with rsrc fork vnode iocount at the end of the function */
878 error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE, FALSE);
879 if (error) {
880 /*
881 * Note that we call hfs_vgetrsrc with error_on_unlinked
882 * set to FALSE. This is because we may be invoked via
883 * fstat() on an open-unlinked file descriptor and we must
884 * continue to support access to the rsrc fork until it disappears.
885 * The code at the end of this function will be
886 * responsible for releasing the iocount generated by
887 * hfs_vgetrsrc. This is because we can't drop the iocount
888 * without unlocking the cnode first.
889 */
890 goto out;
891 }
892
893 rcp = VTOC(rvp);
894 if (rcp && rcp->c_rsrcfork) {
895 total_size += rcp->c_rsrcfork->ff_size;
896 }
897 }
898 }
899
900 VATTR_RETURN(vap, va_total_size, total_size);
901 }
902 }
903 if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
904 if (v_type == VDIR) {
905 VATTR_RETURN(vap, va_total_alloc, 0);
906 } else {
907 VATTR_RETURN(vap, va_total_alloc, (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
908 }
909 }
910
911 /*
912 * If the VFS wants extended security data, and we know that we
913 * don't have any (because it never told us it was setting any)
914 * then we can return the supported bit and no data. If we do
915 * have extended security, we can just leave the bit alone and
916 * the VFS will use the fallback path to fetch it.
917 */
918 if (VATTR_IS_ACTIVE(vap, va_acl)) {
919 if ((cp->c_attr.ca_recflags & kHFSHasSecurityMask) == 0) {
920 vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
921 VATTR_SET_SUPPORTED(vap, va_acl);
922 }
923 }
924 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
925 /* Access times are lazily updated, get current time if needed */
926 if (cp->c_touch_acctime) {
927 struct timeval tv;
928
929 microtime(&tv);
930 vap->va_access_time.tv_sec = tv.tv_sec;
931 } else {
932 vap->va_access_time.tv_sec = cp->c_atime;
933 }
934 vap->va_access_time.tv_nsec = 0;
935 VATTR_SET_SUPPORTED(vap, va_access_time);
936 }
937 vap->va_create_time.tv_sec = cp->c_itime;
938 vap->va_create_time.tv_nsec = 0;
939 vap->va_modify_time.tv_sec = cp->c_mtime;
940 vap->va_modify_time.tv_nsec = 0;
941 vap->va_change_time.tv_sec = cp->c_ctime;
942 vap->va_change_time.tv_nsec = 0;
943 vap->va_backup_time.tv_sec = cp->c_btime;
944 vap->va_backup_time.tv_nsec = 0;
945
946 /* See if we need to emit the date added field to the user */
947 if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
948 u_int32_t dateadded = hfs_get_dateadded (cp);
949 if (dateadded) {
950 vap->va_addedtime.tv_sec = dateadded;
951 vap->va_addedtime.tv_nsec = 0;
952 VATTR_SET_SUPPORTED (vap, va_addedtime);
953 }
954 }
955
956 /* XXX is this really a good 'optimal I/O size'? */
957 vap->va_iosize = hfsmp->hfs_logBlockSize;
958 vap->va_uid = cp->c_uid;
959 vap->va_gid = cp->c_gid;
960 vap->va_mode = cp->c_mode;
961 vap->va_flags = cp->c_bsdflags;
962
963 /*
964 * Exporting file IDs from HFS Plus:
965 *
966 * For "normal" files the c_fileid is the same value as the
967 * c_cnid. But for hard link files, they are different - the
968 * c_cnid belongs to the active directory entry (ie the link)
969 * and the c_fileid is for the actual inode (ie the data file).
970 *
971 * The stat call (getattr) uses va_fileid and the Carbon APIs,
972 * which are hardlink-ignorant, will ask for va_linkid.
973 */
974 vap->va_fileid = (u_int64_t)cp->c_fileid;
975 /*
976 * We need to use the origin cache for both hardlinked files
977 * and directories. Hardlinked directories have multiple cnids
978 * and parents (one per link). Hardlinked files also have their
979 * own parents and link IDs separate from the indirect inode number.
980 * If we don't use the cache, we could end up vending the wrong ID
981 * because the cnode will only reflect the link that was looked up most recently.
982 */
983 if (cp->c_flag & C_HARDLINK) {
984 vap->va_linkid = (u_int64_t)hfs_currentcnid(cp);
985 vap->va_parentid = (u_int64_t)hfs_currentparent(cp);
986 } else {
987 vap->va_linkid = (u_int64_t)cp->c_cnid;
988 vap->va_parentid = (u_int64_t)cp->c_parentcnid;
989 }
990 vap->va_fsid = hfsmp->hfs_raw_dev;
991 vap->va_filerev = 0;
992 vap->va_encoding = cp->c_encoding;
993 vap->va_rdev = (v_type == VBLK || v_type == VCHR) ? cp->c_rdev : 0;
994 #if HFS_COMPRESSION
995 if (VATTR_IS_ACTIVE(vap, va_data_size)) {
996 if (hide_size)
997 vap->va_data_size = 0;
998 else if (compressed) {
999 if (uncompressed_size == -1) {
1000 /* failed to get the uncompressed size above, so just return data_size */
1001 vap->va_data_size = data_size;
1002 } else {
1003 /* use the uncompressed size we fetched above */
1004 vap->va_data_size = uncompressed_size;
1005 }
1006 } else
1007 vap->va_data_size = data_size;
1008 // vap->va_supported |= VNODE_ATTR_va_data_size;
1009 VATTR_SET_SUPPORTED(vap, va_data_size);
1010 }
1011 #else
1012 vap->va_data_size = data_size;
1013 vap->va_supported |= VNODE_ATTR_va_data_size;
1014 #endif
1015
1016 /* Mark them all at once instead of individual VATTR_SET_SUPPORTED calls. */
1017 vap->va_supported |= VNODE_ATTR_va_create_time | VNODE_ATTR_va_modify_time |
1018 VNODE_ATTR_va_change_time| VNODE_ATTR_va_backup_time |
1019 VNODE_ATTR_va_iosize | VNODE_ATTR_va_uid |
1020 VNODE_ATTR_va_gid | VNODE_ATTR_va_mode |
1021 VNODE_ATTR_va_flags |VNODE_ATTR_va_fileid |
1022 VNODE_ATTR_va_linkid | VNODE_ATTR_va_parentid |
1023 VNODE_ATTR_va_fsid | VNODE_ATTR_va_filerev |
1024 VNODE_ATTR_va_encoding | VNODE_ATTR_va_rdev;
1025
1026 /* If this is the root, let VFS find out the mount name, which
1027 * may be different from the real name. Otherwise, we need to take
1028 * care of hardlinked files, which may need to be looked up here.
1029 */
1030 if (VATTR_IS_ACTIVE(vap, va_name) && (cp->c_cnid != kHFSRootFolderID)) {
1031 struct cat_desc linkdesc;
1032 int lockflags;
1033 int uselinkdesc = 0;
1034 cnid_t nextlinkid = 0;
1035 cnid_t prevlinkid = 0;
1036
1037 /* Get the name for ATTR_CMN_NAME. We need to take special care for hardlinks
1038 * here because the info. for the link ID requested by getattrlist may be
1039 * different than what's currently in the cnode. This is because the cnode
1040 * will be filled in with the information for the most recent link ID that went
1041 * through namei/lookup(). If there are competing lookups for hardlinks that point
1042 * to the same inode, incorrect name information could be vended to one (or more) getattrlist callers.
1043 * Also, we need to beware of open-unlinked files which could have a namelen of 0.
1044 */
1045
1046 if ((cp->c_flag & C_HARDLINK) &&
1047 ((cp->c_desc.cd_namelen == 0) || (vap->va_linkid != cp->c_cnid))) {
1048 /*
1049 * If we have no name and our link ID is the raw inode number, then we may
1050 * have an open-unlinked file. Go to the next link in this case.
1051 */
1052 if ((cp->c_desc.cd_namelen == 0) && (vap->va_linkid == cp->c_fileid)) {
1053 if ((error = hfs_lookup_siblinglinks(hfsmp, vap->va_linkid, &prevlinkid, &nextlinkid))){
1054 goto out;
1055 }
1056 }
1057 else {
1058 /* just use link obtained from vap above */
1059 nextlinkid = vap->va_linkid;
1060 }
1061
1062 /* We need to probe the catalog for the descriptor corresponding to the link ID
1063 * stored in nextlinkid. Note that we don't know if we have the exclusive lock
1064 * for the cnode here, so we can't just update the descriptor. Instead,
1065 * we should just store the descriptor's value locally and then use it to pass
1066 * out the name value as needed below.
1067 */
1068 if (nextlinkid){
1069 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
1070 error = cat_findname(hfsmp, nextlinkid, &linkdesc);
1071 hfs_systemfile_unlock(hfsmp, lockflags);
1072 if (error == 0) {
1073 uselinkdesc = 1;
1074 }
1075 }
1076 }
1077
1078 /* By this point, we've either patched up the name above and the c_desc
1079 * points to the correct data, or it already did, in which case we just proceed
1080 * by copying the name into the vap. Note that we will never set va_name to
1081 * supported if nextlinkid is never initialized. This could happen in the degenerate
1082 * case above involving the raw inode number, where it has no nextlinkid. In this case
1083 * we will simply not mark the name bit as supported.
1084 */
1085 if (uselinkdesc) {
1086 strlcpy(vap->va_name, (const char*) linkdesc.cd_nameptr, MAXPATHLEN);
1087 VATTR_SET_SUPPORTED(vap, va_name);
1088 cat_releasedesc(&linkdesc);
1089 }
1090 else if (cp->c_desc.cd_namelen) {
1091 strlcpy(vap->va_name, (const char*) cp->c_desc.cd_nameptr, MAXPATHLEN);
1092 VATTR_SET_SUPPORTED(vap, va_name);
1093 }
1094 }
1095
1096 out:
1097 hfs_unlock(cp);
1098 /*
1099 * We need to vnode_put the rsrc fork vnode only *after* we've released
1100 * the cnode lock, since vnode_put can trigger an inactive call, which
1101 * will go back into HFS and try to acquire a cnode lock.
1102 */
1103 if (rvp) {
1104 vnode_put (rvp);
1105 }
1106
1107 return (error);
1108 }
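/*
 * Illustrative sketch (not part of the build): the getattr protocol followed
 * throughout hfs_vnop_getattr() above -- compute an attribute only when the
 * caller asked for it, and mark it supported when it is returned. 'vap',
 * 'cp' and 'hfsmp' stand in for the function's locals.
 */
#if 0
	if (VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		VATTR_RETURN(vap, va_total_alloc,
		             (u_int64_t)cp->c_blocks * (u_int64_t)hfsmp->blockSize);
	}
#endif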
1109
1110 int
1111 hfs_vnop_setattr(ap)
1112 struct vnop_setattr_args /* {
1113 struct vnode *a_vp;
1114 struct vnode_attr *a_vap;
1115 vfs_context_t a_context;
1116 } */ *ap;
1117 {
1118 struct vnode_attr *vap = ap->a_vap;
1119 struct vnode *vp = ap->a_vp;
1120 struct cnode *cp = NULL;
1121 struct hfsmount *hfsmp;
1122 kauth_cred_t cred = vfs_context_ucred(ap->a_context);
1123 struct proc *p = vfs_context_proc(ap->a_context);
1124 int error = 0;
1125 uid_t nuid;
1126 gid_t ngid;
1127 time_t orig_ctime;
1128
1129 orig_ctime = VTOC(vp)->c_ctime;
1130
1131 #if HFS_COMPRESSION
1132 int decmpfs_reset_state = 0;
1133 /*
1134 we call decmpfs_update_attributes even if the file is not compressed
1135 because we want to update the incoming flags if the xattrs are invalid
1136 */
1137 error = decmpfs_update_attributes(vp, vap);
1138 if (error)
1139 return error;
1140 #endif
1141 //
1142 // if this is not a size-changing setattr and it is not just
1143 // an atime update, then check for a snapshot.
1144 //
1145 if (!VATTR_IS_ACTIVE(vap, va_data_size) && !(vap->va_active == VNODE_ATTR_va_access_time)) {
1146 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_METADATA_MOD, NSPACE_REARM_NO_ARG);
1147 }
1148
1149 #if CONFIG_PROTECT
1150 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
1151 return (error);
1152 }
1153 #endif /* CONFIG_PROTECT */
1154
1155 hfsmp = VTOHFS(vp);
1156
1157 /* Don't allow modification of the journal. */
1158 if (hfs_is_journal_file(hfsmp, VTOC(vp))) {
1159 return (EPERM);
1160 }
1161
1162 /*
1163 * File size change request.
1164 * We are guaranteed that this is not a directory, and that
1165 * the filesystem object is writeable.
1166 *
1167 * NOTE: HFS COMPRESSION depends on the data_size being set *before* the bsd flags are updated
1168 */
1169 VATTR_SET_SUPPORTED(vap, va_data_size);
1170 if (VATTR_IS_ACTIVE(vap, va_data_size) && !vnode_islnk(vp)) {
1171 #if HFS_COMPRESSION
1172 /* keep the compressed state locked until we're done truncating the file */
1173 decmpfs_cnode *dp = VTOCMP(vp);
1174 if (!dp) {
1175 /*
1176 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1177 * is filled in; we need a decmpfs_cnode to lock out decmpfs state changes
1178 * on this file while it's truncating
1179 */
1180 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1181 if (!dp) {
1182 /* failed to allocate a decmpfs_cnode */
1183 return ENOMEM; /* what should this be? */
1184 }
1185 }
1186
1187 check_for_tracked_file(vp, orig_ctime, vap->va_data_size == 0 ? NAMESPACE_HANDLER_TRUNCATE_OP|NAMESPACE_HANDLER_DELETE_OP : NAMESPACE_HANDLER_TRUNCATE_OP, NULL);
1188
1189 decmpfs_lock_compressed_data(dp, 1);
1190 if (hfs_file_is_compressed(VTOC(vp), 1)) {
1191 error = decmpfs_decompress_file(vp, dp, -1/*vap->va_data_size*/, 0, 1);
1192 if (error != 0) {
1193 decmpfs_unlock_compressed_data(dp, 1);
1194 return error;
1195 }
1196 }
1197 #endif
1198
1199 /* Take truncate lock before taking cnode lock. */
1200 hfs_lock_truncate(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
1201
1202 /* Perform the ubc_setsize before taking the cnode lock. */
1203 ubc_setsize(vp, vap->va_data_size);
1204
1205 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
1206 hfs_unlock_truncate(VTOC(vp), HFS_LOCK_DEFAULT);
1207 #if HFS_COMPRESSION
1208 decmpfs_unlock_compressed_data(dp, 1);
1209 #endif
1210 return (error);
1211 }
1212 cp = VTOC(vp);
1213
1214 error = hfs_truncate(vp, vap->va_data_size, vap->va_vaflags & 0xffff, 1, 0, ap->a_context);
1215
1216 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
1217 #if HFS_COMPRESSION
1218 decmpfs_unlock_compressed_data(dp, 1);
1219 #endif
1220 if (error)
1221 goto out;
1222 }
1223 if (cp == NULL) {
1224 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
1225 return (error);
1226 cp = VTOC(vp);
1227 }
1228
1229 /*
1230 * If it is just an access time update request by itself
1231 * we know the request is from kernel level code, and we
1232 * can delay it without being as worried about consistency.
1233 * This change speeds up mmaps, in the rare case that they
1234 * get caught behind a sync.
1235 */
1236
1237 if (vap->va_active == VNODE_ATTR_va_access_time) {
1238 cp->c_touch_acctime=TRUE;
1239 goto out;
1240 }
1241
1242
1243
1244 /*
1245 * Owner/group change request.
1246 * We are guaranteed that the new owner/group is valid and legal.
1247 */
1248 VATTR_SET_SUPPORTED(vap, va_uid);
1249 VATTR_SET_SUPPORTED(vap, va_gid);
1250 nuid = VATTR_IS_ACTIVE(vap, va_uid) ? vap->va_uid : (uid_t)VNOVAL;
1251 ngid = VATTR_IS_ACTIVE(vap, va_gid) ? vap->va_gid : (gid_t)VNOVAL;
1252 if (((nuid != (uid_t)VNOVAL) || (ngid != (gid_t)VNOVAL)) &&
1253 ((error = hfs_chown(vp, nuid, ngid, cred, p)) != 0))
1254 goto out;
1255
1256 /*
1257 * Mode change request.
1258 * We are guaranteed that the mode value is valid and that in
1259 * conjunction with the owner and group, this change is legal.
1260 */
1261 VATTR_SET_SUPPORTED(vap, va_mode);
1262 if (VATTR_IS_ACTIVE(vap, va_mode) &&
1263 ((error = hfs_chmod(vp, (int)vap->va_mode, cred, p)) != 0))
1264 goto out;
1265
1266 /*
1267 * File flags change.
1268 * We are guaranteed that only flags allowed to change given the
1269 * current securelevel are being changed.
1270 */
1271 VATTR_SET_SUPPORTED(vap, va_flags);
1272 if (VATTR_IS_ACTIVE(vap, va_flags)) {
1273 u_int16_t *fdFlags;
1274
1275 #if HFS_COMPRESSION
1276 if ((cp->c_bsdflags ^ vap->va_flags) & UF_COMPRESSED) {
1277 /*
1278 * the UF_COMPRESSED was toggled, so reset our cached compressed state
1279 * but we don't want to actually do the update until we've released the cnode lock down below
1280 * NOTE: turning the flag off doesn't actually decompress the file, so that we can
1281 * turn off the flag and look at the "raw" file for debugging purposes
1282 */
1283 decmpfs_reset_state = 1;
1284 }
1285 #endif
1286
1287 cp->c_bsdflags = vap->va_flags;
1288 cp->c_touch_chgtime = TRUE;
1289
1290 /*
1291 * Mirror the UF_HIDDEN flag to the invisible bit of the Finder Info.
1292 *
1293 * The fdFlags for files and frFlags for folders are both 8 bytes
1294 * into the userInfo (the first 16 bytes of the Finder Info). They
1295 * are both 16-bit fields.
1296 */
1297 fdFlags = (u_int16_t *) &cp->c_finderinfo[8];
1298 if (vap->va_flags & UF_HIDDEN)
1299 *fdFlags |= OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1300 else
1301 *fdFlags &= ~OSSwapHostToBigConstInt16(kFinderInvisibleMask);
1302 }
1303
1304 /*
1305 * Timestamp updates.
1306 */
1307 VATTR_SET_SUPPORTED(vap, va_create_time);
1308 VATTR_SET_SUPPORTED(vap, va_access_time);
1309 VATTR_SET_SUPPORTED(vap, va_modify_time);
1310 VATTR_SET_SUPPORTED(vap, va_backup_time);
1311 VATTR_SET_SUPPORTED(vap, va_change_time);
1312 if (VATTR_IS_ACTIVE(vap, va_create_time) ||
1313 VATTR_IS_ACTIVE(vap, va_access_time) ||
1314 VATTR_IS_ACTIVE(vap, va_modify_time) ||
1315 VATTR_IS_ACTIVE(vap, va_backup_time)) {
1316 if (VATTR_IS_ACTIVE(vap, va_create_time))
1317 cp->c_itime = vap->va_create_time.tv_sec;
1318 if (VATTR_IS_ACTIVE(vap, va_access_time)) {
1319 cp->c_atime = vap->va_access_time.tv_sec;
1320 cp->c_touch_acctime = FALSE;
1321 }
1322 if (VATTR_IS_ACTIVE(vap, va_modify_time)) {
1323 cp->c_mtime = vap->va_modify_time.tv_sec;
1324 cp->c_touch_modtime = FALSE;
1325 cp->c_touch_chgtime = TRUE;
1326
1327 /*
1328 * The utimes system call can reset the modification
1329 * time but it doesn't know about HFS create times.
1330 * So we need to ensure that the creation time is
1331 * always at least as old as the modification time.
1332 */
1333 if ((VTOVCB(vp)->vcbSigWord == kHFSPlusSigWord) &&
1334 (cp->c_cnid != kHFSRootFolderID) &&
1335 (cp->c_mtime < cp->c_itime)) {
1336 cp->c_itime = cp->c_mtime;
1337 }
1338 }
1339 if (VATTR_IS_ACTIVE(vap, va_backup_time))
1340 cp->c_btime = vap->va_backup_time.tv_sec;
1341 cp->c_flag |= C_MODIFIED;
1342 }
1343
1344 /*
1345 * Set name encoding.
1346 */
1347 VATTR_SET_SUPPORTED(vap, va_encoding);
1348 if (VATTR_IS_ACTIVE(vap, va_encoding)) {
1349 cp->c_encoding = vap->va_encoding;
1350 hfs_setencodingbits(hfsmp, cp->c_encoding);
1351 }
1352
1353 if ((error = hfs_update(vp, TRUE)) != 0)
1354 goto out;
1355 out:
1356 if (cp) {
1357 /* Purge origin cache for cnode, since caller now has correct link ID for it
1358 * We purge it here since it was acquired for us during lookup, and we no longer need it.
1359 */
1360 if ((cp->c_flag & C_HARDLINK) && (vp->v_type != VDIR)){
1361 hfs_relorigin(cp, 0);
1362 }
1363
1364 hfs_unlock(cp);
1365 #if HFS_COMPRESSION
1366 if (decmpfs_reset_state) {
1367 /*
1368 * we've changed the UF_COMPRESSED flag, so reset the decmpfs state for this cnode
1369 * but don't do it while holding the hfs cnode lock
1370 */
1371 decmpfs_cnode *dp = VTOCMP(vp);
1372 if (!dp) {
1373 /*
1374 * call hfs_lazy_init_decmpfs_cnode() to make sure that the decmpfs_cnode
1375 * is filled in; we need a decmpfs_cnode to prevent decmpfs state changes
1376 * on this file if it's locked
1377 */
1378 dp = hfs_lazy_init_decmpfs_cnode(VTOC(vp));
1379 if (!dp) {
1380 /* failed to allocate a decmpfs_cnode */
1381 return ENOMEM; /* what should this be? */
1382 }
1383 }
1384 decmpfs_cnode_set_vnode_state(dp, FILE_TYPE_UNKNOWN, 0);
1385 }
1386 #endif
1387 }
1388 return (error);
1389 }
1390
1391
1392 /*
1393 * Change the mode on a file.
1394 * cnode must be locked before calling.
1395 */
1396 int
1397 hfs_chmod(struct vnode *vp, int mode, __unused kauth_cred_t cred, __unused struct proc *p)
1398 {
1399 register struct cnode *cp = VTOC(vp);
1400
1401 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1402 return (0);
1403
1404 // Don't allow modification of the journal or journal_info_block
1405 if (hfs_is_journal_file(VTOHFS(vp), cp)) {
1406 return EPERM;
1407 }
1408
1409 #if OVERRIDE_UNKNOWN_PERMISSIONS
1410 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS) {
1411 return (0);
1412 };
1413 #endif
1414 cp->c_mode &= ~ALLPERMS;
1415 cp->c_mode |= (mode & ALLPERMS);
1416 cp->c_touch_chgtime = TRUE;
1417 return (0);
1418 }
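/*
 * Illustrative sketch (not part of the build): hfs_chmod() expects the cnode
 * to be locked exclusive already, as hfs_vnop_setattr() above arranges.
 * 'vp', 'cred', 'p' and 'error' stand in for a caller's locals.
 */
#if 0
	if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
		error = hfs_chmod(vp, 0644, cred, p);
		hfs_unlock(VTOC(vp));
	}
#endif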
1419
1420
1421 int
1422 hfs_write_access(struct vnode *vp, kauth_cred_t cred, struct proc *p, Boolean considerFlags)
1423 {
1424 struct cnode *cp = VTOC(vp);
1425 int retval = 0;
1426 int is_member;
1427
1428 /*
1429 * Disallow write attempts on read-only file systems;
1430 * unless the file is a socket, fifo, or a block or
1431 * character device resident on the file system.
1432 */
1433 switch (vnode_vtype(vp)) {
1434 case VDIR:
1435 case VLNK:
1436 case VREG:
1437 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY)
1438 return (EROFS);
1439 break;
1440 default:
1441 break;
1442 }
1443
1444 /* If immutable bit set, nobody gets to write it. */
1445 if (considerFlags && (cp->c_bsdflags & IMMUTABLE))
1446 return (EPERM);
1447
1448 /* Otherwise, user id 0 always gets access. */
1449 if (!suser(cred, NULL))
1450 return (0);
1451
1452 /* Otherwise, check the owner. */
1453 if ((retval = hfs_owner_rights(VTOHFS(vp), cp->c_uid, cred, p, false)) == 0)
1454 return ((cp->c_mode & S_IWUSR) == S_IWUSR ? 0 : EACCES);
1455
1456 /* Otherwise, check the groups. */
1457 if (kauth_cred_ismember_gid(cred, cp->c_gid, &is_member) == 0 && is_member) {
1458 return ((cp->c_mode & S_IWGRP) == S_IWGRP ? 0 : EACCES);
1459 }
1460
1461 /* Otherwise, check everyone else. */
1462 return ((cp->c_mode & S_IWOTH) == S_IWOTH ? 0 : EACCES);
1463 }
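/*
 * Worked example (descriptive comment only): for a regular file with mode
 * 0644 on a writable volume, hfs_write_access() grants write to root and to
 * the owner (S_IWUSR is set), and returns EACCES for group members and
 * everyone else, unless the immutable-flag check above applies first.
 */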
1464
1465
1466 /*
1467 * Perform chown operation on cnode cp;
1468 * cnode must be locked prior to call.
1469 */
1470 int
1471 #if !QUOTA
1472 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, __unused kauth_cred_t cred,
1473 __unused struct proc *p)
1474 #else
1475 hfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
1476 __unused struct proc *p)
1477 #endif
1478 {
1479 register struct cnode *cp = VTOC(vp);
1480 uid_t ouid;
1481 gid_t ogid;
1482 #if QUOTA
1483 int error = 0;
1484 register int i;
1485 int64_t change;
1486 #endif /* QUOTA */
1487
1488 if (VTOVCB(vp)->vcbSigWord != kHFSPlusSigWord)
1489 return (ENOTSUP);
1490
1491 if (((unsigned int)vfs_flags(VTOVFS(vp))) & MNT_UNKNOWNPERMISSIONS)
1492 return (0);
1493
1494 if (uid == (uid_t)VNOVAL)
1495 uid = cp->c_uid;
1496 if (gid == (gid_t)VNOVAL)
1497 gid = cp->c_gid;
1498
1499 #if 0 /* we are guaranteed that this is already the case */
1500 /*
1501 * If we don't own the file, are trying to change the owner
1502 * of the file, or are not a member of the target group,
1503 * the caller must be superuser or the call fails.
1504 */
1505 if ((kauth_cred_getuid(cred) != cp->c_uid || uid != cp->c_uid ||
1506 (gid != cp->c_gid &&
1507 (kauth_cred_ismember_gid(cred, gid, &is_member) || !is_member))) &&
1508 (error = suser(cred, 0)))
1509 return (error);
1510 #endif
1511
1512 ogid = cp->c_gid;
1513 ouid = cp->c_uid;
1514 #if QUOTA
1515 if ((error = hfs_getinoquota(cp)))
1516 return (error);
1517 if (ouid == uid) {
1518 dqrele(cp->c_dquot[USRQUOTA]);
1519 cp->c_dquot[USRQUOTA] = NODQUOT;
1520 }
1521 if (ogid == gid) {
1522 dqrele(cp->c_dquot[GRPQUOTA]);
1523 cp->c_dquot[GRPQUOTA] = NODQUOT;
1524 }
1525
1526 /*
1527 * Eventually need to account for (fake) a block per directory
1528 * if (vnode_isdir(vp))
1529 * change = VTOHFS(vp)->blockSize;
1530 * else
1531 */
1532
1533 change = (int64_t)(cp->c_blocks) * (int64_t)VTOVCB(vp)->blockSize;
1534 (void) hfs_chkdq(cp, -change, cred, CHOWN);
1535 (void) hfs_chkiq(cp, -1, cred, CHOWN);
1536 for (i = 0; i < MAXQUOTAS; i++) {
1537 dqrele(cp->c_dquot[i]);
1538 cp->c_dquot[i] = NODQUOT;
1539 }
1540 #endif /* QUOTA */
1541 cp->c_gid = gid;
1542 cp->c_uid = uid;
1543 #if QUOTA
1544 if ((error = hfs_getinoquota(cp)) == 0) {
1545 if (ouid == uid) {
1546 dqrele(cp->c_dquot[USRQUOTA]);
1547 cp->c_dquot[USRQUOTA] = NODQUOT;
1548 }
1549 if (ogid == gid) {
1550 dqrele(cp->c_dquot[GRPQUOTA]);
1551 cp->c_dquot[GRPQUOTA] = NODQUOT;
1552 }
1553 if ((error = hfs_chkdq(cp, change, cred, CHOWN)) == 0) {
1554 if ((error = hfs_chkiq(cp, 1, cred, CHOWN)) == 0)
1555 goto good;
1556 else
1557 (void) hfs_chkdq(cp, -change, cred, CHOWN|FORCE);
1558 }
1559 for (i = 0; i < MAXQUOTAS; i++) {
1560 dqrele(cp->c_dquot[i]);
1561 cp->c_dquot[i] = NODQUOT;
1562 }
1563 }
1564 cp->c_gid = ogid;
1565 cp->c_uid = ouid;
1566 if (hfs_getinoquota(cp) == 0) {
1567 if (ouid == uid) {
1568 dqrele(cp->c_dquot[USRQUOTA]);
1569 cp->c_dquot[USRQUOTA] = NODQUOT;
1570 }
1571 if (ogid == gid) {
1572 dqrele(cp->c_dquot[GRPQUOTA]);
1573 cp->c_dquot[GRPQUOTA] = NODQUOT;
1574 }
1575 (void) hfs_chkdq(cp, change, cred, FORCE|CHOWN);
1576 (void) hfs_chkiq(cp, 1, cred, FORCE|CHOWN);
1577 (void) hfs_getinoquota(cp);
1578 }
1579 return (error);
1580 good:
1581 if (hfs_getinoquota(cp))
1582 panic("hfs_chown: lost quota");
1583 #endif /* QUOTA */
1584
1585
1586 /*
1587 According to the SUSv3 Standard, chown() shall mark
1588 for update the st_ctime field of the file.
1589 (No exceptions mentioned)
1590 */
1591 cp->c_touch_chgtime = TRUE;
1592 return (0);
1593 }
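/*
 * Summary of the quota bookkeeping above (descriptive comment only): the
 * cnode's block and inode usage is first charged back to the old uid/gid,
 * the new ids are swapped in, and the usage is then charged to the new
 * owners; if either new-owner check fails, the original ids are restored
 * and the usage is forcibly re-charged to the original uid/gid.
 */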
1594
1595
1596 /*
1597 * hfs_vnop_exchange:
1598 *
1599 * Inputs:
1600 * 'from' vnode/cnode
1601 * 'to' vnode/cnode
1602 * options flag bits
1603 * vfs_context
1604 *
1605 * Discussion:
1606 * hfs_vnop_exchange is used to service the exchangedata(2) system call.
1607 * Per the requirements of that system call, this function "swaps" some
1608 * of the information that lives in one catalog record for some that
1609 * lives in another. Note that not everything is swapped; in particular,
1610 * the extent information stored in each cnode is kept local to that
1611 * cnode. This allows existing file descriptor references to continue
1612 * to operate on the same content, regardless of the location in the
1613 * namespace that the file may have moved to. See inline comments
1614 * in the function for more information.
1615 */
1616 int
1617 hfs_vnop_exchange(ap)
1618 struct vnop_exchange_args /* {
1619 struct vnode *a_fvp;
1620 struct vnode *a_tvp;
1621 int a_options;
1622 vfs_context_t a_context;
1623 } */ *ap;
1624 {
1625 struct vnode *from_vp = ap->a_fvp;
1626 struct vnode *to_vp = ap->a_tvp;
1627 struct cnode *from_cp;
1628 struct cnode *to_cp;
1629 struct hfsmount *hfsmp;
1630 struct cat_desc tempdesc;
1631 struct cat_attr tempattr;
1632 const unsigned char *from_nameptr;
1633 const unsigned char *to_nameptr;
1634 char from_iname[32];
1635 char to_iname[32];
1636 uint32_t to_flag_special;
1637 uint32_t from_flag_special;
1638 cnid_t from_parid;
1639 cnid_t to_parid;
1640 int lockflags;
1641 int error = 0, started_tr = 0, got_cookie = 0;
1642 cat_cookie_t cookie;
1643 time_t orig_from_ctime, orig_to_ctime;
1644
1645 /*
1646 * VFS does the following checks:
1647 * 1. Validate that both are files.
1648 * 2. Validate that both are on the same mount.
1649 * 3. Validate that they're not the same vnode.
1650 */
1651
1652 orig_from_ctime = VTOC(from_vp)->c_ctime;
1653 orig_to_ctime = VTOC(to_vp)->c_ctime;
1654
1655
1656 #if CONFIG_PROTECT
1657 /*
1658 * Do not allow exchangedata/F_MOVEDATAEXTENTS on data-protected filesystems
1659 * because the EAs will not be swapped. As a result, the persistent keys would not
1660 * match and the files will be garbage.
1661 */
1662 if (cp_fs_protected (vnode_mount(from_vp))) {
1663 return EINVAL;
1664 }
1665 #endif
1666
1667 #if HFS_COMPRESSION
1668 if ( hfs_file_is_compressed(VTOC(from_vp), 0) ) {
1669 if ( 0 != ( error = decmpfs_decompress_file(from_vp, VTOCMP(from_vp), -1, 0, 1) ) ) {
1670 return error;
1671 }
1672 }
1673
1674 if ( hfs_file_is_compressed(VTOC(to_vp), 0) ) {
1675 if ( 0 != ( error = decmpfs_decompress_file(to_vp, VTOCMP(to_vp), -1, 0, 1) ) ) {
1676 return error;
1677 }
1678 }
1679 #endif // HFS_COMPRESSION
1680
1681 /*
1682 * Normally, we want to notify the user handlers about the event,
1683 * except if it's a handler driving the event.
1684 */
1685 if ((ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) == 0) {
1686 check_for_tracked_file(from_vp, orig_from_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1687 check_for_tracked_file(to_vp, orig_to_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
1688 } else {
1689 /*
1690 * We're doing a data-swap.
1691 * Take the truncate lock/cnode lock, then verify there are no mmap references.
1692 * Issue a hfs_filedone to flush out all of the remaining state for this file.
1693 * Allow the rest of the codeflow to re-acquire the cnode locks in order.
1694 */
1695
1696 hfs_lock_truncate (VTOC(from_vp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
1697
1698 if ((error = hfs_lock(VTOC(from_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
1699 hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
1700 return error;
1701 }
1702
1703 /* Verify the source file is not in use by anyone besides us (including mmap refs) */
1704 if (vnode_isinuse(from_vp, 1)) {
1705 error = EBUSY;
1706 hfs_unlock(VTOC(from_vp));
1707 hfs_unlock_truncate (VTOC(from_vp), HFS_LOCK_DEFAULT);
1708 return error;
1709 }
1710
1711 /* Flush out the data in the source file */
1712 VTOC(from_vp)->c_flag |= C_SWAPINPROGRESS;
1713 error = hfs_filedone (from_vp, ap->a_context);
1714 VTOC(from_vp)->c_flag &= ~C_SWAPINPROGRESS;
1715 hfs_unlock(VTOC(from_vp));
1716 hfs_unlock_truncate(VTOC(from_vp), HFS_LOCK_DEFAULT);
1717
1718 if (error) {
1719 return error;
1720 }
1721 }
1722
1723 if ((error = hfs_lockpair(VTOC(from_vp), VTOC(to_vp), HFS_EXCLUSIVE_LOCK)))
1724 return (error);
1725
1726 from_cp = VTOC(from_vp);
1727 to_cp = VTOC(to_vp);
1728 hfsmp = VTOHFS(from_vp);
1729
1730 /* Resource forks cannot be exchanged. */
1731 if ( VNODE_IS_RSRC(from_vp) || VNODE_IS_RSRC(to_vp)) {
1732 error = EINVAL;
1733 goto exit;
1734 }
1735
1736 // Don't allow modification of the journal or journal_info_block
1737 if (hfs_is_journal_file(hfsmp, from_cp) ||
1738 hfs_is_journal_file(hfsmp, to_cp)) {
1739 error = EPERM;
1740 goto exit;
1741 }
1742
1743 /*
1744 * Ok, now that all of the pre-flighting is done, call the underlying
1745 * function if needed.
1746 */
1747 if (ap->a_options & FSOPT_EXCHANGE_DATA_ONLY) {
1748 error = hfs_movedata(from_vp, to_vp);
1749 goto exit;
1750 }
1751
1752
1753 if ((error = hfs_start_transaction(hfsmp)) != 0) {
1754 goto exit;
1755 }
1756 started_tr = 1;
1757
1758 /*
1759 * Reserve some space in the Catalog file.
1760 */
1761 if ((error = cat_preflight(hfsmp, CAT_EXCHANGE, &cookie, vfs_context_proc(ap->a_context)))) {
1762 goto exit;
1763 }
1764 got_cookie = 1;
1765
1766 /* The backend code always tries to delete the virtual
1767 * extent id for exchanging files so we need to lock
1768 * the extents b-tree.
1769 */
1770 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
1771
1772 /* Account for the location of the catalog objects. */
1773 if (from_cp->c_flag & C_HARDLINK) {
1774 MAKE_INODE_NAME(from_iname, sizeof(from_iname),
1775 from_cp->c_attr.ca_linkref);
1776 from_nameptr = (unsigned char *)from_iname;
1777 from_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1778 from_cp->c_hint = 0;
1779 } else {
1780 from_nameptr = from_cp->c_desc.cd_nameptr;
1781 from_parid = from_cp->c_parentcnid;
1782 }
1783 if (to_cp->c_flag & C_HARDLINK) {
1784 MAKE_INODE_NAME(to_iname, sizeof(to_iname),
1785 to_cp->c_attr.ca_linkref);
1786 to_nameptr = (unsigned char *)to_iname;
1787 to_parid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
1788 to_cp->c_hint = 0;
1789 } else {
1790 to_nameptr = to_cp->c_desc.cd_nameptr;
1791 to_parid = to_cp->c_parentcnid;
1792 }
1793
1794 /*
1795 * ExchangeFileIDs swaps the extent information attached to two
1796 * different file IDs. It also swaps the extent information that
1797 * may live in the extents-overflow B-Tree.
1798 *
1799 * We do this in a transaction as this may require a lot of B-Tree nodes
1800 * to do completely, particularly if one of the files in question
1801 * has a lot of extents.
1802 *
1803 * For example, assume "file1" has fileID 50, and "file2" has fileID 52.
1804 * For the on-disk records, which are assumed to be synced, we will
1805 * first swap the resident inline-8 extents as part of the catalog records.
1806 * Then we will swap any extents overflow records for each file.
1807 *
1808 * When this function is done, "file1" will have fileID 52, and "file2" will
1809 * have fileID 50.
1810 */
1811 error = ExchangeFileIDs(hfsmp, from_nameptr, to_nameptr, from_parid,
1812 to_parid, from_cp->c_hint, to_cp->c_hint);
1813 hfs_systemfile_unlock(hfsmp, lockflags);
1814
1815 /*
1816 * Note that we don't need to exchange any extended attributes
1817 * since the attributes are keyed by file ID.
1818 */
1819
1820 if (error != E_NONE) {
1821 error = MacToVFSError(error);
1822 goto exit;
1823 }
1824
1825 /* Purge the vnodes from the name cache */
1826 if (from_vp)
1827 cache_purge(from_vp);
1828 if (to_vp)
1829 cache_purge(to_vp);
1830
1831 /* Bump both source and destination write counts before any swaps. */
1832 {
1833 hfs_incr_gencount (from_cp);
1834 hfs_incr_gencount (to_cp);
1835 }
1836
1837
1838 /* Save a copy of "from" attributes before swapping. */
1839 bcopy(&from_cp->c_desc, &tempdesc, sizeof(struct cat_desc));
1840 bcopy(&from_cp->c_attr, &tempattr, sizeof(struct cat_attr));
1841
1842 /* Save whether or not each cnode is a hardlink or has EAs */
1843 from_flag_special = from_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1844 to_flag_special = to_cp->c_flag & (C_HARDLINK | C_HASXATTRS);
1845
1846 /* Drop the special bits from each cnode */
1847 from_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
1848 to_cp->c_flag &= ~(C_HARDLINK | C_HASXATTRS);
1849
1850 /*
1851 * Complete the in-memory portion of the copy.
1852 *
1853 * ExchangeFileIDs swaps the on-disk records involved. We complete the
1854 * operation by swapping the in-memory contents of the two files here.
1855 * We swap the cnode descriptors, which contain name, BSD attributes,
1856 * timestamps, etc, about the file.
1857 *
1858 * NOTE: We do *NOT* swap the fileforks of the two cnodes. We have
1859 * already swapped the on-disk extent information. As long as we swap the
1860 * IDs, the in-line resident 8 extents that live in the filefork data
1861 * structure will point to the right data for the new file ID if we leave
1862 * them alone.
1863 *
1864 * As a result, any file descriptor that points to a particular
1865 * vnode (even though it should change names), will continue
1866 * to point to the same content.
1867 */
1868
1869 /* Copy the "to" -> "from" cnode */
1870 bcopy(&to_cp->c_desc, &from_cp->c_desc, sizeof(struct cat_desc));
1871
1872 from_cp->c_hint = 0;
1873 /*
1874 * If 'to' was a hardlink, then we copied over its link ID/CNID/(namespace ID)
1875 * when we bcopy'd the descriptor above. However, the cnode attributes
1876 * are not bcopied. As a result, make sure to swap the file IDs of each item.
1877 *
1878 * Further, other hardlink attributes must be moved along in this swap:
1879 * the linkcount, the linkref, and the firstlink all need to move
1880 * along with the file IDs. See note below regarding the flags and
1881 * what moves vs. what does not.
1882 *
1883 * For Reference:
1884 * linkcount == total # of hardlinks.
1885 * linkref == the indirect inode pointer.
1886 * firstlink == the first hardlink in the chain (written to the raw inode).
1887 * These three are tied to the fileID and must move along with the rest of the data.
1888 */
1889 from_cp->c_fileid = to_cp->c_attr.ca_fileid;
1890
1891 from_cp->c_itime = to_cp->c_itime;
1892 from_cp->c_btime = to_cp->c_btime;
1893 from_cp->c_atime = to_cp->c_atime;
1894 from_cp->c_ctime = to_cp->c_ctime;
1895 from_cp->c_gid = to_cp->c_gid;
1896 from_cp->c_uid = to_cp->c_uid;
1897 from_cp->c_bsdflags = to_cp->c_bsdflags;
1898 from_cp->c_mode = to_cp->c_mode;
1899 from_cp->c_linkcount = to_cp->c_linkcount;
1900 from_cp->c_attr.ca_linkref = to_cp->c_attr.ca_linkref;
1901 from_cp->c_attr.ca_firstlink = to_cp->c_attr.ca_firstlink;
1902
1903 /*
1904 * The cnode flags need to stay with the cnode and not get transferred
1905 * over along with everything else because they describe the content; they are
1906 * not attributes that reflect changes specific to the file ID. In general,
1907 * fields that are tied to the file ID are the ones that will move.
1908 *
1909 * This reflects the fact that the file may have borrowed blocks, dirty metadata,
1910 * or other extents, which may not yet have been written to the catalog. If
1911 * they were, they would have been transferred above in the ExchangeFileIDs call above...
1912 *
1913 * The flags that are special are:
1914 * C_HARDLINK, C_HASXATTRS
1915 *
1916 * These flags move with the item and file ID in the namespace since their
1917 * state is tied to that of the file ID.
1918 *
1919 * So to transfer the flags, we have to take the following steps
1920 * 1) Store in a localvar whether or not the special bits are set.
1921 * 2) Drop the special bits from the current flags
1922 * 3) swap the special flag bits to their destination
1923 */
1924 from_cp->c_flag |= to_flag_special;
1925 from_cp->c_attr.ca_recflags = to_cp->c_attr.ca_recflags;
1926 bcopy(to_cp->c_finderinfo, from_cp->c_finderinfo, 32);
1927
1928
1929 /* Copy the "from" -> "to" cnode */
1930 bcopy(&tempdesc, &to_cp->c_desc, sizeof(struct cat_desc));
1931 to_cp->c_hint = 0;
1932 /*
1933 * Pull the file ID from the tempattr we copied above. We can't assume
1934 * it is the same as the CNID.
1935 */
1936 to_cp->c_fileid = tempattr.ca_fileid;
1937 to_cp->c_itime = tempattr.ca_itime;
1938 to_cp->c_btime = tempattr.ca_btime;
1939 to_cp->c_atime = tempattr.ca_atime;
1940 to_cp->c_ctime = tempattr.ca_ctime;
1941 to_cp->c_gid = tempattr.ca_gid;
1942 to_cp->c_uid = tempattr.ca_uid;
1943 to_cp->c_bsdflags = tempattr.ca_flags;
1944 to_cp->c_mode = tempattr.ca_mode;
1945 to_cp->c_linkcount = tempattr.ca_linkcount;
1946 to_cp->c_attr.ca_linkref = tempattr.ca_linkref;
1947 to_cp->c_attr.ca_firstlink = tempattr.ca_firstlink;
1948
1949 /*
1950 * Only OR in the "from" flags into our cnode flags below.
1951 * Leave the rest of the flags alone.
1952 */
1953 to_cp->c_flag |= from_flag_special;
1954
1955 to_cp->c_attr.ca_recflags = tempattr.ca_recflags;
1956 bcopy(tempattr.ca_finderinfo, to_cp->c_finderinfo, 32);
1957
1958
1959 /* Rehash the cnodes using their new file IDs */
1960 hfs_chash_rehash(hfsmp, from_cp, to_cp);
1961
1962 /*
1963 * When a file moves out of "Cleanup At Startup"
1964 * we can drop its NODUMP status.
1965 */
1966 if ((from_cp->c_bsdflags & UF_NODUMP) &&
1967 (from_cp->c_parentcnid != to_cp->c_parentcnid)) {
1968 from_cp->c_bsdflags &= ~UF_NODUMP;
1969 from_cp->c_touch_chgtime = TRUE;
1970 }
1971 if ((to_cp->c_bsdflags & UF_NODUMP) &&
1972 (to_cp->c_parentcnid != from_cp->c_parentcnid)) {
1973 to_cp->c_bsdflags &= ~UF_NODUMP;
1974 to_cp->c_touch_chgtime = TRUE;
1975 }
1976
1977 exit:
1978 if (got_cookie) {
1979 cat_postflight(hfsmp, &cookie, vfs_context_proc(ap->a_context));
1980 }
1981 if (started_tr) {
1982 hfs_end_transaction(hfsmp);
1983 }
1984
1985 hfs_unlockpair(from_cp, to_cp);
1986 return (error);
1987 }
1988
1989 int
1990 hfs_vnop_mmap(struct vnop_mmap_args *ap)
1991 {
1992 struct vnode *vp = ap->a_vp;
1993 int error;
1994
1995 if (VNODE_IS_RSRC(vp)) {
1996 /* allow pageins of the resource fork */
1997 } else {
1998 int compressed = hfs_file_is_compressed(VTOC(vp), 1); /* 1 == don't take the cnode lock */
1999 time_t orig_ctime = VTOC(vp)->c_ctime;
2000
2001 if (!compressed && (VTOC(vp)->c_bsdflags & UF_COMPRESSED)) {
2002 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
2003 if (error != 0) {
2004 return error;
2005 }
2006 }
2007
2008 if (ap->a_fflags & PROT_WRITE) {
2009 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_WRITE_OP, NULL);
2010
2011 /* even though we're manipulating a cnode field here, we're only monotonically increasing
2012 * the generation counter. The vnode can't be recycled (because we hold a FD in order to cause the
2013 * map to happen). So it's safe to do this without holding the cnode lock. The caller's only
2014 * requirement is that the number has been changed.
2015 */
2016 struct cnode *cp = VTOC(vp);
2017 if (S_ISREG(cp->c_attr.ca_mode) || S_ISLNK(cp->c_attr.ca_mode)) {
2018 hfs_incr_gencount(cp);
2019 }
2020 }
2021 }
2022
2023 //
2024 // NOTE: we return ENOTSUP because we want the cluster layer
2025 // to actually do all the real work.
2026 //
2027 return (ENOTSUP);
2028 }
2029
2030 /*
2031 * hfs_movedata
2032 *
2033 * This is a non-symmetric variant of exchangedata. In this function,
2034 * the contents of the fork in from_vp are moved to the fork
2035 * specified by to_vp.
2036 *
2037 * The cnodes pointed to by 'from_vp' and 'to_vp' must be locked.
2038 *
2039 * The vnode pointed to by 'to_vp' *must* be empty prior to invoking this function.
2040 * We impose this restriction because we may not be able to fully delete the entire
2041 * file's contents in a single transaction, particularly if it has a lot of extents.
2042 * In the normal file deletion codepath, the file is screened for two conditions:
2043 * 1) bigger than 400MB, and 2) more than 8 extents. If so, the file is relocated to
2044 * the hidden directory and the deletion is broken up into multiple truncates. We can't
2045 * do that here because both files need to exist in the namespace. The main reason this
2046 * is imposed is that we may have to touch a whole lot of bitmap blocks if there are
2047 * many extents.
2048 *
2049 * Any data written to 'from_vp' after this call completes is not guaranteed
2050 * to be moved.
2051 *
2052 * Arguments:
2053 * vnode from_vp: source file
2054 * vnode to_vp: destination file; must be empty
2055 *
2056 * Returns:
2057 * EFBIG - Destination file was not empty
2058 * 0 - success
2059 *
2060 *
2061 */
2062 int hfs_movedata (struct vnode *from_vp, struct vnode *to_vp) {
2063
2064 struct cnode *from_cp;
2065 struct cnode *to_cp;
2066 struct hfsmount *hfsmp = NULL;
2067 int error = 0;
2068 int started_tr = 0;
2069 int lockflags = 0;
2070 int overflow_blocks;
2071 int rsrc = 0;
2072
2073
2074 /* Get the HFS pointers */
2075 from_cp = VTOC(from_vp);
2076 to_cp = VTOC(to_vp);
2077 hfsmp = VTOHFS(from_vp);
2078
2079 /* Verify that neither source/dest file is open-unlinked */
2080 if (from_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
2081 error = EBUSY;
2082 goto movedata_exit;
2083 }
2084
2085 if (to_cp->c_flag & (C_DELETED | C_NOEXISTS)) {
2086 error = EBUSY;
2087 goto movedata_exit;
2088 }
2089
2090 /*
2091 * Verify the source file is not in use by anyone besides us.
2092 *
2093 * This function is typically invoked by a namespace handler
2094 * process responding to a temporarily stalled system call.
2095 * The FD that it is working off of is opened O_EVTONLY, so
2096 * it really has no active usecounts (the kusecount from O_EVTONLY
2097 * is subtracted from the total usecounts).
2098 *
2099 * As a result, we shouldn't have any active usecounts against
2100 * this vnode when we go to check it below.
2101 */
2102 if (vnode_isinuse(from_vp, 0)) {
2103 error = EBUSY;
2104 goto movedata_exit;
2105 }
2106
2107 if (from_cp->c_rsrc_vp == from_vp) {
2108 rsrc = 1;
2109 }
2110
2111 /*
2112 * We assume that the destination file is already empty.
2113 * Verify that it is.
2114 */
2115 if (rsrc) {
2116 if (to_cp->c_rsrcfork->ff_size > 0) {
2117 error = EFBIG;
2118 goto movedata_exit;
2119 }
2120 }
2121 else {
2122 if (to_cp->c_datafork->ff_size > 0) {
2123 error = EFBIG;
2124 goto movedata_exit;
2125 }
2126 }
2127
2128 /* If the source has the rsrc open, make sure the destination is also the rsrc */
2129 if (rsrc) {
2130 if (to_vp != to_cp->c_rsrc_vp) {
2131 error = EINVAL;
2132 goto movedata_exit;
2133 }
2134 }
2135 else {
2136 /* Verify that both forks are data forks */
2137 if (to_vp != to_cp->c_vp) {
2138 error = EINVAL;
2139 goto movedata_exit;
2140 }
2141 }
2142
2143 /*
2144 * See if the source file has overflow extents. If it doesn't, we don't
2145 * need to call into MoveData, and the catalog will be enough.
2146 */
2147 if (rsrc) {
2148 overflow_blocks = overflow_extents(from_cp->c_rsrcfork);
2149 }
2150 else {
2151 overflow_blocks = overflow_extents(from_cp->c_datafork);
2152 }
2153
2154 if ((error = hfs_start_transaction (hfsmp)) != 0) {
2155 goto movedata_exit;
2156 }
2157 started_tr = 1;
2158
2159 /* Lock the system files: catalog, extents, attributes */
2160 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2161
2162 /* Copy over any catalog allocation data into the new spot. */
2163 if (rsrc) {
2164 if ((error = hfs_move_fork (from_cp->c_rsrcfork, from_cp, to_cp->c_rsrcfork, to_cp))){
2165 hfs_systemfile_unlock(hfsmp, lockflags);
2166 goto movedata_exit;
2167 }
2168 }
2169 else {
2170 if ((error = hfs_move_fork (from_cp->c_datafork, from_cp, to_cp->c_datafork, to_cp))) {
2171 hfs_systemfile_unlock(hfsmp, lockflags);
2172 goto movedata_exit;
2173 }
2174 }
2175
2176 /*
2177 * Note that because all we're doing is moving the extents around, we can
2178 * probably do this in a single transaction: Each extent record (group of 8)
2179 * is 64 bytes. A extent overflow B-Tree node is typically 4k. This means
2180 * each node can hold roughly ~60 extent records == (480 extents).
2181 *
2182 * If a file was massively fragmented and had 20k extents, this means we'd
2183 * roughly touch 20k/480 == 41 to 42 nodes, plus the index nodes, for half
2184 * of the operation. (inserting or deleting). So if we're manipulating 80-100
2185 * nodes, this is basically 320k of data to write to the journal in
2186 * a bad case.
2187 */
2188 if (overflow_blocks != 0) {
2189 if (rsrc) {
2190 error = MoveData(hfsmp, from_cp->c_cnid, to_cp->c_cnid, 1);
2191 }
2192 else {
2193 error = MoveData (hfsmp, from_cp->c_cnid, to_cp->c_cnid, 0);
2194 }
2195 }
2196
2197 if (error) {
2198 /* Reverse the operation. Copy the fork data back into the source */
2199 if (rsrc) {
2200 hfs_move_fork (to_cp->c_rsrcfork, to_cp, from_cp->c_rsrcfork, from_cp);
2201 }
2202 else {
2203 hfs_move_fork (to_cp->c_datafork, to_cp, from_cp->c_datafork, from_cp);
2204 }
2205 }
2206 else {
2207 struct cat_fork *src_data = NULL;
2208 struct cat_fork *src_rsrc = NULL;
2209 struct cat_fork *dst_data = NULL;
2210 struct cat_fork *dst_rsrc = NULL;
2211
2212 /* Touch the times*/
2213 to_cp->c_touch_acctime = TRUE;
2214 to_cp->c_touch_chgtime = TRUE;
2215 to_cp->c_touch_modtime = TRUE;
2216
2217 from_cp->c_touch_acctime = TRUE;
2218 from_cp->c_touch_chgtime = TRUE;
2219 from_cp->c_touch_modtime = TRUE;
2220
2221 hfs_touchtimes(hfsmp, to_cp);
2222 hfs_touchtimes(hfsmp, from_cp);
2223
2224 if (from_cp->c_datafork) {
2225 src_data = &from_cp->c_datafork->ff_data;
2226 }
2227 if (from_cp->c_rsrcfork) {
2228 src_rsrc = &from_cp->c_rsrcfork->ff_data;
2229 }
2230
2231 if (to_cp->c_datafork) {
2232 dst_data = &to_cp->c_datafork->ff_data;
2233 }
2234 if (to_cp->c_rsrcfork) {
2235 dst_rsrc = &to_cp->c_rsrcfork->ff_data;
2236 }
2237
2238 /* Update the catalog nodes */
2239 (void) cat_update(hfsmp, &from_cp->c_desc, &from_cp->c_attr,
2240 src_data, src_rsrc);
2241
2242 (void) cat_update(hfsmp, &to_cp->c_desc, &to_cp->c_attr,
2243 dst_data, dst_rsrc);
2244
2245 }
2246 /* unlock the system files */
2247 hfs_systemfile_unlock(hfsmp, lockflags);
2248
2249
2250 movedata_exit:
2251 if (started_tr) {
2252 hfs_end_transaction(hfsmp);
2253 }
2254
2255 return error;
2256
2257 }
2258
2259 /*
2260 * Copy all of the catalog and runtime data in srcfork to dstfork.
2261 *
2262 * This allows us to maintain the invalid ranges across the movedata operation so
2263 * we don't need to force all of the pending IO right now. In addition, we move all
2264 * non overflow-extent extents into the destination here.
2265 */
2266 static int hfs_move_fork (struct filefork *srcfork, struct cnode *src_cp,
2267 struct filefork *dstfork, struct cnode *dst_cp) {
2268 struct rl_entry *invalid_range;
2269 int size = sizeof(struct HFSPlusExtentDescriptor);
2270 size = size * kHFSPlusExtentDensity;
2271
2272 /* If the dstfork has any invalid ranges, bail out */
2273 invalid_range = TAILQ_FIRST(&dstfork->ff_invalidranges);
2274 if (invalid_range != NULL) {
2275 return EFBIG;
2276 }
2277
2278 if (dstfork->ff_data.cf_size != 0 || dstfork->ff_data.cf_new_size != 0) {
2279 return EFBIG;
2280 }
2281
2282 /* First copy the invalid ranges */
2283 while ((invalid_range = TAILQ_FIRST(&srcfork->ff_invalidranges))) {
2284 off_t start = invalid_range->rl_start;
2285 off_t end = invalid_range->rl_end;
2286
2287 /* Remove it from the srcfork and add it to dstfork */
2288 rl_remove(start, end, &srcfork->ff_invalidranges);
2289 rl_add(start, end, &dstfork->ff_invalidranges);
2290 }
2291
2292 /*
2293 * Ignore the ff_union. We don't move symlinks or system files.
2294 * Now copy the in-catalog extent information
2295 */
2296 dstfork->ff_data.cf_size = srcfork->ff_data.cf_size;
2297 dstfork->ff_data.cf_new_size = srcfork->ff_data.cf_new_size;
2298 dstfork->ff_data.cf_vblocks = srcfork->ff_data.cf_vblocks;
2299 dstfork->ff_data.cf_blocks = srcfork->ff_data.cf_blocks;
2300
2301 /* just memcpy the whole array of extents to the new location. */
2302 memcpy (dstfork->ff_data.cf_extents, srcfork->ff_data.cf_extents, size);
2303
2304 /*
2305 * Copy the cnode attribute data.
2306 *
2307 */
2308 src_cp->c_blocks -= srcfork->ff_data.cf_vblocks;
2309 src_cp->c_blocks -= srcfork->ff_data.cf_blocks;
2310
2311 dst_cp->c_blocks += srcfork->ff_data.cf_vblocks;
2312 dst_cp->c_blocks += srcfork->ff_data.cf_blocks;
2313
2314 /* Now delete the entries in the source fork */
2315 srcfork->ff_data.cf_size = 0;
2316 srcfork->ff_data.cf_new_size = 0;
2317 srcfork->ff_data.cf_union.cfu_bytesread = 0;
2318 srcfork->ff_data.cf_vblocks = 0;
2319 srcfork->ff_data.cf_blocks = 0;
2320
2321 /* Zero out the old extents */
2322 bzero (srcfork->ff_data.cf_extents, size);
2323 return 0;
2324 }
2325
2326
2327 /*
2328 * cnode must be locked
2329 */
2330 int
2331 hfs_fsync(struct vnode *vp, int waitfor, int fullsync, struct proc *p)
2332 {
2333 struct cnode *cp = VTOC(vp);
2334 struct filefork *fp = NULL;
2335 int retval = 0;
2336 struct hfsmount *hfsmp = VTOHFS(vp);
2337 struct rl_entry *invalid_range;
2338 struct timeval tv;
2339 int waitdata; /* attributes necessary for data retrieval */
2340 int wait; /* all other attributes (e.g. atime, etc.) */
2341 int lockflag;
2342 int took_trunc_lock = 0;
2343 int locked_buffers = 0;
2344
2345 /*
2346 * Applications which only care about data integrity rather than full
2347 * file integrity may opt out of (delay) expensive metadata update
2348 * operations as a performance optimization.
2349 */
2350 wait = (waitfor == MNT_WAIT);
2351 waitdata = (waitfor == MNT_DWAIT) | wait;
2352 if (always_do_fullfsync)
2353 fullsync = 1;
2354
2355 /* HFS directories don't have any data blocks. */
2356 if (vnode_isdir(vp))
2357 goto metasync;
2358 fp = VTOF(vp);
2359
2360 /*
2361 * For system files flush the B-tree header and
2362 * for regular files write out any clusters
2363 */
2364 if (vnode_issystem(vp)) {
2365 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2366 // XXXdbg
2367 if (hfsmp->jnl == NULL) {
2368 BTFlushPath(VTOF(vp));
2369 }
2370 }
2371 } else if (UBCINFOEXISTS(vp)) {
2372 hfs_unlock(cp);
2373 hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
2374 took_trunc_lock = 1;
2375
2376 if (fp->ff_unallocblocks != 0) {
2377 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2378
2379 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2380 }
2381 /* Don't hold cnode lock when calling into cluster layer. */
2382 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2383
2384 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2385 }
2386 /*
2387 * When MNT_WAIT is requested and the zero fill timeout
2388 * has expired then we must explicitly zero out any areas
2389 * that are currently marked invalid (holes).
2390 *
2391 * Files with NODUMP can bypass zero filling here.
2392 */
2393 if (fp && (((cp->c_flag & C_ALWAYS_ZEROFILL) && !TAILQ_EMPTY(&fp->ff_invalidranges)) ||
2394 ((wait || (cp->c_flag & C_ZFWANTSYNC)) &&
2395 ((cp->c_bsdflags & UF_NODUMP) == 0) &&
2396 UBCINFOEXISTS(vp) && (vnode_issystem(vp) ==0) &&
2397 cp->c_zftimeout != 0))) {
2398
2399 microuptime(&tv);
2400 if ((cp->c_flag & C_ALWAYS_ZEROFILL) == 0 && !fullsync && tv.tv_sec < (long)cp->c_zftimeout) {
2401 /* Remember that a force sync was requested. */
2402 cp->c_flag |= C_ZFWANTSYNC;
2403 goto datasync;
2404 }
2405 if (!TAILQ_EMPTY(&fp->ff_invalidranges)) {
2406 if (!took_trunc_lock || (cp->c_truncatelockowner == HFS_SHARED_OWNER)) {
2407 hfs_unlock(cp);
2408 if (took_trunc_lock) {
2409 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2410 }
2411 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2412 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2413 took_trunc_lock = 1;
2414 }
2415 while ((invalid_range = TAILQ_FIRST(&fp->ff_invalidranges))) {
2416 off_t start = invalid_range->rl_start;
2417 off_t end = invalid_range->rl_end;
2418
2419 /* The range about to be written must be validated
2420 * first, so that VNOP_BLOCKMAP() will return the
2421 * appropriate mapping for the cluster code:
2422 */
2423 rl_remove(start, end, &fp->ff_invalidranges);
2424
2425 /* Don't hold cnode lock when calling into cluster layer. */
2426 hfs_unlock(cp);
2427 (void) cluster_write(vp, (struct uio *) 0,
2428 fp->ff_size, end + 1, start, (off_t)0,
2429 IO_HEADZEROFILL | IO_NOZERODIRTY | IO_NOCACHE);
2430 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2431 cp->c_flag |= C_MODIFIED;
2432 }
2433 hfs_unlock(cp);
2434 (void) cluster_push(vp, waitdata ? IO_SYNC : 0);
2435 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
2436 }
2437 cp->c_flag &= ~C_ZFWANTSYNC;
2438 cp->c_zftimeout = 0;
2439 }
2440 datasync:
2441 if (took_trunc_lock) {
2442 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2443 took_trunc_lock = 0;
2444 }
2445 /*
2446 * if we have a journal and if journal_active() returns != 0 then the
2447 * we shouldn't do anything to a locked block (because it is part
2448 * of a transaction). otherwise we'll just go through the normal
2449 * code path and flush the buffer. note journal_active() can return
2450 * -1 if the journal is invalid -- however we still need to skip any
2451 * locked blocks as they get cleaned up when we finish the transaction
2452 * or close the journal.
2453 */
2454 // if (hfsmp->jnl && journal_active(hfsmp->jnl) >= 0)
2455 if (hfsmp->jnl)
2456 lockflag = BUF_SKIP_LOCKED;
2457 else
2458 lockflag = 0;
2459
2460 /*
2461 * Flush all dirty buffers associated with a vnode.
2462 * Record how many of them were dirty AND locked (if necessary).
2463 */
2464 locked_buffers = buf_flushdirtyblks_skipinfo(vp, waitdata, lockflag, "hfs_fsync");
2465 if ((lockflag & BUF_SKIP_LOCKED) && (locked_buffers) && (vnode_vtype(vp) == VLNK)) {
2466 /*
2467 * If there are dirty symlink buffers, then we may need to take action
2468 * to prevent issues later on if we are journaled. If we're fsyncing a
2469 * symlink vnode then we are in one of three cases:
2470 *
2471 * 1) automatic sync has fired. In this case, we don't want the behavior to change.
2472 *
2473 * 2) Someone has opened the FD for the symlink (not what it points to)
2474 * and has issued an fsync against it. This should be rare, and we don't
2475 * want the behavior to change.
2476 *
2477 * 3) We are being called by a vclean which is trying to reclaim this
2478 * symlink vnode. If this is the case, then allowing this fsync to
2479 * proceed WITHOUT flushing the journal could result in the vclean
2480 * invalidating the buffer's blocks before the journal transaction is
2481 * written to disk. To prevent this, we force a journal flush
2482 * if the vnode is in the middle of a recycle (VL_TERMINATE or VL_DEAD is set).
2483 */
2484 if (vnode_isrecycled(vp)) {
2485 fullsync = 1;
2486 }
2487 }
2488
2489 metasync:
2490 if (vnode_isreg(vp) && vnode_issystem(vp)) {
2491 if (VTOF(vp)->fcbBTCBPtr != NULL) {
2492 microuptime(&tv);
2493 BTSetLastSync(VTOF(vp), tv.tv_sec);
2494 }
2495 cp->c_touch_acctime = FALSE;
2496 cp->c_touch_chgtime = FALSE;
2497 cp->c_touch_modtime = FALSE;
2498 } else if ( !(vp->v_flag & VSWAP) ) /* User file */ {
2499 retval = hfs_update(vp, wait);
2500
2501 /*
2502 * When MNT_WAIT is requested push out the catalog record for
2503 * this file. If they asked for a full fsync, we can skip this
2504 * because the journal_flush or hfs_metasync_all will push out
2505 * all of the metadata changes.
2506 */
2507 if ((retval == 0) && wait && !fullsync && cp->c_hint &&
2508 !ISSET(cp->c_flag, C_DELETED | C_NOEXISTS)) {
2509 hfs_metasync(VTOHFS(vp), (daddr64_t)cp->c_hint, p);
2510 }
2511
2512 /*
2513 * If this was a full fsync, make sure all metadata
2514 * changes get to stable storage.
2515 */
2516 if (fullsync) {
2517 if (hfsmp->jnl) {
2518 hfs_journal_flush(hfsmp, FALSE);
2519
2520 if (journal_uses_fua(hfsmp->jnl)) {
2521 /*
2522 * the journal_flush did NOT issue a sync track cache command,
2523 * and the fullsync indicates we are supposed to flush all cached
2524 * data to the media, so issue the sync track cache command
2525 * explicitly
2526 */
2527 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2528 }
2529 } else {
2530 retval = hfs_metasync_all(hfsmp);
2531 /* XXX need to pass context! */
2532 VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, NULL);
2533 }
2534 }
2535 }
2536
2537 return (retval);
2538 }
2539
2540
2541 /* Sync an hfs catalog b-tree node */
2542 int
2543 hfs_metasync(struct hfsmount *hfsmp, daddr64_t node, __unused struct proc *p)
2544 {
2545 vnode_t vp;
2546 buf_t bp;
2547 int lockflags;
2548
2549 vp = HFSTOVCB(hfsmp)->catalogRefNum;
2550
2551 // XXXdbg - don't need to do this on a journaled volume
2552 if (hfsmp->jnl) {
2553 return 0;
2554 }
2555
2556 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
2557 /*
2558 * Look for a matching node that has been delayed
2559 * but is not part of a set (B_LOCKED).
2560 *
2561 * BLK_ONLYVALID causes buf_getblk to return a
2562 * buf_t for the daddr64_t specified only if it's
2563 * currently resident in the cache... the size
2564 * parameter to buf_getblk is ignored when this flag
2565 * is set
2566 */
2567 bp = buf_getblk(vp, node, 0, 0, 0, BLK_META | BLK_ONLYVALID);
2568
2569 if (bp) {
2570 if ((buf_flags(bp) & (B_LOCKED | B_DELWRI)) == B_DELWRI)
2571 (void) VNOP_BWRITE(bp);
2572 else
2573 buf_brelse(bp);
2574 }
2575
2576 hfs_systemfile_unlock(hfsmp, lockflags);
2577
2578 return (0);
2579 }
2580
2581
2582 /*
2583 * Sync all hfs B-trees. Use this instead of journal_flush for a volume
2584 * without a journal. Note that the volume bitmap does not get written;
2585 * we rely on fsck_hfs to fix that up (which it can do without any loss
2586 * of data).
2587 */
2588 int
2589 hfs_metasync_all(struct hfsmount *hfsmp)
2590 {
2591 int lockflags;
2592
2593 /* Lock all of the B-trees so we get a mutually consistent state */
2594 lockflags = hfs_systemfile_lock(hfsmp,
2595 SFL_CATALOG|SFL_EXTENTS|SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
2596
2597 /* Sync each of the B-trees */
2598 if (hfsmp->hfs_catalog_vp)
2599 hfs_btsync(hfsmp->hfs_catalog_vp, 0);
2600 if (hfsmp->hfs_extents_vp)
2601 hfs_btsync(hfsmp->hfs_extents_vp, 0);
2602 if (hfsmp->hfs_attribute_vp)
2603 hfs_btsync(hfsmp->hfs_attribute_vp, 0);
2604
2605 /* Wait for all of the writes to complete */
2606 if (hfsmp->hfs_catalog_vp)
2607 vnode_waitforwrites(hfsmp->hfs_catalog_vp, 0, 0, 0, "hfs_metasync_all");
2608 if (hfsmp->hfs_extents_vp)
2609 vnode_waitforwrites(hfsmp->hfs_extents_vp, 0, 0, 0, "hfs_metasync_all");
2610 if (hfsmp->hfs_attribute_vp)
2611 vnode_waitforwrites(hfsmp->hfs_attribute_vp, 0, 0, 0, "hfs_metasync_all");
2612
2613 hfs_systemfile_unlock(hfsmp, lockflags);
2614
2615 return 0;
2616 }
2617
2618
2619 /*ARGSUSED 1*/
2620 static int
2621 hfs_btsync_callback(struct buf *bp, __unused void *dummy)
2622 {
2623 buf_clearflags(bp, B_LOCKED);
2624 (void) buf_bawrite(bp);
2625
2626 return(BUF_CLAIMED);
2627 }
2628
2629
2630 int
2631 hfs_btsync(struct vnode *vp, int sync_transaction)
2632 {
2633 struct cnode *cp = VTOC(vp);
2634 struct timeval tv;
2635 int flags = 0;
2636
2637 if (sync_transaction)
2638 flags |= BUF_SKIP_NONLOCKED;
2639 /*
2640 * Flush all dirty buffers associated with b-tree.
2641 */
2642 buf_iterate(vp, hfs_btsync_callback, flags, 0);
2643
2644 microuptime(&tv);
2645 if (vnode_issystem(vp) && (VTOF(vp)->fcbBTCBPtr != NULL))
2646 (void) BTSetLastSync(VTOF(vp), tv.tv_sec);
2647 cp->c_touch_acctime = FALSE;
2648 cp->c_touch_chgtime = FALSE;
2649 cp->c_touch_modtime = FALSE;
2650
2651 return 0;
2652 }
2653
2654 /*
2655 * Remove a directory.
2656 */
2657 int
2658 hfs_vnop_rmdir(ap)
2659 struct vnop_rmdir_args /* {
2660 struct vnode *a_dvp;
2661 struct vnode *a_vp;
2662 struct componentname *a_cnp;
2663 vfs_context_t a_context;
2664 } */ *ap;
2665 {
2666 struct vnode *dvp = ap->a_dvp;
2667 struct vnode *vp = ap->a_vp;
2668 struct cnode *dcp = VTOC(dvp);
2669 struct cnode *cp = VTOC(vp);
2670 int error;
2671 time_t orig_ctime;
2672
2673 orig_ctime = VTOC(vp)->c_ctime;
2674
2675 if (!S_ISDIR(cp->c_mode)) {
2676 return (ENOTDIR);
2677 }
2678 if (dvp == vp) {
2679 return (EINVAL);
2680 }
2681
2682 check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
2683 cp = VTOC(vp);
2684
2685 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
2686 return (error);
2687 }
2688
2689 /* Check for a race with rmdir on the parent directory */
2690 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
2691 hfs_unlockpair (dcp, cp);
2692 return ENOENT;
2693 }
2694 error = hfs_removedir(dvp, vp, ap->a_cnp, 0, 0);
2695
2696 hfs_unlockpair(dcp, cp);
2697
2698 return (error);
2699 }
2700
2701 /*
2702 * Remove a directory
2703 *
2704 * Both dvp and vp cnodes are locked
2705 */
2706 int
2707 hfs_removedir(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
2708 int skip_reserve, int only_unlink)
2709 {
2710 struct cnode *cp;
2711 struct cnode *dcp;
2712 struct hfsmount * hfsmp;
2713 struct cat_desc desc;
2714 int lockflags;
2715 int error = 0, started_tr = 0;
2716
2717 cp = VTOC(vp);
2718 dcp = VTOC(dvp);
2719 hfsmp = VTOHFS(vp);
2720
2721 if (dcp == cp) {
2722 return (EINVAL); /* cannot remove "." */
2723 }
2724 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
2725 return (0);
2726 }
2727 if (cp->c_entries != 0) {
2728 return (ENOTEMPTY);
2729 }
2730
2731 /*
2732 * If the directory is open or in use (e.g. opendir() or current working
2733 * directory for some process); wait for inactive/reclaim to actually
2734 * remove cnode from the catalog. Both inactive and reclaim codepaths are capable
2735 * of removing open-unlinked directories from the catalog, as well as getting rid
2736 * of EAs still on the element. So change only_unlink to true, so that it will get
2737 * cleaned up below.
2738 *
2739 * Otherwise, we can get into a weird old mess where the directory has C_DELETED,
2740 * but it really means C_NOEXISTS because the item was actually removed from the
2741 * catalog. Then when we try to remove the entry from the catalog later on, it won't
2742 * really be there anymore.
2743 */
2744 if (vnode_isinuse(vp, 0)) {
2745 only_unlink = 1;
2746 }
2747
2748 /* Deal with directory hardlinks */
2749 if (cp->c_flag & C_HARDLINK) {
2750 /*
2751 * Note that if we have a directory which was a hardlink at any point,
2752 * its actual directory data is stored in the directory inode in the hidden
2753 * directory rather than the leaf element(s) present in the namespace.
2754 *
2755 * If there are still other hardlinks to this directory,
2756 * then we'll just eliminate this particular link and the vnode will still exist.
2757 * If this is the last link to an empty directory, then we'll open-unlink the
2758 * directory and it will be only tagged with C_DELETED (as opposed to C_NOEXISTS).
2759 *
2760 * We could also return EBUSY here.
2761 */
2762
2763 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
2764 }
2765
2766 /*
2767 * In a few cases, we may want to allow the directory to persist in an
2768 * open-unlinked state. If the directory is being open-unlinked (still has usecount
2769 * references), or if it has EAs, or if it was being deleted as part of a rename,
2770 * then we go ahead and move it to the hidden directory.
2771 *
2772 * If the directory is being open-unlinked, then we want to keep the catalog entry
2773 * alive so that future EA calls and fchmod/fstat etc. do not cause issues later.
2774 *
2775 * If the directory had EAs, then we want to use the open-unlink trick so that the
2776 * EA removal is not done in one giant transaction. Otherwise, it could cause a panic
2777 * due to overflowing the journal.
2778 *
2779 * Finally, if it was deleted as part of a rename, we move it to the hidden directory
2780 * in order to maintain rename atomicity.
2781 *
2782 * Note that the allow_dirs argument to hfs_removefile specifies that it is
2783 * supposed to handle directories for this case.
2784 */
2785
2786 if (((hfsmp->hfs_attribute_vp != NULL) &&
2787 ((cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0)) ||
2788 (only_unlink != 0)) {
2789
2790 int ret = hfs_removefile(dvp, vp, cnp, 0, 0, 1, NULL, only_unlink);
2791 /*
2792 * Even though hfs_vnop_rename calls vnode_recycle for us on tvp we call
2793 * it here just in case we were invoked by rmdir() on a directory that had
2794 * EAs. To ensure that we start reclaiming the space as soon as possible,
2795 * we call vnode_recycle on the directory.
2796 */
2797 vnode_recycle(vp);
2798
2799 return ret;
2800
2801 }
2802
2803 dcp->c_flag |= C_DIR_MODIFICATION;
2804
2805 #if QUOTA
2806 if (hfsmp->hfs_flags & HFS_QUOTAS)
2807 (void)hfs_getinoquota(cp);
2808 #endif
2809 if ((error = hfs_start_transaction(hfsmp)) != 0) {
2810 goto out;
2811 }
2812 started_tr = 1;
2813
2814 /*
2815 * Verify the directory is empty (and valid).
2816 * (Rmdir ".." won't be valid since
2817 * ".." will contain a reference to
2818 * the current directory and thus be
2819 * non-empty.)
2820 */
2821 if ((dcp->c_bsdflags & APPEND) || (cp->c_bsdflags & (IMMUTABLE | APPEND))) {
2822 error = EPERM;
2823 goto out;
2824 }
2825
2826 /* Remove the entry from the namei cache: */
2827 cache_purge(vp);
2828
2829 /*
2830 * Protect against a race with rename by using the component
2831 * name passed in and parent id from dvp (instead of using
2832 * the cp->c_desc which may have changed).
2833 */
2834 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
2835 desc.cd_namelen = cnp->cn_namelen;
2836 desc.cd_parentcnid = dcp->c_fileid;
2837 desc.cd_cnid = cp->c_cnid;
2838 desc.cd_flags = CD_ISDIR;
2839 desc.cd_encoding = cp->c_encoding;
2840 desc.cd_hint = 0;
2841
2842 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
2843 error = 0;
2844 goto out;
2845 }
2846
2847 /* Remove entry from catalog */
2848 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
2849
2850 if (!skip_reserve) {
2851 /*
2852 * Reserve some space in the Catalog file.
2853 */
2854 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
2855 hfs_systemfile_unlock(hfsmp, lockflags);
2856 goto out;
2857 }
2858 }
2859
2860 error = cat_delete(hfsmp, &desc, &cp->c_attr);
2861 if (error == 0) {
2862 /* The parent lost a child */
2863 if (dcp->c_entries > 0)
2864 dcp->c_entries--;
2865 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
2866 dcp->c_dirchangecnt++;
2867 dcp->c_touch_chgtime = TRUE;
2868 dcp->c_touch_modtime = TRUE;
2869 hfs_touchtimes(hfsmp, cp);
2870 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
2871 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
2872 }
2873
2874 hfs_systemfile_unlock(hfsmp, lockflags);
2875
2876 if (error)
2877 goto out;
2878
2879 #if QUOTA
2880 if (hfsmp->hfs_flags & HFS_QUOTAS)
2881 (void)hfs_chkiq(cp, -1, NOCRED, 0);
2882 #endif /* QUOTA */
2883
2884 hfs_volupdate(hfsmp, VOL_RMDIR, (dcp->c_cnid == kHFSRootFolderID));
2885
2886 /* Mark C_NOEXISTS since the catalog entry is now gone */
2887 cp->c_flag |= C_NOEXISTS;
2888 out:
2889 dcp->c_flag &= ~C_DIR_MODIFICATION;
2890 wakeup((caddr_t)&dcp->c_flag);
2891
2892 if (started_tr) {
2893 hfs_end_transaction(hfsmp);
2894 }
2895
2896 return (error);
2897 }
2898
2899
2900 /*
2901 * Remove a file or link.
2902 */
2903 int
2904 hfs_vnop_remove(ap)
2905 struct vnop_remove_args /* {
2906 struct vnode *a_dvp;
2907 struct vnode *a_vp;
2908 struct componentname *a_cnp;
2909 int a_flags;
2910 vfs_context_t a_context;
2911 } */ *ap;
2912 {
2913 struct vnode *dvp = ap->a_dvp;
2914 struct vnode *vp = ap->a_vp;
2915 struct cnode *dcp = VTOC(dvp);
2916 struct cnode *cp;
2917 struct vnode *rvp = NULL;
2918 int error=0, recycle_rsrc=0;
2919 int recycle_vnode = 0;
2920 uint32_t rsrc_vid = 0;
2921 time_t orig_ctime;
2922
2923 if (dvp == vp) {
2924 return (EINVAL);
2925 }
2926
2927 orig_ctime = VTOC(vp)->c_ctime;
2928 if (!vnode_isnamedstream(vp) && ((ap->a_flags & VNODE_REMOVE_SKIP_NAMESPACE_EVENT) == 0)) {
2929 error = check_for_tracked_file(vp, orig_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
2930 if (error) {
2931 // XXXdbg - decide on a policy for handling namespace handler failures!
2932 // for now we just let them proceed.
2933 }
2934 }
2935 error = 0;
2936
2937 cp = VTOC(vp);
2938
2939 relock:
2940
2941 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2942
2943 if ((error = hfs_lockpair(dcp, cp, HFS_EXCLUSIVE_LOCK))) {
2944 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
2945 if (rvp) {
2946 vnode_put (rvp);
2947 }
2948 return (error);
2949 }
2950
2951 /*
2952 * Lazily respond to determining if there is a valid resource fork
2953 * vnode attached to 'cp' if it is a regular file or symlink.
2954 * If the vnode does not exist, then we may proceed without having to
2955 * create it.
2956 *
2957 * If, however, it does exist, then we need to acquire an iocount on the
2958 * vnode after acquiring its vid. This ensures that if we have to do I/O
2959 * against it, it can't get recycled from underneath us in the middle
2960 * of this call.
2961 *
2962 * Note: this function may be invoked for directory hardlinks, so just skip these
2963 * steps if 'vp' is a directory.
2964 */
2965
2966 if ((vp->v_type == VLNK) || (vp->v_type == VREG)) {
2967 if ((cp->c_rsrc_vp) && (rvp == NULL)) {
2968 /* We need to acquire the rsrc vnode */
2969 rvp = cp->c_rsrc_vp;
2970 rsrc_vid = vnode_vid (rvp);
2971
2972 /* Unlock everything to acquire iocount on the rsrc vnode */
2973 hfs_unlock_truncate (cp, HFS_LOCK_DEFAULT);
2974 hfs_unlockpair (dcp, cp);
2975 /* Use the vid to maintain identity on rvp */
2976 if (vnode_getwithvid(rvp, rsrc_vid)) {
2977 /*
2978 * If this fails, then it was recycled or
2979 * reclaimed in the interim. Reset fields and
2980 * start over.
2981 */
2982 rvp = NULL;
2983 rsrc_vid = 0;
2984 }
2985 goto relock;
2986 }
2987 }
2988
2989 /*
2990 * Check to see if we raced rmdir for the parent directory
2991 * hfs_removefile already checks for a race on vp/cp
2992 */
2993 if (dcp->c_flag & (C_DELETED | C_NOEXISTS)) {
2994 error = ENOENT;
2995 goto rm_done;
2996 }
2997
2998 error = hfs_removefile(dvp, vp, ap->a_cnp, ap->a_flags, 0, 0, NULL, 0);
2999
3000 /*
3001 * If the remove succeeded in deleting the file, then we may need to mark
3002 * the resource fork for recycle so that it is reclaimed as quickly
3003 * as possible. If it were not recycled quickly, then this resource fork
3004 * vnode could keep a v_parent reference on the data fork, which prevents it
3005 * from going through reclaim (by giving it extra usecounts), except in the force-
3006 * unmount case.
3007 *
3008 * However, a caveat: we need to continue to supply resource fork
3009 * access to open-unlinked files even if the resource fork is not open. This is
3010 * a requirement for the compressed files work. Luckily, hfs_vgetrsrc will handle
3011 * this already if the data fork has been re-parented to the hidden directory.
3012 *
3013 * As a result, all we really need to do here is mark the resource fork vnode
3014 * for recycle. If it goes out of core, it can be brought in again if needed.
3015 * If the cnode was instead marked C_NOEXISTS, then there wouldn't be any
3016 * more work.
3017 */
3018 if (error == 0) {
3019 if (rvp) {
3020 recycle_rsrc = 1;
3021 }
3022 /*
3023 * If the target was actually removed from the catalog schedule it for
3024 * full reclamation/inactivation. We hold an iocount on it so it should just
3025 * get marked with MARKTERM
3026 */
3027 if (cp->c_flag & C_NOEXISTS) {
3028 recycle_vnode = 1;
3029 }
3030 }
3031
3032
3033 /*
3034 * Drop the truncate lock before unlocking the cnode
3035 * (which can potentially perform a vnode_put and
3036 * recycle the vnode which in turn might require the
3037 * truncate lock)
3038 */
3039 rm_done:
3040 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
3041 hfs_unlockpair(dcp, cp);
3042
3043 if (recycle_rsrc) {
3044 /* inactive or reclaim on rvp will clean up the blocks from the rsrc fork */
3045 vnode_recycle(rvp);
3046 }
3047 if (recycle_vnode) {
3048 vnode_recycle (vp);
3049 }
3050
3051 if (rvp) {
3052 /* drop iocount on rsrc fork, was obtained at beginning of fxn */
3053 vnode_put(rvp);
3054 }
3055
3056 return (error);
3057 }
3058
3059
3060 int
3061 hfs_removefile_callback(struct buf *bp, void *hfsmp) {
3062
3063 if ( !(buf_flags(bp) & B_META))
3064 panic("hfs: symlink bp @ %p is not marked meta-data!\n", bp);
3065 /*
3066 * it's part of the current transaction, kill it.
3067 */
3068 journal_kill_block(((struct hfsmount *)hfsmp)->jnl, bp);
3069
3070 return (BUF_CLAIMED);
3071 }
3072
3073 /*
3074 * hfs_removefile
3075 *
3076 * Similar to hfs_vnop_remove except there are additional options.
3077 * This function may be used to remove directories if they have
3078 * lots of EA's -- note the 'allow_dirs' argument.
3079 *
3080 * This function is able to delete blocks & fork data for the resource
3081 * fork even if it does not exist in core (and have a backing vnode).
3082 * It should infer the correct behavior based on the number of blocks
3083 * in the cnode and whether or not the resource fork pointer exists or
3084 * not. As a result, one only need pass in the 'vp' corresponding to the
3085 * data fork of this file (or main vnode in the case of a directory).
3086 * Passing in a resource fork will result in an error.
3087 *
3088 * Because we do not create any vnodes in this function, we are not at
3089 * risk of deadlocking against ourselves by double-locking.
3090 *
3091 * Requires cnode and truncate locks to be held.
3092 */
3093 int
3094 hfs_removefile(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
3095 int flags, int skip_reserve, int allow_dirs,
3096 __unused struct vnode *rvp, int only_unlink)
3097 {
3098 struct cnode *cp;
3099 struct cnode *dcp;
3100 struct vnode *rsrc_vp = NULL;
3101 struct hfsmount *hfsmp;
3102 struct cat_desc desc;
3103 struct timeval tv;
3104 int dataforkbusy = 0;
3105 int rsrcforkbusy = 0;
3106 int lockflags;
3107 int error = 0;
3108 int started_tr = 0;
3109 int isbigfile = 0, defer_remove=0, isdir=0;
3110 int update_vh = 0;
3111
3112 cp = VTOC(vp);
3113 dcp = VTOC(dvp);
3114 hfsmp = VTOHFS(vp);
3115
3116 /* Check if we lost a race post lookup. */
3117 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
3118 return (0);
3119 }
3120
3121 if (!hfs_valid_cnode(hfsmp, dvp, cnp, cp->c_fileid, NULL, &error)) {
3122 return 0;
3123 }
3124
3125 /* Make sure a remove is permitted */
3126 if (VNODE_IS_RSRC(vp)) {
3127 return (EPERM);
3128 }
3129 else {
3130 /*
3131 * We know it's a data fork.
3132 * Probe the cnode to see if we have a valid resource fork
3133 * in hand or not.
3134 */
3135 rsrc_vp = cp->c_rsrc_vp;
3136 }
3137
3138 /* Don't allow deleting the journal or journal_info_block. */
3139 if (hfs_is_journal_file(hfsmp, cp)) {
3140 return (EPERM);
3141 }
3142
3143 /*
3144 * If removing a symlink, then we need to ensure that the
3145 * data blocks for the symlink are not still in-flight or pending.
3146 * If so, we will unlink the symlink here, making its blocks
3147 * available for re-allocation by a subsequent transaction. That is OK, but
3148 * then the I/O for the data blocks could then go out before the journal
3149 * transaction that created it was flushed, leading to I/O ordering issues.
3150 */
3151 if (vp->v_type == VLNK) {
3152 /*
3153 * This will block if the asynchronous journal flush is in progress.
3154 * If this symlink is not being renamed over and doesn't have any open FDs,
3155 * then we'll remove it from the journal's bufs below in kill_block.
3156 */
3157 buf_wait_for_shadow_io (vp, 0);
3158 }
3159
3160 /*
3161 * Hard links require special handling.
3162 */
3163 if (cp->c_flag & C_HARDLINK) {
3164 if ((flags & VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0)) {
3165 return (EBUSY);
3166 } else {
3167 /* A directory hard link with a link count of one is
3168 * treated as a regular directory. Therefore it should
3169 * only be removed using rmdir().
3170 */
3171 if ((vnode_isdir(vp) == 1) && (cp->c_linkcount == 1) &&
3172 (allow_dirs == 0)) {
3173 return (EPERM);
3174 }
3175 return hfs_unlink(hfsmp, dvp, vp, cnp, skip_reserve);
3176 }
3177 }
3178
3179 /* Directories should call hfs_rmdir! (unless they have a lot of attributes) */
3180 if (vnode_isdir(vp)) {
3181 if (allow_dirs == 0)
3182 return (EPERM); /* POSIX */
3183 isdir = 1;
3184 }
3185 /* Sanity check the parent ids. */
3186 if ((cp->c_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3187 (cp->c_parentcnid != dcp->c_fileid)) {
3188 return (EINVAL);
3189 }
3190
3191 dcp->c_flag |= C_DIR_MODIFICATION;
3192
3193 // this guy is going away so mark him as such
3194 cp->c_flag |= C_DELETED;
3195
3196
3197 /* Remove our entry from the namei cache. */
3198 cache_purge(vp);
3199
3200 /*
3201 * If the caller was operating on a file (as opposed to a
3202 * directory with EAs), then we need to figure out
3203 * whether or not it has a valid resource fork vnode.
3204 *
3205 * If there was a valid resource fork vnode, then we need
3206 * to use hfs_truncate to eliminate its data. If there is
3207 * no vnode, then we hold the cnode lock which would
3208 * prevent it from being created. As a result,
3209 * we can use the data deletion functions which do not
3210 * require that a cnode/vnode pair exist.
3211 */
3212
3213 /* Check if this file is being used. */
3214 if (isdir == 0) {
3215 dataforkbusy = vnode_isinuse(vp, 0);
3216 /*
3217 * At this point, we know that 'vp' points to the
3218 * a data fork because we checked it up front. And if
3219 * there is no rsrc fork, rsrc_vp will be NULL.
3220 */
3221 if (rsrc_vp && (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3222 rsrcforkbusy = vnode_isinuse(rsrc_vp, 0);
3223 }
3224 }
3225
3226 /* Check if we have to break the deletion into multiple pieces. */
3227 if (isdir == 0) {
3228 isbigfile = ((cp->c_datafork->ff_size >= HFS_BIGFILE_SIZE) && overflow_extents(VTOF(vp)));
3229 }
3230
3231 /* Check if the file has xattrs. If it does we'll have to delete them in
3232 individual transactions in case there are too many */
3233 if ((hfsmp->hfs_attribute_vp != NULL) &&
3234 (cp->c_attr.ca_recflags & kHFSHasAttributesMask) != 0) {
3235 defer_remove = 1;
3236 }
3237
3238 /* If we are explicitly told to only unlink item and move to hidden dir, then do it */
3239 if (only_unlink) {
3240 defer_remove = 1;
3241 }
3242
3243 /*
3244 * Carbon semantics prohibit deleting busy files.
3245 * (enforced when VNODE_REMOVE_NODELETEBUSY is requested)
3246 */
3247 if (dataforkbusy || rsrcforkbusy) {
3248 if ((flags & VNODE_REMOVE_NODELETEBUSY) ||
3249 (hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid == 0)) {
3250 error = EBUSY;
3251 goto out;
3252 }
3253 }
3254
3255 #if QUOTA
3256 if (hfsmp->hfs_flags & HFS_QUOTAS)
3257 (void)hfs_getinoquota(cp);
3258 #endif /* QUOTA */
3259
3260 /*
3261 * Do a ubc_setsize to indicate we need to wipe contents if:
3262 * 1) item is a regular file.
3263 * 2) Neither fork is busy AND we are not told to unlink this.
3264 *
3265 * We need to check for the defer_remove since it can be set without
3266 * having a busy data or rsrc fork
3267 */
3268 if (isdir == 0 && (!dataforkbusy || !rsrcforkbusy) && (defer_remove == 0)) {
3269 /*
3270 * A ubc_setsize can cause a pagein so defer it
3271 * until after the cnode lock is dropped. The
3272 * cnode lock cannot be dropped/reacquired here
3273 * since we might already hold the journal lock.
3274 */
3275 if (!dataforkbusy && cp->c_datafork->ff_blocks && !isbigfile) {
3276 cp->c_flag |= C_NEED_DATA_SETSIZE;
3277 }
3278 if (!rsrcforkbusy && rsrc_vp) {
3279 cp->c_flag |= C_NEED_RSRC_SETSIZE;
3280 }
3281 }
3282
3283 if ((error = hfs_start_transaction(hfsmp)) != 0) {
3284 goto out;
3285 }
3286 started_tr = 1;
3287
3288 // XXXdbg - if we're journaled, kill any dirty symlink buffers
3289 if (hfsmp->jnl && vnode_islnk(vp) && (defer_remove == 0)) {
3290 buf_iterate(vp, hfs_removefile_callback, BUF_SKIP_NONLOCKED, (void *)hfsmp);
3291 }
3292
3293 /*
3294 * Prepare to truncate any non-busy forks. Busy forks will
3295 * get truncated when their vnode goes inactive.
3296 * Note that we will only enter this region if we
3297 * can avoid creating an open-unlinked file. If
3298 * either region is busy, we will have to create an open
3299 * unlinked file.
3300 *
3301 * Since we are deleting the file, we need to stagger the runtime
3302 * modifications to do things in such a way that a crash won't
3303 * result in us getting overlapped extents or any other
3304 * bad inconsistencies. As such, we call prepare_release_storage
3305 * which updates the UBC, updates quota information, and releases
3306 * any loaned blocks that belong to this file. No actual
3307 * truncation or bitmap manipulation is done until *AFTER*
3308 * the catalog record is removed.
3309 */
3310 if (isdir == 0 && (!dataforkbusy && !rsrcforkbusy) && (only_unlink == 0)) {
3311
3312 if (!dataforkbusy && !isbigfile && cp->c_datafork->ff_blocks != 0) {
3313
3314 error = hfs_prepare_release_storage (hfsmp, vp);
3315 if (error) {
3316 goto out;
3317 }
3318 update_vh = 1;
3319 }
3320
3321 /*
3322 * If the resource fork vnode does not exist, we can skip this step.
3323 */
3324 if (!rsrcforkbusy && rsrc_vp) {
3325 error = hfs_prepare_release_storage (hfsmp, rsrc_vp);
3326 if (error) {
3327 goto out;
3328 }
3329 update_vh = 1;
3330 }
3331 }
3332
3333 /*
3334 * Protect against a race with rename by using the component
3335 * name passed in and parent id from dvp (instead of using
3336 * the cp->c_desc which may have changed). Also, be aware that
3337 * because we allow directories to be passed in, we need to special case
3338 * this temporary descriptor in case we were handed a directory.
3339 */
3340 if (isdir) {
3341 desc.cd_flags = CD_ISDIR;
3342 }
3343 else {
3344 desc.cd_flags = 0;
3345 }
3346 desc.cd_encoding = cp->c_desc.cd_encoding;
3347 desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
3348 desc.cd_namelen = cnp->cn_namelen;
3349 desc.cd_parentcnid = dcp->c_fileid;
3350 desc.cd_hint = cp->c_desc.cd_hint;
3351 desc.cd_cnid = cp->c_cnid;
3352 microtime(&tv);
3353
3354 /*
3355 * There are two cases to consider:
3356 * 1. File/Dir is busy/big/defer_remove ==> move/rename the file/dir
3357 * 2. File is not in use ==> remove the file
3358 *
3359 * We can get a directory in case 1 because it may have had lots of attributes,
3360 * which need to get removed here.
3361 */
3362 if (dataforkbusy || rsrcforkbusy || isbigfile || defer_remove) {
3363 char delname[32];
3364 struct cat_desc to_desc;
3365 struct cat_desc todir_desc;
3366
3367 /*
3368 * Orphan this file or directory (move to hidden directory).
3369 * Again, we need to take care that we treat directories as directories,
3370 * and files as files. Because directories with attributes can be passed in
3371 * check to make sure that we have a directory or a file before filling in the
3372 * temporary descriptor's flags. We keep orphaned directories AND files in
3373 * the FILE_HARDLINKS private directory since we're generalizing over all
3374 * orphaned filesystem objects.
3375 */
3376 bzero(&todir_desc, sizeof(todir_desc));
3377 todir_desc.cd_parentcnid = 2;
3378
3379 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
3380 bzero(&to_desc, sizeof(to_desc));
3381 to_desc.cd_nameptr = (const u_int8_t *)delname;
3382 to_desc.cd_namelen = strlen(delname);
3383 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
3384 if (isdir) {
3385 to_desc.cd_flags = CD_ISDIR;
3386 }
3387 else {
3388 to_desc.cd_flags = 0;
3389 }
3390 to_desc.cd_cnid = cp->c_cnid;
3391
3392 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
3393 if (!skip_reserve) {
3394 if ((error = cat_preflight(hfsmp, CAT_RENAME, NULL, 0))) {
3395 hfs_systemfile_unlock(hfsmp, lockflags);
3396 goto out;
3397 }
3398 }
3399
3400 error = cat_rename(hfsmp, &desc, &todir_desc,
3401 &to_desc, (struct cat_desc *)NULL);
3402
3403 if (error == 0) {
3404 hfsmp->hfs_private_attr[FILE_HARDLINKS].ca_entries++;
3405 if (isdir == 1) {
3406 INC_FOLDERCOUNT(hfsmp, hfsmp->hfs_private_attr[FILE_HARDLINKS]);
3407 }
3408 (void) cat_update(hfsmp, &hfsmp->hfs_private_desc[FILE_HARDLINKS],
3409 &hfsmp->hfs_private_attr[FILE_HARDLINKS], NULL, NULL);
3410
3411 /* Update the parent directory */
3412 if (dcp->c_entries > 0)
3413 dcp->c_entries--;
3414 if (isdir == 1) {
3415 DEC_FOLDERCOUNT(hfsmp, dcp->c_attr);
3416 }
3417 dcp->c_dirchangecnt++;
3418 dcp->c_ctime = tv.tv_sec;
3419 dcp->c_mtime = tv.tv_sec;
3420 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3421
3422 /* Update the file or directory's state */
3423 cp->c_flag |= C_DELETED;
3424 cp->c_ctime = tv.tv_sec;
3425 --cp->c_linkcount;
3426 (void) cat_update(hfsmp, &to_desc, &cp->c_attr, NULL, NULL);
3427 }
3428 hfs_systemfile_unlock(hfsmp, lockflags);
3429 if (error)
3430 goto out;
3431
3432 }
3433 else {
3434 /*
3435 * Nobody is using this item; we can safely remove everything.
3436 */
3437 struct filefork *temp_rsrc_fork = NULL;
3438 #if QUOTA
3439 off_t savedbytes;
3440 int blksize = hfsmp->blockSize;
3441 #endif
3442 u_int32_t fileid = cp->c_fileid;
3443
3444 /*
3445 * Figure out if we need to read the resource fork data into
3446 * core before wiping out the catalog record.
3447 *
3448 * 1) Must not be a directory
3449 * 2) cnode's c_rsrcfork ptr must be NULL.
3450 * 3) rsrc fork must have actual blocks
3451 */
3452 if ((isdir == 0) && (cp->c_rsrcfork == NULL) &&
3453 (cp->c_blocks - VTOF(vp)->ff_blocks)) {
3454 /*
3455 * The resource fork vnode & filefork did not exist.
3456 * Create a temporary one for use in this function only.
3457 */
3458 MALLOC_ZONE (temp_rsrc_fork, struct filefork *, sizeof (struct filefork), M_HFSFORK, M_WAITOK);
3459 bzero(temp_rsrc_fork, sizeof(struct filefork));
3460 temp_rsrc_fork->ff_cp = cp;
3461 rl_init(&temp_rsrc_fork->ff_invalidranges);
3462 }
3463
3464 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3465
3466 /* Look up the resource fork first, if necessary */
3467 if (temp_rsrc_fork) {
3468 error = cat_lookup (hfsmp, &desc, 1, 0, (struct cat_desc*) NULL,
3469 (struct cat_attr*) NULL, &temp_rsrc_fork->ff_data, NULL);
3470 if (error) {
3471 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3472 hfs_systemfile_unlock (hfsmp, lockflags);
3473 goto out;
3474 }
3475 }
3476
3477 if (!skip_reserve) {
3478 if ((error = cat_preflight(hfsmp, CAT_DELETE, NULL, 0))) {
3479 if (temp_rsrc_fork) {
3480 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3481 }
3482 hfs_systemfile_unlock(hfsmp, lockflags);
3483 goto out;
3484 }
3485 }
3486
3487 error = cat_delete(hfsmp, &desc, &cp->c_attr);
3488
3489 if (error && error != ENXIO && error != ENOENT) {
3490 printf("hfs_removefile: deleting file %s (id=%d) vol=%s err=%d\n",
3491 cp->c_desc.cd_nameptr, cp->c_attr.ca_fileid, hfsmp->vcbVN, error);
3492 }
3493
3494 if (error == 0) {
3495 /* Update the parent directory */
3496 if (dcp->c_entries > 0)
3497 dcp->c_entries--;
3498 dcp->c_dirchangecnt++;
3499 dcp->c_ctime = tv.tv_sec;
3500 dcp->c_mtime = tv.tv_sec;
3501 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
3502 }
3503 hfs_systemfile_unlock(hfsmp, lockflags);
3504
3505 if (error) {
3506 if (temp_rsrc_fork) {
3507 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3508 }
3509 goto out;
3510 }
3511
3512 /*
3513 * Now that we've wiped out the catalog record, the file effectively doesn't
3514 * exist anymore. So update the quota records to reflect the loss of the
3515 * data fork and the resource fork.
3516 */
3517 #if QUOTA
3518 if (cp->c_datafork->ff_blocks > 0) {
3519 savedbytes = ((off_t)cp->c_datafork->ff_blocks * (off_t)blksize);
3520 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3521 }
3522
3523 /*
3524 * We may have just deleted the catalog record for a resource fork even
3525 * though it did not exist in core as a vnode. However, just because there
3526 * was a resource fork pointer in the cnode does not mean that it had any blocks.
3527 */
3528 if (temp_rsrc_fork || cp->c_rsrcfork) {
3529 if (cp->c_rsrcfork) {
3530 if (cp->c_rsrcfork->ff_blocks > 0) {
3531 savedbytes = ((off_t)cp->c_rsrcfork->ff_blocks * (off_t)blksize);
3532 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3533 }
3534 }
3535 else {
3536 /* we must have used a temporary fork */
3537 savedbytes = ((off_t)temp_rsrc_fork->ff_blocks * (off_t)blksize);
3538 (void) hfs_chkdq(cp, (int64_t)-(savedbytes), NOCRED, 0);
3539 }
3540 }
3541
3542 if (hfsmp->hfs_flags & HFS_QUOTAS) {
3543 (void)hfs_chkiq(cp, -1, NOCRED, 0);
3544 }
3545 #endif
3546
3547 /*
3548 * If we didn't get any errors deleting the catalog entry, then go ahead
3549 * and release the backing store now. The filefork pointers are still valid.
3550 */
3551 if (temp_rsrc_fork) {
3552 error = hfs_release_storage (hfsmp, cp->c_datafork, temp_rsrc_fork, fileid);
3553 }
3554 else {
3555 /* if cp->c_rsrcfork == NULL, hfs_release_storage will skip over it. */
3556 error = hfs_release_storage (hfsmp, cp->c_datafork, cp->c_rsrcfork, fileid);
3557 }
3558 if (error) {
3559 /*
3560 * If we encountered an error updating the extents and bitmap,
3561 * mark the volume inconsistent. At this point, the catalog record has
3562 * already been deleted, so we can't recover it. We need
3563 * to proceed and update the volume header and mark the cnode C_NOEXISTS.
3564 * The subsequent fsck should be able to recover the free space for us.
3565 */
3566 hfs_mark_volume_inconsistent(hfsmp);
3567 }
3568 else {
3569 /* reset update_vh to 0, since hfs_release_storage should have done it for us */
3570 update_vh = 0;
3571 }
3572
3573 /* Get rid of the temporary rsrc fork */
3574 if (temp_rsrc_fork) {
3575 FREE_ZONE (temp_rsrc_fork, sizeof(struct filefork), M_HFSFORK);
3576 }
3577
3578 cp->c_flag |= C_NOEXISTS;
3579 cp->c_flag &= ~C_DELETED;
3580
3581 cp->c_touch_chgtime = TRUE; /* XXX needed ? */
3582 --cp->c_linkcount;
3583
3584 /*
3585 * We must never get a directory if we're in this else block. We could
3586 * accidentally drop the number of files in the volume header if we did.
3587 */
3588 hfs_volupdate(hfsmp, VOL_RMFILE, (dcp->c_cnid == kHFSRootFolderID));
3589
3590 }
3591
3592 /*
3593 * All done with this cnode's descriptor...
3594 *
3595 * Note: all future catalog calls for this cnode must be by
3596 * fileid only. This is OK for HFS (which doesn't have file
3597 * thread records) since HFS doesn't support the removal of
3598 * busy files.
3599 */
3600 cat_releasedesc(&cp->c_desc);
3601
3602 out:
3603 if (error) {
3604 cp->c_flag &= ~C_DELETED;
3605 }
3606
3607 if (update_vh) {
3608 /*
3609 * If we bailed out earlier, we may need to update the volume header
3610 * to deal with the borrowed blocks accounting.
3611 */
3612 hfs_volupdate (hfsmp, VOL_UPDATE, 0);
3613 }
3614
3615 if (started_tr) {
3616 hfs_end_transaction(hfsmp);
3617 }
3618
3619 dcp->c_flag &= ~C_DIR_MODIFICATION;
3620 wakeup((caddr_t)&dcp->c_flag);
3621
3622 return (error);
3623 }
3624
3625
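/*
 * Replace a cnode's catalog descriptor with *cdp: release the cnode's current
 * name buffer (if it owns one), copy *cdp over c_desc, and transfer ownership
 * of cdp's name buffer to the cnode by clearing cdp's name fields.
 */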
3626 __private_extern__ void
3627 replace_desc(struct cnode *cp, struct cat_desc *cdp)
3628 {
3629 // fixes 4348457 and 4463138
3630 if (&cp->c_desc == cdp) {
3631 return;
3632 }
3633
3634 /* First release allocated name buffer */
3635 if (cp->c_desc.cd_flags & CD_HASBUF && cp->c_desc.cd_nameptr != 0) {
3636 const u_int8_t *name = cp->c_desc.cd_nameptr;
3637
3638 cp->c_desc.cd_nameptr = 0;
3639 cp->c_desc.cd_namelen = 0;
3640 cp->c_desc.cd_flags &= ~CD_HASBUF;
3641 vfs_removename((const char *)name);
3642 }
3643 bcopy(cdp, &cp->c_desc, sizeof(cp->c_desc));
3644
3645 /* Cnode now owns the name buffer */
3646 cdp->cd_nameptr = 0;
3647 cdp->cd_namelen = 0;
3648 cdp->cd_flags &= ~CD_HASBUF;
3649 }
3650
3651
3652 /*
3653 * Rename a cnode.
3654 *
3655 * The VFS layer guarantees that:
3656 * - source and destination will either both be directories, or
3657 * both not be directories.
3658 * - all the vnodes are from the same file system
3659 *
3660 * When the target is a directory, HFS must ensure that it's empty.
3661 *
3662 * Note that this function requires up to 6 vnodes in order to work properly
3663 * if it is operating on files (and not on directories). This is because only
3664 * files can have resource forks, and we now require iocounts to be held on the
3665 * vnodes corresponding to the resource forks (if applicable) as well as
3666 * the files or directories undergoing rename. The problem with not holding
3667 * iocounts on the resource fork vnodes is that it can lead to a deadlock
3668 * situation: The rsrc fork of the source file may be recycled and reclaimed
3669 * in order to provide a vnode for the destination file's rsrc fork. Since
3670 * data and rsrc forks share the same cnode, we'd eventually try to lock the
3671 * source file's cnode in order to sync its rsrc fork to disk, but it's already
3672 * been locked. By taking the rsrc fork vnodes up front we ensure that they
3673 * cannot be recycled, and that the situation mentioned above cannot happen.
3674 */
3675 int
3676 hfs_vnop_rename(ap)
3677 struct vnop_rename_args /* {
3678 struct vnode *a_fdvp;
3679 struct vnode *a_fvp;
3680 struct componentname *a_fcnp;
3681 struct vnode *a_tdvp;
3682 struct vnode *a_tvp;
3683 struct componentname *a_tcnp;
3684 vfs_context_t a_context;
3685 } */ *ap;
3686 {
3687 struct vnode *tvp = ap->a_tvp;
3688 struct vnode *tdvp = ap->a_tdvp;
3689 struct vnode *fvp = ap->a_fvp;
3690 struct vnode *fdvp = ap->a_fdvp;
3691 /*
3692 * Note that we only need locals for the target/destination's
3693 * resource fork vnode (and only if necessary). We don't care if the
3694 * source has a resource fork vnode or not.
3695 */
3696 struct vnode *tvp_rsrc = NULLVP;
3697 uint32_t tvp_rsrc_vid = 0;
3698 struct componentname *tcnp = ap->a_tcnp;
3699 struct componentname *fcnp = ap->a_fcnp;
3700 struct proc *p = vfs_context_proc(ap->a_context);
3701 struct cnode *fcp;
3702 struct cnode *fdcp;
3703 struct cnode *tdcp;
3704 struct cnode *tcp;
3705 struct cnode *error_cnode;
3706 struct cat_desc from_desc;
3707 struct cat_desc to_desc;
3708 struct cat_desc out_desc;
3709 struct hfsmount *hfsmp;
3710 cat_cookie_t cookie;
3711 int tvp_deleted = 0;
3712 int started_tr = 0, got_cookie = 0;
3713 int took_trunc_lock = 0;
3714 int lockflags;
3715 int error;
3716 time_t orig_from_ctime, orig_to_ctime;
3717 int emit_rename = 1;
3718 int emit_delete = 1;
3719 int is_tracked = 0;
3720
3721 orig_from_ctime = VTOC(fvp)->c_ctime;
3722 if (tvp && VTOC(tvp)) {
3723 orig_to_ctime = VTOC(tvp)->c_ctime;
3724 } else {
3725 orig_to_ctime = ~0;
3726 }
3727
3728 hfsmp = VTOHFS(tdvp);
3729 /*
3730 * Do special case checks here. If fvp == tvp then we need to check the
3731 * cnode with locks held.
3732 */
3733 if (fvp == tvp) {
3734 int is_hardlink = 0;
3735 /*
3736 * In this case, we do *NOT* ever emit a DELETE event.
3737 * We may not necessarily emit a RENAME event
3738 */
3739 emit_delete = 0;
3740 if ((error = hfs_lock(VTOC(fvp), HFS_SHARED_LOCK, HFS_LOCK_DEFAULT))) {
3741 return error;
3742 }
3743 /* Check to see if the item is a hardlink or not */
3744 is_hardlink = (VTOC(fvp)->c_flag & C_HARDLINK);
3745 hfs_unlock (VTOC(fvp));
3746
3747 /*
3748 * If the item is not a hardlink, then case sensitivity must be off; otherwise
3749 * two names would not resolve to the same cnode unless they were case variants.
3750 */
3751 if (is_hardlink) {
3752 emit_rename = 0;
3753 /*
3754 * Hardlinks are a little trickier. We only want to emit a rename event
3755 * if the item is a hardlink, the parent directories are the same, case sensitivity
3756 * is off, and the case folded names are the same. See the fvp == tvp case below for more
3757 * info.
3758 */
3759
3760 if ((fdvp == tdvp) && ((hfsmp->hfs_flags & HFS_CASE_SENSITIVE) == 0)) {
3761 if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
3762 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
3763 /* Then in this case only it is ok to emit a rename */
3764 emit_rename = 1;
3765 }
3766 }
3767 }
3768 }
3769 if (emit_rename) {
3770 /* c_bsdflags should only be assessed while holding the cnode lock.
3771 * This is not done consistently throughout the code and can result
3772 * in a race. This will be fixed via rdar://12181064
3773 */
3774 if (VTOC(fvp)->c_bsdflags & UF_TRACKED) {
3775 is_tracked = 1;
3776 }
3777 check_for_tracked_file(fvp, orig_from_ctime, NAMESPACE_HANDLER_RENAME_OP, NULL);
3778 }
3779
3780 if (tvp && VTOC(tvp)) {
3781 if (emit_delete) {
3782 check_for_tracked_file(tvp, orig_to_ctime, NAMESPACE_HANDLER_DELETE_OP, NULL);
3783 }
3784 }
3785
3786 retry:
3787 /* When tvp exists, take the truncate lock for hfs_removefile(). */
3788 if (tvp && (vnode_isreg(tvp) || vnode_islnk(tvp))) {
3789 hfs_lock_truncate(VTOC(tvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
3790 took_trunc_lock = 1;
3791 }
3792
3793 error = hfs_lockfour(VTOC(fdvp), VTOC(fvp), VTOC(tdvp), tvp ? VTOC(tvp) : NULL,
3794 HFS_EXCLUSIVE_LOCK, &error_cnode);
3795 if (error) {
3796 if (took_trunc_lock) {
3797 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
3798 took_trunc_lock = 0;
3799 }
3800
3801 /*
3802 * We hit an error path. If we were trying to re-acquire the locks
3803 * after coming through here once, we might have already obtained
3804 * an iocount on tvp's resource fork vnode. Drop that before dealing
3805 * with the failure. Note this is safe -- since we are in an
3806 * error handling path, we can't be holding the cnode locks.
3807 */
3808 if (tvp_rsrc) {
3809 vnode_put (tvp_rsrc);
3810 tvp_rsrc_vid = 0;
3811 tvp_rsrc = NULL;
3812 }
3813
3814 /*
3815 * tvp might no longer exist. If the cause of the lock failure
3816 * was tvp, then we can try again with tvp/tcp set to NULL.
3817 * This is ok because the vfs syscall will vnode_put the vnodes
3818 * after we return from hfs_vnop_rename.
3819 */
3820 if ((error == ENOENT) && (tvp != NULL) && (error_cnode == VTOC(tvp))) {
3821 tcp = NULL;
3822 tvp = NULL;
3823 goto retry;
3824 }
3825
3826 if (emit_rename && is_tracked) {
3827 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
3828 }
3829
3830 return (error);
3831 }
3832
3833 fdcp = VTOC(fdvp);
3834 fcp = VTOC(fvp);
3835 tdcp = VTOC(tdvp);
3836 tcp = tvp ? VTOC(tvp) : NULL;
3837
3838 /*
3839 * Acquire iocounts on the destination's resource fork vnode
3840 * if necessary. If dst/src are files and the dst has a resource
3841 * fork vnode, then we need to try and acquire an iocount on the rsrc vnode.
3842 * If it does not exist, then we don't care and can skip it.
3843 */
3844 if ((vnode_isreg(fvp)) || (vnode_islnk(fvp))) {
3845 if ((tvp) && (tcp->c_rsrc_vp) && (tvp_rsrc == NULL)) {
3846 tvp_rsrc = tcp->c_rsrc_vp;
3847 /*
3848 * We can look at the vid here because we're holding the
3849 * cnode lock on the underlying cnode for this rsrc vnode.
3850 */
3851 tvp_rsrc_vid = vnode_vid (tvp_rsrc);
3852
3853 /* Unlock everything to acquire iocount on this rsrc vnode */
3854 if (took_trunc_lock) {
3855 hfs_unlock_truncate (VTOC(tvp), HFS_LOCK_DEFAULT);
3856 took_trunc_lock = 0;
3857 }
3858 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
3859
3860 if (vnode_getwithvid (tvp_rsrc, tvp_rsrc_vid)) {
3861 /* iocount acquisition failed. Reset fields and start over.. */
3862 tvp_rsrc_vid = 0;
3863 tvp_rsrc = NULL;
3864 }
3865 goto retry;
3866 }
3867 }
3868
3869
3870
3871 /* Ensure we didn't race src or dst parent directories with rmdir. */
3872 if (fdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
3873 error = ENOENT;
3874 goto out;
3875 }
3876
3877 if (tdcp->c_flag & (C_NOEXISTS | C_DELETED)) {
3878 error = ENOENT;
3879 goto out;
3880 }
3881
3882
3883 /* Check for a race against unlink. The hfs_valid_cnode checks validate
3884 * the parent/child relationship with fdcp and tdcp, as well as the
3885 * component name of the target cnodes.
3886 */
3887 if ((fcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, fdvp, fcnp, fcp->c_fileid, NULL, &error)) {
3888 error = ENOENT;
3889 goto out;
3890 }
3891
3892 if (tcp && ((tcp->c_flag & (C_NOEXISTS | C_DELETED)) || !hfs_valid_cnode(hfsmp, tdvp, tcnp, tcp->c_fileid, NULL, &error))) {
3893 //
3894 // hmm, the destination vnode isn't valid any more.
3895 // in this case we can just drop him and pretend he
3896 // never existed in the first place.
3897 //
3898 if (took_trunc_lock) {
3899 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
3900 took_trunc_lock = 0;
3901 }
3902 error = 0;
3903
3904 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
3905
3906 tcp = NULL;
3907 tvp = NULL;
3908
3909 // retry the locking with tvp null'ed out
3910 goto retry;
3911 }
3912
3913 fdcp->c_flag |= C_DIR_MODIFICATION;
3914 if (fdvp != tdvp) {
3915 tdcp->c_flag |= C_DIR_MODIFICATION;
3916 }
3917
3918 /*
3919 * Disallow renaming of a directory hard link if the source and
3920 * destination parent directories are different, or a directory whose
3921 * descendant is a directory hard link and one of the ancestors
3922 * of the destination directory is a directory hard link.
3923 */
3924 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
3925 if (fcp->c_flag & C_HARDLINK) {
3926 error = EPERM;
3927 goto out;
3928 }
3929 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
3930 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3931 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
3932 error = EPERM;
3933 hfs_systemfile_unlock(hfsmp, lockflags);
3934 goto out;
3935 }
3936 hfs_systemfile_unlock(hfsmp, lockflags);
3937 }
3938 }
3939
3940 /*
3941 * The following edge case is caught here:
3942 * (to cannot be a descendent of from)
3943 *
3944 * o fdvp
3945 * /
3946 * /
3947 * o fvp
3948 * \
3949 * \
3950 * o tdvp
3951 * /
3952 * /
3953 * o tvp
3954 */
3955 if (tdcp->c_parentcnid == fcp->c_fileid) {
3956 error = EINVAL;
3957 goto out;
3958 }
3959
3960 /*
3961 * The following two edge cases are caught here:
3962 * (note tvp is not empty)
3963 *
3964 * o tdvp o tdvp
3965 * / /
3966 * / /
3967 * o tvp tvp o fdvp
3968 * \ \
3969 * \ \
3970 * o fdvp o fvp
3971 * /
3972 * /
3973 * o fvp
3974 */
3975 if (tvp && vnode_isdir(tvp) && (tcp->c_entries != 0) && fvp != tvp) {
3976 error = ENOTEMPTY;
3977 goto out;
3978 }
3979
3980 /*
3981 * The following edge case is caught here:
3982 * (the from child and parent are the same)
3983 *
3984 * o tdvp
3985 * /
3986 * /
3987 * fdvp o fvp
3988 */
3989 if (fdvp == fvp) {
3990 error = EINVAL;
3991 goto out;
3992 }
3993
3994 /*
3995 * Make sure "from" vnode and its parent are changeable.
3996 */
3997 if ((fcp->c_bsdflags & (IMMUTABLE | APPEND)) || (fdcp->c_bsdflags & APPEND)) {
3998 error = EPERM;
3999 goto out;
4000 }
4001
4002 /*
4003 * If the destination parent directory is "sticky", then the
4004 * user must own the parent directory, or the destination of
4005 * the rename, otherwise the destination may not be changed
4006 * (except by root). This implements append-only directories.
4007 *
4008 * Note that checks for immutable and write access are done
4009 * by the call to hfs_removefile.
4010 */
4011 if (tvp && (tdcp->c_mode & S_ISTXT) &&
4012 (suser(vfs_context_ucred(tcnp->cn_context), NULL)) &&
4013 (kauth_cred_getuid(vfs_context_ucred(tcnp->cn_context)) != tdcp->c_uid) &&
4014 (hfs_owner_rights(hfsmp, tcp->c_uid, vfs_context_ucred(tcnp->cn_context), p, false)) ) {
4015 error = EPERM;
4016 goto out;
4017 }
4018
4019 /* Don't allow modification of the journal or journal_info_block */
4020 if (hfs_is_journal_file(hfsmp, fcp) ||
4021 (tcp && hfs_is_journal_file(hfsmp, tcp))) {
4022 error = EPERM;
4023 goto out;
4024 }
4025
4026 #if QUOTA
4027 if (tvp)
4028 (void)hfs_getinoquota(tcp);
4029 #endif
4030 /* Preflighting done, take fvp out of the name space. */
4031 cache_purge(fvp);
4032
4033 bzero(&from_desc, sizeof(from_desc));
4034 from_desc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4035 from_desc.cd_namelen = fcnp->cn_namelen;
4036 from_desc.cd_parentcnid = fdcp->c_fileid;
4037 from_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4038 from_desc.cd_cnid = fcp->c_cnid;
4039
4040 bzero(&to_desc, sizeof(to_desc));
4041 to_desc.cd_nameptr = (const u_int8_t *)tcnp->cn_nameptr;
4042 to_desc.cd_namelen = tcnp->cn_namelen;
4043 to_desc.cd_parentcnid = tdcp->c_fileid;
4044 to_desc.cd_flags = fcp->c_desc.cd_flags & ~(CD_HASBUF | CD_DECOMPOSED);
4045 to_desc.cd_cnid = fcp->c_cnid;
4046
4047 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4048 goto out;
4049 }
4050 started_tr = 1;
4051
4052 /* hfs_vnop_link() and hfs_vnop_rename() set kHFSHasChildLinkMask
4053 * inside a journal transaction and without holding a cnode lock.
4054 * As setting of this bit depends on being in a journal transaction for
4055 * concurrency, check this bit again after we start the journal transaction for rename
4056 * to ensure that this directory does not have any descendant that
4057 * is a directory hard link.
4058 */
4059 if (vnode_isdir(fvp) && (fdvp != tdvp)) {
4060 if (fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) {
4061 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4062 if (cat_check_link_ancestry(hfsmp, tdcp->c_fileid, 0)) {
4063 error = EPERM;
4064 hfs_systemfile_unlock(hfsmp, lockflags);
4065 goto out;
4066 }
4067 hfs_systemfile_unlock(hfsmp, lockflags);
4068 }
4069 }
4070
4071 // if it's a hardlink then re-lookup the name so
4072 // that we get the correct cnid in from_desc (see
4073 // the comment in hfs_removefile for more details)
4074 //
4075 if (fcp->c_flag & C_HARDLINK) {
4076 struct cat_desc tmpdesc;
4077 cnid_t real_cnid;
4078
4079 tmpdesc.cd_nameptr = (const u_int8_t *)fcnp->cn_nameptr;
4080 tmpdesc.cd_namelen = fcnp->cn_namelen;
4081 tmpdesc.cd_parentcnid = fdcp->c_fileid;
4082 tmpdesc.cd_hint = fdcp->c_childhint;
4083 tmpdesc.cd_flags = fcp->c_desc.cd_flags & CD_ISDIR;
4084 tmpdesc.cd_encoding = 0;
4085
4086 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4087
4088 if (cat_lookup(hfsmp, &tmpdesc, 0, 0, NULL, NULL, NULL, &real_cnid) != 0) {
4089 hfs_systemfile_unlock(hfsmp, lockflags);
4090 goto out;
4091 }
4092
4093 // use the real cnid instead of whatever happened to be there
4094 from_desc.cd_cnid = real_cnid;
4095 hfs_systemfile_unlock(hfsmp, lockflags);
4096 }
4097
4098 /*
4099 * Reserve some space in the Catalog file.
4100 */
4101 if ((error = cat_preflight(hfsmp, CAT_RENAME + CAT_DELETE, &cookie, p))) {
4102 goto out;
4103 }
4104 got_cookie = 1;
4105
4106 /*
4107 * If the destination exists then it may need to be removed.
4108 *
4109 * Due to HFS's locking system, we should always move the
4110 * existing 'tvp' element to the hidden directory in hfs_vnop_rename.
4111 * Because the VNOP_LOOKUP call enters and exits the filesystem independently
4112 * of the actual vnop that it was trying to do (stat, link, readlink),
4113 * we must release the cnode lock of that element during the interim to
4114 * do MAC checking, vnode authorization, and other calls. In that time,
4115 * the item can be deleted (or renamed over). However, only in the rename
4116 * case is it inappropriate to return ENOENT from any of those calls. Either
4117 * the call should return information about the old element (stale), or get
4118 * information about the newer element that we are about to write in its place.
4119 *
4120 * HFS lookup has been modified to detect a rename and re-drive its
4121 * lookup internally. For other calls that have already succeeded in
4122 * their lookup call and are waiting to acquire the cnode lock in order
4123 * to proceed, that cnode lock will not fail due to the cnode being marked
4124 * C_NOEXISTS, because it won't have been marked as such. It will only
4125 * have C_DELETED. Thus, they will simply act on the stale open-unlinked
4126 * element. All future callers will get the new element.
4127 *
4128 * To implement this behavior, we pass the "only_unlink" argument to
4129 * hfs_removefile and hfs_removedir. This will result in the vnode acting
4130 * as though it is open-unlinked. Additionally, when we are done moving the
4131 * element to the hidden directory, we vnode_recycle the target so that it is
4132 * reclaimed as soon as possible. Reclaim and inactive are both
4133 * capable of clearing out unused blocks for an open-unlinked file or dir.
4134 */
4135 if (tvp) {
4136 /*
4137 * When fvp matches tvp they could be case variants
4138 * or matching hard links.
4139 */
4140 if (fvp == tvp) {
4141 if (!(fcp->c_flag & C_HARDLINK)) {
4142 /*
4143 * If they're not hardlinks, then fvp == tvp must mean we
4144 * are using case-insensitive HFS because case-sensitive would
4145 * not use the same vnode for both. In this case we just update
4146 * the catalog for: a -> A
4147 */
4148 goto skip_rm; /* simple case variant */
4149
4150 }
4151 /* For all cases below, we must be using hardlinks */
4152 else if ((fdvp != tdvp) ||
4153 (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)) {
4154 /*
4155 * If the parent directories are not the same, AND the two items
4156 * are hardlinks, posix says to do nothing:
4157 * dir1/fred <-> dir2/bob and the op was mv dir1/fred -> dir2/bob
4158 * We just return 0 in this case.
4159 *
4160 * If case sensitivity is on, and we are using hardlinks
4161 * then renaming is supposed to do nothing.
4162 * dir1/fred <-> dir2/FRED, and op == mv dir1/fred -> dir2/FRED
4163 */
4164 goto out; /* matching hardlinks, nothing to do */
4165
4166 } else if (hfs_namecmp((const u_int8_t *)fcnp->cn_nameptr, fcnp->cn_namelen,
4167 (const u_int8_t *)tcnp->cn_nameptr, tcnp->cn_namelen) == 0) {
4168 /*
4169 * If we get here, then the following must be true:
4170 * a) We are running case-insensitive HFS+.
4171 * b) Both paths 'fvp' and 'tvp' are in the same parent directory.
4172 * c) the two names are case-variants of each other.
4173 *
4174 * In this case, we are really only dealing with a single catalog record
4175 * whose name is being updated.
4176 *
4177 * op is dir1/fred -> dir1/FRED
4178 *
4179 * We need to special case the name matching, because if
4180 * dir1/fred <-> dir1/bob were the two links, and the
4181 * op was dir1/fred -> dir1/bob
4182 * That would fail/do nothing.
4183 */
4184 goto skip_rm; /* case-variant hardlink in the same dir */
4185 } else {
4186 goto out; /* matching hardlink, nothing to do */
4187 }
4188 }
4189
4190
4191 if (vnode_isdir(tvp)) {
4192 /*
4193 * hfs_removedir will eventually call hfs_removefile on the directory
4194 * we're working on, because only hfs_removefile does the renaming of the
4195 * item to the hidden directory. The directory will stay around in the
4196 * hidden directory with C_DELETED until it gets an inactive or a reclaim.
4197 * That way, we can destroy all of the EAs as needed and allow new ones to be
4198 * written.
4199 */
4200 error = hfs_removedir(tdvp, tvp, tcnp, HFSRM_SKIP_RESERVE, 1);
4201 }
4202 else {
4203 error = hfs_removefile(tdvp, tvp, tcnp, 0, HFSRM_SKIP_RESERVE, 0, NULL, 1);
4204
4205 /*
4206 * If the destination file had a resource fork vnode, then we need to get rid of
4207 * its blocks when there are no more references to it. Because the call to
4208 * hfs_removefile above always open-unlinks things, we need to force an inactive/reclaim
4209 * on the resource fork vnode, in order to prevent block leaks. Otherwise,
4210 * the resource fork vnode could prevent the data fork vnode from going out of scope
4211 * because it holds a v_parent reference on it. So we mark it for termination
4212 * with a call to vnode_recycle. hfs_vnop_reclaim has been modified so that it
4213 * can clean up the blocks of open-unlinked files and resource forks.
4214 *
4215 * We can safely call vnode_recycle on the resource fork because we took an iocount
4216 * reference on it at the beginning of the function.
4217 */
4218
4219 if ((error == 0) && (tcp->c_flag & C_DELETED) && (tvp_rsrc)) {
4220 vnode_recycle(tvp_rsrc);
4221 }
4222 }
4223
4224 if (error) {
4225 goto out;
4226 }
4227
4228 tvp_deleted = 1;
4229
4230 /* Mark 'tcp' as being deleted due to a rename */
4231 tcp->c_flag |= C_RENAMED;
4232
4233 /*
4234 * Aggressively mark tvp/tcp for termination to ensure that we recover all blocks
4235 * as quickly as possible.
4236 */
4237 vnode_recycle(tvp);
4238 }
4239 skip_rm:
4240 /*
4241 * All done with tvp and fvp.
4242 *
4243 * We also jump to this point if there was no destination observed during lookup and namei.
4244 * However, because only iocounts are held at the VFS layer, there is nothing preventing a
4245 * competing thread from racing us and creating a file or dir at the destination of this rename
4246 * operation. If this occurs, it may cause us to get a spurious EEXIST out of the cat_rename
4247 * call below. To preserve rename's atomicity, we need to signal VFS to re-drive the
4248 * namei/lookup and restart the rename operation. EEXIST is an allowable errno to be bubbled
4249 * out of the rename syscall, but not for this reason, since it is a synonym errno for ENOTEMPTY.
4250 * To signal VFS, we return ERECYCLE (which is also used for lookup restarts). This errno
4251 * will be swallowed and it will restart the operation.
4252 */
4253
4254 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4255 error = cat_rename(hfsmp, &from_desc, &tdcp->c_desc, &to_desc, &out_desc);
4256 hfs_systemfile_unlock(hfsmp, lockflags);
4257
4258 if (error) {
4259 if (error == EEXIST) {
4260 error = ERECYCLE;
4261 }
4262 goto out;
4263 }
4264
4265 /* Invalidate negative cache entries in the destination directory */
4266 if (tdcp->c_flag & C_NEG_ENTRIES) {
4267 cache_purge_negatives(tdvp);
4268 tdcp->c_flag &= ~C_NEG_ENTRIES;
4269 }
4270
4271 /* Update cnode's catalog descriptor */
4272 replace_desc(fcp, &out_desc);
4273 fcp->c_parentcnid = tdcp->c_fileid;
4274 fcp->c_hint = 0;
4275
4276 /* Now indicate this cnode needs to have date-added written to the finderinfo */
4277 fcp->c_flag |= C_NEEDS_DATEADDED;
4278 (void) hfs_update (fvp, 0);
4279
4280
4281 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_RMDIR : VOL_RMFILE,
4282 (fdcp->c_cnid == kHFSRootFolderID));
4283 hfs_volupdate(hfsmp, vnode_isdir(fvp) ? VOL_MKDIR : VOL_MKFILE,
4284 (tdcp->c_cnid == kHFSRootFolderID));
4285
4286 /* Update both parent directories. */
4287 if (fdvp != tdvp) {
4288 if (vnode_isdir(fvp)) {
4289 /* If the source directory has directory hard link
4290 * descendants, set the kHFSHasChildLinkBit in the
4291 * destination parent hierarchy
4292 */
4293 if ((fcp->c_attr.ca_recflags & kHFSHasChildLinkMask) &&
4294 !(tdcp->c_attr.ca_recflags & kHFSHasChildLinkMask)) {
4295
4296 tdcp->c_attr.ca_recflags |= kHFSHasChildLinkMask;
4297
4298 error = cat_set_childlinkbit(hfsmp, tdcp->c_parentcnid);
4299 if (error) {
4300 printf ("hfs_vnop_rename: error updating parent chain for %u\n", tdcp->c_cnid);
4301 error = 0;
4302 }
4303 }
4304 INC_FOLDERCOUNT(hfsmp, tdcp->c_attr);
4305 DEC_FOLDERCOUNT(hfsmp, fdcp->c_attr);
4306 }
4307 tdcp->c_entries++;
4308 tdcp->c_dirchangecnt++;
4309 if (fdcp->c_entries > 0)
4310 fdcp->c_entries--;
4311 fdcp->c_dirchangecnt++;
4312 fdcp->c_touch_chgtime = TRUE;
4313 fdcp->c_touch_modtime = TRUE;
4314
4315 fdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4316 (void) hfs_update(fdvp, 0);
4317 }
4318 tdcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
4319 tdcp->c_touch_chgtime = TRUE;
4320 tdcp->c_touch_modtime = TRUE;
4321
4322 tdcp->c_flag |= C_FORCEUPDATE; // XXXdbg - force it out!
4323 (void) hfs_update(tdvp, 0);
4324
4325 /* Update the vnode's name now that the rename has completed. */
4326 vnode_update_identity(fvp, tdvp, tcnp->cn_nameptr, tcnp->cn_namelen,
4327 tcnp->cn_hash, (VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME));
4328
4329 /*
4330 * At this point, we may have a resource fork vnode attached to the
4331 * 'from' vnode. If it exists, we will want to update its name, because
4332 * it contains the old name + _PATH_RSRCFORKSPEC. ("/..namedfork/rsrc").
4333 *
4334 * Note that the only thing we need to update here is the name attached to
4335 * the vnode, since a resource fork vnode does not have a separate resource
4336 * cnode -- it's still 'fcp'.
4337 */
4338 if (fcp->c_rsrc_vp) {
4339 char* rsrc_path = NULL;
4340 int len;
4341
4342 /* Create a new temporary buffer that's going to hold the new name */
4343 MALLOC_ZONE (rsrc_path, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
4344 len = snprintf (rsrc_path, MAXPATHLEN, "%s%s", tcnp->cn_nameptr, _PATH_RSRCFORKSPEC);
4345 len = MIN(len, MAXPATHLEN);
4346
4347 /*
4348 * vnode_update_identity will do the following for us:
4349 * 1) release reference on the existing rsrc vnode's name.
4350 * 2) copy/insert new name into the name cache
4351 * 3) attach the new name to the resource vnode
4352 * 4) update the vnode's vid
4353 */
4354 vnode_update_identity (fcp->c_rsrc_vp, fvp, rsrc_path, len, 0, (VNODE_UPDATE_NAME | VNODE_UPDATE_CACHE));
4355
4356 /* Free the memory associated with the resource fork's name */
4357 FREE_ZONE (rsrc_path, MAXPATHLEN, M_NAMEI);
4358 }
4359 out:
4360 if (got_cookie) {
4361 cat_postflight(hfsmp, &cookie, p);
4362 }
4363 if (started_tr) {
4364 hfs_end_transaction(hfsmp);
4365 }
4366
4367 fdcp->c_flag &= ~C_DIR_MODIFICATION;
4368 wakeup((caddr_t)&fdcp->c_flag);
4369 if (fdvp != tdvp) {
4370 tdcp->c_flag &= ~C_DIR_MODIFICATION;
4371 wakeup((caddr_t)&tdcp->c_flag);
4372 }
4373
4374 if (took_trunc_lock) {
4375 hfs_unlock_truncate(VTOC(tvp), HFS_LOCK_DEFAULT);
4376 }
4377
4378 hfs_unlockfour(fdcp, fcp, tdcp, tcp);
4379
4380 /* Now vnode_put the resource fork vnodes if necessary */
4381 if (tvp_rsrc) {
4382 vnode_put(tvp_rsrc);
4383 tvp_rsrc = NULL;
4384 }
4385
4386 /* After tvp is removed the only acceptable error is EIO */
4387 if (error && tvp_deleted)
4388 error = EIO;
4389
4390 if (emit_rename && is_tracked) {
4391 if (error) {
4392 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_FAILED_OP | NAMESPACE_HANDLER_TRACK_EVENT);
4393 } else {
4394 resolve_nspace_item(fvp, NAMESPACE_HANDLER_RENAME_SUCCESS_OP | NAMESPACE_HANDLER_TRACK_EVENT);
4395 }
4396 }
4397
4398 return (error);
4399 }
4400
4401
4402 /*
4403 * Make a directory.
4404 */
4405 int
4406 hfs_vnop_mkdir(struct vnop_mkdir_args *ap)
4407 {
4408 /***** HACK ALERT ********/
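	/* MAKEENTRY requests that the new directory's name be added to the name cache. */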
4409 ap->a_cnp->cn_flags |= MAKEENTRY;
4410 return hfs_makenode(ap->a_dvp, ap->a_vpp, ap->a_cnp, ap->a_vap, ap->a_context);
4411 }
4412
4413
4414 /*
4415 * Create a symbolic link.
4416 */
4417 int
4418 hfs_vnop_symlink(struct vnop_symlink_args *ap)
4419 {
4420 struct vnode **vpp = ap->a_vpp;
4421 struct vnode *dvp = ap->a_dvp;
4422 struct vnode *vp = NULL;
4423 struct cnode *cp = NULL;
4424 struct hfsmount *hfsmp;
4425 struct filefork *fp;
4426 struct buf *bp = NULL;
4427 char *datap;
4428 int started_tr = 0;
4429 u_int32_t len;
4430 int error;
4431
4432 /* HFS standard disks don't support symbolic links */
4433 if (VTOVCB(dvp)->vcbSigWord != kHFSPlusSigWord)
4434 return (ENOTSUP);
4435
4436 /* Check for empty target name */
4437 if (ap->a_target[0] == 0)
4438 return (EINVAL);
4439
4440 hfsmp = VTOHFS(dvp);
4441 len = strlen(ap->a_target);
4442
4443 /* Check for free space */
4444 if (((u_int64_t)hfs_freeblks(hfsmp, 0) * (u_int64_t)hfsmp->blockSize) < len) {
4445 return (ENOSPC);
4446 }
4447
4448 /* Create the vnode */
4449 ap->a_vap->va_mode |= S_IFLNK;
4450 if ((error = hfs_makenode(dvp, vpp, ap->a_cnp, ap->a_vap, ap->a_context))) {
4451 goto out;
4452 }
4453 vp = *vpp;
4454 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT))) {
4455 goto out;
4456 }
4457 cp = VTOC(vp);
4458 fp = VTOF(vp);
4459
4460 if (cp->c_flag & (C_NOEXISTS | C_DELETED)) {
4461 goto out;
4462 }
4463
4464 #if QUOTA
4465 (void)hfs_getinoquota(cp);
4466 #endif /* QUOTA */
4467
4468 if ((error = hfs_start_transaction(hfsmp)) != 0) {
4469 goto out;
4470 }
4471 started_tr = 1;
4472
4473 /*
4474 * Allocate space for the link.
4475 *
4476 * Since we're already inside a transaction,
4477 * tell hfs_truncate to skip the ubc_setsize.
4478 *
4479 * Don't need truncate lock since a symlink is treated as a system file.
4480 */
4481 error = hfs_truncate(vp, len, IO_NOZEROFILL, 1, 0, ap->a_context);
4482
4483 /* On errors, remove the symlink file */
4484 if (error) {
4485 /*
4486 * End the transaction so we don't re-take the cnode lock
4487 * below while inside a transaction (lock order violation).
4488 */
4489 hfs_end_transaction(hfsmp);
4490
4491 /* hfs_removefile() requires holding the truncate lock */
4492 hfs_unlock(cp);
4493 hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
4494 hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
4495
4496 if (hfs_start_transaction(hfsmp) != 0) {
4497 started_tr = 0;
4498 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
4499 goto out;
4500 }
4501
4502 (void) hfs_removefile(dvp, vp, ap->a_cnp, 0, 0, 0, NULL, 0);
4503 hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
4504 goto out;
4505 }
4506
4507 /* Write the link to disk */
4508 bp = buf_getblk(vp, (daddr64_t)0, roundup((int)fp->ff_size, hfsmp->hfs_physical_block_size),
4509 0, 0, BLK_META);
4510 if (hfsmp->jnl) {
4511 journal_modify_block_start(hfsmp->jnl, bp);
4512 }
4513 datap = (char *)buf_dataptr(bp);
4514 bzero(datap, buf_size(bp));
4515 bcopy(ap->a_target, datap, len);
4516
4517 if (hfsmp->jnl) {
4518 journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4519 } else {
4520 buf_bawrite(bp);
4521 }
4522 /*
4523 * We deferred the ubc_setsize for hfs_truncate
4524 * since we were inside a transaction.
4525 *
4526 * We don't need to drop the cnode lock here
4527 * since this is a symlink.
4528 */
4529 ubc_setsize(vp, len);
4530 out:
4531 if (started_tr)
4532 hfs_end_transaction(hfsmp);
4533 if ((cp != NULL) && (vp != NULL)) {
4534 hfs_unlock(cp);
4535 }
4536 if (error) {
4537 if (vp) {
4538 vnode_put(vp);
4539 }
4540 *vpp = NULL;
4541 }
4542 return (error);
4543 }
4544
4545
4546 /* structures to hold a "." or ".." directory entry */
4547 struct hfs_stddotentry {
4548 u_int32_t d_fileno; /* unique file number */
4549 u_int16_t d_reclen; /* length of this structure */
4550 u_int8_t d_type; /* dirent file type */
4551 u_int8_t d_namlen; /* len of filename */
4552 char d_name[4]; /* "." or ".." */
4553 };
4554
4555 struct hfs_extdotentry {
4556 u_int64_t d_fileno; /* unique file number */
4557 u_int64_t d_seekoff; /* seek offset (optional, used by servers) */
4558 u_int16_t d_reclen; /* length of this structure */
4559 u_int16_t d_namlen; /* len of filename */
4560 u_int8_t d_type; /* dirent file type */
4561 u_char d_name[3]; /* "." or ".." */
4562 };
4563
4564 typedef union {
4565 struct hfs_stddotentry std;
4566 struct hfs_extdotentry ext;
4567 } hfs_dotentry_t;
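/*
 * The extended layout is used when the caller passes VNODE_READDIR_EXTENDED
 * in a_flags (see the 'extended' flag in hfs_vnop_readdir below); otherwise
 * the standard layout is used.
 */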
4568
4569 /*
4570 * hfs_vnop_readdir reads directory entries into the buffer pointed
4571 * to by uio, in a filesystem independent format. Up to uio_resid
4572 * bytes of data can be transferred. The data in the buffer is a
4573 * series of packed dirent structures where each one contains the
4574 * following entries:
4575 *
4576 * u_int32_t d_fileno; // file number of entry
4577 * u_int16_t d_reclen; // length of this record
4578 * u_int8_t d_type; // file type
4579 * u_int8_t d_namlen; // length of string in d_name
4580 * char d_name[MAXNAMELEN+1]; // null terminated file name
4581 *
4582 * The current position (uio_offset) refers to the next block of
4583 * entries. The offset can only be set to a value previously
4584 * returned by hfs_vnop_readdir or zero. This offset does not have
4585 * to match the number of bytes returned (in uio_resid).
4586 *
4587 * In fact, the offset used by HFS is essentially an index (26 bits)
4588 * with a tag (6 bits). The tag is for associating the next request
4589 * with the current request. This enables us to have multiple threads
4590 * reading the directory while the directory is also being modified.
4591 *
4592 * Each tag/index pair is tied to a unique directory hint. The hint
4593 * contains information (filename) needed to build the catalog b-tree
4594 * key for finding the next set of entries.
4595 *
4596 * If the directory is marked as deleted-but-in-use (cp->c_flag & C_DELETED),
4597 * do NOT synthesize entries for "." and "..".
4598 */
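/*
 * Concretely, the offset is composed as
 *
 *     offset = tag | (index + 2)
 *
 * where the tag occupies the bits above HFS_INDEX_BITS and the low bits hold
 * the catalog directory index biased by 2 (to account for the synthesized
 * "." and ".." entries). The body below recovers the pieces with
 *
 *     index = (offset & HFS_INDEX_MASK) - 2;
 *     tag   = offset & ~HFS_INDEX_MASK;
 */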
4599 int
4600 hfs_vnop_readdir(ap)
4601 struct vnop_readdir_args /* {
4602 vnode_t a_vp;
4603 uio_t a_uio;
4604 int a_flags;
4605 int *a_eofflag;
4606 int *a_numdirent;
4607 vfs_context_t a_context;
4608 } */ *ap;
4609 {
4610 struct vnode *vp = ap->a_vp;
4611 uio_t uio = ap->a_uio;
4612 struct cnode *cp;
4613 struct hfsmount *hfsmp;
4614 directoryhint_t *dirhint = NULL;
4615 directoryhint_t localhint;
4616 off_t offset;
4617 off_t startoffset;
4618 int error = 0;
4619 int eofflag = 0;
4620 user_addr_t user_start = 0;
4621 user_size_t user_len = 0;
4622 int index;
4623 unsigned int tag;
4624 int items;
4625 int lockflags;
4626 int extended;
4627 int nfs_cookies;
4628 cnid_t cnid_hint = 0;
4629
4630 items = 0;
4631 startoffset = offset = uio_offset(uio);
4632 extended = (ap->a_flags & VNODE_READDIR_EXTENDED);
4633 nfs_cookies = extended && (ap->a_flags & VNODE_READDIR_REQSEEKOFF);
4634
4635 /* Sanity check the uio data. */
4636 if (uio_iovcnt(uio) > 1)
4637 return (EINVAL);
4638
4639 if (VTOC(vp)->c_bsdflags & UF_COMPRESSED) {
4640 int compressed = hfs_file_is_compressed(VTOC(vp), 0); /* 0 == take the cnode lock */
4641 if (VTOCMP(vp) != NULL && !compressed) {
4642 error = check_for_dataless_file(vp, NAMESPACE_HANDLER_READ_OP);
4643 if (error) {
4644 return error;
4645 }
4646 }
4647 }
4648
4649 cp = VTOC(vp);
4650 hfsmp = VTOHFS(vp);
4651
4652 /* Note that the dirhint calls require an exclusive lock. */
4653 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
4654 return (error);
4655
4656 /* Pick up cnid hint (if any). */
4657 if (nfs_cookies) {
4658 cnid_hint = (cnid_t)(uio_offset(uio) >> 32);
4659 uio_setoffset(uio, uio_offset(uio) & 0x00000000ffffffffLL);
4660 if (cnid_hint == INT_MAX) { /* searching past the last item */
4661 eofflag = 1;
4662 goto out;
4663 }
4664 }
4665 /*
4666 * Synthesize entries for "." and "..", unless the directory has
4667 * been deleted, but not closed yet (lazy delete in progress).
4668 */
4669 if (offset == 0 && !(cp->c_flag & C_DELETED)) {
4670 hfs_dotentry_t dotentry[2];
4671 size_t uiosize;
4672
4673 if (extended) {
4674 struct hfs_extdotentry *entry = &dotentry[0].ext;
4675
4676 entry->d_fileno = cp->c_cnid;
4677 entry->d_reclen = sizeof(struct hfs_extdotentry);
4678 entry->d_type = DT_DIR;
4679 entry->d_namlen = 1;
4680 entry->d_name[0] = '.';
4681 entry->d_name[1] = '\0';
4682 entry->d_name[2] = '\0';
4683 entry->d_seekoff = 1;
4684
4685 ++entry;
4686 entry->d_fileno = cp->c_parentcnid;
4687 entry->d_reclen = sizeof(struct hfs_extdotentry);
4688 entry->d_type = DT_DIR;
4689 entry->d_namlen = 2;
4690 entry->d_name[0] = '.';
4691 entry->d_name[1] = '.';
4692 entry->d_name[2] = '\0';
4693 entry->d_seekoff = 2;
4694 uiosize = 2 * sizeof(struct hfs_extdotentry);
4695 } else {
4696 struct hfs_stddotentry *entry = &dotentry[0].std;
4697
4698 entry->d_fileno = cp->c_cnid;
4699 entry->d_reclen = sizeof(struct hfs_stddotentry);
4700 entry->d_type = DT_DIR;
4701 entry->d_namlen = 1;
4702 *(int *)&entry->d_name[0] = 0;
4703 entry->d_name[0] = '.';
4704
4705 ++entry;
4706 entry->d_fileno = cp->c_parentcnid;
4707 entry->d_reclen = sizeof(struct hfs_stddotentry);
4708 entry->d_type = DT_DIR;
4709 entry->d_namlen = 2;
4710 *(int *)&entry->d_name[0] = 0;
4711 entry->d_name[0] = '.';
4712 entry->d_name[1] = '.';
4713 uiosize = 2 * sizeof(struct hfs_stddotentry);
4714 }
4715 if ((error = uiomove((caddr_t)&dotentry, uiosize, uio))) {
4716 goto out;
4717 }
4718 offset += 2;
4719 }
4720
4721 /* If there are no real entries then we're done. */
4722 if (cp->c_entries == 0) {
4723 error = 0;
4724 eofflag = 1;
4725 uio_setoffset(uio, offset);
4726 goto seekoffcalc;
4727 }
4728
4729 //
4730 // We have to lock the user's buffer here so that we won't
4731 // fault on it after we've acquired a shared lock on the
4732 // catalog file. The issue is that you can get a 3-way
4733 // deadlock if someone else starts a transaction and then
4734 // tries to lock the catalog file but can't because we're
4735 // here and we can't service our page fault because VM is
4736 // blocked trying to start a transaction as a result of
4737 // trying to free up pages for our page fault. It's messy
4738 // but it does happen on dual-processors that are paging
4739 // heavily (see radar 3082639 for more info). By locking
4740 // the buffer up-front we prevent ourselves from faulting
4741 // while holding the shared catalog file lock.
4742 //
4743 // Fortunately this and hfs_search() are the only two places
4744 // currently (10/30/02) that can fault on user data with a
4745 // shared lock on the catalog file.
4746 //
4747 if (hfsmp->jnl && uio_isuserspace(uio)) {
4748 user_start = uio_curriovbase(uio);
4749 user_len = uio_curriovlen(uio);
4750
4751 if ((error = vslock(user_start, user_len)) != 0) {
4752 user_start = 0;
4753 goto out;
4754 }
4755 }
4756 /* Convert offset into a catalog directory index. */
4757 index = (offset & HFS_INDEX_MASK) - 2;
4758 tag = offset & ~HFS_INDEX_MASK;
4759
4760 /* Lock catalog during cat_findname and cat_getdirentries. */
4761 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
4762
4763 /* When called from NFS, try and resolve a cnid hint. */
4764 if (nfs_cookies && cnid_hint != 0) {
4765 if (cat_findname(hfsmp, cnid_hint, &localhint.dh_desc) == 0) {
4766 if ( localhint.dh_desc.cd_parentcnid == cp->c_fileid) {
4767 localhint.dh_index = index - 1;
4768 localhint.dh_time = 0;
4769 bzero(&localhint.dh_link, sizeof(localhint.dh_link));
4770 dirhint = &localhint; /* don't forget to release the descriptor */
4771 } else {
4772 cat_releasedesc(&localhint.dh_desc);
4773 }
4774 }
4775 }
4776
4777 /* Get a directory hint (cnode must be locked exclusive) */
4778 if (dirhint == NULL) {
4779 dirhint = hfs_getdirhint(cp, ((index - 1) & HFS_INDEX_MASK) | tag, 0);
4780
4781 /* Hide tag from catalog layer. */
4782 dirhint->dh_index &= HFS_INDEX_MASK;
4783 if (dirhint->dh_index == HFS_INDEX_MASK) {
4784 dirhint->dh_index = -1;
4785 }
4786 }
4787
4788 if (index == 0) {
4789 dirhint->dh_threadhint = cp->c_dirthreadhint;
4790 }
4791 else {
4792 /*
4793 * If we have a non-zero index, there is a possibility that during the last
4794 * call to hfs_vnop_readdir we hit EOF for this directory. If that is the case
4795 * then we don't want to return any new entries for the caller. Just return 0
4796 * items, mark the eofflag, and bail out. Because we won't have done any work, the
4797 * code at the end of the function will release the dirhint for us.
4798 *
4799 * Don't forget to unlock the catalog lock on the way out, too.
4800 */
4801 if (dirhint->dh_desc.cd_flags & CD_EOF) {
4802 error = 0;
4803 eofflag = 1;
4804 uio_setoffset(uio, startoffset);
4805 hfs_systemfile_unlock (hfsmp, lockflags);
4806
4807 goto seekoffcalc;
4808 }
4809 }
4810
4811 /* Pack the buffer with dirent entries. */
4812 error = cat_getdirentries(hfsmp, cp->c_entries, dirhint, uio, ap->a_flags, &items, &eofflag);
4813
4814 if (index == 0 && error == 0) {
4815 cp->c_dirthreadhint = dirhint->dh_threadhint;
4816 }
4817
4818 hfs_systemfile_unlock(hfsmp, lockflags);
4819
4820 if (error != 0) {
4821 goto out;
4822 }
4823
4824 /* Get index to the next item */
4825 index += items;
4826
4827 if (items >= (int)cp->c_entries) {
4828 eofflag = 1;
4829 }
4830
4831 /* Convert catalog directory index back into an offset. */
4832 while (tag == 0)
4833 tag = (++cp->c_dirhinttag) << HFS_INDEX_BITS;
4834 uio_setoffset(uio, (index + 2) | tag);
4835 dirhint->dh_index |= tag;
4836
4837 seekoffcalc:
4838 cp->c_touch_acctime = TRUE;
4839
4840 if (ap->a_numdirent) {
4841 if (startoffset == 0)
4842 items += 2;
4843 *ap->a_numdirent = items;
4844 }
4845
4846 out:
4847 if (user_start) {
4848 vsunlock(user_start, user_len, TRUE);
4849 }
4850 /* If we didn't do anything then go ahead and dump the hint. */
4851 if ((dirhint != NULL) &&
4852 (dirhint != &localhint) &&
4853 (uio_offset(uio) == startoffset)) {
4854 hfs_reldirhint(cp, dirhint);
4855 eofflag = 1;
4856 }
4857 if (ap->a_eofflag) {
4858 *ap->a_eofflag = eofflag;
4859 }
4860 if (dirhint == &localhint) {
4861 cat_releasedesc(&localhint.dh_desc);
4862 }
4863 hfs_unlock(cp);
4864 return (error);
4865 }
4866
4867
4868 /*
4869 * Read contents of a symbolic link.
4870 */
4871 int
4872 hfs_vnop_readlink(ap)
4873 struct vnop_readlink_args /* {
4874 struct vnode *a_vp;
4875 struct uio *a_uio;
4876 vfs_context_t a_context;
4877 } */ *ap;
4878 {
4879 struct vnode *vp = ap->a_vp;
4880 struct cnode *cp;
4881 struct filefork *fp;
4882 int error;
4883
4884 if (!vnode_islnk(vp))
4885 return (EINVAL);
4886
4887 if ((error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
4888 return (error);
4889 cp = VTOC(vp);
4890 fp = VTOF(vp);
4891
4892 /* Zero length sym links are not allowed */
4893 if (fp->ff_size == 0 || fp->ff_size > MAXPATHLEN) {
4894 error = EINVAL;
4895 goto exit;
4896 }
4897
4898 /* Cache the path so we don't waste buffer cache resources */
4899 if (fp->ff_symlinkptr == NULL) {
4900 struct buf *bp = NULL;
4901
4902 MALLOC(fp->ff_symlinkptr, char *, fp->ff_size, M_TEMP, M_WAITOK);
4903 if (fp->ff_symlinkptr == NULL) {
4904 error = ENOMEM;
4905 goto exit;
4906 }
4907 error = (int)buf_meta_bread(vp, (daddr64_t)0,
4908 roundup((int)fp->ff_size, VTOHFS(vp)->hfs_physical_block_size),
4909 vfs_context_ucred(ap->a_context), &bp);
4910 if (error) {
4911 if (bp)
4912 buf_brelse(bp);
4913 if (fp->ff_symlinkptr) {
4914 FREE(fp->ff_symlinkptr, M_TEMP);
4915 fp->ff_symlinkptr = NULL;
4916 }
4917 goto exit;
4918 }
4919 bcopy((char *)buf_dataptr(bp), fp->ff_symlinkptr, (size_t)fp->ff_size);
4920
4921 if (VTOHFS(vp)->jnl && (buf_flags(bp) & B_LOCKED) == 0) {
4922 buf_markinvalid(bp); /* data no longer needed */
4923 }
4924 buf_brelse(bp);
4925 }
4926 error = uiomove((caddr_t)fp->ff_symlinkptr, (int)fp->ff_size, ap->a_uio);
4927
4928 /*
4929 * Keep track of blocks read
4930 */
4931 if ((VTOHFS(vp)->hfc_stage == HFC_RECORDING) && (error == 0)) {
4932
4933 /*
4934 * If this file hasn't been seen since the start of
4935 * the current sampling period then start over.
4936 */
4937 if (cp->c_atime < VTOHFS(vp)->hfc_timebase)
4938 VTOF(vp)->ff_bytesread = fp->ff_size;
4939 else
4940 VTOF(vp)->ff_bytesread += fp->ff_size;
4941
4942 // if (VTOF(vp)->ff_bytesread > fp->ff_size)
4943 // cp->c_touch_acctime = TRUE;
4944 }
4945
4946 exit:
4947 hfs_unlock(cp);
4948 return (error);
4949 }
4950
4951
4952 /*
4953 * Get configurable pathname variables.
4954 */
4955 int
4956 hfs_vnop_pathconf(ap)
4957 struct vnop_pathconf_args /* {
4958 struct vnode *a_vp;
4959 int a_name;
4960 int *a_retval;
4961 vfs_context_t a_context;
4962 } */ *ap;
4963 {
4964
4965 int std_hfs = (VTOHFS(ap->a_vp)->hfs_flags & HFS_STANDARD);
4966 switch (ap->a_name) {
4967 case _PC_LINK_MAX:
4968 if (std_hfs == 0){
4969 *ap->a_retval = HFS_LINK_MAX;
4970 }
4971 #if CONFIG_HFS_STD
4972 else {
4973 *ap->a_retval = 1;
4974 }
4975 #endif
4976 break;
4977 case _PC_NAME_MAX:
4978 if (std_hfs == 0) {
4979 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
4980 }
4981 #if CONFIG_HFS_STD
4982 else {
4983 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
4984 }
4985 #endif
4986 break;
4987 case _PC_PATH_MAX:
4988 *ap->a_retval = PATH_MAX; /* 1024 */
4989 break;
4990 case _PC_PIPE_BUF:
4991 *ap->a_retval = PIPE_BUF;
4992 break;
4993 case _PC_CHOWN_RESTRICTED:
4994 *ap->a_retval = 200112; /* _POSIX_CHOWN_RESTRICTED */
4995 break;
4996 case _PC_NO_TRUNC:
4997 *ap->a_retval = 200112; /* _POSIX_NO_TRUNC */
4998 break;
4999 case _PC_NAME_CHARS_MAX:
5000 if (std_hfs == 0) {
5001 *ap->a_retval = kHFSPlusMaxFileNameChars; /* 255 */
5002 }
5003 #if CONFIG_HFS_STD
5004 else {
5005 *ap->a_retval = kHFSMaxFileNameChars; /* 31 */
5006 }
5007 #endif
5008 break;
5009 case _PC_CASE_SENSITIVE:
5010 if (VTOHFS(ap->a_vp)->hfs_flags & HFS_CASE_SENSITIVE)
5011 *ap->a_retval = 1;
5012 else
5013 *ap->a_retval = 0;
5014 break;
5015 case _PC_CASE_PRESERVING:
5016 *ap->a_retval = 1;
5017 break;
5018 case _PC_FILESIZEBITS:
5019 /* number of bits to store max file size */
5020 if (std_hfs == 0) {
5021 *ap->a_retval = 64;
5022 }
5023 #if CONFIG_HFS_STD
5024 else {
5025 *ap->a_retval = 32;
5026 }
5027 #endif
5028 break;
5029 case _PC_XATTR_SIZE_BITS:
5030 /* Number of bits to store maximum extended attribute size */
5031 *ap->a_retval = HFS_XATTR_SIZE_BITS;
5032 break;
5033 default:
5034 return (EINVAL);
5035 }
5036
5037 return (0);
5038 }
5039
5040
5041 /*
5042 * Update a cnode's on-disk metadata.
5043 *
5044 * If waitfor is set, then wait for the disk write of
5045 * the node to complete.
5046 *
5047 * The cnode must be locked exclusive
5048 */
5049 int
5050 hfs_update(struct vnode *vp, __unused int waitfor)
5051 {
5052 struct cnode *cp = VTOC(vp);
5053 struct proc *p;
5054 struct cat_fork *dataforkp = NULL;
5055 struct cat_fork *rsrcforkp = NULL;
5056 struct cat_fork datafork;
5057 struct cat_fork rsrcfork;
5058 struct hfsmount *hfsmp;
5059 int lockflags;
5060 int error;
5061 uint32_t tstate = 0;
5062
5063 p = current_proc();
5064 hfsmp = VTOHFS(vp);
5065
5066 if (((vnode_issystem(vp) && (cp->c_cnid < kHFSFirstUserCatalogNodeID))) ||
5067 hfsmp->hfs_catalog_vp == NULL){
5068 return (0);
5069 }
5070 if ((hfsmp->hfs_flags & HFS_READ_ONLY) || (cp->c_mode == 0)) {
5071 cp->c_flag &= ~C_MODIFIED;
5072 cp->c_touch_acctime = 0;
5073 cp->c_touch_chgtime = 0;
5074 cp->c_touch_modtime = 0;
5075 return (0);
5076 }
5077 if (kdebug_enable) {
5078 if (cp->c_touch_acctime)
5079 tstate |= DBG_HFS_UPDATE_ACCTIME;
5080 if (cp->c_touch_modtime)
5081 tstate |= DBG_HFS_UPDATE_MODTIME;
5082 if (cp->c_touch_chgtime)
5083 tstate |= DBG_HFS_UPDATE_CHGTIME;
5084
5085 if (cp->c_flag & C_MODIFIED)
5086 tstate |= DBG_HFS_UPDATE_MODIFIED;
5087 if (cp->c_flag & C_FORCEUPDATE)
5088 tstate |= DBG_HFS_UPDATE_FORCE;
5089 if (cp->c_flag & C_NEEDS_DATEADDED)
5090 tstate |= DBG_HFS_UPDATE_DATEADDED;
5091 }
5092 hfs_touchtimes(hfsmp, cp);
5093
5094 /* Nothing to update. */
5095 if ((cp->c_flag & (C_MODIFIED | C_FORCEUPDATE)) == 0) {
5096 return (0);
5097 }
5098
5099 if (cp->c_datafork)
5100 dataforkp = &cp->c_datafork->ff_data;
5101 if (cp->c_rsrcfork)
5102 rsrcforkp = &cp->c_rsrcfork->ff_data;
5103
5104 /*
5105 * For delayed allocations, updates are
5106 * postponed until an fsync or the file
5107 * gets written to disk.
5108 *
5109 * Deleted files can defer meta data updates until inactive.
5110 *
5111 * If we're ever called with the C_FORCEUPDATE flag though
5112 * we have to do the update.
5113 */
5114 if (ISSET(cp->c_flag, C_FORCEUPDATE) == 0 &&
5115 (ISSET(cp->c_flag, C_DELETED) ||
5116 (dataforkp && cp->c_datafork->ff_unallocblocks) ||
5117 (rsrcforkp && cp->c_rsrcfork->ff_unallocblocks))) {
5118 // cp->c_flag &= ~(C_ACCESS | C_CHANGE | C_UPDATE);
5119 cp->c_flag |= C_MODIFIED;
5120
5121 return (0);
5122 }
5123
5124 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_START, vp, tstate, 0, 0, 0);
5125
5126 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5127
5128 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, -1, 0);
5129 return error;
5130 }
5131
5132 /*
5133 * Modify the values passed to cat_update based on whether or not
5134 * the file has invalid ranges or borrowed blocks.
5135 */
5136 if (dataforkp) {
5137 off_t numbytes = 0;
5138
5139 /* copy the datafork into a temporary so we don't pollute the cnode's copy */
5140 bcopy(dataforkp, &datafork, sizeof(datafork));
5141 dataforkp = &datafork;
5142
5143 /*
5144 * If there are borrowed blocks, ensure that they are subtracted
5145 * from the total block count before writing the cnode entry to disk.
5146 * Only extents that have actually been marked allocated in the bitmap
5147 * should be reflected in the total block count for this fork.
5148 */
5149 if (cp->c_datafork->ff_unallocblocks != 0) {
5150 // make sure that we don't assign a negative block count
5151 if (cp->c_datafork->ff_blocks < cp->c_datafork->ff_unallocblocks) {
5152 panic("hfs: ff_blocks %d is less than unalloc blocks %d\n",
5153 cp->c_datafork->ff_blocks, cp->c_datafork->ff_unallocblocks);
5154 }
5155
5156 /* Also cap the LEOF to the total number of bytes that are allocated. */
5157 datafork.cf_blocks = (cp->c_datafork->ff_blocks - cp->c_datafork->ff_unallocblocks);
5158 datafork.cf_size = datafork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
5159 }
5160
5161 /*
5162 * For files with invalid ranges (holes) the on-disk
5163 * field representing the size of the file (cf_size)
5164 * must be no larger than the start of the first hole.
5165 * However, note that if the first invalid range exists
5166 * solely within borrowed blocks, then our LEOF and block
5167 * count should both be zero. As a result, set it to the
5168 * min of the current cf_size and the start of the first
5169 * invalid range, because it may have already been reduced
5170 * to zero by the borrowed blocks check above.
5171 */
5172 if (!TAILQ_EMPTY(&cp->c_datafork->ff_invalidranges)) {
5173 numbytes = TAILQ_FIRST(&cp->c_datafork->ff_invalidranges)->rl_start;
5174 datafork.cf_size = MIN((numbytes), (datafork.cf_size));
5175 }
5176 }
5177
5178 /*
5179 * For resource forks with delayed allocations, make sure
5180 * the block count and file size match the number of blocks
5181 * actually allocated to the file on disk.
5182 */
5183 if (rsrcforkp && (cp->c_rsrcfork->ff_unallocblocks != 0)) {
5184 bcopy(rsrcforkp, &rsrcfork, sizeof(rsrcfork));
5185 rsrcfork.cf_blocks = (cp->c_rsrcfork->ff_blocks - cp->c_rsrcfork->ff_unallocblocks);
5186 rsrcfork.cf_size = rsrcfork.cf_blocks * HFSTOVCB(hfsmp)->blockSize;
5187 rsrcforkp = &rsrcfork;
5188 }
5189 if (kdebug_enable) {
5190 long dbg_parms[NUMPARMS];
5191 int dbg_namelen;
5192
5193 dbg_namelen = NUMPARMS * sizeof(long);
5194 vn_getpath(vp, (char *)dbg_parms, &dbg_namelen);
5195
5196 if (dbg_namelen < (int)sizeof(dbg_parms))
5197 memset((char *)dbg_parms + dbg_namelen, 0, sizeof(dbg_parms) - dbg_namelen);
5198
5199 kdebug_lookup_gen_events(dbg_parms, dbg_namelen, (void *)vp, TRUE);
5200 }
5201
5202 /*
5203 * Lock the Catalog b-tree file.
5204 */
5205 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
5206
5207 /* XXX - waitfor is not enforced */
5208 error = cat_update(hfsmp, &cp->c_desc, &cp->c_attr, dataforkp, rsrcforkp);
5209
5210 hfs_systemfile_unlock(hfsmp, lockflags);
5211
5212 /* After the updates are finished, clear the flags */
5213 cp->c_flag &= ~(C_MODIFIED | C_FORCEUPDATE);
5214
5215 hfs_end_transaction(hfsmp);
5216
5217 KERNEL_DEBUG_CONSTANT(0x3018000 | DBG_FUNC_END, vp, tstate, error, 0, 0);
5218
5219 return (error);
5220 }
5221
5222 /*
5223 * Allocate a new node
5224 * Note - Function does not create and return a vnode for whiteout creation.
5225 */
5226 int
5227 hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5228 struct vnode_attr *vap, vfs_context_t ctx)
5229 {
5230 struct cnode *cp = NULL;
5231 struct cnode *dcp = NULL;
5232 struct vnode *tvp;
5233 struct hfsmount *hfsmp;
5234 struct cat_desc in_desc, out_desc;
5235 struct cat_attr attr;
5236 struct timeval tv;
5237 int lockflags;
5238 int error, started_tr = 0;
5239 enum vtype vnodetype;
5240 int mode;
5241 int newvnode_flags = 0;
5242 u_int32_t gnv_flags = 0;
5243 int protectable_target = 0;
5244 int nocache = 0;
5245
5246 #if CONFIG_PROTECT
5247 struct cprotect *entry = NULL;
5248 int32_t cp_class = -1;
5249 if (VATTR_IS_ACTIVE(vap, va_dataprotect_class)) {
5250 cp_class = (int32_t)vap->va_dataprotect_class;
5251 }
5252 int protected_mount = 0;
5253 #endif
5254
5255
5256 if ((error = hfs_lock(VTOC(dvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
5257 return (error);
5258
5259 /* set the cnode pointer only after successfully acquiring lock */
5260 dcp = VTOC(dvp);
5261
5262 /* Don't allow creation of new entries in open-unlinked directories */
5263 if ((error = hfs_checkdeleted(dcp))) {
5264 hfs_unlock(dcp);
5265 return error;
5266 }
5267
5268 dcp->c_flag |= C_DIR_MODIFICATION;
5269
5270 hfsmp = VTOHFS(dvp);
5271
5272 *vpp = NULL;
5273 tvp = NULL;
5274 out_desc.cd_flags = 0;
5275 out_desc.cd_nameptr = NULL;
5276
5277 vnodetype = vap->va_type;
5278 if (vnodetype == VNON)
5279 vnodetype = VREG;
5280 mode = MAKEIMODE(vnodetype, vap->va_mode);
5281
5282 if (S_ISDIR (mode) || S_ISREG (mode)) {
5283 protectable_target = 1;
5284 }
5285
5286
5287 /* Check if we're out of usable disk space. */
5288 if ((hfs_freeblks(hfsmp, 1) == 0) && (vfs_context_suser(ctx) != 0)) {
5289 error = ENOSPC;
5290 goto exit;
5291 }
5292
5293 microtime(&tv);
5294
5295 /* Setup the default attributes */
5296 bzero(&attr, sizeof(attr));
5297 attr.ca_mode = mode;
5298 attr.ca_linkcount = 1;
5299 if (VATTR_IS_ACTIVE(vap, va_rdev)) {
5300 attr.ca_rdev = vap->va_rdev;
5301 }
5302 if (VATTR_IS_ACTIVE(vap, va_create_time)) {
5303 VATTR_SET_SUPPORTED(vap, va_create_time);
5304 attr.ca_itime = vap->va_create_time.tv_sec;
5305 } else {
5306 attr.ca_itime = tv.tv_sec;
5307 }
5308 #if CONFIG_HFS_STD
5309 if ((hfsmp->hfs_flags & HFS_STANDARD) && gTimeZone.tz_dsttime) {
5310 attr.ca_itime += 3600; /* Same as what hfs_update does */
5311 }
5312 #endif
5313 attr.ca_atime = attr.ca_ctime = attr.ca_mtime = attr.ca_itime;
5314 attr.ca_atimeondisk = attr.ca_atime;
5315 if (VATTR_IS_ACTIVE(vap, va_flags)) {
5316 VATTR_SET_SUPPORTED(vap, va_flags);
5317 attr.ca_flags = vap->va_flags;
5318 }
5319
5320 /*
5321 * HFS+ only: all files get ThreadExists
5322 * HFSX only: dirs get HasFolderCount
5323 */
5324 if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
5325 if (vnodetype == VDIR) {
5326 if (hfsmp->hfs_flags & HFS_FOLDERCOUNT)
5327 attr.ca_recflags = kHFSHasFolderCountMask;
5328 } else {
5329 attr.ca_recflags = kHFSThreadExistsMask;
5330 }
5331 }
5332
5333 #if CONFIG_PROTECT
5334 if (cp_fs_protected(hfsmp->hfs_mp)) {
5335 protected_mount = 1;
5336 }
5337 /*
5338 * On a content-protected HFS+/HFSX filesystem, files and directories
5339 * cannot be created without also creating, in the same transaction, the EA
5340 * that contains the protection class metadata and keys. As a result,
5341 * pre-set the "EAs exist" flag
5342 * on the cat_attr for protectable catalog record creations. This will
5343 * cause the cnode creation routine in hfs_getnewvnode to mark the cnode
5344 * as having EAs.
5345 */
5346 if ((protected_mount) && (protectable_target)) {
5347 attr.ca_recflags |= kHFSHasAttributesMask;
5348 /* delay entering in the namecache */
5349 nocache = 1;
5350 }
5351 #endif
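/*
 * Sketch of the content-protection ordering described above (summary added
 * for clarity, mirroring the calls made later in this function):
 *
 *   hfs_start_transaction()
 *     cat_create()                      create the catalog record
 *     cp_setxattr(..., XATTR_CREATE)    write the class EA in the same txn
 *   hfs_end_transaction()
 *   cp_generate_keys()                  keys are generated afterwards, in a
 *                                       separate transaction
 */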
5352
5353
5354 /*
5355 * Add the "date added" to the item. As noted above,
5356 * all of the dates are set to the itime.
5357 */
5358 hfs_write_dateadded (&attr, attr.ca_atime);
5359
5360 /* Initialize the gen counter to 1 */
5361 hfs_write_gencount(&attr, (uint32_t)1);
5362
5363 attr.ca_uid = vap->va_uid;
5364 attr.ca_gid = vap->va_gid;
5365 VATTR_SET_SUPPORTED(vap, va_mode);
5366 VATTR_SET_SUPPORTED(vap, va_uid);
5367 VATTR_SET_SUPPORTED(vap, va_gid);
5368
5369 #if QUOTA
5370 /* check to see if this node's creation would cause us to go over
5371 * quota. If so, abort this operation.
5372 */
5373 if (hfsmp->hfs_flags & HFS_QUOTAS) {
5374 if ((error = hfs_quotacheck(hfsmp, 1, attr.ca_uid, attr.ca_gid,
5375 vfs_context_ucred(ctx)))) {
5376 goto exit;
5377 }
5378 }
5379 #endif
5380
5381
5382 /* Tag symlinks with a type and creator. */
5383 if (vnodetype == VLNK) {
5384 struct FndrFileInfo *fip;
5385
5386 fip = (struct FndrFileInfo *)&attr.ca_finderinfo;
5387 fip->fdType = SWAP_BE32(kSymLinkFileType);
5388 fip->fdCreator = SWAP_BE32(kSymLinkCreator);
5389 }
5390 if (cnp->cn_flags & ISWHITEOUT)
5391 attr.ca_flags |= UF_OPAQUE;
5392
5393 /* Setup the descriptor */
5394 in_desc.cd_nameptr = (const u_int8_t *)cnp->cn_nameptr;
5395 in_desc.cd_namelen = cnp->cn_namelen;
5396 in_desc.cd_parentcnid = dcp->c_fileid;
5397 in_desc.cd_flags = S_ISDIR(mode) ? CD_ISDIR : 0;
5398 in_desc.cd_hint = dcp->c_childhint;
5399 in_desc.cd_encoding = 0;
5400
5401 #if CONFIG_PROTECT
5402 /*
5403 * To preserve file creation atomicity with regard to the content protection EA,
5404 * we must create the file in the catalog and then write out its EA in the same
5405 * transaction.
5406 *
5407 * We only denote the target class in this EA; key generation is not completed
5408 * until the file has been inserted into the catalog and will be done
5409 * in a separate transaction.
5410 */
5411 if ((protected_mount) && (protectable_target)) {
5412 error = cp_setup_newentry(hfsmp, dcp, cp_class, attr.ca_mode, &entry);
5413 if (error) {
5414 goto exit;
5415 }
5416 }
5417 #endif
5418
5419 if ((error = hfs_start_transaction(hfsmp)) != 0) {
5420 goto exit;
5421 }
5422 started_tr = 1;
5423
5424 // have to also lock the attribute file because cat_create() needs
5425 // to check that any fileID it wants to use does not have orphaned
5426 // attributes in it.
5427 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_ATTRIBUTE, HFS_EXCLUSIVE_LOCK);
5428 cnid_t new_id;
5429
5430 /* Reserve some space in the Catalog file. */
5431 if ((error = cat_preflight(hfsmp, CAT_CREATE, NULL, 0))) {
5432 hfs_systemfile_unlock(hfsmp, lockflags);
5433 goto exit;
5434 }
5435
5436 if ((error = cat_acquire_cnid(hfsmp, &new_id))) {
5437 hfs_systemfile_unlock (hfsmp, lockflags);
5438 goto exit;
5439 }
5440
5441 error = cat_create(hfsmp, new_id, &in_desc, &attr, &out_desc);
5442 if (error == 0) {
5443 /* Update the parent directory */
5444 dcp->c_childhint = out_desc.cd_hint; /* Cache directory's location */
5445 dcp->c_entries++;
5446 if (vnodetype == VDIR) {
5447 INC_FOLDERCOUNT(hfsmp, dcp->c_attr);
5448 }
5449 dcp->c_dirchangecnt++;
5450 dcp->c_ctime = tv.tv_sec;
5451 dcp->c_mtime = tv.tv_sec;
5452 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
5453
5454 #if CONFIG_PROTECT
5455 /*
5456 * If we are creating a content protected file, now is when
5457 * we create the EA. We must create it in the same transaction
5458 * that creates the file. We can also guarantee that the file
5459 * MUST exist because we are still holding the catalog lock
5460 * at this point.
5461 */
5462 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
5463 error = cp_setxattr (NULL, entry, hfsmp, attr.ca_fileid, XATTR_CREATE);
5464
5465 if (error) {
5466 int delete_err;
5467 /*
5468 * If we fail the EA creation, then we need to delete the file.
5469 * Luckily, we are still holding all of the right locks.
5470 */
5471 delete_err = cat_delete (hfsmp, &out_desc, &attr);
5472 if (delete_err == 0) {
5473 /* Update the parent directory */
5474 if (dcp->c_entries > 0)
5475 dcp->c_entries--;
5476 dcp->c_dirchangecnt++;
5477 dcp->c_ctime = tv.tv_sec;
5478 dcp->c_mtime = tv.tv_sec;
5479 (void) cat_update(hfsmp, &dcp->c_desc, &dcp->c_attr, NULL, NULL);
5480 }
5481
5482 /* Emit EINVAL if we fail to create the EA */
5483 error = EINVAL;
5484 }
5485 }
5486 #endif
5487 }
5488 hfs_systemfile_unlock(hfsmp, lockflags);
5489 if (error)
5490 goto exit;
5491
5492 /* Invalidate negative cache entries in the directory */
5493 if (dcp->c_flag & C_NEG_ENTRIES) {
5494 cache_purge_negatives(dvp);
5495 dcp->c_flag &= ~C_NEG_ENTRIES;
5496 }
5497
5498 hfs_volupdate(hfsmp, vnodetype == VDIR ? VOL_MKDIR : VOL_MKFILE,
5499 (dcp->c_cnid == kHFSRootFolderID));
5500
5501 // XXXdbg
5502 // have to end the transaction here before we call hfs_getnewvnode()
5503 // because that can cause us to try and reclaim a vnode on a different
5504 // file system which could cause us to start a transaction which can
5505 // deadlock with someone on that other file system (since we could be
5506 // holding two transaction locks as well as various vnodes and we did
5507 // not obtain the locks on them in the proper order).
5508 //
5509 // NOTE: this means that if the quota check fails or we have to update
5510 // the change time on a block-special device, those changes
5511 // will happen as part of independent transactions.
5512 //
5513 if (started_tr) {
5514 hfs_end_transaction(hfsmp);
5515 started_tr = 0;
5516 }
5517
5518 #if CONFIG_PROTECT
5519 /*
5520 * At this point, we must have encountered success with writing the EA.
5521 * Destroy our temporary cprotect (which had no keys).
5522 */
5523
5524 if ((attr.ca_fileid != 0) && (protected_mount) && (protectable_target)) {
5525 cp_entry_destroy (entry);
5526 entry = NULL;
5527 }
5528 #endif
5529
5530 /* Do not create vnode for whiteouts */
5531 if (S_ISWHT(mode)) {
5532 goto exit;
5533 }
5534
5535 gnv_flags |= GNV_CREATE;
5536 if (nocache) {
5537 gnv_flags |= GNV_NOCACHE;
5538 }
5539
5540 /*
5541 * Create a vnode for the object just created.
5542 *
5543 * NOTE: Maintaining the cnode lock on the parent directory is important,
5544 * as it prevents race conditions where other threads want to look up entries
5545 * in the directory and/or add things as we are in the process of creating
5546 * the vnode below. However, this has the potential for causing a
5547 * double lock panic when dealing with shadow files on an HFS boot partition.
5548 * The panic could occur if we are not cleaning up after ourselves properly
5549 * when done with a shadow file or in the error cases. The error would occur if we
5550 * try to create a new vnode, and then end up reclaiming another shadow vnode to
5551 * create the new one. However, if everything is working properly, this should
5552 * be a non-issue as we would never enter that reclaim codepath.
5553 *
5554 * The cnode is locked on successful return.
5555 */
5556 error = hfs_getnewvnode(hfsmp, dvp, cnp, &out_desc, gnv_flags, &attr,
5557 NULL, &tvp, &newvnode_flags);
5558 if (error)
5559 goto exit;
5560
5561 cp = VTOC(tvp);
5562 *vpp = tvp;
5563
5564 #if CONFIG_PROTECT
5565 /*
5566 * Now that we have a vnode-in-hand, generate keys for this namespace item.
5567 * If we fail to create the keys, then attempt to delete the item from the
5568 * namespace. If we can't delete the item, that's not desirable, but it is also not fatal.
5569 * All of the places which deal with restoring/unwrapping keys must also be
5570 * prepared to encounter an entry that does not have keys.
5571 */
5572 if ((protectable_target) && (protected_mount)) {
5573 struct cprotect *keyed_entry = NULL;
5574
5575 if (cp->c_cpentry == NULL) {
5576 panic ("hfs_makenode: no cpentry for cnode (%p)", cp);
5577 }
5578
5579 error = cp_generate_keys (hfsmp, cp, cp->c_cpentry->cp_pclass, &keyed_entry);
5580 if (error == 0) {
5581 /*
5582 * Upon success, the keys were generated and written out.
5583 * Update the cp pointer in the cnode.
5584 */
5585 cp_replace_entry (cp, keyed_entry);
5586 if (nocache) {
5587 cache_enter (dvp, tvp, cnp);
5588 }
5589 }
5590 else {
5591 /* If key creation OR the setxattr failed, emit EPERM to userland */
5592 error = EPERM;
5593
5594 /*
5595 * Beware! This slightly violates the lock ordering for the
5596 * cnode/vnode 'tvp'. Ordinarily, you must acquire the truncate lock
5597 * which guards file size changes before acquiring the normal cnode lock
5598 * and calling hfs_removefile on an item.
5599 *
5600 * However, in this case, we are still holding the directory lock so
5601 * 'tvp' is not lookup-able and it was a newly created vnode so it
5602 * cannot have any content yet. The only reason we are initiating
5603 * the removefile is because we could not generate content protection keys
5604 * for this namespace item. Note also that we pass a '1' in the allow_dirs
5605 * argument for hfs_removefile because we may be creating a directory here.
5606 *
5607 * All this to say: while it is technically a lock-order violation, it is
5608 * impossible to race with another thread for this cnode, so it is safe.
5609 */
5610 int err = hfs_removefile (dvp, tvp, cnp, 0, 0, 1, NULL, 0);
5611 if (err) {
5612 printf("hfs_makenode: removefile failed (%d) for CP entry %p\n", err, tvp);
5613 }
5614
5615 /* Release the cnode lock and mark the vnode for termination */
5616 hfs_unlock (cp);
5617 err = vnode_recycle (tvp);
5618 if (err) {
5619 printf("hfs_makenode: vnode_recycle failed (%d) for CP entry %p\n", err, tvp);
5620 }
5621
5622 /* Drop the iocount on the new vnode to force reclamation/recycling */
5623 vnode_put (tvp);
5624 cp = NULL;
5625 *vpp = NULL;
5626 }
5627 }
5628 #endif
5629
5630 #if QUOTA
5631 /*
5632 * Once we create this vnode, we need to initialize its quota data
5633 * structures, if necessary. We know that it is OK to just go ahead and
5634 * initialize because we've already validated earlier (through the hfs_quotacheck
5635 * function) to see if creating this cnode/vnode would cause us to go over quota.
5636 */
5637 if (hfsmp->hfs_flags & HFS_QUOTAS) {
5638 if (cp) {
5639 /* cp could have been zeroed earlier */
5640 (void) hfs_getinoquota(cp);
5641 }
5642 }
5643 #endif
5644
5645 exit:
5646 cat_releasedesc(&out_desc);
5647
5648 #if CONFIG_PROTECT
5649 /*
5650 * We may have jumped here in error-handling various situations above.
5651 * If we haven't already dumped the temporary CP used to initialize
5652 * the file atomically, then free it now. The pointer is set to NULL
5653 * after any earlier cp_entry_destroy call, so this is safe.
5654 */
5655 if (entry) {
5656 cp_entry_destroy (entry);
5657 entry = NULL;
5658 }
5659 #endif
5660
5661 /*
5662 * Make sure we release cnode lock on dcp.
5663 */
5664 if (dcp) {
5665 dcp->c_flag &= ~C_DIR_MODIFICATION;
5666 wakeup((caddr_t)&dcp->c_flag);
5667
5668 hfs_unlock(dcp);
5669 }
5670 if (error == 0 && cp != NULL) {
5671 hfs_unlock(cp);
5672 }
5673 if (started_tr) {
5674 hfs_end_transaction(hfsmp);
5675 started_tr = 0;
5676 }
5677
5678 return (error);
5679 }
5680
5681
5682 /*
5683 * hfs_vgetrsrc acquires a resource fork vnode corresponding to the cnode that is
5684 * found in 'vp'. The rsrc fork vnode is returned with the cnode locked and iocount
5685 * on the rsrc vnode.
5686 *
5687 * *rvpp is an output argument for returning the pointer to the resource fork vnode.
5688 * In most cases, the resource fork vnode will not be set if we return an error.
5689 * However, if error_on_unlinked is set, we may have already acquired the resource fork vnode
5690 * before we discover the error (the file has gone open-unlinked). In this case only,
5691 * we may return a vnode in the output argument despite an error.
5692 *
5693 * If can_drop_lock is set, then it is safe for this function to temporarily drop
5694 * and then re-acquire the cnode lock. We may need to do this, for example, in order to
5695 * acquire an iocount or promote our lock.
5696 *
5697 * error_on_unlinked is an argument which indicates that we are to return an error if we
5698 * discover that the cnode has gone open-unlinked (i.e. C_DELETED or C_NOEXISTS
5699 * is set in the cnode flags). This is only necessary if can_drop_lock is true; otherwise
5700 * there's really no reason to double-check for errors on the cnode.
5701 */
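/*
 * Hypothetical caller sketch (illustrative only; the local variable names
 * below are assumptions, not part of this file).  The caller enters with
 * the cnode lock held; on success it must eventually vnode_put() the
 * resource fork vnode it was handed:
 *
 *	struct vnode *rvp = NULLVP;
 *	int err = hfs_vgetrsrc(hfsmp, vp, &rvp, 1, 0);
 *	if (err == 0) {
 *		... use rvp ...
 *		vnode_put(rvp);
 *	}
 */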
5702
5703 int
5704 hfs_vgetrsrc(struct hfsmount *hfsmp, struct vnode *vp, struct vnode **rvpp,
5705 int can_drop_lock, int error_on_unlinked)
5706 {
5707 struct vnode *rvp;
5708 struct vnode *dvp = NULLVP;
5709 struct cnode *cp = VTOC(vp);
5710 int error;
5711 int vid;
5712 int delete_status = 0;
5713
5714 if (vnode_vtype(vp) == VDIR) {
5715 return EINVAL;
5716 }
5717
5718 /*
5719 * Need to check the status of the cnode to validate it hasn't gone
5720 * open-unlinked on us before we can actually do work with it.
5721 */
5722 delete_status = hfs_checkdeleted(cp);
5723 if ((delete_status) && (error_on_unlinked)) {
5724 return delete_status;
5725 }
5726
5727 restart:
5728 /* Attempt to use existing vnode */
5729 if ((rvp = cp->c_rsrc_vp)) {
5730 vid = vnode_vid(rvp);
5731
5732 /*
5733 * It is not safe to hold the cnode lock when calling vnode_getwithvid()
5734 * for the alternate fork -- vnode_getwithvid() could deadlock waiting
5735 * for a VL_WANTTERM while another thread has an iocount on the alternate
5736 * fork vnode and is attempting to acquire the common cnode lock.
5737 *
5738 * But it's also not safe to drop the cnode lock when we're holding
5739 * multiple cnode locks, like during a hfs_removefile() operation
5740 * since we could lock out of order when re-acquiring the cnode lock.
5741 *
5742 * So we can only drop the lock here if it's safe to drop it -- which is
5743 * most of the time with the exception being hfs_removefile().
5744 */
5745 if (can_drop_lock)
5746 hfs_unlock(cp);
5747
5748 error = vnode_getwithvid(rvp, vid);
5749
5750 if (can_drop_lock) {
5751 (void) hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_ALLOW_NOEXISTS);
5752
5753 /*
5754 * When we relinquished our cnode lock, the cnode could have raced
5755 * with a delete and gotten deleted. If the caller did not want
5756 * us to ignore open-unlinked files, then re-check the C_DELETED
5757 * state and see if we need to return an ENOENT here because the item
5758 * got deleted in the intervening time.
5759 */
5760 if (error_on_unlinked) {
5761 if ((delete_status = hfs_checkdeleted(cp))) {
5762 /*
5763 * If error == 0, this means that we succeeded in acquiring an iocount on the
5764 * rsrc fork vnode. However, if we're in this block of code, that means that we noticed
5765 * that the cnode has gone open-unlinked. In this case, the caller requested that we
5766 * not do any other work and return an errno. The caller will be responsible for
5767 * dropping the iocount we just acquired because we can't do it until we've released
5768 * the cnode lock.
5769 */
5770 if (error == 0) {
5771 *rvpp = rvp;
5772 }
5773 return delete_status;
5774 }
5775 }
5776
5777 /*
5778 * When our lock was relinquished, the resource fork
5779 * could have been recycled. Check for this and try
5780 * again.
5781 */
5782 if (error == ENOENT)
5783 goto restart;
5784 }
5785 if (error) {
5786 const char * name = (const char *)VTOC(vp)->c_desc.cd_nameptr;
5787
5788 if (name)
5789 printf("hfs_vgetrsrc: couldn't get resource"
5790 " fork for %s, vol=%s, err=%d\n", name, hfsmp->vcbVN, error);
5791 return (error);
5792 }
5793 } else {
5794 struct cat_fork rsrcfork;
5795 struct componentname cn;
5796 struct cat_desc *descptr = NULL;
5797 struct cat_desc to_desc;
5798 char delname[32];
5799 int lockflags;
5800 int newvnode_flags = 0;
5801
5802 /*
5803 * Make sure the cnode lock is exclusive; if not, upgrade it.
5804 *
5805 * We assume that we were called from a read-only VNOP (getattr)
5806 * and that it's safe to have the cnode lock dropped and reacquired.
5807 */
5808 if (cp->c_lockowner != current_thread()) {
5809 if (!can_drop_lock) {
5810 return (EINVAL);
5811 }
5812 /*
5813 * If the upgrade fails we lose the lock and
5814 * have to take the exclusive lock on our own.
5815 */
5816 if (lck_rw_lock_shared_to_exclusive(&cp->c_rwlock) == FALSE)
5817 lck_rw_lock_exclusive(&cp->c_rwlock);
5818 cp->c_lockowner = current_thread();
5819 }
5820
5821 /*
5822 * hfs_vgetrsrc may be invoked for a cnode that has already been marked
5823 * C_DELETED. This is because we need to continue to provide rsrc
5824 * fork access to open-unlinked files. In this case, build a fake descriptor
5825 * like in hfs_removefile. If we don't do this, buildkey will fail in
5826 * cat_lookup because this cnode has no name in its descriptor. However,
5827 * only do this if the caller did not specify that they wanted us to
5828 * error out upon encountering open-unlinked files.
5829 */
5830
5831 if ((error_on_unlinked) && (can_drop_lock)) {
5832 if ((error = hfs_checkdeleted(cp))) {
5833 return error;
5834 }
5835 }
5836
5837 if ((cp->c_flag & C_DELETED ) && (cp->c_desc.cd_namelen == 0)) {
5838 bzero (&to_desc, sizeof(to_desc));
5839 bzero (delname, 32);
5840 MAKE_DELETED_NAME(delname, sizeof(delname), cp->c_fileid);
5841 to_desc.cd_nameptr = (const u_int8_t*) delname;
5842 to_desc.cd_namelen = strlen(delname);
5843 to_desc.cd_parentcnid = hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid;
5844 to_desc.cd_flags = 0;
5845 to_desc.cd_cnid = cp->c_cnid;
5846
5847 descptr = &to_desc;
5848 }
5849 else {
5850 descptr = &cp->c_desc;
5851 }
5852
5853
5854 lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
5855
5856 /*
5857 * We call cat_idlookup (instead of cat_lookup) below because we can't
5858 * trust the descriptor in the provided cnode for lookups at this point.
5859 * Between the time of the original lookup of this vnode and now, the
5860 * descriptor could have gotten swapped or replaced. If this occurred,
5861 * the parent/name combo originally desired may not necessarily be provided
5862 * if we use the descriptor. Even worse, if the vnode represents
5863 * a hardlink, we could have removed one of the links from the namespace
5864 * but left the descriptor alone, since hfs_unlink does not invalidate
5865 * the descriptor in the cnode if other links still point to the inode.
5866 *
5867 * Consider the following (slightly contrived) scenario:
5868 * /tmp/a <--> /tmp/b (hardlinks).
5869 * 1. Thread A: open rsrc fork on /tmp/b.
5870 * 1a. Thread A: does lookup, goes out to lunch right before calling getnamedstream.
5871 * 2. Thread B does 'mv /foo/b /tmp/b' and succeeds.
5873 * 3. Thread A comes back and wants rsrc fork info for /tmp/b.
5874 *
5875 * Even though the hardlink backing /tmp/b is now eliminated, the descriptor
5876 * is not removed/updated during the unlink process. So, if you were to
5877 * do a lookup on /tmp/b, you'd acquire an entirely different record's resource
5878 * fork.
5879 *
5880 * As a result, we use the fileid, which should be invariant for the lifetime
5881 * of the cnode (possibly barring calls to exchangedata).
5882 *
5883 * Addendum: We can't do the above for HFS standard since we aren't guaranteed to
5884 * have thread records for files. They were only required for directories. So
5885 * we need to do the lookup with the catalog name. This is OK since hardlinks were
5886 * never allowed on HFS standard.
5887 */
5888
5889 /* Get resource fork data */
5890 if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
5891 error = cat_idlookup (hfsmp, cp->c_fileid, 0, 1, NULL, NULL, &rsrcfork);
5892 }
5893 #if CONFIG_HFS_STD
5894 else {
5895 /*
5896 * HFS standard only:
5897 *
5898 * Get the resource fork for this item with a cat_lookup call, but do not
5899 * force a case lookup since HFS standard is case-insensitive only. We
5900 * don't want the descriptor; just the fork data here. If we tried to
5901 * do a ID lookup (via thread record -> catalog record), then we might fail
5902 * prematurely since, as noted above, thread records were not strictly required
5903 * on files in HFS.
5904 */
5905 error = cat_lookup (hfsmp, descptr, 1, 0, (struct cat_desc*)NULL,
5906 (struct cat_attr*)NULL, &rsrcfork, NULL);
5907 }
5908 #endif
5909
5910 hfs_systemfile_unlock(hfsmp, lockflags);
5911 if (error) {
5912 return (error);
5913 }
5914 /*
5915 * Supply hfs_getnewvnode with a component name.
5916 */
5917 cn.cn_pnbuf = NULL;
5918 if (descptr->cd_nameptr) {
5919 MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
5920 cn.cn_nameiop = LOOKUP;
5921 cn.cn_flags = ISLASTCN | HASBUF;
5922 cn.cn_context = NULL;
5923 cn.cn_pnlen = MAXPATHLEN;
5924 cn.cn_nameptr = cn.cn_pnbuf;
5925 cn.cn_hash = 0;
5926 cn.cn_consume = 0;
5927 cn.cn_namelen = snprintf(cn.cn_nameptr, MAXPATHLEN,
5928 "%s%s", descptr->cd_nameptr,
5929 _PATH_RSRCFORKSPEC);
5930 }
5931 dvp = vnode_getparent(vp);
5932 error = hfs_getnewvnode(hfsmp, dvp, cn.cn_pnbuf ? &cn : NULL,
5933 descptr, GNV_WANTRSRC | GNV_SKIPLOCK, &cp->c_attr,
5934 &rsrcfork, &rvp, &newvnode_flags);
5935 if (dvp)
5936 vnode_put(dvp);
5937 if (cn.cn_pnbuf)
5938 FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
5939 if (error)
5940 return (error);
5941 }
5942
5943 *rvpp = rvp;
5944 return (0);
5945 }
5946
5947 /*
5948 * Wrapper for special device reads
5949 */
5950 int
5951 hfsspec_read(ap)
5952 struct vnop_read_args /* {
5953 struct vnode *a_vp;
5954 struct uio *a_uio;
5955 int a_ioflag;
5956 vfs_context_t a_context;
5957 } */ *ap;
5958 {
5959 /*
5960 * Set access flag.
5961 */
5962 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
5963 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_read), ap));
5964 }
5965
5966 /*
5967 * Wrapper for special device writes
5968 */
5969 int
5970 hfsspec_write(ap)
5971 struct vnop_write_args /* {
5972 struct vnode *a_vp;
5973 struct uio *a_uio;
5974 int a_ioflag;
5975 vfs_context_t a_context;
5976 } */ *ap;
5977 {
5978 /*
5979 * Set update and change flags.
5980 */
5981 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
5982 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
5983 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_write), ap));
5984 }
5985
5986 /*
5987 * Wrapper for special device close
5988 *
5989 * Update the times on the cnode then do device close.
5990 */
5991 int
5992 hfsspec_close(ap)
5993 struct vnop_close_args /* {
5994 struct vnode *a_vp;
5995 int a_fflag;
5996 vfs_context_t a_context;
5997 } */ *ap;
5998 {
5999 struct vnode *vp = ap->a_vp;
6000 struct cnode *cp;
6001
6002 if (vnode_isinuse(ap->a_vp, 0)) {
6003 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6004 cp = VTOC(vp);
6005 hfs_touchtimes(VTOHFS(vp), cp);
6006 hfs_unlock(cp);
6007 }
6008 }
6009 return (VOCALL (spec_vnodeop_p, VOFFSET(vnop_close), ap));
6010 }
6011
6012 #if FIFO
6013 /*
6014 * Wrapper for fifo reads
6015 */
6016 static int
6017 hfsfifo_read(ap)
6018 struct vnop_read_args /* {
6019 struct vnode *a_vp;
6020 struct uio *a_uio;
6021 int a_ioflag;
6022 vfs_context_t a_context;
6023 } */ *ap;
6024 {
6025 /*
6026 * Set access flag.
6027 */
6028 VTOC(ap->a_vp)->c_touch_acctime = TRUE;
6029 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_read), ap));
6030 }
6031
6032 /*
6033 * Wrapper for fifo writes
6034 */
6035 static int
6036 hfsfifo_write(ap)
6037 struct vnop_write_args /* {
6038 struct vnode *a_vp;
6039 struct uio *a_uio;
6040 int a_ioflag;
6041 vfs_context_t a_context;
6042 } */ *ap;
6043 {
6044 /*
6045 * Set update and change flags.
6046 */
6047 VTOC(ap->a_vp)->c_touch_chgtime = TRUE;
6048 VTOC(ap->a_vp)->c_touch_modtime = TRUE;
6049 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_write), ap));
6050 }
6051
6052 /*
6053 * Wrapper for fifo close
6054 *
6055 * Update the times on the cnode then do device close.
6056 */
6057 static int
6058 hfsfifo_close(ap)
6059 struct vnop_close_args /* {
6060 struct vnode *a_vp;
6061 int a_fflag;
6062 vfs_context_t a_context;
6063 } */ *ap;
6064 {
6065 struct vnode *vp = ap->a_vp;
6066 struct cnode *cp;
6067
6068 if (vnode_isinuse(ap->a_vp, 1)) {
6069 if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0) {
6070 cp = VTOC(vp);
6071 hfs_touchtimes(VTOHFS(vp), cp);
6072 hfs_unlock(cp);
6073 }
6074 }
6075 return (VOCALL (fifo_vnodeop_p, VOFFSET(vnop_close), ap));
6076 }
6077
6078
6079 #endif /* FIFO */
6080
6081 /*
6082 * Synchronize a file's in-core state with that on disk.
6083 */
6084 int
6085 hfs_vnop_fsync(ap)
6086 struct vnop_fsync_args /* {
6087 struct vnode *a_vp;
6088 int a_waitfor;
6089 vfs_context_t a_context;
6090 } */ *ap;
6091 {
6092 struct vnode* vp = ap->a_vp;
6093 int error;
6094
6095 /* Note: We check the hfs flags instead of the vfs mount flag because during
6096 * a read-write update, hfs marks itself read-write much earlier than
6097 * the vfs does; checking the hfs flag therefore avoids skipping writes
6098 * such as zeroing out unused nodes, creating the hotfiles B-tree, etc.
6099 */
6100 if (VTOHFS(vp)->hfs_flags & HFS_READ_ONLY) {
6101 return 0;
6102 }
6103
6104 #if CONFIG_PROTECT
6105 if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
6106 return (error);
6107 }
6108 #endif /* CONFIG_PROTECT */
6109
6110 /*
6111 * We need to allow ENOENT lock errors since unlink
6112 * system call can call VNOP_FSYNC during vclean.
6113 */
6114 error = hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
6115 if (error)
6116 return (0);
6117
6118 error = hfs_fsync(vp, ap->a_waitfor, 0, vfs_context_proc(ap->a_context));
6119
6120 hfs_unlock(VTOC(vp));
6121 return (error);
6122 }
6123
6124
6125 int
6126 hfs_vnop_whiteout(ap)
6127 struct vnop_whiteout_args /* {
6128 struct vnode *a_dvp;
6129 struct componentname *a_cnp;
6130 int a_flags;
6131 vfs_context_t a_context;
6132 } */ *ap;
6133 {
6134 int error = 0;
6135 struct vnode *vp = NULL;
6136 struct vnode_attr va;
6137 struct vnop_lookup_args lookup_args;
6138 struct vnop_remove_args remove_args;
6139 struct hfsmount *hfsmp;
6140
6141 hfsmp = VTOHFS(ap->a_dvp);
6142 if (hfsmp->hfs_flags & HFS_STANDARD) {
6143 error = ENOTSUP;
6144 goto exit;
6145 }
6146
6147 switch (ap->a_flags) {
6148 case LOOKUP:
6149 error = 0;
6150 break;
6151
6152 case CREATE:
6153 VATTR_INIT(&va);
6154 VATTR_SET(&va, va_type, VREG);
6155 VATTR_SET(&va, va_mode, S_IFWHT);
6156 VATTR_SET(&va, va_uid, 0);
6157 VATTR_SET(&va, va_gid, 0);
6158
6159 error = hfs_makenode(ap->a_dvp, &vp, ap->a_cnp, &va, ap->a_context);
6160 /* No need to release the vnode as no vnode is created for whiteouts */
6161 break;
6162
6163 case DELETE:
6164 lookup_args.a_dvp = ap->a_dvp;
6165 lookup_args.a_vpp = &vp;
6166 lookup_args.a_cnp = ap->a_cnp;
6167 lookup_args.a_context = ap->a_context;
6168
6169 error = hfs_vnop_lookup(&lookup_args);
6170 if (error) {
6171 break;
6172 }
6173
6174 remove_args.a_dvp = ap->a_dvp;
6175 remove_args.a_vp = vp;
6176 remove_args.a_cnp = ap->a_cnp;
6177 remove_args.a_flags = 0;
6178 remove_args.a_context = ap->a_context;
6179
6180 error = hfs_vnop_remove(&remove_args);
6181 vnode_put(vp);
6182 break;
6183
6184 default:
6185 panic("hfs_vnop_whiteout: unknown operation (flag = %x)\n", ap->a_flags);
6186 };
6187
6188 exit:
6189 return (error);
6190 }
6191
6192 int (**hfs_vnodeop_p)(void *);
6193
6194 #define VOPFUNC int (*)(void *)
6195
6196
6197 #if CONFIG_HFS_STD
6198 int (**hfs_std_vnodeop_p) (void *);
6199 static int hfs_readonly_op (__unused void* ap) { return (EROFS); }
6200
6201 /*
6202 * In 10.6 and forward, HFS Standard is read-only and deprecated. The vnop table below
6203 * is for use with HFS Standard to block out operations that would modify the file system.
6204 */
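/*
 * Note (added for clarity): in the table below every vnop that could modify
 * the volume is routed to hfs_readonly_op, which simply returns EROFS;
 * read-only operations fall through to the regular HFS+ handlers.
 */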
6205
6206 struct vnodeopv_entry_desc hfs_standard_vnodeop_entries[] = {
6207 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6208 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
6209 { &vnop_create_desc, (VOPFUNC)hfs_readonly_op }, /* create (READONLY) */
6210 { &vnop_mknod_desc, (VOPFUNC)hfs_readonly_op }, /* mknod (READONLY) */
6211 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
6212 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
6213 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6214 { &vnop_setattr_desc, (VOPFUNC)hfs_readonly_op }, /* setattr */
6215 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
6216 { &vnop_write_desc, (VOPFUNC)hfs_readonly_op }, /* write (READONLY) */
6217 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
6218 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
6219 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
6220 { &vnop_exchange_desc, (VOPFUNC)hfs_readonly_op }, /* exchange (READONLY) */
6221 { &vnop_mmap_desc, (VOPFUNC)err_mmap }, /* mmap */
6222 { &vnop_fsync_desc, (VOPFUNC)hfs_readonly_op}, /* fsync (READONLY) */
6223 { &vnop_remove_desc, (VOPFUNC)hfs_readonly_op }, /* remove (READONLY) */
6224 { &vnop_link_desc, (VOPFUNC)hfs_readonly_op }, /* link (READONLY) */
6225 { &vnop_rename_desc, (VOPFUNC)hfs_readonly_op }, /* rename (READONLY)*/
6226 { &vnop_mkdir_desc, (VOPFUNC)hfs_readonly_op }, /* mkdir (READONLY) */
6227 { &vnop_rmdir_desc, (VOPFUNC)hfs_readonly_op }, /* rmdir (READONLY) */
6228 { &vnop_symlink_desc, (VOPFUNC)hfs_readonly_op }, /* symlink (READONLY) */
6229 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
6230 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
6231 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
6232 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6233 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6234 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
6235 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
6236 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6237 { &vnop_allocate_desc, (VOPFUNC)hfs_readonly_op }, /* allocate (READONLY) */
6238 #if CONFIG_SEARCHFS
6239 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
6240 #else
6241 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
6242 #endif
6243 { &vnop_bwrite_desc, (VOPFUNC)hfs_readonly_op }, /* bwrite (READONLY) */
6244 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
6245 { &vnop_pageout_desc,(VOPFUNC) hfs_readonly_op }, /* pageout (READONLY) */
6246 { &vnop_copyfile_desc, (VOPFUNC)hfs_readonly_op }, /* copyfile (READONLY)*/
6247 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6248 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6249 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6250 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6251 { &vnop_setxattr_desc, (VOPFUNC)hfs_readonly_op}, /* set xattr (READONLY) */
6252 { &vnop_removexattr_desc, (VOPFUNC)hfs_readonly_op}, /* remove xattr (READONLY) */
6253 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6254 { &vnop_whiteout_desc, (VOPFUNC)hfs_readonly_op}, /* whiteout (READONLY) */
6255 #if NAMEDSTREAMS
6256 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
6257 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_readonly_op },
6258 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_readonly_op },
6259 #endif
6260 { NULL, (VOPFUNC)NULL }
6261 };
6262
6263 struct vnodeopv_desc hfs_std_vnodeop_opv_desc =
6264 { &hfs_std_vnodeop_p, hfs_standard_vnodeop_entries };
6265 #endif
6266
6267 /* VNOP table for HFS+ */
6268 struct vnodeopv_entry_desc hfs_vnodeop_entries[] = {
6269 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6270 { &vnop_lookup_desc, (VOPFUNC)hfs_vnop_lookup }, /* lookup */
6271 { &vnop_create_desc, (VOPFUNC)hfs_vnop_create }, /* create */
6272 { &vnop_mknod_desc, (VOPFUNC)hfs_vnop_mknod }, /* mknod */
6273 { &vnop_open_desc, (VOPFUNC)hfs_vnop_open }, /* open */
6274 { &vnop_close_desc, (VOPFUNC)hfs_vnop_close }, /* close */
6275 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6276 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6277 { &vnop_read_desc, (VOPFUNC)hfs_vnop_read }, /* read */
6278 { &vnop_write_desc, (VOPFUNC)hfs_vnop_write }, /* write */
6279 { &vnop_ioctl_desc, (VOPFUNC)hfs_vnop_ioctl }, /* ioctl */
6280 { &vnop_select_desc, (VOPFUNC)hfs_vnop_select }, /* select */
6281 { &vnop_revoke_desc, (VOPFUNC)nop_revoke }, /* revoke */
6282 { &vnop_exchange_desc, (VOPFUNC)hfs_vnop_exchange }, /* exchange */
6283 { &vnop_mmap_desc, (VOPFUNC)hfs_vnop_mmap }, /* mmap */
6284 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6285 { &vnop_remove_desc, (VOPFUNC)hfs_vnop_remove }, /* remove */
6286 { &vnop_link_desc, (VOPFUNC)hfs_vnop_link }, /* link */
6287 { &vnop_rename_desc, (VOPFUNC)hfs_vnop_rename }, /* rename */
6288 { &vnop_mkdir_desc, (VOPFUNC)hfs_vnop_mkdir }, /* mkdir */
6289 { &vnop_rmdir_desc, (VOPFUNC)hfs_vnop_rmdir }, /* rmdir */
6290 { &vnop_symlink_desc, (VOPFUNC)hfs_vnop_symlink }, /* symlink */
6291 { &vnop_readdir_desc, (VOPFUNC)hfs_vnop_readdir }, /* readdir */
6292 { &vnop_readdirattr_desc, (VOPFUNC)hfs_vnop_readdirattr }, /* readdirattr */
6293 { &vnop_readlink_desc, (VOPFUNC)hfs_vnop_readlink }, /* readlink */
6294 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6295 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6296 { &vnop_strategy_desc, (VOPFUNC)hfs_vnop_strategy }, /* strategy */
6297 { &vnop_pathconf_desc, (VOPFUNC)hfs_vnop_pathconf }, /* pathconf */
6298 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6299 { &vnop_allocate_desc, (VOPFUNC)hfs_vnop_allocate }, /* allocate */
6300 #if CONFIG_SEARCHFS
6301 { &vnop_searchfs_desc, (VOPFUNC)hfs_vnop_search }, /* search fs */
6302 #else
6303 { &vnop_searchfs_desc, (VOPFUNC)err_searchfs }, /* search fs */
6304 #endif
6305 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite }, /* bwrite */
6306 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* pagein */
6307 { &vnop_pageout_desc,(VOPFUNC) hfs_vnop_pageout }, /* pageout */
6308 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6309 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6310 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6311 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6312 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6313 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
6314 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
6315 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6316 { &vnop_whiteout_desc, (VOPFUNC)hfs_vnop_whiteout},
6317 #if NAMEDSTREAMS
6318 { &vnop_getnamedstream_desc, (VOPFUNC)hfs_vnop_getnamedstream },
6319 { &vnop_makenamedstream_desc, (VOPFUNC)hfs_vnop_makenamedstream },
6320 { &vnop_removenamedstream_desc, (VOPFUNC)hfs_vnop_removenamedstream },
6321 #endif
6322 { NULL, (VOPFUNC)NULL }
6323 };
6324
6325 struct vnodeopv_desc hfs_vnodeop_opv_desc =
6326 { &hfs_vnodeop_p, hfs_vnodeop_entries };
6327
6328
6329 /* Spec Op vnop table for HFS+ */
6330 int (**hfs_specop_p)(void *);
6331 struct vnodeopv_entry_desc hfs_specop_entries[] = {
6332 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6333 { &vnop_lookup_desc, (VOPFUNC)spec_lookup }, /* lookup */
6334 { &vnop_create_desc, (VOPFUNC)spec_create }, /* create */
6335 { &vnop_mknod_desc, (VOPFUNC)spec_mknod }, /* mknod */
6336 { &vnop_open_desc, (VOPFUNC)spec_open }, /* open */
6337 { &vnop_close_desc, (VOPFUNC)hfsspec_close }, /* close */
6338 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6339 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6340 { &vnop_read_desc, (VOPFUNC)hfsspec_read }, /* read */
6341 { &vnop_write_desc, (VOPFUNC)hfsspec_write }, /* write */
6342 { &vnop_ioctl_desc, (VOPFUNC)spec_ioctl }, /* ioctl */
6343 { &vnop_select_desc, (VOPFUNC)spec_select }, /* select */
6344 { &vnop_revoke_desc, (VOPFUNC)spec_revoke }, /* revoke */
6345 { &vnop_mmap_desc, (VOPFUNC)spec_mmap }, /* mmap */
6346 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6347 { &vnop_remove_desc, (VOPFUNC)spec_remove }, /* remove */
6348 { &vnop_link_desc, (VOPFUNC)spec_link }, /* link */
6349 { &vnop_rename_desc, (VOPFUNC)spec_rename }, /* rename */
6350 { &vnop_mkdir_desc, (VOPFUNC)spec_mkdir }, /* mkdir */
6351 { &vnop_rmdir_desc, (VOPFUNC)spec_rmdir }, /* rmdir */
6352 { &vnop_symlink_desc, (VOPFUNC)spec_symlink }, /* symlink */
6353 { &vnop_readdir_desc, (VOPFUNC)spec_readdir }, /* readdir */
6354 { &vnop_readlink_desc, (VOPFUNC)spec_readlink }, /* readlink */
6355 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6356 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6357 { &vnop_strategy_desc, (VOPFUNC)spec_strategy }, /* strategy */
6358 { &vnop_pathconf_desc, (VOPFUNC)spec_pathconf }, /* pathconf */
6359 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6360 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
6361 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
6362 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
6363 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6364 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6365 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6366 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6367 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
6368 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
6369 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6370 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
6371 };
6372 struct vnodeopv_desc hfs_specop_opv_desc =
6373 { &hfs_specop_p, hfs_specop_entries };
6374
6375 #if FIFO
6376 /* HFS+ FIFO VNOP table */
6377 int (**hfs_fifoop_p)(void *);
6378 struct vnodeopv_entry_desc hfs_fifoop_entries[] = {
6379 { &vnop_default_desc, (VOPFUNC)vn_default_error },
6380 { &vnop_lookup_desc, (VOPFUNC)fifo_lookup }, /* lookup */
6381 { &vnop_create_desc, (VOPFUNC)fifo_create }, /* create */
6382 { &vnop_mknod_desc, (VOPFUNC)fifo_mknod }, /* mknod */
6383 { &vnop_open_desc, (VOPFUNC)fifo_open }, /* open */
6384 { &vnop_close_desc, (VOPFUNC)hfsfifo_close }, /* close */
6385 { &vnop_getattr_desc, (VOPFUNC)hfs_vnop_getattr }, /* getattr */
6386 { &vnop_setattr_desc, (VOPFUNC)hfs_vnop_setattr }, /* setattr */
6387 { &vnop_read_desc, (VOPFUNC)hfsfifo_read }, /* read */
6388 { &vnop_write_desc, (VOPFUNC)hfsfifo_write }, /* write */
6389 { &vnop_ioctl_desc, (VOPFUNC)fifo_ioctl }, /* ioctl */
6390 { &vnop_select_desc, (VOPFUNC)fifo_select }, /* select */
6391 { &vnop_revoke_desc, (VOPFUNC)fifo_revoke }, /* revoke */
6392 { &vnop_mmap_desc, (VOPFUNC)fifo_mmap }, /* mmap */
6393 { &vnop_fsync_desc, (VOPFUNC)hfs_vnop_fsync }, /* fsync */
6394 { &vnop_remove_desc, (VOPFUNC)fifo_remove }, /* remove */
6395 { &vnop_link_desc, (VOPFUNC)fifo_link }, /* link */
6396 { &vnop_rename_desc, (VOPFUNC)fifo_rename }, /* rename */
6397 { &vnop_mkdir_desc, (VOPFUNC)fifo_mkdir }, /* mkdir */
6398 { &vnop_rmdir_desc, (VOPFUNC)fifo_rmdir }, /* rmdir */
6399 { &vnop_symlink_desc, (VOPFUNC)fifo_symlink }, /* symlink */
6400 { &vnop_readdir_desc, (VOPFUNC)fifo_readdir }, /* readdir */
6401 { &vnop_readlink_desc, (VOPFUNC)fifo_readlink }, /* readlink */
6402 { &vnop_inactive_desc, (VOPFUNC)hfs_vnop_inactive }, /* inactive */
6403 { &vnop_reclaim_desc, (VOPFUNC)hfs_vnop_reclaim }, /* reclaim */
6404 { &vnop_strategy_desc, (VOPFUNC)fifo_strategy }, /* strategy */
6405 { &vnop_pathconf_desc, (VOPFUNC)fifo_pathconf }, /* pathconf */
6406 { &vnop_advlock_desc, (VOPFUNC)err_advlock }, /* advlock */
6407 { &vnop_bwrite_desc, (VOPFUNC)hfs_vnop_bwrite },
6408 { &vnop_pagein_desc, (VOPFUNC)hfs_vnop_pagein }, /* Pagein */
6409 { &vnop_pageout_desc, (VOPFUNC)hfs_vnop_pageout }, /* Pageout */
6410 { &vnop_copyfile_desc, (VOPFUNC)err_copyfile }, /* copyfile */
6411 { &vnop_blktooff_desc, (VOPFUNC)hfs_vnop_blktooff }, /* blktooff */
6412 { &vnop_offtoblk_desc, (VOPFUNC)hfs_vnop_offtoblk }, /* offtoblk */
6413 { &vnop_blockmap_desc, (VOPFUNC)hfs_vnop_blockmap }, /* blockmap */
6414 { &vnop_getxattr_desc, (VOPFUNC)hfs_vnop_getxattr},
6415 { &vnop_setxattr_desc, (VOPFUNC)hfs_vnop_setxattr},
6416 { &vnop_removexattr_desc, (VOPFUNC)hfs_vnop_removexattr},
6417 { &vnop_listxattr_desc, (VOPFUNC)hfs_vnop_listxattr},
6418 { (struct vnodeop_desc*)NULL, (VOPFUNC)NULL }
6419 };
6420 struct vnodeopv_desc hfs_fifoop_opv_desc =
6421 { &hfs_fifoop_p, hfs_fifoop_entries };
6422 #endif /* FIFO */
6423
6424
6425